Three.js 带偏移的缩放以适合
Three.js Zoom-to-Fit with offset
我正在尝试实现一个缩放以适合(zoom-to-fit)功能,以确保一组点完全适合绘图区域,同时在图像的所有边上添加可配置的偏移量。即,缩放以适合图中的框选区域,而不是整个查看器区域:
(请注意,此图像中的偏移量不准确)
我这里用的是透视相机。该函数必须更新相机位置,而不是它的参数或视图方向。
我找到了一个工作良好的缩放以适合功能*,但我在实现偏移方面遇到了困难。
我的第一种方法只是偏移点坐标(使用相机的坐标系)没有成功。显示了更多图像,但我选择的点并没有出现在该区域的边缘。回想起来这是有道理的,因为透视变形会使点远离它们的预期位置。
任何人都可以提供有关如何正确计算相机距离和位置的可能解决方案吗?
* Three.js 没有内置的缩放以适合功能,但是网上有很多关于如何实现这个逻辑的示例和问题。对这种用例来说,最好的可能是 CameraViewBox。我已在下面的 fiddle 中将其示例改编为我的用例:
// Three.js core and OrbitControls, loaded from the Skypack CDN (three r130).
import * as THREE from 'https://cdn.skypack.dev/three@0.130.1';
import { OrbitControls } from 'https://cdn.skypack.dev/three@0.130.1/examples/jsm/controls/OrbitControls.js';
// Module-level handles shared by init(), animate() and the event handlers below.
let camera, controls, scene, renderer, material;
// True when the pointer moved between mousedown and mouseup (drag vs. click).
let isDragging = false;
// Zoom-to-fit helper: computes a camera position/target that frames a set of
// objects (THREE.CameraViewBox — provided by an extension, not core three.js).
let cameraViewBox;
const raycaster = new THREE.Raycaster();
const mouse = new THREE.Vector2();
// All selectable box meshes created in init().
const meshes = [];
// Meshes currently selected by clicking (toggled in onMouseUp).
const selection = new Set();
// Red material swapped in for selected meshes.
const selectedMaterial = new THREE.MeshPhongMaterial({ color: 0xff0000, flatShading: true });
// Plane y = 0 with normal +Y; passed to getCameraPositionAndTarget —
// presumably constrains the fitted target to the floor (TODO confirm
// against the CameraViewBox API).
const floorPlane = new THREE.Plane(new THREE.Vector3(0, 1, 0));
init();
animate();
/**
 * Builds the scene (background, fog, 500 random boxes, lights), the renderer,
 * the perspective camera with orbit controls, the CameraViewBox helper, and
 * wires up window/mouse event handlers.
 */
function init() {
  scene = new THREE.Scene();
  scene.background = new THREE.Color(0xcccccc);
  scene.fog = new THREE.FogExp2(0xcccccc, 0.002);

  // Full-window antialiased renderer attached to the document body.
  renderer = new THREE.WebGLRenderer({ antialias: true });
  renderer.setPixelRatio(window.devicePixelRatio);
  renderer.setSize(window.innerWidth, window.innerHeight);
  document.body.appendChild(renderer.domElement);

  camera = new THREE.PerspectiveCamera(60, window.innerWidth / window.innerHeight, 1, 1000);
  camera.position.set(400, 200, 0);

  // Create the cameraViewBox
  cameraViewBox = new THREE.CameraViewBox();
  cameraViewBox.setViewFromCamera(camera);

  // controls
  controls = new OrbitControls(camera, renderer.domElement);
  controls.minDistance = 100;
  controls.maxDistance = 500;
  controls.maxPolarAngle = Math.PI / 2;

  // world: 500 boxes scattered on the floor with random heights.
  const boxGeometry = new THREE.BoxGeometry(1, 1, 1);
  boxGeometry.translate(0, 0.5, 0); // pivot at the base so boxes sit on y = 0
  material = new THREE.MeshPhongMaterial({
    color: 0xffffff,
    flatShading: true
  });
  for (let i = 0; i < 500; i++) {
    const box = new THREE.Mesh(boxGeometry, material);
    box.position.x = Math.random() * 1600 - 800;
    box.position.y = 0;
    box.position.z = Math.random() * 1600 - 800;
    box.scale.x = 20;
    box.scale.y = Math.random() * 80 + 10;
    box.scale.z = 20;
    box.updateMatrix();
    box.matrixAutoUpdate = false; // static mesh: freeze its matrix
    scene.add(box);
    meshes.push(box);
  }

  // lights
  const warmLight = new THREE.DirectionalLight(0xffffff);
  warmLight.position.set(1, 1, 1);
  scene.add(warmLight);
  const coolLight = new THREE.DirectionalLight(0x002288);
  coolLight.position.set(-1, -1, -1);
  scene.add(coolLight);
  const ambientLight = new THREE.AmbientLight(0x222222);
  scene.add(ambientLight);

  window.addEventListener('resize', onWindowResize);

  // Add DOM events
  renderer.domElement.addEventListener('mousedown', onMouseDown, false);
  window.addEventListener('mousemove', onMouseMove, false);
  renderer.domElement.addEventListener('mouseup', onMouseUp, false);
}
/** Keeps the camera aspect ratio and canvas size in sync with the window. */
function onWindowResize() {
  const { innerWidth: width, innerHeight: height } = window;
  camera.aspect = width / height;
  camera.updateProjectionMatrix();
  renderer.setSize(width, height);
}
// Render loop: schedules itself on every animation frame and re-renders.
function animate() {
requestAnimationFrame(animate);
renderer.render(scene, camera);
}
// Add selection support
// A fresh mousedown starts a potential click; assume no drag yet.
function onMouseDown() {
isDragging = false;
}
// Any pointer movement marks the gesture as a drag (camera orbit),
// so the following mouseup will not be treated as a selection click.
function onMouseMove() {
isDragging = true;
}
/**
 * Click handler: toggles selection of the mesh under the cursor.
 * A mouseup that ends a drag (camera orbit) is ignored — only a
 * stationary click counts as a selection toggle.
 * @param {MouseEvent} event
 */
function onMouseUp(event) {
  // Original code set isDragging = false in both branches of an if/else;
  // capture the flag once and reset it unconditionally instead.
  const wasDragging = isDragging;
  isDragging = false;
  if (wasDragging) {
    return;
  }

  // Convert the click position to normalized device coordinates (-1..1).
  // The canvas fills the window, so window dimensions are correct here.
  mouse.x = (event.clientX / window.innerWidth) * 2 - 1;
  mouse.y = -(event.clientY / window.innerHeight) * 2 + 1;

  raycaster.setFromCamera(mouse, camera);
  const intersects = raycaster.intersectObjects(meshes);
  if (intersects.length > 0) {
    // Toggle the closest hit: swap material and update the selection set.
    const mesh = intersects[0].object;
    if (selection.has(mesh)) {
      mesh.material = material;
      selection.delete(mesh);
    } else {
      mesh.material = selectedMaterial;
      selection.add(mesh);
    }
  }
}
/**
 * Zoom-to-fit: repositions the camera so the selected meshes fill the view.
 * Only camera.position and controls.target are modified — the camera's
 * parameters (FOV etc.) are left untouched.
 */
function centerOnSelection() {
  if (selection.size === 0) return;
  cameraViewBox.setViewFromCamera(camera);
  cameraViewBox.setFromObjects([...selection]);
  cameraViewBox.getCameraPositionAndTarget(camera.position, controls.target, floorPlane);
  controls.update();
}
我现在能够在某种程度上自己解决这个问题。如果我们从对称偏移量开始,这会非常简单:
使用较窄的 FOV 角度(绿色)计算相机位置会使最终图像中的投影点偏移一定量。如果我们找到正确的角度,这些点最终会出现在我们正在寻找的精确偏移处。
我们可以使用基本三角学计算这个角度。我们计算到归一化设备坐标平面的距离(即 height/width 的 -1 到 1;图像中的蓝色)然后应用偏移量(百分比值范围从 0.0 到 1.0)并创建一个新角度:
tan(FOV / 2) = 1 / dist => dist = 1 / tan(FOV / 2)
tan(FOVg / 2) = (1 - offset) / dist => FOVg = atan((1 - offset) / dist) * 2
对水平 FOV(按纵横比修改)重复此操作,使用相同或不同的偏移值。然后根据这些新角度应用现有的缩放以适合逻辑。
这种方法适用于对称偏移。通过计算 4 个单独的新角度,对于不对称偏移可能也是如此。棘手的部分是使用这些计算正确的相机位置和缩放...
我正在尝试实现一个缩放以适合(zoom-to-fit)功能,以确保一组点完全适合绘图区域,同时在图像的所有边上添加可配置的偏移量。即,缩放以适合图中的框选区域,而不是整个查看器区域:
(请注意,此图像中的偏移量不准确)
我这里用的是透视相机。该函数必须更新相机位置,而不是它的参数或视图方向。
我找到了一个工作良好的缩放以适合功能*,但我在实现偏移方面遇到了困难。
我的第一种方法只是偏移点坐标(使用相机的坐标系)没有成功。显示了更多图像,但我选择的点并没有出现在该区域的边缘。回想起来这是有道理的,因为透视变形会使点远离它们的预期位置。
任何人都可以提供有关如何正确计算相机距离和位置的可能解决方案吗?
* Three.js 没有内置的缩放以适合功能,但是网上有很多关于如何实现这个逻辑的示例和问题。对这种用例来说,最好的可能是 CameraViewBox。我已在下面的 fiddle 中将其示例改编为我的用例:
// Three.js core and OrbitControls, loaded from the Skypack CDN (three r130).
import * as THREE from 'https://cdn.skypack.dev/three@0.130.1';
import { OrbitControls } from 'https://cdn.skypack.dev/three@0.130.1/examples/jsm/controls/OrbitControls.js';
// Module-level handles shared by init(), animate() and the event handlers below.
let camera, controls, scene, renderer, material;
// True when the pointer moved between mousedown and mouseup (drag vs. click).
let isDragging = false;
// Zoom-to-fit helper: computes a camera position/target that frames a set of
// objects (THREE.CameraViewBox — provided by an extension, not core three.js).
let cameraViewBox;
const raycaster = new THREE.Raycaster();
const mouse = new THREE.Vector2();
// All selectable box meshes created in init().
const meshes = [];
// Meshes currently selected by clicking (toggled in onMouseUp).
const selection = new Set();
// Red material swapped in for selected meshes.
const selectedMaterial = new THREE.MeshPhongMaterial({ color: 0xff0000, flatShading: true });
// Plane y = 0 with normal +Y; passed to getCameraPositionAndTarget —
// presumably constrains the fitted target to the floor (TODO confirm
// against the CameraViewBox API).
const floorPlane = new THREE.Plane(new THREE.Vector3(0, 1, 0));
init();
animate();
/**
 * Builds the scene (background, fog, 500 random boxes, lights), the renderer,
 * the perspective camera with orbit controls, the CameraViewBox helper, and
 * wires up window/mouse event handlers.
 */
function init() {
  scene = new THREE.Scene();
  scene.background = new THREE.Color(0xcccccc);
  scene.fog = new THREE.FogExp2(0xcccccc, 0.002);

  // Full-window antialiased renderer attached to the document body.
  renderer = new THREE.WebGLRenderer({ antialias: true });
  renderer.setPixelRatio(window.devicePixelRatio);
  renderer.setSize(window.innerWidth, window.innerHeight);
  document.body.appendChild(renderer.domElement);

  camera = new THREE.PerspectiveCamera(60, window.innerWidth / window.innerHeight, 1, 1000);
  camera.position.set(400, 200, 0);

  // Create the cameraViewBox
  cameraViewBox = new THREE.CameraViewBox();
  cameraViewBox.setViewFromCamera(camera);

  // controls
  controls = new OrbitControls(camera, renderer.domElement);
  controls.minDistance = 100;
  controls.maxDistance = 500;
  controls.maxPolarAngle = Math.PI / 2;

  // world: 500 boxes scattered on the floor with random heights.
  const boxGeometry = new THREE.BoxGeometry(1, 1, 1);
  boxGeometry.translate(0, 0.5, 0); // pivot at the base so boxes sit on y = 0
  material = new THREE.MeshPhongMaterial({
    color: 0xffffff,
    flatShading: true
  });
  for (let i = 0; i < 500; i++) {
    const box = new THREE.Mesh(boxGeometry, material);
    box.position.x = Math.random() * 1600 - 800;
    box.position.y = 0;
    box.position.z = Math.random() * 1600 - 800;
    box.scale.x = 20;
    box.scale.y = Math.random() * 80 + 10;
    box.scale.z = 20;
    box.updateMatrix();
    box.matrixAutoUpdate = false; // static mesh: freeze its matrix
    scene.add(box);
    meshes.push(box);
  }

  // lights
  const warmLight = new THREE.DirectionalLight(0xffffff);
  warmLight.position.set(1, 1, 1);
  scene.add(warmLight);
  const coolLight = new THREE.DirectionalLight(0x002288);
  coolLight.position.set(-1, -1, -1);
  scene.add(coolLight);
  const ambientLight = new THREE.AmbientLight(0x222222);
  scene.add(ambientLight);

  window.addEventListener('resize', onWindowResize);

  // Add DOM events
  renderer.domElement.addEventListener('mousedown', onMouseDown, false);
  window.addEventListener('mousemove', onMouseMove, false);
  renderer.domElement.addEventListener('mouseup', onMouseUp, false);
}
/** Keeps the camera aspect ratio and canvas size in sync with the window. */
function onWindowResize() {
  const { innerWidth: width, innerHeight: height } = window;
  camera.aspect = width / height;
  camera.updateProjectionMatrix();
  renderer.setSize(width, height);
}
// Render loop: schedules itself on every animation frame and re-renders.
function animate() {
requestAnimationFrame(animate);
renderer.render(scene, camera);
}
// Add selection support
// A fresh mousedown starts a potential click; assume no drag yet.
function onMouseDown() {
isDragging = false;
}
// Any pointer movement marks the gesture as a drag (camera orbit),
// so the following mouseup will not be treated as a selection click.
function onMouseMove() {
isDragging = true;
}
/**
 * Click handler: toggles selection of the mesh under the cursor.
 * A mouseup that ends a drag (camera orbit) is ignored — only a
 * stationary click counts as a selection toggle.
 * @param {MouseEvent} event
 */
function onMouseUp(event) {
  // Original code set isDragging = false in both branches of an if/else;
  // capture the flag once and reset it unconditionally instead.
  const wasDragging = isDragging;
  isDragging = false;
  if (wasDragging) {
    return;
  }

  // Convert the click position to normalized device coordinates (-1..1).
  // The canvas fills the window, so window dimensions are correct here.
  mouse.x = (event.clientX / window.innerWidth) * 2 - 1;
  mouse.y = -(event.clientY / window.innerHeight) * 2 + 1;

  raycaster.setFromCamera(mouse, camera);
  const intersects = raycaster.intersectObjects(meshes);
  if (intersects.length > 0) {
    // Toggle the closest hit: swap material and update the selection set.
    const mesh = intersects[0].object;
    if (selection.has(mesh)) {
      mesh.material = material;
      selection.delete(mesh);
    } else {
      mesh.material = selectedMaterial;
      selection.add(mesh);
    }
  }
}
/**
 * Zoom-to-fit: repositions the camera so the selected meshes fill the view.
 * Only camera.position and controls.target are modified — the camera's
 * parameters (FOV etc.) are left untouched.
 */
function centerOnSelection() {
  if (selection.size === 0) return;
  cameraViewBox.setViewFromCamera(camera);
  cameraViewBox.setFromObjects([...selection]);
  cameraViewBox.getCameraPositionAndTarget(camera.position, controls.target, floorPlane);
  controls.update();
}
我现在能够在某种程度上自己解决这个问题。如果我们从对称偏移量开始,这会非常简单:
使用较窄的 FOV 角度(绿色)计算相机位置会使最终图像中的投影点偏移一定量。如果我们找到正确的角度,这些点最终会出现在我们正在寻找的精确偏移处。
我们可以使用基本三角学计算这个角度。我们计算到归一化设备坐标平面的距离(即 height/width 的 -1 到 1;图像中的蓝色)然后应用偏移量(百分比值范围从 0.0 到 1.0)并创建一个新角度:
tan(FOV / 2) = 1 / dist => dist = 1 / tan(FOV / 2)
tan(FOVg / 2) = (1 - offset) / dist => FOVg = atan((1 - offset) / dist) * 2
对水平 FOV(按纵横比修改)重复此操作,使用相同或不同的偏移值。然后根据这些新角度应用现有的缩放以适合逻辑。
这种方法适用于对称偏移。通过计算 4 个单独的新角度,对于不对称偏移可能也是如此。棘手的部分是使用这些计算正确的相机位置和缩放...