Clip a texture with correct UVs inside a deformed rectangle in WebGL / Threejs
Hey, I'm trying to make a clipped texture for a frame effect. The idea is that the frame can warp and transform freely while the texture stays correctly sampled, hence the "portal" or "frame" effect:
Here is my snippet with the relevant JS and shader code:
const photoUrl = 'https://images.unsplash.com/photo-1583287916880-530d57049c66?ixlib=rb-1.2.1&q=80&fm=jpg&crop=entropy&cs=tinysrgb&w=400&fit=max&ixid=eyJhcHBfaWQiOjE0NTg5fQ'
const appWidth = innerWidth
const appHeight = innerHeight
const renderer = new THREE.WebGLRenderer()
const camera = new THREE.OrthographicCamera(appWidth / - 2, appWidth / 2, appHeight / 2, appHeight / - 2, 1, 1000)
const scene = new THREE.Scene()
camera.position.set(0, 0, 50)
camera.lookAt(new THREE.Vector3(0, 0, 0))
renderer.setClearColor(0xAAAAAA)
renderer.setPixelRatio(window.devicePixelRatio || 1)
renderer.setSize(appWidth, appHeight)
document.body.appendChild(renderer.domElement)
const vertexShader = `
  varying vec2 v_uv;
  void main () {
    vec3 newPosition = position;
    float dist = distance(position, vec3(0.0));
    newPosition.x += sin(3.0) * dist * 3.0;
    // newPosition.y += cos(0.1) * dist * 0.2;
    gl_Position = projectionMatrix * modelViewMatrix * vec4(newPosition, 1.0);
    v_uv = uv;
    // I guess do some magic with v_uv to inverse transform it based on the vertices transformation??
  }
`
const fragmentShader = `
  uniform sampler2D u_sampler;
  uniform vec2 u_imageSize;
  varying vec2 v_uv;
  void main () {
    vec2 s = vec2(250.0, 400.0);
    vec2 i = u_imageSize;
    float rs = s.x / s.y;
    float ri = i.x / i.y;
    vec2 new = rs < ri ? vec2(i.x * s.y / i.y, s.y) : vec2(s.x, i.y * s.x / i.x);
    vec2 offset = (rs < ri ? vec2((new.x - s.x) / 2.0, 0.0) : vec2(0.0, (new.y - s.y) / 2.0)) / new;
    vec2 uv = v_uv * s / new + offset;
    // sample texture like css background-size: cover
    // OR if the texture is the same size as plane, apply some matrix transform to UVs so there is extra padding and you dont see the texture corners when the plane is deformed at maximum?
    gl_FragColor = texture2D(u_sampler, uv);
  }
`
const mesh = new THREE.Mesh(
  new THREE.PlaneGeometry(250, 400, 20, 20),
  new THREE.ShaderMaterial({
    uniforms: {
      u_sampler: { value: null },
      u_imageSize: { value: new THREE.Vector2(0, 0) },
    },
    vertexShader,
    fragmentShader,
  })
)
scene.add(mesh)
new THREE.TextureLoader().load(photoUrl, texture => {
  mesh.material.uniforms.u_sampler.value = texture
  mesh.material.uniforms.u_imageSize.value.x = texture.image.naturalWidth
  mesh.material.uniforms.u_imageSize.value.y = texture.image.naturalHeight
  mesh.material.needsUpdate = true
})
updateFrame()
function updateFrame () {
  renderer.render(scene, camera)
  requestAnimationFrame(updateFrame)
}
* { margin: 0; padding: 0; }
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/110/three.min.js"></script>
As expected, this deforms the plane's vertex positions and its UVs at the same time. I'm struggling to wrap my head around the math and the order of operations behind it. Thanks in advance!
P.S. Also, if the texture I use is exactly the same pixel size as my plane (I'm using an orthographic camera), I guess I need to scale it up so it extends beyond the plane's bounds, so that you don't see its edges / bleeding when the plane is deformed? How should that be done?
The original formula for vUv is
v_uv.x = position.x/width+0.5;
Since you change the position, it now becomes
v_uv.x = newPosition.x/width+0.5;
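Generalized to both axes, here is a minimal sketch of the same idea (u_planeSize is a hypothetical uniform holding the PlaneGeometry dimensions, 250 × 400 in your case, so the sizes are not hard-coded):
varying vec2 v_uv;
// Hypothetical uniform: the plane dimensions, e.g. new THREE.Vector2(250, 400) on the JS side.
uniform vec2 u_planeSize;
void main () {
  vec3 newPosition = position;
  // ...apply the deformation to newPosition here...
  gl_Position = projectionMatrix * modelViewMatrix * vec4(newPosition, 1.0);
  // Derive the UVs from the deformed position rather than the undeformed `uv` attribute,
  // so the texture is sampled as if seen through a fixed frame.
  v_uv = newPosition.xy / u_planeSize + 0.5;
}
The full snippet below does the same thing with the sizes written out as 250.0 and 400.0: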
const photoUrl = 'https://images.unsplash.com/photo-1583287916880-530d57049c66?ixlib=rb-1.2.1&q=80&fm=jpg&crop=entropy&cs=tinysrgb&w=400&fit=max&ixid=eyJhcHBfaWQiOjE0NTg5fQ'
const appWidth = innerWidth
const appHeight = innerHeight
const renderer = new THREE.WebGLRenderer()
const camera = new THREE.OrthographicCamera(appWidth / - 2, appWidth / 2, appHeight / 2, appHeight / - 2, 1, 1000)
const scene = new THREE.Scene()
camera.position.set(0, 0, 50)
camera.lookAt(new THREE.Vector3(0, 0, 0))
renderer.setClearColor(0xAAAAAA)
renderer.setPixelRatio(window.devicePixelRatio || 1)
renderer.setSize(appWidth, appHeight)
document.body.appendChild(renderer.domElement)
const vertexShader = `
  varying vec2 v_uv;
  uniform vec2 u_imageSize;
  void main () {
    vec3 newPosition = position;
    float dist = distance(position, vec3(0.0));
    newPosition.x += sin(3.0) * dist * 3.0;
    newPosition.y += cos(0.1) * dist * 0.2;
    gl_Position = projectionMatrix * modelViewMatrix * vec4(newPosition, 1.0);
    v_uv = uv; // immediately overwritten below
    // Original UVs, derived from the undeformed position:
    //v_uv.x = position.x/250.0+0.5;
    //v_uv.y = position.y/400.0+0.5;
    // Recompute the UVs from the deformed position instead (250 x 400 is the plane size),
    // so the texture stays correctly sampled while the frame deforms:
    v_uv.x = newPosition.x/250.0+0.5;
    v_uv.y = newPosition.y/400.0+0.5;
  }
`
const fragmentShader = `
  uniform sampler2D u_sampler;
  uniform vec2 u_imageSize;
  varying vec2 v_uv;
  void main () {
    // "cover" math: s is the plane size, i the image size in pixels
    vec2 s = vec2(250.0, 400.0);
    vec2 i = u_imageSize;
    float rs = s.x / s.y;
    float ri = i.x / i.y;
    // size of the image once scaled to cover the plane, and the centring offset
    vec2 new = rs < ri ? vec2(i.x * s.y / i.y, s.y) : vec2(s.x, i.y * s.x / i.x);
    vec2 offset = (rs < ri ? vec2((new.x - s.x) / 2.0, 0.0) : vec2(0.0, (new.y - s.y) / 2.0)) / new;
    vec2 uv = v_uv * s / new + offset;
    // sample texture like css background-size: cover
    gl_FragColor = texture2D(u_sampler, uv);
  }
`
const mesh = new THREE.Mesh(
  new THREE.PlaneGeometry(250, 400, 20, 20),
  new THREE.ShaderMaterial({
    uniforms: {
      u_sampler: { value: null },
      u_imageSize: { value: new THREE.Vector2(0, 0) },
    },
    vertexShader,
    fragmentShader,
  })
)
scene.add(mesh)
new THREE.TextureLoader().load(photoUrl, texture => {
  mesh.material.uniforms.u_sampler.value = texture
  mesh.material.uniforms.u_imageSize.value.x = texture.image.naturalWidth
  mesh.material.uniforms.u_imageSize.value.y = texture.image.naturalHeight
  mesh.material.needsUpdate = true
})
updateFrame()
function updateFrame () {
  renderer.render(scene, camera)
  requestAnimationFrame(updateFrame)
}
* { margin: 0; padding: 0; }
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/110/three.min.js"></script>
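Regarding the P.S. about padding: the answer above does not cover it, but one common approach (a sketch under my own assumptions, not part of the answer) is to zoom the UVs about their centre after the cover math, so the texture is effectively scaled up relative to the plane and deformed UVs never reach the texture edges:
uniform sampler2D u_sampler;
uniform vec2 u_imageSize;
// Hypothetical extra uniform: 1.0 = no padding, 1.25 = 25% headroom around the visible area.
uniform float u_padding;
varying vec2 v_uv;
void main () {
  // same cover math as above
  vec2 s = vec2(250.0, 400.0);
  vec2 i = u_imageSize;
  float rs = s.x / s.y;
  float ri = i.x / i.y;
  vec2 new = rs < ri ? vec2(i.x * s.y / i.y, s.y) : vec2(s.x, i.y * s.x / i.x);
  vec2 offset = (rs < ri ? vec2((new.x - s.x) / 2.0, 0.0) : vec2(0.0, (new.y - s.y) / 2.0)) / new;
  vec2 uv = v_uv * s / new + offset;
  // Zoom about the centre: dividing by u_padding > 1.0 samples a smaller central region,
  // which looks like the texture being scaled up beyond the plane bounds, so UVs pushed
  // outside 0..1 by the deformation still land inside the image (no visible edges / bleeding).
  uv = (uv - 0.5) / u_padding + 0.5;
  gl_FragColor = texture2D(u_sampler, uv);
}
On the JS side, u_padding would just be another uniform next to u_sampler and u_imageSize, e.g. u_padding: { value: 1.25 }; pick a value large enough that the maximum displacement applied in the vertex shader stays inside the extra margin.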