Real Time Change of Texture in Blender
I'm new to Blender and need Python to change the texture of a plane in real time. I have a Processing sketch that continuously saves black-and-white still images from a USB camera, and these stills will act as the displacement texture of a plane in Blender.
In the same folder I have the Processing sketch, the Blender file and 20 frames from the USB camera, which are constantly updated. I have already successfully created a serial link between Processing and Blender.
I basically need the name of the texture file to keep cycling from 1 to 20 as the still frames are saved. Ideally I would also be able to export an animation sequence through it. But as I said, I'm a complete beginner!
I found something similar online, but I think it is for an older version of Python (and the Blender API), because it doesn't work!
import Blender
from Blender import Image
from Blender import Texture
from Blender import Material
from Blender import Object
from Blender import Window
from Blender import Modifier
from Blender import Scene
import serial

myPort = serial.Serial('COM11', 9600)

texture = Texture.Get('Textura')
texture.setType('Image')

scene = Scene.getCurrent()
plane = Object.Get('Plane')
modifier = plane.modifiers[0]

myPort.flushInput()

for i in range(1, 301):
    y = ord(myPort.read(size=1))
    name = str(y) + ".png"
    image = Image.Load(name)
    texture.image = image
    modifier[Modifier.Settings.TEXTURE] = texture
    Window.EditMode(1)
    Window.EditMode(0)
    Window.Redraw()
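(The snippet above uses the pre-2.5 Blender Python API, which no longer exists. A very rough, untested sketch of the same idea against the modern bpy API (Blender 2.8+) might look something like the following, keeping the object and texture names from the snippet above and assuming the first modifier on the plane is a Displace modifier and the numbered stills sit next to the .blend file.)

# Hypothetical bpy (Blender 2.8+) sketch, untested; names taken from the snippet above.
import bpy

plane = bpy.data.objects['Plane']
displace = plane.modifiers[0]  # assumed to be a Displace modifier

# re-use or create an image texture named 'Textura'
tex = bpy.data.textures.get('Textura')
if tex is None:
    tex = bpy.data.textures.new('Textura', type='IMAGE')
displace.texture = tex

def show_frame(i):
    """Point the displacement texture at <i>.png in the .blend folder."""
    name = "%d.png" % i
    img = bpy.data.images.get(name)
    if img is None:
        img = bpy.data.images.load(bpy.path.abspath("//" + name))
    else:
        img.reload()  # pick up the file Processing just overwrote
    tex.image = img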
I'll start with one very strong assumption: that, if you could, you would use Processing on its own, removing the need for serial communication between Processing and Blender, and for Blender altogether.
(Side note: for easier inter-process communication, I find the OSC protocol simpler to work with. Check out the oscP5 library and examples for Processing, and a Python OSC module of your choice.)
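(For illustration only, a minimal oscP5 sender sketch could look something like this; the /frameIndex address pattern and port 12000 are arbitrary examples, and the receiving side, e.g. a Python OSC server, is assumed to exist separately.)

// Minimal oscP5 sender sketch; /frameIndex and port 12000 are arbitrary examples.
import oscP5.*;
import netP5.*;

OscP5 oscP5;
NetAddress remote;

void setup() {
  oscP5 = new OscP5(this, 9000);                // local listening port (unused here)
  remote = new NetAddress("127.0.0.1", 12000);  // wherever the OSC receiver listens
}

void draw() {
  // send the index of the most recently saved still (1..20) instead of a serial byte
  OscMessage msg = new OscMessage("/frameIndex");
  msg.add((frameCount % 20) + 1);
  oscP5.send(msg, remote);
}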
A quick search turned up this post, which includes a code example of generating a subdivided plane displaced by Perlin noise. Here is a tweaked version of Cedric's code:
float noiseScale = .12;
int meshSize = 10;
int resX = 50;
int resY = 50;
float[][] val = new float[resX][resY];

void setup() {
  size(900, 600, P3D);
  smooth();
  background(255);
}

void draw() {
  // sample Perlin noise
  noiseScale = mouseX * .0002;
  float xoff = 0.0;
  for (int x = 0; x < resX; x++) {
    xoff += noiseScale;
    float yoff = 0.0;
    for (int y = 0; y < resY; y++) {
      yoff += noiseScale;
      val[x][y] = noise(xoff, yoff) * 255;
    }
  }
  // render displaced subdivided plane
  background(0);
  translate(width / 2, height / 2);
  rotateY(map(mouseX, 0, width, -PI, PI));
  rotateX(map(mouseY, 0, height, PI, -PI));
  translate(-resX/2 * meshSize, -resY/2 * meshSize);
  beginShape(QUADS);
  colorMode(HSB, 255);
  for (int x = 0; x < resX-1; x++) {
    for (int y = 0; y < resY-1; y++) {
      fill(val[x][y], 255, 255);
      vertex(x*meshSize,     y*meshSize,     val[x][y]);
      vertex((x+1)*meshSize, y*meshSize,     val[x+1][y]);
      vertex((x+1)*meshSize, (y+1)*meshSize, val[x+1][y+1]);
      vertex(x*meshSize,     (y+1)*meshSize, val[x][y+1]);
    }
  }
  endShape();
}
In this case, 50x50 would be the size of the webcam image and val the equivalent pixel data.
Adjusted to use pixel brightness from the webcam, it looks something like this:
import processing.video.*;

int meshSize = 10;
int resX = 64;
int resY = 48;

Capture webcam;

void setup(){
  size(640, 480, P3D);
  webcam = new Capture(this, resX, resY);
  webcam.start();
}

void draw(){
  background(0);
  // 2D
  image(webcam, 0, 0);
  // 3D : isolate coordinate space using push/pop matrix
  pushMatrix();
  translate(width / 2, height / 2);
  rotateY(map(mouseX, 0, width, -PI, PI));
  rotateX(map(mouseY, 0, height, PI, -PI));
  drawWebcamDisplacement(webcam);
  popMatrix();
}

void drawWebcamDisplacement(PImage frame){
  // prep. pixels for reading
  frame.loadPixels();
  // isolate coordinate space
  pushMatrix();
  translate(-resX/2 * meshSize, -resY/2 * meshSize);
  // start pairing 4 vertices as quads
  beginShape(QUADS);
  colorMode(HSB, 255);
  // for each x,y subdivision
  for (int x = 0; x < resX-1; x++) {
    for (int y = 0; y < resY-1; y++) {
      // sample 4 neighbouring pixels: current (top left, TL), to the right (TR), below (BL) and below diagonally (BR)
      color pixelTL = frame.pixels[x + (y * resX)];
      color pixelTR = frame.pixels[(x + 1) + (y * resX)];
      color pixelBR = frame.pixels[(x + 1) + ((y + 1) * resX)];
      color pixelBL = frame.pixels[x + ((y + 1) * resX)];
      // extract brightness
      float brightnessTL = brightness(pixelTL);
      float brightnessTR = brightness(pixelTR);
      float brightnessBR = brightness(pixelBR);
      float brightnessBL = brightness(pixelBL);
      fill(brightnessTL, 255, 255);
      vertex(x*meshSize,     y*meshSize,     brightnessTL);
      vertex((x+1)*meshSize, y*meshSize,     brightnessTR);
      vertex((x+1)*meshSize, (y+1)*meshSize, brightnessBR);
      vertex(x*meshSize,     (y+1)*meshSize, brightnessBL);
    }
  }
  endShape();
  // restore coordinate space
  popMatrix();
}

void captureEvent(Capture cam){
  cam.read();
}
Unfortunately I don't have access to a webcam right now, so there may be errors, but hopefully the commented code illustrates the idea. (Double check that your webcam actually supports this resolution/aspect ratio; if not, adjust it and experiment with a separate PImage (e.g. via webcam.get()) which you can resize(), as sketched below.) (The HSB colour mode is optional/lifted from Cedric's example; feel free to tweak it to taste.)
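(As a sketch of that fallback, assuming setup() opens the Capture at a size the camera actually supports, e.g. new Capture(this, 640, 480), draw() from the example above could grab a copy of the frame and downsample it before sampling pixels.)

// Drop-in replacement for draw() in the sketch above: sample a downsampled copy
// of the frame instead of relying on the camera opening at 64x48.
void draw(){
  background(0);
  PImage small = webcam.get();   // copy of the current frame at its native size
  small.resize(resX, resY);      // downsample to the grid resolution
  // 2D preview of the downsampled frame
  image(small, 0, 0);
  // 3D : isolate coordinate space using push/pop matrix
  pushMatrix();
  translate(width / 2, height / 2);
  rotateY(map(mouseX, 0, width, -PI, PI));
  rotateX(map(mouseY, 0, height, PI, -PI));
  drawWebcamDisplacement(small); // sample the downsampled copy instead of webcam
  popMatrix();
}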
(It should be possible to implement the displacement map as a GLSL shader (a PShader in Processing), but that is probably not the most beginner-friendly approach.)
Update
For better performance, the geometry can be cached using a PShape and the existing (pre-allocated) vertices simply updated later in draw(). Here is a modified version of the above using PShape:
import processing.video.*;

Capture webcam;

int meshSize = 15;
int resX = 64;
int resY = 48;

PShape grid;
boolean sampleZ = true;

void settings(){
  size(640, 480, P3D);
  //fullScreen();
}

void setup(){
  textAlign(RIGHT);
  colorMode(HSB, 255);
  webcam = new Capture(this, resX, resY);
  webcam.start();
  grid = createPShapeGrid(resX, resY, meshSize);
}

void draw(){
  // update
  if(sampleZ) gridSampleBrightness(grid, webcam);
  else        gridSamplePixels(grid, webcam);
  // render
  background(0);
  // 2D
  //image(webcam, 0, 0);
  text((int)frameRate+"fps\npress any key to change sampling\nmode: " + (sampleZ ? "Z" : "pixels")+"\nmouse press for lights", width - 10, 15);
  // 3D : isolate coordinate space using push/pop matrix
  if(mousePressed) lights();
  else             noLights();
  pushMatrix();
  translate(width * 0.5, height * 0.5, -(resX + resY) * 5);
  rotateY(map(mouseX, 0, width, -PI, PI));
  rotateX(map(mouseY, 0, height, PI, -PI));
  pushMatrix();
  // offset to centre
  translate(-resX * 0.5 * meshSize, -resY * 0.5 * meshSize);
  shape(grid);
  popMatrix();
  popMatrix();
}

void keyPressed(){
  sampleZ = !sampleZ;
}

void captureEvent(Capture cam){
  cam.read();
}

PShape createPShapeGrid(int resX, int resY, int meshSize){
  PShape grid = createShape(PShape.GEOMETRY);
  grid.setStroke(false);
  for (int x = 0; x < resX-1; x++) {
    for (int y = 0; y < resY-1; y++) {
      grid.beginShape(QUAD);
      grid.vertex(x*meshSize,     y*meshSize,     0);
      grid.vertex((x+1)*meshSize, y*meshSize,     0);
      grid.vertex((x+1)*meshSize, (y+1)*meshSize, 0);
      grid.vertex(x*meshSize,     (y+1)*meshSize, 0);
      grid.endShape();
    }
  }
  return grid;
}

// sample pixels, map brightness to Z, colour from Z (usually HSB)
void gridSampleBrightness(PShape grid, PImage frame){
  frame.loadPixels();
  // update the pre-allocated quad vertices, 4 at a time
  // for each x,y subdivision
  int vertexIndex = 0;
  for (int x = 0; x < resX-1; x++) {
    for (int y = 0; y < resY-1; y++) {
      // sample 4 neighbouring pixels: current (top left, TL), to the right (TR), below (BL) and below diagonally (BR)
      color pixelTL = frame.pixels[x + (y * resX)];
      color pixelTR = frame.pixels[(x + 1) + (y * resX)];
      color pixelBR = frame.pixels[(x + 1) + ((y + 1) * resX)];
      color pixelBL = frame.pixels[x + ((y + 1) * resX)];
      // extract brightness
      float brightnessTL = brightness(pixelTL);
      float brightnessTR = brightness(pixelTR);
      float brightnessBR = brightness(pixelBR);
      float brightnessBL = brightness(pixelBL);
      color brightnessTLHSB = color(brightnessTL, 255, 255);
      grid.setFill(vertexIndex    , brightnessTLHSB);
      grid.setFill(vertexIndex + 1, brightnessTLHSB);
      grid.setFill(vertexIndex + 2, brightnessTLHSB);
      grid.setFill(vertexIndex + 3, brightnessTLHSB);
      grid.setVertex(vertexIndex++, x*meshSize,     y*meshSize,     brightnessTL);
      grid.setVertex(vertexIndex++, (x+1)*meshSize, y*meshSize,     brightnessTR);
      grid.setVertex(vertexIndex++, (x+1)*meshSize, (y+1)*meshSize, brightnessBR);
      grid.setVertex(vertexIndex++, x*meshSize,     (y+1)*meshSize, brightnessBL);
    }
  }
}

// sample pixels, map brightness to Z, colour from pixels
void gridSamplePixels(PShape grid, PImage frame){
  frame.loadPixels();
  // update the pre-allocated quad vertices, 4 at a time
  // for each x,y subdivision
  int vertexIndex = 0;
  for (int x = 0; x < resX-1; x++) {
    for (int y = 0; y < resY-1; y++) {
      // sample 4 neighbouring pixels: current (top left, TL), to the right (TR), below (BL) and below diagonally (BR)
      color pixelTL = frame.pixels[x + (y * resX)];
      color pixelTR = frame.pixels[(x + 1) + (y * resX)];
      color pixelBR = frame.pixels[(x + 1) + ((y + 1) * resX)];
      color pixelBL = frame.pixels[x + ((y + 1) * resX)];
      // extract brightness for the Z displacement
      float brightnessTL = brightness(pixelTL);
      float brightnessTR = brightness(pixelTR);
      float brightnessBR = brightness(pixelBR);
      float brightnessBL = brightness(pixelBL);
      grid.setFill(vertexIndex    , pixelTL);
      grid.setFill(vertexIndex + 1, pixelTR);
      grid.setFill(vertexIndex + 2, pixelBR);
      grid.setFill(vertexIndex + 3, pixelBL);
      grid.setVertex(vertexIndex++, x*meshSize,     y*meshSize,     brightnessTL);
      grid.setVertex(vertexIndex++, (x+1)*meshSize, y*meshSize,     brightnessTR);
      grid.setVertex(vertexIndex++, (x+1)*meshSize, (y+1)*meshSize, brightnessBR);
      grid.setVertex(vertexIndex++, x*meshSize,     (y+1)*meshSize, brightnessBL);
    }
  }
}
With this approach it's best to be mindful of the grid resolution: the more vertices, the slower the initialisation. The main danger is if the PShape is created and populated in setup(): this can become a problem if it takes P3D longer than 15 seconds to initialise. A workaround could be to make a one-off function call from draw() using a boolean flag, e.g.:
if(!isPShapeReady){
  setupPShape();
  isPShapeReady = true;
}
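(Spelled out a little more, using the same illustrative names, the deferred setup could look something like this.)

// Sketch of deferring the expensive PShape setup to the first draw() call.
boolean isPShapeReady = false;

void setup() {
  size(640, 480, P3D);
}

void setupPShape() {
  // expensive geometry creation goes here, e.g.
  // grid = createPShapeGrid(resX, resY, meshSize);
}

void draw() {
  if (!isPShapeReady) {
    setupPShape();
    isPShapeReady = true;
  }
  background(0);
  // ... render the cached PShape once it exists, e.g. shape(grid);
}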