python · processing · blender · real-time-data

Real Time Change of Texture in Blender


I'm a complete newbie in Blender and Python and need to change the texture of a plane in real time. I have a Processing sketch which constantly saves black & white stills from a USB camera; these are meant to act as displacement textures for a plane in Blender.

In the same folder I have the Processing sketch, the Blender file, and 20 frames from the USB camera, which are constantly updated. I've managed to create a serial link between Processing and Blender.

I basically need the name of the texture file to change constantly from 1 to 20 as the still frames are being saved. Ideally I could also export an animation sequence through this. But as I said, I am a complete beginner!

I have found something similar online, but I think it was written for an older version of Python and the old Blender API, as it doesn't work!

import Blender
from Blender import Image
from Blender import Texture
from Blender import Material
from Blender import Object
from Blender import Window
from Blender import Modifier
from Blender import Scene
import serial
myPort = serial.Serial('COM11', 9600)

texture=Texture.Get('Textura')
texture.setType('Image')
scene=Scene.getCurrent()
plane=Object.Get('Plane')
modifier=plane.modifiers[0] 
myPort.flushInput()

for i in range(1, 301):
    y = ord(myPort.read(size=1))              # read one byte from the serial port (the frame index)
    name = str(y) + ".png"                    # build the matching still's filename
    image = Image.Load(name)
    texture.image = image                     # swap the image on the displacement texture
    modifier[Modifier.Settings.TEXTURE] = texture
    Window.EditMode(1)                        # toggle edit mode and redraw to refresh the viewport
    Window.EditMode(0)
    Window.Redraw()

Solution

  • I'm starting with a pretty strong assumption: that you would go with Processing alone if you could do the displacement there, removing the need for serial communication between Processing and Blender, and for Blender altogether. (Off topic: for easy interprocess communication I find the OSC protocol easier to use. Check out Processing's oscP5 library and examples and a Python OSC module of your choice; see the minimal sketch below.)

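    A minimal oscP5 sketch for sending/receiving a frame index could look like this (the /frame address and the 12000/12001 ports are arbitrary values picked for illustration, not something the rest of this answer relies on):

    import oscP5.*;
    import netP5.*;
    
    OscP5 oscP5;
    NetAddress remote;
    
    void setup() {
      // listen for incoming OSC messages on port 12000
      oscP5 = new OscP5(this, 12000);
      // send outgoing messages to a receiver on this machine, port 12001
      remote = new NetAddress("127.0.0.1", 12001);
    }
    
    void draw() {
      // e.g. send the current frame index (1..20) once per frame
      OscMessage msg = new OscMessage("/frame");
      msg.add((frameCount % 20) + 1);
      oscP5.send(msg, remote);
    }
    
    // called by oscP5 whenever a message arrives on the listening port
    void oscEvent(OscMessage msg) {
      if (msg.checkAddrPattern("/frame")) {
        println("received frame index: " + msg.get(0).intValue());
      }
    }
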
    Doing a quick search I spotted this post that includes a code example of how to generate a subdivided plane that is displaced by Perlin noise. Here's a tweaked version of Cedric's code:

    float noiseScale= .12;
    
    
    int meshSize = 10;
    int resX = 50;
    int resY = 50;
    float[][] val = new float[resX][resY];
    
    void setup() {
      size(900, 600, P3D);
      smooth();
      background(255);
    }
    
    void draw() {
      // sample Perlin noise (noise scale mapped from mouse X)
      noiseScale = mouseX * .0002;
    
      float xoff = 0.0;
      for (int x =0; x<resX; x++) {
        xoff +=noiseScale;
        float yoff = 0.0;
        for (int y =0; y<resY; y++) {
    
          yoff +=noiseScale;
    
          val[x][y] = noise(xoff, yoff)*255;
        }
      }
      // render displaced subdivided plane
      background(0);
      translate(width / 2, height / 2);
      rotateY(map(mouseX, 0, width, -PI, PI));
      rotateX(map(mouseY, 0, height, PI, -PI));
      translate(-resX/2*meshSize, -resY/2*meshSize);
      
      beginShape(QUADS);
      colorMode(HSB, 255);
    
      for (int x =0; x<resX-1; x++) {
        for (int y =0; y<resY-1; y++) {
          fill( val[x][y], 255, 255);
    
          vertex(x*meshSize, y*meshSize, val[x][y] );
          vertex((x+1)*meshSize, y*meshSize, val[x+1][y] );
          vertex((x+1)*meshSize, (y+1)*meshSize, val[x+1][y+1] );
          vertex(x*meshSize, (y+1)*meshSize, val[x][y+1] );
        }
      }
    
      endShape();
    
    
    }
    

    In this case the 50x50 grid would match the size of the webcam image, and val would be the equivalent of its pixel data.

    Tweaking that to use the brightness of pixels from a webcam would look something like this:

    import processing.video.*;
    
    int meshSize = 10;
    int resX = 64;
    int resY = 48;
    
    
    Capture webcam;
    
    void setup(){
     size(640, 480, P3D); 
     
     webcam = new Capture(this, resX, resY);
     webcam.start();
    }
    
    void draw(){
      background(0);
      // 2D
      image(webcam, 0, 0);
      // 3D : isolate coordinate space using push/pop matrix
      pushMatrix();
      translate(width / 2, height / 2);
      rotateY(map(mouseX, 0, width, -PI, PI));
      rotateX(map(mouseY, 0, height, PI, -PI));
      
      drawWebcamDisplacement(webcam);
      popMatrix();
    }
    
    void drawWebcamDisplacement(PImage frame){
      // prep. pixels for reading
      frame.loadPixels();
      // isolate coordinate space
      pushMatrix();
      translate(-resX/2*meshSize, -resY/2*meshSize);
      
      // start pairing 4 vertices as quads
      beginShape(QUADS);
      colorMode(HSB, 255);
      // for each x,y subdivision
      for (int x =0; x < resX-1; x++) {
        for (int y =0; y < resY-1; y++) {
          // sample 4 neighbouring pixels: current (TL), to the right (TR), below (BL) and below diagonally (BR)
          color pixelTL = frame.pixels[x + (y * resX)];
          color pixelTR = frame.pixels[(x + 1) + (y * resX)];
          color pixelBR = frame.pixels[(x + 1) + ((y + 1) * resX)];
          color pixelBL = frame.pixels[x + ((y + 1) * resX)];
          // extract brightness
          float brightnessTL = brightness(pixelTL);
          float brightnessTR = brightness(pixelTR);
          float brightnessBR = brightness(pixelBR);
          float brightnessBL = brightness(pixelBL);

          fill(brightnessTL, 255, 255);

          vertex(x*meshSize    , y*meshSize    , brightnessTL );
          vertex((x+1)*meshSize, y*meshSize    , brightnessTR );
          vertex((x+1)*meshSize, (y+1)*meshSize, brightnessBR );
          vertex(x*meshSize    , (y+1)*meshSize, brightnessBL );
        }
      }
    
      endShape();
      // restore coordinate space
      popMatrix();
    }
    
    void captureEvent(Capture cam){
      cam.read(); 
    }
    

    Unfortunately I can't access a webcam right now, so there might be bugs, but hopefully the commented code illustrates the idea. (Double-check that your webcam actually supports this resolution / aspect ratio; if not, adjust it, or grab a separate PImage (e.g. via webcam.get()) that you can then resize() — see the sketch below.) (The HSB colour mode is optional/lifted from Cedric's example. Feel free to adjust as needed, of course.)

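    As a concrete sketch of that workaround (assuming the Capture is opened at a size the camera does support, e.g. new Capture(this, 640, 480)), draw() could downscale a copy of each frame to the mesh resolution before sampling it:

    void draw() {
      background(0);
      // copy the current camera frame and downscale it to the mesh resolution
      PImage small = webcam.get();
      small.resize(resX, resY);
      // 2D preview of the downscaled frame
      image(small, 0, 0);
      // 3D : isolate coordinate space using push/pop matrix
      pushMatrix();
      translate(width / 2, height / 2);
      rotateY(map(mouseX, 0, width, -PI, PI));
      rotateX(map(mouseY, 0, height, PI, -PI));
      drawWebcamDisplacement(small);
      popMatrix();
    }
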
    (It should be possible to implement the displacement map as a GLSL shader (exposed in Processing as PShader), however this might not be a newbie-friendly approach; a bare-bones shader-loading sketch follows below.)

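    Just to illustrate the Processing side of that idea (the two GLSL file names below are hypothetical placeholders, not files provided in this answer), wiring up a PShader would look roughly like this:

    PShader displace;
    
    void setup() {
      size(640, 480, P3D);
      // hypothetical shader files expected in the sketch's data folder
      displace = loadShader("displaceFrag.glsl", "displaceVert.glsl");
    }
    
    void draw() {
      background(0);
      // a uniform the vertex shader could use to scale the displacement
      displace.set("displaceAmount", 100.0);
      shader(displace);
      // ...draw the subdivided, textured plane here...
      resetShader();
    }
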
    Update

    For improved performance it's possible to cache the geometry using PShape and simply update existing (pre-allocated) vertices later in draw(). Here's a modified version of the above using PShape:

    import processing.video.*;
    
    Capture webcam;
    
    int meshSize = 15;
    int resX = 64;
    int resY = 48;
    PShape grid;
    
    boolean sampleZ = true;
    
    void settings(){
     size(640, 480, P3D);
     //fullScreen();
    }
    
    void setup(){
     textAlign(RIGHT);
     colorMode(HSB, 255);
     
     webcam = new Capture(this, resX, resY);
     webcam.start();
     
     grid = createPShapeGrid(resX, resY, meshSize);
    }
    
    void draw(){
      // update
      if(sampleZ) gridSampleBrightness(grid, webcam);
      else        gridSamplePixels(grid, webcam);
      // render
      background(0);
      // 2D
      //image(webcam, 0, 0);
      text((int)frameRate+"fps\npress any key to change sampling\nmode: " + (sampleZ ? "Z" : "pixels")+"\nmouse press for lights", width - 10, 15);
      // 3D : isolate coordinate space using push/pop matrix
      if(mousePressed) lights();
      else noLights();
      pushMatrix();
        translate(width * 0.5, height * 0.5, -(resX + resY) * 5);
        rotateY(map(mouseX, 0, width, -PI,  PI));
        rotateX(map(mouseY, 0, height, PI, -PI));
        pushMatrix();
          // offset to centre
          translate(-resX * 0.5 * meshSize, -resY * 0.5 * meshSize);
          shape(grid);
        popMatrix();
      popMatrix();
    }
    
    void keyPressed(){
      sampleZ = !sampleZ;
    }
    
    void captureEvent(Capture cam){
      cam.read(); 
    }
    
    PShape createPShapeGrid(int resX, int resY, int meshSize){
      PShape grid = createShape(PShape.GEOMETRY);
      
      grid.setStroke(false);
      
      for (int x =0; x < resX-1; x++) {
        for (int y =0; y < resY-1; y++) {
          grid.beginShape(QUAD);
          grid.vertex(x*meshSize    , y*meshSize    , 0 );
          grid.vertex((x+1)*meshSize, y*meshSize    , 0 );
          grid.vertex((x+1)*meshSize, (y+1)*meshSize, 0 );
          grid.vertex(x*meshSize    , (y+1)*meshSize, 0 );
          grid.endShape();
        }
      }
      
      return grid;
    }
    
    // sample pixels, map brightness to Z, colour from Z brightness (HSB)
    void gridSampleBrightness(PShape grid, PImage frame){
      frame.loadPixels();
      // start pairing 4 vertices as quads
      // for each  x,y subdivision
      int vertexIndex = 0;
      for (int x =0; x < resX-1; x++) {
        for (int y =0; y < resY-1; y++) {
          // sample 4 neighbouring pixels: current (TL), to the right (TR), below (BL) and below diagonally (BR)
          color pixelTL = frame.pixels[x + (y * resX)];
          color pixelTR = frame.pixels[(x + 1) + (y * resX)];
          color pixelBR = frame.pixels[(x + 1) + ((y + 1) * resX)];
          color pixelBL = frame.pixels[x + ((y + 1) * resX)];
          // extract brightness
          float brightnessTL = brightness(pixelTL);
          float brightnessTR = brightness(pixelTR);
          float brightnessBR = brightness(pixelBR);
          float brightnessBL = brightness(pixelBL);

          color brightnessTLHSB = color(brightnessTL, 255, 255);

          grid.setFill(vertexIndex    , brightnessTLHSB);
          grid.setFill(vertexIndex + 1, brightnessTLHSB);
          grid.setFill(vertexIndex + 2, brightnessTLHSB);
          grid.setFill(vertexIndex + 3, brightnessTLHSB);

          grid.setVertex(vertexIndex++, x*meshSize    , y*meshSize    , brightnessTL );
          grid.setVertex(vertexIndex++, (x+1)*meshSize, y*meshSize    , brightnessTR );
          grid.setVertex(vertexIndex++, (x+1)*meshSize, (y+1)*meshSize, brightnessBR );
          grid.setVertex(vertexIndex++, x*meshSize    , (y+1)*meshSize, brightnessBL );
        }
      }
    }
    
    // sample pixels, map brightness to Z, colour from pixels
    void gridSamplePixels(PShape grid, PImage frame){
      frame.loadPixels();
      // start pairing 4 vertices as quads
      // for each  x,y subdivision
      int vertexIndex = 0;
      for (int x =0; x < resX-1; x++) {
        for (int y =0; y < resY-1; y++) {
          // sample 4 neighbouring pixels: current (TL), to the right (TR), below (BL) and below diagonally (BR)
          color pixelTL = frame.pixels[x + (y * resX)];
          color pixelTR = frame.pixels[(x + 1) + (y * resX)];
          color pixelBR = frame.pixels[(x + 1) + ((y + 1) * resX)];
          color pixelBL = frame.pixels[x + ((y + 1) * resX)];
          // extract brightness
          float brightnessTL = brightness(pixelTL);
          float brightnessTR = brightness(pixelTR);
          float brightnessBR = brightness(pixelBR);
          float brightnessBL = brightness(pixelBL);

          grid.setFill(vertexIndex    , pixelTL);
          grid.setFill(vertexIndex + 1, pixelTR);
          grid.setFill(vertexIndex + 2, pixelBR);
          grid.setFill(vertexIndex + 3, pixelBL);

          grid.setVertex(vertexIndex++, x*meshSize    , y*meshSize    , brightnessTL );
          grid.setVertex(vertexIndex++, (x+1)*meshSize, y*meshSize    , brightnessTR );
          grid.setVertex(vertexIndex++, (x+1)*meshSize, (y+1)*meshSize, brightnessBR );
          grid.setVertex(vertexIndex++, x*meshSize    , (y+1)*meshSize, brightnessBL );
        }
      }
    }
    

    [Image: 3D quad grid sampling (webcam) pixel brightness to offset Z positions]

    With this approach it's good to be mindful of the mesh resolution: the more vertices, the slower the initialisation process. The main danger is creating and populating the PShape in setup(): if P3D takes more than 15s to initialise, this may become problematic. A workaround is to use a boolean flag so the setup function is called only once from draw(), for example:

    // declared at sketch level: boolean isPShapeReady = false;
    if(!isPShapeReady){
      setupPShape(); // builds and populates the PShape grid, instead of doing it in setup()
      isPShapeReady = true;
    }