SimpleOpenNI is a library for Processing that makes it possible to use the Kinect from Processing.
Follow the link:
This program (a modification of one of the SimpleOpenNI examples) is a prototype for scanning a 3D image in color.
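Every SimpleOpenNI sketch follows the same basic pattern: create the context in setup(), enable the streams you need, call context.update() once per frame and then read the data. A minimal sketch of that pattern (my own reduced example, not part of the prototype below) would be:

import SimpleOpenNI.*;

SimpleOpenNI context;

void setup() {
  size(640, 480);
  context = new SimpleOpenNI(this);
  context.enableDepth();               // start the depth stream
}

void draw() {
  context.update();                    // grab new frames from the Kinect
  image(context.depthImage(), 0, 0);   // show the depth map as a grayscale image
}

The prototype below follows the same pattern, but instead of drawing the depth map as a flat image it converts each depth sample to real-world 3D coordinates with depthMapRealWorld() and draws a small colored quad per cell, taking the color from the RGB image (shifted by despX/despY so that the two cameras line up):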
import processing.opengl.*;
import SimpleOpenNI.*;

SimpleOpenNI context;

float z = 0.3f;                                  // zoom (scale) factor of the scene
float ox = radians(180);                         // (unused)
float oy = radians(0);                           // (unused)
float rotx = radians(180), roty = radians(0), rotz = 0, incX = 0, incY = 0, incZ = 0;
final float RATE = 0.01f;                        // rotation speed
int despX = 14, despY = 56;                      // offset to align the RGB image with the depth map (earlier values: 24, 48 -22 +56)
float VE = 0.001f;                               // zoom speed
PImage rgbTrans = new PImage(640, 480);          // RGB image shifted to line up with the depth map
void setup() {
  size(1024, 768, OPENGL);

  context = new SimpleOpenNI(this);
  context.setMirror(false);
  context.enableDepth();   // enable the depth-map stream
  context.enableRGB();     // enable the RGB camera stream

  smooth();
  perspective(radians(95), float(width)/float(height), 10, 150000);   // perspective() expects the field of view in radians
}
void draw() {
  int[] depthMap = context.depthMap();
  int steps = 2;   // to speed up the drawing, draw only every second point
  int index;
  int i;
  float x1, x2, x3, x4, y1, y2, y3, y4, z1, z2, z3, z4;
  PVector realWorldPoint;
  PVector projectivePoint = new PVector();   // only used by the commented-out variants below

  background(0, 0, 0);
  translate(width/2, height/2, 0);
  rotateX(rotx);
  rotateY(roty);
  scale(z);
  translate(0, 0, -1000);   // set the rotation center of the scene 1000 in front of the camera
  println(frameRate);

  context.update();

  // copy the RGB image shifted by (despX, despY) so that it lines up with the depth map
  rgbTrans.copy(context.rgbImage(),
                despX, despY, context.rgbImage().width, context.rgbImage().height,
                0, 0, context.rgbImage().width, context.rgbImage().height);
  // walk the depth map and build one colored quad per (steps x steps) cell,
  // using the real-world 3D coordinates returned by the Kinect
  for(int y = 0; y < context.depthHeight() - steps; y += steps) {
    for(int x = 0; x < context.depthWidth() - steps; x += steps) {
      beginShape(QUADS);
      index = x + y * context.depthWidth();
      if(depthMap[index] > 0) {
        fill(rgbTrans.pixels[index]);   // color the quad with the aligned RGB pixel
        noStroke();

        // the four corners of the cell, in real-world coordinates
        i = index;
        realWorldPoint = context.depthMapRealWorld()[i];
        x1 = realWorldPoint.x; y1 = realWorldPoint.y; z1 = realWorldPoint.z;
        i = index + steps;
        realWorldPoint = context.depthMapRealWorld()[i];
        x2 = realWorldPoint.x; y2 = realWorldPoint.y; z2 = realWorldPoint.z;
        i = x + steps + (y + steps) * context.depthWidth();
        realWorldPoint = context.depthMapRealWorld()[i];
        x3 = realWorldPoint.x; y3 = realWorldPoint.y; z3 = realWorldPoint.z;
        i = x + (y + steps) * context.depthWidth();
        realWorldPoint = context.depthMapRealWorld()[i];
        x4 = realWorldPoint.x; y4 = realWorldPoint.y; z4 = realWorldPoint.z;

        // only draw the quad if all four corners have a valid depth reading
        if(z1 > 0 && z2 > 0 && z3 > 0 && z4 > 0) {
          vertex(x1, y1, z1); vertex(x2, y2, z2); vertex(x3, y3, z3); vertex(x4, y4, z4);
        }

        // earlier point-cloud variants, kept for reference:
        // realWorldPoint = context.depthMapRealWorld()[index];
        // context.convertRealWorldToProjective(realWorldPoint, projectivePoint);
        // stroke(context.rgbImage().pixels[(int)(projectivePoint.x + projectivePoint.y * context.depthWidth())]);
        // stroke(rgbTrans.pixels[index]);
        // point(realWorldPoint.x, realWorldPoint.y, realWorldPoint.z);
        // make realworld z negative: in the 3D drawing coordinate system +z points in the direction of the eye
      }
      endShape(CLOSE);
    }
  }
}
void mouseDragged() {
  if(mouseButton == LEFT) {
    // left drag rotates the scene
    incX = (mouseY - pmouseY) * RATE;
    incY = (mouseX - pmouseX) * RATE;
    rotx -= incX;
    roty -= incY;
  } else {
    // any other button shifts the RGB/depth alignment offset
    incX = (mouseY - pmouseY) * RATE * 100;
    incY = (mouseX - pmouseX) * RATE * 100;
    despX += incY;
    despY += incX;
    println(despX + " " + despY);
  }
}
void keyPressed() {
  switch(keyCode) {
    case UP:
      z += VE;
      break;
    case DOWN:
      z -= VE;
      break;
  }
}
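Controls: dragging with the left mouse button rotates the scene (rotx/roty); dragging with any other button shifts the despX/despY offset that aligns the RGB image with the depth map, and the current values are printed to the console; the UP and DOWN arrow keys zoom in and out. The commented-out variant kept inside the loop is the point-cloud drawing from the original example; drawing quads between neighbouring depth samples instead gives a continuous colored surface.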