处理中的骨骼跟踪和播放视频
Skeleton tracking & playing a video in processing
我们使用了以下代码来同时进行骨骼跟踪(头部跟踪)和播放视频:
import processing.video.*;
import SimpleOpenNI.*;
import java.util.*;
// Kinect context provided by the SimpleOpenNI wrapper.
SimpleOpenNI kinect;
// Latest depth frame fetched from the Kinect each draw() pass.
PImage kinectDepth;
// User IDs reported by OpenNI for the currently detected users.
int[] userID;
// Per-user drawing colors, indexed by loop position. Only 6 entries,
// so a 7th simultaneous user would overflow this array.
color[] userColor = new color[]{ color(255,0,0), color(0,255,0), color(0,0,255),
color(255,255,0), color(255,0,255), color(0,255,255)};
// Projected head position of the user currently being drawn.
PVector headPosition = new PVector();
// NOTE(review): headSize appears unused in this sketch.
float headSize = 200;
// Minimum joint-tracking confidence required before drawing a head.
float confidenceLevel = 0.5;
// Confidence returned by the last getJointPositionSkeleton() call.
float confidence;
// Scratch vector for the confidence lookup (the position is discarded).
PVector confidenceVector = new PVector();
// The video clip played alongside the skeleton overlay.
Movie movie1;
// One-time initialization: window, movie, and Kinect context.
void setup()
{
size(640, 480);
// NOTE(review): the ".mP4" extension casing looks odd — confirm the
// asset in the data folder is really named this way.
movie1 = new Movie(this, "moon.mP4");
kinect = new SimpleOpenNI(this);
kinect.enableDepth();
// User tracking must be enabled for the skeleton callbacks to fire.
kinect.enableUser();
// play() runs the clip once; use loop() for continuous playback.
movie1.play();
}
// Per-frame render: draws the movie, then the Kinect depth image,
// then a head marker for every user with a confidently tracked skeleton.
void draw(){
image(movie1, 0, 0, width, height);
kinect.update();
kinectDepth = kinect.depthImage();
// NOTE(review): the depth image is drawn at (0,0) over the whole
// window, so it completely covers the movie frame drawn above.
image(kinectDepth,0,0);
// This is the call that crashes the JVM per the attached log
// (SimpleOpenNI.getUsers -> IntVector_size native frame).
userID = kinect.getUsers();
for(int i=0;i<userID.length;i++)
{
if(kinect.isTrackingSkeleton(userID[i]))
{
// Confidence of the head joint; the joint position itself is
// written into confidenceVector and ignored here.
confidence = kinect.getJointPositionSkeleton(userID[i],SimpleOpenNI.SKEL_HEAD,confidenceVector);
if(confidence > confidenceLevel)
{
// change draw color based on hand id#
// NOTE(review): userColor has only 6 entries — i >= 6 would
// throw ArrayIndexOutOfBoundsException.
stroke(userColor[(i)]);
// fill the ellipse with the same color
fill(userColor[(i)]);
// draw the rest of the body
drawSkeleton(userID[i]);
}
}
}
}
/*---------------------------------------------------------------
Draw the skeleton of a tracked user. Input is userID
----------------------------------------------------------------*/
// Despite the name, only the head is drawn: fetch the head joint in
// real-world coordinates, convert to screen space, and draw a circle.
void drawSkeleton(int userId){
kinect.getJointPositionSkeleton(userId, SimpleOpenNI.SKEL_HEAD,headPosition);
// In-place conversion: headPosition is both input and output.
kinect.convertRealWorldToProjective(headPosition,headPosition);
ellipse(headPosition.x,headPosition.y,30,30);
}
// SimpleOpenNI callback: a new user entered the scene. Begin skeleton
// tracking immediately and log the assigned id.
void onNewUser(SimpleOpenNI curContext, int userId){
String msg = "New User Detected - userId: " + userId;
println(msg);
curContext.startTrackingSkeleton(userId);
}
// SimpleOpenNI callback: OpenNI can no longer see this user.
void onLostUser(SimpleOpenNI curContext, int userId){
String msg = "User Lost - userId: " + userId;
println(msg);
}
// SimpleOpenNI callback; intentionally a no-op — visibility changes
// are not acted upon in this sketch.
void onVisibleUser(SimpleOpenNI curContext, int userId){
} //void onVisibleUser(SimpleOpenNI curContext, int userId)
// Processing video callback: a new frame is available — read it so the
// next image(movie1, ...) call shows the latest frame.
void movieEvent(Movie m) {
m.read();
}
当我们尝试运行上面的代码时,日志文件中生成了以下错误:
Java frames: (J=compiled Java code, j=interpreted, Vv=VM code)
J 1472 SimpleOpenNI.SimpleOpenNIJNI.IntVector_size(JLSimpleOpenNI/IntVector;)J (0 bytes) @ 0x0000000002ebe695 [0x0000000002ebe640+0x55]
J 1471 C1 SimpleOpenNI.IntVector.size()J (8 bytes) @ 0x0000000002ebe314 [0x0000000002ebe280+0x94]
j SimpleOpenNI.SimpleOpenNI.getUsers()[I+15
J 1777 C1 skeleton_track_simpleopen_video.draw()V (159 bytes) @ 0x0000000003004ca4 [0x0000000003004600+0x6a4]
j processing.core.PApplet.handleDraw()V+161
J 1769 C1 processing.awt.PSurfaceAWT.callDraw()V (18 bytes) @ 0x000000000300009c [0x0000000002ffff80+0x11c]
j processing.core.PSurfaceNone$AnimationThread.run()V+30
v ~StubRoutines::call_stub
值得注意的是上面的代码在没有播放视频(processing.video库)的情况下运行没有任何错误。
能否协助我们找出上面代码中的问题?
这确实是一个奇怪的行为。要深入了解这个问题,可能需要从源代码编译 SimpleOpenNI 库,并调试它的 getUsers()
方法为何会产生无效的内存引用。
如果您只是想进行一些测试并让事情顺利进行,这可能不切实际。
我建议不要使用 getUsers()
方法,可以改用 getNumberOfUsers()
:
import processing.video.*;
import SimpleOpenNI.*;
import java.util.*;
// Kinect context provided by the SimpleOpenNI wrapper.
SimpleOpenNI kinect;
// Latest depth frame fetched from the Kinect each draw() pass.
PImage kinectDepth;
// Kept for reference; no longer populated — getUsers() crashed the JVM.
int[] userID;
// Per-user drawing colors, indexed by loop position. Only 6 entries,
// so a 7th simultaneous user would overflow this array.
color[] userColor = new color[]{ color(255,0,0), color(0,255,0), color(0,0,255),
color(255,255,0), color(255,0,255), color(0,255,255)};
// Projected head position of the user currently being drawn.
PVector headPosition = new PVector();
// NOTE(review): headSize appears unused in this sketch.
float headSize = 200;
// Minimum joint-tracking confidence required before drawing a head.
float confidenceLevel = 0.5;
// Confidence returned by the last getJointPositionSkeleton() call.
float confidence;
// Scratch vector for the confidence lookup (the position is discarded).
PVector confidenceVector = new PVector();
// The video clip played alongside the skeleton overlay.
Movie movie1;
// One-time initialization: window, movie, and Kinect context.
void setup()
{
size(640, 480);
movie1 = new Movie(this, "moon.mP4");
// Replays a recorded .oni file instead of a live sensor.
// NOTE(review): this absolute path is machine-specific — change it,
// or drop the second argument to use a live Kinect.
kinect = new SimpleOpenNI(this,"/Users/George/Downloads/gaf/as/CityWall/oni/test2.oni");
kinect.enableDepth();
// User tracking must be enabled for the skeleton callbacks to fire.
kinect.enableUser();
// loop() keeps the clip playing continuously (play() runs it once).
movie1.loop();
}
// Per-frame render: depth image plus head markers, with the movie
// drawn last (at quarter size) so both remain visible. Avoids the
// crashing getUsers() call by iterating over getNumberOfUsers().
void draw(){
kinect.update();
kinectDepth = kinect.depthImage();
image(kinectDepth,0,0);
//userID = kinect.getUsers();
for(int i=0;i<kinect.getNumberOfUsers();i++)
{
// Assumes OpenNI user IDs form the contiguous range 1..N.
// NOTE(review): IDs may not stay contiguous after users come and
// go — harmless here because isTrackingSkeleton() gates every use.
if(kinect.isTrackingSkeleton(i+1))
{
// Confidence of the head joint; the joint position itself is
// written into confidenceVector and ignored here.
confidence = kinect.getJointPositionSkeleton(i+1,SimpleOpenNI.SKEL_HEAD,confidenceVector);
if(confidence > confidenceLevel)
{
// change draw color based on hand id#
// NOTE(review): userColor has only 6 entries; i >= 6 would throw.
stroke(userColor[(i)]);
// fill the ellipse with the same color
fill(userColor[(i)]);
// draw the rest of the body
drawSkeleton(i+1);
}
}
}
// Drawn after the depth image so the movie is not overdrawn.
image(movie1, 0, 0, movie1.width/4, movie1.height/4);
}
/*---------------------------------------------------------------
Draw the skeleton of a tracked user. Input is userID
----------------------------------------------------------------*/
// Despite the name, only the head is drawn: fetch the head joint in
// real-world coordinates, convert to screen space, and draw a circle.
void drawSkeleton(int userId){
kinect.getJointPositionSkeleton(userId, SimpleOpenNI.SKEL_HEAD,headPosition);
// In-place conversion: headPosition is both input and output.
kinect.convertRealWorldToProjective(headPosition,headPosition);
ellipse(headPosition.x,headPosition.y,30,30);
}
// SimpleOpenNI callback: a new user entered the scene. Begin skeleton
// tracking immediately and log the assigned id.
void onNewUser(SimpleOpenNI curContext, int userId){
String msg = "New User Detected - userId: " + userId;
println(msg);
curContext.startTrackingSkeleton(userId);
}
// SimpleOpenNI callback: OpenNI can no longer see this user.
void onLostUser(SimpleOpenNI curContext, int userId){
String msg = "User Lost - userId: " + userId;
println(msg);
}
// SimpleOpenNI callback; intentionally a no-op — visibility changes
// are not acted upon in this sketch.
void onVisibleUser(SimpleOpenNI curContext, int userId){
} //void onVisibleUser(SimpleOpenNI curContext, int userId)
// Processing video callback: a new frame is available — read it so the
// next image(movie1, ...) call shows the latest frame.
void movieEvent(Movie m) {
m.read();
}
切记,这只会告诉您有多少用户正被跟踪,而不会告诉您他们的 ID 是什么。除了 getNumberOfUsers()
之外,您甚至可以直接使用一个固定的 int 上限(假设 15 是 OpenNI 支持的最大用户数)。这样做可行,因为您总是会先检查 kinect.isTrackingSkeleton()。
我们使用了以下代码来同时进行骨骼跟踪(头部跟踪)和播放视频:
import processing.video.*;
import SimpleOpenNI.*;
import java.util.*;
// Kinect context provided by the SimpleOpenNI wrapper.
SimpleOpenNI kinect;
// Latest depth frame fetched from the Kinect each draw() pass.
PImage kinectDepth;
// User IDs reported by OpenNI for the currently detected users.
int[] userID;
// Per-user drawing colors, indexed by loop position. Only 6 entries,
// so a 7th simultaneous user would overflow this array.
color[] userColor = new color[]{ color(255,0,0), color(0,255,0), color(0,0,255),
color(255,255,0), color(255,0,255), color(0,255,255)};
// Projected head position of the user currently being drawn.
PVector headPosition = new PVector();
// NOTE(review): headSize appears unused in this sketch.
float headSize = 200;
// Minimum joint-tracking confidence required before drawing a head.
float confidenceLevel = 0.5;
// Confidence returned by the last getJointPositionSkeleton() call.
float confidence;
// Scratch vector for the confidence lookup (the position is discarded).
PVector confidenceVector = new PVector();
// The video clip played alongside the skeleton overlay.
Movie movie1;
// One-time initialization: window, movie, and Kinect context.
void setup()
{
size(640, 480);
// NOTE(review): the ".mP4" extension casing looks odd — confirm the
// asset in the data folder is really named this way.
movie1 = new Movie(this, "moon.mP4");
kinect = new SimpleOpenNI(this);
kinect.enableDepth();
// User tracking must be enabled for the skeleton callbacks to fire.
kinect.enableUser();
// play() runs the clip once; use loop() for continuous playback.
movie1.play();
}
// Per-frame render: draws the movie, then the Kinect depth image,
// then a head marker for every user with a confidently tracked skeleton.
void draw(){
image(movie1, 0, 0, width, height);
kinect.update();
kinectDepth = kinect.depthImage();
// NOTE(review): the depth image is drawn at (0,0) over the whole
// window, so it completely covers the movie frame drawn above.
image(kinectDepth,0,0);
// This is the call that crashes the JVM per the attached log
// (SimpleOpenNI.getUsers -> IntVector_size native frame).
userID = kinect.getUsers();
for(int i=0;i<userID.length;i++)
{
if(kinect.isTrackingSkeleton(userID[i]))
{
// Confidence of the head joint; the joint position itself is
// written into confidenceVector and ignored here.
confidence = kinect.getJointPositionSkeleton(userID[i],SimpleOpenNI.SKEL_HEAD,confidenceVector);
if(confidence > confidenceLevel)
{
// change draw color based on hand id#
// NOTE(review): userColor has only 6 entries — i >= 6 would
// throw ArrayIndexOutOfBoundsException.
stroke(userColor[(i)]);
// fill the ellipse with the same color
fill(userColor[(i)]);
// draw the rest of the body
drawSkeleton(userID[i]);
}
}
}
}
/*---------------------------------------------------------------
Draw the skeleton of a tracked user. Input is userID
----------------------------------------------------------------*/
// Despite the name, only the head is drawn: fetch the head joint in
// real-world coordinates, convert to screen space, and draw a circle.
void drawSkeleton(int userId){
kinect.getJointPositionSkeleton(userId, SimpleOpenNI.SKEL_HEAD,headPosition);
// In-place conversion: headPosition is both input and output.
kinect.convertRealWorldToProjective(headPosition,headPosition);
ellipse(headPosition.x,headPosition.y,30,30);
}
// SimpleOpenNI callback: a new user entered the scene. Begin skeleton
// tracking immediately and log the assigned id.
void onNewUser(SimpleOpenNI curContext, int userId){
String msg = "New User Detected - userId: " + userId;
println(msg);
curContext.startTrackingSkeleton(userId);
}
// SimpleOpenNI callback: OpenNI can no longer see this user.
void onLostUser(SimpleOpenNI curContext, int userId){
String msg = "User Lost - userId: " + userId;
println(msg);
}
// SimpleOpenNI callback; intentionally a no-op — visibility changes
// are not acted upon in this sketch.
void onVisibleUser(SimpleOpenNI curContext, int userId){
} //void onVisibleUser(SimpleOpenNI curContext, int userId)
// Processing video callback: a new frame is available — read it so the
// next image(movie1, ...) call shows the latest frame.
void movieEvent(Movie m) {
m.read();
}
当我们尝试运行上面的代码时,日志文件中生成了以下错误:
Java frames: (J=compiled Java code, j=interpreted, Vv=VM code) J 1472 SimpleOpenNI.SimpleOpenNIJNI.IntVector_size(JLSimpleOpenNI/IntVector;)J (0 bytes) @ 0x0000000002ebe695 [0x0000000002ebe640+0x55] J 1471 C1 SimpleOpenNI.IntVector.size()J (8 bytes) @ 0x0000000002ebe314 [0x0000000002ebe280+0x94] j SimpleOpenNI.SimpleOpenNI.getUsers()[I+15 J 1777 C1 skeleton_track_simpleopen_video.draw()V (159 bytes) @ 0x0000000003004ca4 [0x0000000003004600+0x6a4] j processing.core.PApplet.handleDraw()V+161 J 1769 C1 processing.awt.PSurfaceAWT.callDraw()V (18 bytes) @ 0x000000000300009c [0x0000000002ffff80+0x11c] j processing.core.PSurfaceNone$AnimationThread.run()V+30 v ~StubRoutines::call_stub
值得注意的是上面的代码在没有播放视频(processing.video库)的情况下运行没有任何错误。
能否协助我们找出上面代码中的问题?
这确实是一个奇怪的行为。要深入了解这个问题,可能需要从源代码编译 SimpleOpenNI 库,并调试它的 getUsers()
方法为何会产生无效的内存引用。
如果您只是想进行一些测试并让事情顺利进行,这可能不切实际。
我建议不要使用 getUsers()
方法,可以改用 getNumberOfUsers()
:
import processing.video.*;
import SimpleOpenNI.*;
import java.util.*;
// Kinect context provided by the SimpleOpenNI wrapper.
SimpleOpenNI kinect;
// Latest depth frame fetched from the Kinect each draw() pass.
PImage kinectDepth;
// Kept for reference; no longer populated — getUsers() crashed the JVM.
int[] userID;
// Per-user drawing colors, indexed by loop position. Only 6 entries,
// so a 7th simultaneous user would overflow this array.
color[] userColor = new color[]{ color(255,0,0), color(0,255,0), color(0,0,255),
color(255,255,0), color(255,0,255), color(0,255,255)};
// Projected head position of the user currently being drawn.
PVector headPosition = new PVector();
// NOTE(review): headSize appears unused in this sketch.
float headSize = 200;
// Minimum joint-tracking confidence required before drawing a head.
float confidenceLevel = 0.5;
// Confidence returned by the last getJointPositionSkeleton() call.
float confidence;
// Scratch vector for the confidence lookup (the position is discarded).
PVector confidenceVector = new PVector();
// The video clip played alongside the skeleton overlay.
Movie movie1;
// One-time initialization: window, movie, and Kinect context.
void setup()
{
size(640, 480);
movie1 = new Movie(this, "moon.mP4");
// Replays a recorded .oni file instead of a live sensor.
// NOTE(review): this absolute path is machine-specific — change it,
// or drop the second argument to use a live Kinect.
kinect = new SimpleOpenNI(this,"/Users/George/Downloads/gaf/as/CityWall/oni/test2.oni");
kinect.enableDepth();
// User tracking must be enabled for the skeleton callbacks to fire.
kinect.enableUser();
// loop() keeps the clip playing continuously (play() runs it once).
movie1.loop();
}
// Per-frame render: depth image plus head markers, with the movie
// drawn last (at quarter size) so both remain visible. Avoids the
// crashing getUsers() call by iterating over getNumberOfUsers().
void draw(){
kinect.update();
kinectDepth = kinect.depthImage();
image(kinectDepth,0,0);
//userID = kinect.getUsers();
for(int i=0;i<kinect.getNumberOfUsers();i++)
{
// Assumes OpenNI user IDs form the contiguous range 1..N.
// NOTE(review): IDs may not stay contiguous after users come and
// go — harmless here because isTrackingSkeleton() gates every use.
if(kinect.isTrackingSkeleton(i+1))
{
// Confidence of the head joint; the joint position itself is
// written into confidenceVector and ignored here.
confidence = kinect.getJointPositionSkeleton(i+1,SimpleOpenNI.SKEL_HEAD,confidenceVector);
if(confidence > confidenceLevel)
{
// change draw color based on hand id#
// NOTE(review): userColor has only 6 entries; i >= 6 would throw.
stroke(userColor[(i)]);
// fill the ellipse with the same color
fill(userColor[(i)]);
// draw the rest of the body
drawSkeleton(i+1);
}
}
}
// Drawn after the depth image so the movie is not overdrawn.
image(movie1, 0, 0, movie1.width/4, movie1.height/4);
}
/*---------------------------------------------------------------
Draw the skeleton of a tracked user. Input is userID
----------------------------------------------------------------*/
// Despite the name, only the head is drawn: fetch the head joint in
// real-world coordinates, convert to screen space, and draw a circle.
void drawSkeleton(int userId){
kinect.getJointPositionSkeleton(userId, SimpleOpenNI.SKEL_HEAD,headPosition);
// In-place conversion: headPosition is both input and output.
kinect.convertRealWorldToProjective(headPosition,headPosition);
ellipse(headPosition.x,headPosition.y,30,30);
}
// SimpleOpenNI callback: a new user entered the scene. Begin skeleton
// tracking immediately and log the assigned id.
void onNewUser(SimpleOpenNI curContext, int userId){
String msg = "New User Detected - userId: " + userId;
println(msg);
curContext.startTrackingSkeleton(userId);
}
// SimpleOpenNI callback: OpenNI can no longer see this user.
void onLostUser(SimpleOpenNI curContext, int userId){
String msg = "User Lost - userId: " + userId;
println(msg);
}
// SimpleOpenNI callback; intentionally a no-op — visibility changes
// are not acted upon in this sketch.
void onVisibleUser(SimpleOpenNI curContext, int userId){
} //void onVisibleUser(SimpleOpenNI curContext, int userId)
// Processing video callback: a new frame is available — read it so the
// next image(movie1, ...) call shows the latest frame.
void movieEvent(Movie m) {
m.read();
}
切记,这只会告诉您有多少用户正被跟踪,而不会告诉您他们的 ID 是什么。除了 getNumberOfUsers()
之外,您甚至可以直接使用一个固定的 int 上限(假设 15 是 OpenNI 支持的最大用户数)。这样做可行,因为您总是会先检查 kinect.isTrackingSkeleton()。