Output of Kinect 2 on browser using Node.js and Javascript
I am following the tutorial given here to try to display the output of a Kinect 2 in a web browser:
http://www.webondevices.com/xbox-kinect-2-javascript-gesture-tracking/
I have been able to get the device output into the browser console as JSON objects using this code in server.js:
var Kinect2 = require('kinect2'),
    express = require('express'),
    app = express(),
    server = require('http').createServer(app),
    io = require('socket.io').listen(server);

var kinect = new Kinect2();

app.use(express.static(__dirname + '/View'));
app.use(express.static(__dirname + '/Script'));

if(kinect.open()) {
    console.log('kinect opened');
    server.listen(8000);
    console.log('Server listening on port 8000');

    kinect.on('bodyFrame', function(bodyFrame){
        io.sockets.emit('bodyFrame', bodyFrame);
    });
    kinect.openBodyReader();

    app.get('/', function(req, res) {
        res.sendFile(__dirname + '/View/output.html');
    });

    setTimeout(function(){
        kinect.close();
        console.log("Kinect Closed");
    }, 100000);
}
output.html, the page where I want to show the output on a canvas, looks like this:
<html>
<head>
    <title>Kinect Output On Canvas</title>
    <script src="https://cdn.socket.io/socket.io-1.3.5.js"></script>
    <link rel="stylesheet" href="/style.css" />
</head>
<body>
    <h1>Kinect & HTML5 WebSockets</h1>
    <canvas id="canvas" width="640" height="480"></canvas>
    <script>
        var socket = io.connect('http://localhost:8000/');
        socket.on('bodyFrame', interpretData);

        function interpretData(bodyFrame) {
            // Web Socket message:
            console.log(bodyFrame); // logs each bodyFrame as a JSON object, 30+ frames/JSON objects per second in the browser
        }
    </script>
</body>
</html>
Each JSON object has the following structure, showing the position of every tracked joint:
{ bodyIndex: 5,
tracked: true,
trackingId: '72057594038115298',
leftHandState: 1,
rightHandState: 1,
joints:
[ { depthX: 0.24323934316635132,
depthY: 0.5925129055976868,
colorX: 0.33547070622444153,
colorY: 0.6129662394523621,
cameraX: -0.34261977672576904,
cameraY: -0.10602515190839767,
cameraZ: 0.9753329753875732,
orientationX: -0.04046249017119408,
orientationY: 0.9915661215782166,
orientationZ: -0.05280650407075882,
orientationW: 0.11122455447912216 },
{ depthX: 0.21760234236717224,
depthY: 0.3140539526939392,
colorX: 0.31521913409233093,
colorY: 0.2960273027420044,
cameraX: -0.36364009976387024,
cameraY: 0.19814369082450867,
cameraZ: 0.9404330253601074,
orientationX: -0.04830155894160271,
orientationY: 0.9615150094032288,
orientationZ: -0.04574603587388992,
orientationW: 0.26657652854919434 },
……
The joints array contains 24 such entries with similar parameters, one for each tracked joint.
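As a side note, here is a minimal sketch of how individual joints and hand states might be read out of one of these body objects. It assumes the joints array is indexed by the Kinect SDK JointType values (e.g. HandLeft = 7, HandRight = 11) and that leftHandState/rightHandState follow the SDK HandState enumeration; both mappings are assumptions that should be verified against the kinect2 version in use:

    // Hypothetical helper: read the left/right hand joints and hand states from one body.
    // The index and state values below are assumed from the Kinect v2 SDK enums.
    var HAND_LEFT = 7, HAND_RIGHT = 11; // assumed JointType indices
    var HAND_STATE = { 0: 'unknown', 1: 'not tracked', 2: 'open', 3: 'closed', 4: 'lasso' }; // assumed HandState values

    function describeHands(body) {
        var left = body.joints[HAND_LEFT];
        var right = body.joints[HAND_RIGHT];
        return {
            left:  { x: left.depthX,  y: left.depthY,  state: HAND_STATE[body.leftHandState] },
            right: { x: right.depthX, y: right.depthY, state: HAND_STATE[body.rightHandState] }
        };
    }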
To display the skeleton in the browser I tried this:
var ctx = document.getElementById('canvas').getContext('2d');
ctx.fillStyle = "red";
ctx.fillRect(10, 10, 20, 20);
var imgData = ctx.getImageData(10, 10, 15, 15);
and then, in a for loop over each received JSON object:
ctx.putImageData(imgData, x, y); // x and y are the depth x positions of left and right hands
The output of this is just one square dot on the canvas and another square dot in the left corner of the canvas, and I understand why: those are the coordinates being fed to it.
What I would like to know is how to interpret the data I get in the joints array for each joint, turn it into a formula, and plot the tracked points in the browser so that I can display the skeleton there. I am checking the actual tracking and video output in the Kinect Studio v2.0 desktop application.
Any suggestions would be much appreciated.
This is what ended up successfully producing the tracked skeleton output on the browser screen. In output.html, socket.io receives the JSON objects and JavaScript draws an item on the canvas for each point, based on its depthX and depthY parameters. In output.html:
var c = document.getElementById('canvas');   // canvas set up as in the question above
var ctx = c.getContext('2d');

function interpretData(bodyFrame) {
    ctx.clearRect(0, 0, c.width, c.height);
    console.log(bodyFrame);
    for (var i = 0; i < bodyFrame.bodies.length; i++) {
        if (bodyFrame.bodies[i].tracked == true) {
            console.log('tracked');
            for (var j = 0; j < bodyFrame.bodies[i].joints.length; j++) {
                var joint = bodyFrame.bodies[i].joints[j];
                ctx.fillStyle = "#FF0000";
                ctx.beginPath();
                // Multiplied by a static 400 to scale the normalized depthX/depthY values
                // (always less than 1) onto the canvas; without it the projected skeleton
                // is only visible in a corner.
                ctx.arc(joint.depthX * 400, joint.depthY * 400, 10, 0, Math.PI * 2, true);
                ctx.closePath();
                ctx.fill(); // draw a circle for each joint on the canvas
            }
        }
    }
}
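A possible refinement (not part of the original answer) is to scale by the actual canvas dimensions instead of the fixed 400, since depthX and depthY appear to be normalized to the 0..1 range; this keeps the projection filling the 640x480 canvas declared in output.html:

    var c = document.getElementById('canvas');
    var ctx = c.getContext('2d');

    function drawJoint(joint) {
        // Map normalized depth-space coordinates (assumed 0..1) onto the full canvas.
        var x = joint.depthX * c.width;   // 640 px wide canvas
        var y = joint.depthY * c.height;  // 480 px high canvas
        ctx.fillStyle = "#FF0000";
        ctx.beginPath();
        ctx.arc(x, y, 10, 0, Math.PI * 2, true);
        ctx.closePath();
        ctx.fill();
    }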