How to run MediaPipe FaceMesh in an ES6 Node.js environment such as React
I am trying to run this HTML example https://codepen.io/mediapipe/details/KKgVaPJ from https://google.github.io/mediapipe/solutions/face_mesh#javascript-solution-api inside a Create React App. What I have done so far:
- npm installed all the FaceMesh MediaPipe packages.
- Replaced the jsdelivr script tags with Node imports, and I do get the definitions and functions.
- Replaced the video element with react-webcam.
I don't know how to replace this jsdelivr call, which may be part of the problem:
const faceMesh = new FaceMesh({
  locateFile: (file) => {
    return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
  }
});
So the questions are:
- Why is the FaceMesh output not showing? Is there an example of what I am trying to do?
Here is my App.js code (sorry for the debugging scaffolding):
import './App.css';
import React, { useState, useEffect } from "react";
import Webcam from "react-webcam";
import { Camera, CameraOptions } from '@mediapipe/camera_utils'
import {
  FaceMesh,
  FACEMESH_TESSELATION,
  FACEMESH_RIGHT_EYE,
  FACEMESH_LEFT_EYE,
  FACEMESH_RIGHT_EYEBROW,
  FACEMESH_LEFT_EYEBROW,
  FACEMESH_FACE_OVAL,
  FACEMESH_LIPS
} from '@mediapipe/face_mesh'
import { drawConnectors } from '@mediapipe/drawing_utils'

const videoConstraints = {
  width: 1280,
  height: 720,
  facingMode: "user"
};

function App() {
  const webcamRef = React.useRef(null);
  const canvasReference = React.useRef(null);
  const [cameraReady, setCameraReady] = useState(false);
  let canvasCtx
  let camera

  const videoElement = document.getElementsByClassName('input_video')[0];
  // const canvasElement = document.getElementsByClassName('output_canvas')[0];
  const canvasElement = document.createElement('canvas');

  console.log('canvasElement', canvasElement)
  console.log('canvasCtx', canvasCtx)

  useEffect(() => {
    camera = new Camera(webcamRef.current, {
      onFrame: async () => {
        console.log('{send}', await faceMesh.send({ image: webcamRef.current.video }));
      },
      width: 1280,
      height: 720
    });
    canvasCtx = canvasReference.current.getContext('2d');
    camera.start();
    console.log('canvasReference', canvasReference)
  }, [cameraReady]);

  function onResults(results) {
    console.log('results')
    canvasCtx.save();
    canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
    canvasCtx.drawImage(
      results.image, 0, 0, canvasElement.width, canvasElement.height);
    if (results.multiFaceLandmarks) {
      for (const landmarks of results.multiFaceLandmarks) {
        drawConnectors(canvasCtx, landmarks, FACEMESH_TESSELATION, { color: '#C0C0C070', lineWidth: 1 });
        drawConnectors(canvasCtx, landmarks, FACEMESH_RIGHT_EYE, { color: '#FF3030' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_RIGHT_EYEBROW, { color: '#FF3030' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_LEFT_EYE, { color: '#30FF30' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_LEFT_EYEBROW, { color: '#30FF30' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_FACE_OVAL, { color: '#E0E0E0' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_LIPS, { color: '#E0E0E0' });
      }
    }
    canvasCtx.restore();
  }

  const faceMesh = new FaceMesh({
    locateFile: (file) => {
      return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
    }
  });
  faceMesh.setOptions({
    selfieMode: true,
    maxNumFaces: 1,
    minDetectionConfidence: 0.5,
    minTrackingConfidence: 0.5
  });
  faceMesh.onResults(onResults);

  // const camera = new Camera(webcamRef.current, {
  //   onFrame: async () => {
  //     await faceMesh.send({ image: videoElement });
  //   },
  //   width: 1280,
  //   height: 720
  // });
  // camera.start();

  return (
    <div className="App">
      <Webcam
        audio={false}
        height={720}
        ref={webcamRef}
        screenshotFormat="image/jpeg"
        width={1280}
        videoConstraints={videoConstraints}
        onUserMedia={() => {
          console.log('webcamRef.current', webcamRef.current);
          // navigator.mediaDevices
          //   .getUserMedia({ video: true })
          //   .then(stream => webcamRef.current.srcObject = stream)
          //   .catch(console.log);
          setCameraReady(true)
        }}
      />
      <canvas
        ref={canvasReference}
        style={{
          position: "absolute",
          marginLeft: "auto",
          marginRight: "auto",
          left: 0,
          right: 0,
          textAlign: "center",
          zindex: 9,
          width: 1280,
          height: 720,
        }}
      />
    </div>
  );
}

export default App;
You don't need to replace jsdelivr; that piece of code is fine. However, I think you need to reorder your code a bit:
- The faceMesh initialization should go inside a useEffect with [] as its dependency array, so the pipeline starts when the page is first rendered.
- Also, you don't need to grab videoElement and canvasElement with document.*, since you have already defined refs for them.
Code example:
useEffect(() => {
  // Initialize FaceMesh once, on first render.
  const faceMesh = new FaceMesh({
    locateFile: (file) => {
      return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
    },
  });
  faceMesh.setOptions({
    maxNumFaces: 1,
    minDetectionConfidence: 0.5,
    minTrackingConfidence: 0.5,
  });
  faceMesh.onResults(onResults);

  if (
    typeof webcamRef.current !== "undefined" &&
    webcamRef.current !== null
  ) {
    camera = new Camera(webcamRef.current.video, {
      onFrame: async () => {
        await faceMesh.send({ image: webcamRef.current.video });
      },
      width: 1280,
      height: 720,
    });
    camera.start();
  }
}, []);
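If the component can unmount and remount (React Router navigation, Strict Mode double-invocation, etc.), it may also help to return a cleanup function from that same effect. This is only a minimal sketch under the assumption that the Camera instance from @mediapipe/camera_utils exposes stop() and the FaceMesh solution exposes close(); check the typings of the version you installed before relying on it:

useEffect(() => {
  const faceMesh = new FaceMesh({
    locateFile: (file) => `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`,
  });
  faceMesh.setOptions({ maxNumFaces: 1, minDetectionConfidence: 0.5, minTrackingConfidence: 0.5 });
  faceMesh.onResults(onResults);

  let camera;
  if (webcamRef.current !== null) {
    camera = new Camera(webcamRef.current.video, {
      onFrame: async () => {
        await faceMesh.send({ image: webcamRef.current.video });
      },
      width: 1280,
      height: 720,
    });
    camera.start();
  }

  // Cleanup on unmount: stop the frame loop and release the solution's resources.
  return () => {
    if (camera) camera.stop();   // assumption: Camera exposes stop()
    faceMesh.close();            // assumption: FaceMesh exposes close()
  };
}, []);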
Finally, in the onResults callback I suggest printing the results first, just to check that the MediaPipe setup is working. And don't forget to set the canvas size before drawing.
function onResults(results) {
  console.log(results)
  canvasCtx = canvasReference.current.getContext('2d')
  // size the canvas to the incoming video frame before drawing
  canvasReference.current.width = webcamRef.current.video.videoWidth;
  canvasReference.current.height = webcamRef.current.video.videoHeight;
  ...
}
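For reference, here is how the complete callback could look once the two pieces are combined: the canvas sizing above plus the drawConnectors calls from your question. This is a sketch of the same approach, assuming webcamRef and canvasReference are the refs defined in your component:

function onResults(results) {
  console.log(results);
  const canvasElement = canvasReference.current;
  const canvasCtx = canvasElement.getContext('2d');

  // Match the canvas resolution to the video frame before drawing.
  canvasElement.width = webcamRef.current.video.videoWidth;
  canvasElement.height = webcamRef.current.video.videoHeight;

  canvasCtx.save();
  canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
  canvasCtx.drawImage(results.image, 0, 0, canvasElement.width, canvasElement.height);

  if (results.multiFaceLandmarks) {
    for (const landmarks of results.multiFaceLandmarks) {
      drawConnectors(canvasCtx, landmarks, FACEMESH_TESSELATION, { color: '#C0C0C070', lineWidth: 1 });
      drawConnectors(canvasCtx, landmarks, FACEMESH_FACE_OVAL, { color: '#E0E0E0' });
      drawConnectors(canvasCtx, landmarks, FACEMESH_LIPS, { color: '#E0E0E0' });
    }
  }
  canvasCtx.restore();
}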
Good luck! :)