model.detect 不是 HTMLVideoElement.predictWebcam 处的函数 JS 错误
model.detect is not a function at HTMLVideoElement.predictWebcam JS error
我正在尝试构建一个使用 Tensorflow 自定义模型的网站,该模型使用网络摄像头进行实时对象检测。我有模型,网络摄像头将加载到我的网页中,但我在控制台中收到此错误:
letsscan1.js:69 Uncaught TypeError: model.detect is not a function
at HTMLVideoElement.predictWebcam (letsscan1.js:69)
predictWebcam @ letsscan1.js:69
model.detect 是一个函数,在 HTMLVideoElement 的回调中调用,所以我不明白为什么我会收到此错误。
这是JS文件-
//import * as tf from '@tensorflow/tfjs';
//import { loadGraphModel } from '@tensorflow/tfjs-converter';

// Section of the page revealed once the model has finished loading.
const demosSection = document.getElementById('demos');

// Location of the converted TF.js graph model.
// For a Keras model use tf.loadLayersModel() instead.
const MODEL_FILE_URL = 'models/Graph/model.json';

// Holds the *resolved* model once the asynchronous load completes. It
// starts as null so enableCam() can tell whether the model is ready yet.
// (The original code kept the Promise returned by tf.loadGraphModel() in
// `model` and stored the resolved model into MODEL_FILE_URL — so every
// later `model.<method>()` call was made on a Promise, not on the model.)
let model = null;

// Machine-learning models can be large and take a moment to download, so
// wait for the load Promise before revealing the demo UI.
tf.loadGraphModel(MODEL_FILE_URL).then(function (loadedModel) {
  // Keep the loaded model so the rest of the script can run inference.
  model = loadedModel;
  // Show demo section now that the model is ready to use.
  demosSection.classList.remove('invisible');
});

const video = document.getElementById('webcam');
const liveView = document.getElementById('liveView');
// Reports whether this browser exposes the getUserMedia API required for
// webcam capture.
function hasGetUserMedia() {
  return !!navigator.mediaDevices?.getUserMedia;
}
// Overlay elements (labels + bounding boxes) drawn on the previous frame;
// tracked here so each new frame can clear them before drawing again.
var children = [];

// Wire up the "enable webcam" button, but only when the browser actually
// supports getUserMedia.
if (!hasGetUserMedia()) {
  console.warn('getUserMedia() is not supported by your browser');
} else {
  document
    .getElementById('webcamButton')
    .addEventListener('click', enableCam);
}
// Click handler for the "enable webcam" button: hides the button, opens a
// video-only webcam stream, and starts classification once the first
// video data has arrived.
function enableCam(event) {
  // Do nothing until the model reference has been populated.
  if (!model) {
    console.log('Wait! Model not loaded yet.')
    return;
  }

  // Hide the button that was just clicked.
  event.target.classList.add('removed');

  // Request video only — audio is not needed for object detection.
  navigator.mediaDevices
    .getUserMedia({ video: true })
    .then(function (stream) {
      video.srcObject = stream;
      video.addEventListener('loadeddata', predictWebcam);
    });
}
// Runs one detection pass on the current webcam frame, renders each
// confident prediction as an absolutely-positioned label + bounding box,
// and schedules itself again on the next animation frame.
//
// NOTE(review): the call below is where the console error
// "model.detect is not a function" originates. detect() is the
// high-level API of the @tensorflow-models/coco-ssd wrapper; a model
// loaded with tf.loadGraphModel() is a tf.GraphModel, which only exposes
// predict()/execute()/executeAsync(). A raw graph model's output tensors
// would also need post-processing before they match the
// {score, class, bbox} objects consumed here — confirm against the
// model's exported signature.
function predictWebcam() {
// Now let's start classifying the stream.
model.detect(video).then(function(predictions) {
// Remove any highlighting we did previous frame.
for (let i = 0; i < children.length; i++) {
liveView.removeChild(children[i]);
}
// Empty the tracking list in place without replacing the array object.
children.splice(0);
// Now lets loop through predictions and draw them to the live view if
// they have a high confidence score.
for (let n = 0; n < predictions.length; n++) {
// If we are over 66% sure we are sure we classified it right, draw it!
if (predictions[n].score > 0.66) {
const p = document.createElement('p');
p.innerText = predictions[n].class + ' - with ' +
Math.round(parseFloat(predictions[n].score) * 100) +
'% confidence.';
// Draw in top left of bounding box outline.
p.style = 'left: ' + predictions[n].bbox[0] + 'px;' +
'top: ' + predictions[n].bbox[1] + 'px;' +
'width: ' + (predictions[n].bbox[2] - 10) + 'px;';
// Draw the actual bounding box.
const highlighter = document.createElement('div');
highlighter.setAttribute('class', 'highlighter');
highlighter.style = 'left: ' + predictions[n].bbox[0] + 'px; top: ' +
predictions[n].bbox[1] + 'px; width: ' +
predictions[n].bbox[2] + 'px; height: ' +
predictions[n].bbox[3] + 'px;';
liveView.appendChild(highlighter);
liveView.appendChild(p);
// Store drawn objects in memory so we can delete them next time around.
children.push(highlighter);
children.push(p);
}
}
// Call this function again to keep predicting when the browser is ready.
window.requestAnimationFrame(predictWebcam);
});
}
将 model.detect(video) 替换为 model.predict(video)。
编辑:
因为它是自定义模型,您应该像这样加载它:
const model = await tf.loadLayersModel(MODEL_FILE_URL);
也许可以这样试试。您首先要加载模型,然后保存对它的引用,然后设置事件侦听器,然后仅在加载模型时处理事件。
首先像这样在 html 中加载 tf 脚本(在 html 的头部)
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs/dist/tf.min.js"> </script>
然后使用你的js脚本
const demosSection = document.getElementById('demos');
const MODEL_FILE_URL = 'models/Graph/model.json';

// Keep a reference of all the child elements we create
// so we can remove them easily on each render.
const children = [];

const video = document.getElementById('webcam');
const liveView = document.getElementById('liveView');

// Resolved model instance; stays null until tf.loadGraphModel() finishes.
// Must be declared with `let`, not `const`: the original
// `const model = null;` made the assignment inside .then() throw
// "TypeError: Assignment to constant variable".
let model = null;

// Machine-learning models can be large and take a moment to download, so
// defer all webcam wiring until the load Promise resolves.
tf.loadGraphModel(MODEL_FILE_URL).then(function (loadedModel) {
  // Keep the loaded model so predictWebcam() can run inference with it.
  model = loadedModel;

  // If webcam access is supported, add an event listener to the button
  // for when the user wants to activate it.
  if (hasGetUserMedia()) {
    const enableWebcamButton = document.getElementById('webcamButton');
    enableWebcamButton.addEventListener('click', enableCam);
  } else {
    console.warn('getUserMedia() is not supported by your browser');
  }

  // Reveal the demo section now that the model is ready to use.
  demosSection.classList.remove('invisible');
});
// Reports whether this browser exposes the getUserMedia API required for
// webcam capture.
function hasGetUserMedia() {
  return !!navigator.mediaDevices?.getUserMedia;
}
// Click handler for the "enable webcam" button: hides the button, opens a
// video-only webcam stream, and starts classification once the first
// video data has arrived.
function enableCam(event) {
  // Do nothing until the model reference has been populated.
  if (!model) {
    console.log('Wait! Model not loaded yet.')
    return;
  }

  // Hide the button that was just clicked.
  event.target.classList.add('removed');

  // Request video only — audio is not needed for object detection.
  navigator.mediaDevices
    .getUserMedia({ video: true })
    .then(function (stream) {
      video.srcObject = stream;
      video.addEventListener('loadeddata', predictWebcam);
    });
}
// Runs one detection pass on the current webcam frame, renders each
// confident prediction as an absolutely-positioned label + bounding box,
// and schedules itself again on the next animation frame.
//
// NOTE(review): detect() is the high-level API of the
// @tensorflow-models/coco-ssd wrapper; a model loaded with
// tf.loadGraphModel() is a tf.GraphModel, which only exposes
// predict()/execute()/executeAsync(), so this call will still throw.
// A raw graph model's output tensors would also need post-processing
// before they match the {score, class, bbox} objects consumed here —
// confirm against the model's exported signature.
function predictWebcam() {
// Now let's start classifying the stream.
model.detect(video).then(function(predictions) {
// Remove any highlighting we did previous frame.
for (let i = 0; i < children.length; i++) {
liveView.removeChild(children[i]);
}
// Empty the tracking list in place without replacing the array object.
children.splice(0);
// Now lets loop through predictions and draw them to the live view if
// they have a high confidence score.
for (let n = 0; n < predictions.length; n++) {
// If we are over 66% sure we are sure we classified it right, draw it!
if (predictions[n].score > 0.66) {
const p = document.createElement('p');
p.innerText = predictions[n].class + ' - with ' +
Math.round(parseFloat(predictions[n].score) * 100) +
'% confidence.';
// Draw in top left of bounding box outline.
p.style = 'left: ' + predictions[n].bbox[0] + 'px;' +
'top: ' + predictions[n].bbox[1] + 'px;' +
'width: ' + (predictions[n].bbox[2] - 10) + 'px;';
// Draw the actual bounding box.
const highlighter = document.createElement('div');
highlighter.setAttribute('class', 'highlighter');
highlighter.style = 'left: ' + predictions[n].bbox[0] + 'px; top: ' +
predictions[n].bbox[1] + 'px; width: ' +
predictions[n].bbox[2] + 'px; height: ' +
predictions[n].bbox[3] + 'px;';
liveView.appendChild(highlighter);
liveView.appendChild(p);
// Store drawn objects in memory so we can delete them next time around.
children.push(highlighter);
children.push(p);
}
}
// Call this function again to keep predicting when the browser is ready.
window.requestAnimationFrame(predictWebcam);
});
}
我正在尝试构建一个使用 Tensorflow 自定义模型的网站,该模型使用网络摄像头进行实时对象检测。我有模型,网络摄像头将加载到我的网页中,但我在控制台中收到此错误:
letsscan1.js:69 Uncaught TypeError: model.detect is not a function
at HTMLVideoElement.predictWebcam (letsscan1.js:69)
predictWebcam @ letsscan1.js:69
model.detect 是一个函数,在 HTMLVideoElement 的回调中调用,所以我不明白为什么我会收到此错误。
这是JS文件-
//import * as tf from '@tensorflow/tfjs';
//import { loadGraphModel } from '@tensorflow/tfjs-converter';

// Section of the page revealed once the model has finished loading.
const demosSection = document.getElementById('demos');

// Location of the converted TF.js graph model.
// For a Keras model use tf.loadLayersModel() instead.
const MODEL_FILE_URL = 'models/Graph/model.json';

// Holds the *resolved* model once the asynchronous load completes. It
// starts as null so enableCam() can tell whether the model is ready yet.
// (The original code kept the Promise returned by tf.loadGraphModel() in
// `model` and stored the resolved model into MODEL_FILE_URL — so every
// later `model.<method>()` call was made on a Promise, not on the model.)
let model = null;

// Machine-learning models can be large and take a moment to download, so
// wait for the load Promise before revealing the demo UI.
tf.loadGraphModel(MODEL_FILE_URL).then(function (loadedModel) {
  // Keep the loaded model so the rest of the script can run inference.
  model = loadedModel;
  // Show demo section now that the model is ready to use.
  demosSection.classList.remove('invisible');
});

const video = document.getElementById('webcam');
const liveView = document.getElementById('liveView');
// Reports whether this browser exposes the getUserMedia API required for
// webcam capture.
function hasGetUserMedia() {
  return !!navigator.mediaDevices?.getUserMedia;
}
// Overlay elements (labels + bounding boxes) drawn on the previous frame;
// tracked here so each new frame can clear them before drawing again.
var children = [];

// Wire up the "enable webcam" button, but only when the browser actually
// supports getUserMedia.
if (!hasGetUserMedia()) {
  console.warn('getUserMedia() is not supported by your browser');
} else {
  document
    .getElementById('webcamButton')
    .addEventListener('click', enableCam);
}
// Click handler for the "enable webcam" button: hides the button, opens a
// video-only webcam stream, and starts classification once the first
// video data has arrived.
function enableCam(event) {
  // Do nothing until the model reference has been populated.
  if (!model) {
    console.log('Wait! Model not loaded yet.')
    return;
  }

  // Hide the button that was just clicked.
  event.target.classList.add('removed');

  // Request video only — audio is not needed for object detection.
  navigator.mediaDevices
    .getUserMedia({ video: true })
    .then(function (stream) {
      video.srcObject = stream;
      video.addEventListener('loadeddata', predictWebcam);
    });
}
// Runs one detection pass on the current webcam frame, renders each
// confident prediction as an absolutely-positioned label + bounding box,
// and schedules itself again on the next animation frame.
//
// NOTE(review): the call below is where the console error
// "model.detect is not a function" originates. detect() is the
// high-level API of the @tensorflow-models/coco-ssd wrapper; a model
// loaded with tf.loadGraphModel() is a tf.GraphModel, which only exposes
// predict()/execute()/executeAsync(). A raw graph model's output tensors
// would also need post-processing before they match the
// {score, class, bbox} objects consumed here — confirm against the
// model's exported signature.
function predictWebcam() {
// Now let's start classifying the stream.
model.detect(video).then(function(predictions) {
// Remove any highlighting we did previous frame.
for (let i = 0; i < children.length; i++) {
liveView.removeChild(children[i]);
}
// Empty the tracking list in place without replacing the array object.
children.splice(0);
// Now lets loop through predictions and draw them to the live view if
// they have a high confidence score.
for (let n = 0; n < predictions.length; n++) {
// If we are over 66% sure we are sure we classified it right, draw it!
if (predictions[n].score > 0.66) {
const p = document.createElement('p');
p.innerText = predictions[n].class + ' - with ' +
Math.round(parseFloat(predictions[n].score) * 100) +
'% confidence.';
// Draw in top left of bounding box outline.
p.style = 'left: ' + predictions[n].bbox[0] + 'px;' +
'top: ' + predictions[n].bbox[1] + 'px;' +
'width: ' + (predictions[n].bbox[2] - 10) + 'px;';
// Draw the actual bounding box.
const highlighter = document.createElement('div');
highlighter.setAttribute('class', 'highlighter');
highlighter.style = 'left: ' + predictions[n].bbox[0] + 'px; top: ' +
predictions[n].bbox[1] + 'px; width: ' +
predictions[n].bbox[2] + 'px; height: ' +
predictions[n].bbox[3] + 'px;';
liveView.appendChild(highlighter);
liveView.appendChild(p);
// Store drawn objects in memory so we can delete them next time around.
children.push(highlighter);
children.push(p);
}
}
// Call this function again to keep predicting when the browser is ready.
window.requestAnimationFrame(predictWebcam);
});
}
将 model.detect(video) 替换为 model.predict(video)。
编辑:
因为它是自定义模型,您应该像这样加载它:
const model = await tf.loadLayersModel(MODEL_FILE_URL);
也许可以这样试试。您首先要加载模型,然后保存对它的引用,然后设置事件侦听器,然后仅在加载模型时处理事件。
首先像这样在 html 中加载 tf 脚本(在 html 的头部)
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs/dist/tf.min.js"> </script>
然后使用你的js脚本
const demosSection = document.getElementById('demos');
const MODEL_FILE_URL = 'models/Graph/model.json';

// Keep a reference of all the child elements we create
// so we can remove them easily on each render.
const children = [];

const video = document.getElementById('webcam');
const liveView = document.getElementById('liveView');

// Resolved model instance; stays null until tf.loadGraphModel() finishes.
// Must be declared with `let`, not `const`: the original
// `const model = null;` made the assignment inside .then() throw
// "TypeError: Assignment to constant variable".
let model = null;

// Machine-learning models can be large and take a moment to download, so
// defer all webcam wiring until the load Promise resolves.
tf.loadGraphModel(MODEL_FILE_URL).then(function (loadedModel) {
  // Keep the loaded model so predictWebcam() can run inference with it.
  model = loadedModel;

  // If webcam access is supported, add an event listener to the button
  // for when the user wants to activate it.
  if (hasGetUserMedia()) {
    const enableWebcamButton = document.getElementById('webcamButton');
    enableWebcamButton.addEventListener('click', enableCam);
  } else {
    console.warn('getUserMedia() is not supported by your browser');
  }

  // Reveal the demo section now that the model is ready to use.
  demosSection.classList.remove('invisible');
});
// Reports whether this browser exposes the getUserMedia API required for
// webcam capture.
function hasGetUserMedia() {
  return !!navigator.mediaDevices?.getUserMedia;
}
// Click handler for the "enable webcam" button: hides the button, opens a
// video-only webcam stream, and starts classification once the first
// video data has arrived.
function enableCam(event) {
  // Do nothing until the model reference has been populated.
  if (!model) {
    console.log('Wait! Model not loaded yet.')
    return;
  }

  // Hide the button that was just clicked.
  event.target.classList.add('removed');

  // Request video only — audio is not needed for object detection.
  navigator.mediaDevices
    .getUserMedia({ video: true })
    .then(function (stream) {
      video.srcObject = stream;
      video.addEventListener('loadeddata', predictWebcam);
    });
}
// Runs one detection pass on the current webcam frame, renders each
// confident prediction as an absolutely-positioned label + bounding box,
// and schedules itself again on the next animation frame.
//
// NOTE(review): detect() is the high-level API of the
// @tensorflow-models/coco-ssd wrapper; a model loaded with
// tf.loadGraphModel() is a tf.GraphModel, which only exposes
// predict()/execute()/executeAsync(), so this call will still throw.
// A raw graph model's output tensors would also need post-processing
// before they match the {score, class, bbox} objects consumed here —
// confirm against the model's exported signature.
function predictWebcam() {
// Now let's start classifying the stream.
model.detect(video).then(function(predictions) {
// Remove any highlighting we did previous frame.
for (let i = 0; i < children.length; i++) {
liveView.removeChild(children[i]);
}
// Empty the tracking list in place without replacing the array object.
children.splice(0);
// Now lets loop through predictions and draw them to the live view if
// they have a high confidence score.
for (let n = 0; n < predictions.length; n++) {
// If we are over 66% sure we are sure we classified it right, draw it!
if (predictions[n].score > 0.66) {
const p = document.createElement('p');
p.innerText = predictions[n].class + ' - with ' +
Math.round(parseFloat(predictions[n].score) * 100) +
'% confidence.';
// Draw in top left of bounding box outline.
p.style = 'left: ' + predictions[n].bbox[0] + 'px;' +
'top: ' + predictions[n].bbox[1] + 'px;' +
'width: ' + (predictions[n].bbox[2] - 10) + 'px;';
// Draw the actual bounding box.
const highlighter = document.createElement('div');
highlighter.setAttribute('class', 'highlighter');
highlighter.style = 'left: ' + predictions[n].bbox[0] + 'px; top: ' +
predictions[n].bbox[1] + 'px; width: ' +
predictions[n].bbox[2] + 'px; height: ' +
predictions[n].bbox[3] + 'px;';
liveView.appendChild(highlighter);
liveView.appendChild(p);
// Store drawn objects in memory so we can delete them next time around.
children.push(highlighter);
children.push(p);
}
}
// Call this function again to keep predicting when the browser is ready.
window.requestAnimationFrame(predictWebcam);
});
}