Debugging Affectiva FrameDetector: Worker Code Reported an Exception

I'm trying to use FrameDetector to process an existing MP4 file. I've put the video file and the HTML/JavaScript below on a node.js server and am running it on localhost, so there shouldn't be any CORS issues.
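
For reference, any static file server works here; a minimal node.js setup along the lines below (a sketch only, assuming the express package is installed; the file and directory names are placeholders) serves the rendered page, the script, and the video from the same origin, which is what avoids the CORS problem:

// server.js (illustrative sketch, not part of the original setup)
var express = require('express');
var app = express();

// Serve the rendered HTML plus the js/ and video/ directories from ./public
app.use(express.static('public'));

app.listen(3000, function () {
  console.log('Serving on http://localhost:3000');
});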

The detector initializes correctly, but every time I send it image data, the same two things happen:

  1. The first request returns an empty faces array
  2. The second request returns an error (with a unique trailing number each time): worker code reported an exception14920304.

I'm not really sure what to make of that message - does anyone have any suggestions?

//- FrameDetector.pug
html
  head
    title FrameDetector Demo
    script(src='https://download.affectiva.com/js/3.2/affdex.js')
  body
    canvas#canvas
    video#video-to-analyze(preload="auto" controls="true")
      source(type="video/mp4" src="video/my-video.mp4")
    script(src='js/FrameDetector.js')

// FrameDetector.js
var heartbeat, startTimestamp;

document.addEventListener('DOMContentLoaded', function(){
  var v = document.getElementById('video-to-analyze');
  var canvas = document.getElementById('canvas');
  var context = canvas.getContext('2d');

  var cw = Math.floor(canvas.clientWidth / 100);
  var ch = Math.floor(canvas.clientHeight / 100);
  canvas.width = cw;
  canvas.height = ch;

  v.addEventListener('play', function(){
      draw(this,context,cw,ch);
  },false);

},false);

function draw(v,c,w,h) {
  if(v.paused || v.ended) return false;
  c.drawImage(v,0,0,w,h);
  setTimeout(draw,20,v,c,w,h);
}

function analyzeVideoFrame() {
  //Get a canvas element from DOM
  var aCanvas = document.getElementById("canvas");
  var context = aCanvas.getContext('2d');

  //Get imageData object.
  var imageData = context.getImageData(0, 0, 640, 360);
  console.log("Captured imageData.", imageData);

  //Get current time in seconds
  var now = (new Date()).getTime() / 1000;

  //Get delta time between the first frame and the current frame.
  var deltaTime = now - startTimestamp;

  //Process the frame
  detector.process(imageData, deltaTime);
}

function onImageResultsSuccess(faces, image, timestamp) {
  console.log("onImageResultsSuccess:", timestamp, faces.length, faces[0]);
}

function onImageResultsFailure(image, timestamp, err_detail) {
  console.error("onImageResultsFailure:", timestamp, err_detail);
  clearInterval(heartbeat);
}

if (typeof(affdex)=="undefined") {
  console.log("The affdex global variable has not been loaded.");
}

var detector = new affdex.FrameDetector(affdex.FaceDetectorMode.LARGE_FACES);

detector.detectAllExpressions();
detector.detectAllEmotions();
detector.detectAllAppearance();

detector.addEventListener("onInitializeSuccess", function() {
  document.getElementById('video-to-analyze').play();
  startTimestamp = (new Date()).getTime() / 1000;
  heartbeat = setInterval(analyzeVideoFrame, 1000);
});
detector.addEventListener("onInitializeFailure", function() {
  console.error("Affectiva failed to initialize.");
});

detector.addEventListener("onImageResultsSuccess", onImageResultsSuccess);
detector.addEventListener("onImageResultsFailure", onImageResultsFailure);

detector.start();

Console output:

Captured imageData. ImageData {data: Uint8ClampedArray(921600), width: 640, height: 360}
onImageResultsSuccess: 0.005000114440917969 0 undefined
Captured imageData. ImageData {data: Uint8ClampedArray(921600), width: 640, height: 360}
onImageResultsFailure: 0.0009999275207519531 worker code reported an exception14920304

Figured it out. Once I drew the image into the canvas element correctly, the Affectiva code worked fine. Here is my corrected code:

//- FrameDetector.pug
html
  head
    title FrameDetector Demo
    script(src='http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js')
    script(src='https://download.affectiva.com/js/3.2/affdex.js')
  body
    canvas#canvas(width="640" height="360" style="display:none;")
    video#video(preload="auto" controls="true")
      source(type="video/mp4" src="video/my-video.mp4")
    script(src='js/FrameDetector.js')

// FrameDetector.js
// ctx and detector are shared with onVideoPlay() and analyzeVideoFrame()
var heartbeat, startTimestamp, ctx, detector;

function onVideoPlay() {
  var $this = this; //cache
  (function loop() {
    if (!$this.paused && !$this.ended) {
      ctx.drawImage($this, 0, 0);
      setTimeout(loop, 1000 / 30); // drawing at 30fps
    }
  })();
}

function analyzeVideoFrame() {
  //Get a canvas element from DOM
  var aCanvas = document.getElementById("canvas");
  var context = aCanvas.getContext('2d');


  //Get imageData object.
  var imageData = context.getImageData(0, 0, 640, 360);
  console.log("Captured imageData.", imageData);

  //Get current time in seconds
  var now = (new Date()).getTime() / 1000;

  //Get delta time between the first frame and the current frame.
  var deltaTime = now - startTimestamp;

  //Process the frame
  detector.process(imageData, deltaTime);
}

function onImageResultsSuccess(faces, image, timestamp) {
  console.log("onImageResultsSuccess:", timestamp, faces.length, faces[0]);
}

function onImageResultsFailure(image, timestamp, err_detail) {
  console.error("onImageResultsFailure:", timestamp, err_detail);
  clearInterval(heartbeat);
}


$(function() {
  if (typeof(affdex)=="undefined") {
    console.log("The affdex global variable has not been loaded.");
  }

  var canvas = document.getElementById('canvas');
  ctx = canvas.getContext('2d'); // assign the shared context used by onVideoPlay()
  var video = document.getElementById('video');
  detector = new affdex.FrameDetector(affdex.FaceDetectorMode.LARGE_FACES);

  // Set up a loop to draw frames to the canvas element
  video.addEventListener('play', onVideoPlay, false);

  // Set up and start the detector
  detector.detectAllExpressions();
  detector.detectAllEmotions();
  detector.detectAllAppearance();

  detector.addEventListener("onInitializeSuccess", function() {
    document.getElementById('video').play();
    startTimestamp = (new Date()).getTime() / 1000;
    heartbeat = setInterval(analyzeVideoFrame, 1000);
  });
  detector.addEventListener("onInitializeFailure", function() {
    console.error("Affectiva failed to initialize.");
  });

  detector.addEventListener("onImageResultsSuccess", onImageResultsSuccess);
  detector.addEventListener("onImageResultsFailure", onImageResultsFailure);

  detector.start();
});
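
As a quick sanity check (not part of the answer above, just a sketch; the helper name is made up), you can confirm the canvas actually contains drawn pixels before handing the ImageData to detector.process(). With the broken sizing in the first version, the canvas was only a few pixels wide, so the 640x360 ImageData read back was almost entirely blank (transparent black), which is presumably what triggered the worker exception.

// canvas-check.js (illustrative sketch only)
// Returns true if any pixel has a non-zero RGB value, i.e. a video frame has
// actually been drawn into the canvas rather than leaving all-zero data.
function canvasHasPixels(context, width, height) {
  var data = context.getImageData(0, 0, width, height).data;
  for (var i = 0; i < data.length; i += 4) {
    if (data[i] !== 0 || data[i + 1] !== 0 || data[i + 2] !== 0) {
      return true;
    }
  }
  return false;
}

Calling this at the top of analyzeVideoFrame() and returning early when it is false would simply skip frames that have not been drawn yet instead of sending blank data to the detector.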