使用 Android Camera2 API 人脸检测和画圈
Face detection & draw circle using Android Camera2 API
目前我正在尝试将 Camera2 的 Face 矩形转换为实际视图坐标,以便在 Camera2 API 检测到的人脸上绘制圆圈。
我可以通过以下代码将人脸数量及其数据放入回调中:
private CameraCaptureSession.CaptureCallback mCaptureCallback
= new CameraCaptureSession.CaptureCallback() {
private void process(CaptureResult result) {
Integer mode = result.get(CaptureResult.STATISTICS_FACE_DETECT_MODE);
Face [] faces = result.get(CaptureResult.STATISTICS_FACES);
if(faces != null && mode != null)
Log.e("tag", "faces : " + faces.length + " , mode : " + mode );
}
@Override
public void onCaptureProgressed(CameraCaptureSession session, CaptureRequest request, CaptureResult partialResult) {
process(partialResult);
}
@Override
public void onCaptureCompleted(CameraCaptureSession session, CaptureRequest request, TotalCaptureResult result) {
process(result);
}
}
到目前为止,我已尝试使用以下代码将 Face rect 转换为实际视图坐标(似乎不起作用):
/**
* Callback from the CameraCaptureSession.CaptureCallback
*/
/**
 * Callback from the CameraCaptureSession.CaptureCallback.
 * Rebuilds the sensor-to-view matrix and maps the detected faces onto it.
 */
@Override
public void onFaceDetection(Face[] faces) {
    if (mCameraView == null) {
        return; // camera not ready yet — nothing to draw on
    }
    setFaceDetectionMatrix();
    setFaceDetectionLayout(faces);
}
/**
* This method gets the scaling values of the face in matrix
*/
/**
 * Builds {@code mFaceDetectionMatrix}, which maps face rectangles reported in
 * sensor active-array coordinates into the overlay view's coordinate space.
 * Order matters: mirror, then rotate, then scale, then re-center.
 */
private void setFaceDetectionMatrix() {
    // Face Detection Matrix
    mFaceDetectionMatrix = new Matrix();
    // Need mirror for front camera (done with a negative X scale).
    boolean mirror = mCameraView.getFacing() == CameraView.FACING_FRONT;
    mFaceDetectionMatrix.setScale(mirror ? -1 : 1, 1);
    mFaceDetectionMatrix.postRotate(mCameraDisplayOrientation);
    Rect activeArraySizeRect = mCameraView.getCameraCharacteristics().get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE);
    Log.i("Test", "activeArraySizeRect1: (" + activeArraySizeRect + ") -> " + activeArraySizeRect.width() + ", " + activeArraySizeRect.height());
    Log.i("Test", "activeArraySizeRect2: " + cameraOverlayDrawingView.getWidth() + ", " + cameraOverlayDrawingView.getHeight());
    // BUG FIX: both operands were int, so int/int division truncated the scale
    // factors to 0 whenever the view is smaller than the sensor array.
    // Cast to float before dividing to keep the fractional scale.
    float s1 = cameraOverlayDrawingView.getWidth() / (float) activeArraySizeRect.width();
    float s2 = cameraOverlayDrawingView.getHeight() / (float) activeArraySizeRect.height();
    mFaceDetectionMatrix.postScale(s1, s2);
    // Re-center: the mirror/rotate steps above pivot around the origin.
    // Use 2f to avoid losing the half pixel to integer division.
    mFaceDetectionMatrix.postTranslate(cameraOverlayDrawingView.getWidth() / 2f, cameraOverlayDrawingView.getHeight() / 2f);
}
/**
* This method set the matrix for translating rect
*/
/**
 * Maps every confidently-detected face rectangle through
 * {@code mFaceDetectionMatrix} into view coordinates and hands the result
 * to the overlay view for drawing.
 */
private void setFaceDetectionLayout(Face[] faces) {
    if (faces.length == 0) {
        cameraOverlayDrawingView.setHaveFaces(false, null);
        return;
    }
    List<Rect> faceRects = new ArrayList<>();
    for (int i = 0; i < faces.length; i++) {
        Log.i("Test", "Activity face" + i + " bounds: " + faces[i].getBounds());
        // Skip low-confidence detections.
        if (faces[i].getScore() <= 50) {
            continue;
        }
        RectF mapped = new RectF(faces[i].getBounds());
        mFaceDetectionMatrix.mapRect(mapped);
        Rect viewRect = new Rect((int) mapped.left, (int) mapped.top, (int) mapped.right, (int) mapped.bottom);
        Log.i("Test", "Activity rect" + i + " bounds: " + viewRect);
        faceRects.add(viewRect);
    }
    cameraOverlayDrawingView.setHaveFaces(true, faceRects);
}
新:
我已经处理好了所有的手机旋转方向。我猜 offsetDxDy 取决于我的布局,但说实话,我不知道为什么要把这个值设为 100——它是我在华为 P9 上通过实验找到的,并且在该机型上运行良好。我还没有弄清楚它究竟取决于我的手机、我的 XML 布局,还是两者都有。
无论如何,现在已经找到了矩阵,因此您可以调整它们以满足您的需要。
注意:我的 setRotation 写法不够通用,因为我没有基于以下值对它进行参数化:
int orientationOffset = mCameraCharacteristics.get(CameraCharacteristics.SENSOR_ORIENTATION);
你可以尝试基于 SENSOR_ORIENTATION 编写完全通用的代码,而不是像本示例那样假定它固定为 270。
因此这段代码适用于硬件相机传感器方向为 270 的手机(例如华为 P9)。
只是为了让您了解如何将旋转绑定到硬件传感器方向,这在我的 P9 上也能很好地工作(但我没有任何其他硬件可以测试)
// Generic variant: the rotation is expressed relative to SENSOR_ORIENTATION
// (orientationOffset) instead of being hard-coded for a 270-degree sensor.
// NOTE(review): mSwappedDimensions, orientationOffset, mirror, s1, s2,
// offsetDxDy, displayRotation and mPreviewSize are defined elsewhere; this
// fragment only builds the mapping matrix. The setRotate -> postScale ->
// postTranslate order is significant and must not be reordered.
if (mSwappedDimensions) {
// Display Rotation 0
// Portrait: rotate by the sensor mounting angle, mirror via a negative X
// scale for the front camera, then translate the result back into view.
mFaceDetectionMatrix.setRotate(orientationOffset);
mFaceDetectionMatrix.postScale(mirror ? -s1 : s1, s2);
mFaceDetectionMatrix.postTranslate(mPreviewSize.getHeight() + offsetDxDy, mPreviewSize.getWidth() + offsetDxDy);
} else {
// Display Rotation 90 e 270
if (displayRotation == Surface.ROTATION_90) {
// Landscape (90): sensor angle plus the display rotation.
mFaceDetectionMatrix.setRotate(orientationOffset + 90);
mFaceDetectionMatrix.postScale(mirror ? -s1 : s1, s2);
mFaceDetectionMatrix.postTranslate(mPreviewSize.getWidth() + offsetDxDy, -offsetDxDy);
} else if (displayRotation == Surface.ROTATION_270) {
// Reverse landscape (270): sensor angle plus 270; translation differs
// because the rotation moves the rect to the opposite corner.
mFaceDetectionMatrix.setRotate(orientationOffset + 270);
mFaceDetectionMatrix.postScale(mirror ? -s1 : s1, s2);
mFaceDetectionMatrix.postTranslate(-offsetDxDy, mPreviewSize.getHeight() + offsetDxDy);
}
}
这是我的最终代码(也可在 GitHub 上获得)
// Final version (also on GitHub). GENERALIZED: the rotations were hard-coded
// (270 / 0 / 180) for the Huawei P9's 270-degree sensor; they are now expressed
// relative to SENSOR_ORIENTATION, which yields the same values on a 270-degree
// sensor and should follow other sensor mountings too.
int orientationOffset = mCameraCharacteristics.get(CameraCharacteristics.SENSOR_ORIENTATION);
Rect activeArraySizeRect = mCameraCharacteristics.get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE);
// Face Detection Matrix: maps sensor active-array coords to preview coords.
mFaceDetectionMatrix = new Matrix();
Log.i("Test", "activeArraySizeRect1: (" + activeArraySizeRect + ") -> " + activeArraySizeRect.width() + ", " + activeArraySizeRect.height());
Log.i("Test", "activeArraySizeRect2: " + mPreviewSize.getWidth() + ", " + mPreviewSize.getHeight());
// Cast to float so the scale factors keep their fractional part.
float s1 = mPreviewSize.getWidth() / (float) activeArraySizeRect.width();
float s2 = mPreviewSize.getHeight() / (float) activeArraySizeRect.height();
// Mirror horizontally for the front-facing camera (negative X scale below).
boolean mirror = (facing == CameraCharacteristics.LENS_FACING_FRONT);
// Empirically-found translation fudge; likely layout-dependent — TODO confirm.
int offsetDxDy = 100;
if (mSwappedDimensions) {
    // Display rotation 0 (portrait): rotate by the sensor mounting angle.
    mFaceDetectionMatrix.setRotate(orientationOffset);
    mFaceDetectionMatrix.postScale(mirror ? -s1 : s1, s2);
    mFaceDetectionMatrix.postTranslate(mPreviewSize.getHeight() + offsetDxDy, mPreviewSize.getWidth() + offsetDxDy);
} else {
    // Display rotation 90 and 270 (landscape orientations).
    if (displayRotation == Surface.ROTATION_90) {
        mFaceDetectionMatrix.setRotate(orientationOffset + 90);
        mFaceDetectionMatrix.postScale(mirror ? -s1 : s1, s2);
        mFaceDetectionMatrix.postTranslate(mPreviewSize.getWidth() + offsetDxDy, -offsetDxDy);
    } else if (displayRotation == Surface.ROTATION_270) {
        mFaceDetectionMatrix.setRotate(orientationOffset + 270);
        mFaceDetectionMatrix.postScale(mirror ? -s1 : s1, s2);
        mFaceDetectionMatrix.postTranslate(-offsetDxDy, mPreviewSize.getHeight() + offsetDxDy);
    }
}
This is the public github repo where you
can find the code:
https://github.com/shadowsheep1/android-camera2-api-face-recon. Hope it could
help you.
无论如何也给你一些理论,你正在做的是二维平面变换。我的意思是你有一个平面(HW 传感器),你必须在预览平面上重新映射该平面上的对象。
所以你必须照顾:
- 旋转:这取决于您的 HW 传感器旋转和 Phone 旋转。
- 镜像:水平镜像取决于你是否使用前置摄像头,垂直镜像取决于手机的旋转方向。镜像是通过缩放矩阵中的负号("-")实现的。
- 平移:这取决于对象经过旋转后所处的位置(也取决于所使用的旋转中心)。你必须通过平移把对象重新放回预览视图中的正确位置。
数学理论
前段时间我也在我的博客中写了一些技术 post 但它们是意大利语。
目前我正在尝试将 Camera2 的 Face 矩形转换为实际视图坐标,以便在 Camera2 API 检测到的人脸上绘制圆圈。
我可以通过以下代码将人脸数量及其数据放入回调中:
private CameraCaptureSession.CaptureCallback mCaptureCallback
= new CameraCaptureSession.CaptureCallback() {
private void process(CaptureResult result) {
Integer mode = result.get(CaptureResult.STATISTICS_FACE_DETECT_MODE);
Face [] faces = result.get(CaptureResult.STATISTICS_FACES);
if(faces != null && mode != null)
Log.e("tag", "faces : " + faces.length + " , mode : " + mode );
}
@Override
public void onCaptureProgressed(CameraCaptureSession session, CaptureRequest request, CaptureResult partialResult) {
process(partialResult);
}
@Override
public void onCaptureCompleted(CameraCaptureSession session, CaptureRequest request, TotalCaptureResult result) {
process(result);
}
}
到目前为止,我已尝试使用以下代码将 Face rect 转换为实际视图坐标(似乎不起作用):
/**
* Callback from the CameraCaptureSession.CaptureCallback
*/
/**
 * Callback from the CameraCaptureSession.CaptureCallback.
 * Rebuilds the sensor-to-view matrix and maps the detected faces onto it.
 */
@Override
public void onFaceDetection(Face[] faces) {
    if (mCameraView == null) {
        return; // camera not ready yet — nothing to draw on
    }
    setFaceDetectionMatrix();
    setFaceDetectionLayout(faces);
}
/**
* This method gets the scaling values of the face in matrix
*/
/**
 * Builds {@code mFaceDetectionMatrix}, which maps face rectangles reported in
 * sensor active-array coordinates into the overlay view's coordinate space.
 * Order matters: mirror, then rotate, then scale, then re-center.
 */
private void setFaceDetectionMatrix() {
    // Face Detection Matrix
    mFaceDetectionMatrix = new Matrix();
    // Need mirror for front camera (done with a negative X scale).
    boolean mirror = mCameraView.getFacing() == CameraView.FACING_FRONT;
    mFaceDetectionMatrix.setScale(mirror ? -1 : 1, 1);
    mFaceDetectionMatrix.postRotate(mCameraDisplayOrientation);
    Rect activeArraySizeRect = mCameraView.getCameraCharacteristics().get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE);
    Log.i("Test", "activeArraySizeRect1: (" + activeArraySizeRect + ") -> " + activeArraySizeRect.width() + ", " + activeArraySizeRect.height());
    Log.i("Test", "activeArraySizeRect2: " + cameraOverlayDrawingView.getWidth() + ", " + cameraOverlayDrawingView.getHeight());
    // BUG FIX: both operands were int, so int/int division truncated the scale
    // factors to 0 whenever the view is smaller than the sensor array.
    // Cast to float before dividing to keep the fractional scale.
    float s1 = cameraOverlayDrawingView.getWidth() / (float) activeArraySizeRect.width();
    float s2 = cameraOverlayDrawingView.getHeight() / (float) activeArraySizeRect.height();
    mFaceDetectionMatrix.postScale(s1, s2);
    // Re-center: the mirror/rotate steps above pivot around the origin.
    // Use 2f to avoid losing the half pixel to integer division.
    mFaceDetectionMatrix.postTranslate(cameraOverlayDrawingView.getWidth() / 2f, cameraOverlayDrawingView.getHeight() / 2f);
}
/**
* This method set the matrix for translating rect
*/
/**
 * Maps every confidently-detected face rectangle through
 * {@code mFaceDetectionMatrix} into view coordinates and hands the result
 * to the overlay view for drawing.
 */
private void setFaceDetectionLayout(Face[] faces) {
    if (faces.length == 0) {
        cameraOverlayDrawingView.setHaveFaces(false, null);
        return;
    }
    List<Rect> faceRects = new ArrayList<>();
    for (int i = 0; i < faces.length; i++) {
        Log.i("Test", "Activity face" + i + " bounds: " + faces[i].getBounds());
        // Skip low-confidence detections.
        if (faces[i].getScore() <= 50) {
            continue;
        }
        RectF mapped = new RectF(faces[i].getBounds());
        mFaceDetectionMatrix.mapRect(mapped);
        Rect viewRect = new Rect((int) mapped.left, (int) mapped.top, (int) mapped.right, (int) mapped.bottom);
        Log.i("Test", "Activity rect" + i + " bounds: " + viewRect);
        faceRects.add(viewRect);
    }
    cameraOverlayDrawingView.setHaveFaces(true, faceRects);
}
新:我已经处理好了所有的手机旋转方向。我猜 offsetDxDy 取决于我的布局,但说实话,我不知道为什么要把这个值设为 100——它是我在华为 P9 上通过实验找到的,并且在该机型上运行良好。我还没有弄清楚它究竟取决于我的手机、我的 XML 布局,还是两者都有。
无论如何,现在已经找到了矩阵,因此您可以调整它们以满足您的需要。
注意:我的 setRotation 写法不够通用,因为我没有基于以下值对它进行参数化:
int orientationOffset = mCameraCharacteristics.get(CameraCharacteristics.SENSOR_ORIENTATION);
你可以尝试基于 SENSOR_ORIENTATION 编写完全通用的代码,而不是像本示例那样假定它固定为 270。
因此这段代码适用于硬件相机传感器方向为 270 的手机(例如华为 P9)。
只是为了让您了解如何将旋转绑定到硬件传感器方向,这在我的 P9 上也能很好地工作(但我没有任何其他硬件可以测试)
// Generic variant: the rotation is expressed relative to SENSOR_ORIENTATION
// (orientationOffset) instead of being hard-coded for a 270-degree sensor.
// NOTE(review): mSwappedDimensions, orientationOffset, mirror, s1, s2,
// offsetDxDy, displayRotation and mPreviewSize are defined elsewhere; this
// fragment only builds the mapping matrix. The setRotate -> postScale ->
// postTranslate order is significant and must not be reordered.
if (mSwappedDimensions) {
// Display Rotation 0
// Portrait: rotate by the sensor mounting angle, mirror via a negative X
// scale for the front camera, then translate the result back into view.
mFaceDetectionMatrix.setRotate(orientationOffset);
mFaceDetectionMatrix.postScale(mirror ? -s1 : s1, s2);
mFaceDetectionMatrix.postTranslate(mPreviewSize.getHeight() + offsetDxDy, mPreviewSize.getWidth() + offsetDxDy);
} else {
// Display Rotation 90 e 270
if (displayRotation == Surface.ROTATION_90) {
// Landscape (90): sensor angle plus the display rotation.
mFaceDetectionMatrix.setRotate(orientationOffset + 90);
mFaceDetectionMatrix.postScale(mirror ? -s1 : s1, s2);
mFaceDetectionMatrix.postTranslate(mPreviewSize.getWidth() + offsetDxDy, -offsetDxDy);
} else if (displayRotation == Surface.ROTATION_270) {
// Reverse landscape (270): sensor angle plus 270; translation differs
// because the rotation moves the rect to the opposite corner.
mFaceDetectionMatrix.setRotate(orientationOffset + 270);
mFaceDetectionMatrix.postScale(mirror ? -s1 : s1, s2);
mFaceDetectionMatrix.postTranslate(-offsetDxDy, mPreviewSize.getHeight() + offsetDxDy);
}
}
这是我的最终代码(也可在 GitHub 上获得)
// Final version (also on GitHub). GENERALIZED: the rotations were hard-coded
// (270 / 0 / 180) for the Huawei P9's 270-degree sensor; they are now expressed
// relative to SENSOR_ORIENTATION, which yields the same values on a 270-degree
// sensor and should follow other sensor mountings too.
int orientationOffset = mCameraCharacteristics.get(CameraCharacteristics.SENSOR_ORIENTATION);
Rect activeArraySizeRect = mCameraCharacteristics.get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE);
// Face Detection Matrix: maps sensor active-array coords to preview coords.
mFaceDetectionMatrix = new Matrix();
Log.i("Test", "activeArraySizeRect1: (" + activeArraySizeRect + ") -> " + activeArraySizeRect.width() + ", " + activeArraySizeRect.height());
Log.i("Test", "activeArraySizeRect2: " + mPreviewSize.getWidth() + ", " + mPreviewSize.getHeight());
// Cast to float so the scale factors keep their fractional part.
float s1 = mPreviewSize.getWidth() / (float) activeArraySizeRect.width();
float s2 = mPreviewSize.getHeight() / (float) activeArraySizeRect.height();
// Mirror horizontally for the front-facing camera (negative X scale below).
boolean mirror = (facing == CameraCharacteristics.LENS_FACING_FRONT);
// Empirically-found translation fudge; likely layout-dependent — TODO confirm.
int offsetDxDy = 100;
if (mSwappedDimensions) {
    // Display rotation 0 (portrait): rotate by the sensor mounting angle.
    mFaceDetectionMatrix.setRotate(orientationOffset);
    mFaceDetectionMatrix.postScale(mirror ? -s1 : s1, s2);
    mFaceDetectionMatrix.postTranslate(mPreviewSize.getHeight() + offsetDxDy, mPreviewSize.getWidth() + offsetDxDy);
} else {
    // Display rotation 90 and 270 (landscape orientations).
    if (displayRotation == Surface.ROTATION_90) {
        mFaceDetectionMatrix.setRotate(orientationOffset + 90);
        mFaceDetectionMatrix.postScale(mirror ? -s1 : s1, s2);
        mFaceDetectionMatrix.postTranslate(mPreviewSize.getWidth() + offsetDxDy, -offsetDxDy);
    } else if (displayRotation == Surface.ROTATION_270) {
        mFaceDetectionMatrix.setRotate(orientationOffset + 270);
        mFaceDetectionMatrix.postScale(mirror ? -s1 : s1, s2);
        mFaceDetectionMatrix.postTranslate(-offsetDxDy, mPreviewSize.getHeight() + offsetDxDy);
    }
}
This is the public github repo where you can find the code: https://github.com/shadowsheep1/android-camera2-api-face-recon. Hope it could help you.
无论如何也给你一些理论,你正在做的是二维平面变换。我的意思是你有一个平面(HW 传感器),你必须在预览平面上重新映射该平面上的对象。
所以你必须照顾:
- 旋转:这取决于您的 HW 传感器旋转和 Phone 旋转。
- 镜像:水平镜像取决于你是否使用前置摄像头,垂直镜像取决于手机的旋转方向。镜像是通过缩放矩阵中的负号("-")实现的。
- 平移:这取决于对象经过旋转后所处的位置(也取决于所使用的旋转中心)。你必须通过平移把对象重新放回预览视图中的正确位置。
数学理论
前段时间我也在我的博客中写了一些技术 post 但它们是意大利语。