Crop face from the CameraSource
I am implementing the sample given in the google-vision face tracker.
MyFaceDetector class:
import android.util.SparseArray;

import com.google.android.gms.vision.Detector;
import com.google.android.gms.vision.Frame;
import com.google.android.gms.vision.face.Face;

public class MyFaceDetector extends Detector<Face> {
    private Detector<Face> mDelegate;

    MyFaceDetector(Detector<Face> delegate) {
        mDelegate = delegate;
    }

    @Override
    public SparseArray<Face> detect(Frame frame) {
        return mDelegate.detect(frame);
    }

    @Override
    public boolean isOperational() {
        return mDelegate.isOperational();
    }

    @Override
    public boolean setFocus(int id) {
        return mDelegate.setFocus(id);
    }
}
FaceTrackerActivity class:
private void createCameraSource() {
    imageView = (ImageView) findViewById(R.id.face);

    FaceDetector faceDetector = new FaceDetector.Builder(this).build();
    myFaceDetector = new MyFaceDetector(faceDetector);
    myFaceDetector.setProcessor(new MultiProcessor.Builder<>(new GraphicFaceTrackerFactory())
            .build());

    mCameraSource = new CameraSource.Builder(this, myFaceDetector)
            .setRequestedPreviewSize(640, 480)
            .setFacing(CameraSource.CAMERA_FACING_FRONT)
            .setRequestedFps(60.0f)
            .build();

    if (!myFaceDetector.isOperational()) {
        Log.w(TAG, "Face detector dependencies are not yet available.");
    }
}
I need to crop the face and set it on an ImageView. I am not able to plug in my own custom Frame here; frame.getBitmap() always returns null inside detect(Frame frame). How can I achieve this?
frame.getBitmap() will only return a value if the frame was originally created from a bitmap. CameraSource supplies the image information as ByteBuffers rather than bitmaps, so that is the image information that is available.

frame.getGrayscaleImageData() will return the image data.

frame.getMetadata() will return metadata such as the image dimensions and the image format.
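As a quick sketch (not part of the sample), this is one way to turn that data into a full-frame Bitmap from inside detect(), assuming the default NV21 preview format; frameBitmap is a hypothetical local, and the YuvImage round-trip mirrors the approach in the snippet below:

// Inside MyFaceDetector; requires android.graphics.{Bitmap, BitmapFactory,
// ImageFormat, Rect, YuvImage}, java.io.ByteArrayOutputStream and java.nio.ByteBuffer.
@Override
public SparseArray<Face> detect(Frame frame) {
    SparseArray<Face> faces = mDelegate.detect(frame);

    int width = frame.getMetadata().getWidth();
    int height = frame.getMetadata().getHeight();
    // For NV21 frames this buffer is backed by the full NV21 array.
    ByteBuffer buffer = frame.getGrayscaleImageData();

    // Round-trip through JPEG to get a Bitmap out of the YUV data.
    YuvImage yuv = new YuvImage(buffer.array(), ImageFormat.NV21, width, height, null);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    yuv.compressToJpeg(new Rect(0, 0, width, height), 80, out);
    byte[] jpeg = out.toByteArray();
    Bitmap frameBitmap = BitmapFactory.decodeByteArray(jpeg, 0, jpeg.length);
    // frameBitmap is the whole frame in sensor orientation (rotation metadata
    // is ignored here); crop it with the face bounds as needed.

    return faces;
}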
This goes into CameraSource.java:
Frame outputFrame = new Frame.Builder()
        .setImageData(mPendingFrameData, mPreviewSize.getWidth(),
                mPreviewSize.getHeight(), ImageFormat.NV21)
        .setId(mPendingFrameId)
        .setTimestampMillis(mPendingTimeMillis)
        .setRotation(mRotation)
        .build();

int w = outputFrame.getMetadata().getWidth();
int h = outputFrame.getMetadata().getHeight();

SparseArray<Face> detectedFaces = mDetector.detect(outputFrame);
// Fallback blank bitmap in case no face is detected in this frame.
Bitmap bitmap = Bitmap.createBitmap(w, h, Bitmap.Config.ARGB_8888);

if (detectedFaces.size() > 0) {
    // For NV21 frames the grayscale buffer is backed by the full NV21 array,
    // so it can be handed to YuvImage directly.
    ByteBuffer byteBufferRaw = outputFrame.getGrayscaleImageData();
    byte[] byteBuffer = byteBufferRaw.array();
    YuvImage yuvimage = new YuvImage(byteBuffer, ImageFormat.NV21, w, h, null);

    Face face = detectedFaces.valueAt(0);
    // Clamp the face bounds to the frame; the reported position can fall
    // slightly outside the image, which would make compressToJpeg throw.
    int left = Math.max(0, (int) face.getPosition().x);
    int top = Math.max(0, (int) face.getPosition().y);
    int right = Math.min(w, left + (int) face.getWidth());
    int bottom = Math.min(h, top + (int) face.getHeight());

    // Compress only the face region to JPEG, then decode it back to a Bitmap.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    yuvimage.compressToJpeg(new Rect(left, top, right, bottom), 80, baos);
    byte[] jpegArray = baos.toByteArray();
    bitmap = BitmapFactory.decodeByteArray(jpegArray, 0, jpegArray.length);
}

((FaceTrackerActivity) mContext).setBitmapToImageView(bitmap);
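setBitmapToImageView is not shown above; one possible implementation in FaceTrackerActivity (an assumption on my part) has to post to the UI thread, since frames are processed on a background thread:

void setBitmapToImageView(final Bitmap bitmap) {
    runOnUiThread(new Runnable() {
        @Override
        public void run() {
            imageView.setImageBitmap(bitmap);
        }
    });
}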