How to save a frame using Google Vision face detection

I am trying to follow the google-vision face-tracker sample listed here. I would like to know how to get the number of faces the app detects, and how to save the frame to the phone's storage. Is that possible from within the app?

Yes, you can do it with the following code:

import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.PointF;
import android.media.FaceDetector;
import android.util.SparseArray;

import com.google.android.gms.vision.Frame;
import com.piisoft.upecfacerecognition.utility.Image;

import java.io.File;

public class ExtractFacesFromImage {

    public ExtractFacesFromImage(String imagePath, String outputPath, Context context) {
        // Make sure the output directory exists before any face crops are written.
        File folder = new File(outputPath);
        if (!folder.exists()) {
            folder.mkdirs();
        }
        detectFacesInImage(imagePath, outputPath, context);
    }

    // Legacy path: android.media.FaceDetector (not used by the constructor above).
    private void detectFacesInImage(String imagePath, String outputPath) {
        Bitmap image = Image.bitmapFromJpg(imagePath);
        if (image == null) {
            return;
        }
        FaceDetector.Face[] faces = detectFaces(image);
        for (FaceDetector.Face face : faces) {
            // findFaces() may leave trailing array slots null.
            if (face == null) { continue; }
            PointF midPoint = new PointF();
            face.getMidPoint(midPoint);
            float eyeDistance = face.eyesDistance();

            // Estimate a face rectangle from the mid-point and eye distance,
            // clamping it so the crop stays inside the source bitmap.
            int left = Math.max(0, (int) (midPoint.x - 1.4f * eyeDistance));
            int top = Math.max(0, (int) (midPoint.y - 1.8f * eyeDistance));
            int width = Math.min((int) (2.8f * eyeDistance), image.getWidth() - left);
            int height = Math.min((int) (3.6f * eyeDistance), image.getHeight() - top);

            // Save the cropped face itself as a JPEG.
            Bitmap bmFace = Bitmap.createBitmap(image, left, top, width, height);
            Image.saveBitmapToJpg(bmFace, outputPath, "face_" + System.currentTimeMillis() + ".jpg", 256, 256);
        }
    }


    // Google Vision path: this is the method the constructor calls.
    private void detectFacesInImage(String imagePath, String outputPath, Context context) {
        Bitmap image = Image.bitmapFromJpg(imagePath);
        if (image == null) {
            return;
        }
        SparseArray<com.google.android.gms.vision.face.Face> faces = detectFaces(image, context);

        for (int i = 0; i < faces.size(); ++i) {
            com.google.android.gms.vision.face.Face face = faces.valueAt(i);
            if (face == null) { continue; }
            try {
                // getPosition() can report coordinates slightly outside the bitmap,
                // so the crop is guarded by a try/catch.
                Bitmap bmFace = Bitmap.createBitmap(image, (int) face.getPosition().x, (int) face.getPosition().y,
                        (int) face.getWidth(), (int) face.getHeight());
                Image.saveBitmapToJpg(bmFace, outputPath, "face_" + System.currentTimeMillis() + ".jpg", 256);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }

        // Delete the source image once all faces have been extracted.
        new File(imagePath).delete();
    }
    private SparseArray<com.google.android.gms.vision.face.Face> detectFaces(Bitmap image, Context context) {
        Frame frame = new Frame.Builder().setBitmap(image).build();
        com.google.android.gms.vision.face.FaceDetector detector =
                new com.google.android.gms.vision.face.FaceDetector.Builder(context)
                        .setTrackingEnabled(false)
                        .setLandmarkType(com.google.android.gms.vision.face.FaceDetector.ALL_LANDMARKS)
                        .build();
        // detect() returns one entry per detected face, so faces.size()
        // is the number of faces found in the image.
        SparseArray<com.google.android.gms.vision.face.Face> faces = detector.detect(frame);
        detector.release();
        return faces;
    }

    private FaceDetector.Face[] detectFaces(Bitmap image) {
        int maxFaces = 10;

        // Note: android.media.FaceDetector requires an RGB_565 bitmap
        // whose width is even; other configurations are rejected.
        FaceDetector detector = new FaceDetector(image.getWidth(), image.getHeight(), maxFaces);
        FaceDetector.Face[] faces = new FaceDetector.Face[maxFaces];

        // findFaces() returns the number of faces found; only that many
        // leading entries of the array are populated, the rest stay null.
        int facesFound = detector.findFaces(image, faces);
        return faces;
    }


}
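
To get the face count the question asks about: with the Vision API, detector.detect(frame) returns a SparseArray, and its size() is the number of faces detected. Here is a minimal usage sketch; the file paths are hypothetical placeholders, and it assumes you have a Context (e.g. an Activity) in scope:

// Hypothetical paths, for illustration only.
String imagePath = context.getFilesDir() + "/capture.jpg";
String outputPath = context.getFilesDir() + "/faces";

// Counting faces directly with the Vision API:
Frame frame = new Frame.Builder().setBitmap(Image.bitmapFromJpg(imagePath)).build();
com.google.android.gms.vision.face.FaceDetector detector =
        new com.google.android.gms.vision.face.FaceDetector.Builder(context).build();
int faceCount = detector.detect(frame).size();
detector.release();

// Crop every detected face in capture.jpg into the faces directory
// (note that the class deletes the source image when it is done).
new ExtractFacesFromImage(imagePath, outputPath, context);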
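
One caveat: com.piisoft.upecfacerecognition.utility.Image is a custom helper class, not part of the Vision API or the Android SDK. If you do not have it, a minimal sketch of the two methods the code relies on could look like the following; the signatures are assumptions inferred from how they are called above:

import android.graphics.Bitmap;
import android.graphics.BitmapFactory;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;

public class Image {

    // Decodes a JPEG file into a Bitmap; returns null if the file cannot be read.
    public static Bitmap bitmapFromJpg(String path) {
        return BitmapFactory.decodeFile(path);
    }

    // Scales the bitmap to width x height and writes it as a JPEG into folder/fileName.
    public static void saveBitmapToJpg(Bitmap bitmap, String folder, String fileName,
                                       int width, int height) {
        Bitmap scaled = Bitmap.createScaledBitmap(bitmap, width, height, true);
        File file = new File(folder, fileName);
        try (FileOutputStream out = new FileOutputStream(file)) {
            scaled.compress(Bitmap.CompressFormat.JPEG, 90, out);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    // Overload used by the Vision-based path: scales to a square of the given size.
    public static void saveBitmapToJpg(Bitmap bitmap, String folder, String fileName, int size) {
        saveBitmapToJpg(bitmap, folder, fileName, size, size);
    }
}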