为什么我的图形叠加层没有出现在输入图像上?
Why isn't my graphic overlay appearing on input image?
我的应用程序使用 CameraX 和 Google ML Kit,用 Java 编写。该应用程序的目的是通过实时相机预览来检测 objects。我使用标题为 "Detect and track objects with ML Kit on Android"(基本模型选项)的指南实现了 ML Kit,以检测应用程序中连续帧中的 objects。尽管创建了一个图形覆盖来绘制边界框,但我很困惑为什么在我的设备上启动应用程序时它没有出现在我的用户界面上。这是代码;
MainActivity.java
// Activity that streams camera frames with CameraX and runs ML Kit object
// detection on every frame.
// NOTE(review): this is the questioner's code. The overlay never appears
// because getObjectResults() constructs DrawGraphic views but never attaches
// them to any parent layout — a View that is not in the view hierarchy is
// never measured, laid out, or drawn.
public class MainActivity extends AppCompatActivity {
// Progress dialog; dismissed after the first batch of results.
// NOTE(review): no show() call is visible in this code — confirm it is shown
// elsewhere, otherwise dismiss() is a no-op (or alertDialog is unused).
AlertDialog alertDialog;
private ListenableFuture<ProcessCameraProvider> cameraProviderFuture;
// Frame analyzer handed to ImageAnalysis; CameraX invokes analyze() once per
// delivered frame on the executor passed to setAnalyzer().
private class YourAnalyzer implements ImageAnalysis.Analyzer {
@Override
@ExperimentalGetImage
@SuppressLint("UnsafeExperimentalUsageError")
public void analyze(ImageProxy imageProxy) {
Image mediaImage = imageProxy.getImage();
if (mediaImage != null) {
//Log.d("TAG", "mediaImage is throwing null")
InputImage image =
InputImage.fromMediaImage(mediaImage, imageProxy.getImageInfo().getRotationDegrees());
//Pass image to an ML Kit Vision API
//...
// NOTE(review): the options and detector client are rebuilt for every
// frame; both could be created once and reused across frames.
ObjectDetectorOptions options =
new ObjectDetectorOptions.Builder()
.setDetectorMode(ObjectDetectorOptions.STREAM_MODE)
.enableClassification() // Optional
.build();
ObjectDetector objectDetector = ObjectDetection.getClient(options);
objectDetector.process(image)
.addOnSuccessListener(detectedObjects -> {
getObjectResults(detectedObjects);
Log.d("TAG", "onSuccess" + detectedObjects.size());
// This loop only reads the detection fields into locals that are
// immediately discarded — it has no effect beyond illustrating the API.
for (DetectedObject detectedObject : detectedObjects) {
Rect boundingBox = detectedObject.getBoundingBox();
Integer trackingId = detectedObject.getTrackingId();
for (DetectedObject.Label label : detectedObject.getLabels()) {
String text = label.getText();
int index = label.getIndex();
float confidence = label.getConfidence();
}
}
})
// NOTE(review): getLocalizedMessage() may return null, which makes Log.e throw.
.addOnFailureListener(e -> Log.e("TAG", e.getLocalizedMessage()))
// Must close the ImageProxy so CameraX can deliver the next frame.
.addOnCompleteListener(result -> imageProxy.close());
}
}
}
// BUG (the subject of the question): a DrawGraphic is created per detection
// but never added to a ViewGroup via addView(), so nothing is ever rendered.
// 'count' is also computed and then discarded.
private void getObjectResults(List<DetectedObject> detectedObjects) {
int count=0;
for (DetectedObject object:detectedObjects)
{
Rect rect = object.getBoundingBox();
String text = "Undefined";
DrawGraphic drawGraphic = new DrawGraphic(this, rect, text);
count = count+1;
}
alertDialog.dismiss();
}
@Override
protected void onCreate(@Nullable Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
cameraProviderFuture = ProcessCameraProvider.getInstance(this);
// NOTE(review): this local is never used in onCreate; bindPreview() looks
// the view up again itself.
PreviewView previewView = findViewById(R.id.previewView);
alertDialog = new SpotsDialog.Builder()
.setContext(this)
.setMessage("Currently processing...")
.setCancelable(false)
.build();
cameraProviderFuture.addListener(() -> {
try {
ProcessCameraProvider cameraProvider = cameraProviderFuture.get();
bindPreview(cameraProvider);
} catch (ExecutionException | InterruptedException e) {
// No errors need to be handled for this Future.
// This should never be reached.
}
}, ContextCompat.getMainExecutor(this));
}
// Wires the Preview and ImageAnalysis use cases to the back camera and binds
// them to this activity's lifecycle.
void bindPreview(@NonNull ProcessCameraProvider cameraProvider) {
PreviewView previewView = findViewById(R.id.previewView);
Preview preview = new Preview.Builder()
.build();
CameraSelector cameraSelector = new CameraSelector.Builder()
.requireLensFacing(CameraSelector.LENS_FACING_BACK)
.build();
preview.setSurfaceProvider(previewView.getSurfaceProvider());
ImageAnalysis imageAnalysis =
new ImageAnalysis.Builder()
.setTargetResolution(new Size(1280,720))
.setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
.build();
// Analyzer runs on the main executor, so UI access from callbacks is safe.
imageAnalysis.setAnalyzer(ContextCompat.getMainExecutor(this), new YourAnalyzer());
Camera camera = cameraProvider.bindToLifecycle((LifecycleOwner)this, cameraSelector, preview, imageAnalysis);
}
}
DrawGraphic.java
/**
 * Overlay View that renders a single detection result: a white bounding-box
 * outline plus its label text drawn at the centre of the box.
 */
public class DrawGraphic extends View {
    Paint borderPaint, textPaint;
    Rect rect;
    String text;

    public DrawGraphic(Context context, Rect rect, String text) {
        super(context);
        this.rect = rect;
        this.text = text;
        borderPaint = newBorderPaint();
        textPaint = newTextPaint();
    }

    // Stroke-only paint used for the bounding-box outline.
    private static Paint newBorderPaint() {
        Paint p = new Paint();
        p.setColor(Color.WHITE);
        p.setStrokeWidth(10f);
        p.setStyle(Paint.Style.STROKE);
        return p;
    }

    // Fill paint used for the label text.
    private static Paint newTextPaint() {
        Paint p = new Paint();
        p.setColor(Color.WHITE);
        p.setStrokeWidth(50f);
        p.setTextSize(32f);
        p.setStyle(Paint.Style.FILL);
        return p;
    }

    @Override
    protected void onDraw(Canvas canvas) {
        super.onDraw(canvas);
        canvas.drawText(text, rect.centerX(), rect.centerY(), textPaint);
        canvas.drawRect(rect, borderPaint);
    }
}
我的代码的最终目标是让应用实时检测物体,并像这样在画面上显示检测结果;
将根据要求提供补充此问题所需的任何信息。
要在界面上显示 MLKit api 返回的边界框和标签,您需要自己在应用程序中绘制叠加层。这个 mlkit vision_quickstart graphic 可以作为参考。
我能够通过启用 viewBinding
with the help of this Android documentation 来解决这个问题,因为它允许人们更轻松地编写与视图交互的代码。我在我的 Gradle 文件中做了这样的更改;
// Enables generated view-binding classes (e.g. ActivityMainBinding) so views
// can be referenced without findViewById.
android {
buildFeatures {
viewBinding true
}
}
然后,我将这些更改添加到 activity_main.xml;
<!-- activity_main.xml: the FrameLayout stacks DrawGraphic overlay views on
     top of the CameraX PreviewView, which stays at child index 0. -->
<RelativeLayout
xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".MainActivity">
<!-- Overlay host: DrawGraphic views are added here after the PreviewView. -->
<FrameLayout
android:id="@+id/parentlayout"
android:layout_width="match_parent"
android:layout_height="match_parent">
<androidx.camera.view.PreviewView
android:id="@+id/previewView"
android:layout_height="match_parent"
android:layout_width="match_parent"/>
</FrameLayout>
</RelativeLayout>
然后我把注意力转向 MainActivity.java,在 class 中声明了属性 ActivityMainBinding binding;,并参考 this Github user's 的结果处理方法,把下面的方法重新改写成这样;
/**
 * Renders one DrawGraphic overlay per detected object on top of the camera
 * preview. Child 0 of parentlayout is the PreviewView; every later child is
 * an overlay from the previous frame.
 *
 * Fix over the original: stale overlays are all removed ONCE before the loop.
 * Removing inside the per-object loop leaks old boxes whenever the number of
 * detections drops between frames (one removal per new object is not enough).
 */
private void getObjectResults(List<DetectedObject> detectedObjects) {
    // Drop every overlay left over from the previous frame, keeping the
    // PreviewView at index 0 intact.
    while (binding.parentlayout.getChildCount() > 1) {
        binding.parentlayout.removeViewAt(1);
    }
    for (DetectedObject object : detectedObjects) {
        Rect rect = object.getBoundingBox();
        // Fall back to a placeholder when classification produced no label.
        String text = "Undefined";
        if (object.getLabels().size() != 0) {
            text = object.getLabels().get(0).getText();
        }
        binding.parentlayout.addView(new DrawGraphic(this, rect, text));
    }
    /*alertDialog.dismiss();*/
}
我的应用程序使用 CameraX 和 Google ML Kit,用 Java 编写。该应用程序的目的是通过实时相机预览来检测 objects。我使用标题为 "Detect and track objects with ML Kit on Android"(基本模型选项)的指南实现了 ML Kit,以检测应用程序中连续帧中的 objects。尽管创建了一个图形覆盖来绘制边界框,但我很困惑为什么在我的设备上启动应用程序时它没有出现在我的用户界面上。这是代码;
MainActivity.java
// Activity that streams camera frames with CameraX and runs ML Kit object
// detection on every frame.
// NOTE(review): this is the questioner's code. The overlay never appears
// because getObjectResults() constructs DrawGraphic views but never attaches
// them to any parent layout — a View that is not in the view hierarchy is
// never measured, laid out, or drawn.
public class MainActivity extends AppCompatActivity {
// Progress dialog; dismissed after the first batch of results.
// NOTE(review): no show() call is visible in this code — confirm it is shown
// elsewhere, otherwise dismiss() is a no-op (or alertDialog is unused).
AlertDialog alertDialog;
private ListenableFuture<ProcessCameraProvider> cameraProviderFuture;
// Frame analyzer handed to ImageAnalysis; CameraX invokes analyze() once per
// delivered frame on the executor passed to setAnalyzer().
private class YourAnalyzer implements ImageAnalysis.Analyzer {
@Override
@ExperimentalGetImage
@SuppressLint("UnsafeExperimentalUsageError")
public void analyze(ImageProxy imageProxy) {
Image mediaImage = imageProxy.getImage();
if (mediaImage != null) {
//Log.d("TAG", "mediaImage is throwing null")
InputImage image =
InputImage.fromMediaImage(mediaImage, imageProxy.getImageInfo().getRotationDegrees());
//Pass image to an ML Kit Vision API
//...
// NOTE(review): the options and detector client are rebuilt for every
// frame; both could be created once and reused across frames.
ObjectDetectorOptions options =
new ObjectDetectorOptions.Builder()
.setDetectorMode(ObjectDetectorOptions.STREAM_MODE)
.enableClassification() // Optional
.build();
ObjectDetector objectDetector = ObjectDetection.getClient(options);
objectDetector.process(image)
.addOnSuccessListener(detectedObjects -> {
getObjectResults(detectedObjects);
Log.d("TAG", "onSuccess" + detectedObjects.size());
// This loop only reads the detection fields into locals that are
// immediately discarded — it has no effect beyond illustrating the API.
for (DetectedObject detectedObject : detectedObjects) {
Rect boundingBox = detectedObject.getBoundingBox();
Integer trackingId = detectedObject.getTrackingId();
for (DetectedObject.Label label : detectedObject.getLabels()) {
String text = label.getText();
int index = label.getIndex();
float confidence = label.getConfidence();
}
}
})
// NOTE(review): getLocalizedMessage() may return null, which makes Log.e throw.
.addOnFailureListener(e -> Log.e("TAG", e.getLocalizedMessage()))
// Must close the ImageProxy so CameraX can deliver the next frame.
.addOnCompleteListener(result -> imageProxy.close());
}
}
}
// BUG (the subject of the question): a DrawGraphic is created per detection
// but never added to a ViewGroup via addView(), so nothing is ever rendered.
// 'count' is also computed and then discarded.
private void getObjectResults(List<DetectedObject> detectedObjects) {
int count=0;
for (DetectedObject object:detectedObjects)
{
Rect rect = object.getBoundingBox();
String text = "Undefined";
DrawGraphic drawGraphic = new DrawGraphic(this, rect, text);
count = count+1;
}
alertDialog.dismiss();
}
@Override
protected void onCreate(@Nullable Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
cameraProviderFuture = ProcessCameraProvider.getInstance(this);
// NOTE(review): this local is never used in onCreate; bindPreview() looks
// the view up again itself.
PreviewView previewView = findViewById(R.id.previewView);
alertDialog = new SpotsDialog.Builder()
.setContext(this)
.setMessage("Currently processing...")
.setCancelable(false)
.build();
cameraProviderFuture.addListener(() -> {
try {
ProcessCameraProvider cameraProvider = cameraProviderFuture.get();
bindPreview(cameraProvider);
} catch (ExecutionException | InterruptedException e) {
// No errors need to be handled for this Future.
// This should never be reached.
}
}, ContextCompat.getMainExecutor(this));
}
// Wires the Preview and ImageAnalysis use cases to the back camera and binds
// them to this activity's lifecycle.
void bindPreview(@NonNull ProcessCameraProvider cameraProvider) {
PreviewView previewView = findViewById(R.id.previewView);
Preview preview = new Preview.Builder()
.build();
CameraSelector cameraSelector = new CameraSelector.Builder()
.requireLensFacing(CameraSelector.LENS_FACING_BACK)
.build();
preview.setSurfaceProvider(previewView.getSurfaceProvider());
ImageAnalysis imageAnalysis =
new ImageAnalysis.Builder()
.setTargetResolution(new Size(1280,720))
.setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
.build();
// Analyzer runs on the main executor, so UI access from callbacks is safe.
imageAnalysis.setAnalyzer(ContextCompat.getMainExecutor(this), new YourAnalyzer());
Camera camera = cameraProvider.bindToLifecycle((LifecycleOwner)this, cameraSelector, preview, imageAnalysis);
}
}
DrawGraphic.java
/**
 * Overlay View that renders a single detection result: a white bounding-box
 * outline plus its label text drawn at the centre of the box.
 */
public class DrawGraphic extends View {
    Paint borderPaint, textPaint;
    Rect rect;
    String text;

    public DrawGraphic(Context context, Rect rect, String text) {
        super(context);
        this.rect = rect;
        this.text = text;
        borderPaint = newBorderPaint();
        textPaint = newTextPaint();
    }

    // Stroke-only paint used for the bounding-box outline.
    private static Paint newBorderPaint() {
        Paint p = new Paint();
        p.setColor(Color.WHITE);
        p.setStrokeWidth(10f);
        p.setStyle(Paint.Style.STROKE);
        return p;
    }

    // Fill paint used for the label text.
    private static Paint newTextPaint() {
        Paint p = new Paint();
        p.setColor(Color.WHITE);
        p.setStrokeWidth(50f);
        p.setTextSize(32f);
        p.setStyle(Paint.Style.FILL);
        return p;
    }

    @Override
    protected void onDraw(Canvas canvas) {
        super.onDraw(canvas);
        canvas.drawText(text, rect.centerX(), rect.centerY(), textPaint);
        canvas.drawRect(rect, borderPaint);
    }
}
我的代码的最终目标是让应用实时检测物体,并像这样在画面上显示检测结果;
将根据要求提供补充此问题所需的任何信息。
要在界面上显示 MLKit api 返回的边界框和标签,您需要自己在应用程序中绘制叠加层。这个 mlkit vision_quickstart graphic 可以作为参考。
我能够通过启用 viewBinding
with the help of this Android documentation 来解决这个问题,因为它允许人们更轻松地编写与视图交互的代码。我在我的 Gradle 文件中做了这样的更改;
// Enables generated view-binding classes (e.g. ActivityMainBinding) so views
// can be referenced without findViewById.
android {
buildFeatures {
viewBinding true
}
}
然后,我将这些更改添加到 activity_main.xml;
<!-- activity_main.xml: the FrameLayout stacks DrawGraphic overlay views on
     top of the CameraX PreviewView, which stays at child index 0. -->
<RelativeLayout
xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".MainActivity">
<!-- Overlay host: DrawGraphic views are added here after the PreviewView. -->
<FrameLayout
android:id="@+id/parentlayout"
android:layout_width="match_parent"
android:layout_height="match_parent">
<androidx.camera.view.PreviewView
android:id="@+id/previewView"
android:layout_height="match_parent"
android:layout_width="match_parent"/>
</FrameLayout>
</RelativeLayout>
然后我把注意力转向 MainActivity.java,在 class 中声明了属性 ActivityMainBinding binding;,并参考 this Github user's 的结果处理方法,把下面的方法重新改写成这样;
/**
 * Renders one DrawGraphic overlay per detected object on top of the camera
 * preview. Child 0 of parentlayout is the PreviewView; every later child is
 * an overlay from the previous frame.
 *
 * Fix over the original: stale overlays are all removed ONCE before the loop.
 * Removing inside the per-object loop leaks old boxes whenever the number of
 * detections drops between frames (one removal per new object is not enough).
 */
private void getObjectResults(List<DetectedObject> detectedObjects) {
    // Drop every overlay left over from the previous frame, keeping the
    // PreviewView at index 0 intact.
    while (binding.parentlayout.getChildCount() > 1) {
        binding.parentlayout.removeViewAt(1);
    }
    for (DetectedObject object : detectedObjects) {
        Rect rect = object.getBoundingBox();
        // Fall back to a placeholder when classification produced no label.
        String text = "Undefined";
        if (object.getLabels().size() != 0) {
            text = object.getLabels().get(0).getText();
        }
        binding.parentlayout.addView(new DrawGraphic(this, rect, text));
    }
    /*alertDialog.dismiss();*/
}