如何在 Temi Robot 上旋转活动物体检测屏幕?
How do I rotate the live object detection screen on Temi Robot?
我目前正在使用 android-demo-app/ObjectDetection/ On Temi Robot,预加载的图像到目前为止可以正常工作,但是当我按“实时”转到实时对象检测屏幕时,它向右旋转了 90 度。
Temi 机器人在屏幕的同一侧只有一个前置摄像头。
我试过更改 textureView.setTransform()
imageAnalysisConfig.Builder().setTargetRotation()
imageAnalysis.setTargetRotation()
但无济于事
也尝试将 activity
标签下的 AndroidManifest.xml
screenOrientation
更改为 fullSensor
或 Landscape
但没有任何改变。
我一直在 Android Developer CameraX 页面上查找答案 first link second link 但我找不到任何答案。也许我不够聪明,无法在这里找到解决方案。
非常感谢任何帮助!
AbstractCameraXActivity.java
// Wires up the CameraX (1.0.0-alpha) pipeline: the Preview use case streams
// frames into the activity's TextureView, and the ImageAnalysis use case feeds
// throttled frames into analyzeImage() on the background handler.
private void setupCameraX() {
    final TextureView cameraTexture = getCameraPreviewTextureView();
    final PreviewConfig prevCfg = new PreviewConfig.Builder().build();
    final Preview cameraPreview = new Preview(prevCfg);
    // Earlier rotation experiment, kept for reference:
    // Matrix m = new Matrix();
    // m.postRotate(180);
    // textureView.setTransform(m); //not working
    cameraPreview.setOnPreviewOutputUpdateListener(
            output -> cameraTexture.setSurfaceTexture(output.getSurfaceTexture()));

    final var analysisCfg = new ImageAnalysisConfig.Builder()
            .setTargetResolution(new Size(500, 500))
            .setCallbackHandler(mBackgroundHandler)
            .setImageReaderMode(ImageAnalysis.ImageReaderMode.ACQUIRE_LATEST_IMAGE)
            //.setTargetRotation(Surface.ROTATION_0) // not working
            .build();
    imageAnalysis = new ImageAnalysis(analysisCfg);
    imageAnalysis.setAnalyzer((frame, rotationDeg) -> {
        // Throttle: run inference at most once every 500 ms.
        if (SystemClock.elapsedRealtime() - mLastAnalysisResultTime < 500) {
            return;
        }
        final R2 result = analyzeImage(frame, rotationDeg);
        if (result != null) {
            mLastAnalysisResultTime = SystemClock.elapsedRealtime();
            runOnUiThread(() -> applyToUiAnalyzeImageResult(result));
        }
    });
    //imageAnalysis.setTargetRotation(Surface.ROTATION_180); // not working
    CameraX.bindToLifecycle(this, cameraPreview, imageAnalysis);
}
ObjectDetectionActivity.java
/**
 * Runs YOLOv5 inference on one camera frame.
 *
 * <p>Lazily loads the TorchScript Lite model on first call, rotates the frame
 * upright, resizes it to the model's input size, runs the forward pass, and
 * converts the raw outputs into NMS-filtered detection results scaled to the
 * result view.
 *
 * @param image           the frame from ImageAnalysis (must contain an Image)
 * @param rotationDegrees clockwise rotation needed to make the frame upright
 * @return detection results, or {@code null} if the model could not be loaded
 */
@Override
@WorkerThread
@Nullable
protected AnalysisResult analyzeImage(ImageProxy image, int rotationDegrees) {
    try {
        // Lazily load the TorchScript Lite model once.
        if (mModule == null) {
            mModule = LiteModuleLoader.load(MainActivity.assetFilePath(getApplicationContext(), "yolov5s.torchscript.ptl"));
        }
    } catch (IOException e) {
        Log.e("Object Detection", "Error reading assets", e);
        return null;
    }
    Bitmap bitmap = imgToBitmap(Objects.requireNonNull(image.getImage()));
    Matrix matrix = new Matrix();
    // FIX: honor the rotation reported by the caller instead of a hard-coded
    // 90°, so the model sees an upright image regardless of sensor mounting.
    // (The current caller passes 90, so behavior is unchanged there.)
    matrix.postRotate((float) rotationDegrees);
    bitmap = Bitmap.createBitmap(bitmap, 0, 0, bitmap.getWidth(), bitmap.getHeight(), matrix, true);
    Bitmap resizedBitmap = Bitmap.createScaledBitmap(bitmap, PrePostProcessor.mInputWidth, PrePostProcessor.mInputHeight, true);
    final Tensor inputTensor = TensorImageUtils.bitmapToFloat32Tensor(resizedBitmap, PrePostProcessor.NO_MEAN_RGB, PrePostProcessor.NO_STD_RGB);
    IValue[] outputTuple = mModule.forward(IValue.from(inputTensor)).toTuple();
    final Tensor outputTensor = outputTuple[0].toTensor();
    final float[] outputs = outputTensor.getDataAsFloatArray();
    // Scale factors: model-input space -> rotated-bitmap space -> view space.
    float imgScaleX = (float) bitmap.getWidth() / PrePostProcessor.mInputWidth;
    float imgScaleY = (float) bitmap.getHeight() / PrePostProcessor.mInputHeight;
    float ivScaleX = (float) mResultView.getWidth() / bitmap.getWidth();
    float ivScaleY = (float) mResultView.getHeight() / bitmap.getHeight();
    final ArrayList<Result> results = PrePostProcessor.outputsToNMSPredictions(outputs, imgScaleX, imgScaleY, ivScaleX, ivScaleY, 0, 0);
    return new AnalysisResult(results);
}
AndroidManifest.xml
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
    package="org.pytorch.demo.objectdetection">
    <uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE" />
    <uses-permission android:name="android.permission.CAMERA" />
    <application
        android:allowBackup="true"
        android:icon="@mipmap/ic_launcher"
        android:label="@string/app_name"
        android:roundIcon="@mipmap/ic_launcher_round"
        android:supportsRtl="true"
        android:theme="@style/AppTheme">
        <!-- NOTE(review): since API 13, configChanges="orientation" alone does NOT
             prevent activity recreation on rotation; "orientation|screenSize" is
             required. Confirm whether recreation suppression is intended here. -->
        <activity android:name=".MainActivity"
            android:configChanges="orientation"
            android:screenOrientation="fullSensor">
            <intent-filter>
                <action android:name="android.intent.action.MAIN" />
                <category android:name="android.intent.category.LAUNCHER" />
            </intent-filter>
        </activity>
        <!-- NOTE(review): the same configChanges caveat (missing "screenSize")
             applies to this activity. -->
        <activity
            android:name=".ObjectDetectionActivity"
            android:configChanges="orientation"
            android:screenOrientation="fullSensor">
        </activity>
    </application>
</manifest>
更新
我想我现在可能知道问题所在了。在 ObjectDetectionActivity 的 setupCameraX() 方法中,我应该操纵 textureView 并且操纵矩阵变换的枢轴是我需要的。我开始在屏幕上看到一些 cameraView。但是我不知道这个参数中需要的 x 和 y 是什么...
// Fragment from the rotation experiment: rotating the TextureView directly.
final TextureView textureView = getCameraPreviewTextureView();
final PreviewConfig previewConfig = new PreviewConfig.Builder().build();
final Preview preview = new Preview(previewConfig);
Matrix m = new Matrix();
// x,y is the rotation pivot. For a view-centred rotation this is typically
// (textureView.getWidth() / 2f, textureView.getHeight() / 2f) — but note the
// view must already be laid out, otherwise both are 0. TODO confirm timing.
m.postRotate(180,x,y);//potential solution here.
textureView.setTransform(m); //not working
preview.setOnPreviewOutputUpdateListener(output -> textureView.setSurfaceTexture(output.getSurfaceTexture()));
我已将 cameraX 版本从 1.0.0-alpha5 更改为 1.0.0
// Configures the CameraX 1.0.0 pipeline asynchronously: once the process
// camera provider is ready, binds a Preview (target rotation 270° for the
// Temi screen) and a throttled ImageAnalysis use case to this lifecycle.
// NOTE(review): the method's closing brace is missing from this excerpt.
private void setupCameraX() {
    ListenableFuture<ProcessCameraProvider> cameraProviderFuture =
            ProcessCameraProvider.getInstance(this);
    cameraProviderFuture.addListener(() -> {
        try {
            ProcessCameraProvider cameraProvider = cameraProviderFuture.get();
            PreviewView previewView = getCameraPreviewTextureView();
            final Preview preview = new Preview.Builder()
                    .setTargetRotation(Surface.ROTATION_270)//working nicely
                    .build();
            //TODO: Check if result_view can render over preview_view
            CameraSelector cameraSelector = new CameraSelector
                    .Builder()
                    .requireLensFacing(CameraSelector.LENS_FACING_FRONT)
                    .build();
            preview.setSurfaceProvider(previewView.getSurfaceProvider());
            executor = Executors.newSingleThreadExecutor();
            imageAnalysis = new ImageAnalysis.Builder()
                    .setTargetResolution(new Size(500, 500))
                    .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
                    .build();
            imageAnalysis.setAnalyzer(executor,
                    image -> {
                        Log.d("image analyzer","Entered Analyse method");
                        // Throttle: analyze at most one frame per 500 ms.
                        if (SystemClock.elapsedRealtime() - mLastAnalysisResultTime < 500) {
                            return;
                        }
                        // Rotation hard-coded to 90 — presumably the Temi camera's
                        // sensor orientation; TODO confirm via image.getImageInfo().
                        final T result = analyzeImage(image, 90);
                        if (result != null) {
                            mLastAnalysisResultTime = SystemClock.elapsedRealtime();
                            runOnUiThread(() -> applyToUiAnalyzeImageResult(result));
                        }
                    });
            camera = cameraProvider.bindToLifecycle(
                    this,
                    cameraSelector,
                    imageAnalysis,
                    preview);
        } catch (InterruptedException | ExecutionException e) {
            // NOTE(review): InterruptedException is swallowed without re-interrupting
            // the thread — consider Thread.currentThread().interrupt() here.
            new AlertDialog
                    .Builder(this)
                    .setTitle("Camera setup error")
                    .setMessage(e.getMessage())
                    .setPositiveButton("Ok",
                            (dialog, which) -> {
                            })
                    .show();
        }
    }, ContextCompat.getMainExecutor(this));
注意:getCameraPreviewTextureView() 是膨胀 ViewStub 的方法。我只是在关注一个 pytorch android 示例。
// Inflates the preview ViewStub (on first call) and returns the PreviewView
// inside it; also caches the result overlay view as a side effect.
@Override
protected PreviewView getCameraPreviewTextureView() {
    mResultView = findViewById(R.id.resultView);
    //
    final var inflatedStub = ((ViewStub) findViewById(R.id.preview_view_stub)).inflate();
    return inflatedStub.findViewById(R.id.preview_view);
}
我目前正在使用 android-demo-app/ObjectDetection/ On Temi Robot,预加载的图像到目前为止可以正常工作,但是当我按“实时”转到实时对象检测屏幕时,它向右旋转了 90 度。
Temi 机器人在屏幕的同一侧只有一个前置摄像头。
我试过更改 textureView.setTransform()
imageAnalysisConfig.Builder().setTargetRotation()
imageAnalysis.setTargetRotation()
但无济于事
也尝试将 activity
标签下的 AndroidManifest.xml
screenOrientation
更改为 fullSensor
或 Landscape
但没有任何改变。
我一直在 Android Developer CameraX 页面上查找答案 first link second link 但我找不到任何答案。也许我不够聪明,无法在这里找到解决方案。
非常感谢任何帮助!
AbstractCameraXActivity.java
// Wires up the CameraX (1.0.0-alpha) pipeline: the Preview use case streams
// frames into the activity's TextureView, and the ImageAnalysis use case feeds
// throttled frames into analyzeImage() on the background handler.
private void setupCameraX() {
    final TextureView cameraTexture = getCameraPreviewTextureView();
    final PreviewConfig prevCfg = new PreviewConfig.Builder().build();
    final Preview cameraPreview = new Preview(prevCfg);
    // Earlier rotation experiment, kept for reference:
    // Matrix m = new Matrix();
    // m.postRotate(180);
    // textureView.setTransform(m); //not working
    cameraPreview.setOnPreviewOutputUpdateListener(
            output -> cameraTexture.setSurfaceTexture(output.getSurfaceTexture()));

    final var analysisCfg = new ImageAnalysisConfig.Builder()
            .setTargetResolution(new Size(500, 500))
            .setCallbackHandler(mBackgroundHandler)
            .setImageReaderMode(ImageAnalysis.ImageReaderMode.ACQUIRE_LATEST_IMAGE)
            //.setTargetRotation(Surface.ROTATION_0) // not working
            .build();
    imageAnalysis = new ImageAnalysis(analysisCfg);
    imageAnalysis.setAnalyzer((frame, rotationDeg) -> {
        // Throttle: run inference at most once every 500 ms.
        if (SystemClock.elapsedRealtime() - mLastAnalysisResultTime < 500) {
            return;
        }
        final R2 result = analyzeImage(frame, rotationDeg);
        if (result != null) {
            mLastAnalysisResultTime = SystemClock.elapsedRealtime();
            runOnUiThread(() -> applyToUiAnalyzeImageResult(result));
        }
    });
    //imageAnalysis.setTargetRotation(Surface.ROTATION_180); // not working
    CameraX.bindToLifecycle(this, cameraPreview, imageAnalysis);
}
ObjectDetectionActivity.java
/**
 * Runs YOLOv5 inference on one camera frame.
 *
 * <p>Lazily loads the TorchScript Lite model on first call, rotates the frame
 * upright, resizes it to the model's input size, runs the forward pass, and
 * converts the raw outputs into NMS-filtered detection results scaled to the
 * result view.
 *
 * @param image           the frame from ImageAnalysis (must contain an Image)
 * @param rotationDegrees clockwise rotation needed to make the frame upright
 * @return detection results, or {@code null} if the model could not be loaded
 */
@Override
@WorkerThread
@Nullable
protected AnalysisResult analyzeImage(ImageProxy image, int rotationDegrees) {
    try {
        // Lazily load the TorchScript Lite model once.
        if (mModule == null) {
            mModule = LiteModuleLoader.load(MainActivity.assetFilePath(getApplicationContext(), "yolov5s.torchscript.ptl"));
        }
    } catch (IOException e) {
        Log.e("Object Detection", "Error reading assets", e);
        return null;
    }
    Bitmap bitmap = imgToBitmap(Objects.requireNonNull(image.getImage()));
    Matrix matrix = new Matrix();
    // FIX: honor the rotation reported by the caller instead of a hard-coded
    // 90°, so the model sees an upright image regardless of sensor mounting.
    // (The current caller passes 90, so behavior is unchanged there.)
    matrix.postRotate((float) rotationDegrees);
    bitmap = Bitmap.createBitmap(bitmap, 0, 0, bitmap.getWidth(), bitmap.getHeight(), matrix, true);
    Bitmap resizedBitmap = Bitmap.createScaledBitmap(bitmap, PrePostProcessor.mInputWidth, PrePostProcessor.mInputHeight, true);
    final Tensor inputTensor = TensorImageUtils.bitmapToFloat32Tensor(resizedBitmap, PrePostProcessor.NO_MEAN_RGB, PrePostProcessor.NO_STD_RGB);
    IValue[] outputTuple = mModule.forward(IValue.from(inputTensor)).toTuple();
    final Tensor outputTensor = outputTuple[0].toTensor();
    final float[] outputs = outputTensor.getDataAsFloatArray();
    // Scale factors: model-input space -> rotated-bitmap space -> view space.
    float imgScaleX = (float) bitmap.getWidth() / PrePostProcessor.mInputWidth;
    float imgScaleY = (float) bitmap.getHeight() / PrePostProcessor.mInputHeight;
    float ivScaleX = (float) mResultView.getWidth() / bitmap.getWidth();
    float ivScaleY = (float) mResultView.getHeight() / bitmap.getHeight();
    final ArrayList<Result> results = PrePostProcessor.outputsToNMSPredictions(outputs, imgScaleX, imgScaleY, ivScaleX, ivScaleY, 0, 0);
    return new AnalysisResult(results);
}
AndroidManifest.xml
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
    package="org.pytorch.demo.objectdetection">
    <uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE" />
    <uses-permission android:name="android.permission.CAMERA" />
    <application
        android:allowBackup="true"
        android:icon="@mipmap/ic_launcher"
        android:label="@string/app_name"
        android:roundIcon="@mipmap/ic_launcher_round"
        android:supportsRtl="true"
        android:theme="@style/AppTheme">
        <!-- NOTE(review): since API 13, configChanges="orientation" alone does NOT
             prevent activity recreation on rotation; "orientation|screenSize" is
             required. Confirm whether recreation suppression is intended here. -->
        <activity android:name=".MainActivity"
            android:configChanges="orientation"
            android:screenOrientation="fullSensor">
            <intent-filter>
                <action android:name="android.intent.action.MAIN" />
                <category android:name="android.intent.category.LAUNCHER" />
            </intent-filter>
        </activity>
        <!-- NOTE(review): the same configChanges caveat (missing "screenSize")
             applies to this activity. -->
        <activity
            android:name=".ObjectDetectionActivity"
            android:configChanges="orientation"
            android:screenOrientation="fullSensor">
        </activity>
    </application>
</manifest>
更新
我想我现在可能知道问题所在了。在 ObjectDetectionActivity 的 setupCameraX() 方法中,我应该操纵 textureView 并且操纵矩阵变换的枢轴是我需要的。我开始在屏幕上看到一些 cameraView。但是我不知道这个参数中需要的 x 和 y 是什么...
// Fragment from the rotation experiment: rotating the TextureView directly.
final TextureView textureView = getCameraPreviewTextureView();
final PreviewConfig previewConfig = new PreviewConfig.Builder().build();
final Preview preview = new Preview(previewConfig);
Matrix m = new Matrix();
// x,y is the rotation pivot. For a view-centred rotation this is typically
// (textureView.getWidth() / 2f, textureView.getHeight() / 2f) — but note the
// view must already be laid out, otherwise both are 0. TODO confirm timing.
m.postRotate(180,x,y);//potential solution here.
textureView.setTransform(m); //not working
preview.setOnPreviewOutputUpdateListener(output -> textureView.setSurfaceTexture(output.getSurfaceTexture()));
我已将 cameraX 版本从 1.0.0-alpha5 更改为 1.0.0
// Configures the CameraX 1.0.0 pipeline asynchronously: once the process
// camera provider is ready, binds a Preview (target rotation 270° for the
// Temi screen) and a throttled ImageAnalysis use case to this lifecycle.
// NOTE(review): the method's closing brace is missing from this excerpt.
private void setupCameraX() {
    ListenableFuture<ProcessCameraProvider> cameraProviderFuture =
            ProcessCameraProvider.getInstance(this);
    cameraProviderFuture.addListener(() -> {
        try {
            ProcessCameraProvider cameraProvider = cameraProviderFuture.get();
            PreviewView previewView = getCameraPreviewTextureView();
            final Preview preview = new Preview.Builder()
                    .setTargetRotation(Surface.ROTATION_270)//working nicely
                    .build();
            //TODO: Check if result_view can render over preview_view
            CameraSelector cameraSelector = new CameraSelector
                    .Builder()
                    .requireLensFacing(CameraSelector.LENS_FACING_FRONT)
                    .build();
            preview.setSurfaceProvider(previewView.getSurfaceProvider());
            executor = Executors.newSingleThreadExecutor();
            imageAnalysis = new ImageAnalysis.Builder()
                    .setTargetResolution(new Size(500, 500))
                    .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
                    .build();
            imageAnalysis.setAnalyzer(executor,
                    image -> {
                        Log.d("image analyzer","Entered Analyse method");
                        // Throttle: analyze at most one frame per 500 ms.
                        if (SystemClock.elapsedRealtime() - mLastAnalysisResultTime < 500) {
                            return;
                        }
                        // Rotation hard-coded to 90 — presumably the Temi camera's
                        // sensor orientation; TODO confirm via image.getImageInfo().
                        final T result = analyzeImage(image, 90);
                        if (result != null) {
                            mLastAnalysisResultTime = SystemClock.elapsedRealtime();
                            runOnUiThread(() -> applyToUiAnalyzeImageResult(result));
                        }
                    });
            camera = cameraProvider.bindToLifecycle(
                    this,
                    cameraSelector,
                    imageAnalysis,
                    preview);
        } catch (InterruptedException | ExecutionException e) {
            // NOTE(review): InterruptedException is swallowed without re-interrupting
            // the thread — consider Thread.currentThread().interrupt() here.
            new AlertDialog
                    .Builder(this)
                    .setTitle("Camera setup error")
                    .setMessage(e.getMessage())
                    .setPositiveButton("Ok",
                            (dialog, which) -> {
                            })
                    .show();
        }
    }, ContextCompat.getMainExecutor(this));
注意:getCameraPreviewTextureView() 是膨胀 ViewStub 的方法。我只是在关注一个 pytorch android 示例。
// Inflates the preview ViewStub (on first call) and returns the PreviewView
// inside it; also caches the result overlay view as a side effect.
@Override
protected PreviewView getCameraPreviewTextureView() {
    mResultView = findViewById(R.id.resultView);
    //
    final var inflatedStub = ((ViewStub) findViewById(R.id.preview_view_stub)).inflate();
    return inflatedStub.findViewById(R.id.preview_view);
}