Cannot detect faces, and the returned rectangles size is always 1x0
I have the following Android code:
class MainActivity : AppCompatActivity(), TtsSpeaker.Listener, PocketSphinx.Listener {
    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_main)

        val ocvLoaded = OpenCVLoader.initDebug()
        if (ocvLoaded) {
            loadModel(this) // make sure the model is not loaded before OpenCV has been initialized
        } else {
            Log.d("openCV", "loader: ${OpenCVLoader.initDebug()}")
        }
    }
}
and it uses the face detection class below:
package hasan.tts_mobile

import android.app.Activity
import android.graphics.Bitmap
import org.opencv.android.Utils
import org.opencv.core.CvType
import org.opencv.core.Mat
import org.opencv.core.MatOfRect
import org.opencv.core.Size
import org.opencv.imgproc.Imgproc
import org.opencv.objdetect.CascadeClassifier
import java.io.File

object FaceDetection {
    private val faceModel = "haarcascade_frontalface_default.xml"
    private lateinit var faceCascade: CascadeClassifier

    fun loadModel(activity: Activity) {
        println("started loading the model")
        faceCascade = CascadeClassifier(File(activity.filesDir, "das").apply {
            writeBytes(activity.assets.open(faceModel).readBytes())
        }.path)
        println("completed loading the model")
        tts!!.say("I'm 100% ready!")
    }

    fun detectFaces(activity: Activity, image: Bitmap?): Long {
        val matImage = Mat(image!!.height, image.width, CvType.CV_8UC1)
        val bmpImage = image.copy(Bitmap.Config.ARGB_8888, true)
        Utils.bitmapToMat(bmpImage, matImage)
        val rectangles = MatOfRect()
        val grayScaled = matImage.prepare()
        faceCascade.detectMultiScale(
            grayScaled, rectangles, 1.2, 10, 0,
            Size(40.0, 40.0),
            null)
        return rectangles.size() as Long
    }

    private fun Mat.toGrayScale(): Mat =
        if (channels() >= 3) Mat().apply {
            Imgproc.cvtColor(
                this@toGrayScale,
                this,
                Imgproc.COLOR_BGR2GRAY
            )
        }
        else this

    private fun Mat.prepare(): Mat {
        val mat = toGrayScale()
        Imgproc.equalizeHist(mat, mat)
        return mat
    }
}
Face detection is called after a photo has been successfully taken with the camera, using the following code:
private fun startCamera() {
    val fileName = System.currentTimeMillis().toString() + ".jpeg"
    output = File(
        this.getExternalFilesDir(Environment.DIRECTORY_PICTURES),
        fileName
    )
    val intent = Intent(MediaStore.ACTION_IMAGE_CAPTURE)
    outPutFileUri = this?.let { it1 ->
        FileProvider.getUriForFile(
            it1,
            BuildConfig.APPLICATION_ID,
            output!!
        )
    }
    intent.putExtra(MediaStore.EXTRA_OUTPUT, outPutFileUri)
    startActivityForResult(intent, REQUEST_IMAGE_CAPTURE)
}

@SuppressLint("MissingSuperCall")
override fun onActivityResult(requestCode: Int, resultCode: Int, data: Intent?) {
    if (requestCode == REQUEST_IMAGE_CAPTURE && resultCode == Activity.RESULT_OK) {
        val bitmap = outPutFileUri?.let { getCapturedImage(it) }
        imageView.setImageBitmap(bitmap)
        FaceDetection.detectFaces(this, bitmap) // Calling face detection
    }
}

private fun getCapturedImage(selectedPhotoUri: Uri): Bitmap {
    val bitmap = when {
        Build.VERSION.SDK_INT < 28 -> MediaStore.Images.Media.getBitmap(
            this.contentResolver,
            selectedPhotoUri
        )
        else -> {
            val source = ImageDecoder.createSource(this.contentResolver, selectedPhotoUri)
            ImageDecoder.decodeBitmap(source)
        }
    }
    return when (ExifInterface(contentResolver.run { openInputStream(selectedPhotoUri) }).getAttributeInt(
        ExifInterface.TAG_ORIENTATION, ExifInterface.ORIENTATION_UNDEFINED)) {
        ExifInterface.ORIENTATION_ROTATE_90 -> Bitmap.createBitmap(bitmap, 0, 0, bitmap.width, bitmap.height, Matrix().apply { postRotate(90F) }, true)
        ExifInterface.ORIENTATION_ROTATE_180 -> Bitmap.createBitmap(bitmap, 0, 0, bitmap.width, bitmap.height, Matrix().apply { postRotate(180F) }, true)
        ExifInterface.ORIENTATION_ROTATE_270 -> Bitmap.createBitmap(bitmap, 0, 0, bitmap.width, bitmap.height, Matrix().apply { postRotate(270F) }, true)
        else -> bitmap
    }
}
The full error I get is:
D/AndroidRuntime: Shutting down VM
E/AndroidRuntime: FATAL EXCEPTION: main
Process: hasan.tts_mobile, PID: 14110
java.lang.RuntimeException: Failure delivering result ResultInfo{who=null, request=1, result=-1, data=Intent { }} to activity {hasan.tts_mobile/hasan.tts_mobile.MainActivity}: java.lang.NullPointerException: Attempt to read from field 'double org.opencv.core.Size.width' on a null object reference
at android.app.ActivityThread.deliverResults(ActivityThread.java:4845)
at android.app.ActivityThread.handleSendResult(ActivityThread.java:4886)
at android.app.servertransaction.ActivityResultItem.execute(ActivityResultItem.java:51)
at android.app.servertransaction.TransactionExecutor.executeCallbacks(TransactionExecutor.java:135)
at android.app.servertransaction.TransactionExecutor.execute(TransactionExecutor.java:95)
at android.app.ActivityThread$H.handleMessage(ActivityThread.java:2016)
at android.os.Handler.dispatchMessage(Handler.java:107)
at android.os.Looper.loop(Looper.java:214)
at android.app.ActivityThread.main(ActivityThread.java:7356)
at java.lang.reflect.Method.invoke(Native Method)
at com.android.internal.os.RuntimeInit$MethodAndArgsCaller.run(RuntimeInit.java:492)
at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:930)
Caused by: java.lang.NullPointerException: Attempt to read from field 'double org.opencv.core.Size.width' on a null object reference
at org.opencv.objdetect.CascadeClassifier.detectMultiScale(CascadeClassifier.java:156)
at hasan.tts_mobile.FaceDetection.detectFaces(FaceDetection.kt:38)
at hasan.tts_mobile.MainActivity.onActivityResult(MainActivity.kt:173)
at android.app.Activity.dispatchActivityResult(Activity.java:8110)
at android.app.ActivityThread.deliverResults(ActivityThread.java:4838)
at android.app.ActivityThread.handleSendResult(ActivityThread.java:4886)
at android.app.servertransaction.ActivityResultItem.execute(ActivityResultItem.java:51)
at android.app.servertransaction.TransactionExecutor.executeCallbacks(TransactionExecutor.java:135)
at android.app.servertransaction.TransactionExecutor.execute(TransactionExecutor.java:95)
at android.app.ActivityThread$H.handleMessage(ActivityThread.java:2016)
at android.os.Handler.dispatchMessage(Handler.java:107)
at android.os.Looper.loop(Looper.java:214)
at android.app.ActivityThread.main(ActivityThread.java:7356)
at java.lang.reflect.Method.invoke(Native Method)
at com.android.internal.os.RuntimeInit$MethodAndArgsCaller.run(RuntimeInit.java:492)
at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:930)
I/Process: Sending signal. PID: 14110 SIG: 9
Process 14110 terminated.
I added
println("pic size: ${grayScaled.size()}")
println("faceCascade: ${faceCascade.originalWindowSize}")
before
faceCascade.detectMultiScale(
    grayScaled, rectangles, 1.2, 10, 0,
    Size(40.0, 40.0),
    null)
and got the following:
I/System.out: pic size: 960x1280
faceCascade: 24x24
D/AndroidRuntime: Shutting down VM
E/AndroidRuntime: FATAL EXCEPTION: main
Process: hasan.tts_mobile, PID: 15177
Update

It looks like the problem is that I passed maxSize as null, when it should be Size() or an actual size such as Size(40.0, 40.0).
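(For reference, the parameters of this detectMultiScale overload, per the OpenCV Java API, annotated on the same variables used above and with the values I ended up using; just a sketch:)

// detectMultiScale(image, objects, scaleFactor, minNeighbors, flags, minSize, maxSize)
faceCascade.detectMultiScale(
    grayScaled,        // input image (grayscale works best)
    rectangles,        // output MatOfRect of detected faces
    1.1,               // scaleFactor: how much the image is shrunk at each pyramid step
    3,                 // minNeighbors: how many neighbors a candidate needs to be kept
    0,                 // flags: legacy parameter, ignored by the newer cascade format
    Size(30.0, 30.0),  // minSize: smallest face to look for
    Size()             // maxSize: empty Size() means "no upper limit" (must not be null)
)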
So I changed the call to:
faceCascade.detectMultiScale(
    grayScaled, rectangles, 1.1, 3, 0,
    Size(30.0, 30.0), Size()
)
println("rectangles ${rectangles.size()}")
Instead of hanging or crashing, it now returns rectangles.size() as:
I/System.out: pic size: 3264x2448
faceCascade: 24x24
I/System.out: rectangles 1x0
Does this mean it did not detect any faces, and if so, how can I fix it?
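(Side note on reading that output: rather than printing size(), which is a columns x rows string, the count can be read directly from the MatOfRect with the standard toArray()/empty() calls. A minimal sketch; the helper name countDetections is just for illustration:)

import org.opencv.core.MatOfRect

// A MatOfRect is an N x 1 matrix of rects, so toArray().size is the number of
// detections, and empty() is true when nothing was found.
fun countDetections(rectangles: MatOfRect): Int =
    if (rectangles.empty()) 0 else rectangles.toArray().size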
I figured it out: the result of the face detection was being read before the detection itself had finished, so the solution is to make the call asynchronously, which in Kotlin is done with a coroutine.
In Kotlin there are coroutine-core and coroutine-android artifacts, so I had to be careful to select the right one:
implementation 'org.jetbrains.kotlinx:kotlinx-coroutines-android:1.3.1'
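(As I understand it, the -android artifact is what supplies an Android Dispatchers.Main, which MainScope() is built on, so without this dependency launching on that scope would fail at runtime. Roughly, MainScope() is equivalent to the sketch below; the name uiScope is mine:)

import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.SupervisorJob

// Roughly what MainScope() gives you: coroutines run on the Android main thread,
// and a SupervisorJob keeps one failing child from cancelling the others.
// Dispatchers.Main only resolves on Android when kotlinx-coroutines-android is on the classpath.
val uiScope = CoroutineScope(SupervisorJob() + Dispatchers.Main)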
In Kotlin coroutines there is:
suspend fun mySuspendMethodThatWaitsForChildCoroutinesToFinish() {
    coroutineScope {
        launch { mySuspendMethod() }
    }
}

// or

suspend fun mySuspendMethodThatWaitsForChildCoroutinesToFinish() = coroutineScope {
    launch { mySuspendMethod() }
}
But this keeps requiring suspend to be propagated up through every layer, and it gets stuck once it reaches onActivityResult, which is triggered after taking the photo. So the solution is to use a MainScope() coroutine, which lets you start a coroutine from a non-suspending function. You:

1- declare the coroutine scope as val scope = MainScope()
2- run the coroutine as scope.launch { }
The full picture looks like this:
import kotlinx.coroutines.*

class MainActivity : AppCompatActivity() {
    private val scope = MainScope()

    override fun onCreate(savedInstanceState: Bundle?) { }

    override fun onActivityResult(requestCode: Int, resultCode: Int, data: Intent?) {
        detectFace(this, image, textView)
    }
}

private fun detectFace(context: Context, image: Bitmap, facesValue: TextView) {
    val frame = AndroidFrameConverter().convert(image)
    val mat = OpenCVFrameConverter.ToMat().convert(frame)
    scope.launch {
        val numberOfFaces = FaceDetection.detectFaces(mat).toString()
        (context as Activity).runOnUiThread {
            facesValue.text = numberOfFaces
        }
    }
}
So my MainActivity is now:
class MainActivity : AppCompatActivity() {
    private val scope = MainScope()
    val REQUEST_IMAGE_CAPTURE = 1
    private var output: File? = null
    var outPutFileUri: Uri? = null

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_main)

        if (!isPermissionGranted(permission.RECORD_AUDIO)) {
            requestAudioPermission(this)
        } else {
            Toast.makeText(
                this@MainActivity, "Audio permission is granted",
                Toast.LENGTH_SHORT
            ).show()
        }

        val ocvLoaded = OpenCVLoader.initDebug()
        if (ocvLoaded) {
            loadModel(this)
            Toast.makeText(
                this@MainActivity, "OpenCV loaded",
                Toast.LENGTH_SHORT
            ).show()
        } else {
            Toast.makeText(
                this@MainActivity, "Unable to load OpenCV",
                Toast.LENGTH_SHORT
            ).show()
            Log.d("openCV", "loader: ${OpenCVLoader.initDebug()}")
        }

        btnCamera.setOnClickListener {
            if (isPermissionGranted(permission.CAMERA)) startCamera()
            else requestCameraPermission(this)
        }

        gryImage.setOnClickListener {
            val bitmap = outPutFileUri?.let { getCapturedImage(it) }
            val matImage = Mat(bitmap!!.height, bitmap.width, CvType.CV_8UC1)
            val bmpImage = bitmap.copy(Bitmap.Config.ARGB_8888, true)
            Utils.bitmapToMat(bmpImage, matImage)
            val bmp = Bitmap.createBitmap(matImage.cols(), matImage.rows(), Bitmap.Config.ARGB_8888)
            Imgproc.cvtColor(matImage, matImage, Imgproc.COLOR_RGB2GRAY)
            Utils.matToBitmap(matImage, bmp)
            imageView.setImageBitmap(bmp)
        }
    }

    private fun startCamera() {
        val fileName = System.currentTimeMillis().toString() + ".jpeg"
        output = File(
            this.getExternalFilesDir(Environment.DIRECTORY_PICTURES),
            fileName
        )
        val intent = Intent(MediaStore.ACTION_IMAGE_CAPTURE)
        outPutFileUri = this.let { it ->
            FileProvider.getUriForFile(
                it,
                BuildConfig.APPLICATION_ID,
                output!!
            )
        }
        intent.putExtra(MediaStore.EXTRA_OUTPUT, outPutFileUri)
        startActivityForResult(intent, REQUEST_IMAGE_CAPTURE)
    }

    override fun onActivityResult(requestCode: Int, resultCode: Int, data: Intent?) {
        super.onActivityResult(requestCode, resultCode, data)
        val activity = this
        if (requestCode == REQUEST_IMAGE_CAPTURE && resultCode == Activity.RESULT_OK) {
            val bitmap = outPutFileUri?.let { getCapturedImage(it) }
            imageView.setImageBitmap(bitmap)
            outPutFileUri?.let {
                scope.launch {
                    val detectedFaces = FaceDetection.detectFaces(bitmap)
                    println("Detected Faces = $detectedFaces")
                    Toast.makeText(
                        this@MainActivity, "Detected Faces = $detectedFaces",
                        Toast.LENGTH_SHORT
                    ).show()
                }
            }
        }
    }

    private fun getCapturedImage(selectedPhotoUri: Uri): Bitmap {
        return when {
            Build.VERSION.SDK_INT < 28 -> MediaStore.Images.Media.getBitmap(
                contentResolver,
                selectedPhotoUri
            )
            else -> {
                val source = ImageDecoder.createSource(contentResolver, selectedPhotoUri)
                ImageDecoder.decodeBitmap(source)
            }
        }
        // If the image is rotated, fix it
        /* return when (ExifInterface(contentResolver.run { openInputStream(selectedPhotoUri) }).getAttributeInt(
            ExifInterface.TAG_ORIENTATION, ExifInterface.ORIENTATION_UNDEFINED)) {
            ExifInterface.ORIENTATION_ROTATE_90 ->
                Bitmap.createBitmap(bitmap, 0, 0, bitmap.width, bitmap.height, Matrix().apply {
                    postRotate(90F) }, true)
            ExifInterface.ORIENTATION_ROTATE_180 ->
                Bitmap.createBitmap(bitmap, 0, 0, bitmap.width, bitmap.height, Matrix().apply {
                    postRotate(180F) }, true)
            ExifInterface.ORIENTATION_ROTATE_270 ->
                Bitmap.createBitmap(bitmap, 0, 0, bitmap.width, bitmap.height, Matrix().apply {
                    postRotate(270F) }, true)
            else -> bitmap
        } */
    }

    override fun onRequestPermissionsResult(
        requestCode: Int, permissions: Array<String>, grantResults: IntArray) =
        onPermissionsRequestResult(this@MainActivity,
            requestCode, permissions, grantResults)
}
And my FaceDetection object is:
object FaceDetection {
    private const val faceModel = "haarcascades/haarcascade_frontalface_default.xml" // under main/assets/haarcascades
    private lateinit var faceCascade: CascadeClassifier

    fun loadModel(activity: Activity) {
        faceCascade = CascadeClassifier(File(activity.filesDir, "das").apply {
            writeBytes(activity.assets.open(faceModel).readBytes())
        }.path)
    }

    fun detectFaces(image: Bitmap?): Int {
        val matImage = Mat(image!!.height, image.width, CvType.CV_8UC1) // CV_8UC1 is a grayscale image
        val bmpImage = image.copy(Bitmap.Config.ARGB_8888, true)
        Utils.bitmapToMat(bmpImage, matImage)
        val faceDetections = MatOfRect()
        val grayScaled = matImage.prepare()
        faceCascade.detectMultiScale(matImage, faceDetections, 1.1, 7, 0,
            Size(250.0, 40.0), Size())

        // process faces found
        for (rect in faceDetections.toArray()) {
            println("face found")
            // Imgproc.rectangle(
            //     image,
            //     Point(rect.x, rect.y),
            //     Point(rect.x + rect.width, rect.y + rect.height),
            //     Scalar(0.0, 255.0, 0.0)
            // )
        }
        return faceDetections.toArray().size
    }

    private fun Mat.toGrayScale(): Mat =
        if (channels() >= 3) Mat().apply {
            Imgproc.cvtColor(
                this@toGrayScale,
                this,
                Imgproc.COLOR_BGR2GRAY
            )
        }
        else this

    private fun Mat.prepare(): Mat {
        val mat = toGrayScale()
        Imgproc.equalizeHist(mat, mat)
        return mat
    }
}
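One caveat I'm aware of: MainScope() runs its coroutines on the main dispatcher, and detectMultiScale is CPU-heavy, so for larger images it may be worth moving the detection to a background dispatcher and only touching the UI on the main thread. A rough sketch of that variation (the helper detectFacesAsync and its onResult callback are mine, not part of the code above):

import android.graphics.Bitmap
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.launch
import kotlinx.coroutines.withContext

// Sketch: run the CPU-bound detectMultiScale on a worker thread,
// then hand the count back on the scope's own dispatcher (Main for MainScope()).
fun detectFacesAsync(scope: CoroutineScope, bitmap: Bitmap, onResult: (Int) -> Unit) {
    scope.launch {
        val detectedFaces = withContext(Dispatchers.Default) {
            FaceDetection.detectFaces(bitmap)
        }
        onResult(detectedFaces) // safe place to update a TextView or show a Toast
    }
}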