我想使用 Camera2(在 Jetpack Compose 中)在 Android 中显示模糊预览。
无论我尝试什么,我什至无法将图像从
ImageReader
传递到 SurfaceView
或 TextureView
。这是我尝试的最后一件事:
预览本身就是一个
SurfaceView
:
AndroidView(
modifier = Modifier.fillMaxSize(),
factory = {context ->
SurfaceView(context).apply {
layoutParams = ViewGroup.LayoutParams(
ViewGroup.LayoutParams.MATCH_PARENT,
ViewGroup.LayoutParams.MATCH_PARENT,
)
}.also {
it.holder.addCallback(object : SurfaceHolder.Callback {
override fun surfaceCreated(holder: SurfaceHolder) {
scope.launch {
val camera = CameraHandler.openCamera(context);
camera.startBlurredPreview(
it.holder.surface,
context,
previewSize,
);
}
}
override fun surfaceChanged(
holder: SurfaceHolder,
format: Int,
width: Int,
height: Int
) {
}
override fun surfaceDestroyed(holder: SurfaceHolder) {
}
})
}
}
)
相机的逻辑在这里:
class CameraHandler(
private val manager: CameraManager,
private val device: CameraDevice,
private val handler: Handler,
private val thread: HandlerThread,
private val lens: Int = CameraCharacteristics.LENS_FACING_BACK,
) {
private lateinit var imageReader: ImageReader
val characteristics: CameraCharacteristics
get() = manager.getCameraCharacteristics(lens.toString())
@RequiresApi(Build.VERSION_CODES.P)
private suspend fun createCaptureSession(
outputs: List<Surface>,
): CameraCaptureSession = suspendCancellableCoroutine { cont ->
// I really don't want to use a deprecated method, but there is
// absolutely no documentation available for the new method.
device.createCaptureSession(
outputs,
object : CameraCaptureSession.StateCallback() {
override fun onConfigured(session: CameraCaptureSession) {
cont.resume(session)
}
override fun onConfigureFailed(session: CameraCaptureSession) {
cont.resumeWithException(RuntimeException("Failed to configure session"))
}
},
handler,
)
}
@RequiresApi(Build.VERSION_CODES.P)
suspend fun startBlurredPreview(
surface: Surface,
context: Context,
size: Size? = null,
) {
val readerSize = size ?: getScreenSize(context).let {
Size(
it.width / 10,
it.height / 10,
)
}
println("Creating image reader with size $readerSize")
val imageReader = ImageReader.newInstance(
readerSize.width,
readerSize.height,
ImageFormat.YUV_420_888,
IMAGE_BUFFER_SIZE,
)
val rs = RenderScript.create(context)
val blurScript = ScriptIntrinsicBlur.create(rs, Element.U8_4(rs))
imageReader.setOnImageAvailableListener({ reader ->
val image = reader.acquireLatestImage() ?: return@setOnImageAvailableListener
// HERE'S where I struggle. How do I blur the image? How do I send it to the Surface? The code below is the last solution I've tried so far....
// Show yuv image on screen
val yBuffer = image.planes[0].buffer
val uBuffer = image.planes[1].buffer
val vBuffer = image.planes[2].buffer
val ySize = yBuffer.remaining()
val uSize = uBuffer.remaining()
val vSize = vBuffer.remaining()
val yData = ByteArray(ySize)
val uData = ByteArray(uSize)
val vData = ByteArray(vSize)
yBuffer.get(yData)
uBuffer.get(uData)
vBuffer.get(vData)
val yuvImage = YuvImage(yData, ImageFormat.YUY2, readerSize.width, readerSize.height, null,)
val out = ByteArrayOutputStream()
yuvImage.compressToJpeg(Rect(0, 0, readerSize.width, readerSize.height), 100, out)
val imageBytes = out.toByteArray()
val bitmap = BitmapFactory.decodeByteArray(imageBytes, 0, imageBytes.size)
if (bitmap == null) {
return@setOnImageAvailableListener
}
println("Bitmap size: ${bitmap.width}x${bitmap.height}")
val canvas = surface.lockCanvas(null)!!
canvas.drawBitmap(bitmap, 0f, 0f, null)
surface.unlockCanvasAndPost(canvas)
// Release
image.close()
}, handler)
println("Creating capture session")
val outputs = listOf(imageReader.surface)
val session = createCaptureSession(outputs)
println("Creating capture request")
val captureRequest = device.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW)
captureRequest.addTarget(imageReader.surface)
println("Setting repeating request")
session.setRepeatingRequest(
captureRequest.build(),
null,
handler,
)
}
companion object {
const val IMAGE_BUFFER_SIZE = 2
fun getCameraManager(
context: Context,
): CameraManager = getSystemService(context, CameraManager::class.java)!!
fun createThread(): HandlerThread = HandlerThread("CameraHandler").apply { start() }
fun createHandler(thread: HandlerThread): Handler = Handler(thread.looper)
@SuppressLint("MissingPermission")
suspend fun openCamera(
manager: CameraManager,
cameraId: String,
thread: HandlerThread,
): CameraHandler = suspendCancellableCoroutine { cont ->
val handler = createHandler(thread)
manager.openCamera(cameraId, object : CameraDevice.StateCallback() {
override fun onOpened(device: CameraDevice) {
cont.resume(
CameraHandler(
manager,
device,
handler,
thread,
)
)
}
override fun onDisconnected(device: CameraDevice) {
}
override fun onError(device: CameraDevice, error: Int) {
}
}, handler)
}
suspend fun openCamera(
context: Context,
): CameraHandler {
val manager = getCameraManager(context)
val cameraId = manager.cameraIdList.first()
val thread = createThread()
return openCamera(manager, cameraId, thread)
}
}
}
注意: 将格式更改为
ImageType.JPEG
确实有效,但由于我需要实时预览,因此不幸的是,这不适用。
有时在尝试不同的解决方案时会弹出错误
Failed to create image decoder with message ‘unimplemented’
,但我无法修复它。
这里是我尝试过的所有解决方案:
您似乎正在尝试使用 Android 的
Camera2
API 从相机捕获预览,然后使用 RenderScript
对其进行模糊处理,最后将其显示在 SurfaceView
上。您问题的核心似乎是捕获 YUV 图像、将其转换为位图、模糊它以及在 Surface
上绘制它之间的过渡。
我将为您的
setOnImageAvailableListener
中的图像模糊过程提供简化的解决方案:
您已经将
YuvImage
转换为 Bitmap
。您可以先使用 RenderScript 对其进行模糊处理,而不是直接将此位图绘制到画布上。
模糊位图:
fun blurBitmap(context: Context, bitmap: Bitmap, blurRadius: Float = 10f): Bitmap {
val rs = RenderScript.create(context)
val input = Allocation.createFromBitmap(rs, bitmap)
val output = Allocation.createTyped(rs, input.type)
val script = ScriptIntrinsicBlur.create(rs, Element.U8_4(rs)).apply {
setRadius(blurRadius)
setInput(input)
forEach(output)
}
output.copyTo(bitmap)
rs.destroy()
return bitmap
}
setOnImageAvailableListener
中使用上述功能:val blurredBitmap = blurBitmap(context, bitmap)
val canvas = surface.lockCanvas(null)!!
canvas.drawBitmap(blurredBitmap, 0f, 0f, null)
surface.unlockCanvasAndPost(canvas)
请记住,该过程可能需要大量计算。如果相机的更新来得太快,系统可能会陷入模糊和渲染的困境。考虑添加优化,例如: