使用下面的代码我设法获得了 NV21 格式的视频帧。但最终输出的是黑白图像——很可能是渲染过程中没有正确处理色度(UV)数据,只显示了亮度(Y)数据。
/**
 * Sets up local audio/video capture and renders the local video track
 * onto [localSurfaceView].
 *
 * Fixes relative to the original:
 *  - `VideoCapturer.startCapture(width, height, framerate)` takes width first;
 *    the original passed VIDEO_HEIGHT as the width argument.
 *  - The video track is sunk directly into the SurfaceViewRenderer. The renderer
 *    already implements VideoSink and performs the full YUV -> RGB conversion
 *    (Y, U and V planes) in its OpenGL shader, so no manual conversion sink is
 *    needed; a custom sink that only handled luma is what produced a grayscale
 *    image.
 *
 * NOTE(review): `localSurfaceView.init(eglBase.eglBaseContext, null)` must have
 * been called before frames arrive — confirm at the call site.
 */
private fun startLocalVideoCapture(localSurfaceView: SurfaceViewRenderer) {
    val peerConnectionUtil = PeerConnectionUtil(
        requireActivity().application,
        eglBase.eglBaseContext
    )
    val peerConnectionFactory = peerConnectionUtil.peerConnectionFactory
    // Plain vals: both sources are used unconditionally below, so `by lazy`
    // added no laziness — only an extra delegate allocation.
    val localVideoSource = peerConnectionFactory.createVideoSource(false)
    val localAudioSource = peerConnectionFactory.createAudioSource(MediaConstraints())
    val videoCapturer = createVideoCapturer()
    videoCapturer?.let { capturer ->
        val surfaceTextureHelper =
            SurfaceTextureHelper.create(Thread.currentThread().name, eglBase.eglBaseContext)
        capturer.initialize(
            surfaceTextureHelper,
            localSurfaceView.context,
            localVideoSource.capturerObserver
        )
        // Width first, then height, then fps.
        capturer.startCapture(
            WebRtcClient.VIDEO_WIDTH,
            WebRtcClient.VIDEO_HEIGHT,
            WebRtcClient.VIDEO_FPS
        )
    } ?: run {
        Log.e("Failed: ", "Failed to create video capturer")
    }
    localAudioTrack =
        peerConnectionFactory.createAudioTrack(
            WebRtcClient.LOCAL_TRACK_ID + WebRtcClient.AUDIO,
            localAudioSource
        )
    localVideoTrack =
        peerConnectionFactory.createVideoTrack(WebRtcClient.LOCAL_TRACK_ID, localVideoSource)
    // SurfaceViewRenderer is a VideoSink: it converts I420 to RGB on the GPU.
    localVideoTrack?.addSink(localSurfaceView)
    val localStream = peerConnectionFactory.createLocalMediaStream(WebRtcClient.LOCAL_STREAM_ID)
    localStream.addTrack(localVideoTrack)
    localStream.addTrack(localAudioTrack)
}
我写了一个
YuvToRgbVideoSink
类,但一直没有找到把帧正确转换成 RGB 的方法。我也试过 OpenCV 4.6.0,但对它并不熟悉。
/**
 * VideoSink that renders incoming WebRTC frames onto a [SurfaceViewRenderer].
 *
 * Fix: the original `onFrame` was empty, so frames were silently dropped and
 * nothing (or only stale luma data) ever reached the display. SurfaceViewRenderer
 * is itself a VideoSink whose OpenGL shader performs the full YUV -> RGB
 * conversion (Y, U and V planes), so the correct approach is simply to forward
 * the frame rather than decode it on the CPU.
 */
class YuvToRgbVideoSink(private val surfaceViewRenderer: SurfaceViewRenderer) : VideoSink {

    override fun onFrame(videoFrame: VideoFrame) {
        // Forward synchronously; the renderer retains the frame internally for
        // as long as it needs it, so no extra retain()/release() is required here.
        surfaceViewRenderer.onFrame(videoFrame)
    }

    /**
     * CPU fallback: decodes an NV21 (YUV420SP) byte array into ARGB_8888 pixels.
     *
     * This is the canonical Android fixed-point BT.601 conversion; it is kept
     * only as a utility — the rendering path above does not use it.
     *
     * @param rgb      output buffer, must hold at least width * height ints
     * @param yuv420sp NV21 input: full-size Y plane followed by interleaved V/U
     * @param width    frame width in pixels
     * @param height   frame height in pixels
     */
    fun decodeYUV420SP(rgb: IntArray, yuv420sp: ByteArray, width: Int, height: Int) {
        val frameSize = width * height
        var yp = 0
        for (j in 0 until height) {
            // Start of the interleaved V/U row shared by two luma rows.
            var uvp = frameSize + (j shr 1) * width
            var u = 0
            var v = 0
            for (i in 0 until width) {
                // Luma, shifted for BT.601 "video range" (nominal 16..235).
                var y = (0xff and yuv420sp[yp].toInt()) - 16
                if (y < 0) y = 0
                // NV21 stores V then U, one pair per 2x2 pixel block.
                if (i and 1 == 0) {
                    v = (0xff and yuv420sp[uvp++].toInt()) - 128
                    u = (0xff and yuv420sp[uvp++].toInt()) - 128
                }
                // Fixed-point conversion; coefficients are the BT.601 matrix
                // scaled so results land in an 18-bit range (0..262143).
                val y1192 = 1192 * y
                var r = y1192 + 1634 * v
                var g = y1192 - 833 * v - 400 * u
                var b = y1192 + 2066 * u
                if (r < 0) r = 0 else if (r > 262143) r = 262143
                if (g < 0) g = 0 else if (g > 262143) g = 262143
                if (b < 0) b = 0 else if (b > 262143) b = 262143
                // Repack the 18-bit channels into opaque ARGB_8888.
                rgb[yp] = -0x1000000 or (r shl 6 and 0xff0000) or
                    (g shr 2 and 0xff00) or (b shr 10 and 0xff)
                yp++
            }
        }
    }
}
此代码使用 YuvImage 类从输入字节数组创建 YUV 图像对象,然后将其压缩为 JPEG 图像。生成的 JPEG 被解码为位图对象,然后用于提取 RGB 像素值。最后,RGB 值被裁剪到范围 [0, 255] 并作为 IntArray 返回。
下面是演示此过程的示例代码,您可以根据自己的输出需求调整相关参数。请注意,此代码假定输入的 YUV 数据为 NV21 格式。
import android.graphics.BitmapFactory
import android.graphics.ImageFormat
import android.graphics.Rect
import android.graphics.YuvImage
import android.util.Log
import java.io.ByteArrayOutputStream
import kotlin.math.max
import kotlin.math.min
/**
 * Converts an NV21 byte array to ARGB_8888 pixels by round-tripping through
 * JPEG (YuvImage -> JPEG -> Bitmap). Lossy but simple; fine for snapshots,
 * too slow for per-frame video.
 *
 * Fixes relative to the original:
 *  - referenced an undefined `TAG` (compile error) — a literal tag is used;
 *  - `BitmapFactory.decodeByteArray` can return null, which would have thrown
 *    an NPE at `getPixels` — now handled;
 *  - dimensions are validated up front and used consistently (the original
 *    built the YuvImage with the raw values but the Rect/pixel buffer with
 *    clamped ones);
 *  - the final clamp/repack loop was a provable no-op (each channel was
 *    already masked to 0..255, and JPEG-decoded bitmaps are opaque) — removed.
 *
 * @param image  NV21-encoded frame data
 * @param width  frame width in pixels, must be > 0
 * @param height frame height in pixels, must be > 0
 * @return ARGB pixels in row-major order, or null on any failure
 */
fun yuvToRgb(image: ByteArray, width: Int, height: Int): IntArray? {
    if (width <= 0 || height <= 0) {
        Log.e("yuvToRgb", "Invalid dimensions: ${width}x$height")
        return null
    }
    val yuvImage = YuvImage(image, ImageFormat.NV21, width, height, null)
    val jpegStream = ByteArrayOutputStream()
    if (!yuvImage.compressToJpeg(Rect(0, 0, width, height), 100, jpegStream)) {
        Log.e("yuvToRgb", "Failed to compress YUV image to JPEG")
        return null
    }
    val jpegBytes = jpegStream.toByteArray()
    val bitmap = BitmapFactory.decodeByteArray(jpegBytes, 0, jpegBytes.size)
    if (bitmap == null) {
        Log.e("yuvToRgb", "Failed to decode JPEG back into a bitmap")
        return null
    }
    val pixels = IntArray(width * height)
    bitmap.getPixels(pixels, 0, width, 0, 0, width, height)
    bitmap.recycle()
    return pixels
}
希望这会有所帮助。您也可以根据需要的类型转换返回类型。