我正在使用 Qualcomm RB5 开发套件以及两个 OV9282 MIPI 相机。不知何故,我无法将 GStreamer 与 OpenCV 结合使用来访问这些立体相机。有谁知道如何使用 HAL3 + OpenCV?没有这方面的基本教程。我被这个问题困住了。请帮助我。
我已经尝试使用 Gstreamer 管道使用以下代码访问这些摄像头。
# --- Question's script: two display pipelines plus two VideoCapture objects ---
import cv2
from threading import Thread
from time import sleep
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst, GLib

Gst.init(None)
# Run the GLib main loop on a background thread so GStreamer bus
# messages keep being serviced while the OpenCV loop below runs.
main_loop = GLib.MainLoop()
thread = Thread(target=main_loop.run)
thread.start()

# NOTE(review): both pipelines terminate in waylandsink (a display sink).
# They render to the screen but expose no frames to the application.
pipeline_str = """
qtiqmmfsrc camera=1 ! video/x-raw, format=NV12, width=1280, height=720, framerate=15/1 ! videoconvert ! waylandsink
"""
pipeline = Gst.parse_launch(pipeline_str)
pipeline.set_state(Gst.State.PLAYING)

pipeline_str2 = """
qtiqmmfsrc camera=3 ! video/x-raw, format=NV12, width=1280, height=720, framerate=15/1 ! videoconvert ! waylandsink
"""
pipeline2 = Gst.parse_launch(pipeline_str2)
pipeline2.set_state(Gst.State.PLAYING)

# NOTE(review): OpenCV's GStreamer backend requires a pipeline that ends
# in an appsink (usually after conversion to BGR). Reusing the waylandsink
# pipeline strings here means cap.read() cannot deliver frames — this is
# the failure the question describes.
cap = cv2.VideoCapture(pipeline_str, cv2.CAP_GSTREAMER)
cap2 = cv2.VideoCapture(pipeline_str2, cv2.CAP_GSTREAMER)

num = 0  # running index for saved stereo image pairs
while True:
    succes1, img = cap.read()
    succes2, img2 = cap2.read()
    if not succes1 or not succes2:
        break
    cv2.imshow('Img 1', img)
    cv2.imshow('Img 2', img2)
    k = cv2.waitKey(5)
    if k == 27:  # ESC quits the loop
        break
    elif k == ord('s'):  # 's' saves the current stereo pair
        cv2.imwrite('images/stereoLeft/imageL{}.png'.format(num), img)
        cv2.imwrite('images/stereoRight/imageR{}.png'.format(num), img2)
        print('images saved!')
        num += 1

# Tear down captures, windows, pipelines and the GLib loop.
cap.release()
cap2.release()
cv2.destroyAllWindows()
pipeline.set_state(Gst.State.NULL)
pipeline2.set_state(Gst.State.NULL)
main_loop.quit()
它只显示一个摄像头,摄像头流没有被输入到带有 VideoCapture 功能的代码的其余部分。我不知道它有什么问题。因此,我想看看是否有其他方法可以使用 HAL3/Camera2 API 访问相机。
你的代码有两个问题:管道以 waylandsink(显示 sink)结尾而不是 appsink,OpenCV 的 VideoCapture 无法从这样的管道读取帧;而且管道没有把画面转换成 OpenCV 期望的 BGR 格式。先用 videotestsrc 验证一个以 appsink 结尾的管道能否正常工作:
第一次尝试:
# Sanity-check script: open two synthetic GStreamer pipelines through
# OpenCV's GStreamer backend. Each pipeline ends in an appsink after
# converting to BGR, which is what cv2.VideoCapture requires.
import cv2
from time import sleep

PIPELINE_LEFT = """
videotestsrc ! video/x-raw, format=NV12, width=1280, height=720, framerate=15/1 ! videoconvert ! video/x-raw,format=BGR ! appsink drop=1
"""
PIPELINE_RIGHT = """
videotestsrc pattern=ball ! video/x-raw, format=NV12, width=1280, height=720, framerate=15/1 ! videoconvert ! video/x-raw,format=BGR ! appsink drop=1
"""


def _open_capture(description, error_message, error_code):
    # Open a GStreamer-backed capture or abort the script with the
    # given message and exit code.
    capture = cv2.VideoCapture(description, cv2.CAP_GSTREAMER)
    if not capture.isOpened():
        print(error_message)
        exit(error_code)
    return capture


capture_left = _open_capture(PIPELINE_LEFT, 'Failed to open capture', -1)
capture_right = _open_capture(PIPELINE_RIGHT, 'Failed to open capture2', -2)

frame_index = 0  # index for saved stereo pairs
while True:
    ok_left, frame_left = capture_left.read()
    ok_right, frame_right = capture_right.read()
    if not (ok_left and ok_right):
        break
    cv2.imshow('Img 1', frame_left)
    cv2.imshow('Img 2', frame_right)
    key = cv2.waitKey(5)
    if key == 27:  # ESC quits
        break
    if key == ord('s'):  # 's' saves the current stereo pair
        cv2.imwrite(f'images/stereoLeft/imageL{frame_index}.png', frame_left)
        cv2.imwrite(f'images/stereoRight/imageR{frame_index}.png', frame_right)
        print('images saved!')
        frame_index += 1

capture_left.release()
capture_right.release()
cv2.destroyAllWindows()
如果这段代码有效,再把 videotestsrc 源替换成你的相机源(qtiqmmfsrc)。
编辑: 目前还不清楚你的 gstreamer 堆栈是什么,只有相机源和 waylandsink。如果你没有 gstreamer appsink,你可能无法使用依赖它的 opencv VideoCapture。
然而,在这种情况下,您可以尝试将 src pad 探针添加到相机插件。从回调中,您可以获取缓冲区数据并将其放入 opencv mat 或 numpy 数组中以供进一步处理。您可以将它们推入 fifos 或循环缓冲区以供应用程序获取,检查 PTS 以在相机之间进行同步:
回调函数:
def probe_callback(pad, info):
    """Buffer probe for the camera source pad.

    Maps each GstBuffer, wraps the raw NV12 bytes in a numpy array and
    converts them to BGR for OpenCV processing.

    Args:
        pad:  the Gst.Pad the probe is attached to (camera src pad).
        info: Gst.PadProbeInfo carrying the buffer being pushed.

    Returns:
        Gst.PadProbeReturn.OK so the buffer continues downstream.
    """
    gst_buffer = info.get_buffer()
    print(' PTS:', gst_buffer.pts)
    # Derive the frame geometry from the negotiated caps instead of
    # hard-coding it, so the probe works at any resolution. Fall back to
    # the original 1280x720 if caps are not (yet) available.
    width, height = 1280, 720
    caps = pad.get_current_caps()
    if caps is not None and caps.get_size() > 0:
        structure = caps.get_structure(0)
        ok_w, cap_width = structure.get_int('width')
        ok_h, cap_height = structure.get_int('height')
        if ok_w and ok_h:
            width, height = cap_width, cap_height
    ret, mapinfo = gst_buffer.map(Gst.MapFlags.READ)
    if not ret:
        print('ERROR: Failed to map buffer')
        return Gst.PadProbeReturn.OK
    try:
        # NV12 layout: a full-resolution Y plane followed by an interleaved
        # UV plane at quarter resolution -> height * 3/2 rows of `width` bytes.
        # NOTE(review): assumes the buffer has no per-row stride padding —
        # verify for your camera; padded buffers would make this reshape fail.
        frameNV12 = np.frombuffer(mapinfo.data, dtype=np.uint8)
        frameNV12.shape = (height * 3 // 2, width, 1)
        # Convert NV12 into BGR for OpenCV.
        frameBGR = cv2.cvtColor(frameNV12, cv2.COLOR_YUV2BGR_NV12)
        # Here you would push the frame into a queue or circular buffer for
        # the application; attach gst_buffer.pts to synchronize cameras.
        # Writing to disk is possible but may be slow depending on your hw:
        #cv2.imwrite("test.png", frameBGR)
        del frameBGR
        del frameNV12
    finally:
        # Always unmap, even if the conversion above raises, so the mapped
        # buffer is never leaked.
        gst_buffer.unmap(mapinfo)
    return Gst.PadProbeReturn.OK
在开始流水线之前,将探针添加到相机源插件的src pad:
# Fetch the camera element's static "src" pad; `source` is the qtiqmmfsrc
# element obtained elsewhere (e.g. via pipeline.get_by_name(...)).
srcpad = source.get_static_pad('src')
# Attach the buffer probe before starting the pipeline; keep the returned
# id so the probe can later be removed with srcpad.remove_probe(probeID).
probeID = srcpad.add_probe(Gst.PadProbeType.BUFFER, probe_callback)