我编写了下面这段代码,也尝试了其他一些办法,但始终无法实现这个场景。我该怎么办?问题出在我的代码、实现方法,还是库本身?
#include <gst/gst.h>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <gst/app/gstappsrc.h>
int main(int argc, char* argv[]) {
// Initialize GStreamer (must precede any other GStreamer call).
gst_init(&argc, &argv);
// Capture pipeline: webcam -> raw BGR frames -> appsink.
// NOTE(review): the appsink is unnamed and nothing below ever pulls
// samples from it, so this pipeline's frames are never consumed.
GstElement* pipeline = gst_parse_launch("v4l2src device=/dev/video0 ! videoconvert ! video/x-raw,format=BGR ! appsink", NULL);
// Check if the pipeline was created successfully
if (!pipeline) {
g_print("Pipeline could not be created. Exiting.\n");
return -1;
}
// Set the pipeline state to playing
GstStateChangeReturn ret = gst_element_set_state(pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_print("Unable to set the pipeline to the playing state. Exiting.\n");
return -1;
}
// OpenCV VideoCapture for video capture
// NOTE(review): this open() is what produces the reported
// "cannot query video width/height" warnings:
//  - no cv::CAP_GSTREAMER apiPreference is passed, so the backend is guessed;
//  - "appsink ! udpsink" is invalid: appsink is a sink element, nothing may
//    follow it, and a VideoCapture pipeline must itself END in appsink;
//  - a sending pipeline (appsrc ... udpsink) belongs in cv::VideoWriter,
//    not in VideoCapture, which only reads frames.
cv::VideoCapture cap;
if (!cap.open("appsrc ! videoconvert ! video/x-raw, format=YUY2,width=640,height=480,framerate=25/1 ! jpegenc ! rtpjpegpay ! appsink ! udpsink host=192.168.1.14 port=5000")) {
std::cerr << "Error: Could not open OpenCV VideoCapture." << std::endl;
return -1;
}
cv::Mat frame;
cv::Scalar text_color(0, 255, 0); // Text color (BGR)
int font = cv::FONT_HERSHEY_SIMPLEX;
double font_scale = 1.0;
int font_thickness = 2;
std::string text = "Hello, GStreamer!";
while (true) {
// NOTE(review): cap was opened on a pipeline with no live source feeding
// its appsrc, so this read() cannot deliver camera frames.
if (!cap.read(frame)) {
std::cerr << "Error: Could not read frame from OpenCV VideoCapture." << std::endl;
break;
}
// Add text to the video frame using OpenCV
cv::putText(frame, text, cv::Point(10, 50), font, font_scale, text_color, font_thickness);
// Display the processed frame using OpenCV
cv::imshow("Processed Frame", frame);
cv::waitKey(1);
// Copy the OpenCV frame's pixel data into a newly allocated GstBuffer.
GstBuffer* buffer = gst_buffer_new_allocate(NULL, frame.total() * frame.elemSize(), NULL);
GstMapInfo map_info;
gst_buffer_map(buffer, &map_info, GST_MAP_WRITE);
memcpy(map_info.data, frame.data, frame.total() * frame.elemSize());
gst_buffer_unmap(buffer, &map_info);
// Push the frame into the GStreamer pipeline.
// NOTE(review): the pipeline built above contains no element named
// "appsrc", so gst_bin_get_by_name() returns NULL and GST_APP_SRC(NULL)
// is what triggers the GStreamer-CRITICAL assertions in the error log.
// Also note this inner 'ret' shadows the outer GstStateChangeReturn.
GstFlowReturn ret = gst_app_src_push_buffer(GST_APP_SRC(gst_bin_get_by_name(GST_BIN(pipeline), "appsrc")), buffer);
if (ret != GST_FLOW_OK) {
g_print("Error pushing frame to GStreamer pipeline. Exiting.\n");
break;
}
}
// Clean up
cv::destroyAllWindows();
gst_element_set_state(pipeline, GST_STATE_NULL);
gst_object_unref(pipeline);
return 0;
}
我收到此错误:
(gstremear_and_opencv_driver:19685): GStreamer-CRITICAL **: 15:24:59.492: gst_caps_get_structure: assertion 'GST_IS_CAPS (caps)' failed
(gstremear_and_opencv_driver:19685): GStreamer-CRITICAL **: 15:24:59.492: gst_structure_get_int: assertion 'structure != NULL' failed
[ WARN:0@0.492] global cap_gstreamer.cpp:1707 open OpenCV | GStreamer warning: cannot query video width/height
(gstremear_and_opencv_driver:19685): GStreamer-CRITICAL **: 15:24:59.492: gst_structure_get_fraction: assertion 'structure != NULL' failed
[ WARN:0@0.492] global cap_gstreamer.cpp:1713 open OpenCV | GStreamer warning: cannot query video fps
[ WARN:0@0.492] global cap_gstreamer.cpp:1728 open OpenCV | GStreamer warning: unable to query video position: status=0, value=-1, duration=-1
我尝试使用 opencv 在网络摄像头视频上编写文本并使用 udp gst 库发送。
我不确定你的确切目标是什么,但假设你想从相机 0 读取帧(这里假设它默认输出原始视频;否则请先用 v4l2-ctl -d0 --list-formats-ext 查看支持的格式,并在捕获管道中的 v4l2src 之后显式指定视频 caps),在帧上叠加一段文字,再把结果编码为 RTP/JPEG 通过 UDP 发送出去,那么可以先用下面这条命令验证整个链路:
gst-launch-1.0 v4l2src device=/dev/video0 ! videoscale ! video/x-raw,width=640,height=480,pixel-aspect-ratio=1/1 ! videoconvert ! videorate ! video/x-raw,framerate=25/1 ! textoverlay text="Hello, Gstreamer" valignment=2 deltax=-200 color=4278255360 ! queue ! videoconvert ! video/x-raw,format=YUY2 ! jpegenc ! rtpjpegpay ! udpsink host=127.0.0.1 port=5000
#include <iostream>
#include <string>

#include <opencv2/opencv.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
int main(int argc, char* argv[]) {
    // Capture pipeline: webcam -> scale to 640x480 -> BGR @ 25 fps -> appsink.
    // drop=1 discards stale frames when processing falls behind the camera.
    cv::VideoCapture cap("v4l2src device=/dev/video0 ! videoscale ! video/x-raw,width=640,height=480,pixel-aspect-ratio=1/1 ! videoconvert ! video/x-raw,format=BGR ! videorate ! video/x-raw,framerate=25/1 ! appsink drop=1", cv::CAP_GSTREAMER);
    if (!cap.isOpened()) {
        std::cerr << "Error failed to open camera" << std::endl;
        return -1;
    }

    // Query the caps actually negotiated so the writer pipeline matches the input.
    unsigned int width = (unsigned int) cap.get(cv::CAP_PROP_FRAME_WIDTH);
    unsigned int height = (unsigned int) cap.get(cv::CAP_PROP_FRAME_HEIGHT);
    double fps = cap.get(cv::CAP_PROP_FPS);
    if (fps <= 0.0) {
        fps = 25.0; // some V4L2 devices report 0 fps — fall back to the requested rate
    }
    std::cout << "Capture input opened, framing: " << width << " x " << height << " @" << fps << " FPS" << std::endl;

    // OpenCV VideoWriter for RTP/JPEG streaming to client 192.168.1.14 over UDP/5000.
    // fourcc 0 tells the GStreamer backend to feed raw frames straight into appsrc.
    cv::VideoWriter streamer("appsrc ! queue ! video/x-raw,format=BGR ! videoconvert ! video/x-raw,format=YUY2 ! jpegenc ! rtpjpegpay ! udpsink host=192.168.1.14 port=5000", cv::CAP_GSTREAMER, 0, (float)fps, cv::Size(width, height));
    if (!streamer.isOpened()) {
        std::cerr << "Error: Failed to open streamer writer." << std::endl;
        return -2;
    }
    std::cout << "streamer opened, ready for streaming to client 192.168.1.14 over UDP/5000" << std::endl;

    // Text-overlay settings.
    cv::Scalar text_color(0, 255, 0); // Text color (BGR) -> green
    int font = cv::FONT_HERSHEY_SIMPLEX;
    double font_scale = 1.0;
    int font_thickness = 2;
    std::string text = "Hello, GStreamer!";

    cv::Mat frame;
    while (true) {
        // Also reject empty mats: read() can succeed yet deliver no data on EOS.
        if (!cap.read(frame) || frame.empty()) {
            std::cerr << "Error: Could not read frame from OpenCV VideoCapture." << std::endl;
            break;
        }
        // Draw the overlay text, then push the frame into the UDP stream.
        cv::putText(frame, text, cv::Point(10, 50), font, font_scale, text_color, font_thickness);
        streamer.write(frame);
        // Local preview; ESC exits cleanly instead of requiring a kill signal.
        cv::imshow("Processed Frame", frame);
        if (cv::waitKey(1) == 27) {
            break;
        }
    }

    // cap and streamer release their pipelines in their destructors (RAII).
    cv::destroyAllWindows();
    return 0;
}