#include <iostream>
#include <gst/gst.h>
#include <gst/app/gstappsink.h>
// "new-sample" signal handler for appsink: pulls one decoded frame,
// maps its buffer read-only for per-frame analysis, then releases
// everything. Returns GST_FLOW_OK on success, GST_FLOW_EOS at end of
// stream, GST_FLOW_ERROR on a real failure.
static GstFlowReturn cb_new_sample(GstAppSink *appsink, gpointer user_data) {
// Pull the next sample. pull_sample() returns NULL both on error and at
// end-of-stream; EOS is not an error, so report it as GST_FLOW_EOS
// instead of tearing the pipeline down with GST_FLOW_ERROR.
GstSample *sample = gst_app_sink_pull_sample(appsink);
if (!sample) {
if (gst_app_sink_is_eos(appsink)) {
return GST_FLOW_EOS;
}
std::cerr << "Failed to pull sample" << std::endl;
return GST_FLOW_ERROR;
}
// The sample owns the buffer; do not unref the buffer separately.
GstBuffer *buffer = gst_sample_get_buffer(sample);
if (!buffer) {
std::cerr << "Failed to get buffer" << std::endl;
gst_sample_unref(sample);
return GST_FLOW_ERROR;
}
// Map the buffer to get a CPU-readable view of the raw frame data.
GstMapInfo map_info;
if (!gst_buffer_map(buffer, &map_info, GST_MAP_READ)) {
std::cerr << "Failed to map buffer" << std::endl;
gst_sample_unref(sample);
return GST_FLOW_ERROR;
}
// Perform frame processing on map_info.data / map_info.size
// ...
// Release resources: unmap before unreffing the owning sample.
gst_buffer_unmap(buffer, &map_info);
gst_sample_unref(sample);
return GST_FLOW_OK;
}
// decodebin "pad-added" handler: links the dynamically created source pad
// straight to videoconvert's sink pad.
//
// The original code created a ghost pad here, but ghost pads only forward
// pads across a GstBin boundary; the ghost pad was never added to any
// element and was destroyed by the final unref, so decodebin was never
// actually connected to videoconvert and no data flowed. A plain
// gst_pad_link() is the correct operation inside a single pipeline.
static void cb_decodebin_pad_added(GstElement *decodebin, GstPad *pad, gpointer user_data) {
// Retrieve the downstream element from the user data
GstElement *videoconvert = static_cast<GstElement*>(user_data);
GstPad *sink_pad = gst_element_get_static_pad(videoconvert, "sink");
// decodebin may also expose audio pads; only link while the sink is free.
if (!gst_pad_is_linked(sink_pad)) {
if (GST_PAD_LINK_FAILED(gst_pad_link(pad, sink_pad))) {
g_printerr("Failed to link decodebin pad to videoconvert\n");
}
}
gst_object_unref(sink_pad);
}
// Bus watch callback: logs ERROR, EOS and WARNING messages posted on the
// pipeline bus. Returning TRUE keeps the watch installed so further
// messages are delivered.
static gboolean cb_message(GstBus *bus, GstMessage *message, gpointer user_data) {
const GstMessageType msg_type = GST_MESSAGE_TYPE(message);
if (msg_type == GST_MESSAGE_ERROR) {
GError *error = NULL;
gchar *debug_info = NULL;
gst_message_parse_error(message, &error, &debug_info);
g_printerr("Error: %s\n", error->message);
g_printerr("Debug info: %s\n", debug_info ? debug_info : "none");
g_clear_error(&error);
g_free(debug_info);
} else if (msg_type == GST_MESSAGE_EOS) {
g_print("End of stream\n");
} else if (msg_type == GST_MESSAGE_WARNING) {
// Only the debug string is requested; the GError slot is left NULL.
gchar *debug_info = NULL;
gst_message_parse_warning(message, NULL, &debug_info);
g_printerr("Warning: %s\n", debug_info ? debug_info : "none");
g_free(debug_info);
}
return TRUE;
}
int main() {
// Initialize GStreamer
gst_init(NULL, NULL);
// Create the pipeline
GstElement *pipeline = gst_pipeline_new("video-pipeline");
// Create the elements
GstElement *filesrc = gst_element_factory_make("filesrc", "file-source");
GstElement *decodebin = gst_element_factory_make("decodebin", "decoder");
GstElement *videoconvert = gst_element_factory_make("videoconvert", "video-converter");
GstElement *queue = gst_element_factory_make("queue", "video-queue");
GstElement *appsink = gst_element_factory_make("appsink", "sink");
if (!pipeline || !filesrc || !decodebin || !videoconvert || !queue || !appsink) {
std::cerr << "One or more elements could not be created. Exiting." << std::endl;
return -1;
}
// Set the properties
g_object_set(G_OBJECT(filesrc), "location", "1.mp4", NULL);
g_object_set(G_OBJECT(appsink), "emit-signals", TRUE, "sync", FALSE, NULL);
// Add the elements to the pipeline
gst_bin_add_many(GST_BIN(pipeline), filesrc, decodebin, videoconvert, queue, appsink, NULL);
// Link the elements
if (!gst_element_link(filesrc, decodebin)) {
std::cerr << "Failed to link filesrc and decodebin" << std::endl;
gst_object_unref(pipeline);
return -1;
}
// Connect decodebin pad-added signal
g_signal_connect(decodebin, "pad-added", G_CALLBACK(cb_decodebin_pad_added), videoconvert);
// Link videoconvert, queue, and appsink
if (!gst_element_link_many(videoconvert, queue, appsink, NULL)) {
std::cerr << "Failed to link videoconvert, queue, and appsink" << std::endl;
gst_object_unref(pipeline);
return -1;
}
// Set the appsink callback function
g_signal_connect(appsink, "new-sample", G_CALLBACK(cb_new_sample), NULL);
// Set the pipeline state to playing
gst_element_set_state(pipeline, GST_STATE_PLAYING);
// Register a message callback function to handle pipeline messages
GstBus *bus = gst_element_get_bus(pipeline);
gst_bus_add_watch(bus, cb_message, NULL);
gst_object_unref(bus);
// Wait for the pipeline to finish
GstMessage *error_message = gst_bus_timed_pop_filtered(gst_element_get_bus(pipeline), GST_CLOCK_TIME_NONE, GST_MESSAGE_ERROR);
GstMessage *eos_message = gst_bus_timed_pop_filtered(gst_element_get_bus(pipeline), GST_CLOCK_TIME_NONE, GST_MESSAGE_EOS);
// Handle pipeline errors
if (error_message) {
GError *error;
gchar *debug_info;
gst_message_parse_error(error_message, &error, &debug_info);
g_printerr("Error: %s\n", error->message);
g_printerr("Debug info: %s\n", debug_info ? debug_info : "none");
g_clear_error(&error);
g_free(debug_info);
gst_message_unref(error_message);
}
// Handle end of stream
if (eos_message) {
g_print("End of stream\n");
gst_message_unref(eos_message);
}
// Release resources
gst_element_set_state(pipeline, GST_STATE_NULL);
gst_object_unref(pipeline);
return 0;
}
现在可以编译并运行代码,但无法显示视频文件。我的视频文件和代码都在同一个目录下,并且我的所有gstreamer插件都已安装。 我的代码运行后一直运行没有任何反应,就像被阻塞一样。我希望他读取视频流,然后使用 appsink 逐帧分析。
现在我该怎么做才能让他正确获取视频流并播放并让sink解析每一帧? 还可以获取rtsp进行分析
管道可以进入播放状态,但之后似乎没有执行任何操作,可能与您之后添加的事件的过滤器相关。
您已经在创建添加总线监视来处理这些。
cb_message
回调已经在检查这些事件,因此之后无需过滤它们。您还缺少 g_main_loop
才能使其正常运行。我建议查看 gstreamer 文档中的应用程序示例中的
bus_watch
是如何完成的
只需将decodebin pad正常链接到videoconvert pad即可。
static void cb_decodebin_pad_add(GstElement *decodebin, GstPad *pad, gpointer user_data) {
// 从用户数据中检索接收器元素
GstElement *videoconvert = static_cast<GstElement*>(user_data);
GstPad *sink_pad = gst_element_get_static_pad(videoconvert, "sink");
gst_pad_link(pad, sink_pad);
gst_object_unref(sink_pad);
}