如何让我的 Python GStreamer 应用程序正确显示网络摄像头源而不是显示乱码的绿色视频?

问题描述 投票:0回答:1

尝试构建一个非常简单的 Python 程序来打开我的 MacBook 上的网络摄像头并将其显示在屏幕上。但是,我无法获取管道的 Python 版本来显示网络摄像头视频,并且出现乱码/滚动绿线。

想了解我哪里出了问题以及如何修复该程序。

到目前为止，我的程序如下：

import os
import sys

import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstVideo', '1.0')
gi.require_version('GLib', '2.0')
gi.require_version('GObject', '2.0')
from gi.repository import GObject, Gst, GstVideo
GObject.threads_init()
Gst.init(None)

class GstCaps(object):
  def __init__(self, caps_string):
    self.caps_string = caps_string

  def __new__(cls, caps_string):
    cf = Gst.ElementFactory.make('capsfilter', None)
    caps = Gst.Caps.from_string(caps_string)
    cf.set_property('caps', caps)
    return cf


class Webcam(object):
  # macOS, open webcam and display on screen
  # ./gst-launch-1.0 -v -e
  #     avfvideosrc device-index=0 !
  #     "video/x-raw, width=1280, height=720, format=(string)YUY2, texture-target=rectangle" !
  #     rawvideoparse width=1280 height=720 format=yuy2 !
  #     queue !
  #     autovideoconvert !
  #     autovideosink

  def __init__(self, device_index: int = 0):
    self.mainloop = GObject.MainLoop()

    self.pipeline = Gst.ElementFactory.make('pipeline', 'pipeline')

    self.source = Gst.ElementFactory.make('avfvideosrc', 'source')

    self.source.set_property('device-index', device_index)
    self.caps = GstCaps('video/x-raw, width=1280, height=720, format=(string)YUY2, texture-target=rectangle')
    # self.source.set_property('caps', caps)

    self.rawvideoparse = Gst.ElementFactory.make('rawvideoparse', 'rawvideoparse')
    self.rawvideoparse.set_property('width', 1280)
    self.rawvideoparse.set_property('height', 720)
    self.rawvideoparse.set_property('format', 'yuy2')

    self.queue = Gst.ElementFactory.make('queue', 'queue')

    self.autovideoconvert = Gst.ElementFactory.make('autovideoconvert', 'autovideoconvert')

    self.autovideosink = Gst.ElementFactory.make('autovideosink', 'autovideosink')

    if (not self.pipeline or
        not self.source or
        not self.caps or
        not self.rawvideoparse or
        not self.queue or
        not self.autovideoconvert or
        not self.autovideosink
    ):
      print('ERROR: Not all elements could be created.')
      sys.exit(1)

    self.pipeline.add(self.source)
    self.pipeline.add(self.rawvideoparse)
    self.pipeline.add(self.queue)
    self.pipeline.add(self.autovideoconvert)
    self.pipeline.add(self.autovideosink)

    linked = self.source.link(self.rawvideoparse)
    linked = linked and self.rawvideoparse.link(self.queue)
    linked = linked and self.queue.link(self.autovideoconvert)
    linked = linked and self.autovideoconvert.link(self.autovideosink)

    if not linked:
      print("ERROR: Elements could not be linked")
      sys.exit(1)

    self.bus = self.pipeline.get_bus()
    self.bus.add_signal_watch()
    self.bus.connect('message::eos', self.on_eos)
    self.bus.connect('message::error', self.on_error)

  def run(self):
    self.pipeline.set_state(Gst.State.PLAYING)
    self.mainloop.run()

  def quit(self):
    self.pipeline.set_state(Gst.State.NULL)
    self.mainloop.quit()

  def on_eos(self, bus, message):
    self.quit()

  def on_error(self, bus, message):
    print(f'ERROR: {message.parse_error()}' )
    self.quit()

webcam = Webcam()
webcam.run()

通过这个程序,我得到以下视频:

当我从命令行运行管道时,我得到正确的视频输出,如下所示:

gst-launch-1.0 -v -e  avfvideosrc device-index=1 ! \
  "video/x-raw, width=1280, height=720, format=(string)YUY2, texture-target=rectangle" ! \
  rawvideoparse width=1280 height=720 format=yuy2 ! \
  queue ! \
  autovideoconvert ! \
  autovideosink

Python 程序产生的输出看起来非常像格式未正确指定时出现的管道错误——我在学习如何在命令行中搭建管道时就遇到过这种错误。

python gstreamer python-gstreamer
1个回答
0
投票

可能是发帖时已经太晚了，漏掉了一个非常明显的步骤。现在把答案发出来，以防将来有人通过谷歌搜索到这里。

我忘记把 caps 元素添加到 avfvideosrc 和 rawvideoparse 之间。完整的更新代码如下：

import os
import sys

import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstVideo', '1.0')
gi.require_version('GLib', '2.0')
gi.require_version('GObject', '2.0')
from gi.repository import GObject, Gst, GstVideo
GObject.threads_init()
Gst.init(None)

class GstCaps(object):
  def __init__(self, caps_string):
    self.caps_string = caps_string

  def __new__(cls, caps_string):
    cf = Gst.ElementFactory.make('capsfilter', None)
    caps = Gst.Caps.from_string(caps_string)
    cf.set_property('caps', caps)
    return cf


class Webcam(object):
  # macOS
  # Downscale from 720p to 360p to 180p to 90p
  # ./gst-launch-1.0 -v -e
  #     avfvideosrc device-index=0 !
  #     "video/x-raw, width=1280, height=720, format=(string)YUY2, texture-target=rectangle" !
  #     rawvideoparse width=1280 height=720 format=yuy2 !
  #     queue !
  #     autovideoconvert !
  #     autovideosink

  def __init__(self, device_index: int = 1):
    self.mainloop = GObject.MainLoop()

    self.pipeline = Gst.ElementFactory.make('pipeline', 'pipeline')

    self.source = Gst.ElementFactory.make('avfvideosrc', 'source')

    self.source.set_property('device-index', device_index)
    self.caps = GstCaps('video/x-raw, width=1280, height=720, format=(string)YUY2, texture-target=rectangle')

    self.rawvideoparse = Gst.ElementFactory.make('rawvideoparse', 'rawvideoparse')
    self.rawvideoparse.set_property('width', 1280)
    self.rawvideoparse.set_property('height', 720)
    self.rawvideoparse.set_property('format', 4)

    self.queue = Gst.ElementFactory.make('queue', 'queue')

    self.autovideoconvert = Gst.ElementFactory.make('autovideoconvert', 'autovideoconvert')

    self.autovideosink = Gst.ElementFactory.make('autovideosink', 'autovideosink')

    if (not self.pipeline or
        not self.source or
        not self.caps or
        not self.rawvideoparse or
        not self.queue or
        not self.autovideoconvert or
        not self.autovideosink
    ):
      print('ERROR: Not all elements could be created.')
      sys.exit(1)

    self.pipeline.add(self.source)
    self.pipeline.add(self.caps)              # THIS LINE WAS MISSING
    self.pipeline.add(self.rawvideoparse)
    self.pipeline.add(self.queue)
    self.pipeline.add(self.autovideoconvert)
    self.pipeline.add(self.autovideosink)

    linked = self.source.link(self.caps)                    # THIS LINE WAS MODIFIED TO LINK TO CAPS
    linked = linked and self.caps.link(self.rawvideoparse)  # THIS LINE WAS ADDED TO LINK CAPS TO RAWVIDEOPARSE
    linked = linked and self.rawvideoparse.link(self.queue)
    linked = linked and self.queue.link(self.autovideoconvert)
    linked = linked and self.autovideoconvert.link(self.autovideosink)

    if not linked:
      print("ERROR: Elements could not be linked")
      sys.exit(1)

    self.bus = self.pipeline.get_bus()
    self.bus.add_signal_watch()
    self.bus.connect('message::eos', self.on_eos)
    self.bus.connect('message::error', self.on_error)
    # self.bus.connect('message', self.on_message)

  def run(self):
    self.pipeline.set_state(Gst.State.PLAYING)
    self.mainloop.run()

  def quit(self):
    self.pipeline.set_state(Gst.State.NULL)
    self.mainloop.quit()

  def on_eos(self, bus, message):
    self.quit()

  def on_error(self, bus, message):
    print(f'ERROR: {message.parse_error()}' )
    self.quit()

  def on_message(self, bus, message):
    print(f'MESSAGE: {format(message)}')

webcam = Webcam()
webcam.run()
© www.soinside.com 2019 - 2024. All rights reserved.