在 C++ 中使用 WebRTC 通过自定义 AudioDeviceModule 发送音频时遇到问题

问题描述 投票:0回答:1

我正在尝试使用 C++ 为虚幻引擎实现 WebRTC 插件,但在通过我创建的自定义 AudioDeviceModule 发送音频时遇到问题。

问题很简单:我可以看到对等端之间已建立连接,也可以看到虚幻引擎捕获音频并通过 AudioTransport->RecordedDataIsAvailable() 将其传递给 ADM,但传递给 AudioTransport 的音频并没有被发送出去——无论是在对端还是在 Wireshark 中,都看不到任何音频数据的传输。

这是我的 ADM 代码:


// Construct the ADM with its Unreal-side audio capturer. The capturer is the
// bridge that later pushes captured PCM into WebRTC's AudioTransport.
FCustomAudioDeviceModule::FCustomAudioDeviceModule()
{
    AudioCapturer = MakeUnique<FWebRTCAudioCaptureComponent>();
}

// Nothing to release manually: AudioCapturer is a TUniquePtr and cleans
// itself up when the module is destroyed.
FCustomAudioDeviceModule::~FCustomAudioDeviceModule()
{

}

// Retrieve the currently utilized audio layer.
// Reports kDummyAudio: no platform audio backend (WASAPI/CoreAudio/etc.) is
// in use — audio I/O is driven entirely by this custom module.
int32_t FCustomAudioDeviceModule::ActiveAudioLayer(webrtc::AudioDeviceModule::AudioLayer* audioLayer) const 
{
    *audioLayer = webrtc::AudioDeviceModule::AudioLayer::kDummyAudio;
    return 0;
}

// Full-duplex transportation of PCM audio.
// WebRTC hands us its AudioTransport here; we forward it to the capturer,
// which will later invoke RecordedDataIsAvailable() on it with captured PCM.
int32_t FCustomAudioDeviceModule::RegisterAudioCallback(webrtc::AudioTransport* audioCallback) 
{
    AudioCapturer->RegisterAudioTransport(audioCallback);
    return 0;
}

// Main initialization and termination
// Initializes the underlying capturer and marks the module ready.
// Always reports success (0).
int32_t FCustomAudioDeviceModule::Init() 
{
    AudioCapturer->Init();
    bIsInitialized = true;
    return 0;
}

int32_t FCustomAudioDeviceModule::Terminate() 
{
    bIsInitialized = false;
    return 0;
}

// True once Init() has run and Terminate() has not.
bool FCustomAudioDeviceModule::Initialized() const 
{
    return bIsInitialized;
}

// Device enumeration.
// This ADM exposes no enumerable hardware devices, so every query is stubbed.
// NOTE(review): -1 is WebRTC's conventional error return here; a virtual ADM
// usually reports 0 (or 1 fake) devices instead — confirm how the consuming
// WebRTC version treats -1 before relying on this.
int16_t FCustomAudioDeviceModule::PlayoutDevices() 
{
    return -1;
}

int16_t FCustomAudioDeviceModule::RecordingDevices() 
{
    return -1;
}

// Name/GUID lookups are unsupported — there are no real devices to describe.
int32_t FCustomAudioDeviceModule::PlayoutDeviceName(uint16_t index, char name[webrtc::kAdmMaxDeviceNameSize], char guid[webrtc::kAdmMaxGuidSize]) 
{
    return -1;
}

int32_t FCustomAudioDeviceModule::RecordingDeviceName(uint16_t index, char name[webrtc::kAdmMaxDeviceNameSize], char guid[webrtc::kAdmMaxGuidSize]) 
{
    return -1;
}

// Device selection.
// Accepted as no-ops for interface compatibility: there is no real playout
// device to select, but returning success keeps WebRTC's init path happy.
int32_t FCustomAudioDeviceModule::SetPlayoutDevice(uint16_t index) 
{
    return 0;
}

int32_t FCustomAudioDeviceModule::SetPlayoutDevice(WindowsDeviceType device) 
{
    return 0;
}

// Audio transport initialization — playout side.
// This module is send-only (capture from UE), so playout is deliberately
// reported unavailable and its init path treated as a programming error.
int32_t FCustomAudioDeviceModule::PlayoutIsAvailable(bool* available) 
{
    LOG_MESSAGE("ADM was queried for PlayoutIsAvailable.");
    *available = false;
    return 0;
}

// Should be unreachable: PlayoutIsAvailable() reports false, so WebRTC is not
// expected to initialize playout on this ADM.
int32_t FCustomAudioDeviceModule::InitPlayout() 
{
    LOG_ERROR("InitPlayout shouldn't have been called!");
    return -1;
}

bool FCustomAudioDeviceModule::PlayoutIsInitialized() const 
{
    return false;
}

// Audio transport control — playout side.
// Send-only ADM: starting/stopping playout is treated as a caller error, and
// Playing() is permanently false.
int32_t FCustomAudioDeviceModule::StartPlayout() 
{
    LOG_ERROR("StartPlayout shouldn't have been called!");
    return -1;
}

int32_t FCustomAudioDeviceModule::StopPlayout() 
{
    LOG_ERROR("StopPlayout shouldn't have been called!");
    return -1;
}

bool FCustomAudioDeviceModule::Playing() const
{
    return false;
}

// Recording-device selection: accepted as no-ops — capture always comes from
// the single Unreal-side capturer, regardless of the requested device.
int32_t FCustomAudioDeviceModule::SetRecordingDevice(uint16_t index)
{
    return 0;
}

int32_t FCustomAudioDeviceModule::SetRecordingDevice(WindowsDeviceType device)
{
    return 0;
}

// Reports whether recording can be started on this ADM.
// Fix: the original wrote *available = false, which contradicts the module's
// whole purpose (it captures UE audio) and is inconsistent with
// InitRecording()/StartRecording() succeeding. Report true so WebRTC's
// availability checks agree with the rest of the recording path.
int32_t FCustomAudioDeviceModule::RecordingIsAvailable(bool* available)
{
    LOG_MESSAGE("ADM was queried for RecordingIsAvailable.");
    *available = true;
    return 0;
}

// Prepares the capture path. Idempotent: only (re)initializes the capturer
// when it is not already set up, so repeated calls are safe.
int32_t FCustomAudioDeviceModule::InitRecording()
{
    LOG_MESSAGE("ADM InitRecording");
    if (!AudioCapturer->IsInitialized())
    {
        AudioCapturer->Init();
    }

    return 0;
}

// Recording is "initialized" exactly when the underlying capturer is.
bool FCustomAudioDeviceModule::RecordingIsInitialized() const
{
    return AudioCapturer->IsInitialized();
}


// Starts pushing captured UE audio into the registered AudioTransport.
// NOTE(review): the StartCapturing()/StopCapturing() results are ignored and
// success is reported unconditionally — confirm those calls cannot fail, or
// propagate their status.
int32_t FCustomAudioDeviceModule::StartRecording() 
{
    LOG_MESSAGE("ADM StartRecording");
    AudioCapturer->StartCapturing();
    bIsRecording = true;
    return 0;
}

int32_t FCustomAudioDeviceModule::StopRecording() 
{
    LOG_MESSAGE("ADM StopRecording");
    AudioCapturer->StopCapturing();
    bIsRecording = false;
    return 0;
}

// True between StartRecording() and StopRecording().
bool FCustomAudioDeviceModule::Recording() const 
{
    return bIsRecording;
}

// Audio mixer initialization.
// NOTE(review): these stubs are asymmetric — InitSpeaker() reports success
// yet SpeakerIsInitialized() is always false, while MicrophoneIsInitialized()
// is always true regardless of InitMicrophone(). Verify WebRTC's init path
// tolerates this before changing behavior.
int32_t FCustomAudioDeviceModule::InitSpeaker() 
{
    return 0;
}

bool FCustomAudioDeviceModule::SpeakerIsInitialized() const 
{
    return false;
}

int32_t FCustomAudioDeviceModule::InitMicrophone() 
{
    return 0;
}

bool FCustomAudioDeviceModule::MicrophoneIsInitialized() const 
{
    return true;
}

// Speaker (render-side) volume control: unsupported in this capture-only ADM.
// NOTE(review): the previous "// Operation successful" comments were wrong —
// -1 is WebRTC's error/unsupported return, not success.
int32_t FCustomAudioDeviceModule::SpeakerVolumeIsAvailable(bool* available) {
    *available = false;  // Indicate that volume control is not available
    return -1;  // Not supported (render side is disabled in this ADM)
}

int32_t FCustomAudioDeviceModule::SetSpeakerVolume(uint32_t volume) {
    return -1;  // Operation not supported
}

int32_t FCustomAudioDeviceModule::SpeakerVolume(uint32_t* volume) const {
    // *volume is intentionally left unwritten; callers must honor the -1 error.
    return -1;  // Not supported
}

int32_t FCustomAudioDeviceModule::MaxSpeakerVolume(uint32_t* maxVolume) const {
    return -1;  // Not supported; *maxVolume intentionally left unwritten
}

int32_t FCustomAudioDeviceModule::MinSpeakerVolume(uint32_t* minVolume) const {
    return -1;  // Not supported; *minVolume intentionally left unwritten
}

// Microphone volume control. There is no real microphone behind this ADM, so
// volume control is reported unavailable and the getters return neutral
// values. Fix: the original returned success (0) from three of these while
// leaving the out-parameter uninitialized — callers reading *available,
// *volume, or *minVolume after a success return would read garbage. Every
// out-parameter is now written before returning.
int32_t FCustomAudioDeviceModule::MicrophoneVolumeIsAvailable(bool* available) {
    *available = false;  // No adjustable mic volume on the virtual capture path.
    return 0;
}

int32_t FCustomAudioDeviceModule::SetMicrophoneVolume(uint32_t volume) {
    return 0;  // Accepted but ignored — there is no device to apply it to.
}

int32_t FCustomAudioDeviceModule::MicrophoneVolume(uint32_t* volume) const {
    *volume = 0;  // Fixed neutral value; was previously left uninitialized.
    return 0;
}

int32_t FCustomAudioDeviceModule::MaxMicrophoneVolume(uint32_t* maxVolume) const {
    *maxVolume = FWebRTCAudioCaptureComponent::MaxVolumeLevel;
    return 0;
}

int32_t FCustomAudioDeviceModule::MinMicrophoneVolume(uint32_t* minVolume) const {
    *minVolume = 0;  // Was previously left uninitialized.
    return 0;
}

// Mute control: unsupported on both the speaker and microphone sides.
// Out-parameters are written defensively even though -1 (error/unsupported)
// is returned. NOTE(review): the previous "// Operation successful" comment
// on MicrophoneMute was wrong — -1 signals unsupported, not success.
int32_t FCustomAudioDeviceModule::SpeakerMuteIsAvailable(bool* available) {
    *available = false;  // Indicate that mute control is not available
    return -1;
}

int32_t FCustomAudioDeviceModule::SetSpeakerMute(bool enable) {
    return -1;
}

int32_t FCustomAudioDeviceModule::SpeakerMute(bool* enabled) const {
    *enabled = false;  // Speaker is not muted
    return -1;
}

int32_t FCustomAudioDeviceModule::MicrophoneMuteIsAvailable(bool* available) {
    *available = false;  // Indicate that mute control is not available
    return -1;
}

int32_t FCustomAudioDeviceModule::SetMicrophoneMute(bool enable) {
    return -1;
}

int32_t FCustomAudioDeviceModule::MicrophoneMute(bool* enabled) const {
    *enabled = false;  // Microphone is not muted
    return -1;  // Not supported
}

// Stereo support.
// Playout (disabled anyway) is reported mono; recording is reported stereo.
// IMPORTANT: the 2-channel recording config must match the frame size the
// capturer passes to AudioTransport::RecordedDataIsAvailable() — a 10 ms
// frame is samplesPerSec/100 * nChannels samples. (Forgetting the channel
// multiplier was the actual cause of the "no audio sent" symptom here.)
int32_t FCustomAudioDeviceModule::StereoPlayoutIsAvailable(bool* available) const 
{
    *available = false;
    return 0;
}

int32_t FCustomAudioDeviceModule::SetStereoPlayout(bool enable) 
{
    return 0;
}

int32_t FCustomAudioDeviceModule::StereoPlayout(bool* enabled) const 
{
    *enabled = false;
    return 0;
}
int32_t FCustomAudioDeviceModule::StereoRecordingIsAvailable(bool* available) const 
{
    *available = true;
    return 0;
}

// Accepted as a no-op: recording is always treated as stereo (see below).
int32_t FCustomAudioDeviceModule::SetStereoRecording(bool enable) 
{
    return 0;
}

int32_t FCustomAudioDeviceModule::StereoRecording(bool* enabled) const 
{
    *enabled = true;
    return 0;
}

// Playout delay.
// Always 0 ms: there is no render path, so no buffering delay to report.
int32_t FCustomAudioDeviceModule::PlayoutDelay(uint16_t* delayMS) const 
{
    *delayMS = 0;
    return 0;
}

这是 PeerConnection 创建逻辑:


void FWebRTCClient::CreatePeerConnection()
{
    if (!PeerConnectionFactory)
    {
        CreatePeerConnectionFactory();
    }

    std::vector<std::string> stunServers = {
        "stun:stun.l.google.com:19302",
        "stun:stun1.l.google.com:19302",
        "stun:stun2.l.google.com:19302",
        "stun:stun3.l.google.com:19302",
        "stun:stun4.l.google.com:19302"
    };

    webrtc::PeerConnectionInterface::RTCConfiguration Config;

    for (const auto& uri : stunServers) {
        webrtc::PeerConnectionInterface::IceServer StunServer;
        StunServer.uri = uri;
        Config.servers.push_back(StunServer);
    }
    
    webrtc::RTCErrorOr<rtc::scoped_refptr<TPeerConnection>> Result = PeerConnectionFactory->CreatePeerConnectionOrError(Config, webrtc::PeerConnectionDependencies(this));
    if (!Result.ok())
    {
        Delegate->OnWebRTCError(FString::Printf(TEXT("Failed to Create PeerConnectionObject: %s"), Result.error().message()));
        return;
    }

    // move object's ownership to the class.
    PeerConnection = Result.MoveValue();

    // Create an audio source. This might involve your custom audio capture logic.
    cricket::AudioOptions AudioOptions;

    rtc::scoped_refptr<webrtc::AudioSourceInterface> AudioSource = PeerConnectionFactory->CreateAudioSource(AudioOptions);
    rtc::scoped_refptr<webrtc::AudioTrackInterface> AudioTrack = PeerConnectionFactory->CreateAudioTrack("audioLabel", AudioSource);

    // Add the audio track to the peer connection
    auto AddTrackResult = PeerConnection->AddTrack(AudioTrack, { "streamId" }); // "streamId" is arbitrary and used to identify the stream
    if (!AddTrackResult.ok()) {
        // Handle the error
        Delegate->OnWebRTCError(FString::Printf(TEXT("Failed to add audio track: %s"), AddTrackResult.error().message()));
        return;
    }
}


// Builds the PeerConnectionFactory: the three WebRTC threads, the custom
// capture-only ADM, a pass-through APM, and Opus-only codec factories.
// NOTE(review): the ADM is constructed here on the calling thread; WebRTC
// generally expects ADM interaction on the worker thread — verify the
// factory takes care of that handoff in the WebRTC revision in use.
void FWebRTCClient::CreatePeerConnectionFactory()
{
    // Network thread needs a socket server; worker/signaling do not.
    NetworkThread = rtc::Thread::CreateWithSocketServer();
    NetworkThread->Start();

    WorkerThread = rtc::Thread::Create();
    WorkerThread->Start();

    SignallingThread = rtc::Thread::Create();
    SignallingThread->Start();

    rtc::scoped_refptr<FCustomAudioDeviceModule> AudioDeviceModule = new rtc::RefCountedObject<FCustomAudioDeviceModule>();
    rtc::scoped_refptr<webrtc::AudioProcessing> AudioProcessingModule = webrtc::AudioProcessingBuilder().Create();
    {
        webrtc::AudioProcessing::Config Config;
        // Enabled multi channel audio capture/render
        Config.pipeline.multi_channel_capture = true;
        Config.pipeline.multi_channel_render = true;
        Config.pipeline.maximum_internal_processing_rate = 48000;
        // Turn off all other audio processing effects in UE's WebRTC. We want to stream audio from UE as pure as possible.
        Config.pre_amplifier.enabled = false;
        Config.high_pass_filter.enabled = false;
        Config.echo_canceller.enabled = false;
        Config.noise_suppression.enabled = false;
        Config.transient_suppression.enabled = false;
        Config.gain_controller1.enabled = false;
        Config.gain_controller2.enabled = false;
#if !WEBRTC_5414
        // These knobs were removed/renamed in newer WebRTC revisions; only
        // touch them when building against the older API.
        Config.voice_detection.enabled = false;
        Config.residual_echo_detector.enabled = false;
        Config.level_estimation.enabled = false;
#endif

        // Apply the config.
        AudioProcessingModule->ApplyConfig(Config);
    }

    // Audio-only: Opus encoder/decoder, no video factories (nullptr below).
    auto AudioEncoderFactory = webrtc::CreateAudioEncoderFactory<webrtc::AudioEncoderOpus>();
    auto AudioDecoderFactory = webrtc::CreateAudioDecoderFactory<webrtc::AudioDecoderOpus>();

    PeerConnectionFactory = webrtc::CreatePeerConnectionFactory(
                                    NetworkThread.get(),   // Network thread
                                    WorkerThread.get(),    // Worker thread
                                    SignallingThread.get(), // Signaling thread
                                    AudioDeviceModule,     // Audio device module
                                    AudioEncoderFactory,   // Audio Encoder Factory
                                    AudioDecoderFactory,   // Audio Decoder Factory
                                    nullptr,               // Video encoder factory (unused)
                                    nullptr,               // Video decoder factory (unused)
                                    nullptr,               // Audio mixer (use WebRTC default)
                                    AudioProcessingModule
                                );

    checkf(PeerConnectionFactory, TEXT("Failed to create peer connection factory!"));
}

我一直在尝试弄清楚在 AudioTransport->RecordedDataIsAvailable() 回调被调用之后 WebRTC 内部究竟做了什么,但我在其代码库中迷失了方向。

我觉得我错过了一条非常关键的信息,并且希望能得到任何帮助,因为我已经解决这个问题几天了。

谢谢,

c++ webrtc unreal-engine4 unreal-engine5
1个回答
0
投票

好吧,我解决了......

代码非常好,问题在于我计算 10ms 帧长度的方式......我忘记将长度乘以通道数:D。

© www.soinside.com 2019 - 2024. All rights reserved.