Unknown reason for receiving an empty audio stream with `wrtc` RTCAudioSink

Problem description · Votes: 0 · Answers: 1

I'm working on a POC project that is supposed to receive a remote audio stream and write it to a file. I can subscribe to the data event using the wrtc RTCAudioSink, but every chunk I receive is:

ArrayBuffer { [Uint8Contents]: <00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ... 220 more bytes>, byteLength: 320 }
The resulting file is just a silent mp3. I'm not sure whether I'm doing something wrong or whether the library I'm using is causing the trouble. Here is the backend code snippet:

const path = require('path');
const io = require('socket.io');
const {
  nonstandard: {
    RTCAudioSink,
    RTCVideoSink
  },
  RTCPeerConnection,
  RTCSessionDescription,
  RTCIceCandidate
} = require('wrtc');

const ffmpegPath = require('@ffmpeg-installer/ffmpeg').path;
const ffmpeg = require('fluent-ffmpeg');
ffmpeg.setFfmpegPath(ffmpegPath);
const { PassThrough } = require('stream');
const { StreamInput } = require('fluent-ffmpeg-multistream');

const clientHost = require('../config/connections').CLIENT_URL[process.env.NODE_ENV];

let audioSink;
let videoSink;
let audioStream;

const outputAudioPath = path.join(__dirname, 'out-audio.mp3');

const beforeOffer = () => {
  const peerConnection = new RTCPeerConnection({
    sdpSemantics: 'unified-plan'
  });

  return peerConnection;
};

module.exports = (http) => {
  const socketIO = io(http, {
    allowEIO3: true,
    transports: ['websocket'],
    cors: {
      origin: clientHost,
      methods: ["GET", "POST"],
      credentials: true
    }
  });

  socketIO.on('connection', (socket) => {
    let pc;

    socket.on('chatConnected', ({ roomId, senderId }) => {
      console.log({ roomId, senderId })
      socket.join(roomId);

      pc = beforeOffer();

      pc.onicecandidate = (event) => {
        if (!event.candidate) {
          return null;
        }

        socket.to(roomId).emit('newICECandidate', {
          roomId,
          senderId,
          candidate: event.candidate
        })
      };

      pc.ontrack = (event) => {
        if (event.track.kind !== 'audio') {
          return null;
        }

        audioStream = new PassThrough();
        audioSink = new RTCAudioSink(event.track);
        console.log('id ', event.track.id)
        console.log('label ', event.track.label)
        console.log('enabled ', event.track.enabled)
        console.log('muted ', event.track.muted)
        console.log('readyState', event.track.readyState)
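        // Each 'data' event delivers a short frame of raw signed 16-bit PCM; samples is an Int16Array, so samples.buffer holds the raw bytes written to the stream below.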
        audioSink.addEventListener('data', ({ samples: { buffer } }) => {
          audioStream.write(Buffer.from(buffer));
        });

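        // Feed the raw PCM stream into ffmpeg as signed 16-bit little-endian mono at 48 kHz and encode it to the mp3 output path.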
        ffmpeg()
          .addInput((new StreamInput(audioStream)).url)
          .addInputOptions([
            '-f s16le',
            '-ar 48k',
            '-ac 1',
          ])
          .on('start', () => {
            console.log('Start recording')
          })
          .on('end', () => {
            console.log('Stop recording')
          })
          .output(outputAudioPath)
          .run();
      };
    });

    // socket.on('participantConnected', (params) => {
    //   socket.to(params.roomId).emit('participantConnected', params);
    // });

    socket.on('participantDisconnected', (params) => {
      audioSink && audioSink.stop();
      audioSink && audioSink.removeEventListener('data');

      audioStream && audioStream.end();

      // socket.to(params.roomId).emit('participantDisconnected', params.senderId);
    });

    socket.on('message', (params) => {
      socket.to(params.roomId).emit('message', params);
    });

    socket.on('videoChatOffer', async (params) => {
      await pc.setRemoteDescription(new RTCSessionDescription(params.sdp));

      const answer = await pc.createAnswer();
      await pc.setLocalDescription(answer);

      socket.to(params.roomId).emit('videoChatAnswer', { ...params, sdp: pc.localDescription });
      // socket.to(params.roomId).emit('videoChatOffer', params);
    });

    // socket.on('videoChatAnswer', (params) => {
    //  socket.to(params.roomId).emit('videoChatAnswer', params);
    // });

    socket.on('newICECandidate', async (params) => {
      const candidate = new RTCIceCandidate(params.candidate);

      await pc.addIceCandidate(candidate);

      // socket.to(params.roomId).emit('newICECandidate', params);
    });

    socket.on('cameraAction', (params) => {
      socket.to(params.roomId).emit('cameraAction', params);
    });

    socket.on('microphoneAction', (params) => {
      socket.to(params.roomId).emit('microphoneAction', params);
    });
  });
};

I tried to receive the audio stream using RTCAudioSink, but all the chunks are empty.
I expected to receive actual audio data from the client.
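
For debugging, a minimal sketch like the one below can confirm on the server whether the incoming frames are actually silent. It assumes the shape of wrtc's nonstandard RTCAudioSink data event (samples as an Int16Array plus sampleRate, bitsPerSample and channelCount); attachSilenceProbe is just a hypothetical helper name, not part of the original code:

const { nonstandard: { RTCAudioSink } } = require('wrtc');

// Hypothetical debug helper: report whether each incoming audio frame is all zeros.
function attachSilenceProbe(track) {
  const sink = new RTCAudioSink(track);

  sink.addEventListener('data', ({ samples, sampleRate, bitsPerSample, channelCount }) => {
    // samples is an Int16Array of raw PCM; the frame is silent if every sample is 0.
    const silent = samples.every((sample) => sample === 0);
    console.log('audio frame', { sampleRate, bitsPerSample, channelCount, silent });
  });

  return sink; // call sink.stop() when the track ends
}

// Usage inside pc.ontrack: attachSilenceProbe(event.track);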

javascript node.js webrtc sdp rtcp
1 Answer (0 votes)

The reason I was getting an empty audio stream turned out to be completely silly. For some reason the socket.to call wasn't working: even though I received the offer, the answer was never sent back to the remote peer. The peer was adding its tracks and the ontrack event fired, but their audio streams remain empty until the remote peer receives the answer. After fixing the socket handling, everything started working as expected. I was focused on the wrong things and missed the obvious.
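
The answer doesn't show the exact fix, but a likely explanation is that socket.to(roomId).emit broadcasts to every socket in the room except the sender, so the client that sent the offer never receives the answer. A minimal sketch of one way to route the answer back, assuming the server should reply to the offering client on its own socket:

socket.on('videoChatOffer', async (params) => {
  await pc.setRemoteDescription(new RTCSessionDescription(params.sdp));

  const answer = await pc.createAnswer();
  await pc.setLocalDescription(answer);

  // socket.to(roomId) excludes the emitting socket, so reply to the offerer directly.
  socket.emit('videoChatAnswer', { ...params, sdp: pc.localDescription });
});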
