我正在尝试使用 AVAudioEngine(通过 iPhone 麦克风)经由 GCDAsyncUdpSocket 将音频发送到另一台收听设备;我在另一台 iOS 设备上接收音频,并且可以在下面的 getComingAudio 方法中播放该音频,但音频带有背景噪音和回声。如何避免背景噪音和回声?谢谢。
func setupAudio () {
    // Build a fresh engine plus a dedicated mixer node that we can tap
    // later for outgoing audio.
    let engine = AVAudioEngine()
    let tapMixer = AVAudioMixerNode()
    // Volume 0 keeps the captured mic signal from being heard locally;
    // the installed tap still receives the samples.
    tapMixer.volume = 0
    engine.attach(tapMixer)
    self.audioEngine = engine
    self.mixer = tapMixer
    // UDP socket whose delegate callbacks arrive on the main queue.
    self.socket = GCDAsyncUdpSocket(delegate: self, delegateQueue: DispatchQueue.main)
    // Standard (Float32, non-interleaved) mono format used when rebuilding
    // playback buffers from received datagrams.
    audioFormat = AVAudioFormat(standardFormatWithSampleRate: Double(sampleRate), channels: 1)
}
// Sending Audio
/// Starts capturing mic audio, enables voice processing (echo cancellation
/// + noise suppression), and streams Int16 PCM packets over UDP.
func sendAudio() {
    // Configure the session for simultaneous capture and playback.
    // Handle failures instead of crashing with try!.
    do {
        try AVAudioSession.sharedInstance().setCategory(AVAudioSession.Category.playAndRecord)
        try AVAudioSession.sharedInstance().setActive(true)
    } catch {
        print("Audio session error: \(error)")
        return
    }
    let format = AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatInt16,
                               sampleRate: 44100.0,
                               channels: 1,
                               interleaved: true)
    self.audioEngine.connect(self.audioEngine.inputNode, to: self.mixer, format: format)
    self.audioEngine.connect(self.mixer, to: self.audioEngine.mainMixerNode, format: format)
    // Enable Apple's voice processing on the input node — this is what
    // removes the echo and background noise. Must be done before the
    // engine starts. (The original line was garbled machine translation.)
    do {
        try self.audioEngine.inputNode.setVoiceProcessingEnabled(true)
    } catch {
        print("Voice processing error: \(error)")
    }
    DispatchQueue.global(qos: .background).async { [weak self] in
        guard let self = self else { return }
        do {
            self.socket.setIPv4Enabled(true)
            self.socket.setIPv6Enabled(false)
            // Literals are non-optional; the original `?? ""` / `?? 0` were no-ops.
            try self.socket.connect(toHost: "235.10.10.100", onPort: 4646)
            try self.socket.beginReceiving()
            print("Socket started")
        } catch {
            print("Socket Started Error: \(error)")
        }
    }
    self.mixer.installTap(onBus: 0, bufferSize: 2048, format: format) { [weak self] (buffer, _) in
        guard let self = self else { return }
        // frameLength counts FRAMES; each mono Int16 frame is 2 bytes.
        // The original used frameLength directly, sending only half of
        // every buffer.
        let byteCount = Int(buffer.frameLength) * MemoryLayout<Int16>.size
        let data = Data(bytes: buffer.int16ChannelData![0], count: byteCount)
        DispatchQueue.global(qos: .background).async {
            // GCDAsyncUdpSocket.send(_:withTimeout:tag:) does not throw;
            // delivery results arrive via the delegate, so no do/catch here.
            self.socket.send(data, withTimeout: 0, tag: 0)
        }
    }
    audioEngine.prepare()
    do {
        try audioEngine.start()
    } catch {
        print("Can't start the engine: \(error)")
    }
}
// Receiving audio
/// Delegate callback for each incoming UDP datagram: rebuild a PCM buffer
/// from the raw bytes and queue it on the player.
func udpSocket(_ sock: GCDAsyncUdpSocket, didReceive data: Data, fromAddress address: Data, withFilterContext filterContext: Any?) {
    let incoming = getComingAudio(with: data)
    audioPlayer.scheduleBuffer(incoming, completionHandler: nil)
}
/// Converts a datagram of interleaved Int16 mono samples into a Float32
/// `AVAudioPCMBuffer` (in `audioFormat`) scaled to [-1, 1].
func getComingAudio(with data: Data) -> AVAudioPCMBuffer {
    // Each Int16 sample occupies 2 bytes, so sample count = byte count / 2.
    let sampleCount = UInt32(data.count) / 2
    let audioBuffer = AVAudioPCMBuffer(pcmFormat: audioFormat, frameCapacity: sampleCount)!
    data.withUnsafeBytes { (raw: UnsafeRawBufferPointer) in
        let samples = raw.bindMemory(to: Int16.self)
        let channel = audioBuffer.floatChannelData!.pointee
        // Normalize each 16-bit sample into the float channel in place.
        for (index, sample) in samples.enumerated() {
            channel[index] = Float(sample) / Float(Int16.max)
        }
    }
    audioBuffer.frameLength = audioBuffer.frameCapacity
    return audioBuffer
}
最简短的答案是打开语音处理:
try audioEngine.inputNode.setVoiceProcessingEnabled(true)
有关更多信息(您可能需要更多信息,因为这些事情有时可能很棘手),请参阅 WWDC2019/510 AVAudioEngine 中的新增功能 和相关示例代码,使用语音处理。