iOS app crashes with "required condition is false: IsFormatSampleRateAndChannelCountValid(format)" when selecting the microphone

Problem description · 0 votes · 3 answers

My app crashes when Microsoft Teams is using the microphone in the background and I try to record audio inside my app:

Terminating app due to uncaught exception 'com.apple.coreaudio.avfaudio', reason: 'required condition is false: IsFormatSampleRateAndChannelCountValid(format)'

Please refer to the following code:

func startRecording() {
        
        // Clear all previous session data and cancel task
        if recognitionTask != nil {
            recognitionTask?.cancel()
            recognitionTask = nil
        }

        // Create instance of audio session to record voice
        let audioSession = AVAudioSession.sharedInstance()
        do {
            try audioSession.setCategory(AVAudioSession.Category.record, mode: AVAudioSession.Mode.measurement, options: AVAudioSession.CategoryOptions.defaultToSpeaker)
            try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
        } catch {
            print("audioSession properties weren't set because of an error.")
        }
    
        self.recognitionRequest = SFSpeechAudioBufferRecognitionRequest()

        let inputNode = audioEngine.inputNode

        guard let recognitionRequest = recognitionRequest else {
            fatalError("Unable to create an SFSpeechAudioBufferRecognitionRequest object")
        }

        recognitionRequest.shouldReportPartialResults = true

        self.recognitionTask = speechRecognizer?.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in

            var isFinal = false

            if result != nil {

                self.textField.text = result?.bestTranscription.formattedString
                isFinal = (result?.isFinal)!
            }

            if error != nil || isFinal {

                self.audioEngine.stop()
                inputNode.removeTap(onBus: 0)

                self.recognitionRequest = nil
                self.recognitionTask = nil

                self.micButton.isEnabled = true
            }
        })
    
        let recordingFormat = inputNode.outputFormat(forBus: 0)

        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
            self.recognitionRequest?.append(buffer)
        }

        self.audioEngine.prepare()

        do {
            try self.audioEngine.start()
        } catch {
            print("audioEngine couldn't start because of an error.")
        }

        self.textField.text = ""
    }

I am fairly sure the problem is here, but I don't know how to fix it:

let recordingFormat = inputNode.outputFormat(forBus: 0)
        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
            self.recognitionRequest?.append(buffer)
        }
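
For reference, the two values behind the failing assertion can be inspected just before the tap is installed; a minimal diagnostic sketch reusing the variables from the code above:

let recordingFormat = inputNode.outputFormat(forBus: 0)
// When another app (here, Microsoft Teams) holds the microphone, the input node
// can report a format with 0 Hz sample rate and 0 channels, which is exactly what
// IsFormatSampleRateAndChannelCountValid(format) rejects.
print("sampleRate: \(recordingFormat.sampleRate), channels: \(recordingFormat.channelCount)")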
ios swift avaudiosession avkit
3 Answers
0 votes
import UIKit
import AVFoundation
import Speech

fileprivate let NibName = "FeedbackView"
protocol FeedbackViewDelegate : AnyObject {
    func showFeedbackError(title: String, message: String)
    func audioDidStart(forType type : FeedbackViewType)
}

enum FeedbackViewType {
    
    case feedbackView, rootcauseView, suggestionView, actionView
    
}

class FeedbackView: UIControl, ViewLoadable, SFSpeechRecognizerDelegate {
    
    @IBOutlet weak var textField: UITextField!
    
    static var nibName: String = NibName
    
    var feedbackViewType : FeedbackViewType = .feedbackView
    
    @IBOutlet var contentView: UIView!
    
    @IBOutlet weak var micButton: UIButton!
    
    @IBOutlet weak var micView: DefaultCardView!
    
    @IBOutlet weak var micImageView: UIImageView!
    
    weak var delegate : FeedbackViewDelegate?
    var allowTextEntry = true
    
    let speechRecognizer   = SFSpeechRecognizer(locale: Locale(identifier: "en-US"))

    var recognitionRequest : SFSpeechAudioBufferRecognitionRequest?
    var recognitionTask    : SFSpeechRecognitionTask?
    let audioEngine        = AVAudioEngine()
    
    override init(frame: CGRect) {
        super.init(frame: frame)
        commonInit()
    }
    
    required public init?(coder aDecoder: NSCoder) {
        super.init(coder: aDecoder)
        commonInit()
    }
    
    init() {
        super.init(frame: CGRect.zero)
        commonInit()
    }
    
    private func commonInit() {
        Bundle(for: type(of: self)).loadNibNamed(NibName, owner: self, options: nil)
        backgroundColor = .clear
        addSubview(contentView)
        contentView.frame = self.bounds
        contentView.autoresizingMask = [.flexibleHeight, .flexibleWidth]
      
    }
    
    func configure(text: String, placeholder:String, contentType: UITextContentType,keyboardType:UIKeyboardType) {
        
        print("Did configure keyboard")
        self.textField.textContentType = contentType
        self.textField.isSecureTextEntry = (contentType == .password)
        self.textField.keyboardType = keyboardType
        self.textField.delegate = self
        self.textField.placeholder = placeholder
        if(!text.isEmpty) {
            self.textField.text = text
        }
    }
    
    
    @IBAction func btnStartSpeechToText(_ sender: UIButton) {
//        allowTextEntry = false
        if audioEngine.isRunning {
            let audioText = textField.text
            self.audioEngine.stop()
            DispatchQueue.main.asyncAfter(deadline: .now() + 0.2) {
                self.textField.text = audioText
//                self.allowTextEntry = true
            }
            textField.text = audioText
            self.micButton.isEnabled = true
            self.micImageView.image = UIImage(named: "mic")
        } else {
            print("Audio did start")
            self.delegate?.audioDidStart(forType: self.feedbackViewType)
            self.setupSpeech()
            if self.startRecording() {
                self.micImageView.image = UIImage(named: "micRed")
            }
        }
    }
    
    func stopRecording() {
//        allowTextEntry = false
        let audioText = textField.text
        self.audioEngine.stop()
        self.recognitionRequest?.endAudio()
        DispatchQueue.main.asyncAfter(deadline: .now() + 0.2) {
            self.textField.text = audioText
//            self.allowTextEntry = true
        }
        self.micButton.isEnabled = true
        self.micImageView.image = UIImage(named: "mic")
    }
    
    func setupSpeech() {

//        self.micButton.isEnabled = false
        self.speechRecognizer?.delegate = self

        SFSpeechRecognizer.requestAuthorization { (authStatus) in

            var isButtonEnabled = false

            switch authStatus {
            case .authorized:
                isButtonEnabled = true

            case .denied:
                isButtonEnabled = false
                print("User denied access to speech recognition")

            case .restricted:
                isButtonEnabled = false
                print("Speech recognition restricted on this device")

            case .notDetermined:
                isButtonEnabled = false
                print("Speech recognition not yet authorized")

            @unknown default:
                // Treat any future authorization states as "not enabled".
                isButtonEnabled = false
            }

            OperationQueue.main.addOperation() {
//                self.micButton.isEnabled = isButtonEnabled
            }
        }
    }
    
//    func audioInputIsBusy(recordingFormat: AVAudioFormat) -> Bool {
//        guard recordingFormat.sampleRate == 0 || recordingFormat.channelCount == 0 else {
//            return false
//        }
//        return true
//    }
    
    func startRecording() -> Bool {

        // Clear all previous session data and cancel task
        if recognitionTask != nil {
            recognitionTask?.cancel()
            recognitionTask = nil
        }

        // Create instance of audio session to record voice
        let audioSession = AVAudioSession.sharedInstance()
        do {
            try audioSession.setCategory(AVAudioSession.Category.playAndRecord, mode: AVAudioSession.Mode.measurement, options: AVAudioSession.CategoryOptions.defaultToSpeaker)
            try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
        } catch {
            print("audioSession properties weren't set because of an error.")
            delegate?.showFeedbackError(title: "Sorry", message: "Mic is busy")
            return false
        }

        self.recognitionRequest = SFSpeechAudioBufferRecognitionRequest()

        let inputNode = audioEngine.inputNode

        guard let recognitionRequest = recognitionRequest else {
            fatalError("Unable to create an SFSpeechAudioBufferRecognitionRequest object")
        }

        recognitionRequest.shouldReportPartialResults = true

        self.recognitionTask = speechRecognizer?.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in

            var isFinal = false

            if result != nil {
                self.textField.text = result?.bestTranscription.formattedString
                isFinal = (result?.isFinal)!
            }

            if error != nil || isFinal {
                self.audioEngine.stop()
                inputNode.removeTap(onBus: 0)
                self.recognitionRequest = nil
                self.recognitionTask = nil
                self.micButton.isEnabled = true
            }
        })

        let recordingFormat = inputNode.outputFormat(forBus: 0)
        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
            self.recognitionRequest?.append(buffer)
        }

        self.audioEngine.prepare()

        do {
            try self.audioEngine.start()
        } catch {
            print("audioEngine couldn't start because of an error.")
            delegate?.showFeedbackError(title: "Sorry", message: "Your microphone is used somewhere else")
            return false
        }

        self.textField.text = ""
        return true
    }
    
    func speechRecognizer(_ speechRecognizer: SFSpeechRecognizer, availabilityDidChange available: Bool) {
        if available {
            self.micButton.isEnabled = true
        } else {
            self.micButton.isEnabled = false
        }
    }
    

}

extension FeedbackView: UITextFieldDelegate {
    
    func textFieldShouldReturn(_ textField: UITextField) -> Bool {
        self.endEditing(true)
        return false
    }
    
    func textField(_ textField: UITextField, shouldChangeCharactersIn range: NSRange, replacementString string: String) -> Bool {
        return allowTextEntry
    }
}

0 votes

So the app was crashing because I was not applying the correct microphone channel. Here is what I changed:

  1. After your imports, create a protocol at the top of the file to report errors out of it:

    let audioEngine = AVAudioEngine()
    
    protocol FeedbackViewDelegate : AnyObject {
        func showFeedbackError(title: String, message: String)
        func audioDidStart(forType type : FeedbackViewType)
    }
    
  2. First, give the function a Bool return value:

    func startRecording() -> Bool {
    }
    
  3. In the catch section of the AVAudioSession.sharedInstance() block, add the following lines (this is what prevents the crash):

    let audioSession = AVAudioSession.sharedInstance()
    do {
        try audioSession.setCategory(AVAudioSession.Category.playAndRecord, mode: AVAudioSession.Mode.measurement, options: AVAudioSession.CategoryOptions.defaultToSpeaker)
        try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
    } catch {
        print("audioSession properties weren't set because of an error.")
        delegate?.showFeedbackError(title: "Sorry", message: "Mic is busy")
        return false
    }
    

    The return above stops the rest of the code from executing.

  4. Create an extension in your view controller:

    extension codeFileName : FeedbackViewDelegate {
        func showFeedbackError(title: String, message: String) {
    
        }
    }
    

    (There are millions of examples on the web.) Inside the function you can create an alert and use self in the closure's "in" section; a sketch follows below.
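
    A minimal sketch of that alert, assuming the extension lives in a UIViewController subclass (codeFileName is the placeholder from the step above, standing for your own class):

    extension codeFileName: FeedbackViewDelegate {
        func showFeedbackError(title: String, message: String) {
            // Standard UIKit alert; `self` is the presenting view controller.
            let alert = UIAlertController(title: title, message: message, preferredStyle: .alert)
            alert.addAction(UIAlertAction(title: "OK", style: .default))
            self.present(alert, animated: true)
        }

        func audioDidStart(forType type: FeedbackViewType) {
            // Update the UI as needed when recording starts.
        }
    }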


0 votes

I hit the same problem while following the Scrumdinger tutorial on the Apple developer site. I eventually found a comment someone had left on a similar question: their development machine had no microphone. That explained why the tutorial code ran fine on my iPhone.

I then connected my iPhone to the Mac mini to act as its webcam, and after that both the preview and the simulator ran smoothly. I can no longer find the post with that comment, but the credit for this answer, and my thanks, go to the anonymous person who posted it.
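
If you would rather guard against a missing or busy input device in code, a check along the lines of the commented-out audioInputIsBusy helper in the first answer is one option. A sketch, assuming it runs inside a startRecording() -> Bool such as the one above:

let recordingFormat = inputNode.outputFormat(forBus: 0)
// A machine without a usable microphone (or whose microphone is held by another
// app) reports 0 Hz / 0 channels here; installing a tap with that format trips
// the IsFormatSampleRateAndChannelCountValid assertion.
guard recordingFormat.sampleRate > 0, recordingFormat.channelCount > 0 else {
    print("No valid audio input available.")
    return false
}
inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
    self.recognitionRequest?.append(buffer)
}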
