AudioUnitRender error -50

Question · 7 votes · 3 answers

I am getting error -50 (invalid parameters) from AudioUnitRender in the following context. I used this Pitch Detector sample app as a starting point, and it works fine. The only major difference in my project is that I also use the Remote I/O unit for audio output. The audio output works correctly. Below are my input callback and my initialization code (error checking removed for brevity). I know it is a lot of code, but error -50 gives me very little information about where the problem might be.

Input callback:

OSStatus inputCallback( void* inRefCon, 
                            AudioUnitRenderActionFlags  *ioActionFlags, 
                            const AudioTimeStamp        *inTimeStamp, 
                            UInt32                      inBusNumber, 
                            UInt32                      inNumberFrames, 
                            AudioBufferList             *ioData) {

    WBAudio* audioObject= (WBAudio*)inRefCon;

    AudioUnit rioUnit = audioObject->m_audioUnit;
    OSStatus renderErr;
    UInt32 bus1 = 1;

    renderErr = AudioUnitRender(rioUnit, ioActionFlags, 
                                inTimeStamp, bus1, inNumberFrames, audioObject->m_inBufferList );
    if (renderErr < 0) {
        return renderErr; // breaks here
    }

    return noErr;
} // end inputCallback()

Initialization:

- (id) init {

    self= [super init];
    if( !self ) return nil;

    OSStatus result;

    //! Initialize a buffer list for rendering input
    size_t bytesPerSample;
    bytesPerSample = sizeof(SInt16);
    m_inBufferList = (AudioBufferList *)malloc(sizeof(AudioBuffer));
    m_inBufferList->mNumberBuffers = 1;
    m_inBufferList->mBuffers[0].mNumberChannels = 1;
    m_inBufferList->mBuffers[0].mDataByteSize = 512*bytesPerSample;
    m_inBufferList->mBuffers[0].mData = calloc(512, bytesPerSample);

    //! Initialize an audio session to get buffer size
    result = AudioSessionInitialize(NULL, NULL, NULL, NULL);

    UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord;
    result = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory);

    // Set preferred buffer size
    Float32 preferredBufferSize = static_cast<float>(m_pBoard->m_uBufferSize) / m_pBoard->m_fSampleRate;
    result = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof(preferredBufferSize), &preferredBufferSize);

    // Get actual buffer size
    Float32 audioBufferSize;
    UInt32 size = sizeof (audioBufferSize);
    result = AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareIOBufferDuration, &size, &audioBufferSize);

    result = AudioSessionSetActive(true);

    //! Create our Remote I/O component description
    AudioComponentDescription desc;
    desc.componentType= kAudioUnitType_Output;
    desc.componentSubType= kAudioUnitSubType_RemoteIO;
    desc.componentFlags= 0;
    desc.componentFlagsMask= 0;
    desc.componentManufacturer= kAudioUnitManufacturer_Apple;

    //! Find the corresponding component
    AudioComponent outputComponent = AudioComponentFindNext(NULL, &desc);

    //! Create the component instance
    result = AudioComponentInstanceNew(outputComponent, &m_audioUnit);

    //! Enable audio output
    UInt32 flag = 1;
    result = AudioUnitSetProperty( m_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, kOutputBus, &flag, sizeof(flag));

    //! Enable audio input
    result= AudioUnitSetProperty( m_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag));

    //! Create our audio stream description
    m_audioFormat.mSampleRate= m_pBoard->m_fSampleRate;
    m_audioFormat.mFormatID= kAudioFormatLinearPCM;
    m_audioFormat.mFormatFlags= kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    m_audioFormat.mFramesPerPacket= 1;
    m_audioFormat.mChannelsPerFrame= 1;
    m_audioFormat.mBitsPerChannel= 16;
    m_audioFormat.mBytesPerPacket= 2;
    m_audioFormat.mBytesPerFrame= 2;

    //! Set the stream format
    result = AudioUnitSetProperty( m_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &m_audioFormat, sizeof(m_audioFormat));

    result = AudioUnitSetProperty(m_audioUnit, kAudioUnitProperty_StreamFormat, 
                               kAudioUnitScope_Output, 
                               kInputBus, &m_audioFormat, sizeof(m_audioFormat));

    //! Set the render callback
    AURenderCallbackStruct renderCallbackStruct= {0};
    renderCallbackStruct.inputProc= renderCallback;
    renderCallbackStruct.inputProcRefCon= m_pBoard;
    result = AudioUnitSetProperty(m_audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, kOutputBus, &renderCallbackStruct, sizeof(renderCallbackStruct));

    //! Set the input callback
    AURenderCallbackStruct inputCallbackStruct = {0};
    inputCallbackStruct.inputProc= inputCallback;
    inputCallbackStruct.inputProcRefCon= self;
    result= AudioUnitSetProperty( m_audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Input, kOutputBus, &inputCallbackStruct, sizeof( inputCallbackStruct ) );

    //! Initialize the unit
    result = AudioUnitInitialize( m_audioUnit );

    return self;
}
objective-c ios audio core-audio audiounit
3 Answers
0 votes

You are allocating m_inBufferList as:

m_inBufferList = (AudioBufferList *)malloc(sizeof(AudioBuffer));

This should be:

m_inBufferList = (AudioBufferList *)malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer) * numberOfBuffers);   //numberOfBuffers in your case is 1

Perhaps this solves your problem.
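
For illustration, here is a minimal sketch of that allocation wrapped in a helper; the function name AllocateAudioBufferList and the mono 16-bit sizing are assumptions matching the question's setup, not part of the original answer:

    #include <stdlib.h>
    #include <AudioToolbox/AudioToolbox.h>

    // Allocate an AudioBufferList with room for numberOfBuffers
    // AudioBuffers, each owning bytesPerBuffer bytes of sample data.
    static AudioBufferList *AllocateAudioBufferList(UInt32 numberOfBuffers,
                                                    UInt32 bytesPerBuffer) {
        AudioBufferList *abl = (AudioBufferList *)malloc(
            sizeof(AudioBufferList) + sizeof(AudioBuffer) * numberOfBuffers);
        abl->mNumberBuffers = numberOfBuffers;
        for (UInt32 i = 0; i < numberOfBuffers; i++) {
            abl->mBuffers[i].mNumberChannels = 1; // mono, as in the question
            abl->mBuffers[i].mDataByteSize = bytesPerBuffer;
            abl->mBuffers[i].mData = calloc(1, bytesPerBuffer);
        }
        return abl;
    }

    // For the question's setup: one buffer of 512 16-bit samples.
    // m_inBufferList = AllocateAudioBufferList(1, 512 * sizeof(SInt16));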


0 votes

Per the developer documentation, error -50 means a bad parameter (paramErr). Make sure you are passing the correct arguments to AudioUnitRender, and check the stream format and your unit.
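
When every call silently discards its OSStatus, as in the question's init method, the failing parameter is hard to locate. A common pattern for surfacing these errors is a CheckError helper in the style of Adamson and Avila's Learning Core Audio (cited in another answer); this is a sketch, not code from the original answer:

    #include <stdio.h>
    #include <ctype.h>
    #include <CoreFoundation/CoreFoundation.h>

    // Print an OSStatus either as a four-character code (e.g. 'fmt?')
    // or, if it is not printable, as a decimal number such as -50.
    static void CheckError(OSStatus error, const char *operation) {
        if (error == noErr) return;
        char str[20];
        // Reinterpret the status as a big-endian four-character code.
        *(UInt32 *)(str + 1) = CFSwapInt32HostToBig((UInt32)error);
        if (isprint(str[1]) && isprint(str[2]) &&
            isprint(str[3]) && isprint(str[4])) {
            str[0] = str[5] = '\'';
            str[6] = '\0';
        } else {
            sprintf(str, "%d", (int)error);
        }
        fprintf(stderr, "Error: %s (%s)\n", operation, str);
    }

Wrapping each call, e.g. CheckError(AudioUnitInitialize(m_audioUnit), "AudioUnitInitialize"), pinpoints exactly which step produces the -50.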


0 votes

I agree with Kurt Pattyn that the allocation of m_inBufferList is incorrect and could well be the cause of the -50 bad-parameter error.

Except that for a single buffer I believe it should be:

    (AudioBufferList *)malloc(sizeof(AudioBufferList))

My evidence is the sizes below, together with the code from Adamson and Avila that follows.

    (lldb) po sizeof(AudioBufferList)
    24
    (lldb) po sizeof(AudioBuffer)
    16
    (lldb) po offsetof(AudioBufferList, mBuffers[0])
    8
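
Those numbers line up: the 8-byte header plus one 16-byte AudioBuffer equals 24 bytes, so sizeof(AudioBufferList) already includes space for exactly one inline buffer. As a sketch (my addition, not from the original answer), the assumption can even be checked at compile time:

    #include <stddef.h>
    #include <AudioToolbox/AudioToolbox.h>

    // AudioBufferList is a header (mNumberBuffers plus padding, 8 bytes
    // on 64-bit) followed by one inline AudioBuffer, 24 bytes in total.
    _Static_assert(sizeof(AudioBufferList) ==
                       offsetof(AudioBufferList, mBuffers[0]) + sizeof(AudioBuffer),
                   "AudioBufferList carries one inline AudioBuffer");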

According to Chris Adamson and Kevin Avila in Learning Core Audio:

    // Allocate an AudioBufferList plus enough space for
    // array of AudioBuffers
    UInt32 propsize = offsetof(AudioBufferList, mBuffers[0]) +
        (sizeof(AudioBuffer) * player->streamFormat.mChannelsPerFrame);

    // malloc buffer lists
    player->inputBuffer = (AudioBufferList *)malloc(propsize);
    player->inputBuffer->mNumberBuffers = player->streamFormat.mChannelsPerFrame;

    // Pre-malloc buffers for AudioBufferLists
    for (UInt32 i = 0; i < player->inputBuffer->mNumberBuffers; i++) {
        player->inputBuffer->mBuffers[i].mNumberChannels = 1;
        player->inputBuffer->mBuffers[i].mDataByteSize = bufferSizeBytes;
        player->inputBuffer->mBuffers[i].mData = malloc(bufferSizeBytes);
    }

Last but not least, I just came across the following comment on this code :)

    // credit to TheAmazingAudioEngine for an illustration of proper audiobufferlist allocation.
    // Google leads to some really really bad allocation code...
    [other code]
    sampleABL = malloc(sizeof(AudioBufferList) + (bufferCnt-1)*sizeof(AudioBuffer));

https://github.com/zakk4223/CocoaSplit/blob/master/CocoaSplit/CAMultiAudio/CAMultiAudioPCMPlayer.m#L203
