Search code examples
objective-c · iphone · audio · audio-recording · audio-converter

Convert audio from iPhone microphone to iLBC


I want to record audio from the iPhone microphone, convert it to iLBC, and then stream it to a remote server. But I always get 1768846202 back from AudioConverterFillComplexBuffer. I know this means kAudioConverterErr_InvalidInputSize, but I don't know which input is wrong.

I've searched for some articles, like Stream audio from iOS, Record audio on iPhone with smallest file size, and AudioUnit PCM compression to iLBC and decompression to PCM, but none of them solved my problem.

Here's my convert function:

/// Converts one buffer of 16-bit mono PCM into iLBC using the converter
/// created in -initDecoder (encodeProc feeds the PCM to the converter).
/// @param pcmData Raw PCM captured from the microphone.
/// @return An AudioBuffer whose mData is malloc'd here; the caller owns it
///         and must free() it. On failure mData is NULL and mDataByteSize 0.
-(AudioBuffer) doConvert: (AudioBuffer)pcmData
{
OSStatus status;

// iLBC output is far smaller than the PCM input, so the input size is a
// safe upper bound for the output scratch buffer.
UInt32 theOutputBufSize = pcmData.mDataByteSize;
char *outputBuffer = (char *)malloc(theOutputBufSize);

/* Prepare the (empty) result up front so every error path can return it. */
AudioBuffer outData;
outData.mNumberChannels = 1;
outData.mData           = NULL;
outData.mDataByteSize   = 0;

if (outputBuffer == NULL) {
    return outData; // allocation failed
}

/* Create the output buffer list */
AudioBufferList outBufferList;
outBufferList.mNumberBuffers = 1;
outBufferList.mBuffers[0].mNumberChannels = 1;
outBufferList.mBuffers[0].mDataByteSize   = theOutputBufSize;
outBufferList.mBuffers[0].mData           = outputBuffer;

// Ask for one iLBC packet per call; the converter pulls PCM via encodeProc,
// which receives &pcmData as its user-data pointer.
UInt32 numOutputDataPackets = 1;
AudioStreamPacketDescription outPacketDesc[1];
status = AudioConverterFillComplexBuffer(audioConverterDecode,
                                         encodeProc,
                                         &pcmData,
                                         &numOutputDataPackets,
                                         &outBufferList,
                                         outPacketDesc);
[self hasError:status:__FILE__:__LINE__];

if (status != noErr) {
    // Don't leak the scratch buffer or hand back a garbage-sized buffer
    // when the conversion failed.
    free(outputBuffer);
    return outData;
}

/* Set the output data */
outData.mData         = outBufferList.mBuffers[0].mData;
outData.mDataByteSize = outBufferList.mBuffers[0].mDataByteSize;

return outData;
}

And my initial function:

/// Creates the PCM -> iLBC AudioConverter used by -doConvert:.
/// Input is 16-bit signed packed mono linear PCM at SAMPLE_RATE; output is
/// iLBC in its 20 ms mode (160 frames / 38 bytes per packet at 8 kHz;
/// the 30 ms mode would be 240 frames / 50 bytes -- see RFC 3951).
-(void)initDecoder
{

NSLog(@"initDecoder");
AudioStreamBasicDescription srcFormat, dstFormat;
OSStatus status;

// Zero both descriptions first so no field (e.g. mReserved) is left with
// stack garbage -- Core Audio rejects malformed ASBDs.
memset(&srcFormat, 0, sizeof(srcFormat));
memset(&dstFormat, 0, sizeof(dstFormat));

// Output format: iLBC, 20 ms mode.
dstFormat.mSampleRate       = 8000.0;
dstFormat.mFormatID         = kAudioFormatiLBC;
dstFormat.mChannelsPerFrame = 1;
dstFormat.mBytesPerPacket   = 38;   // 304 bits per 20 ms packet
dstFormat.mFramesPerPacket  = 160;  // 20 ms at 8 kHz
dstFormat.mBytesPerFrame    = 0;    // 0 for compressed formats
dstFormat.mBitsPerChannel   = 0;    // 0 for compressed formats
dstFormat.mFormatFlags      = 0;

// Source format: 16-bit signed, packed, mono linear PCM.
srcFormat.mSampleRate       = SAMPLE_RATE;   //This is 48000
srcFormat.mFormatID         = kAudioFormatLinearPCM;
srcFormat.mFormatFlags      = kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
srcFormat.mFramesPerPacket  = 1;
srcFormat.mChannelsPerFrame = 1;
srcFormat.mBitsPerChannel   = 16;
srcFormat.mBytesPerPacket   = 2;
srcFormat.mBytesPerFrame    = 2;

status = AudioConverterNew(&srcFormat, &dstFormat, &audioConverterDecode);
[self hasError:status:__FILE__:__LINE__];
}

Solution

  • I've changed some of this code, and found my solution.

    createAudioConvert:

    // Input format comes straight from the capture sample buffer.
    AudioStreamBasicDescription inputFormat = *(CMAudioFormatDescriptionGetStreamBasicDescription(CMSampleBufferGetFormatDescription(sampleBuffer)));
    AudioStreamBasicDescription outputFormat;

    memset(&outputFormat, 0, sizeof(outputFormat));
    outputFormat.mSampleRate       = 8000;
    outputFormat.mFormatID         = kAudioFormatiLBC;
    outputFormat.mChannelsPerFrame = 1;

    // Use the AudioFormat API to fill out the rest of the description...
    UInt32 size = sizeof(outputFormat);
    AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &outputFormat);

    // ...then force the 30 ms iLBC mode: 240 frames / 50 bytes per packet
    // (RFC 3951; the 20 ms mode would be 160 frames / 38 bytes).
    outputFormat.mBytesPerPacket  = 50;
    outputFormat.mFramesPerPacket = 240;

    // NOTE(review): the original snippet had a comma between the two message
    // arguments, which does not compile; keyword arguments are separated by
    // whitespace only.
    AudioClassDescription *desc = [self getAudioClassDescriptionWithType:kAudioFormatiLBC fromManufacturer:kAppleSoftwareAudioCodecManufacturer];
    if (AudioConverterNewSpecific(&inputFormat, &outputFormat, 1, desc, &m_converter) != noErr)
    {
        printf("AudioConverterNewSpecific failed\n");
        return NO;
    }

    return YES;
    

    encoderAAC

    // (Re)create the converter for this sample buffer's input format.
    if (![self createAudioConvert:sampleBuffer])
    {
        return NO;
    }

    CMBlockBufferRef blockBuffer = nil;
    AudioBufferList  inBufferList;
    if (CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &inBufferList, sizeof(inBufferList), NULL, NULL, 0, &blockBuffer) != noErr)
    {
        printf("CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer failed");
        return NO;
    }

    // Single-buffer output list pointing at the caller-supplied buffer;
    // *aacLen is its capacity on input and the written size on output.
    AudioBufferList outBufferList;
    outBufferList.mNumberBuffers              = 1;
    outBufferList.mBuffers[0].mNumberChannels = 1;
    outBufferList.mBuffers[0].mDataByteSize   = *aacLen;
    outBufferList.mBuffers[0].mData           = aacData;

    // Ask the converter for one compressed packet.
    UInt32 outputDataPacketSize               = 1;

    OSStatus err = AudioConverterFillComplexBuffer(m_converter, inputDataProc, &inBufferList, &outputDataPacketSize, &outBufferList, NULL);
    printf("AudioConverterFillComplexBuffer\n");

    if ( err != noErr)
    {
        printf("AudioConverterFillComplexBuffer failed\n");
        // blockBuffer was retained for us above; release it on this error
        // path too, or it leaks.
        CFRelease(blockBuffer);
        return NO;
    }

    *aacLen = outBufferList.mBuffers[0].mDataByteSize;
    CFRelease(blockBuffer);
    return YES;
    

    callback function:

    /// AudioConverter input callback: hands the converter the entire PCM
    /// AudioBufferList that the caller stashed in inUserData.
    OSStatus inputDataProc(AudioConverterRef inConverter, UInt32 *ioNumberDataPackets, AudioBufferList *ioData, AudioStreamPacketDescription **outDataPacketDescription, void *inUserData) {

    // Constant-bitrate linear PCM has no packet descriptions, but when the
    // converter asks for them we must say so explicitly instead of leaving
    // the out-pointer untouched.
    if (outDataPacketDescription) {
        *outDataPacketDescription = NULL;
    }

    AudioBufferList bufferList = *(AudioBufferList*)inUserData;

    ioData->mNumberBuffers = 1;
    ioData->mBuffers[0].mNumberChannels = 1;
    ioData->mBuffers[0].mData           = bufferList.mBuffers[0].mData;
    ioData->mBuffers[0].mDataByteSize   = bufferList.mBuffers[0].mDataByteSize;

    // 2 bytes per packet: assumes the input is 16-bit mono PCM
    // (mBytesPerPacket == 2) -- TODO(review): confirm against the input ASBD.
    UInt32 maxPackets = bufferList.mBuffers[0].mDataByteSize / 2;
    *ioNumberDataPackets = maxPackets;

    return noErr;
    }