Convert audio from iPhone microphone to iLBC

I want to record audio from the iPhone microphone, convert it to iLBC, and then stream it to a remote server. But AudioConverterFillComplexBuffer always returns 1768846202. I know this means kAudioConverterErr_InvalidInputSize, but I don't know which input is wrong.

I have searched articles such as Stream audio from iOS, Record audio on iPhone with smallest file size, and AudioUnit PCM compression to iLBC and decompression to PCM, but none of them solved my problem.
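(As an aside: 1768846202 is just the four-character code 'insz'. The small helper below is not part of the original code, but it shows how any Core Audio OSStatus can be decoded into its four-character form:)

#import <Foundation/Foundation.h>
#include <stdio.h>

static void PrintFourCharCode(OSStatus status)
{
    // Interpret the 32-bit status as four ASCII characters,
    // e.g. 1768846202 -> 'insz' (kAudioConverterErr_InvalidInputSize).
    char code[5];
    code[0] = (char)((status >> 24) & 0xFF);
    code[1] = (char)((status >> 16) & 0xFF);
    code[2] = (char)((status >> 8)  & 0xFF);
    code[3] = (char)( status        & 0xFF);
    code[4] = '\0';
    printf("OSStatus %d = '%s'\n", (int)status, code);
}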

Here is my conversion function:

-(AudioBuffer) doConvert: (AudioBuffer)pcmData
{
    char *outputBuffer = NULL;
    OSStatus status;

    UInt32 theOutputBufSize = pcmData.mDataByteSize; //32768;
    outputBuffer = (char *)malloc(sizeof(char) * theOutputBufSize);

    /* Create the output buffer list */
    AudioBufferList outBufferList;
    outBufferList.mNumberBuffers              = 1;
    outBufferList.mBuffers[0].mNumberChannels = 1;
    outBufferList.mBuffers[0].mDataByteSize   = theOutputBufSize;
    outBufferList.mBuffers[0].mData           = outputBuffer;

    /* Convert: request one iLBC packet, pulling PCM through encodeProc */
    UInt32 numOutputDataPackets = 1;
    AudioStreamPacketDescription outPacketDesc[1];
    status = AudioConverterFillComplexBuffer(audioConverterDecode,
                                             encodeProc,
                                             &pcmData,
                                             &numOutputDataPackets,
                                             &outBufferList,
                                             outPacketDesc);
    [self hasError:status:__FILE__:__LINE__];

    /* Set the output data */
    AudioBuffer outData;
    outData.mNumberChannels = 1;
    outData.mData           = outBufferList.mBuffers[0].mData;
    outData.mDataByteSize   = outBufferList.mBuffers[0].mDataByteSize;

    return outData;
}

My initialization function:

-(void)initDecoder
{
    NSLog(@"initDecoder");
    AudioStreamBasicDescription srcFormat, dstFormat;
    OSStatus status;

    // Output format: iLBC in 20 ms mode (38 bytes / 160 frames per packet;
    // the 30 ms mode would be 50 bytes / 240 frames)
    dstFormat.mSampleRate       = 8000.0;
    dstFormat.mFormatID         = kAudioFormatiLBC;
    dstFormat.mChannelsPerFrame = 1;
    dstFormat.mBytesPerPacket   = 38;  //50;
    dstFormat.mFramesPerPacket  = 160; //240;
    dstFormat.mBytesPerFrame    = 0;   // compressed format
    dstFormat.mBitsPerChannel   = 0;   // compressed format
    dstFormat.mFormatFlags      = 0;

    // Source format: 16-bit signed integer mono PCM from the microphone
    srcFormat.mSampleRate       = SAMPLE_RATE;   // This is 48000
    srcFormat.mFormatID         = kAudioFormatLinearPCM;
    srcFormat.mFormatFlags      = kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
    srcFormat.mFramesPerPacket  = 1;
    srcFormat.mChannelsPerFrame = 1;
    srcFormat.mBitsPerChannel   = 16;
    srcFormat.mBytesPerPacket   = 2;
    srcFormat.mBytesPerFrame    = 2;
    srcFormat.mReserved         = 0;

    status = AudioConverterNew(&srcFormat, &dstFormat, &audioConverterDecode);
    [self hasError:status:__FILE__:__LINE__];
}
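One thing worth checking here (this snippet is my own sketch, not part of the original post): instead of hard-coding mBytesPerPacket and mFramesPerPacket, you can ask Core Audio to fill in the canonical iLBC packet layout via kAudioFormatProperty_FormatInfo and log what it returns, which is also what the working code below starts from:

#import <AudioToolbox/AudioToolbox.h>

// Sketch: let Core Audio fill in the iLBC packet layout and inspect it.
AudioStreamBasicDescription ilbcFormat = {0};
ilbcFormat.mSampleRate       = 8000.0;
ilbcFormat.mFormatID         = kAudioFormatiLBC;
ilbcFormat.mChannelsPerFrame = 1;

UInt32 infoSize = sizeof(ilbcFormat);
OSStatus infoStatus = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
                                             0, NULL, &infoSize, &ilbcFormat);
if (infoStatus == noErr) {
    // 20 ms mode: 38 bytes / 160 frames per packet;
    // 30 ms mode: 50 bytes / 240 frames per packet.
    NSLog(@"iLBC packet: %u bytes, %u frames",
          (unsigned)ilbcFormat.mBytesPerPacket,
          (unsigned)ilbcFormat.mFramesPerPacket);
}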

I changed some of the code from this and found a solution.

createAudioConvert:

AudioStreamBasicDescription inputFormat = *(CMAudioFormatDescriptionGetStreamBasicDescription(CMSampleBufferGetFormatDescription(sampleBuffer))); // input audio format
AudioStreamBasicDescription outputFormat;

memset(&outputFormat, 0, sizeof(outputFormat));
outputFormat.mSampleRate       = 8000;
outputFormat.mFormatID         = kAudioFormatiLBC;
outputFormat.mChannelsPerFrame = 1;

// Use the AudioFormat API to fill out the rest of the description
UInt32 size = sizeof(outputFormat);
AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &outputFormat);

// iLBC 30 ms mode: 50 bytes / 240 frames per packet
outputFormat.mBytesPerPacket  = 50;
outputFormat.mFramesPerPacket = 240;

AudioClassDescription *desc = [self getAudioClassDescriptionWithType:kAudioFormatiLBC
                                                    fromManufacturer:kAppleSoftwareAudioCodecManufacturer];
if (AudioConverterNewSpecific(&inputFormat, &outputFormat, 1, desc, &m_converter) != noErr)
{
    printf("AudioConverterNewSpecific failed\n");
    return NO;
}

return YES;
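The getAudioClassDescriptionWithType:fromManufacturer: helper is not shown in the post. A common implementation (this is my sketch of the usual approach, not the poster's code) enumerates the installed encoders via kAudioFormatProperty_Encoders and picks the one matching the requested format and manufacturer:

- (AudioClassDescription *)getAudioClassDescriptionWithType:(UInt32)type
                                           fromManufacturer:(UInt32)manufacturer
{
    static AudioClassDescription desc;

    UInt32 encoderSpecifier = type;
    UInt32 size;
    OSStatus status = AudioFormatGetPropertyInfo(kAudioFormatProperty_Encoders,
                                                 sizeof(encoderSpecifier),
                                                 &encoderSpecifier,
                                                 &size);
    if (status != noErr) {
        return nil;
    }

    unsigned int count = size / sizeof(AudioClassDescription);
    AudioClassDescription descriptions[count];
    status = AudioFormatGetProperty(kAudioFormatProperty_Encoders,
                                    sizeof(encoderSpecifier),
                                    &encoderSpecifier,
                                    &size,
                                    descriptions);
    if (status != noErr) {
        return nil;
    }

    // Pick the encoder whose subtype and manufacturer match the request.
    for (unsigned int i = 0; i < count; i++) {
        if (type == descriptions[i].mSubType &&
            manufacturer == descriptions[i].mManufacturer) {
            memcpy(&desc, &descriptions[i], sizeof(desc));
            return &desc;
        }
    }
    return nil;
}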

The encoder (adapted from an AAC example, hence the aacData/aacLen names):

if ([self createAudioConvert:sampleBuffer] != YES)
{
    return NO;
}

CMBlockBufferRef blockBuffer = nil;
AudioBufferList  inBufferList;
if (CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &inBufferList, sizeof(inBufferList), NULL, NULL, 0, &blockBuffer) != noErr)
{
    printf("CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer failed\n");
    return NO;
}

// aacData / *aacLen are the caller-supplied output buffer and its size
AudioBufferList outBufferList;
outBufferList.mNumberBuffers              = 1;
outBufferList.mBuffers[0].mNumberChannels = 1;
outBufferList.mBuffers[0].mDataByteSize   = *aacLen;
outBufferList.mBuffers[0].mData           = aacData;

UInt32 outputDataPacketSize = 1;

OSStatus err = AudioConverterFillComplexBuffer(m_converter, inputDataProc, &inBufferList, &outputDataPacketSize, &outBufferList, NULL);
printf("AudioConverterFillComplexBuffer\n");

if (err != noErr)
{
    printf("AudioConverterFillComplexBuffer failed\n");
    return NO;
}

*aacLen = outBufferList.mBuffers[0].mDataByteSize;
CFRelease(blockBuffer);
return YES;

The callback function:

OSStatus inputDataProc(AudioConverterRef inConverter, UInt32 *ioNumberDataPackets, AudioBufferList *ioData, AudioStreamPacketDescription **outDataPacketDescription, void *inUserData)
{
    AudioBufferList bufferList = *(AudioBufferList *)inUserData;

    // Hand the captured PCM straight to the converter
    ioData->mNumberBuffers              = 1;
    ioData->mBuffers[0].mNumberChannels = 1;
    ioData->mBuffers[0].mData           = bufferList.mBuffers[0].mData;
    ioData->mBuffers[0].mDataByteSize   = bufferList.mBuffers[0].mDataByteSize;

    // 16-bit mono PCM is 2 bytes per packet, so report how many packets we can supply
    UInt32 maxPackets = bufferList.mBuffers[0].mDataByteSize / 2;
    *ioNumberDataPackets = maxPackets;

    return noErr;
}
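For completeness, here is how I would expect the encoder routine above to be driven from the capture side; the wrapper method name and buffer size are my assumptions, not part of the original post:

#import <AVFoundation/AVFoundation.h>

// Hypothetical caller: an AVCaptureAudioDataOutput delegate feeding the encoder.
- (void)captureOutput:(AVCaptureOutput *)output
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    char   encoded[1024];               // room for one 50-byte iLBC packet
    UInt32 encodedLen = sizeof(encoded);

    // encodeSampleBuffer:aacData:aacLen: is assumed to wrap the encoder code above.
    if ([self encodeSampleBuffer:sampleBuffer aacData:encoded aacLen:&encodedLen]) {
        // encoded / encodedLen now hold one iLBC packet, ready to stream
        // to the remote server.
    }
}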