AUGraph FormatConverter (AUConverter) render notify contains NULL ioData buffer
I'm working on an iOS project where I need to capture input from the microphone and convert it to ULaw (to send out as a data stream). I'm using an AUGraph with converter nodes to do this. The graph is created and initialized successfully, but in my render notify callback the ioData buffer always contains NULL, even though inNumberFrames contains a value of 93. I thought it might be related to the format converter's buffer being sized incorrectly, but I can't figure out what is going on.
The code is as follows:
OSStatus status;
// ************************** DEFINE AUDIO STREAM FORMATS ******************************
double currentSampleRate;
currentSampleRate = [[AVAudioSession sharedInstance] sampleRate];
// Describe stream format
AudioStreamBasicDescription streamAudioFormat = {0};
streamAudioFormat.mSampleRate = 8000.00;
streamAudioFormat.mFormatID = kAudioFormatULaw;
streamAudioFormat.mFormatFlags = kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
streamAudioFormat.mFramesPerPacket = 1;
streamAudioFormat.mChannelsPerFrame = 1;
streamAudioFormat.mBitsPerChannel = 8;
streamAudioFormat.mBytesPerPacket = 1;
streamAudioFormat.mBytesPerFrame = streamAudioFormat.mBytesPerPacket * streamAudioFormat.mFramesPerPacket;
// ************************** SETUP SEND AUDIO ******************************
AUNode ioSendNode;
AUNode convertToULAWNode;
AUNode convertToLPCMNode;
AudioUnit convertToULAWUnit;
AudioUnit convertToLPCMUnit;
status = NewAUGraph(&singleChannelSendGraph);
if (status != noErr)
{
NSLog(@"Unable to create send audio graph.");
return;
}
AudioComponentDescription ioDesc = {0};
ioDesc.componentType = kAudioUnitType_Output;
ioDesc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
ioDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
ioDesc.componentFlags = 0;
ioDesc.componentFlagsMask = 0;
status = AUGraphAddNode(singleChannelSendGraph, &ioDesc, &ioSendNode);
if (status != noErr)
{
NSLog(@"Unable to add IO node.");
return;
}
AudioComponentDescription converterDesc = {0};
converterDesc.componentType = kAudioUnitType_FormatConverter;
converterDesc.componentSubType = kAudioUnitSubType_AUConverter;
converterDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
converterDesc.componentFlags = 0;
converterDesc.componentFlagsMask = 0;
status = AUGraphAddNode(singleChannelSendGraph, &converterDesc, &convertToULAWNode);
if (status != noErr)
{
NSLog(@"Unable to add ULAW converter node.");
return;
}
status = AUGraphAddNode(singleChannelSendGraph, &converterDesc, &convertToLPCMNode);
if (status != noErr)
{
NSLog(@"Unable to add LPCM converter node.");
return;
}
status = AUGraphOpen(singleChannelSendGraph);
if (status != noErr)
{
return;
}
// get the io audio unit
status = AUGraphNodeInfo(singleChannelSendGraph, ioSendNode, NULL, &ioSendUnit);
if (status != noErr)
{
NSLog(@"Unable to get IO unit.");
return;
}
UInt32 enableInput = 1;
status = AudioUnitSetProperty (ioSendUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
1, // microphone bus
&enableInput,
sizeof (enableInput)
);
if (status != noErr)
{
return;
}
UInt32 sizeASBD = sizeof(AudioStreamBasicDescription);
AudioStreamBasicDescription ioASBDin;
AudioStreamBasicDescription ioASBDout;
status = AudioUnitGetProperty(ioSendUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &ioASBDin, &sizeASBD);
if (status != noErr)
{
NSLog(@"Unable to get IO stream input format.");
return;
}
status = AudioUnitGetProperty(ioSendUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &ioASBDout, &sizeASBD);
if (status != noErr)
{
NSLog(@"Unable to get IO stream output format.");
return;
}
ioASBDin.mSampleRate = currentSampleRate;
ioASBDout.mSampleRate = currentSampleRate;
status = AudioUnitSetProperty(ioSendUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &ioASBDin, sizeof(ioASBDin));
if (status != noErr)
{
NSLog(@"Unable to set IO stream output format.");
return;
}
status = AudioUnitSetProperty(ioSendUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &ioASBDin, sizeof(ioASBDin));
if (status != noErr)
{
NSLog(@"Unable to set IO stream input format.");
return;
}
// get the converter audio unit
status = AUGraphNodeInfo(singleChannelSendGraph, convertToULAWNode, NULL, &convertToULAWUnit);
if (status != noErr)
{
NSLog(@"Unable to get ULAW converter unit.");
return;
}
status = AudioUnitSetProperty(convertToULAWUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &ioASBDin, sizeof(ioASBDin));
if (status != noErr)
{
NSLog(@"Unable to set ULAW stream input format.");
return;
}
status = AudioUnitSetProperty(convertToULAWUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &streamAudioFormat, sizeof(streamAudioFormat));
if (status != noErr)
{
NSLog(@"Unable to set ULAW stream output format.");
return;
}
// get the converter audio unit
status = AUGraphNodeInfo(singleChannelSendGraph, convertToLPCMNode, NULL, &convertToLPCMUnit);
if (status != noErr)
{
NSLog(@"Unable to get LPCM converter unit.");
return;
}
status = AudioUnitSetProperty(convertToLPCMUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamAudioFormat, sizeof(streamAudioFormat));
if (status != noErr)
{
NSLog(@"Unable to set LPCM stream input format.");
return;
}
status = AudioUnitSetProperty(convertToLPCMUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &ioASBDin, sizeof(ioASBDin));
if (status != noErr)
{
NSLog(@"Unable to set LPCM stream output format.");
return;
}
status = AUGraphConnectNodeInput(singleChannelSendGraph, ioSendNode, 1, convertToULAWNode, 0);
if (status != noErr)
{
NSLog(@"Unable to set ULAW node input.");
return;
}
status = AUGraphConnectNodeInput(singleChannelSendGraph, convertToULAWNode, 0, convertToLPCMNode, 0);
if (status != noErr)
{
NSLog(@"Unable to set LPCM node input.");
return;
}
status = AUGraphConnectNodeInput(singleChannelSendGraph, convertToLPCMNode, 0, ioSendNode, 0);
if (status != noErr)
{
NSLog(@"Unable to set IO node input.");
return;
}
status = AudioUnitAddRenderNotify(convertToULAWUnit, &outputULAWCallback, (__bridge void*)self);
if (status != noErr)
{
NSLog(@"Unable to add ULAW render notify.");
return;
}
status = AUGraphInitialize(singleChannelSendGraph);
if (status != noErr)
{
NSLog(@"Unable to initialize send graph.");
return;
}
CAShow (singleChannelSendGraph);
}
And the graph nodes are initialized as:
Member Nodes:
node 1: 'auou' 'vpio' 'appl', instance 0x7fd5faf8fac0 O I
node 2: 'aufc' 'conv' 'appl', instance 0x7fd5fad05420 O I
node 3: 'aufc' 'conv' 'appl', instance 0x7fd5fad05810 O I
Connections:
node 1 bus 1 => node 2 bus 0 [ 1 ch, 44100 Hz, 'lpcm' (0x0000000C) 16-bit little-endian signed integer]
node 2 bus 0 => node 3 bus 0 [ 1 ch, 8000 Hz, 'ulaw' (0x0000000C) 8 bits/channel, 1 bytes/packet, 1 frames/packet, 1 bytes/frame]
node 3 bus 0 => node 1 bus 0 [ 1 ch, 44100 Hz, 'lpcm' (0x0000000C) 16-bit little-endian signed integer]
The render notify callback:
static OSStatus outputULAWCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData)
{
AudioManager *audioManager = (__bridge AudioManager*)inRefCon;
if ((*ioActionFlags) & kAudioUnitRenderAction_PostRender)
{
if (!audioManager.mute && ioData->mBuffers[0].mData != NULL)
{
TPCircularBufferProduceBytes(audioManager.activeChannel == 0 ? audioManager.channel1StreamOutBufferPtr : audioManager.channel2StreamOutBufferPtr,
ioData->mBuffers[0].mData, ioData->mBuffers[0].mDataByteSize);
// do not want to playback our audio into local speaker
SilenceData(ioData);
}
}
return noErr;
}
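SilenceData (not shown above) is just a small helper; a minimal sketch, assuming all it does is zero every buffer in the AudioBufferList:
#include <string.h> // for memset
// Assumed implementation: zero the buffers so the captured audio is not also
// played back through the local speaker.
static void SilenceData(AudioBufferList *ioData)
{
    for (UInt32 i = 0; i < ioData->mNumberBuffers; i++)
    {
        if (ioData->mBuffers[i].mData != NULL)
        {
            memset(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
        }
    }
}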
Note: if I send the microphone input straight to the output (skipping the converter nodes), I do hear the output, so I know the AUGraph is working.
I also have a receive AUGraph set up that takes ULaw from the stream, runs it through a converter, and plays it through the speaker, and that works fine.
I just can't figure out why the converter fails and returns no data.
Has anyone run into this kind of problem?
UPDATE
So you are calling AUGraphStart elsewhere, but the ulaw converter refuses to do general rate conversion for you :( You can add another rate converter to the graph (a sketch of that option follows the graph dump below), or simply let the vpio unit do it for you. Change this code
ioASBDin.mSampleRate = currentSampleRate; // change me to 8000Hz
ioASBDout.mSampleRate = currentSampleRate; // delete me, I'm ignored
status = AudioUnitSetProperty(ioSendUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &ioASBDin, sizeof(ioASBDin));
into
ioASBDin.mSampleRate = streamAudioFormat.mSampleRate; // a.k.a 8000Hz
status = AudioUnitSetProperty(ioSendUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &ioASBDin, sizeof(ioASBDin));
That will bring the whole graph to 8kHz and give you non-NULL ioData buffers:
AudioUnitGraph 0xCA51000:
Member Nodes:
node 1: 'auou' 'vpio' 'appl', instance 0x7b5bb320 O I
node 2: 'aufc' 'conv' 'appl', instance 0x7c878d50 O I
node 3: 'aufc' 'conv' 'appl', instance 0x7c875eb0 O I
Connections:
node 1 bus 1 => node 2 bus 0 [ 1 ch, 8000 Hz, 'lpcm' (0x0000000C) 16-bit little-endian signed integer]
node 2 bus 0 => node 3 bus 0 [ 1 ch, 8000 Hz, 'ulaw' (0x0000000C) 8 bits/channel, 1 bytes/packet, 1 frames/packet, 1 bytes/frame]
node 3 bus 0 => node 1 bus 0 [ 1 ch, 8000 Hz, 'lpcm' (0x0000000C) 16-bit little-endian signed integer]
CurrentState:
mLastUpdateError=0, eventsToProcess=F, isInitialized=T, isRunning=T (1)
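If you would rather keep the hardware rate on the mic bus and do the rate conversion inside the graph (the first option above), a rough sketch of the extra node is below. The names rateConvertNode, rateConvertUnit and lpcm8k are illustrative only, and you would then set the ulaw converter's input format to lpcm8k instead of ioASBDin:
// Hypothetical extra rate-converter node; add it before AUGraphOpen and fetch
// its AudioUnit afterwards, just like the other converter nodes.
AUNode rateConvertNode;
AudioUnit rateConvertUnit;
status = AUGraphAddNode(singleChannelSendGraph, &converterDesc, &rateConvertNode);
// ... after AUGraphOpen ...
status = AUGraphNodeInfo(singleChannelSendGraph, rateConvertNode, NULL, &rateConvertUnit);
// Input side: the hardware-rate LPCM format coming off the mic bus.
status = AudioUnitSetProperty(rateConvertUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &ioASBDin, sizeof(ioASBDin));
// Output side: the same LPCM layout, resampled to 8000 Hz for the ulaw converter.
AudioStreamBasicDescription lpcm8k = ioASBDin;
lpcm8k.mSampleRate = 8000.0;
status = AudioUnitSetProperty(rateConvertUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &lpcm8k, sizeof(lpcm8k));
// Rewire the front of the chain: mic bus -> rate converter -> ulaw converter.
status = AUGraphConnectNodeInput(singleChannelSendGraph, ioSendNode, 1, rateConvertNode, 0);
status = AUGraphConnectNodeInput(singleChannelSendGraph, rateConvertNode, 0, convertToULAWNode, 0);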
OLD ANSWER
You need to
1. AUGraphStart your graph
2. change your ulaw mSampleRate to 11025, 22050 or 44100
Then you will see non-NULL ioData during the kAudioUnitRenderAction_PostRender phase.
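A minimal sketch of those two steps, reusing the graph and format variables from the question (exactly where you call AUGraphStart is up to your app's flow):
// Point 2: pick one of the rates the ulaw converter accepts, before the
// formats are applied to the converter units.
streamAudioFormat.mSampleRate = 44100.0; // or 11025.0 / 22050.0
// Point 1: start the graph once AUGraphInitialize has succeeded.
status = AUGraphStart(singleChannelSendGraph);
if (status != noErr)
{
    NSLog(@"Unable to start send graph.");
    return;
}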
Converting to 8kHz or even 16kHz ulaw seems like something the audio converter should be able to do. I don't know why it doesn't work, but when you set the sample rate to anything other than the values in point 2, the ulaw converter reports a kAUGraphErr_CannotDoInCurrentContext (-10863) error, which looks like a bug to me.