How to access individual channels with core audio from an external audio interface
I'm learning Core Audio and have been working through some of the examples in Apple's documentation, figuring out how things are (and aren't) meant to be set up. So far I'm able to connect to the default audio input device and play it back through the default output device. I have a 2-channel interface connected, and I can route its input straight through to the output.
However, I've been searching through the API references and examples, and I can't find anything substantial on accessing the individual input channels of my interface.
I'm able to hack it and extract samples from the AudioBufferList inside my render callback function and manipulate them that way, but I'm wondering whether there's a proper way, or a more official way, of accessing the data from each individual input channel.
Edit:
Here is the user data struct from the example I'm working from:
typedef struct MyAUGraphPlayer
{
    AudioStreamBasicDescription streamFormat;
    AUGraph graph;
    AudioUnit inputUnit;
    AudioUnit outputUnit;
    AudioBufferList * inputBuffer;
    CARingBuffer * ringBuffer;
    Float64 firstInputSampleTime;
    Float64 firstOutputSampleTime;
    Float64 inToOutSampleTimeOffset;
} MyAUGraphPlayer;
This is how I set up the input unit:
void CreateInputUnit(MyAUGraphPlayer * player)
{
    //Generate a description that matches the audio HAL
    AudioComponentDescription inputcd = {0};
    inputcd.componentType = kAudioUnitType_Output;
    inputcd.componentSubType = kAudioUnitSubType_HALOutput;
    inputcd.componentManufacturer = kAudioUnitManufacturer_Apple;

    UInt32 deviceCount = AudioComponentCount(&inputcd);
    printf("Found %d devices\n", deviceCount);

    AudioComponent comp = AudioComponentFindNext(NULL, &inputcd);
    if(comp == NULL) {
        printf("Can't get output unit\n");
        exit(1);
    }

    OSStatus status;
    status = AudioComponentInstanceNew(comp, &player->inputUnit);
    assert(status == noErr);

    //Explicitly enable input and disable output
    UInt32 disableFlag = 0;
    UInt32 enableFlag = 1;
    AudioUnitScope outputBus = 0;
    AudioUnitScope inputBus = 1;

    status = AudioUnitSetProperty(player->inputUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Input,
                                  inputBus,
                                  &enableFlag,
                                  sizeof(enableFlag));
    assert(status == noErr);

    status = AudioUnitSetProperty(player->inputUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Output,
                                  outputBus,
                                  &disableFlag,
                                  sizeof(disableFlag));
    assert(status == noErr);
    printf("Finished enabling input and disabling output on an inputUnit\n");

    //Get the default audio input device
    AudioDeviceID defaultDevice = kAudioObjectUnknown;
    UInt32 propertySize = sizeof(defaultDevice);
    AudioObjectPropertyAddress defaultDeviceProperty;
    defaultDeviceProperty.mSelector = kAudioHardwarePropertyDefaultInputDevice;
    defaultDeviceProperty.mScope = kAudioObjectPropertyScopeGlobal;
    defaultDeviceProperty.mElement = kAudioObjectPropertyElementMaster;

    status = AudioObjectGetPropertyData(kAudioObjectSystemObject,
                                        &defaultDeviceProperty,
                                        0,
                                        NULL,
                                        &propertySize,
                                        &defaultDevice);
    assert(status == noErr);

    //Set the current device property of the AUHAL
    status = AudioUnitSetProperty(player->inputUnit,
                                  kAudioOutputUnitProperty_CurrentDevice,
                                  kAudioUnitScope_Global,
                                  outputBus,
                                  &defaultDevice,
                                  sizeof(defaultDevice));
    assert(status == noErr);

    //Get the AudioStreamBasicDescription from the input AUHAL
    propertySize = sizeof(AudioStreamBasicDescription);
    status = AudioUnitGetProperty(player->inputUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  inputBus,
                                  &player->streamFormat,
                                  &propertySize);
    assert(status == noErr);

    //Adopt the hardware input sample rate
    AudioStreamBasicDescription deviceFormat;
    status = AudioUnitGetProperty(player->inputUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Input,
                                  inputBus,
                                  &deviceFormat,
                                  &propertySize);
    assert(status == noErr);
    player->streamFormat.mSampleRate = deviceFormat.mSampleRate;
    printf("Sample Rate %f...\n", deviceFormat.mSampleRate);

    propertySize = sizeof(AudioStreamBasicDescription);
    status = AudioUnitSetProperty(player->inputUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  inputBus,
                                  &player->streamFormat,
                                  propertySize);
    assert(status == noErr);

    //Calculate the capture buffer size for an I/O unit
    UInt32 bufferSizeFrames = 0;
    propertySize = sizeof(UInt32);
    status = AudioUnitGetProperty(player->inputUnit,
                                  kAudioDevicePropertyBufferFrameSize,
                                  kAudioUnitScope_Global,
                                  0,
                                  &bufferSizeFrames,
                                  &propertySize);
    assert(status == noErr);
    UInt32 bufferSizeBytes = bufferSizeFrames * sizeof(Float32);

    //Create an AudioBufferList to receive capture data
    UInt32 propSize = offsetof(AudioBufferList, mBuffers[0]) +
                      (sizeof(AudioBuffer) * player->streamFormat.mChannelsPerFrame);

    //Malloc the buffer list
    player->inputBuffer = (AudioBufferList *) malloc(propSize);
    player->inputBuffer->mNumberBuffers = player->streamFormat.mChannelsPerFrame;

    //Pre-malloc the buffers for the AudioBufferList
    for(UInt32 i = 0; i < player->inputBuffer->mNumberBuffers; i++){
        player->inputBuffer->mBuffers[i].mNumberChannels = 1;
        player->inputBuffer->mBuffers[i].mDataByteSize = bufferSizeBytes;
        player->inputBuffer->mBuffers[i].mData = malloc(bufferSizeBytes);
    }

    //Create the ring buffer
    player->ringBuffer = new CARingBuffer();
    player->ringBuffer->Allocate(player->streamFormat.mChannelsPerFrame,
                                 player->streamFormat.mBytesPerFrame,
                                 bufferSizeFrames * 3);
    printf("Number of channels: %d\n", player->streamFormat.mChannelsPerFrame);
    printf("Number of buffers: %d\n", player->inputBuffer->mNumberBuffers);

    //Set the render proc to supply samples
    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc = InputRenderProc;
    callbackStruct.inputProcRefCon = player;
    status = AudioUnitSetProperty(player->inputUnit,
                                  kAudioOutputUnitProperty_SetInputCallback,
                                  kAudioUnitScope_Global,
                                  0,
                                  &callbackStruct,
                                  sizeof(callbackStruct));
    assert(status == noErr);

    status = AudioUnitInitialize(player->inputUnit);
    assert(status == noErr);

    player->firstInputSampleTime = -1;
    player->inToOutSampleTimeOffset = -1;
    printf("Finished CreateInputUnit()\n");
}
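The InputRenderProc that the callback struct above points to isn't reproduced here; a minimal sketch of what it has to do in this example, assuming the usual AudioUnitRender-into-CARingBuffer pattern that the rest of the code implies, would look like this:
OSStatus InputRenderProc(void * inRefCon,
                         AudioUnitRenderActionFlags * ioActionFlags,
                         const AudioTimeStamp * inTimeStamp,
                         UInt32 inBusNumber,
                         UInt32 inNumberFrames,
                         AudioBufferList * ioData)
{
    MyAUGraphPlayer * player = (MyAUGraphPlayer *) inRefCon;

    //Remember the first input sample time for the in/out offset calculation
    if(player->firstInputSampleTime < 0.0) {
        player->firstInputSampleTime = inTimeStamp->mSampleTime;
        if((player->firstOutputSampleTime > -1.0) &&
           (player->inToOutSampleTimeOffset < 0.0)) {
            player->inToOutSampleTimeOffset = player->firstInputSampleTime - player->firstOutputSampleTime;
        }
    }

    //Pull the captured frames from the AUHAL into the pre-allocated buffer list
    OSStatus inputProcErr = AudioUnitRender(player->inputUnit,
                                            ioActionFlags,
                                            inTimeStamp,
                                            inBusNumber,
                                            inNumberFrames,
                                            player->inputBuffer);

    //Store them in the ring buffer so the output callback can fetch them later
    if(inputProcErr == noErr) {
        inputProcErr = player->ringBuffer->Store(player->inputBuffer,
                                                 inNumberFrames,
                                                 inTimeStamp->mSampleTime);
    }
    return inputProcErr;
}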
And here is the render callback function where I access the individual buffers:
OSStatus GraphRenderProc(void * inRefCon,
                         AudioUnitRenderActionFlags * ioActionFlags,
                         const AudioTimeStamp * inTimeStamp,
                         UInt32 inBusNumber,
                         UInt32 inNumberFrames,
                         AudioBufferList * ioData)
{
    MyAUGraphPlayer * player = (MyAUGraphPlayer *) inRefCon;

    if(player->firstOutputSampleTime < 0.0) {
        player->firstOutputSampleTime = inTimeStamp->mSampleTime;
        if((player->firstInputSampleTime > -1.0) &&
           (player->inToOutSampleTimeOffset < 0.0)) {
            player->inToOutSampleTimeOffset = player->firstInputSampleTime - player->firstOutputSampleTime;
        }
    }

    //Copy samples out of ring buffer
    OSStatus outputProcErr = noErr;
    outputProcErr = player->ringBuffer->Fetch(ioData,
                                              inNumberFrames,
                                              inTimeStamp->mSampleTime + player->inToOutSampleTimeOffset);

    //BUT THIS IS NOT HOW IT IS SUPPOSED TO WORK
    Float32 * data = (Float32 *) ioData->mBuffers[0].mData;
    Float32 * data2 = (Float32 *) ioData->mBuffers[1].mData;
    for(UInt32 frame = 0; frame < inNumberFrames; frame++)
    {
        Float32 sample = data[frame] + data2[frame];
        data[frame] = data2[frame] = sample;
    }
    return outputProcErr;
}
Although your code looks overly complicated for the task it seems to manage, I'll try to answer your question:
There's nothing wrong with the concept of retrieving the sample data inside your callback, but it wouldn't be sufficient on its own when dealing with a multichannel audio device. How many channels a device has, and what its channel layout, formats, etc. are, you query through the AudioStreamBasicDescription of the given device. This property serves to initialize the rest of your processing chain. You allocate the audio buffers at initialization, or let the program do it for you (please read the documentation).
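For example, here is a minimal sketch of counting a device's input channels directly from the HAL via kAudioDevicePropertyStreamConfiguration (deviceID is assumed to be a valid AudioDeviceID you obtained elsewhere, e.g. the default-input-device query shown in your code):
//Describe the property: the input stream configuration of the device
AudioObjectPropertyAddress address;
address.mSelector = kAudioDevicePropertyStreamConfiguration;
address.mScope    = kAudioDevicePropertyScopeInput;
address.mElement  = kAudioObjectPropertyElementMaster;

//Ask how big the configuration is, then fetch it
UInt32 dataSize = 0;
OSStatus status = AudioObjectGetPropertyDataSize(deviceID, &address, 0, NULL, &dataSize);
assert(status == noErr);

AudioBufferList * config = (AudioBufferList *) malloc(dataSize);
status = AudioObjectGetPropertyData(deviceID, &address, 0, NULL, &dataSize, config);
assert(status == noErr);

//Sum the channels across all input streams of the device
UInt32 totalInputChannels = 0;
for(UInt32 i = 0; i < config->mNumberBuffers; i++)
    totalInputChannels += config->mBuffers[i].mNumberChannels;
printf("Device has %u input channels\n", (unsigned int)totalInputChannels);
free(config);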
If you feel more comfortable using extra buffers to copy into just for the data manipulation and DSP, you can manage it inside the callback like this (simplified code):
Float32 buf[streamFormat.mChannelsPerFrame][inNumberFrames];

for(UInt32 ch = 0; ch < streamFormat.mChannelsPerFrame; ch++){
    Float32 * data = (Float32 *)ioData->mBuffers[ch].mData;
    memcpy(buf[ch], data, inNumberFrames * sizeof(Float32));
}
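Once each channel sits in its own row of buf, per-channel processing is just a matter of indexing. As a hypothetical continuation of the sketch above, applying an independent gain to each channel and writing the result back (gains[] is an assumed parameter array, not part of the code above):
//gains[] is a hypothetical per-channel gain array supplied by the host code
for(UInt32 ch = 0; ch < streamFormat.mChannelsPerFrame; ch++){
    for(UInt32 frame = 0; frame < inNumberFrames; frame++){
        buf[ch][frame] *= gains[ch];
    }
    //Write the processed channel back into the AudioBufferList
    memcpy(ioData->mBuffers[ch].mData, buf[ch], inNumberFrames * sizeof(Float32));
}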