OSStatus 错误 -50(参数无效)AudioQueueNewInput 在 iOS 上录制音频
OSStatus error -50 (invalid parameters) AudioQueueNewInput recording audio on iOS
我已经在互联网上搜索了很长时间,试图找到这个错误的原因,但我被卡住了。我一直在关注有关使用音频服务录制音频的 Apple Developer 文档,但无论我做什么,我都会收到此错误。
我可以使用 AVAudioRecorder
将音频录制成任何格式,但我的最终目标是从输入数据中获取标准化的浮点数组,以便对其应用 FFT(抱歉我的菜鸟措辞我对音频编程还很陌生)。
这是我的代码:
- (void)beginRecording
{
// Initialise session
[[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayAndRecord error:nil];
[[AVAudioSession sharedInstance] setActive:YES error:nil];
state.dataFormat.mFormatID = kAudioFormatLinearPCM;
state.dataFormat.mSampleRate = 8000.0f;
state.dataFormat.mChannelsPerFrame = 1;
state.dataFormat.mBitsPerChannel = 16;
state.dataFormat.mBytesPerPacket = state.dataFormat.mChannelsPerFrame * sizeof(SInt16);
state.dataFormat.mFramesPerPacket = 1;
//AudioFileTypeID fileID = kAudioFileAIFFType;
state.dataFormat.mFormatFlags = kLinearPCMFormatFlagIsBigEndian | kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
OSStatus err = AudioQueueNewInput(&state.dataFormat, handleInputBuffer, &state, CFRunLoopGetMain(), kCFRunLoopCommonModes, 0, &state.queue);
printf("%i", err); // this is always -50 i.e. invalid parameters error
deriveBufferSize(state.queue, state.dataFormat, 0.5, &state.bufferByteState);
for (int i = 0; i < kNumberOfBuffers; i++) {
AudioQueueAllocateBuffer(state.queue, state.bufferByteState, &state.buffers[i]);
AudioQueueEnqueueBuffer(state.queue, state.buffers[i], 0, NULL);
}
state.currentPacket = 0;
state.isRunning = YES;
AudioQueueStart(state.queue, NULL);
}
- (void)endRecording
{
AudioQueueStop(state.queue, YES);
state.isRunning = NO;
AudioQueueDispose(state.queue, YES);
// Close the audio file here...
}
#pragma mark - CoreAudio
// Core Audio Callback Function
static void handleInputBuffer(void *agData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer, const AudioTimeStamp *inStartTime, UInt32 inNumPackets, const AudioStreamPacketDescription *inPacketDesc) {
AQRecorderState *state = (AQRecorderState *)agData;
if (inNumPackets == 0 && state->dataFormat.mBytesPerPacket != 0) {
inNumPackets = inBuffer->mAudioDataByteSize / state->dataFormat.mBytesPerPacket;
}
printf("Called");
/*
if (AudioFileWritePackets(state->audioFile, false, inBuffer->mAudioDataByteSize, inPacketDesc, state->currentPacket, &inNumPackets, inBuffer->mAudioData) == noErr) {
state->currentPacket += inNumPackets;
}
*/
if (state->isRunning) {
AudioQueueEnqueueBuffer(state->queue, inBuffer, 0, NULL);
}
}
void deriveBufferSize(AudioQueueRef audioQueue, AudioStreamBasicDescription ABSDescription, Float64 secs, UInt32 *outBufferSize) {
static const int maxBufferSize = 0x50000;
int maxPacketSize = ABSDescription.mBytesPerPacket;
if (maxPacketSize == 0) {
UInt32 maxVBRPacketSize = sizeof(maxPacketSize);
AudioQueueGetProperty(audioQueue, kAudioConverterPropertyMaximumOutputPacketSize, &maxPacketSize, &maxVBRPacketSize);
}
Float64 numBytesForTime = ABSDescription.mSampleRate * maxPacketSize * secs;
UInt32 x = (numBytesForTime < maxBufferSize ? numBytesForTime : maxBufferSize);
*outBufferSize = x;
}
如果有人知道这里发生了什么,我将不胜感激。 Here is the apple docs for the error
你得到 -50 (kAudio_ParamError
) 因为你还没有初始化 AudioStreamBasicDescription
的 mBytesPerFrame
字段:
asbd.mBytesPerFrame = asbd.mFramesPerPacket*asbd.mBytesPerPacket;
其中 asbd
是 state.dataFormat
的缩写。在你的情况下 mBytesPerFrame = 2
.
我也不会指定 kLinearPCMFormatFlagIsBigEndian
,让记录器 return 你本机字节顺序样本。
我已经在互联网上搜索了很长时间,试图找到这个错误的原因,但我被卡住了。我一直在关注有关使用音频服务录制音频的 Apple Developer 文档,但无论我做什么,我都会收到此错误。
我可以使用 AVAudioRecorder
将音频录制成任何格式,但我的最终目标是从输入数据中获取标准化的浮点数组,以便对其应用 FFT(抱歉我的菜鸟措辞我对音频编程还很陌生)。
这是我的代码:
- (void)beginRecording
{
// Initialise session
[[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayAndRecord error:nil];
[[AVAudioSession sharedInstance] setActive:YES error:nil];
state.dataFormat.mFormatID = kAudioFormatLinearPCM;
state.dataFormat.mSampleRate = 8000.0f;
state.dataFormat.mChannelsPerFrame = 1;
state.dataFormat.mBitsPerChannel = 16;
state.dataFormat.mBytesPerPacket = state.dataFormat.mChannelsPerFrame * sizeof(SInt16);
state.dataFormat.mFramesPerPacket = 1;
//AudioFileTypeID fileID = kAudioFileAIFFType;
state.dataFormat.mFormatFlags = kLinearPCMFormatFlagIsBigEndian | kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
OSStatus err = AudioQueueNewInput(&state.dataFormat, handleInputBuffer, &state, CFRunLoopGetMain(), kCFRunLoopCommonModes, 0, &state.queue);
printf("%i", err); // this is always -50 i.e. invalid parameters error
deriveBufferSize(state.queue, state.dataFormat, 0.5, &state.bufferByteState);
for (int i = 0; i < kNumberOfBuffers; i++) {
AudioQueueAllocateBuffer(state.queue, state.bufferByteState, &state.buffers[i]);
AudioQueueEnqueueBuffer(state.queue, state.buffers[i], 0, NULL);
}
state.currentPacket = 0;
state.isRunning = YES;
AudioQueueStart(state.queue, NULL);
}
- (void)endRecording
{
AudioQueueStop(state.queue, YES);
state.isRunning = NO;
AudioQueueDispose(state.queue, YES);
// Close the audio file here...
}
#pragma mark - CoreAudio
// Core Audio Callback Function
static void handleInputBuffer(void *agData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer, const AudioTimeStamp *inStartTime, UInt32 inNumPackets, const AudioStreamPacketDescription *inPacketDesc) {
AQRecorderState *state = (AQRecorderState *)agData;
if (inNumPackets == 0 && state->dataFormat.mBytesPerPacket != 0) {
inNumPackets = inBuffer->mAudioDataByteSize / state->dataFormat.mBytesPerPacket;
}
printf("Called");
/*
if (AudioFileWritePackets(state->audioFile, false, inBuffer->mAudioDataByteSize, inPacketDesc, state->currentPacket, &inNumPackets, inBuffer->mAudioData) == noErr) {
state->currentPacket += inNumPackets;
}
*/
if (state->isRunning) {
AudioQueueEnqueueBuffer(state->queue, inBuffer, 0, NULL);
}
}
void deriveBufferSize(AudioQueueRef audioQueue, AudioStreamBasicDescription ABSDescription, Float64 secs, UInt32 *outBufferSize) {
static const int maxBufferSize = 0x50000;
int maxPacketSize = ABSDescription.mBytesPerPacket;
if (maxPacketSize == 0) {
UInt32 maxVBRPacketSize = sizeof(maxPacketSize);
AudioQueueGetProperty(audioQueue, kAudioConverterPropertyMaximumOutputPacketSize, &maxPacketSize, &maxVBRPacketSize);
}
Float64 numBytesForTime = ABSDescription.mSampleRate * maxPacketSize * secs;
UInt32 x = (numBytesForTime < maxBufferSize ? numBytesForTime : maxBufferSize);
*outBufferSize = x;
}
如果有人知道这里发生了什么,我将不胜感激。 Here is the apple docs for the error
你得到 -50 (kAudio_ParamError
) 因为你还没有初始化 AudioStreamBasicDescription
的 mBytesPerFrame
字段:
asbd.mBytesPerFrame = asbd.mFramesPerPacket*asbd.mBytesPerPacket;
其中 asbd
是 state.dataFormat
的缩写。在你的情况下 mBytesPerFrame = 2
.
我也不会指定 kLinearPCMFormatFlagIsBigEndian
,让记录器 return 你本机字节顺序样本。