How can I write to an AVAssetWriter my custom float / shorts array?
So I used an AVAssetReader to get a CMSampleBufferRef, and pulled the sample values out of its CMBlockBufferRef data. I then ran those samples through a custom filter, and now I have an array of shorts that I want to write back to a file with an AVAssetWriter.

My question is: how do I recreate a CMSampleBufferRef and a CMBlockBufferRef to send to the AVAssetWriter?
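For context, this is roughly how I pull the samples out on the reading side (a sketch; it assumes the reader output is 16-bit interleaved linear PCM and that the block buffer is contiguous):

CMSampleBufferRef sampleBuffer = [self.assetReaderAudioOutput copyNextSampleBuffer];
CMBlockBufferRef blockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
size_t lengthAtOffset = 0;
size_t totalLength = 0;
char *rawData = NULL;
// Get a pointer to the raw bytes; if lengthAtOffset != totalLength the block buffer
// is not contiguous and CMBlockBufferCopyDataBytes would be needed instead
OSStatus err = CMBlockBufferGetDataPointer(blockBuffer, 0, &lengthAtOffset, &totalLength, &rawData);
if (err == kCMBlockBufferNoErr) {
    SInt16 *samples = (SInt16 *)rawData;
    size_t sampleCount = totalLength / sizeof(SInt16);
    // ... apply the custom filter to samples[0 .. sampleCount-1] ...
}
CFRelease(sampleBuffer);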
Take a look at this Apple developer page.
Edit:
Per your comments, to create a sample buffer directly from a shorts array you should use the Core Media framework, namely CMSampleBufferCreate:
- put the sample data from the shorts array into an AudioBufferList
- convert that AudioBufferList into a CMSampleBuffer.
For example:
OSStatus status = noErr;
CMItemCount framesToProcess = 8192;
unsigned long sizeInBytes = framesToProcess * sizeof(SInt16);

// Wrap the filtered shorts array in a single-buffer AudioBufferList (mono, interleaved)
AudioBufferList *myAudioBufferList = (AudioBufferList *)calloc(1, offsetof(AudioBufferList, mBuffers) + sizeof(AudioBuffer));
myAudioBufferList->mNumberBuffers = 1;
myAudioBufferList->mBuffers[0].mNumberChannels = 1;
myAudioBufferList->mBuffers[0].mData = JoãoSamplesArrayPointer;   // your filtered SInt16 samples
myAudioBufferList->mBuffers[0].mDataByteSize = (UInt32)sizeInBytes;

// Sample buffer in (from the asset reader) -- you already have this
CMSampleBufferRef sampleBufferIn = [self.assetReaderAudioOutput copyNextSampleBuffer];
CMAudioFormatDescriptionRef format = CMSampleBufferGetFormatDescription(sampleBufferIn);
const AudioStreamBasicDescription *assetReaderOutputASBD = CMAudioFormatDescriptionGetStreamBasicDescription(format);

// Set up the output sample buffer: one timing entry, one frame per sample,
// presentation time carried over from the input buffer
CMSampleBufferRef sampleBufferOut = NULL;
CMSampleTimingInfo timing = { CMTimeMake(1, (int32_t)assetReaderOutputASBD->mSampleRate),
                              CMSampleBufferGetPresentationTimeStamp(sampleBufferIn),
                              kCMTimeInvalid };

// Create a format description from the reader's ASBD
status = CMAudioFormatDescriptionCreate(kCFAllocatorDefault, assetReaderOutputASBD, 0, NULL, 0, NULL, NULL, &format);

// Create the (still empty) sample buffer
status = CMSampleBufferCreate(kCFAllocatorDefault, NULL, false, NULL, NULL, format, framesToProcess, 1, &timing, 0, NULL, &sampleBufferOut);

// Attach the data from the AudioBufferList to the sample buffer
status = CMSampleBufferSetDataBufferFromAudioBufferList(sampleBufferOut, kCFAllocatorDefault, kCFAllocatorDefault, 0, myAudioBufferList);

// Write to the asset writer's audio input
BOOL success = [self.assetWriterAudioInput appendSampleBuffer:sampleBufferOut];

// Clean up
CFRelease(sampleBufferOut);
CFRelease(format);
CFRelease(sampleBufferIn);
free(myAudioBufferList);
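For completeness, here is a rough sketch of how the assetWriterAudioInput used above might be configured so its output settings match the 16-bit mono PCM samples. The exact settings and self.assetWriter itself are assumptions, not part of the code above, and linear PCM can only be written to container types that support it (e.g. CAF or WAVE):

NSDictionary *outputSettings = @{ AVFormatIDKey: @(kAudioFormatLinearPCM),
                                  AVSampleRateKey: @44100.0,   // match the source sample rate
                                  AVNumberOfChannelsKey: @1,
                                  AVLinearPCMBitDepthKey: @16,
                                  AVLinearPCMIsFloatKey: @NO,
                                  AVLinearPCMIsBigEndianKey: @NO,
                                  AVLinearPCMIsNonInterleaved: @NO };
// self.assetWriter is assumed to be an AVAssetWriter created with a PCM-capable
// file type such as AVFileTypeCoreAudioFormat
self.assetWriterAudioInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeAudio
                                                                outputSettings:outputSettings];
self.assetWriterAudioInput.expectsMediaDataInRealTime = NO;
[self.assetWriter addInput:self.assetWriterAudioInput];
// ... then the usual [self.assetWriter startWriting] and
// [self.assetWriter startSessionAtSourceTime:...] before appending sample buffers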