Offline rendering with a lowpass filter causes aliasing and clipping

I have a buffer of 8 kHz samples and I'm simply trying to apply a lowpass filter to it. That is, I start with a buffer of 8 kHz samples and I want to end up with a buffer of 8 kHz LOWPASSED samples. If I hook up a lowpass unit, connect it to the default output unit, and feed it my buffer, it sounds perfect and is correctly lowpassed. But as soon as I remove the output and call AudioUnitRender on the lowpass audio unit directly, the resulting samples come out aliased and clipped.

#import "EffectMachine.h"
#import <AudioToolbox/AudioToolbox.h>
#import "AudioHelpers.h"
#import "Buffer.h"

@interface EffectMachine ()
@property (nonatomic, strong) Buffer *buffer;
@end

typedef struct EffectPlayer {
    NSUInteger index;
    AudioUnit lowPassUnit;
    __unsafe_unretained Buffer *buffer;
} EffectPlayer;

OSStatus EffectMachineCallbackRenderProc(void *inRefCon,
                                         AudioUnitRenderActionFlags *ioActionFlags,
                                         const AudioTimeStamp *inTimeStamp,
                                         UInt32 inBusNumber,
                                         UInt32 inNumberFrames,
                                         AudioBufferList * ioData);

OSStatus EffectMachineCallbackRenderProc(void *inRefCon,
                                         AudioUnitRenderActionFlags *ioActionFlags,
                                         const AudioTimeStamp *inTimeStamp,
                                         UInt32 inBusNumber,
                                         UInt32 inNumberFrames,
                                         AudioBufferList * ioData) {
    struct EffectPlayer *player = (struct EffectPlayer *)inRefCon;

    for (int i = 0; i < inNumberFrames; i++) {
        float sample;
        if (player->index < player->buffer.size) {
            sample = (float)player->buffer.samples[player->index];
            player->index += 1;
        } else {
            sample = 0;
        }
        ((float *)ioData->mBuffers[0].mData)[i] = sample;
        ((float *)ioData->mBuffers[1].mData)[i] = sample;
    }

    return noErr;
}

@implementation EffectMachine {
    EffectPlayer player;
}

-(instancetype)initWithBuffer:(Buffer *)buffer {
    if (self = [super init]) {
        self.buffer = buffer;
    }
    return self;
}

-(Buffer *)process {
    struct EffectPlayer initialized = {0};
    player        = initialized;
    player.buffer = self.buffer;

    [self setupAudioUnits];
    Buffer *buffer = [self processedBuffer];
    [self cleanup];

    return buffer;
}

-(void)setupAudioUnits {
    AudioComponentDescription lowpasscd = {0};
    lowpasscd.componentType = kAudioUnitType_Effect;
    lowpasscd.componentSubType = kAudioUnitSubType_LowPassFilter;
    lowpasscd.componentManufacturer = kAudioUnitManufacturer_Apple;

    AudioComponent comp = AudioComponentFindNext(NULL, &lowpasscd);
    if (comp == NULL) NSLog(@"can't get lowpass unit");

    AudioComponentInstanceNew(comp, &player.lowPassUnit);

    AURenderCallbackStruct input;
    input.inputProc = EffectMachineCallbackRenderProc;
    input.inputProcRefCon = &player;

    CheckError(AudioUnitSetProperty(player.lowPassUnit,
                                    kAudioUnitProperty_SetRenderCallback,
                                    kAudioUnitScope_Input,
                                    0,
                                    &input,
                                    sizeof(input)),
               "AudioUnitSetProperty for callback failed");

    CheckError(AudioUnitSetParameter(player.lowPassUnit,
                                     kLowPassParam_CutoffFrequency,
                                     kAudioUnitScope_Global,
                                     0,
                                     1500,
                                     0), "AudioUnitSetParameter cutoff for lowpass failed");

    CheckError(AudioUnitSetParameter(player.lowPassUnit,
                                     kLowPassParam_Resonance,
                                     kAudioUnitScope_Global,
                                     0,
                                     0,
                                     0), "AudioUnitSetParameter resonance for lowpass failed");

    CheckError(AudioUnitInitialize(player.lowPassUnit),
               "Couldn't initialize lowpass unit");
}

-(Buffer *)processedBuffer {
    AudioBufferList *bufferlist = malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer)); // room for two AudioBuffers (non-interleaved stereo)
    UInt32 blockSize = 1024;
    float *left = malloc(sizeof(float) * blockSize);
    float *right = malloc(sizeof(float) * blockSize);

    bufferlist->mBuffers[0].mData = left;
    bufferlist->mBuffers[1].mData = right;
    UInt32 size = sizeof(float) * blockSize;

    AudioTimeStamp inTimeStamp;
    memset(&inTimeStamp, 0, sizeof(AudioTimeStamp));
    inTimeStamp.mSampleTime = 0;

    AudioUnitRenderActionFlags flag = 0;

    NSUInteger length = ceil(self.buffer.size / (float)blockSize);

    double *processed = malloc(sizeof(double) * blockSize * length);

    for (int i = 0; i < length; i++) {
        bufferlist->mBuffers[0].mDataByteSize = size;
        bufferlist->mBuffers[1].mDataByteSize = size;
        bufferlist->mNumberBuffers = 2;
        inTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;

        AudioUnitRender(player.lowPassUnit, &flag, &inTimeStamp, 0, blockSize, bufferlist);
        for (NSUInteger j = 0; j < blockSize; j++) {
            processed[j + (blockSize * i)] = left[j];
        }
        inTimeStamp.mSampleTime += blockSize;
    }

    Buffer *buffer = [[Buffer alloc] initWithSamples:processed size:self.buffer.size sampleRate:self.buffer.sampleRate];

    free(bufferlist);
    free(left);
    free(right);
    free(processed);

    return buffer;
}

-(void)cleanup {
    AudioOutputUnitStop(player.lowPassUnit);
    AudioUnitUninitialize(player.lowPassUnit);
    AudioComponentInstanceDispose(player.lowPassUnit);
}

@end
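
For reference, the stream format that AudioUnitRender is actually being asked to fill can be checked on the lowpass unit before rendering. This is a minimal diagnostic sketch, not part of the project code above, and the logging is only illustrative:

// Ask the lowpass unit what format it expects on its input bus. An Apple
// effect unit typically defaults to 44.1 kHz non-interleaved stereo Float32,
// not 8 kHz, which would explain a mismatch when rendering it directly.
AudioStreamBasicDescription fmt = {0};
UInt32 fmtSize = sizeof(fmt);
CheckError(AudioUnitGetProperty(player.lowPassUnit,
                                kAudioUnitProperty_StreamFormat,
                                kAudioUnitScope_Input,
                                0,
                                &fmt,
                                &fmtSize),
           "AudioUnitGetProperty StreamFormat failed");
NSLog(@"lowpass input format: %.0f Hz, %u channel(s), flags 0x%x",
      fmt.mSampleRate, (unsigned)fmt.mChannelsPerFrame, (unsigned)fmt.mFormatFlags);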

If I add a generic output and try to set an 8 kHz ASBD on its input, all I get back is garbage noise. It looks like 0, 0, 0, 0, 0, 17438231945853048031929171968.000000, 0, 0, 0, -2548199532257382185315640279040.000000... Yikes!
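
Roughly, that generic-output attempt looked like the sketch below. It is a reconstruction rather than the exact code I ran, and it assumes the player.lowPassUnit from the listing above; the point is just that an 8 kHz mono float ASBD was forced onto the GenericOutput's input bus and the lowpass was connected upstream of it:

AudioComponentDescription outcd = {0};
outcd.componentType = kAudioUnitType_Output;
outcd.componentSubType = kAudioUnitSubType_GenericOutput;
outcd.componentManufacturer = kAudioUnitManufacturer_Apple;

AudioUnit genericOutput;
AudioComponentInstanceNew(AudioComponentFindNext(NULL, &outcd), &genericOutput);

// 8 kHz mono float, non-interleaved and packed
AudioStreamBasicDescription asbd8k = {0};
asbd8k.mSampleRate       = 8000;
asbd8k.mFormatID         = kAudioFormatLinearPCM;
asbd8k.mFormatFlags      = kAudioFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved | kAudioFormatFlagIsPacked;
asbd8k.mFramesPerPacket  = 1;
asbd8k.mChannelsPerFrame = 1;
asbd8k.mBitsPerChannel   = 32;
asbd8k.mBytesPerPacket   = 4;
asbd8k.mBytesPerFrame    = 4;
AudioUnitSetProperty(genericOutput, kAudioUnitProperty_StreamFormat,
                     kAudioUnitScope_Input, 0, &asbd8k, sizeof(asbd8k));

// pull the lowpass unit through the generic output
AudioUnitConnection connection = { player.lowPassUnit, 0, 0 };
AudioUnitSetProperty(genericOutput, kAudioUnitProperty_MakeConnection,
                     kAudioUnitScope_Input, 0, &connection, sizeof(connection));

AudioUnitInitialize(genericOutput);
// ...then AudioUnitRender(genericOutput, ...) is called instead of rendering
// the lowpass unit directly; this is the setup that produced the garbage above.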

I tried setting an ASBD with an 8 kHz sample rate on the lowpass unit's input and output, but it did nothing. I also tried adding converter units (with their ASBDs set to 8 kHz) before the lowpass, then after it, then both before and after it in the chain; that didn't work either.
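
Concretely, the direct-ASBD attempt was just two property calls along these lines (a sketch, reusing the asbd8k description from the sketch above):

AudioUnitSetProperty(player.lowPassUnit, kAudioUnitProperty_StreamFormat,
                     kAudioUnitScope_Input, 0, &asbd8k, sizeof(asbd8k));
AudioUnitSetProperty(player.lowPassUnit, kAudioUnitProperty_StreamFormat,
                     kAudioUnitScope_Output, 0, &asbd8k, sizeof(asbd8k));
// neither call made any audible difference to the rendered output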

As a side question: my buffer holds mono 8 kHz samples, and if I set mNumberBuffers on my buffer list to 1, the lowpass unit's input render proc never gets called... Is there a way to avoid having to use stereo channels?
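
As it turned out (see the working code further down), an AUConverter placed in front of the effect lets the render callback stay mono while the effect keeps its native stereo format. A minimal sketch of just that piece; the monoConverter name is illustrative, asbd8k is the 8 kHz mono description from above, and input is an AURenderCallbackStruct like the one in setupAudioUnits:

AudioComponentDescription convcd = {0};
convcd.componentType         = kAudioUnitType_FormatConverter;
convcd.componentSubType      = kAudioUnitSubType_AUConverter;
convcd.componentManufacturer = kAudioUnitManufacturer_Apple;

AudioUnit monoConverter;
AudioComponentInstanceNew(AudioComponentFindNext(NULL, &convcd), &monoConverter);

// mono 8 kHz float on the converter's input, which is where the render
// callback is attached, so the callback only ever fills one buffer
AudioUnitSetProperty(monoConverter, kAudioUnitProperty_StreamFormat,
                     kAudioUnitScope_Input, 0, &asbd8k, sizeof(asbd8k));
AudioUnitSetProperty(monoConverter, kAudioUnitProperty_SetRenderCallback,
                     kAudioUnitScope_Input, 0, &input, sizeof(input));

// the converter's output format is then matched to the next unit's input
// format and the two are joined with kAudioUnitProperty_MakeConnection,
// which is exactly what formatAndConnect() does in the working code below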

In the end I got this working by using converters on both ends: the ASBD is set to 8000 Hz mono float on the input converter's input and on the output converter's output, while the lowpass unit's input and output stay at 44100.0 stereo, and AudioUnitRender is called on the final converter, with no io unit in the chain, for the offline render. For online rendering I put a converter unit in front of the io unit as well, so the render callback also pulls from the 8K buffer for playback. It seems that a lower sample rate on the output ASBD requires a larger maximum frames per slice and smaller slices (AudioUnitRender inNumberFrames), and that is why it was failing to render before.

#import "ViewController.h"
#import <AudioToolbox/AudioToolbox.h>


@implementation ViewController{


    int sampleCount;
    int renderBufferHead;
    float *renderBuffer;
}


- (void)viewDidLoad {

    [super viewDidLoad];
    float sampleRate = 8000;

    int bufferSeconds = 3;
    sampleCount = sampleRate * bufferSeconds;//seconds
    float *originalSaw = generateSawWaveBuffer(440, sampleRate, sampleCount);

    renderBuffer = originalSaw;
    renderBufferHead = 0;


    AURenderCallbackStruct cbStruct = {renderCallback,(__bridge void *)self};

    //this will do offline render using the render callback,  callback just reads from renderBuffer at samplerate
    float *processedBuffer = offlineRender(sampleCount, sampleRate, &cbStruct);

    renderBufferHead = 0;//rewind render buffer after processing

    //set up audio units to do live render using the render callback at sample rate then self destruct after delay
    //it will play originalSaw for bufferSeconds, then after delay will switch renderBuffer to point at processedBuffer
    float secondsToPlayAudio = (bufferSeconds + 1) * 2;
    onlineRender(sampleRate, &cbStruct,secondsToPlayAudio);


    //wait for original to finish playing, then change render callback source buffer to processed buffer
    dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)((secondsToPlayAudio / 2) * NSEC_PER_SEC)), dispatch_get_main_queue(), ^{
        renderBuffer = processedBuffer;
        renderBufferHead = 0;//rewind render buffer
    });

    //destroy after all rendering done
    dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)(secondsToPlayAudio * NSEC_PER_SEC)), dispatch_get_main_queue(), ^{
        free(originalSaw);
        free(processedBuffer);
    });
}


float * offlineRender(int count, double sampleRate, AURenderCallbackStruct *cbStruct){

    AudioComponentInstance inConverter = getComponentInstance(kAudioUnitType_FormatConverter, kAudioUnitSubType_AUConverter);
    AudioComponentInstance lowPass = getComponentInstance(kAudioUnitType_Effect, kAudioUnitSubType_LowPassFilter);
    AudioComponentInstance outConverter = getComponentInstance(kAudioUnitType_FormatConverter, kAudioUnitSubType_AUConverter);

    AudioStreamBasicDescription asbd = getMonoFloatASBD(sampleRate);
    AudioUnitSetProperty(inConverter, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &asbd, sizeof(AudioStreamBasicDescription));
    AudioUnitSetProperty(outConverter, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &asbd, sizeof(AudioStreamBasicDescription));

    AudioUnitSetProperty(inConverter, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, cbStruct, sizeof(AURenderCallbackStruct));

    formatAndConnect(inConverter, lowPass);
    formatAndConnect(lowPass, outConverter);

    UInt32 maxFramesPerSlice = 4096;
    AudioUnitSetProperty(inConverter, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32));
    AudioUnitSetProperty(lowPass, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32));
    AudioUnitSetProperty(outConverter, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32));

    AudioUnitInitialize(inConverter);
    AudioUnitInitialize(lowPass);
    AudioUnitInitialize(outConverter);

    AudioUnitSetParameter(lowPass, kLowPassParam_CutoffFrequency, kAudioUnitScope_Global, 0, 500, 0);

    AudioBufferList *bufferlist = malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer));//room for a second AudioBuffer if a stereo list is ever needed
    float *left = malloc(sizeof(float) * 4096);
    bufferlist->mBuffers[0].mData = left;
    bufferlist->mNumberBuffers = 1;

    AudioTimeStamp inTimeStamp;
    memset(&inTimeStamp, 0, sizeof(AudioTimeStamp));
    inTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
    inTimeStamp.mSampleTime = 0;

    float *buffer = malloc(sizeof(float) * count);
    int inNumberframes = 512;
    AudioUnitRenderActionFlags flag = 0;
    int framesRead = 0;
    while (count){
        inNumberframes = MIN(inNumberframes, count);
        bufferlist->mBuffers[0].mDataByteSize = sizeof(float) * inNumberframes;
        printf("Offline Render %i frames\n",inNumberframes);
        AudioUnitRender(outConverter, &flag, &inTimeStamp, 0, inNumberframes, bufferlist);
        memcpy(buffer + framesRead, left, sizeof(float) * inNumberframes);
        inTimeStamp.mSampleTime += inNumberframes;
        count -= inNumberframes;
        framesRead += inNumberframes;

    }
    free(left);
    free(bufferlist);
    AudioUnitUninitialize(inConverter);
    AudioUnitUninitialize(lowPass);
    AudioUnitUninitialize(outConverter);
    return buffer;
}

OSStatus renderCallback(void *                          inRefCon,
                        AudioUnitRenderActionFlags *    ioActionFlags,
                        const AudioTimeStamp *          inTimeStamp,
                        UInt32                          inBusNumber,
                        UInt32                          inNumberFrames,
                        AudioBufferList *               ioData){

    ViewController *self = (__bridge ViewController*)inRefCon;
    float *left = ioData->mBuffers[0].mData;

    for (int i = 0; i < inNumberFrames; i++) {
        if (self->renderBufferHead >= self->sampleCount) {
            left[i] = 0;
        }
        else{
            left[i] = self->renderBuffer[self->renderBufferHead++];
        }
    }
    if(ioData->mNumberBuffers == 2){
        memcpy(ioData->mBuffers[1].mData, left, sizeof(float) * inNumberFrames);
    }
    printf("render %f to %f\n",inTimeStamp->mSampleTime,inTimeStamp->mSampleTime + inNumberFrames);
    return noErr;
}

void onlineRender(double sampleRate, AURenderCallbackStruct *cbStruct,float duration){
    AudioComponentInstance converter = getComponentInstance(kAudioUnitType_FormatConverter, kAudioUnitSubType_AUConverter);
    AudioComponentInstance ioUnit = getComponentInstance(kAudioUnitType_Output, kAudioUnitSubType_DefaultOutput);

    AudioStreamBasicDescription asbd = getMonoFloatASBD(sampleRate);
    AudioUnitSetProperty(converter, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &asbd, sizeof(AudioStreamBasicDescription));
    AudioUnitSetProperty(converter, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, cbStruct, sizeof(AURenderCallbackStruct));

    formatAndConnect(converter, ioUnit);

    AudioUnitInitialize(converter);
    AudioUnitInitialize(ioUnit);
    AudioOutputUnitStart(ioUnit);

    dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)(duration * NSEC_PER_SEC)), dispatch_get_main_queue(), ^{
        AudioOutputUnitStop(ioUnit);
        AudioUnitUninitialize(ioUnit);
        AudioUnitUninitialize(converter);
    });

}

float * generateSawWaveBuffer(float frequency,float sampleRate, int sampleCount){
    float *buffer = malloc(sizeof(float) * sampleCount);
    float increment = (frequency / sampleRate) * 2;
    int increasing = 1;
    float sample = 0;
    for (int i = 0; i < sampleCount; i++) {
        if (increasing) {
            sample += increment;
            if (sample >= 1) {
                increasing = 0;
            }
        }
        else{
            sample -= increment;
            if (sample < -1) {
                increasing = 1;
            }
        }
        buffer[i] = sample;
    }
    return buffer;
}
AudioComponentInstance getComponentInstance(OSType type,OSType subType){
    AudioComponentDescription desc = {0};
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentSubType =  subType;
    desc.componentType    = type;
    AudioComponent ioComponent = AudioComponentFindNext(NULL, &desc);
    AudioComponentInstance unit;
    AudioComponentInstanceNew(ioComponent, &unit);
    return unit;
}


AudioStreamBasicDescription getMonoFloatASBD(double sampleRate){
    AudioStreamBasicDescription asbd = {0};
    asbd.mSampleRate = sampleRate;
    asbd.mFormatID = kAudioFormatLinearPCM;
    asbd.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved | kAudioFormatFlagIsPacked;
    asbd.mFramesPerPacket = 1;
    asbd.mChannelsPerFrame = 1;
    asbd.mBitsPerChannel = 32;
    asbd.mBytesPerPacket = 4;
    asbd.mBytesPerFrame = 4;
    return asbd;
}

void formatAndConnect(AudioComponentInstance src,AudioComponentInstance dst){

    AudioStreamBasicDescription asbd;
    UInt32 propsize = sizeof(AudioStreamBasicDescription);
    AudioUnitGetProperty(dst, kAudioUnitProperty_StreamFormat,kAudioUnitScope_Input,0,&asbd,&propsize);
    AudioUnitSetProperty(src, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &asbd, sizeof(AudioStreamBasicDescription));

    AudioUnitConnection connection = {0};
    connection.destInputNumber = 0;
    connection.sourceAudioUnit = src;
    connection.sourceOutputNumber = 0;
    AudioUnitSetProperty(dst, kAudioUnitProperty_MakeConnection, kAudioUnitScope_Input, 0, &connection, sizeof(AudioUnitConnection));
}
@end