How do I blur a YUV video frame with the Agora SDK?

I am using the following method from the Advanced Video Example on GitHub to capture the raw video data:

- (AgoraVideoRawData *)mediaDataPlugin:(AgoraMediaDataPlugin *)mediaDataPlugin didCapturedVideoRawData:(AgoraVideoRawData *)videoRawData

I have been able to convert the Y, U, and V buffers into a CVPixelBuffer, then into a CIImage, and apply a blur, but I cannot figure out how to convert the CIImage data back into YUV buffers.
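
For context, the forward direction I already have working looks roughly like this (a simplified sketch, assuming an NV12 CVPixelBufferRef named pixelBuffer already built from the raw Y/U/V data; the filter and radius are just examples):

CIImage *image = [CIImage imageWithCVPixelBuffer:pixelBuffer];
CIFilter *blur = [CIFilter filterWithName:@"CIGaussianBlur"];
[blur setValue:image forKey:kCIInputImageKey];
[blur setValue:@10.0 forKey:kCIInputRadiusKey];
CIImage *blurred = [blur outputImage];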

I have successfully written arbitrary values into the YUV buffers, which results in a gray video frame being sent to the other users (128 is mid-range luma and the neutral chroma value):

memset(videoRawData.yBuffer, 128, videoRawData.yStride * videoRawData.height);
memset(videoRawData.uBuffer, 128, videoRawData.uStride * videoRawData.height / 2);
memset(videoRawData.vBuffer, 128, videoRawData.vStride * videoRawData.height / 2);

Can anyone point me in the right direction on how to convert the CIImage data back into YUV buffers? Or, if there is a more efficient way to blur a YUV video stream, I am open to trying that instead.

I found a solution that works for me. I will try to post a complete answer so that others may find a solution that works for them as well. See the comments in the code for further explanation.

Define these helper macros somewhere in your file. They are used later to extract the RGB values of each pixel:

#define Mask8(x) ( (x) & 0xFF )
#define R(x) ( Mask8(x) )
#define G(x) ( Mask8(x >> 8 ) )
#define B(x) ( Mask8(x >> 16) )
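
For example, with the RGBA bitmap layout used further down (bytes R, G, B, A, read back as one little-endian UInt32), the macros pull the channels out like this; the pixel value here is just a made-up example:

UInt32 pixel = 0xFF2040E0; // bytes in memory: E0 40 20 FF = R, G, B, A
UInt32 r = R(pixel);       // 0xE0
UInt32 g = G(pixel);       // 0x40
UInt32 b = B(pixel);       // 0x20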

To keep the answer simple, all of the code posted here lives inside the - (AgoraVideoRawData *)mediaDataPlugin:(AgoraMediaDataPlugin *)mediaDataPlugin didCapturedVideoRawData:(AgoraVideoRawData *)videoRawData method.

- (AgoraVideoRawData *)mediaDataPlugin:(AgoraMediaDataPlugin *)mediaDataPlugin didCapturedVideoRawData:(AgoraVideoRawData *)videoRawData
{
    // create pixelbuffer from raw video data
    NSDictionary *pixelAttributes = @{(NSString *)kCVPixelBufferIOSurfacePropertiesKey:@{}};
    CVPixelBufferRef pixelBuffer = NULL;
    CVReturn result = CVPixelBufferCreate(kCFAllocatorDefault,
                                          videoRawData.width,
                                          videoRawData.height,
                                          kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,   //  NV12
                                          (__bridge CFDictionaryRef)(pixelAttributes),
                                          &pixelBuffer);
    if (result != kCVReturnSuccess) {
        NSLog(@"Unable to create cvpixelbuffer %d", result);
    }
    CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    unsigned char *yDestPlane = (unsigned char *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
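    // NOTE: the copy loops below strip the source stride padding but assume the destination
    // planes are packed; strictly, the destination row offset should come from
    // CVPixelBufferGetBytesPerRowOfPlane for each plane.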
    for (int i = 0, k = 0; i < videoRawData.height; i ++) {
        for (int j = 0; j < videoRawData.width; j ++) {
            yDestPlane[k++] = videoRawData.yBuffer[j + i * videoRawData.yStride];
        }
    }
    unsigned char *uvDestPlane = (unsigned char *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1);
    for (int i = 0, k = 0; i < videoRawData.height / 2; i ++) {
        for (int j = 0; j < videoRawData.width / 2; j ++) {
            uvDestPlane[k++] = videoRawData.uBuffer[j + i * videoRawData.uStride];
            uvDestPlane[k++] = videoRawData.vBuffer[j + i * videoRawData.vStride];
        }
    }
    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);

    // create CIImage from pixel buffer
    CIImage *coreImage = [CIImage imageWithCVPixelBuffer:pixelBuffer];

    // apply pixel filter to image
    CIFilter *pixelFilter = [CIFilter filterWithName:@"CIPixellate"];
    [pixelFilter setDefaults];
    [pixelFilter setValue:coreImage forKey:kCIInputImageKey];
    [pixelFilter setValue:@40 forKey:@"inputScale"];
    CIVector *vector = [[CIVector alloc] initWithX:160 Y:160]; // x & y should be multiple of 'inputScale' parameter
    [pixelFilter setValue:vector forKey:@"inputCenter"];
    CIImage *outputBlurredImage = [pixelFilter outputImage];

    CIContext *blurImageContext = [CIContext contextWithOptions:nil];
    CGImageRef inputCGImage = [blurImageContext createCGImage:outputBlurredImage fromRect:[coreImage extent]];

    // write blurred image data to YUV buffers
    NSUInteger blurredWidth = CGImageGetWidth(inputCGImage);
    NSUInteger blurredHeight = CGImageGetHeight(inputCGImage);

    NSUInteger bytesPerPixel = 4;
    NSUInteger bytesPerRow = bytesPerPixel * blurredWidth;
    NSUInteger bitsPerComponent = 8;
    UInt32 * pixels = (UInt32 *) calloc(blurredHeight * blurredWidth, sizeof(UInt32));

    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(pixels, blurredWidth, blurredHeight, bitsPerComponent, bytesPerRow, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGContextDrawImage(context, CGRectMake(0, 0, blurredWidth, blurredHeight), inputCGImage);
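    // 'pixels' now holds the blurred frame as packed 32-bit RGBA values (one UInt32 per pixel)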

    int frameSize = videoRawData.width * videoRawData.height;
    int yIndex = 0; // Y write index
    int uIndex = 0; // U write index (separate buffer, so it starts at 0)
    int vIndex = 0; // V write index (separate buffer, so it starts at 0)

    // allocate temporary buffers to hold the converted YUV planes
    UInt32 *currentPixel = pixels;
    char *yBuffer = malloc(frameSize);     // full-size luma plane
    char *uBuffer = malloc(frameSize / 4); // the 4:2:0 chroma planes are a quarter of the frame size
    char *vBuffer = malloc(frameSize / 4);

    // loop through each RGB pixel and translate to YUV
    for (int j = 0; j < blurredHeight; j++) {
      for (int i = 0; i < blurredWidth; i++) {
          UInt32 color = *currentPixel;
          // use signed ints: the U and V intermediates can be negative before the +128 offset
          int R = R(color);
          int G = G(color);
          int B = B(color);

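          // integer approximation of the BT.601 RGB -> YCbCr (video-range) conversion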
          int Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
          int U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
          int V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;

          yBuffer[yIndex++] = Y;
          if (j % 2 == 0 && i % 2 == 0) {
              uBuffer[uIndex++] = U;
              vBuffer[vIndex++] = V;
          }

        currentPixel++;
      }
    }

    // copy the new YUV values back into the buffers of the given videoRawData object;
    // this assumes yStride == width and uStride == vStride == width / 2
    // (otherwise copy the planes back row by row)
    memcpy((void *)videoRawData.yBuffer, yBuffer, frameSize);
    memcpy((void *)videoRawData.uBuffer, uBuffer, frameSize / 4);
    memcpy((void *)videoRawData.vBuffer, vBuffer, frameSize / 4);

    // cleanup
    CVPixelBufferRelease(pixelBuffer);
    CGImageRelease(inputCGImage);
    CGColorSpaceRelease(colorSpace);
    CGContextRelease(context);
    free(pixels);
    free(yBuffer);
    free(uBuffer);
    free(vBuffer);

    return videoRawData;
}
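
As an aside on the "more efficient way" part of the question: I have not benchmarked it, but it should be possible to skip the manual RGB-to-YUV loop by letting Core Image render the filtered image straight back into the same NV12 pixel buffer and then copying the planes out again. A rough, untested sketch, reusing pixelBuffer and outputBlurredImage from the code above (before the cleanup releases them) and assuming the yBuffer/uBuffer/vBuffer pointers are writable, as the memcpy calls above already assume:

CIContext *renderContext = [CIContext contextWithOptions:nil];
[renderContext render:outputBlurredImage toCVPixelBuffer:pixelBuffer];

CVPixelBufferLockBaseAddress(pixelBuffer, 0);
unsigned char *ySrc = (unsigned char *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
unsigned char *uvSrc = (unsigned char *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1);
size_t yRowBytes = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
size_t uvRowBytes = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 1);

// copy the luma plane back, row by row, respecting both strides
for (int row = 0; row < videoRawData.height; row++) {
    memcpy((void *)(videoRawData.yBuffer + row * videoRawData.yStride),
           ySrc + row * yRowBytes,
           videoRawData.width);
}
// de-interleave the CbCr plane back into the separate U and V buffers
for (int row = 0; row < videoRawData.height / 2; row++) {
    for (int col = 0; col < videoRawData.width / 2; col++) {
        videoRawData.uBuffer[row * videoRawData.uStride + col] = uvSrc[row * uvRowBytes + 2 * col];
        videoRawData.vBuffer[row * videoRawData.vStride + col] = uvSrc[row * uvRowBytes + 2 * col + 1];
    }
}
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);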