Drawing CGImageRef in YUV
I'm using the code from here to convert a CGImageRef into a CVPixelBufferRef on OS X:

Convert UIImage to CVImageBufferRef
However, I need the image drawn in YUV (kCVPixelFormatType_420YpCbCr8Planar) rather than the RGB it uses now. Is there a way to draw a CGImage directly into a YUV color space? If not, does anyone have an example of the best way to convert a CVPixelBufferRef from RGB to YUV? I understand the conversion formulas, but doing the conversion on the CPU is painfully slow.
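(For reference, the BT.709 form of those formulas in normalized terms is Y = 0.2126 R + 0.7152 G + 0.0722 B, with Cb = (B − Y) / 1.8556 and Cr = (R − Y) / 1.5748; evaluating that per pixel is the CPU cost the answers below avoid.)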
Solved it with the following:
CVPixelBufferRef converted_frame;
CVPixelBufferCreate(kCFAllocatorDefault, width, height, kCVPixelFormatType_420YpCbCr8Planar, 0, &converted_frame);
VTPixelTransferSessionTransferImage(_vtpt_ref, imageBuffer, converted_frame);
where imageBuffer is the source CVPixelBufferRef.
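The answer doesn't show where _vtpt_ref comes from. As a minimal sketch, assuming VideoToolbox is linked and with the helper function names invented for illustration, the session can be created and torn down like this:

#import <VideoToolbox/VideoToolbox.h>

// Hypothetical one-time setup for the transfer session used above;
// error handling is reduced to an assert for brevity.
static VTPixelTransferSessionRef _vtpt_ref = NULL;

static void SetupPixelTransferSession(void)
{
  OSStatus status = VTPixelTransferSessionCreate(kCFAllocatorDefault, &_vtpt_ref);
  NSCAssert(status == noErr, @"VTPixelTransferSessionCreate failed %d", (int)status);
}

// Invalidate and release the session once no more frames will be transferred.
static void TeardownPixelTransferSession(void)
{
  VTPixelTransferSessionInvalidate(_vtpt_ref);
  CFRelease(_vtpt_ref);
  _vtpt_ref = NULL;
}

VTPixelTransferSessionTransferImage then performs the pixel-format conversion through VideoToolbox rather than in user code, which sidesteps the slow CPU conversion mentioned in the question.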
This is a late answer and isn't meant to collect votes or anything. There is an Accelerate framework method that converts RGB to YUV; the code is a little involved, but it works, and it is included here because working examples are hard to find. It is wrapped up as a class that extends CIFilter, but it would be easy to adapt if you want to do something different. This code contains no memory leaks and should perform well when invoked repeatedly. The tricky part is that the implementation creates a CVPixelBufferRef and then sets all the properties needed so that a later call to vImageCVImageFormat_CreateWithCVPixelBuffer() works correctly. The code renders the RGB data into a CoreVideo buffer, then wraps the resulting YUV image and returns it like any other CoreImage filter.
//
// CoreImageToYUVConverter.h
//
// Make use of CoreImage to convert an RGB input image into YUV data where
// UV is subsampled and Y is the same dimensions as the original data.

#import <Foundation/Foundation.h>
#import <CoreImage/CoreImage.h>

@interface CoreImageToYUVConverter : CIFilter

@property (nonatomic, retain) CIImage *inputImage;

// If there is an error while processing the filter, this value is
// set to non-nil. Otherwise it is set to nil.
@property (nonatomic, retain) NSError *error;

// Dimensions of the output image; note that Y is 2x
// the dimensions of the U and V buffers, so the Y image
// must have even width and height.
@property (nonatomic, assign) CGSize size;

@end

// CoreImageToYUVConverter.m

#import "CoreImageToYUVConverter.h"

@import Accelerate;

@interface CoreImageToYUVConverter ()

@property (nonatomic, retain) CIContext *coreImageContext;
@property (nonatomic, copy) NSNumber *inputWidth;
@property (nonatomic, copy) NSNumber *inputAspectRatio;
@property (nonatomic, assign) CVPixelBufferRef pixelBuffer;

@end

@implementation CoreImageToYUVConverter

@synthesize coreImageContext = m_coreImageContext;
@synthesize pixelBuffer = m_pixelBuffer;

// Release the held CoreVideo buffer when this filter is deallocated.

- (void) dealloc
{
  self.pixelBuffer = NULL;
}

// Setter for self.pixelBuffer; this logic holds a retain on the CoreVideo buffer

- (void) setPixelBuffer:(CVImageBufferRef)cvBufferRef
{
  if (cvBufferRef) {
    CFRetain(cvBufferRef);
  }
  if (self->m_pixelBuffer) {
    CFRelease(self->m_pixelBuffer);
  }
  self->m_pixelBuffer = cvBufferRef;
}

- (CIImage *)outputImage
{
  self.error = nil;

  NSParameterAssert(self.inputImage != nil && [self.inputImage isKindOfClass:[CIImage class]]);

  CIImage *inputImage = self.inputImage;

  [self renderIntoYUVBuffer:inputImage];

  CIImage *outCIImage = [CIImage imageWithCVImageBuffer:self.pixelBuffer];
  return outCIImage;
}

- (NSDictionary *)customAttributes
{
  return @{
    kCIInputWidthKey : @{kCIAttributeDefault : @(0), kCIAttributeType : kCIAttributeTypeScalar},
    kCIInputAspectRatioKey : @{kCIAttributeDefault : @(0), kCIAttributeType : kCIAttributeTypeScalar},
  };
}

- (void) renderIntoYUVBuffer:(CIImage*)inputImage
{
  CGRect imageExtent = inputImage.extent;
  int width = (int) imageExtent.size.width;
  int height = (int) imageExtent.size.height;

  // Extract a CGImageRef from the CIImage; this will flatten pixels possibly
  // from multiple steps of a CoreImage chain.

  if (self.coreImageContext == nil) {
    CIContext *context = [CIContext contextWithOptions:nil];
    NSAssert(context != nil, @"CIContext contextWithOptions failed");
    self.coreImageContext = context;
  }

  CGImageRef inCGImageRef = [self.coreImageContext createCGImage:inputImage fromRect:imageExtent];

  NSDictionary *pixelAttributes = @{
    (__bridge NSString*)kCVPixelBufferIOSurfacePropertiesKey : @{},
    (__bridge NSString*)kCVPixelFormatOpenGLESCompatibility : @(YES),
    (__bridge NSString*)kCVPixelBufferCGImageCompatibilityKey : @(YES),
    (__bridge NSString*)kCVPixelBufferCGBitmapContextCompatibilityKey : @(YES),
  };

  CVPixelBufferRef cvPixelBuffer = NULL;

  uint32_t yuvImageFormatType;
  //yuvImageFormatType = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange; // luma (0, 255)
  yuvImageFormatType = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange; // luma (16, 235)

  CVReturn result = CVPixelBufferCreate(kCFAllocatorDefault,
                                        width,
                                        height,
                                        yuvImageFormatType,
                                        (__bridge CFDictionaryRef)(pixelAttributes),
                                        &cvPixelBuffer);
  NSAssert(result == kCVReturnSuccess, @"CVPixelBufferCreate failed");

  // FIXME: UHDTV : HEVC uses kCGColorSpaceITUR_2020
  CGColorSpaceRef yuvColorSpace = CGColorSpaceCreateWithName(kCGColorSpaceITUR_709);

  {
    // Attach colorspace info to the pixel buffer

    //CFDataRef colorProfileData = CGColorSpaceCopyICCProfile(yuvColorSpace); // deprecated
    CFDataRef colorProfileData = CGColorSpaceCopyICCData(yuvColorSpace);

    NSDictionary *pbAttachments = @{
      (__bridge NSString*)kCVImageBufferYCbCrMatrixKey: (__bridge NSString*)kCVImageBufferYCbCrMatrix_ITU_R_709_2,
      (__bridge NSString*)kCVImageBufferColorPrimariesKey: (__bridge NSString*)kCVImageBufferColorPrimaries_ITU_R_709_2,
      (__bridge NSString*)kCVImageBufferTransferFunctionKey: (__bridge NSString*)kCVImageBufferTransferFunction_ITU_R_709_2,
      (__bridge NSString*)kCVImageBufferICCProfileKey: (__bridge NSData *)colorProfileData,
      (__bridge NSString*)kCVImageBufferChromaLocationTopFieldKey: (__bridge NSString*)kCVImageBufferChromaLocation_Center,
      (__bridge NSString*)kCVImageBufferAlphaChannelIsOpaque: (id)kCFBooleanTrue,
    };

    CVBufferRef pixelBuffer = cvPixelBuffer;
    CVBufferSetAttachments(pixelBuffer, (__bridge CFDictionaryRef)pbAttachments, kCVAttachmentMode_ShouldPropagate);

    // Drop the ref to the NSDictionary to enable explicit checking of the ref
    // count of colorProfileData; after the release below it must be 1.
    pbAttachments = nil;
    CFRelease(colorProfileData);
  }

  // Note that this setter will implicitly release an earlier held ref to a pixel buffer
  self.pixelBuffer = cvPixelBuffer;

  vImageCVImageFormatRef cvImgFormatRef;
  cvImgFormatRef = vImageCVImageFormat_CreateWithCVPixelBuffer(cvPixelBuffer);

  // vImage_CGImageFormat for the input RGB

  // FIXME: Need to select sRGB if running under MacOSX
  //CGColorSpaceRef defaultColorspaceRef = CGColorSpaceCreateDeviceRGB();

  // Default to sRGB on both MacOSX and iOS
  CGColorSpaceRef defaultColorspaceRef = NULL;

  vImage_CGImageFormat rgbCGImgFormat = {
    .bitsPerComponent = 8,
    .bitsPerPixel = 32,
    .bitmapInfo = (CGBitmapInfo)(kCGBitmapByteOrder32Host | kCGImageAlphaNoneSkipFirst),
    .colorSpace = defaultColorspaceRef,
  };

  // Copy the input CoreGraphics image into a CoreVideo buffer

  vImage_Buffer sourceBuffer;

  const CGFloat backgroundColor = 0.0f;

  vImage_Flags flags = 0;
  flags = kvImagePrintDiagnosticsToConsole;

  vImage_Error err;

  err = vImageBuffer_InitWithCGImage(&sourceBuffer, &rgbCGImgFormat, &backgroundColor, inCGImageRef, flags);
  NSAssert(err == kvImageNoError, @"vImageBuffer_InitWithCGImage failed");

  err = vImageBuffer_CopyToCVPixelBuffer(&sourceBuffer, &rgbCGImgFormat, cvPixelBuffer, cvImgFormatRef, &backgroundColor, flags);
  NSAssert(err == kvImageNoError, @"error in vImageBuffer_CopyToCVPixelBuffer %d", (int)err);

  // Manually free() the buffer allocated by vImageBuffer_InitWithCGImage()
  free(sourceBuffer.data);

  vImageCVImageFormat_Release(cvImgFormatRef);

  CVPixelBufferRelease(cvPixelBuffer);
  CGColorSpaceRelease(yuvColorSpace);
  CGColorSpaceRelease(defaultColorspaceRef);
  CGImageRelease(inCGImageRef);
}

@end
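As a quick usage sketch (the CGImageRef named inCGImage is an assumed input from elsewhere in your program):

// Hypothetical usage of the filter class above.
CoreImageToYUVConverter *converter = [[CoreImageToYUVConverter alloc] init];
converter.inputImage = [CIImage imageWithCGImage:inCGImage];
// Rendering happens inside outputImage; the result wraps the converter's
// BT.709-tagged 4:2:0 CVPixelBufferRef.
CIImage *yuvImage = converter.outputImage;

Because the pixelBuffer setter releases the previously held buffer on each render, a single converter instance can be reused across a stream of frames.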