Core Graphics - draw a grayscale image using an array of integers
I'm trying to create a UIImage using Core Graphics.
My goal is to draw an image divided into 4 different grayscale areas/pixels:

..... white ..... gray .....
..... gray ..... black ....

So, using Core Graphics, I want to define an array of 4 different int8_t values that correspond to the desired image:
int8_t data[] = {
    255, 122,
    122, 0,
};
255 is white, 122 is gray, and 0 is black.
The best reference for similar code that I could find is here.
That reference deals with an RGB image, so I used my own common sense to come up with this code (pardon my Objective-C French - it's not my mother tongue :)):
- (UIImage *)getImageFromGrayScaleArray {
    int width = 2;
    int height = 2;

    int8_t data[] = {
        255, 122,
        122, 0,
    };

    CGDataProviderRef provider = CGDataProviderCreateWithData(NULL,
                                                              &data[0],
                                                              width * height,
                                                              NULL);
    CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceGray();
    CGImageRef imageRef = CGImageCreate(width,
                                        height,
                                        [self bitsPerComponent],
                                        [self bitsPerPixel],
                                        width * [self bytesPerPixel],
                                        colorSpaceRef,
                                        kCGBitmapByteOrderDefault,
                                        provider,
                                        NULL,
                                        NO,
                                        kCGRenderingIntentDefault);

    UIImage *image = [UIImage imageWithCGImage:imageRef];

    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpaceRef);

    return image;
}
- (int)bitsPerPixel {
    return 8 * [self bytesPerPixel];
}

- (int)bytesPerPixel {
    return [self bytesPerComponent] * [self componentsPerPixel];
}

- (int)componentsPerPixel {
    return 1;
}

- (int)bytesPerComponent {
    return 1;
}

- (int)bitsPerComponent {
    return 8 * [self bytesPerComponent];
}
But... this code gives me an all-black UIImage.
Can someone point me to something I can read to understand how to accomplish a task like this? The documentation on Core Graphics seems very scarce when it comes to this kind of task, and all this guessing takes... forever :)
You're close...

A grayscale image needs two components per pixel: brightness and alpha (in the alpha-last bitmap layout used below).
So, with just a couple of changes (see the comments):
- (UIImage *)getImageFromGrayScaleArray {
    int width = 2;
    int height = 2;

    // 1 byte for brightness, 1 byte for alpha
    int8_t data[] = {
        255, 255,
        122, 255,
        122, 255,
        0, 255,
    };

    CGDataProviderRef provider = CGDataProviderCreateWithData(NULL,
                                                              &data[0],
                                                              // size is width * height * bytesPerPixel
                                                              width * height * [self bytesPerPixel],
                                                              NULL);
    CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceGray();
    CGImageRef imageRef = CGImageCreate(width,
                                        height,
                                        [self bitsPerComponent],
                                        [self bitsPerPixel],
                                        width * [self bytesPerPixel],
                                        colorSpaceRef,
                                        // use this
                                        kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big,
                                        // instead of this
                                        //kCGBitmapByteOrderDefault,
                                        provider,
                                        NULL,
                                        NO,
                                        kCGRenderingIntentDefault);

    UIImage *image = [UIImage imageWithCGImage:imageRef];

    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpaceRef);

    return image;
}
- (int)bitsPerPixel {
    return 8 * [self bytesPerPixel];
}

- (int)bytesPerPixel {
    return [self bytesPerComponent] * [self componentsPerPixel];
}

- (int)componentsPerPixel {
    return 2; // 1 byte for brightness, 1 byte for alpha
}

- (int)bytesPerComponent {
    return 1;
}

- (int)bitsPerComponent {
    return 8 * [self bytesPerComponent];
}
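If you want to sanity-check what actually ended up in the image, one quick debugging option (my own sketch, not part of the original answer; the helper name dumpImageBytes is made up) is to copy the backing bytes back out of the CGImage and print them:

static void dumpImageBytes(CGImageRef imageRef) {
    // CGDataProviderCopyData copies the provider's bytes into a new CFData
    CFDataRef data = CGDataProviderCopyData(CGImageGetDataProvider(imageRef));
    const UInt8 *bytes = CFDataGetBytePtr(data);
    for (CFIndex i = 0; i < CFDataGetLength(data); i++) {
        printf("%u ", (unsigned)bytes[i]);
    }
    printf("\n");
    CFRelease(data); // "Copy" functions follow the Create rule, so release it
}

For the 2x2 image above, this should print brightness/alpha pairs: 255 255 122 255 122 255 0 255.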
Edit -- I think there is a problem with the memory buffer addressing in the code above; after some testing I was getting inconsistent results. The likely cause is that CGDataProviderCreateWithData does not copy the bytes, so the stack-allocated data array may no longer be valid by the time Core Graphics actually reads it.
Try it with this modified code, which mallocs the pixel buffer and gives the provider a callback to free it:
@interface TestingViewController : UIViewController
@end

@interface TestingViewController ()
@end

@implementation TestingViewController

// CGDataProviderCreateWithData callback to free the pixel data buffer
void freePixelData(void *info, const void *data, size_t size) {
    free((void *)data);
}

- (UIImage *)getImageFromGrayScaleArray:(BOOL)allBlack {

    uint8_t grayArray[] = {
        255, 122,
        122, 0,
    };

    uint8_t blackArray[] = {
        0, 0,
        0, 0,
    };

    int width = 2;
    int height = 2;

    int imageSizeInPixels = width * height;
    int bytesPerPixel = 2; // 1 byte for brightness, 1 byte for alpha

    unsigned char *pixels = (unsigned char *)malloc(imageSizeInPixels * bytesPerPixel);
    // fill the buffer with 255 so every alpha byte is 255
    // (the brightness bytes are overwritten below)
    memset(pixels, 255, imageSizeInPixels * bytesPerPixel);

    if (allBlack) {
        for (int i = 0; i < imageSizeInPixels; i++) {
            pixels[i * 2] = blackArray[i]; // writing array of bytes as image brightnesses
        }
    } else {
        for (int i = 0; i < imageSizeInPixels; i++) {
            pixels[i * 2] = grayArray[i]; // writing array of bytes as image brightnesses
        }
    }

    CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceGray();
    CGDataProviderRef provider = CGDataProviderCreateWithData(NULL,
                                                              pixels,
                                                              imageSizeInPixels * bytesPerPixel,
                                                              freePixelData);
    CGImageRef imageRef = CGImageCreate(width,
                                        height,
                                        8,
                                        8 * bytesPerPixel,
                                        width * bytesPerPixel,
                                        colorSpaceRef,
                                        kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big,
                                        provider,
                                        NULL,
                                        false,
                                        kCGRenderingIntentDefault);

    UIImage *image = [UIImage imageWithCGImage:imageRef];

    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpaceRef);

    return image;
}

- (void)viewDidLoad {
    [super viewDidLoad];

    self.view.backgroundColor = [UIColor systemTealColor];

    UIImage *img1 = [self getImageFromGrayScaleArray:NO];
    UIImage *img2 = [self getImageFromGrayScaleArray:YES];

    UIImageView *v1 = [UIImageView new];
    UIImageView *v2 = [UIImageView new];

    v1.image = img1;
    v1.backgroundColor = [UIColor systemYellowColor];
    v2.image = img2;
    v2.backgroundColor = [UIColor systemYellowColor];

    v1.contentMode = UIViewContentModeScaleToFill;
    v2.contentMode = UIViewContentModeScaleToFill;

    v1.translatesAutoresizingMaskIntoConstraints = NO;
    [self.view addSubview:v1];
    v2.translatesAutoresizingMaskIntoConstraints = NO;
    [self.view addSubview:v2];

    UILayoutGuide *g = [self.view safeAreaLayoutGuide];

    [NSLayoutConstraint activateConstraints:@[
        [v1.topAnchor constraintEqualToAnchor:g.topAnchor constant:40.0],
        [v1.centerXAnchor constraintEqualToAnchor:g.centerXAnchor],
        [v1.widthAnchor constraintEqualToConstant:200.0],
        [v1.heightAnchor constraintEqualToAnchor:v1.widthAnchor],
        [v2.topAnchor constraintEqualToAnchor:v1.bottomAnchor constant:40.0],
        [v2.centerXAnchor constraintEqualToAnchor:self.view.centerXAnchor],
        [v2.widthAnchor constraintEqualToAnchor:v1.widthAnchor],
        [v2.heightAnchor constraintEqualToAnchor:v2.widthAnchor],
    ]];
}

@end
We add two 200x200 image views, and set the top image view's .image to the UIImage returned for:

uint8_t grayArray[] = {
    255, 122,
    122, 0,
};
and the bottom image view uses:

uint8_t blackArray[] = {
    0, 0,
    0, 0,
};
Output: the top image view shows the white / gray / gray / black quadrants, and the bottom image view is solid black.
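As a side note (my own addition, not part of the original answer): a grayscale CGImage can also be created with no alpha channel at all, using kCGImageAlphaNone and one byte per pixel. Wrapping the bytes in an NSData also sidesteps the buffer-lifetime issue described above, because CGDataProviderCreateWithCFData retains the data object for the lifetime of the provider. A minimal sketch (the method name grayImageNoAlpha is mine):

- (UIImage *)grayImageNoAlpha {
    int width = 2;
    int height = 2;

    // one brightness byte per pixel, no alpha
    uint8_t pixels[] = {
        255, 122,
        122, 0,
    };

    // NSData keeps the bytes alive for as long as the provider needs them
    NSData *data = [NSData dataWithBytes:pixels length:sizeof(pixels)];
    CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
    CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceGray();

    CGImageRef imageRef = CGImageCreate(width,
                                        height,
                                        8,                           // bits per component
                                        8,                           // bits per pixel
                                        width,                       // bytes per row
                                        colorSpaceRef,
                                        (CGBitmapInfo)kCGImageAlphaNone,
                                        provider,
                                        NULL,
                                        NO,
                                        kCGRenderingIntentDefault);

    UIImage *image = [UIImage imageWithCGImage:imageRef];

    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpaceRef);

    return image;
}

Also worth noting for display: when a 2x2 image is stretched to 200x200, the image view interpolates between pixels, so the quadrants blur into each other; setting the image view's layer.magnificationFilter to kCAFilterNearest (from QuartzCore) should keep them as four crisp squares.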