Getting Pixel Color from an Image using CGPoint in Swift 3

I'm trying this PixelExtractor class in Swift 3 and I get an error:

Cannot invoke initializer for type 'UnsafePointer' with an argument list of type '(UnsafeMutableRawPointer?)'
class PixelExtractor: NSObject {

    let image: CGImage
    let context: CGContextRef?

    var width: Int {
        get {
            return CGImageGetWidth(image)
        }
    }

    var height: Int {
        get {
            return CGImageGetHeight(image)
        }
    }

    init(img: CGImage) {
        image = img
        context = PixelExtractor.createBitmapContext(img)
    }

    class func createBitmapContext(img: CGImage) -> CGContextRef {

        // Get image width, height
        let pixelsWide = CGImageGetWidth(img)
        let pixelsHigh = CGImageGetHeight(img)

        let bitmapBytesPerRow = pixelsWide * 4
        let bitmapByteCount = bitmapBytesPerRow * Int(pixelsHigh)

        // Use the generic RGB color space.
        let colorSpace = CGColorSpaceCreateDeviceRGB()

        // Allocate memory for image data. This is the destination in memory
        // where any drawing to the bitmap context will be rendered.
        let bitmapData = malloc(bitmapByteCount)
        let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.PremultipliedFirst.rawValue)
        let size = CGSizeMake(CGFloat(pixelsWide), CGFloat(pixelsHigh))
        UIGraphicsBeginImageContextWithOptions(size, false, 0.0)
        // create bitmap
        let context = CGBitmapContextCreate(bitmapData, pixelsWide, pixelsHigh, 8,
                                            bitmapBytesPerRow, colorSpace, bitmapInfo.rawValue)

        // draw the image onto the context
        let rect = CGRect(x: 0, y: 0, width: pixelsWide, height: pixelsHigh)
        CGContextDrawImage(context, rect, img)

        return context!
    }

    func colorAt(x x: Int, y: Int) -> UIColor {

        assert(0 <= x && x < width)
        assert(0 <= y && y < height)

        let uncastedData = CGBitmapContextGetData(context)
        let data = UnsafePointer<UInt8>(uncastedData)

        let offset = 4 * (y * width + x)

        let alpha: UInt8 = data[offset]
        let red: UInt8 = data[offset+1]
        let green: UInt8 = data[offset+2]
        let blue: UInt8 = data[offset+3]

        let color = UIColor(red: CGFloat(red)/255.0, green: CGFloat(green)/255.0, blue: CGFloat(blue)/255.0, alpha: CGFloat(alpha)/255.0)

        return color
    }

}

I fixed this error by changing:

let data = UnsafePointer<UInt8>(uncastedData)

->

let data = UnsafeRawPointer(uncastedData)

But then I get another error: 'Type 'UnsafeRawPointer?' has no subscript members'

How can I fix this error?

The following is excerpted from some Swift 3 code I use to sample pixels from an image in order to find its predominant hue, which I use to generate backgrounds for tableView rows. The mechanics of the hue-selection process aren't relevant to your problem, so I'm only providing the relevant fragment.

let colorSpace = CGColorSpaceCreateDeviceRGB() //  UIExtendedSRGBColorSpace
let newImage = image.cgImage?.copy(colorSpace: colorSpace)

let pixelData = newImage?.dataProvider!.data
let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)

var hueFrequency = [Int: Double]()
hueFrequency[1] = 1                 // Add one entry so this serves as a default if no hues from the image pass the filters

let nStart = 1
let mStart = 1

for n in nStart...Int(image.size.width / samplingFactor) {
    for m in mStart...Int(image.size.height / samplingFactor) {
        let pixelInfo: Int = ((Int(image.size.width) * m * Int(samplingFactor)) + n * Int(samplingFactor)) * 4 // bytesPerPixel

        let b = CGFloat(data[pixelInfo]) / CGFloat(255.0)           // cgImage bitmapInfo rawValue 8194 -> BGRA ordering
        let g = CGFloat(data[pixelInfo+1]) / CGFloat(255.0)
        let r = CGFloat(data[pixelInfo+2]) / CGFloat(255.0)
        let a = CGFloat(data[pixelInfo+3]) / CGFloat(255.0)

        // ... hue calculation and tallying into hueFrequency omitted ...
    }
}

Also, note that I found the bitmapInfo value (image.cgImage!.bitmapInfo, with my parameters) indicated that the RGBA sequence was reordered to BGRA, an ordering I had to account for when picking the data out in the steps above. If your colors come out wrong, you may want to check that.
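
As a rough illustration of that check (a sketch only; treat it as an assumption that you have a 32-bit, 8-bits-per-component image, since the exact flags depend on how the CGImage was produced), you could inspect the bitmapInfo and alphaInfo before deciding how to index the components:

    import UIKit

    // Sketch: guess whether 32-bit pixel data is laid out BGRA or RGBA
    // from the CGImage's bitmapInfo/alphaInfo. The rawValue 8194 mentioned
    // above is .byteOrder32Little (8192) combined with .premultipliedFirst (2).
    func componentOrder(of cgImage: CGImage) -> String {
        let info = cgImage.bitmapInfo
        let alpha = cgImage.alphaInfo

        if info.contains(.byteOrder32Little),
           alpha == .premultipliedFirst || alpha == .first {
            return "BGRA"   // little-endian, alpha-first: bytes are B, G, R, A
        }
        return "RGBA"       // assume big-endian / alpha-last layouts otherwise
    }

With this, an image whose bitmapInfo has rawValue 8194 would report "BGRA", matching the b/g/r/a ordering used in the loop above.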

When your data is an UnsafeRawPointer, you can write it like this:

    let alpha = data.load(fromByteOffset: offset, as: UInt8.self)
    let red = data.load(fromByteOffset: offset+1, as: UInt8.self)
    let green = data.load(fromByteOffset: offset+2, as: UInt8.self)
    let blue = data.load(fromByteOffset: offset+3, as: UInt8.self)

Otherwise, you can get an UnsafeMutablePointer<UInt8> from your uncastedData (assuming it is an UnsafeMutableRawPointer):

    let data = uncastedData.assumingMemoryBound(to: UInt8.self)
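
Putting that together, the question's colorAt(x:y:) might look roughly like this in Swift 3 (a sketch that assumes the class's context, width and height properties from the question, and a premultiplied-first context with no byte-order flag, so each pixel is stored as alpha, red, green, blue):

    // Sketch of colorAt(x:y:) after applying the fix above.
    // Assumes `context: CGContext?` plus the `width`/`height` properties
    // from the question, and A, R, G, B byte order in the bitmap.
    func colorAt(x: Int, y: Int) -> UIColor {
        assert(0 <= x && x < width)
        assert(0 <= y && y < height)

        guard let uncastedData = context?.data else { return .clear }
        let data = uncastedData.assumingMemoryBound(to: UInt8.self)

        let offset = 4 * (y * width + x)

        let alpha = CGFloat(data[offset])     / 255.0
        let red   = CGFloat(data[offset + 1]) / 255.0
        let green = CGFloat(data[offset + 2]) / 255.0
        let blue  = CGFloat(data[offset + 3]) / 255.0

        return UIColor(red: red, green: green, blue: blue, alpha: alpha)
    }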

SWIFT 3 (updated March 2017), Xcode 8 / iOS 10

IMPORTANT: Note that the return value uses red: b, green: g and blue: r, because in the data they are stored in reverse order.

  • First, create the extension (you can copy and paste it into your code):

    extension UIImage {
    
        func getPixelColor(pos: CGPoint) -> UIColor {
    
            if let pixelData = self.cgImage?.dataProvider?.data {
                let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
    
                let pixelInfo: Int = ((Int(self.size.width) * Int(pos.y)) + Int(pos.x)) * 4
    
                let r = CGFloat(data[pixelInfo+0]) / CGFloat(255.0)
                let g = CGFloat(data[pixelInfo+1]) / CGFloat(255.0)
                let b = CGFloat(data[pixelInfo+2]) / CGFloat(255.0)
                let a = CGFloat(data[pixelInfo+3]) / CGFloat(255.0)
    
                return UIColor(red: b, green: g, blue: r, alpha: a)
            } else {
                // If something is wrong, white is returned here; change as needed
                return UIColor.white
            }
        }
    }
    
  • Then call it like this:

    let colorAtPixel : UIColor = (theView.image?.getPixelColor(pos: CGPoint(x: 2, y: 2)))!
    

Although the code returns an accurate color, it doesn't seem to return the correct color for different CGPoints.

Could it be because of screen resolution (1x, 2x, 3x)?

It would be great if someone could shed some light on this mystery...
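
One likely factor (my assumption, not something confirmed above) is that UIImage.size is measured in points while the cgImage buffer is measured in pixels, so on 2x/3x images the index lands on the wrong pixel. A sketch that converts the point using the image's scale and indexes against the CGImage's own dimensions could look like this (the helper name pixelColor(atPoint:) is made up for illustration):

    extension UIImage {
        // Sketch: convert a point-based coordinate to pixel coordinates using
        // the image's scale, and use the CGImage's bytesPerRow so row padding
        // doesn't skew the index. Assumes a 4-bytes-per-pixel RGBA layout.
        func pixelColor(atPoint point: CGPoint) -> UIColor? {
            guard let cgImage = self.cgImage,
                  let pixelData = cgImage.dataProvider?.data,
                  let data = CFDataGetBytePtr(pixelData) else { return nil }

            let px = Int(point.x * self.scale)
            let py = Int(point.y * self.scale)
            guard px >= 0, px < cgImage.width, py >= 0, py < cgImage.height else { return nil }

            let pixelIndex = py * cgImage.bytesPerRow + px * 4

            let r = CGFloat(data[pixelIndex])     / 255.0
            let g = CGFloat(data[pixelIndex + 1]) / 255.0
            let b = CGFloat(data[pixelIndex + 2]) / 255.0
            let a = CGFloat(data[pixelIndex + 3]) / 255.0
            return UIColor(red: r, green: g, blue: b, alpha: a)
        }
    }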

Swift 3 (iOS 10.3)

extension UIImage {

    func getPixelColor(atLocation location: CGPoint, withFrameSize size: CGSize) -> UIColor {
        let x: CGFloat = (self.size.width) * location.x / size.width
        let y: CGFloat = (self.size.height) * location.y / size.height

        let pixelPoint: CGPoint = CGPoint(x: x, y: y)

        let pixelData = self.cgImage!.dataProvider!.data
        let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)

        let pixelIndex: Int = ((Int(self.size.width) * Int(pixelPoint.y)) + Int(pixelPoint.x)) * 4

        let r = CGFloat(data[pixelIndex]) / CGFloat(255.0)
        let g = CGFloat(data[pixelIndex+1]) / CGFloat(255.0)
        let b = CGFloat(data[pixelIndex+2]) / CGFloat(255.0)
        let a = CGFloat(data[pixelIndex+3]) / CGFloat(255.0)

        return UIColor(red: r, green: g, blue: b, alpha: a)
    }

}

Usage:

let color = yourImageView.image!.getPixelColor(atLocation: location, withFrameSize: yourImageView.frame.size)

location is a CGPoint and size is the size of your imageView.