Custom CoreML output layer that sums multiArray output
Please bear with me; I'm new to Core ML and machine learning. I have a Core ML model that I was able to convert from a research paper implementation that used Caffe. It's a CSRNet, and the objective is crowd counting. After much wrangling, I was able to load the MLModel into Python with coremltools, preprocess an image with Pillow, and predict an output. The result is a MultiArray (a density map), which I then process further to arrive at the actual numerical prediction.
How do I add a custom layer to the model, as its output, that takes the current output and performs the following function? (Essentially, it sums all the values in the MultiArray.) I've read many articles and I'm still confused. I want to be able to save the model/layer and import it into Xcode so that the MLModel result is a single numerical value, not a MultiArray.
Here is the code I currently use (in Python) to convert the model output into a number:
# predict output (a dict mapping output names to arrays)
output = model.predict({'data': img})
# fold the dict values into a single array, then sum every element of the density map
summed_output = sum(output.values())
prediction = np.sum(summed_output)
print("prediction: ", prediction)
Full (abbreviated) code:
import coremltools as ct
from PIL import Image
import numpy as np

# instantiate model (CSRNet)
model = ct.models.MLModel('shanghai_b.mlmodel')

# function to load and resize an image
def load_image(path, resize_to=None):
    img = Image.open(path)
    if resize_to is not None:
        # ANTIALIAS is deprecated in newer Pillow releases; Image.LANCZOS is the modern name
        img = img.resize(resize_to, Image.ANTIALIAS)
    img_np = np.array(img).astype(np.float32)
    return img_np, img

# select image
image = 'IMG_173.jpg'

# resize image
_, img = load_image(image, resize_to=(900, 675))

# predict output
output = model.predict({'data': img})
summed_output = sum(output.values())
prediction = np.sum(summed_output)
print("prediction: ", prediction)
Xcode shows the MLModel's output as: "MultiArray (Double 1 x 168 x 225)". When I import the same model into Python with coremltools, its spec description reads as follows:
<bound method MLModel.predict of input {
  name: "data"
  type {
    imageType {
      width: 224
      height: 224
      colorSpace: RGB
    }
  }
}
output {
  name: "estdmap"
  type {
    multiArrayType {
      dataType: DOUBLE
    }
  }
}
>
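(Side note: the <bound method MLModel.predict ...> wrapper above appears because I printed model.predict itself instead of calling it; printing the description directly would look something like this:)

import coremltools as ct

model = ct.models.MLModel('shanghai_b.mlmodel')
print(model.get_spec().description)  # same input/output info, without the bound-method wrapper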
Thanks for your help! I'm happy to post any other code from the process if it's useful.
P.S. I'm adding the code from my Xcode project for reference.
private func detectImage(_ image: CIImage) {
    guard let model = try? VNCoreMLModel(for: HundredsPredictor().model) else {
        fatalError("Loading to CoreML failed")
    }

    let modelRequest = VNCoreMLRequest(model: model) { (request, error) in
        if error != nil {
            print(error?.localizedDescription ?? "Error")
        } else {
            guard let result = request.results as? [VNObservation] else { fatalError("Error") }
            if #available(iOS 14.0, *) {
                print(result)
                // output: [<VNCoreMLFeatureValueObservation: 0x282069da0> 344A87BC-B13E-4195-922E-7381694C91FF requestRevision=1 confidence=1.000000 timeRange={{0/1 = 0.000}, {0/1 = 0.000}} "density_map" - "MultiArray: Double 1 × 168 × 225 array" (1.000000)]
            } else {
                // Fallback on earlier versions
            }
            if let firstResult = result.first {
                print(firstResult)
                // output: <VNCoreMLFeatureValueObservation: 0x282069da0> 344A87BC-B13E-4195-922E-7381694C91FF requestRevision=1 confidence=1.000000 timeRange={{0/1 = 0.000}, {0/1 = 0.000}} "density_map" - "MultiArray: Double 1 × 168 × 225 array" (1.000000)
            }
        }
    }

    let handler = VNImageRequestHandler(ciImage: image)
    do {
        try handler.perform([modelRequest])
        print(handler)
    } catch let error as NSError {
        print(error)
    }
}
Update: Solution
In Python:
import coremltools as ct

from helpers import get_nn
# helper file sourced from Matthijs Hollemans' GitHub
# url: https://github.com/hollance/coreml-survival-guide/blob/master/Scripts/helpers.py

# load original model
spec = ct.utils.load_spec("HundredsPredictor.mlmodel")
nn = get_nn(spec)

# construct new layer
new_layer = nn.layers.add()
new_layer.name = "summingLayer"
params = ct.proto.NeuralNetwork_pb2.ReduceLayerParams
new_layer.reduce.mode = params.SUM
new_layer.reduce.axis = params.CHW

# splice the new layer in after the (previously) last layer: the new layer
# takes over the model's output name, and the old last layer gets a fresh
# intermediate name that feeds the new layer
new_layer.output.append(nn.layers[-2].output[0])
nn.layers[-2].output[0] = nn.layers[-2].name + "_output"
new_layer.input.append(nn.layers[-2].output[0])

# update the declared output shape
spec.description.output[0].type.multiArrayType.shape[0] = 1

# save new model
ct.models.utils.save_spec(spec, "HundredPredictorSummed.mlmodel")
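As a quick sanity check (a sketch reusing the img prepared earlier), the new model should now return a single value:

summed_model = ct.models.MLModel("HundredPredictorSummed.mlmodel")
out = summed_model.predict({"data": img})
print(out)  # expect a single summed value instead of a 1 x 168 x 225 density map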
In Swift, after importing the new, updated model:
private func detectImage(_ image: CIImage) {
    guard let model = try? VNCoreMLModel(for: HundredPredictorSummed().model) else {
        fatalError("Loading to CoreML failed")
    }

    let request = VNCoreMLRequest(model: model) { [weak self] request, error in
        guard let results = request.results as? [VNCoreMLFeatureValueObservation],
              let topResult = results.first else {
            fatalError("Unexpected result type from VNCoreMLRequest")
        }
        DispatchQueue.main.async {
            guard let data = topResult.featureValue.multiArrayValue else { return }
            let ptr = data.dataPointer.assumingMemoryBound(to: Double.self)
            let sum = ptr[0]
            print("SUM: ", sum)
            self?.detectLabel.text = "~\(String(Int(round(sum)))) ppl"
        }
    }

    let handler = VNImageRequestHandler(ciImage: image)
    DispatchQueue.global(qos: .userInteractive).async {
        do {
            try handler.perform([request])
        } catch {
            print(error)
        }
    }
}
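Equivalently, a sketch using MLMultiArray's NSNumber subscript instead of the raw pointer (assuming the same data as above):

// alternative to dataPointer: subscript access returns an NSNumber
let sum = data[0].doubleValue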
You can add a ReduceSumLayerParams to the end of the model. You'll need to do this by hand in Python. If you set its reduceAll parameter to true, it computes the sum over the entire tensor.
However, in my opinion it's just as easy to use the model as-is: in your Swift code, grab a pointer to the MLMultiArray's data and compute the sum with vDSP.sum(a).
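A minimal sketch of that second approach (countPeople is a hypothetical helper name, and it assumes a Double MultiArray like the one this model produces):

import Accelerate
import CoreML

// Sketch: sum every element of the density-map output with a single vDSP call.
func countPeople(from densityMap: MLMultiArray) -> Double {
    let ptr = densityMap.dataPointer.assumingMemoryBound(to: Double.self)
    let buffer = UnsafeBufferPointer(start: ptr, count: densityMap.count)
    return vDSP.sum(buffer)
}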