
Understanding inputs for Google AI Platform custom prediction routines

I am following this documentation on custom prediction routines, and I am trying to understand what the inputs to a custom prediction routine look like. The code that sends the input looks like this:

from googleapiclient import discovery

instances = [
    [6.7, 3.1, 4.7, 1.5],
    [4.6, 3.1, 1.5, 0.2],
]

service = discovery.build('ml', 'v1')
name = 'projects/{}/models/{}'.format(project, model)

if version is not None:
    name += '/versions/{}'.format(version)

response = service.projects().predict(
    name=name,
    body={'instances': instances}
).execute()

The Predictor.py, for now, is as simple as it gets. I am just trying to understand what the inputs look like...

import numpy as np


class Predictor(object):
    """An example Predictor for an AI Platform custom prediction routine."""

    def __init__(self, model):
        self._model = model

    def predict(self, instances, **kwargs):
        inputs = np.asarray(instances)
        if kwargs.get('max'):
            return np.argmax(inputs, axis=1)

        return np.sum(inputs)

    @classmethod
    def from_path(cls, model_dir):
        return cls(None)
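
For reference, this is a local sketch of how I would expect the class above to behave if instances is just the parsed JSON list from the request body (the same list of lists sent in body={'instances': instances}); I have not been able to confirm this on AI Platform itself:

# Local check only (not run on AI Platform): call the Predictor defined
# above directly with the same list of lists the request body carries.
predictor = Predictor.from_path('.')

instances = [
    [6.7, 3.1, 4.7, 1.5],
    [4.6, 3.1, 1.5, 0.2],
]
print(predictor.predict(instances))            # np.sum over everything -> 25.4
print(predictor.predict(instances, max=True))  # np.argmax per row -> [0 0]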

But when I try to get the response, I get the following error:

{
  "error": "Prediction failed: unknown error."
}

On top of that, debugging is very hard because there is no way to step into the code or print logs... I have no idea what is going on. What do the inputs look like, and how should I access them? This is only a simple test, but eventually I will be sending images, which will be even harder to debug. How will I receive them, and how should I preprocess them in the predictor? Let's assume the processing I do at training time looks like this:

import cv2
import numpy as np

def preprocess(img_path):
    data = cv2.imread(str(img_path))
    data = cv2.resize(data, (224, 224))
    data = cv2.cvtColor(data, cv2.COLOR_BGR2RGB)
    x = data.astype(np.float32) / 255.
    return np.expand_dims(x, axis=0)  # shape (1, 224, 224, 3)

What does the instances object look like, so that I can build the preprocessing accordingly? Thanks in advance.

I am building a new sample for custom prediction which may be useful for your debugging. First, I write the file locally from a notebook (Colab):

%%writefile model_prediction.py

import numpy as np
import os
import pickle
import pandas as pd
import importlib

class CustomModelPrediction(object):
    _UNUSED_COLUMNS = ['fnlwgt', 'education', 'gender']
    _CSV_COLUMNS = [
        'age', 'workclass', 'fnlwgt', 'education', 'education_num',
        'marital_status', 'occupation', 'relationship', 'race', 'gender',
        'capital_gain', 'capital_loss', 'hours_per_week', 'native_country',
        'income_bracket'
    ]
    _CATEGORICAL_TYPES = {
        'workclass': pd.api.types.CategoricalDtype(categories=[
            'Federal-gov', 'Local-gov', 'Never-worked', 'Private',
            'Self-emp-inc',
            'Self-emp-not-inc', 'State-gov', 'Without-pay'
        ]),
        'marital_status': pd.api.types.CategoricalDtype(categories=[
            'Divorced', 'Married-AF-spouse', 'Married-civ-spouse',
            'Married-spouse-absent', 'Never-married', 'Separated', 'Widowed'
        ]),
        'occupation': pd.api.types.CategoricalDtype([
            'Adm-clerical', 'Armed-Forces', 'Craft-repair',
            'Exec-managerial',
            'Farming-fishing', 'Handlers-cleaners', 'Machine-op-inspct',
            'Other-service', 'Priv-house-serv', 'Prof-specialty',
            'Protective-serv',
            'Sales', 'Tech-support', 'Transport-moving'
        ]),
        'relationship': pd.api.types.CategoricalDtype(categories=[
            'Husband', 'Not-in-family', 'Other-relative', 'Own-child',
            'Unmarried',
            'Wife'
        ]),
        'race': pd.api.types.CategoricalDtype(categories=[
            'Amer-Indian-Eskimo', 'Asian-Pac-Islander', 'Black', 'Other',
            'White'
        ]),
        'native_country': pd.api.types.CategoricalDtype(categories=[
            'Cambodia', 'Canada', 'China', 'Columbia', 'Cuba',
            'Dominican-Republic',
            'Ecuador', 'El-Salvador', 'England', 'France', 'Germany',
            'Greece',
            'Guatemala', 'Haiti', 'Holand-Netherlands', 'Honduras', 'Hong',
            'Hungary',
            'India', 'Iran', 'Ireland', 'Italy', 'Jamaica', 'Japan', 'Laos',
            'Mexico',
            'Nicaragua', 'Outlying-US(Guam-USVI-etc)', 'Peru',
            'Philippines', 'Poland',
            'Portugal', 'Puerto-Rico', 'Scotland', 'South', 'Taiwan',
            'Thailand',
            'Trinadad&Tobago', 'United-States', 'Vietnam', 'Yugoslavia'
        ])
    }

    def __init__(self, model, processor):
        self._model = model
        self._processor = processor
        self._class_names = ['<=50K', '>50K']

    def _preprocess(self, instances):
        """Build a one-row DataFrame from a raw instance and convert its
        categorical features to numeric codes.

        Args:
          instances: A list of raw feature values, one per CSV column
            (excluding the label column).
        """
        dataframe = pd.DataFrame(data=[instances], columns=self._CSV_COLUMNS[:-1])
        dataframe = dataframe.drop(columns=self._UNUSED_COLUMNS)
        # Convert integer valued (numeric) columns to floating point
        numeric_columns = dataframe.select_dtypes(['int64']).columns
        dataframe[numeric_columns] = dataframe[numeric_columns].astype(
            'float32')

        # Convert categorical columns to numeric
        cat_columns = dataframe.select_dtypes(['object']).columns
        # Keep categorical columns always using same values based on dict.
        dataframe[cat_columns] = dataframe[cat_columns].apply(
            lambda x: x.astype(self._CATEGORICAL_TYPES[x.name]))
        dataframe[cat_columns] = dataframe[cat_columns].apply(
            lambda x: x.cat.codes)
        return dataframe

    def predict(self, instances, **kwargs):
        preprocessed_data = self._preprocess(instances)
        preprocessed_inputs = self._processor.preprocess(preprocessed_data)
        outputs = self._model.predict_classes(preprocessed_inputs)
        if kwargs.get('probabilities'):
            return outputs.tolist()
        else:
            return [self._class_names[index] for index in
                    np.argmax(outputs, axis=1)]

    @classmethod
    def from_path(cls, model_dir):
        import tensorflow as tf
        model_path = os.path.join(model_dir, 'model.h5')
        model = tf.keras.models.load_model(model_path)

        preprocessor_path = os.path.join(model_dir, 'preprocessor.pkl')
        with open(preprocessor_path, 'rb') as f:
            preprocessor = pickle.load(f)

        return cls(model, preprocessor)

After writing the file, I can test it locally like this before deploying the model:

from model_prediction import CustomModelPrediction
model = CustomModelPrediction.from_path('.')
instance = [25, 'Private', 226802, '11th', 7, 'Never-married', 'Machine-op-inspct', 'Own-child', 'Black', 'Male', 0, 0, 40, 'United-States']
model.predict(instance)
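
The **kwargs branch of predict can be exercised locally the same way; when deployed, any extra keys in the request body besides instances should arrive through **kwargs, so the call below mirrors sending body={'instances': [...], 'probabilities': True}. The actual output depends on the model.h5 and preprocessor.pkl in the directory:

# Exercise the kwargs branch of predict(); values depend entirely on the
# trained model.h5 and preprocessor.pkl that from_path('.') loaded above.
model.predict(instance, probabilities=True)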

Another option, once you have built the setup package, is to test the installation locally, where my_custom_code-0.1.tar.gz is the file you will deploy to AI Platform:

 pip install --target=/tmp/custom_lib --no-cache-dir -b /tmp/pip_builds my_custom_code-0.1.tar.gz
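
For reference, here is a minimal sketch of the setup.py that could produce my_custom_code-0.1.tar.gz, assuming the model_prediction.py written above is shipped as a script (the name and version are placeholders that just have to match the tarball you deploy):

# Hypothetical setup.py; build the source distribution with
#   python setup.py sdist --formats=gztar
from setuptools import setup

setup(
    name='my_custom_code',
    version='0.1',
    scripts=['model_prediction.py'])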

Also have a look at this section:

You can use --enable-console-logging to get logs exported to your project. You may need to create a new model.

It looks like the debug code does not work without a model (at least at the time of this post). I got everything working for my image prediction use case with the following code:

import base64
import googleapiclient.discovery

image_filename = 'your image path'
PROJECT_ID = ''
MODEL_NAME = ''
VERSION_NAME = ''

# Read the image and base64-encode it so it can travel inside the JSON body.
img = base64.b64encode(open(image_filename, "rb").read()).decode()
image_bite_dict = {"key": "0", "image_bytes": {"b64": img}}

instances = [image_bite_dict]

service = googleapiclient.discovery.build('ml', 'v1')
name = 'projects/{}/models/{}/versions/{}'.format(PROJECT_ID, MODEL_NAME, VERSION_NAME)
response = service.projects().predict(
    name=name,
    body={'instances': instances}
).execute()
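
On the predictor side, this is roughly how I expect to decode those instances and reuse the training-time preprocessing. ImagePredictor is only a sketch (model loading / from_path is omitted); the image_bytes/b64 field names match the request built above:

import base64

import cv2
import numpy as np


class ImagePredictor(object):
    """Sketch of a predictor that decodes the base64 instances sent above."""

    def __init__(self, model):
        self._model = model

    def _preprocess(self, instances):
        images = []
        for instance in instances:
            # Each instance is the dict from the request:
            # {"key": "0", "image_bytes": {"b64": "<base64 string>"}}
            img_bytes = base64.b64decode(instance['image_bytes']['b64'])
            data = cv2.imdecode(np.frombuffer(img_bytes, np.uint8), cv2.IMREAD_COLOR)
            data = cv2.resize(data, (224, 224))
            data = cv2.cvtColor(data, cv2.COLOR_BGR2RGB)
            images.append(data.astype(np.float32) / 255.)
        return np.stack(images)

    def predict(self, instances, **kwargs):
        inputs = self._preprocess(instances)
        outputs = self._model.predict(inputs)
        # Return a plain list so the response stays JSON-serializable.
        return outputs.tolist()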