在 AzureML 中部署模型时如何将参数传递给评分文件
How to pass arguments to scoring file when deploying a Model in AzureML
我正在使用 Python SDK 将经过训练的模型部署到 Azure 机器学习上的 ACI 端点。
我已经创建了我的 score.py 文件,但我希望调用该文件时传递一个我可以使用 argparse
解释的参数(就像训练文件一样)。
但是,我似乎找不到如何传递参数
这是我用来创建 InferenceConfig 环境的代码,但显然不起作用。我是否应该改用额外的 Docker 文件步骤?
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.environment import Environment
from azureml.core.model import InferenceConfig

# Hosted environment with the packages the scoring script needs.
env = Environment('my_hosted_environment')
env.python.conda_dependencies = CondaDependencies.create(
    conda_packages=['scikit-learn'],
    pip_packages=['azureml-defaults'])

# NOTE(review): this is the approach the question says does NOT work --
# InferenceConfig expects entry_script to be a plain path to the script
# file; appending command-line arguments to it is not supported.
scoring_script = 'score.py --model_name ' + model_name
inference_config = InferenceConfig(entry_script=scoring_script, environment=env)
添加 score.py 作为我希望如何使用该脚本中的参数的参考:
#removed imports
import argparse
def init():
    """Load the registered sklearn model when the scoring container starts.

    NOTE(review): this relies on AzureML passing ``--model_name`` on the
    scoring script's command line, which the deployment shown above does
    not actually do -- that is exactly what the question is asking about.
    """
    global model
    parser = argparse.ArgumentParser(description="Load sklearn model")
    parser.add_argument('--model_name', dest="model_name", required=True)
    # parse_known_args ignores any extra arguments the host may add.
    args, _ = parser.parse_known_args()
    model_path = Model.get_model_path(model_name=args.model_name)
    model = joblib.load(model_path)
def run(raw_data):
    """Score one JSON request; return predictions as a list, or the error text."""
    try:
        features = np.array(json.loads(raw_data)['data'])
        return model.predict(features).tolist()
    except Exception as exc:
        return str(exc)
有兴趣听听你的想法
可以在 model-register-and-deploy.ipynb 中找到如何使用环境进行部署的示例。InferenceConfig 类接受 source_directory 和 entry_script 参数,其中 source_directory 是一个文件夹的路径,该文件夹包含创建镜像所需的所有文件(score.py 以及任何其他附加文件)。
此 multi-model-register-and-deploy.ipynb 包含有关如何使用 source_directory 和 entry_script 创建 InferenceConfig 的代码片段。
from azureml.core.webservice import Webservice
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment

# Build the environment from a conda spec file instead of in code.
myenv = Environment.from_conda_specification(name="myenv", file_path="myenv.yml")
# entry_script is a plain path; any extra files go in source_directory.
inference_config = InferenceConfig(entry_script="score.py", environment=myenv)
# NOTE(review): `ws`, `model` and `aciconfig` are assumed to be defined
# earlier in the notebook -- confirm before running standalone.
service = Model.deploy(workspace=ws,
                       name='sklearn-mnist-svc',
                       models=[model],
                       inference_config=inference_config,
                       deployment_config=aciconfig)
service.wait_for_deployment(show_output=True)
print(service.scoring_uri)
这个问题已经有一年了。提供解决方案以帮助那些可能仍在寻找答案的人。我对类似问题的回答是 here。您可以将本机 python 数据类型变量传递到推理配置中,并在评分脚本中将它们作为环境变量访问。
我以不同的方式解决了这个问题。当 score.py 被 InferenceConfig 使用时,我找不到一种(正确且易于遵循的)方法来传递参数。相反,我所做的是遵循 4 个步骤:
- 已创建score_template.py并定义应分配的变量
- 读取 score_template.py 的内容并通过用所需值替换变量来修改它
- 将修改后的内容写入score.py
- 最后将score.py传给InferenceConfig
score_template.py 中的第 1 步:
import json
from azureml.core.model import Model
import os
import joblib
import pandas as pd
import numpy as np
def init():
    """Load the model once when the scoring container starts.

    The ``#MODEL_NAME#`` / ``#MODEL_SAVED_FILE#`` placeholders are replaced
    with real values by the deployment script before this file is handed
    to InferenceConfig. Tries the AZUREML_MODEL_DIR layout first and falls
    back to Model.get_model_path.
    """
    global model
    model_name = "#MODEL_NAME#"
    model_saved_file = '#MODEL_SAVED_FILE#'
    try:
        # AZUREML_MODEL_DIR points at the mounted model folder; if it is
        # unset, os.path.join(None, ...) raises TypeError and we fall back.
        model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), model_saved_file)
        model = joblib.load(model_path)
    except Exception:
        # Was a bare `except:` -- that would also swallow SystemExit and
        # KeyboardInterrupt; `except Exception` keeps the fallback intact
        # without masking interpreter-level signals.
        model_path = Model.get_model_path(model_name)
        model = joblib.load(model_path)
def run(raw_data):
    """Score one request; return {"result": [...]} on success or the error string."""
    try:
        features = np.array(json.loads(raw_data)["data"])
        predictions = model.predict(features)
        # Any JSON-serializable payload may be returned to the caller.
        return {"result": predictions.tolist()}
    except Exception as exc:
        return str(exc)
deploy_model.py 中的第 2-4 步:
# --- Render the entry script from its template (pass the model name) ---
entry_script = "score.py"
entry_script_temp = "score_template.py"
print("Prepare Entry Script")
# Load the template, substitute the placeholders, write the final score.py
# that will be handed to InferenceConfig below.
with open(entry_script_temp, 'r') as template_file:
    contents = template_file.read()
contents = (contents
            .replace('#MODEL_NAME#', model_name)
            .replace('#MODEL_SAVED_FILE#', model_file_name))
with open(entry_script, 'w') as rendered_file:
    rendered_file.write(contents)
# --- Define configs for the deployment ---
print("Get Environment")  # fixed typo: was "Get Environtment"
env = Environment.get(workspace=ws, name=env_name)
env.inferencing_stack_version = "latest"
print("Inference Configuration")
# entry_script is the rendered score.py; source_directory must contain it.
inference_config = InferenceConfig(entry_script=entry_script, environment=env, source_directory=base_path)
aci_config = AciWebservice.deploy_configuration(cpu_cores=int(cpu_cores), memory_gb=int(memory_gb), location=location)

# --- Deploy the service ---
print("Deploy Model")
print("model version:", model_artifact.version)
service = Model.deploy(workspace=ws,
                       name=service_name,
                       models=[model_artifact],
                       inference_config=inference_config,
                       deployment_config=aci_config,
                       overwrite=True)
service.wait_for_deployment(show_output=True)
我正在使用 Python SDK 将经过训练的模型部署到 Azure 机器学习上的 ACI 端点。
我已经创建了我的 score.py 文件,但我希望调用该文件时传递一个我可以使用 argparse
解释的参数(就像训练文件一样)。
但是,我似乎找不到如何传递参数
这是我用来创建 InferenceConfig 环境的代码,但显然不起作用。我是否应该改用额外的 Docker 文件步骤?
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.environment import Environment
from azureml.core.model import InferenceConfig

# Hosted environment with the packages the scoring script needs.
env = Environment('my_hosted_environment')
env.python.conda_dependencies = CondaDependencies.create(
    conda_packages=['scikit-learn'],
    pip_packages=['azureml-defaults'])

# NOTE(review): this is the approach the question says does NOT work --
# InferenceConfig expects entry_script to be a plain path to the script
# file; appending command-line arguments to it is not supported.
scoring_script = 'score.py --model_name ' + model_name
inference_config = InferenceConfig(entry_script=scoring_script, environment=env)
添加 score.py 作为我希望如何使用该脚本中的参数的参考:
#removed imports
import argparse
def init():
    """Load the registered sklearn model when the scoring container starts.

    NOTE(review): this relies on AzureML passing ``--model_name`` on the
    scoring script's command line, which the deployment shown above does
    not actually do -- that is exactly what the question is asking about.
    """
    global model
    parser = argparse.ArgumentParser(description="Load sklearn model")
    parser.add_argument('--model_name', dest="model_name", required=True)
    # parse_known_args ignores any extra arguments the host may add.
    args, _ = parser.parse_known_args()
    model_path = Model.get_model_path(model_name=args.model_name)
    model = joblib.load(model_path)
def run(raw_data):
    """Score one JSON request; return predictions as a list, or the error text."""
    try:
        features = np.array(json.loads(raw_data)['data'])
        return model.predict(features).tolist()
    except Exception as exc:
        return str(exc)
有兴趣听听你的想法
可以在 model-register-and-deploy.ipynb 中找到如何使用环境进行部署的示例。InferenceConfig 类接受 source_directory 和 entry_script 参数,其中 source_directory 是一个文件夹的路径,该文件夹包含创建镜像所需的所有文件(score.py 以及任何其他附加文件)。
此 multi-model-register-and-deploy.ipynb 包含有关如何使用 source_directory 和 entry_script 创建 InferenceConfig 的代码片段。
from azureml.core.webservice import Webservice
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment

# Build the environment from a conda spec file instead of in code.
myenv = Environment.from_conda_specification(name="myenv", file_path="myenv.yml")
# entry_script is a plain path; any extra files go in source_directory.
inference_config = InferenceConfig(entry_script="score.py", environment=myenv)
# NOTE(review): `ws`, `model` and `aciconfig` are assumed to be defined
# earlier in the notebook -- confirm before running standalone.
service = Model.deploy(workspace=ws,
                       name='sklearn-mnist-svc',
                       models=[model],
                       inference_config=inference_config,
                       deployment_config=aciconfig)
service.wait_for_deployment(show_output=True)
print(service.scoring_uri)
这个问题已经有一年了。提供解决方案以帮助那些可能仍在寻找答案的人。我对类似问题的回答是 here。您可以将本机 python 数据类型变量传递到推理配置中,并在评分脚本中将它们作为环境变量访问。
我以不同的方式解决了这个问题。当 score.py 被 InferenceConfig 使用时,我找不到一种(正确且易于遵循的)方法来传递参数。相反,我所做的是遵循 4 个步骤:
- 已创建score_template.py并定义应分配的变量
- 读取 score_template.py 的内容并通过用所需值替换变量来修改它
- 将修改后的内容写入score.py
- 最后将score.py传给InferenceConfig
score_template.py 中的第 1 步:
import json
from azureml.core.model import Model
import os
import joblib
import pandas as pd
import numpy as np
def init():
    """Load the model once when the scoring container starts.

    The ``#MODEL_NAME#`` / ``#MODEL_SAVED_FILE#`` placeholders are replaced
    with real values by the deployment script before this file is handed
    to InferenceConfig. Tries the AZUREML_MODEL_DIR layout first and falls
    back to Model.get_model_path.
    """
    global model
    model_name = "#MODEL_NAME#"
    model_saved_file = '#MODEL_SAVED_FILE#'
    try:
        # AZUREML_MODEL_DIR points at the mounted model folder; if it is
        # unset, os.path.join(None, ...) raises TypeError and we fall back.
        model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), model_saved_file)
        model = joblib.load(model_path)
    except Exception:
        # Was a bare `except:` -- that would also swallow SystemExit and
        # KeyboardInterrupt; `except Exception` keeps the fallback intact
        # without masking interpreter-level signals.
        model_path = Model.get_model_path(model_name)
        model = joblib.load(model_path)
def run(raw_data):
    """Score one request; return {"result": [...]} on success or the error string."""
    try:
        features = np.array(json.loads(raw_data)["data"])
        predictions = model.predict(features)
        # Any JSON-serializable payload may be returned to the caller.
        return {"result": predictions.tolist()}
    except Exception as exc:
        return str(exc)
deploy_model.py 中的第 2-4 步:
# --- Render the entry script from its template (pass the model name) ---
entry_script = "score.py"
entry_script_temp = "score_template.py"
print("Prepare Entry Script")
# Load the template, substitute the placeholders, write the final score.py
# that will be handed to InferenceConfig below.
with open(entry_script_temp, 'r') as template_file:
    contents = template_file.read()
contents = (contents
            .replace('#MODEL_NAME#', model_name)
            .replace('#MODEL_SAVED_FILE#', model_file_name))
with open(entry_script, 'w') as rendered_file:
    rendered_file.write(contents)
# --- Define configs for the deployment ---
print("Get Environment")  # fixed typo: was "Get Environtment"
env = Environment.get(workspace=ws, name=env_name)
env.inferencing_stack_version = "latest"
print("Inference Configuration")
# entry_script is the rendered score.py; source_directory must contain it.
inference_config = InferenceConfig(entry_script=entry_script, environment=env, source_directory=base_path)
aci_config = AciWebservice.deploy_configuration(cpu_cores=int(cpu_cores), memory_gb=int(memory_gb), location=location)

# --- Deploy the service ---
print("Deploy Model")
print("model version:", model_artifact.version)
service = Model.deploy(workspace=ws,
                       name=service_name,
                       models=[model_artifact],
                       inference_config=inference_config,
                       deployment_config=aci_config,
                       overwrite=True)
service.wait_for_deployment(show_output=True)