如何使用最新的 IBM Watson Studio API 解决 .MPS 文件
How to solve an .MPS file using the latest IBM Watson Studio APIs
我正在尝试迁移一个目前因重大更改而损坏的实用程序,该实用程序使用 IBM 的 API 解决了 .mps 问题。
原始代码使用一个空的 model.tar.gz 文件,创建部署并将 .mps 文件传递给新作业。
(python) 代码如下所示:
# Legacy utility (quoted from the question): deploys an *empty* model.tar.gz
# and supplies the .mps problem file to the job via INPUT_DATA_REFERENCES.
# NOTE(review): this relies on the old meta names (RUNTIME_UID, COMPUTE) that
# were removed by the breaking API change — presumably why it stopped working;
# compare with the SOFTWARE_SPEC_UID / HARDWARE_SPEC example further below.
# Assumes `client`, `COS_ENDPOINT`, `COS_BUCKET`, `cos_credentials` are
# defined earlier (not shown here).
import tarfile
# Create an empty archive: no model inside, the .mps arrives at solve time.
tar = tarfile.open("model.tar.gz", "w:gz")
tar.close()
# Repository metadata for the (empty) model on the CPLEX 12.9 runtime.
test_metadata = {
client.repository.ModelMetaNames.NAME: "Test",
client.repository.ModelMetaNames.DESCRIPTION: "Model for Test",
client.repository.ModelMetaNames.TYPE: "do-cplex_12.9",
client.repository.ModelMetaNames.RUNTIME_UID: "do_12.9"
}
# Store the empty archive as a model and keep its UID for the deployment.
model_details = client.repository.store_model(model='model.tar.gz', meta_props=test_metadata)
model_uid = client.repository.get_model_uid(model_details)
n_nodes = 1
# Batch deployment on a small ('S') compute configuration (old COMPUTE name).
meta_props = {
client.deployments.ConfigurationMetaNames.NAME: "Test Deployment " + str(n_nodes),
client.deployments.ConfigurationMetaNames.DESCRIPTION: "Test Deployment",
client.deployments.ConfigurationMetaNames.BATCH: {},
client.deployments.ConfigurationMetaNames.COMPUTE: {'name': 'S', 'nodes': n_nodes}
}
deployment_details = client.deployments.create(model_uid, meta_props=meta_props)
deployment_uid = client.deployments.get_uid(deployment_details)
# Job payload: solver parameters plus S3 (Cloud Object Storage) references
# for the input .mps file and the solution/log outputs.
solve_payload = {
client.deployments.DecisionOptimizationMetaNames.SOLVE_PARAMETERS: {
'oaas.logAttachmentName':'log.txt',
'oaas.logTailEnabled':'true',
'oaas.resultsFormat': 'JSON'
},
# Input: the .mps problem file read from the COS bucket.
client.deployments.DecisionOptimizationMetaNames.INPUT_DATA_REFERENCES: [
{
'id':'test.mps',
'type': 's3',
'connection': {
'endpoint_url': COS_ENDPOINT,
'access_key_id': cos_credentials['cos_hmac_keys']["access_key_id"],
'secret_access_key': cos_credentials['cos_hmac_keys']["secret_access_key"]
},
'location': {
'bucket': COS_BUCKET,
'path': 'test.mps'
}
}
],
# Outputs: solution JSON and solver log written back to the same bucket.
client.deployments.DecisionOptimizationMetaNames.OUTPUT_DATA_REFERENCES: [
{
'id':'solution.json',
'type': 's3',
'connection': {
'endpoint_url': COS_ENDPOINT,
'access_key_id': cos_credentials['cos_hmac_keys']["access_key_id"],
'secret_access_key': cos_credentials['cos_hmac_keys']["secret_access_key"]
},
'location': {
'bucket': COS_BUCKET,
'path': 'solution.json'
}
},
{
'id':'log.txt',
'type': 's3',
'connection': {
'endpoint_url': COS_ENDPOINT,
'access_key_id': cos_credentials['cos_hmac_keys']["access_key_id"],
'secret_access_key': cos_credentials['cos_hmac_keys']["secret_access_key"]
},
'location': {
'bucket': COS_BUCKET,
'path': 'log.txt'
}
}
]
}
# Submit the batch job against the deployment.
job_details = client.deployments.create_job(deployment_uid, solve_payload)
我最接近的(这几乎正是我需要的)是使用此示例中的大部分代码:
https://github.com/IBM/watson-machine-learning-samples/blob/master/cloud/notebooks/python_sdk/deployments/decision_optimization/Use%20Decision%20Optimization%20to%20plan%20your%20diet.ipynb
这是一个完整的工作示例。
# Complete working example (current API): store a Decision Optimization model
# archive, create a batch deployment, then run a job whose inputs and outputs
# are S3 (Cloud Object Storage) data references.
from ibm_watson_machine_learning import APIClient
import os
import wget
import json
import pandas as pd
import time

COS_ENDPOINT = "https://s3.ams03.cloud-object-storage.appdomain.cloud"
COS_BUCKET = "gvbucketname0api"
model_path = 'do-model.tar.gz'
api_key = 'XXXXX'
# BUG FIX: the original assignment ended with a stray trailing comma, which
# made access_key_id a 1-tuple ("XXXX",) instead of a string — that tuple was
# then sent as-is in every S3 connection block.
access_key_id = "XXXX"
secret_access_key = "XXXX"
location = 'eu-gb'
space_id = 'XXXX'
softwareSpecificationName = "do_12.9"
modelType = "do-docplex_12.9"

wml_credentials = {
    "apikey": api_key,
    "url": 'https://' + location + '.ml.cloud.ibm.com'
}
client = APIClient(wml_credentials)
client.set.default_space(space_id)

# Fetch the sample model archive once; wget saves it under model_path.
if not os.path.isfile(model_path):
    wget.download("https://github.com/IBM/watson-machine-learning-samples/raw/master/cloud/models/decision_optimization/do-model.tar.gz")

# Current API: the model is tied to a *software specification*
# (SOFTWARE_SPEC_UID), not the removed RUNTIME_UID meta name.
software_spec_uid = client.software_specifications.get_uid_by_name(softwareSpecificationName)
model_meta_props = {
    client.repository.ModelMetaNames.NAME: "LOCALLY created DO model",
    client.repository.ModelMetaNames.TYPE: modelType,
    client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: software_spec_uid
}
published_model = client.repository.store_model(model=model_path, meta_props=model_meta_props)
time.sleep(5)  # so that the model is available on the API
published_model_uid = client.repository.get_model_uid(published_model)
client.repository.list_models()

# Current API: HARDWARE_SPEC replaces the old COMPUTE meta name.
meta_data = {
    client.deployments.ConfigurationMetaNames.NAME: "deployment_DO",
    client.deployments.ConfigurationMetaNames.BATCH: {},
    client.deployments.ConfigurationMetaNames.HARDWARE_SPEC: {"name": "S", "num_nodes": 1}
}
deployment_details = client.deployments.create(published_model_uid, meta_props=meta_data)
time.sleep(5)  # so that the deployment is available on the API
deployment_uid = client.deployments.get_uid(deployment_details)
client.deployments.list()

def _s3_ref(ref_id, path):
    """Build one S3 data reference (shared COS connection) for the job payload."""
    return {
        'id': ref_id,
        'type': 's3',
        'connection': {
            'endpoint_url': COS_ENDPOINT,
            'access_key_id': access_key_id,
            'secret_access_key': secret_access_key
        },
        'location': {
            'bucket': COS_BUCKET,
            'path': path
        }
    }

job_payload_ref = {
    client.deployments.DecisionOptimizationMetaNames.INPUT_DATA_REFERENCES: [
        _s3_ref('diet_food.csv', "diet_food.csv"),
        _s3_ref('diet_food_nutrients.csv', "diet_food_nutrients.csv"),
        _s3_ref('diet_nutrients.csv', "diet_nutrients.csv")
    ],
    # '.*' collects every output attachment; ${job_id}/${attachment_name}
    # places the results under a per-job prefix in the bucket.
    client.deployments.DecisionOptimizationMetaNames.OUTPUT_DATA_REFERENCES: [
        _s3_ref('.*', "${job_id}/${attachment_name}")
    ]
}
job = client.deployments.create_job(deployment_uid, meta_props=job_payload_ref)
以上示例使用一个模型和一些 csv 文件作为输入。
当我将 INPUT_DATA_REFERENCES 更改为使用 .mps 文件(和空模型)时,出现错误
"errors": [
{
"code": "invalid_model_archive_in_deployment",
"message": "Invalid or unrecognized archive type in deployment `XXX-XXX-XXX`.
Supported archive types are `zip` or `tar.gz`"
}
我不是专家,但据我了解,mps 文件包含输入文件和模型文件,因此我不必同时提供两者。
答案由 Alex Fleischer 在另一个论坛提供。
可在此处找到完整示例:
https://medium.com/@AlainChabrier/solve-lp-problems-from-do-experiments-9afd4d53aaf5
上面的链接(类似于我问题中的代码)显示了一个带有“.lp”文件的示例,但它对于“.mps”文件也完全相同。
(注意模型类型是 do-cplex_12.10 ,不是 do-docplex_12.10)
我的问题是我使用的是空 model.tar.gz 文件。
一旦您在存档中有了 .lp/.mps 文件,一切都会按预期进行
我正在尝试迁移一个目前因重大更改而损坏的实用程序,该实用程序使用 IBM 的 API 解决了 .mps 问题。
原始代码使用一个空的 model.tar.gz 文件,创建部署并将 .mps 文件传递给新作业。
(python) 代码如下所示:
# Legacy utility (quoted from the question): deploys an *empty* model.tar.gz
# and supplies the .mps problem file to the job via INPUT_DATA_REFERENCES.
# NOTE(review): this relies on the old meta names (RUNTIME_UID, COMPUTE) that
# were removed by the breaking API change — presumably why it stopped working;
# compare with the SOFTWARE_SPEC_UID / HARDWARE_SPEC example further below.
# Assumes `client`, `COS_ENDPOINT`, `COS_BUCKET`, `cos_credentials` are
# defined earlier (not shown here).
import tarfile
# Create an empty archive: no model inside, the .mps arrives at solve time.
tar = tarfile.open("model.tar.gz", "w:gz")
tar.close()
# Repository metadata for the (empty) model on the CPLEX 12.9 runtime.
test_metadata = {
client.repository.ModelMetaNames.NAME: "Test",
client.repository.ModelMetaNames.DESCRIPTION: "Model for Test",
client.repository.ModelMetaNames.TYPE: "do-cplex_12.9",
client.repository.ModelMetaNames.RUNTIME_UID: "do_12.9"
}
# Store the empty archive as a model and keep its UID for the deployment.
model_details = client.repository.store_model(model='model.tar.gz', meta_props=test_metadata)
model_uid = client.repository.get_model_uid(model_details)
n_nodes = 1
# Batch deployment on a small ('S') compute configuration (old COMPUTE name).
meta_props = {
client.deployments.ConfigurationMetaNames.NAME: "Test Deployment " + str(n_nodes),
client.deployments.ConfigurationMetaNames.DESCRIPTION: "Test Deployment",
client.deployments.ConfigurationMetaNames.BATCH: {},
client.deployments.ConfigurationMetaNames.COMPUTE: {'name': 'S', 'nodes': n_nodes}
}
deployment_details = client.deployments.create(model_uid, meta_props=meta_props)
deployment_uid = client.deployments.get_uid(deployment_details)
# Job payload: solver parameters plus S3 (Cloud Object Storage) references
# for the input .mps file and the solution/log outputs.
solve_payload = {
client.deployments.DecisionOptimizationMetaNames.SOLVE_PARAMETERS: {
'oaas.logAttachmentName':'log.txt',
'oaas.logTailEnabled':'true',
'oaas.resultsFormat': 'JSON'
},
# Input: the .mps problem file read from the COS bucket.
client.deployments.DecisionOptimizationMetaNames.INPUT_DATA_REFERENCES: [
{
'id':'test.mps',
'type': 's3',
'connection': {
'endpoint_url': COS_ENDPOINT,
'access_key_id': cos_credentials['cos_hmac_keys']["access_key_id"],
'secret_access_key': cos_credentials['cos_hmac_keys']["secret_access_key"]
},
'location': {
'bucket': COS_BUCKET,
'path': 'test.mps'
}
}
],
# Outputs: solution JSON and solver log written back to the same bucket.
client.deployments.DecisionOptimizationMetaNames.OUTPUT_DATA_REFERENCES: [
{
'id':'solution.json',
'type': 's3',
'connection': {
'endpoint_url': COS_ENDPOINT,
'access_key_id': cos_credentials['cos_hmac_keys']["access_key_id"],
'secret_access_key': cos_credentials['cos_hmac_keys']["secret_access_key"]
},
'location': {
'bucket': COS_BUCKET,
'path': 'solution.json'
}
},
{
'id':'log.txt',
'type': 's3',
'connection': {
'endpoint_url': COS_ENDPOINT,
'access_key_id': cos_credentials['cos_hmac_keys']["access_key_id"],
'secret_access_key': cos_credentials['cos_hmac_keys']["secret_access_key"]
},
'location': {
'bucket': COS_BUCKET,
'path': 'log.txt'
}
}
]
}
# Submit the batch job against the deployment.
job_details = client.deployments.create_job(deployment_uid, solve_payload)
我最接近的(这几乎正是我需要的)是使用此示例中的大部分代码:
https://github.com/IBM/watson-machine-learning-samples/blob/master/cloud/notebooks/python_sdk/deployments/decision_optimization/Use%20Decision%20Optimization%20to%20plan%20your%20diet.ipynb
这是一个完整的工作示例。
# Complete working example (current API): store a Decision Optimization model
# archive, create a batch deployment, then run a job whose inputs and outputs
# are S3 (Cloud Object Storage) data references.
from ibm_watson_machine_learning import APIClient
import os
import wget
import json
import pandas as pd
import time

COS_ENDPOINT = "https://s3.ams03.cloud-object-storage.appdomain.cloud"
COS_BUCKET = "gvbucketname0api"
model_path = 'do-model.tar.gz'
api_key = 'XXXXX'
# BUG FIX: the original assignment ended with a stray trailing comma, which
# made access_key_id a 1-tuple ("XXXX",) instead of a string — that tuple was
# then sent as-is in every S3 connection block.
access_key_id = "XXXX"
secret_access_key = "XXXX"
location = 'eu-gb'
space_id = 'XXXX'
softwareSpecificationName = "do_12.9"
modelType = "do-docplex_12.9"

wml_credentials = {
    "apikey": api_key,
    "url": 'https://' + location + '.ml.cloud.ibm.com'
}
client = APIClient(wml_credentials)
client.set.default_space(space_id)

# Fetch the sample model archive once; wget saves it under model_path.
if not os.path.isfile(model_path):
    wget.download("https://github.com/IBM/watson-machine-learning-samples/raw/master/cloud/models/decision_optimization/do-model.tar.gz")

# Current API: the model is tied to a *software specification*
# (SOFTWARE_SPEC_UID), not the removed RUNTIME_UID meta name.
software_spec_uid = client.software_specifications.get_uid_by_name(softwareSpecificationName)
model_meta_props = {
    client.repository.ModelMetaNames.NAME: "LOCALLY created DO model",
    client.repository.ModelMetaNames.TYPE: modelType,
    client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: software_spec_uid
}
published_model = client.repository.store_model(model=model_path, meta_props=model_meta_props)
time.sleep(5)  # so that the model is available on the API
published_model_uid = client.repository.get_model_uid(published_model)
client.repository.list_models()

# Current API: HARDWARE_SPEC replaces the old COMPUTE meta name.
meta_data = {
    client.deployments.ConfigurationMetaNames.NAME: "deployment_DO",
    client.deployments.ConfigurationMetaNames.BATCH: {},
    client.deployments.ConfigurationMetaNames.HARDWARE_SPEC: {"name": "S", "num_nodes": 1}
}
deployment_details = client.deployments.create(published_model_uid, meta_props=meta_data)
time.sleep(5)  # so that the deployment is available on the API
deployment_uid = client.deployments.get_uid(deployment_details)
client.deployments.list()

def _s3_ref(ref_id, path):
    """Build one S3 data reference (shared COS connection) for the job payload."""
    return {
        'id': ref_id,
        'type': 's3',
        'connection': {
            'endpoint_url': COS_ENDPOINT,
            'access_key_id': access_key_id,
            'secret_access_key': secret_access_key
        },
        'location': {
            'bucket': COS_BUCKET,
            'path': path
        }
    }

job_payload_ref = {
    client.deployments.DecisionOptimizationMetaNames.INPUT_DATA_REFERENCES: [
        _s3_ref('diet_food.csv', "diet_food.csv"),
        _s3_ref('diet_food_nutrients.csv', "diet_food_nutrients.csv"),
        _s3_ref('diet_nutrients.csv', "diet_nutrients.csv")
    ],
    # '.*' collects every output attachment; ${job_id}/${attachment_name}
    # places the results under a per-job prefix in the bucket.
    client.deployments.DecisionOptimizationMetaNames.OUTPUT_DATA_REFERENCES: [
        _s3_ref('.*', "${job_id}/${attachment_name}")
    ]
}
job = client.deployments.create_job(deployment_uid, meta_props=job_payload_ref)
以上示例使用一个模型和一些 csv 文件作为输入。 当我将 INPUT_DATA_REFERENCES 更改为使用 .mps 文件(和空模型)时,出现错误
"errors": [
{
"code": "invalid_model_archive_in_deployment",
"message": "Invalid or unrecognized archive type in deployment `XXX-XXX-XXX`.
Supported archive types are `zip` or `tar.gz`"
}
我不是专家,但据我了解,mps 文件包含输入文件和模型文件,因此我不必同时提供两者。
答案由 Alex Fleischer 在另一个论坛提供。
可在此处找到完整示例:
https://medium.com/@AlainChabrier/solve-lp-problems-from-do-experiments-9afd4d53aaf5
上面的链接(类似于我问题中的代码)显示了一个带有“.lp”文件的示例,但它对于“.mps”文件也完全相同。
(注意模型类型是 do-cplex_12.10 ,不是 do-docplex_12.10)
我的问题是我使用的是空 model.tar.gz 文件。
一旦您在存档中有了 .lp/.mps 文件,一切都会按预期进行