Export a basic Tensorflow model to Google Cloud ML
I am trying to export my local TensorFlow model so that I can use it on Google Cloud ML and run predictions against it.

I am following the tensorflow serving example with mnist data. There is a considerable difference in the way they process and use their input/output vectors compared with the typical examples you find online.

I am not sure how to set the parameters of my signatures:
model_exporter.init(
    sess.graph.as_graph_def(),
    init_op = init_op,
    default_graph_signature = exporter.classification_signature(
        input_tensor = "**UNSURE**",
        scores_tensor = "**UNSURE**"),
    named_graph_signatures = {
        'inputs': "**UNSURE**",
        'outputs': "**UNSURE**"
    }
)
model_exporter.export(export_path, "**UNSURE**", sess)
Here is the rest of my code:
import sys
import tensorflow as tf
from tensorflow.contrib.session_bundle import exporter
import numpy as np
from newpreprocess import create_feature_sets_and_labels

train_x, train_y, test_x, test_y = create_feature_sets_and_labels()

x = tf.placeholder('float', [None, 13])
y = tf.placeholder('float', [None, 1])

n_nodes_hl1 = 20
n_nodes_hl2 = 20
n_classes = 1
batch_size = 100

def neural_network_model(data):
    # Two tanh hidden layers feeding a single-logit output layer.
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([13, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}
    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.tanh(l1)
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.tanh(l2)
    output = tf.add(tf.matmul(l2, output_layer['weights']), output_layer['biases'])
    return output

def train_neural_network(x):
    output = neural_network_model(x)
    cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(output, y))
    optimizer = tf.train.AdamOptimizer(0.003).minimize(cost)
    hm_epochs = 700
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            i = 0
            while i < len(train_x):
                start = i
                end = i + batch_size
                batch_x = np.array(train_x[start:end])
                batch_y = np.array(train_y[start:end])
                _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                                              y: batch_y})
                epoch_loss += c
                i += batch_size
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss/(len(train_x)/batch_size))

        prediction = tf.sigmoid(output)
        predicted_class = tf.greater(prediction, 0.5)
        correct = tf.equal(predicted_class, tf.equal(y, 1.0))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: test_x, y: test_y}))

        export_path = "~/Documents/cloudcomputing/Project/RNN_timeseries/"
        print("Exporting trained model to %s" % export_path)
        init_op = tf.group(tf.initialize_all_tables(), name="init_op")
        saver = tf.train.Saver(sharded=True)
        model_exporter = exporter.Exporter(saver)
        model_exporter.init(
            sess.graph.as_graph_def(),
            init_op = init_op,
            default_graph_signature = exporter.classification_signature(
                input_tensor = ,
                scores_tensor = ),
            named_graph_signatures = {
                'inputs': ,
                'outputs':
            }
        )
        model_exporter.export(export_path, tf.constant(1), sess)
        print("Done exporting!")

train_neural_network(x)
What are the exact steps to upload this model to Google Cloud ML and use it there? Their walkthroughs seem to be for models trained on the cloud itself rather than on a local machine.
TensorFlow Serving and Google Cloud ML are two different things; do not confuse the two. Cloud ML is a fully managed solution (ML as a service), whereas TF Serving requires you to set up and maintain your own infrastructure, since it is just a server. The two are unrelated and have different requirements for input/output handling.

The guide you should follow is this one. Instead of using graph signatures, you add the inputs and outputs to collections. The changes in your code would look like this:
import sys
import tensorflow as tf
from tensorflow.contrib.session_bundle import exporter
import numpy as np
from newpreprocess import create_feature_sets_and_labels
import json
import os

train_x, train_y, test_x, test_y = create_feature_sets_and_labels()

n_nodes_hl1 = 20
n_nodes_hl2 = 20
n_classes = 1
batch_size = 100

x = tf.placeholder('float', [None, 13])
y = tf.placeholder('float', [None, 1])
# Cloud ML passes an instance key through the graph untouched, so that
# predictions can be matched back to the rows that produced them.
keys_placeholder = tf.placeholder(tf.int64, shape=(None,))
keys = tf.identity(keys_placeholder)

def neural_network_model(data):
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([13, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}
    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.tanh(l1)
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.tanh(l2)
    output = tf.add(tf.matmul(l2, output_layer['weights']), output_layer['biases'])
    return output

output = neural_network_model(x)
prediction = tf.sigmoid(output)
predicted_class = tf.greater(prediction, 0.5)

# Instead of graph signatures, record the input and output tensor names
# in graph collections as JSON; Cloud ML reads these at serving time.
inputs = {'key': keys_placeholder.name, 'x': x.name}
tf.add_to_collection('inputs', json.dumps(inputs))
outputs = {'key': keys.name,
           'prediction': predicted_class.name}
tf.add_to_collection('outputs', json.dumps(outputs))

def train_neural_network(x):
    cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(output, y))
    optimizer = tf.train.AdamOptimizer(0.003).minimize(cost)
    hm_epochs = 700
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            i = 0
            while i < len(train_x):
                start = i
                end = i + batch_size
                batch_x = np.array(train_x[start:end])
                batch_y = np.array(train_y[start:end])
                _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                                              y: batch_y})
                epoch_loss += c
                i += batch_size
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss/(len(train_x)/batch_size))

        correct = tf.equal(predicted_class, tf.equal(y, 1.0))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: test_x, y: test_y}))

        export_path = "~/Documents/cloudcomputing/Project/RNN_timeseries/"
        print("Exporting trained model to %s" % export_path)
        init_op = tf.group(tf.initialize_all_tables(), name="init_op")
        saver = tf.train.Saver(sharded=True)
        # Saving under the name 'export' matches what the Cloud ML loader of
        # this era looks for ('export' and 'export.meta').
        saver.save(sess, os.path.join(export_path, 'export'))
        print("Done exporting!")

train_neural_network(x)
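With the collections above, each prediction instance must supply the named inputs: a 'key' and the 13-element feature vector 'x'. A hypothetical line from a newline-delimited instances file (the feature values here are made up for illustration) would look like:

{"key": 0, "x": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3]}

The service echoes each key back next to its 'prediction' output, so you can match results to inputs.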
I moved a few things around in your code (and have not actually tested it), but this should give you a starting point.
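As for the upload itself, here is a rough, untested sketch of the steps: copy the export to Cloud Storage, then create a model and a version from it. The bucket name YOUR_BUCKET, the model name my_model, the version v1, and the file instances.json are placeholders, and the exact command group (gcloud beta ml in the early releases versus gcloud ml-engine later) depends on your SDK version:

gsutil cp -r ~/Documents/cloudcomputing/Project/RNN_timeseries/ gs://YOUR_BUCKET/model/
gcloud ml-engine models create my_model --regions us-central1
gcloud ml-engine versions create v1 --model my_model --origin gs://YOUR_BUCKET/model/
gcloud ml-engine predict --model my_model --json-instances instances.json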