How to predict a specific image using MNIST

I'm new to TensorFlow and I think I mostly have this right, but I'm missing some small thing that I can't find online. I'd appreciate a reference or a pointer to whatever it is I'm missing.
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

batch_size = 128
test_size = 256

def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))

def model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden):
    l1a = tf.nn.relu(tf.nn.conv2d(X, w,                        # l1a shape=(?, 28, 28, 32)
                                  strides=[1, 1, 1, 1], padding='SAME'))
    l1 = tf.nn.max_pool(l1a, ksize=[1, 2, 2, 1],               # l1 shape=(?, 14, 14, 32)
                        strides=[1, 2, 2, 1], padding='SAME')
    l1 = tf.nn.dropout(l1, p_keep_conv)

    l2a = tf.nn.relu(tf.nn.conv2d(l1, w2,                      # l2a shape=(?, 14, 14, 64)
                                  strides=[1, 1, 1, 1], padding='SAME'))
    l2 = tf.nn.max_pool(l2a, ksize=[1, 2, 2, 1],               # l2 shape=(?, 7, 7, 64)
                        strides=[1, 2, 2, 1], padding='SAME')
    l2 = tf.nn.dropout(l2, p_keep_conv)

    l3a = tf.nn.relu(tf.nn.conv2d(l2, w3,                      # l3a shape=(?, 7, 7, 128)
                                  strides=[1, 1, 1, 1], padding='SAME'))
    l3 = tf.nn.max_pool(l3a, ksize=[1, 2, 2, 1],               # l3 shape=(?, 4, 4, 128)
                        strides=[1, 2, 2, 1], padding='SAME')
    l3 = tf.reshape(l3, [-1, w4.get_shape().as_list()[0]])     # reshape to (?, 2048)
    l3 = tf.nn.dropout(l3, p_keep_conv)

    l4 = tf.nn.relu(tf.matmul(l3, w4))
    l4 = tf.nn.dropout(l4, p_keep_hidden)

    pyx = tf.matmul(l4, w_o)
    return pyx

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
trX = trX.reshape(-1, 28, 28, 1)  # 28x28x1 input img
teX = teX.reshape(-1, 28, 28, 1)  # 28x28x1 input img

X = tf.placeholder("float", [None, 28, 28, 1])
Y = tf.placeholder("float", [None, 10])

w = init_weights([3, 3, 1, 32])        # 3x3x1 conv, 32 outputs
w2 = init_weights([3, 3, 32, 64])      # 3x3x32 conv, 64 outputs
w3 = init_weights([3, 3, 64, 128])     # 3x3x64 conv, 128 outputs
w4 = init_weights([128 * 4 * 4, 625])  # FC 128 * 4 * 4 inputs, 625 outputs
w_o = init_weights([625, 10])          # FC 625 inputs, 10 outputs (labels)

p_keep_conv = tf.placeholder("float")
p_keep_hidden = tf.placeholder("float")
py_x = model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))
train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
predict_op = tf.argmax(py_x, 1)

# Launch the graph in a session
saver = tf.train.Saver()
with tf.Session() as sess:
    # you need to initialize all variables
    tf.global_variables_initializer().run()

    for i in range(100):
        training_batch = zip(range(0, len(trX), batch_size),
                             range(batch_size, len(trX)+1, batch_size))
        for start, end in training_batch:
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end],
                                          p_keep_conv: 0.8, p_keep_hidden: 0.5})

        test_indices = np.arange(len(teX))  # Get a test batch
        np.random.shuffle(test_indices)
        test_indices = test_indices[0:test_size]

        print(i, np.mean(np.argmax(teY[test_indices], axis=1) ==
                         sess.run(predict_op, feed_dict={X: teX[test_indices],
                                                         Y: teY[test_indices],
                                                         p_keep_conv: 1.0,
                                                         p_keep_hidden: 1.0})))

    save_path = saver.save(sess, "tmp/model.ckpt")
    print("Model saved in file: %s" % save_path)
After all of this, I'm now trying to predict a single image from this array as an example (I know it's not a proper test), to get its class, using:
with tf.Session() as sess:
    # Restore variables from disk.
    saver.restore(sess, "tmp/model.ckpt")
    print "...Model Loaded..."
    prediction = tf.argmax(predict_op, 1)
    print prediction.eval(feed_dict={X: teX[2].reshape(1, 28, 28, 1)}, session=sess)
But I get this error:
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_3' with dtype
That first problem was solved by adding p_keep_conv: 1.0, p_keep_hidden: 1.0 to the feed dict.

After that, another problem appeared:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-91-3e9ead14a8b3> in <module>()
4 print "...Model Loaded..."
5 prediction=tf.argmax(predict_op,1)
----> 6 classification = sess.run(tf.argmax(predict_op, 1), feed_dict={X: teX[3].reshape(1,28,28,1),p_keep_conv: 1.0,p_keep_hidden: 1.0})
7
....
InvalidArgumentError: Expected dimension in the range [-1, 1), but got 1
[[Node: ArgMax_21 = ArgMax[T=DT_INT64, Tidx=DT_INT32, output_type=DT_INT64, _device="/job:localhost/replica:0/task:0/device:CPU:0"](ArgMax/_37, ArgMax_21/dimension)]]
I'm summarizing what we discussed in the comments on this answer.

Placeholder error:
Your prediction.eval() call has a feed_dict that does not contain values for p_keep_conv and p_keep_hidden. Note that, since you did not provide a name=... argument when defining your placeholders, they get the default names Placeholder_N, which is what the error message shows. It is good practice to always give a meaningful name to variables, constants and placeholders in order to make debugging easier.
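As a minimal sketch (the names keep_conv and keep_hidden below are just illustrative, not from the original code), the placeholders could be defined with explicit names, and the single-image feed_dict would then include the keep probabilities:

# Named placeholders: errors will now mention these names instead of Placeholder_N
X = tf.placeholder("float", [None, 28, 28, 1], name="X")
Y = tf.placeholder("float", [None, 10], name="Y")
p_keep_conv = tf.placeholder("float", name="keep_conv")
p_keep_hidden = tf.placeholder("float", name="keep_hidden")

# At prediction time, every placeholder the graph depends on must be fed
feed = {X: teX[2].reshape(1, 28, 28, 1),
        p_keep_conv: 1.0,
        p_keep_hidden: 1.0}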
Argmax expected dimension:
axis: A Tensor. Must be one of the following types: int32, int64.
int32, 0 <= axis < rank(input). Describes which axis of the input
Tensor to reduce across.
It seems that the only way to run argmax over the last axis of a tensor is to give it axis=-1, because of the "strictly less than" check in the function's definition (I don't understand why they made that design choice).