Wrong dtype for a feed to the placeholder x-input TensorFlow
I want to implement a simple logistic regression on MNIST with my freshly installed TF, and I want to monitor the progress of minibatch SGD with TensorBoard.
I first ran it without TensorBoard and got an accuracy of 0.9166 on the test set.
However, when I added TensorBoard to see what was going on, I couldn't even get it to run any more; I got:
the placeholders must be fed with dtype float
But all my arrays are NumPy arrays of dtype float!
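A quick way to double-check this claim, using the array names from the script below, is to print the dtype of everything that will go into a feed_dict:

for name, arr in [("train_images", train_images), ("train_labels", train_labels),
                  ("test_images", test_images), ("test_labels", test_labels)]:
    print(name, arr.dtype, arr.shape)  # all four come out float32 here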
If you could point out the problem in my code, that would be great:
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 13:06:44 2016
@author: me
"""
#from tensorflow.examples.tutorials.mnist import input_data
#mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
import tensorflow as tf
import os
import random
import numpy as np
from array import array
import struct
import matplotlib.pyplot as plt
import time
#I first placed the decompressed -ubyte files from mnist on the path indicated
os.chdir('/home/me/Bureau/Step1/')
with open("train-labels.idx1-ubyte") as file:
magic, size = struct.unpack(">II",file.read(8))
train_labels_data=np.asarray(array("B",file.read()))
with open("t10k-labels.idx1-ubyte") as file:
magic, size = struct.unpack(">II",file.read(8))
test_labels_data=np.asarray(array("B",file.read()))
with open("train-images.idx3-ubyte") as file:
magic, size, rows, cols =struct.unpack(">IIII",file.read(16))
train_images_data=np.reshape(np.asarray(array("B",file.read())),(size,rows,cols))
with open("t10k-images.idx3-ubyte") as file:
magic, size, rows, cols =struct.unpack(">IIII",file.read(16))
test_images_data=np.reshape(np.asarray(array("B",file.read())),(size,rows,cols))
for i in range(10):
plt.imshow(train_images_data[i,:])
plt.show()
print(train_labels_data[i])
# Flatten the images to 784-vectors and scale pixel values to [0, 1]
train_images = np.reshape(train_images_data, (60000, 28*28)).astype(np.float32) * 1/255
test_images = np.reshape(test_images_data, (10000, 28*28)).astype(np.float32) * 1/255
# One-hot encode the labels as float32 arrays
train_labels = np.zeros((60000, 10), dtype=np.float32)
test_labels = np.zeros((10000, 10), dtype=np.float32)
for i in range(60000):
    a = train_labels_data[i]
    train_labels[i, a] = 1.
for j in range(10000):
    b = test_labels_data[j]
    test_labels[j, b] = 1.
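# (Note: the two loops above can be replaced by a one-line NumPy idiom, e.g.
#  train_labels = np.eye(10, dtype=np.float32)[train_labels_data])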
sess=tf.Session()
x=tf.placeholder(tf.float32, [None, 784],name="x-input")
W=tf.Variable(tf.zeros([784, 10]),name="weights")
b=tf.Variable(tf.zeros([10]),name="bias")
with tf.name_scope("Wx_b") as scope:
y=tf.nn.softmax(tf.matmul(x,W) + b)
w_hist=tf.histogram_summary("weights",W)
b_hist=tf.histogram_summary("bias",b)
y_hist=tf.histogram_summary("y",y)
y_ =tf.placeholder(tf.float32, [None, 10], name="y-input")
with tf.name_scope("xent") as scope:
cross_entropy= -tf.reduce_sum(y_*tf.log(y))
ce_summ=tf.scalar_summary("cross_entropy", cross_entropy)
with tf.name_scope("train") as scope:
train_step=tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
with tf.name_scope("test") as scope:
correct_prediction =tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
accuracy_summary=tf.scalar_summary("accuracy",accuracy)
merged=tf.merge_all_summaries()
writer=tf.train.SummaryWriter("/tmp/mnist_logs",sess.graph_def)
init=tf.initialize_all_variables()
sess.run(init)
for i in range(1000):
    if i % 10 == 0:
        # every 10th step: record summaries and test-set accuracy
        feed = {x: test_images, y_: test_labels}
        result = sess.run([merged, accuracy], feed_dict=feed)
        summary_str = result[0]
        acc = result[1]
        writer.add_summary(summary_str, i)
        print("Accuracy at step %s: %s" % (i, acc))
    else:
        # otherwise: one SGD step on a random minibatch of 100 training examples
        index = np.random.randint(60000 - 1, size=100)
        batch_xs, batch_ys = train_images[index, :], train_labels[index]
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
print(sess.run(accuracy, feed_dict={x: train_images, y_: train_labels}))
It happens on the feed for merged, but since I feed it in exactly the same way as I feed train_step, I'm at a loss...
It turns out that you can't just run the same script over and over again in the same session: when I opened a fresh Spyder instance and launched the program there, it ran!!!
Mind = blown.
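A plausible explanation for why a fresh Spyder fixed it: every run of the script adds its ops to the same default graph, and tf.merge_all_summaries() merges every summary op in that graph, including those created by earlier runs. The stale summaries still depend on the old placeholders, which the new feed_dict never feeds, so the error shows up on the merged run but not on train_step. A minimal sketch of two ways to make the script safely re-runnable in one interpreter session (assuming a TF build that exposes tf.reset_default_graph; the explicit-graph variant avoids even that assumption):

import tensorflow as tf

# Option 1: wipe the default graph before building anything
tf.reset_default_graph()

# Option 2: build the model inside its own graph, so repeated runs never
# share ops or summaries with earlier runs
graph = tf.Graph()
with graph.as_default():
    x = tf.placeholder(tf.float32, [None, 784], name="x-input")
    # ... rest of the model and its summaries go here ...
    merged = tf.merge_all_summaries()

sess = tf.Session(graph=graph)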