如何修复 Tensorflow 神经网络回归中的错误
How do I fix error in Tensorflow Neural Network Regression
我不明白为什么我的代码不会 运行。我从 TensorFlow 教程开始,使用单层前馈神经网络对 mnist 数据集中的图像进行分类。然后修改代码以创建一个多层感知器,将 37 个输入映射到 1 个输出。正在从 Matlab 数据文件 (.mat) 加载输入和输出训练数据
这是我的代码..
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from scipy.io import loadmat
%matplotlib inline
import tensorflow as tf
from tensorflow.contrib import learn
import sklearn
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from warnings import filterwarnings
filterwarnings('ignore')
sns.set_style('white')
from sklearn import datasets
from sklearn.preprocessing import scale
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_moons
X = np.array(loadmat("Data/DataIn.mat")['TrainingDataIn'])
Y = np.array(loadmat("Data/DataOut.mat")['TrainingDataOut'])
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.5)
total_len = X_train.shape[0]
# Parameters
learning_rate = 0.001
training_epochs = 500
batch_size = 10
display_step = 1
dropout_rate = 0.9
# Network Parameters
n_hidden_1 = 19 # 1st layer number of features
n_hidden_2 = 26 # 2nd layer number of features
n_input = X_train.shape[1]
n_classes = 1
# tf Graph input
X = tf.placeholder("float", [None, 37])
Y = tf.placeholder("float", [None])
def multilayer_perceptron(X, weights, biases):
# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(X, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
layer_2 = tf.add(tf.matmul(layer_1, weights['h1']), biases['b1'])
layer_2 = tf.nn.relu(layer_2)
# Output layer with linear activation
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
return out_layer
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], 0, 0.1)),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], 0, 0.1)),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes], 0, 0.1))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1], 0, 0.1)),
'b2': tf.Variable(tf.random_normal([n_hidden_2], 0, 0.1)),
'out': tf.Variable(tf.random_normal([n_classes], 0, 0.1))
}
# Construct model
pred = multilayer_perceptron(X, weights, biases)
tf.shape(pred)
tf.shape(Y)
print("Prediction matrix:", pred)
print("Output matrix:", Y)
# Define loss and optimizer
cost = tf.reduce_mean(tf.square(pred-Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Launch the graph
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(total_len/batch_size)
print(total_batch)
# Loop over all batches
for i in range(total_batch-1):
batch_x = X_train[i*batch_size:(i+1)*batch_size]
batch_y = Y_train[i*batch_size:(i+1)*batch_size]
# Run optimization op (backprop) and cost op (to get loss value)
_, c, p = sess.run([optimizer, cost, pred], feed_dict={X: batch_x,
Y: batch_y})
# Compute average loss
avg_cost += c / total_batch
# sample prediction
label_value = batch_y
estimate = p
err = label_value-estimate
print ("num batch:", total_batch)
# Display logs per epoch step
if epoch % display_step == 0:
print ("Epoch:", '%04d' % (epoch+1), "cost=", \
"{:.9f}".format(avg_cost))
print ("[*]----------------------------")
for i in xrange(5):
print ("label value:", label_value[i], \
"estimated value:", estimate[i])
print ("[*]============================")
print ("Optimization Finished!")
# Test model
correct_prediction = tf.equal(tf.argmax(pred), tf.argmax(Y))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print ("Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))
当我 运行 代码时,我收到与维度问题有关的错误消息。我刚刚修改了我在网上看到的教程代码以解决我的问题。为什么我的代码会 运行.我是 python 的新手。请帮忙。
错误信息如下:
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\framework\common_shapes.py in _call_cpp_shape_fn_impl(op, input_tensors_needed, input_tensors_as_shapes_needed, debug_python_shape_fn, require_shape_fn)
670 graph_def_version, node_def_str, input_shapes, input_tensors,
--> 671 input_tensors_as_shapes, status)
672 except errors.InvalidArgumentError as err:
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\contextlib.py in __exit__(self, type, value, traceback)
65 try:
---> 66 next(self.gen)
67 except StopIteration:
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\framework\errors_impl.py in raise_exception_on_not_ok_status()
465 compat.as_text(pywrap_tensorflow.TF_Message(status)),
--> 466 pywrap_tensorflow.TF_GetCode(status))
467 finally:
InvalidArgumentError: Dimensions must be equal, but are 19 and 37 for 'MatMul_1' (op: 'MatMul') with input shapes: [?,19], [37,19].
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-1-751c8673d311> in <module>()
68
69 # Construct model
---> 70 pred = multilayer_perceptron(X, weights, biases)
71 tf.shape(pred)
72 tf.shape(Y)
<ipython-input-1-751c8673d311> in multilayer_perceptron(X, weights, biases)
46 layer_1 = tf.nn.relu(layer_1)
47
---> 48 layer_2 = tf.add(tf.matmul(layer_1, weights['h1']), biases['b1'])
49 layer_2 = tf.nn.relu(layer_2)
50
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\ops\math_ops.py in matmul(a, b, transpose_a, transpose_b, adjoint_a, adjoint_b, a_is_sparse, b_is_sparse, name)
1814 else:
1815 return gen_math_ops._mat_mul(
-> 1816 a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
1817
1818
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\ops\gen_math_ops.py in _mat_mul(a, b, transpose_a, transpose_b, name)
1215 """
1216 result = _op_def_lib.apply_op("MatMul", a=a, b=b, transpose_a=transpose_a,
-> 1217 transpose_b=transpose_b, name=name)
1218 return result
1219
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\framework\op_def_library.py in apply_op(self, op_type_name, name, **keywords)
765 op = g.create_op(op_type_name, inputs, output_types, name=scope,
766 input_types=input_types, attrs=attr_protos,
--> 767 op_def=op_def)
768 if output_structure:
769 outputs = op.outputs
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\framework\ops.py in create_op(self, op_type, inputs, dtypes, input_types, name, attrs, op_def, compute_shapes, compute_device)
2506 original_op=self._default_original_op, op_def=op_def)
2507 if compute_shapes:
-> 2508 set_shapes_for_outputs(ret)
2509 self._add_op(ret)
2510 self._record_op_seen_by_control_dependencies(ret)
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\framework\ops.py in set_shapes_for_outputs(op)
1871 shape_func = _call_cpp_shape_fn_and_require_op
1872
-> 1873 shapes = shape_func(op)
1874 if shapes is None:
1875 raise RuntimeError(
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\framework\ops.py in call_with_requiring(op)
1821
1822 def call_with_requiring(op):
-> 1823 return call_cpp_shape_fn(op, require_shape_fn=True)
1824
1825 _call_cpp_shape_fn_and_require_op = call_with_requiring
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\framework\common_shapes.py in call_cpp_shape_fn(op, input_tensors_needed, input_tensors_as_shapes_needed, debug_python_shape_fn, require_shape_fn)
608 res = _call_cpp_shape_fn_impl(op, input_tensors_needed,
609 input_tensors_as_shapes_needed,
--> 610 debug_python_shape_fn, require_shape_fn)
611 if not isinstance(res, dict):
612 # Handles the case where _call_cpp_shape_fn_impl calls unknown_shape(op).
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\framework\common_shapes.py in _call_cpp_shape_fn_impl(op, input_tensors_needed, input_tensors_as_shapes_needed, debug_python_shape_fn, require_shape_fn)
674 missing_shape_fn = True
675 else:
--> 676 raise ValueError(err.message)
677
678 if missing_shape_fn:
ValueError: Dimensions must be equal, but are 19 and 37 for 'MatMul_1' (op: 'MatMul') with input shapes: [?,19], [37,19].
我已经更正了我的打字错误。我现在收到此错误消息:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-2-946a0b048e42> in <module>()
93 # Run optimization op (backprop) and cost op (to get loss value)
94 _, c, p = sess.run([optimizer, cost, pred], feed_dict={X: batch_x,
---> 95 Y: batch_y})
96 # Compute average loss
97 avg_cost += c / total_batch
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\client\session.py in run(self, fetches, feed_dict, options, run_metadata)
787 try:
788 result = self._run(None, fetches, feed_dict, options_ptr,
--> 789 run_metadata_ptr)
790 if run_metadata:
791 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\client\session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
973 'Cannot feed value of shape %r for Tensor %r, '
974 'which has shape %r'
--> 975 % (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
976 if not self.graph.is_feedable(subfeed_t):
977 raise ValueError('Tensor %s may not be fed.' % subfeed_t)
ValueError: Cannot feed value of shape (10, 1) for Tensor 'Placeholder_3:0', which has shape '(?,)'
当您定义 layer2
时,您使用了错误的权重和偏差,它应该是 'h2'
和 'b2'
而不是 'h1'
和 'b1'
:
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
有效的矩阵乘法要求第一个矩阵的宽度与第二个矩阵的高度匹配。
错误是说,对于您的 hidden_layer_2 matmul,第一个输入的形状为 batch_size x 19,第二个输入的形状为 37 x 19。它们的尺寸不匹配。
你的意思可能是 "h2" 代表 layer_2,但你打错了 "h1"。
我不明白为什么我的代码不会 运行。我从 TensorFlow 教程开始,使用单层前馈神经网络对 mnist 数据集中的图像进行分类。然后修改代码以创建一个多层感知器,将 37 个输入映射到 1 个输出。正在从 Matlab 数据文件 (.mat) 加载输入和输出训练数据
这是我的代码..
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from scipy.io import loadmat
%matplotlib inline
import tensorflow as tf
from tensorflow.contrib import learn
import sklearn
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from warnings import filterwarnings
filterwarnings('ignore')
sns.set_style('white')
from sklearn import datasets
from sklearn.preprocessing import scale
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_moons
X = np.array(loadmat("Data/DataIn.mat")['TrainingDataIn'])
Y = np.array(loadmat("Data/DataOut.mat")['TrainingDataOut'])
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.5)
total_len = X_train.shape[0]
# Parameters
learning_rate = 0.001
training_epochs = 500
batch_size = 10
display_step = 1
dropout_rate = 0.9
# Network Parameters
n_hidden_1 = 19 # 1st layer number of features
n_hidden_2 = 26 # 2nd layer number of features
n_input = X_train.shape[1]
n_classes = 1
# tf Graph input
X = tf.placeholder("float", [None, 37])
Y = tf.placeholder("float", [None])
def multilayer_perceptron(X, weights, biases):
# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(X, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
layer_2 = tf.add(tf.matmul(layer_1, weights['h1']), biases['b1'])
layer_2 = tf.nn.relu(layer_2)
# Output layer with linear activation
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
return out_layer
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], 0, 0.1)),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], 0, 0.1)),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes], 0, 0.1))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1], 0, 0.1)),
'b2': tf.Variable(tf.random_normal([n_hidden_2], 0, 0.1)),
'out': tf.Variable(tf.random_normal([n_classes], 0, 0.1))
}
# Construct model
pred = multilayer_perceptron(X, weights, biases)
tf.shape(pred)
tf.shape(Y)
print("Prediction matrix:", pred)
print("Output matrix:", Y)
# Define loss and optimizer
cost = tf.reduce_mean(tf.square(pred-Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Launch the graph
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(total_len/batch_size)
print(total_batch)
# Loop over all batches
for i in range(total_batch-1):
batch_x = X_train[i*batch_size:(i+1)*batch_size]
batch_y = Y_train[i*batch_size:(i+1)*batch_size]
# Run optimization op (backprop) and cost op (to get loss value)
_, c, p = sess.run([optimizer, cost, pred], feed_dict={X: batch_x,
Y: batch_y})
# Compute average loss
avg_cost += c / total_batch
# sample prediction
label_value = batch_y
estimate = p
err = label_value-estimate
print ("num batch:", total_batch)
# Display logs per epoch step
if epoch % display_step == 0:
print ("Epoch:", '%04d' % (epoch+1), "cost=", \
"{:.9f}".format(avg_cost))
print ("[*]----------------------------")
for i in xrange(5):
print ("label value:", label_value[i], \
"estimated value:", estimate[i])
print ("[*]============================")
print ("Optimization Finished!")
# Test model
correct_prediction = tf.equal(tf.argmax(pred), tf.argmax(Y))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print ("Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))
当我 运行 代码时,我收到与维度问题有关的错误消息。我刚刚修改了我在网上看到的教程代码以解决我的问题。为什么我的代码会 运行.我是 python 的新手。请帮忙。
错误信息如下:
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\framework\common_shapes.py in _call_cpp_shape_fn_impl(op, input_tensors_needed, input_tensors_as_shapes_needed, debug_python_shape_fn, require_shape_fn)
670 graph_def_version, node_def_str, input_shapes, input_tensors,
--> 671 input_tensors_as_shapes, status)
672 except errors.InvalidArgumentError as err:
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\contextlib.py in __exit__(self, type, value, traceback)
65 try:
---> 66 next(self.gen)
67 except StopIteration:
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\framework\errors_impl.py in raise_exception_on_not_ok_status()
465 compat.as_text(pywrap_tensorflow.TF_Message(status)),
--> 466 pywrap_tensorflow.TF_GetCode(status))
467 finally:
InvalidArgumentError: Dimensions must be equal, but are 19 and 37 for 'MatMul_1' (op: 'MatMul') with input shapes: [?,19], [37,19].
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-1-751c8673d311> in <module>()
68
69 # Construct model
---> 70 pred = multilayer_perceptron(X, weights, biases)
71 tf.shape(pred)
72 tf.shape(Y)
<ipython-input-1-751c8673d311> in multilayer_perceptron(X, weights, biases)
46 layer_1 = tf.nn.relu(layer_1)
47
---> 48 layer_2 = tf.add(tf.matmul(layer_1, weights['h1']), biases['b1'])
49 layer_2 = tf.nn.relu(layer_2)
50
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\ops\math_ops.py in matmul(a, b, transpose_a, transpose_b, adjoint_a, adjoint_b, a_is_sparse, b_is_sparse, name)
1814 else:
1815 return gen_math_ops._mat_mul(
-> 1816 a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
1817
1818
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\ops\gen_math_ops.py in _mat_mul(a, b, transpose_a, transpose_b, name)
1215 """
1216 result = _op_def_lib.apply_op("MatMul", a=a, b=b, transpose_a=transpose_a,
-> 1217 transpose_b=transpose_b, name=name)
1218 return result
1219
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\framework\op_def_library.py in apply_op(self, op_type_name, name, **keywords)
765 op = g.create_op(op_type_name, inputs, output_types, name=scope,
766 input_types=input_types, attrs=attr_protos,
--> 767 op_def=op_def)
768 if output_structure:
769 outputs = op.outputs
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\framework\ops.py in create_op(self, op_type, inputs, dtypes, input_types, name, attrs, op_def, compute_shapes, compute_device)
2506 original_op=self._default_original_op, op_def=op_def)
2507 if compute_shapes:
-> 2508 set_shapes_for_outputs(ret)
2509 self._add_op(ret)
2510 self._record_op_seen_by_control_dependencies(ret)
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\framework\ops.py in set_shapes_for_outputs(op)
1871 shape_func = _call_cpp_shape_fn_and_require_op
1872
-> 1873 shapes = shape_func(op)
1874 if shapes is None:
1875 raise RuntimeError(
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\framework\ops.py in call_with_requiring(op)
1821
1822 def call_with_requiring(op):
-> 1823 return call_cpp_shape_fn(op, require_shape_fn=True)
1824
1825 _call_cpp_shape_fn_and_require_op = call_with_requiring
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\framework\common_shapes.py in call_cpp_shape_fn(op, input_tensors_needed, input_tensors_as_shapes_needed, debug_python_shape_fn, require_shape_fn)
608 res = _call_cpp_shape_fn_impl(op, input_tensors_needed,
609 input_tensors_as_shapes_needed,
--> 610 debug_python_shape_fn, require_shape_fn)
611 if not isinstance(res, dict):
612 # Handles the case where _call_cpp_shape_fn_impl calls unknown_shape(op).
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\framework\common_shapes.py in _call_cpp_shape_fn_impl(op, input_tensors_needed, input_tensors_as_shapes_needed, debug_python_shape_fn, require_shape_fn)
674 missing_shape_fn = True
675 else:
--> 676 raise ValueError(err.message)
677
678 if missing_shape_fn:
ValueError: Dimensions must be equal, but are 19 and 37 for 'MatMul_1' (op: 'MatMul') with input shapes: [?,19], [37,19].
我已经更正了我的打字错误。我现在收到此错误消息:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-2-946a0b048e42> in <module>()
93 # Run optimization op (backprop) and cost op (to get loss value)
94 _, c, p = sess.run([optimizer, cost, pred], feed_dict={X: batch_x,
---> 95 Y: batch_y})
96 # Compute average loss
97 avg_cost += c / total_batch
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\client\session.py in run(self, fetches, feed_dict, options, run_metadata)
787 try:
788 result = self._run(None, fetches, feed_dict, options_ptr,
--> 789 run_metadata_ptr)
790 if run_metadata:
791 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
~\AppData\Local\Continuum\Anaconda3\envs\ann\lib\site-packages\tensorflow\python\client\session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
973 'Cannot feed value of shape %r for Tensor %r, '
974 'which has shape %r'
--> 975 % (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
976 if not self.graph.is_feedable(subfeed_t):
977 raise ValueError('Tensor %s may not be fed.' % subfeed_t)
ValueError: Cannot feed value of shape (10, 1) for Tensor 'Placeholder_3:0', which has shape '(?,)'
当您定义 layer2
时,您使用了错误的权重和偏差,它应该是 'h2'
和 'b2'
而不是 'h1'
和 'b1'
:
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
有效的矩阵乘法要求第一个矩阵的宽度与第二个矩阵的高度匹配。
错误是说,对于您的 hidden_layer_2 matmul,第一个输入的形状为 batch_size x 19,第二个输入的形状为 37 x 19。它们的尺寸不匹配。
你的意思可能是 "h2" 代表 layer_2,但你打错了 "h1"。