TensorFlow with batch size and wrong dimension

Here is my code:

# Model parameters: W and b
# tf.reset_default_graph()

W = tf.get_variable("weight",shape = [784, 10], dtype=tf.float32)
b = tf.get_variable("b", shape=(784,10), dtype= tf.float32)

input_X = tf.placeholder('float32', shape = (None,10)) 
input_y =  tf.placeholder('float32', [784, 10]) 

logits = W*input_X + b 

probas = tf.nn.softmax(logits) 

classes = tf.argmax(probas) 

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = input_y, logits = logits))


step = tf.train.AdamOptimizer(loss)

s.run(tf.global_variables_initializer())

BATCH_SIZE = 512
EPOCHS = 40

# for logging the progress right here in Jupyter (for those who don't have TensorBoard)
simpleTrainingCurves = matplotlib_utils.SimpleTrainingCurves("cross-entropy", "accuracy")

for epoch in range(EPOCHS):  # we finish an epoch when we've looked at all training samples
    
    batch_losses = []
    for batch_start in range(0, X_train_flat.shape[0], BATCH_SIZE):  # data is already shuffled
        _, batch_loss = s.run([step, loss], {input_X: X_train_flat[batch_start:batch_start+BATCH_SIZE], 
                                             input_y: y_train_oh[batch_start:batch_start+BATCH_SIZE]})
        # collect batch losses, this is almost free as we need a forward pass for backprop anyway
        batch_losses.append(batch_loss)

    train_loss = np.mean(batch_losses)
    val_loss = s.run(loss, {input_X: X_val_flat, input_y: y_val_oh})  # this part is usually small
    train_accuracy = accuracy_score(y_train, s.run(classes, {input_X: X_train_flat}))  # this is slow and usually skipped
    valid_accuracy = accuracy_score(y_val, s.run(classes, {input_X: X_val_flat}))  
    simpleTrainingCurves.add(train_loss, val_loss, train_accuracy, valid_accuracy)

The error is:

/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    973                 'Cannot feed value of shape %r for Tensor %r, '
    974                 'which has shape %r'
--> 975                 % (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
    976             if not self.graph.is_feedable(subfeed_t):
    977                 raise ValueError('Tensor %s may not be fed.' % subfeed_t)

ValueError: Cannot feed value of shape (512, 784) for Tensor 'Placeholder_2:0', which has shape '(?, 10)'

I am new to TensorFlow and I am taking the Coursera course. Please help me.

The problem is with the shapes of the weights, the bias, and the input_X and input_y placeholders. The error message says it all: you feed a (512, 784) batch of flattened images into input_X, but you declared input_X with shape (None, 10) and input_y with shape [784, 10], so the two shapes are effectively swapped. Similarly, b needs one value per class, i.e. shape (10,), not (784, 10); the logits need a matrix product (tf.matmul) rather than the elementwise W*input_X; and tf.train.AdamOptimizer(loss) passes the loss tensor as the learning rate without ever building a training op, so you need an optimizer with a learning rate plus .minimize(loss).
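
To see why the feed fails, here is a minimal standalone sketch (TF 1.x, separate from your graph; the names are illustrative) that reproduces the same ValueError:

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=(None, 10))  # declared: any batch size, 10 columns
with tf.Session() as sess:
    # feeding 784 columns where 10 are expected trips the shape check in Session.run
    sess.run(x, {x: np.zeros((512, 784), dtype=np.float32)})
# -> ValueError: Cannot feed value of shape (512, 784) for Tensor 'Placeholder:0',
#    which has shape '(?, 10)'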

Below is the modified code, which should fix your problem.

W = tf.get_variable('Weight', shape=(784, 10), dtype=tf.float32)
b = tf.get_variable('bias', shape=(10,), dtype=tf.float32)  # one bias per class

input_X = tf.placeholder(shape=(None, 784), dtype=tf.float32)  # None is the batch size, 784 the flattened input
input_y = tf.placeholder(shape=(None, 10), dtype=tf.float32)   # None is the batch size, 10 the number of classes
logits = tf.matmul(input_X, W) + b  # matrix product, not elementwise multiplication
probas = tf.nn.softmax(logits)
classes = tf.argmax(probas, 1)  # argmax over the class axis
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=input_y))
step = tf.train.RMSPropOptimizer(0.001).minimize(loss)  # learning rate plus minimize() gives a training op
s.run(tf.global_variables_initializer())

BATCH_SIZE = 512
EPOCHS = 50

simpleTrainingCurves = matplotlib_utils.SimpleTrainingCurves("cross-entropy", "accuracy")

for epoch in range(EPOCHS):  
    batch_losses = []
    for batch_start in range(0, X_train_flat.shape[0], BATCH_SIZE):  # data is already shuffled
        _, batch_loss = s.run([step, loss], {input_X: X_train_flat[batch_start:batch_start+BATCH_SIZE], 
                                             input_y: y_train_oh[batch_start:batch_start+BATCH_SIZE]})
        batch_losses.append(batch_loss)

    train_loss = np.mean(batch_losses)
    val_loss = s.run(loss, {input_X: X_val_flat, input_y: y_val_oh}) 
    train_accuracy = accuracy_score(y_train, s.run(classes, {input_X: X_train_flat})) 
    valid_accuracy = accuracy_score(y_val, s.run(classes, {input_X: X_val_flat}))  
    simpleTrainingCurves.add(train_loss, val_loss, train_accuracy, valid_accuracy)
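
With these shapes the forward pass is consistent: a (batch, 784) input times the (784, 10) weight matrix gives (batch, 10) logits, and the (10,) bias broadcasts across the batch. As a quick sanity check (a sketch that assumes the tensors defined above), you can print the static shapes before training:

print(logits.shape)   # (?, 10): one score per class for each example
print(classes.shape)  # (?,): one predicted class index per example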