Tensorflow: No improvement in loss while training neural net

I made this neural network, but every time I run it, it gives me a different loss, and from the start the loss stays the same throughout the whole loop. I want to predict one value in 'yy' from every group of 3 values in 'xx' used as input. Also, how can I show my output? For example: I want to display an array with predictions as close as possible to the values in 'yy'.

import tensorflow as tf

xx=(
        [178.72,218.38,171.1],
        [211.57,215.63,173.13],
        [196.25,196.69,116.91],
        [121.88,132.07,85.02],
        [117.04,135.44,112.54],
        [118.13,124.04,97.98],
        [116.73,125.88,99.04],
        [118.75,125.01,110.16],
        [109.69,111.72,69.07],
        [76.57,96.88,67.38],
        [91.69,128.43,87.57],
        [117.57,146.43,117.57]
      )

yy=(
        [212.09],
        [195.58],
        [127.6],
        [116.5],
        [117.95],
        [117.55],
        [117.55],
        [110.39],
        [74.33],
        [91.08],
        [121.75],
        [127.3]
       )


x=tf.placeholder(tf.float32,[None,3])
y=tf.placeholder(tf.float32,[None,1])
n1=5
n2=5
classes=12

def neuralnetwork(data):

    hl1={'weights':tf.Variable(tf.random_normal([3,n1])),'biases':tf.Variable(tf.random_normal([n1]))}   

    hl2={'weights':tf.Variable(tf.random_normal([n1,n2])),'biases':tf.Variable(tf.random_normal([n2]))}

    op={'weights':tf.Variable(tf.random_normal([n2,classes])),'biases':tf.Variable(tf.random_normal([classes]))}

    l1=tf.add(tf.matmul(data,hl1['weights']),hl1['biases'])
    l1=tf.nn.relu(l1)
    l2=tf.add(tf.matmul(l1,hl2['weights']),hl2['biases'])
    l2=tf.nn.relu(l2)
    output=tf.matmul(l2,op['weights'])+op['biases']
    return output

def train(x):
        pred=neuralnetwork(x)
       # cost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred,labels=y))
        sq = tf.square(pred-y)
        loss=tf.reduce_mean(sq)

        optimizer = tf.train.GradientDescentOptimizer(0.01)
        train = optimizer.minimize(loss)

        #optimizer=tf.train.RMSPropOptimizer(0.01).minimize(cost)
        epochs=100



        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for epoch in range(epochs):
                epoch_loss=0
                for i in range (int(1)):
                    batch_x=xx
                    batch_y=yy
                  # a=tf.shape(xx)
                   #print(sess.run(a))
                    c=sess.run(loss,feed_dict={x:batch_x, y: batch_y})
                    epoch_loss+=c
                    print("Epoch ",epoch," completed out of ",epochs, 'loss:', epoch_loss)


train(x)

I'm not sure exactly what you are trying to accomplish, but this looks to me like a regression problem rather than a classification problem. I think the code below does what you want. I have cleaned it up a bit while still trying to keep it in a form you will recognize; personally, I would write it differently.

import tensorflow as tf

xx = (
    [178.72, 218.38, 171.1],
    [211.57, 215.63, 173.13],
    [196.25, 196.69, 116.91],
    [121.88, 132.07, 85.02],
    [117.04, 135.44, 112.54],
    [118.13, 124.04, 97.98],
    [116.73, 125.88, 99.04],
    [118.75, 125.01, 110.16],
    [109.69, 111.72, 69.07],
    [76.57, 96.88, 67.38],
    [91.69, 128.43, 87.57],
    [117.57, 146.43, 117.57]
)

yy = (212.09, 195.58, 127.6, 116.5, 117.95, 117.55, 117.55,
      110.39, 74.33, 91.08, 121.75, 127.3)

x = tf.placeholder(tf.float32, [None, 3])
y = tf.placeholder(tf.float32, [None])


def neuralnetwork(data, n1=5, n2=5):
    hl1 = {'weights': tf.Variable(tf.random_normal([3, n1])), 'biases':
           tf.Variable(tf.random_normal([n1]))}

    hl2 = {'weights': tf.Variable(tf.random_normal([n1, n2])),
           'biases': tf.Variable(tf.random_normal([n2]))}

    op = {'weights': tf.Variable(tf.random_normal([n2, 1])), 'biases':
          tf.Variable(tf.random_normal([1]))}

    l1 = tf.add(tf.matmul(data, hl1['weights']), hl1['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hl2['weights']), hl2['biases'])
    l2 = tf.nn.relu(l2)
    output = tf.matmul(l2, op['weights']) + op['biases']
    return output


N_EPOCHS = 100
if __name__ == '__main__':
    pred = neuralnetwork(x)
    loss = tf.reduce_mean(tf.squared_difference(pred, y))

    optimizer = tf.train.GradientDescentOptimizer(0.01)
    train = optimizer.minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(N_EPOCHS):
            epoch_loss = sess.run([train, loss], feed_dict={x: xx, y: yy})[1]
            print("Epoch", epoch, " completed out of", N_EPOCHS, "loss:",
                  epoch_loss)

You made two main mistakes:

  1. You are trying to have 12 output nodes, while what you probably want is a single output node that tries to predict the corresponding y value.

  2. You are not calling the train operation, so the optimizer does not actually do anything (see the minimal sketch after this list).
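
To make the second point concrete, here is a minimal sketch using the names from the cleaned-up script above (sess, train, loss, x, y): fetching only loss just evaluates the graph, while fetching the train op is what actually applies a gradient update.

# Only evaluates the loss; no weights are updated:
c = sess.run(loss, feed_dict={x: xx, y: yy})

# Runs one optimizer step and returns the current loss:
_, c = sess.run([train, loss], feed_dict={x: xx, y: yy})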

"Also how can I show my output? For example: I want to show an array having predictions as close as possible to the values in 'yy'."

For example, you can add these lines after the training loop (still inside the session):

predictions = sess.run(pred, feed_dict={x: xx, y: yy})
print("Predictions:", predictions)

This simply evaluates the part of the computational graph that is needed to compute the pred tensor, using the whole dataset as input by feeding it into the placeholders.
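
If you want to see the predictions next to the targets, as asked in the question, a small follow-up sketch (run inside the same session, reusing pred, x, xx and yy from the script above) could look like this; note that pred does not depend on the y placeholder, so only x has to be fed:

predictions = sess.run(pred, feed_dict={x: xx})
for p, target in zip(predictions, yy):
    # pred has shape [None, 1], so each row p is a length-1 array
    print("predicted:", p[0], "target:", target)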

However, as you will see, your network simply learns to predict the mean of the labels, regardless of the input.
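
A quick way to check that claim (assuming NumPy is available as np) is to compare the printed predictions against the mean of the labels:

import numpy as np
print("Mean of yy:", np.mean(yy))  # the predictions tend to cluster around this value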