Tensorflow Multivariate linear regression results in NaN

I'm doing multivariate linear regression with the Boston housing dataset from sklearn (a 506x13 matrix). My plan is to train on all the data, then "plug in" a random sample, e.g. boston_dataset.data[39], and look at the loss. But when I print the results, all I get is NaN. Here is my code.

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston

np.set_printoptions(suppress=True)

boston = load_boston()

m = boston.data.shape[0] - 1

# note: bt is never used below; np.insert without axis= also flattens the array
bt_unfixed = np.transpose(boston.data)
bt = np.insert(bt_unfixed, 0, 1)

Y = tf.placeholder(tf.float64, name='Y___')
X = tf.placeholder(tf.float64, [1, 13], name='X_____')
#print X.shape
W = tf.Variable(tf.zeros([13, 1]), name='weights')
b = tf.Variable(0.5, name='bias')

hypothesis = tf.add(tf.matmul(X, tf.cast(W, tf.float64)), tf.cast(b, tf.float64))

loss = tf.reduce_sum(tf.square(hypothesis - Y)) / (2 * m)

optimizer = tf.train.GradientDescentOptimizer(0.01)

train_op = optimizer.minimize(loss)

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    for i in range(0, 500):
        for (x, y) in zip(boston.data, boston.target):
            sess.run(train_op, feed_dict={X:x.reshape(1, 13), Y:y})
        if (i + 1)%50 == 0:
            print "Ran " + str(i) + "times\nW=" +str(sess.run(W)) + "\nb=" +str(sess.run(b))

    print "Done!\n"
    print "Running test...\n"
    t = sess.run(loss, feed_dict={X: boston.data[504].reshape(1, 13), Y: boston.target[504]})
    print "loss = " + str(t) + " Real value: " + str(boston.target[504]) + " Pred: " + str(sess.run(hypothesis, feed_dict={X: boston.data[504].reshape(1, 13)}))

Thanks! Feel free to add any other suggestions as well.

It looks like you are not doing any preprocessing on the Boston data, which lets the loss and hypothesis values blow up to inf (and eventually NaN). So I normalized the data, and it works. Here is my code.

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston


boston = load_boston()

data = boston.data
label = boston.target

# normalize each feature to zero mean and unit variance
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)

M = boston.data.shape[0]


Y = tf.placeholder(tf.float32, name='Y')
X = tf.placeholder(tf.float32, [1, 13], name='X')

W = tf.Variable(tf.random_normal([13, 1]), name='weights')
b = tf.Variable(tf.random_normal([1]), name='bias')

hypothesis = tf.add(tf.matmul(X, W), b)

loss = tf.reduce_sum(tf.square(hypothesis - Y)) / (2. * (M - 1))

optimizer = tf.train.GradientDescentOptimizer(0.01)

train_op = optimizer.minimize(loss)

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())

    for i in range(0, 500):
        for l in xrange(M):
            _, loss_val, hypo = sess.run(
                [train_op, loss, hypothesis],
                feed_dict={X: data[l, :].reshape([1, 13]),
                           Y: label[l]})
        if (i + 1) % 50 == 0:
            print "Ran " + str(i) + "times\nW=" + \
                str(sess.run(W)) + "\nb=" + str(sess.run(b))

    print "Done!\n"
    print "Running test...\n"
    t = sess.run(
        loss, feed_dict={X: data[50].reshape([1, 13]),
                         Y: label[50]})
    print "loss =" + str(t)
    print "Real value Y: " + str(label[50])
    print "Pred Y: " + str(sess.run(hypothesis,
                                    feed_dict={X: data[50].reshape([1, 13])}))
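
A side note on the normalization step: sklearn ships the same zero-mean / unit-variance transform as StandardScaler, and once the features are scaled you can also feed the whole 506x13 matrix in one shot (full-batch gradient descent) instead of looping sample by sample. A minimal sketch under those assumptions, using the same TF 0.x-era API as the code above:

import tensorflow as tf
import numpy as np
from sklearn.datasets import load_boston
from sklearn.preprocessing import StandardScaler

boston = load_boston()

# StandardScaler does exactly the mean/std normalization done by hand above
data = StandardScaler().fit_transform(boston.data)   # shape (506, 13)
label = boston.target.reshape(-1, 1)                 # shape (506, 1)

X = tf.placeholder(tf.float32, [None, 13], name='X')
Y = tf.placeholder(tf.float32, [None, 1], name='Y')

W = tf.Variable(tf.random_normal([13, 1]), name='weights')
b = tf.Variable(tf.random_normal([1]), name='bias')

hypothesis = tf.matmul(X, W) + b
loss = tf.reduce_mean(tf.square(hypothesis - Y)) / 2.

train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    # one sess.run per epoch over the full batch, instead of one per sample
    for i in range(500):
        _, loss_val = sess.run([train_op, loss], feed_dict={X: data, Y: label})
        if (i + 1) % 50 == 0:
            print "Ran " + str(i + 1) + " times, loss = " + str(loss_val)

Because the placeholder is declared as [None, 13], the same graph still accepts a single reshaped row for the final test, so that part works unchanged.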