Simple model using the TensorFlow 2 API gives huge loss and accuracy values

import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler


df = pd.DataFrame({'A': np.array([100, 105.4, 108.3, 111.1, 113, 114.7, 120, 125, 129]),
                   'B': np.array([11, 11.8, 12.3, 12.8, 13.1,13.6, 13.9, 14.4, 15]),
                   'C': np.array([55, 56.3, 57, 58, 59.5, 60.4, 61, 61.5, 62]),
                   'Target': np.array([4000, 4200.34, 4700, 5300, 5800, 6400, 6800, 7200, 7500])})

df.head()

X_train = df.iloc[:, :3]
y_train = df.iloc[:, 3]

scaler = StandardScaler()
scaler.fit(X_train)
X_train_std = scaler.transform(X_train)

features = {'A': X_train_std[:, 0],
            'B': X_train_std[:, 1],
            'C': X_train_std[:, 2]}

labels = y_train


batch_size = 1
def train_input_fn(features, labels, batch_size):
    train_dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    train_dataset = train_dataset.shuffle(1).batch(batch_size)
    return train_dataset


def pack_features_vector(features, labels):
    '''Pack the features into a single array'''
    features = tf.stack(list(features.values()), axis=1)
    return features, labels


train_dataset = train_input_fn(features, labels, batch_size).map(pack_features_vector)


class Model(tf.keras.Model):

    def __init__(self):
        super().__init__()

        self.l1 = tf.keras.layers.Dense(8, activation='relu')
        self.l2 = tf.keras.layers.Dense(4, activation='relu')
        self.out = tf.keras.layers.Dense(1)

    def call(self, x):
        x = self.l1(x)
        x = self.l2(x)
        return self.out(x)


learning_rate = 0.1
optimizer = tf.keras.optimizers.RMSprop(learning_rate)
loss_object = tf.keras.losses.MeanSquaredError()


train_loss_res = []
train_acc_res = []
epochs = 100
model = Model()

for epoch in range(epochs):
    epoch_loss_avg = tf.keras.metrics.Mean()
    epoch_acc = tf.keras.metrics.MeanAbsoluteError()

    for x,y in train_dataset:
        with tf.GradientTape() as tape:
            y_ = model(x)
            loss = loss_object(y, y_)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))

        epoch_loss_avg(loss)
        epoch_acc(y, y_)

    train_loss_res.append(epoch_loss_avg.result())
    train_acc_res.append(epoch_acc.result())

    if epoch % 10 == 0:
        print("Epoch {:03d} Loss: {:.3f}, Acc: {:.3%}".format(epoch, epoch_loss_avg.result(), epoch_acc.result()))

I am using simple data and a simple model.

These are the results I get:

Epoch 000 Loss: 32666856.000, Acc: 561536.963%
Epoch 010 Loss: 342012.625, Acc: 37158.075%
Epoch 020 Loss: 328074.844, Acc: 35578.772%
Epoch 030 Loss: 189751.594, Acc: 27069.794%
Epoch 040 Loss: 273142.312, Acc: 29358.673%
Epoch 050 Loss: 424036.625, Acc: 44175.562%
Epoch 060 Loss: 43667.957, Acc: 14025.812%
Epoch 070 Loss: 96341.156, Acc: 19105.350%
Epoch 080 Loss: 39308.691, Acc: 16228.386%
Epoch 090 Loss: 46950.699, Acc: 17407.053%

I can't find where the problem is.

I have tried other settings (more units, different metrics and losses), but the results are the same.

The loss is this high because it is the square of the difference between the predicted y and the true y. I also noticed that you track the absolute error as a metric while the loss you optimize is the squared error, so the two numbers live on very different scales.
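
As a quick illustration of the squaring effect (a minimal sketch, not part of your code): an absolute miss of 200 already produces an MSE of 40000.

import tensorflow as tf

# MSE squares the error, so a miss of 200 shows up as a loss of 40000
mse = tf.keras.losses.MeanSquaredError()
print(mse([5000.0], [4800.0]).numpy())  # -> 40000.0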

  1. Your average y value is about 5766.
  2. The square root of the final MSE (i.e. the actual average error) is about 216.
  3. 216 is roughly 4% of that average (see the quick check below).
  4. If you train for 1000 epochs, it will drop to under 1%.
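
A quick numeric check of those figures (using the epoch-090 loss of 46950.699 from your output):

import numpy as np

targets = np.array([4000, 4200.34, 4700, 5300, 5800, 6400, 6800, 7200, 7500])
mean_y = targets.mean()      # ~5766.7
rmse = np.sqrt(46950.699)    # ~216.7, the typical prediction error
print(100 * rmse / mean_y)   # ~3.8, i.e. roughly 4% of the mean target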

There are two main problems with the accuracy. First, the metric you use does not output a percentage; it outputs the absolute error in the same units as y. Second, the string format specifier {:.3%} expects a float between 0 and 1, which it then multiplies by 100 to display as a percentage.
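
A minimal illustration of the formatting issue (the values are made up for the example):

# {:.3%} multiplies by 100, so it expects a fraction, not a raw error value
print("{:.3%}".format(0.0375))  # -> '3.750%'
print("{:.3%}".format(216.68))  # -> '21668.000%', the kind of number you were seeing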

See the following code, where I divide the average MAE by the average y value, convert that ratio to a percentage, and subtract it from 100 to report it as "accuracy". (This math only makes sense as long as the error is no larger than the average y.)

import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler


df = pd.DataFrame({'A': np.array([100, 105.4, 108.3, 111.1, 113, 114.7, 120, 125, 129]),
                   'B': np.array([11, 11.8, 12.3, 12.8, 13.1,13.6, 13.9, 14.4, 15]),
                   'C': np.array([55, 56.3, 57, 58, 59.5, 60.4, 61, 61.5, 62]),
                   'Target': np.array([4000, 4200.34, 4700, 5300, 5800, 6400, 6800, 7200, 7500])})

df.head()

X_train = df.iloc[:, :3]
y_train = df.iloc[:, 3]

scaler = StandardScaler()
scaler.fit(X_train)
X_train_std = scaler.transform(X_train)

features = {'A': X_train_std[:, 0],
            'B': X_train_std[:, 1],
            'C': X_train_std[:, 2]}

labels = y_train


batch_size = 1
def train_input_fn(features, labels, batch_size):
    train_dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    train_dataset = train_dataset.shuffle(1).batch(batch_size)
    return train_dataset


def pack_features_vector(features, labels):
    '''Pack the features into a single array'''
    features = tf.stack(list(features.values()), axis=1)
    return features, labels


train_dataset = train_input_fn(features, labels, batch_size).map(pack_features_vector)


class Model(tf.keras.Model):

    def __init__(self):
        super().__init__()

        self.l1 = tf.keras.layers.Dense(8, activation='relu')
        self.l2 = tf.keras.layers.Dense(4, activation='relu')
        self.out = tf.keras.layers.Dense(1)

    def call(self, x):
        x = self.l1(x)
        x = self.l2(x)
        return self.out(x)


learning_rate = 0.1
optimizer = tf.keras.optimizers.RMSprop(learning_rate)
loss_object = tf.keras.losses.MeanSquaredError()


train_loss_res = []
train_acc_res = []
epochs = 1000
model = Model()

for epoch in range(epochs):
    epoch_loss_avg = tf.keras.metrics.Mean()
    epoch_acc = tf.keras.metrics.MeanAbsoluteError()
    y_avg = tf.keras.metrics.Mean()  # running mean of the true y values
    for x,y in train_dataset:
        with tf.GradientTape() as tape:
            y_ = model(x)
            loss = loss_object(y, y_)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))

        epoch_loss_avg(loss)
        epoch_acc(y, y_)
        y_avg(y)

    train_loss_res.append(epoch_loss_avg.result())
    train_acc_res.append(epoch_acc.result())

    if epoch % 10 == 0:
        # convert the MAE-to-mean-y ratio to a percentage before subtracting from 100
        print("Epoch {:03d} Squared Loss: {:.3f}, Acc: {:.3f}%".format(
            epoch,
            epoch_loss_avg.result(),
            100 - 100 * (epoch_acc.result() / y_avg.result())))