tf.GradientTape giving None gradient

I am trying to write a custom training loop. After creating the model, I added some extra trainable parameters to some of its layers. I use these extra parameters to update my original parameters on every forward pass. But when I compute the gradients, the gradient for the extra parameter I added last comes back as None. The code is below:

import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense

model = Sequential()
model.add(tf.keras.layers.Flatten(input_shape=(1, 1)))
model.add(Dense(1, activation='relu'))
model.add(Dense(2, activation='softmax'))

# extra trainable parameter attached to the first Dense layer
model.layers[1].add_weight(name="x1", shape=(1,),
                           initializer=tf.keras.initializers.Constant(value=1.0),
                           trainable=True)

# feature and labels are defined elsewhere
dataset = tf.data.Dataset.from_tensor_slices((feature, labels))

loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
for i, (x_batch_train, y_batch_train) in enumerate(dataset):
    with tf.GradientTape() as tape:
        for par in model.layers[1].trainable_weights:
            if "x1" in par.name:
                bits = tf.convert_to_tensor(par)
        for par in model.layers[1].trainable_weights:
            if "kernel" in par.name:
                par = bits + 1.0  # attempt to update the kernel from x1
        x = model(x_batch_train, training=True)
        loss = loss_fn(y_batch_train, x)
    val = tape.gradient(loss, model.trainable_weights)
    for v in val:
        print(v)

Here I have added an extra parameter named x1, which is meant to update the kernel of the Dense layer. But I get None for the gradient of x1. The output is:

tf.Tensor([[0.]], shape=(1, 1), dtype=float32)
tf.Tensor([-0.], shape=(1,), dtype=float32)
None
tf.Tensor([[0. 0.]], shape=(1, 2), dtype=float32)
tf.Tensor([-0.5  0.5], shape=(2,), dtype=float32)

Why is this happening?

The problem is that the changes you make to the layer weights have no direct connection to the model's output inside the tf.GradientTape context, so they are not tracked: the tape only records operations that lie on the path from a watched variable to the loss, and x1 never enters the forward pass.
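To see the mechanism in isolation, here is a minimal sketch (my own illustration, not part of the original answer): a variable that never participates in computing the loss gets a None gradient, exactly like x1 above.

import tensorflow as tf

a = tf.Variable(2.0)  # used in the loss
b = tf.Variable(3.0)  # never used in the loss

with tf.GradientTape() as tape:
    loss = a * a  # b does not appear on the path to the loss

print(tape.gradient(loss, [a, b]))
# [<tf.Tensor: ... numpy=4.0>, None] -- the unused variable gets None

You can solve this with a simple custom layer that makes the extra parameter part of the forward computation: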

import tensorflow as tf

class DenseLayer(tf.keras.layers.Layer):
    def __init__(self, units=1):
        super(DenseLayer, self).__init__()
        self.units = units

    def build(self, input_shape):
        self.w = self.add_weight(name="kernel",
                                 shape=(int(input_shape[-1]), self.units),
                                 trainable=True)
        self.b = self.add_weight(name="bias", shape=(self.units,),
                                 initializer="zeros", trainable=True)
        # the extra parameter is created inside the layer ...
        self.bits = self.add_weight(name="x1",
                                    shape=(int(input_shape[-1]), self.units),
                                    initializer="ones", trainable=True)

    def call(self, inputs):
        # ... and used in the forward pass, so the tape tracks it
        return tf.nn.relu(tf.matmul(inputs, self.w + self.bits + 1.0) + self.b)

dense_layer = DenseLayer(1)
model = tf.keras.Sequential()
model.add(tf.keras.layers.Flatten(input_shape=(1,1)))
model.add(dense_layer)
model.add(tf.keras.layers.Dense(2, activation='softmax'))
model.summary()
dataset = tf.data.Dataset.from_tensor_slices((tf.random.normal((50, 1, 1)), tf.random.uniform((50, ), maxval=2, dtype=tf.int32))).batch(2)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
for i, (x_batch_train, y_batch_train) in enumerate(dataset):
    with tf.GradientTape() as tape:
        y = model(x_batch_train, training=True)
        loss = loss_fn(y_batch_train, y)
    # compute gradients outside the recording context
    val = tape.gradient(loss, model.trainable_weights)
    for v in val:
        print(v)
    optimizer.apply_gradients(zip(val, model.trainable_variables))
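Because the extra parameter is now created with add_weight inside the layer and used directly in call, it lies on the path from the inputs to the loss, so tape.gradient returns a real tensor for x1 instead of None.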

Your idea is good. I am not extending the previous answer, but this question has been asked before about custom layers, and you can do the same thing for an LSTM model by training with model.fit( ... ).

This is not related to GradientTape.

[Sample - Dense]:

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Class / Function
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class MyDenseLayer(tf.keras.layers.Layer):
    def __init__(self, num_outputs, num_add):
        super(MyDenseLayer, self).__init__()
        self.num_outputs = num_outputs
        self.num_add = num_add

    def build(self, input_shape):
        self.kernel = self.add_weight(name="kernel",
                                      shape=(int(input_shape[-1]),
                                             self.num_outputs))

    def call(self, inputs):
        # shift the inputs by a constant, then project with the kernel
        temp = tf.add(inputs, self.num_add)
        temp = tf.matmul(temp, self.kernel)
        return temp

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
    tf.keras.layers.InputLayer(input_shape=( 32, 32, 4 )),
    tf.keras.layers.Normalization(mean=3., variance=2.),
    tf.keras.layers.Normalization(mean=4., variance=6.),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Reshape((128, 225)),  # 15 * 15 * 128 == 128 * 225
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96, return_sequences=True, return_state=False)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96)),
])

layer = MyDenseLayer(10, 5)

model.add(layer)
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(192, activation='relu'))
model.add(tf.keras.layers.Dense(10, activation='softmax'))
model.summary()

[Output]:
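The answer alludes to training the model with model.fit( ... ). As a minimal sketch of that step (my addition, using random placeholder data with shapes matching the model above, not part of the original answer):

import numpy as np

# placeholder data for the (32, 32, 4) input and 10-class softmax output
x_train = np.random.rand(16, 32, 32, 4).astype("float32")
y_train = np.random.randint(0, 10, size=(16,))

model.compile(optimizer="adam",
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=["accuracy"])
model.fit(x_train, y_train, epochs=1, batch_size=4)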