Adam optimizer: ValueError: No gradients provided for any variable
Adam optimizer: ValueError: No gradients provided for any variable
我正在尝试使用预训练模型 (vgg16) 优化我的过滤器激活并降低过滤器分数计算的均值。我不断收到“没有为任何变量提供梯度”的错误消息。
如果有任何帮助,我将不胜感激。谢谢!
这里可以看到代码:
# Question code (reproduces the error): the loss closure reads a tensor that
# was computed BEFORE minimize() records gradients, so no gradient path
# reaches the variable -> "No gradients provided for any variable".
import numpy as np
import tensorflow as tf
from tensorflow import keras
np.random.seed(1)
# Random start image in [1, 32, 32, 3], squashed into (0, 1) by a sigmoid.
image_f = np.random.normal(size=[1, 32, 32, 3], scale=0.01).astype(np.float32)
img = tf.nn.sigmoid(image_f)
tf.compat.v1.keras.backend.set_image_data_format('channels_last')
model = keras.applications.VGG16(weights="imagenet", include_top=False)
optimizer = tf.keras.optimizers.Adam(epsilon=1e-08, learning_rate=0.05)
# Sub-model exposing the activations of layer "block3_conv1".
layer_weight =keras.Model(inputs=model.inputs, outputs=model.get_layer(name="block3_conv1").output)
for i in range(5):
# BUG 1: a brand-new Variable is created on every iteration, discarding
# whatever the optimizer updated in the previous step.
img = tf.Variable(img)
# BUG 2: the forward pass runs here, OUTSIDE the loss closure, so the
# activation is a constant as far as the optimizer's tape is concerned.
filter_activation = layer_weight(img)[:,:,:,5]
def compute_activation():
# filter_activation does not depend on img under the gradient tape,
# hence minimize() finds no gradients for [img].
score = -1 * tf.reduce_mean(filter_activation)
print(score)
return score
optimizer.minimize(compute_activation, [img])
print(img)
我认为问题是你的变量 img
没有包含在你的损失函数的计算中。我根据官方文档(https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Optimizer)修改了你的代码。
# Answer code: create the Variable once, and run the forward pass inside the
# loss closure so the loss is differentiable with respect to the image.
import numpy as np
import tensorflow as tf
from tensorflow import keras

np.random.seed(1)
seed_image = np.random.normal(size=[1, 32, 32, 3], scale=0.01).astype(np.float32)

tf.compat.v1.keras.backend.set_image_data_format('channels_last')
model = keras.applications.VGG16(weights="imagenet", include_top=False)
optimizer = tf.keras.optimizers.Adam(epsilon=1e-08, learning_rate=0.05)

# Sub-model that maps an input image to the "block3_conv1" activations.
feature_extractor = keras.Model(
    inputs=model.inputs,
    outputs=model.get_layer(name="block3_conv1").output,
)

# Variable only need to define once
img = tf.Variable(tf.nn.sigmoid(seed_image))

def compute_activation():
    # The forward pass happens HERE, under the optimizer's gradient tape,
    # so gradients flow from the score back into img.
    activation = feature_extractor(img)[:, :, :, 5]
    score = -1 * tf.reduce_mean(activation)
    print(score)
    return score

for _ in range(5):
    optimizer.minimize(compute_activation, [img])

print(img)
我正在尝试使用预训练模型 (vgg16) 优化我的过滤器激活并降低过滤器分数计算的均值。我不断收到“没有为任何变量提供梯度”的错误消息。
如果有任何帮助,我将不胜感激。谢谢!
这里可以看到代码:
# Question code (reproduces the error): the loss closure reads a tensor that
# was computed BEFORE minimize() records gradients, so no gradient path
# reaches the variable -> "No gradients provided for any variable".
import numpy as np
import tensorflow as tf
from tensorflow import keras
np.random.seed(1)
# Random start image in [1, 32, 32, 3], squashed into (0, 1) by a sigmoid.
image_f = np.random.normal(size=[1, 32, 32, 3], scale=0.01).astype(np.float32)
img = tf.nn.sigmoid(image_f)
tf.compat.v1.keras.backend.set_image_data_format('channels_last')
model = keras.applications.VGG16(weights="imagenet", include_top=False)
optimizer = tf.keras.optimizers.Adam(epsilon=1e-08, learning_rate=0.05)
# Sub-model exposing the activations of layer "block3_conv1".
layer_weight =keras.Model(inputs=model.inputs, outputs=model.get_layer(name="block3_conv1").output)
for i in range(5):
# BUG 1: a brand-new Variable is created on every iteration, discarding
# whatever the optimizer updated in the previous step.
img = tf.Variable(img)
# BUG 2: the forward pass runs here, OUTSIDE the loss closure, so the
# activation is a constant as far as the optimizer's tape is concerned.
filter_activation = layer_weight(img)[:,:,:,5]
def compute_activation():
# filter_activation does not depend on img under the gradient tape,
# hence minimize() finds no gradients for [img].
score = -1 * tf.reduce_mean(filter_activation)
print(score)
return score
optimizer.minimize(compute_activation, [img])
print(img)
我认为问题是你的变量 img
没有包含在你的损失函数的计算中。我根据官方文档(https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Optimizer)修改了你的代码。
# Answer code: create the Variable once, and run the forward pass inside the
# loss closure so the loss is differentiable with respect to the image.
import numpy as np
import tensorflow as tf
from tensorflow import keras

np.random.seed(1)
seed_image = np.random.normal(size=[1, 32, 32, 3], scale=0.01).astype(np.float32)

tf.compat.v1.keras.backend.set_image_data_format('channels_last')
model = keras.applications.VGG16(weights="imagenet", include_top=False)
optimizer = tf.keras.optimizers.Adam(epsilon=1e-08, learning_rate=0.05)

# Sub-model that maps an input image to the "block3_conv1" activations.
feature_extractor = keras.Model(
    inputs=model.inputs,
    outputs=model.get_layer(name="block3_conv1").output,
)

# Variable only need to define once
img = tf.Variable(tf.nn.sigmoid(seed_image))

def compute_activation():
    # The forward pass happens HERE, under the optimizer's gradient tape,
    # so gradients flow from the score back into img.
    activation = feature_extractor(img)[:, :, :, 5]
    score = -1 * tf.reduce_mean(activation)
    print(score)
    return score

for _ in range(5):
    optimizer.minimize(compute_activation, [img])

print(img)