如何计算张量流中不可训练的权重数?
How to count the number of non trainable weights in tensorflow?
在 keras 中可以通过以下方式计算不可训练权重的数量:
# Count trainable / non-trainable parameters of a Keras model.
# Fix: the snippet used `tf` and `np` without importing them, which
# raises NameError; both imports are added below.
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, Dense
# Dummy Keras model
model = tf.keras.Sequential([
    tf.keras.layers.Dense(3, input_dim=3072, activation="relu", name="inputlayer"),
    tf.keras.layers.Dense(512, activation="tanh", name="2ndlayer"),
    tf.keras.layers.Dense(512, activation="tanh", name="3rdlayer"),
    tf.keras.layers.Dense(512, activation="tanh", name="4thlayer"),
    tf.keras.layers.Dense(512, activation="tanh", name="5thlayer"),
    tf.keras.layers.Dense(3, name="lastlayer"),
])
# Sum the element count of every unique weight tensor in each group.
# NOTE(review): `set(...)` over Variables may fail on TF2 eager builds where
# Variables are unhashable — confirm against the TF version in use.
trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
non_trainable_count = int(np.sum([K.count_params(p) for p in set(model.non_trainable_weights)]))
print('Total params: {:,}'.format(trainable_count + non_trainable_count))
print('Trainable params: {:,}'.format(trainable_count))
print('Non-trainable params: {:,}'.format(non_trainable_count))
我有一个带有自定义层的自定义 tensorflow 1.15 模型,所以我无法利用 Keras。我知道我可以通过以下方式计算自定义模型中的可训练参数:
# Dummy data: one 16x16 3-channel input (NHWC), all zeros.
x = np.zeros((1,16,16,3))
# Dummy tf model: two stacked conv layers built with the deprecated TF1
# `tf.layers` API (assumes TF 1.x graph mode — TODO confirm).
x_tf = tf.convert_to_tensor(x, np.float32)
z_tf = tf.layers.conv2d(x_tf, filters=32, kernel_size=(3,3))
zz_tf = tf.layers.conv2d(z_tf, filters=32, kernel_size=(3,3))
# Total trainable parameters: sum of element counts over all trainable
# variables registered in the default graph.
trainable_count = np.sum([np.prod(v.shape) for v in tf.trainable_variables()])
print(trainable_count)
我的问题是如何访问我的 tensorflow 模型的不可训练权重?
在 keras 中,它是用 non_trainable_count = int(np.sum([K.count_params(p) for p in set(model.non_trainable_weights)]))
完成的,所以我正在寻找等效的命令而不调用 keras api。
您可以使用以下脚本计算一层的不可训练权重,然后对网络的所有层求和
class Dense(tf.Module):
    """Toy dense layer whose weight matrix is deliberately non-trainable.

    Demonstrates counting non-trainable parameters on a raw ``tf.Module``
    without the Keras API: ``variables`` lists every variable the module
    owns, ``trainable_variables`` only the trainable ones.
    """

    def __init__(self, input_shape, output_shape):
        super(Dense, self).__init__()
        # Frozen weight matrix (non-trainable) of shape [in, out].
        self.w = tf.Variable(tf.random.normal([input_shape, output_shape]),
                             trainable=False)
        # Trainable bias vector of shape [out].
        self.b = tf.Variable(tf.zeros([output_shape]))

    def __call__(self, x):
        # Affine transform followed by ReLU.
        y = tf.matmul(x, self.w) + self.b
        return tf.nn.relu(y)

    def count_non_trainable_weights(self):
        """Return the number of non-trainable scalar weights (parameters).

        Fix: the original returned the number of non-trainable *variables*
        (``len(self.variables) - len(self.trainable_variables)``), not the
        number of weights. The Keras-equivalent count sums the element
        count of each non-trainable variable instead.
        """
        trainable_ids = {id(v) for v in self.trainable_variables}
        return int(sum(v.shape.num_elements()
                       for v in self.variables
                       if id(v) not in trainable_ids))
在 keras 中可以通过以下方式计算不可训练权重的数量:
from tensorflow.keras import backend as K
# Count trainable / non-trainable parameters of a Keras model.
# Fix: the snippet used `tf`, `np` and `K` without importing them here,
# which raises NameError; the imports are added below.
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, Dense
# Dummy Keras model
model = tf.keras.Sequential([
    tf.keras.layers.Dense(3, input_dim=3072, activation="relu", name="inputlayer"),
    tf.keras.layers.Dense(512, activation="tanh", name="2ndlayer"),
    tf.keras.layers.Dense(512, activation="tanh", name="3rdlayer"),
    tf.keras.layers.Dense(512, activation="tanh", name="4thlayer"),
    tf.keras.layers.Dense(512, activation="tanh", name="5thlayer"),
    tf.keras.layers.Dense(3, name="lastlayer"),
])
# Sum the element count of every unique weight tensor in each group.
# NOTE(review): `set(...)` over Variables may fail on TF2 eager builds where
# Variables are unhashable — confirm against the TF version in use.
trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
non_trainable_count = int(np.sum([K.count_params(p) for p in set(model.non_trainable_weights)]))
print('Total params: {:,}'.format(trainable_count + non_trainable_count))
print('Trainable params: {:,}'.format(trainable_count))
print('Non-trainable params: {:,}'.format(non_trainable_count))
我有一个带有自定义层的自定义 tensorflow 1.15 模型,所以我无法利用 Keras。我知道我可以通过以下方式计算自定义模型中的可训练参数:
# Dummy data: one 16x16 3-channel input (NHWC), all zeros.
x = np.zeros((1,16,16,3))
# Dummy tf model: two stacked conv layers built with the deprecated TF1
# `tf.layers` API (assumes TF 1.x graph mode — TODO confirm).
x_tf = tf.convert_to_tensor(x, np.float32)
z_tf = tf.layers.conv2d(x_tf, filters=32, kernel_size=(3,3))
zz_tf = tf.layers.conv2d(z_tf, filters=32, kernel_size=(3,3))
# Total trainable parameters: sum of element counts over all trainable
# variables registered in the default graph.
trainable_count = np.sum([np.prod(v.shape) for v in tf.trainable_variables()])
print(trainable_count)
我的问题是如何访问我的 tensorflow 模型的不可训练权重?
在 keras 中,它是用 non_trainable_count = int(np.sum([K.count_params(p) for p in set(model.non_trainable_weights)]))
完成的,所以我正在寻找等效的命令而不调用 keras api。
您可以使用以下脚本计算一层的不可训练权重,然后对网络的所有层求和
class Dense(tf.Module):
    """Toy dense layer whose weight matrix is deliberately non-trainable.

    Demonstrates counting non-trainable parameters on a raw ``tf.Module``
    without the Keras API: ``variables`` lists every variable the module
    owns, ``trainable_variables`` only the trainable ones.
    """

    def __init__(self, input_shape, output_shape):
        super(Dense, self).__init__()
        # Frozen weight matrix (non-trainable) of shape [in, out].
        self.w = tf.Variable(tf.random.normal([input_shape, output_shape]),
                             trainable=False)
        # Trainable bias vector of shape [out].
        self.b = tf.Variable(tf.zeros([output_shape]))

    def __call__(self, x):
        # Affine transform followed by ReLU.
        y = tf.matmul(x, self.w) + self.b
        return tf.nn.relu(y)

    def count_non_trainable_weights(self):
        """Return the number of non-trainable scalar weights (parameters).

        Fix: the original returned the number of non-trainable *variables*
        (``len(self.variables) - len(self.trainable_variables)``), not the
        number of weights. The Keras-equivalent count sums the element
        count of each non-trainable variable instead.
        """
        trainable_ids = {id(v) for v in self.trainable_variables}
        return int(sum(v.shape.num_elements()
                       for v in self.variables
                       if id(v) not in trainable_ids))