How to implement a matmul-based NN written for TF1 in TF2
I want to port a simple matmul-based neural network written for TF1 to TF2.
Here is the source. (Don't mind the Korean comments; it's a tutorial written in Korean.)
So I searched for 'how to migrate TF1 to TF2', and I learned that I have to remove the placeholders.
Here is my overall code:
import tensorflow as tf
import numpy as np
x_data = np.array(
    [[0, 0], [1, 0], [1, 1], [0, 0], [0, 0], [0, 1]])
y_data = np.array([
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1],
    [1, 0, 0],
    [1, 0, 0],
    [0, 0, 1]
])
x_data = tf.cast(x_data, tf.float32)
y_data = tf.cast(y_data, tf.float32)
W1 = tf.Variable(tf.random.uniform([2, 10], -1., 1.))
W2 = tf.Variable(tf.random.uniform([10, 3], -1., 1.))
b1 = tf.Variable(tf.zeros([10]))
b2 = tf.Variable(tf.zeros([3]))
Layer1 = tf.matmul(x_data, W1) + b1
Layer1 = tf.nn.relu(Layer1)
model = tf.matmul(Layer1, W2) + b2
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_data, logits=model)
)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
optimizer.minimize(loss=lambda: cost, var_list=[W1, W2, b1, b2])
Following what I found, I changed the loss into a lambda.
But then I got this error:
ValueError: No gradients provided for any variable: ['Variable:0', 'Variable:0', 'Variable:0', 'Variable:0'].
And I don't know how to fix it.
So I'd like to know the right way to implement something like this in TF2.
OK. After going through the official guide for eager execution, I finally got it working. The key point is that optimizer.minimize needs a loss callable that re-runs the forward pass every time it is called; my original lambda just returned an already-evaluated cost tensor, so there was nothing left to differentiate.
Here is the code:
import tensorflow as tf
import numpy as np
x_data = np.array(
    [[0, 0], [1, 0], [1, 1], [0, 0], [0, 0], [0, 1]])
y_data = np.array([
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1],
    [1, 0, 0],
    [1, 0, 0],
    [0, 0, 1]
])
x_data = tf.cast(x_data, tf.float32)
y_data = tf.cast(y_data, tf.float32)

class Model(tf.keras.Model):
    def __init__(self):
        super(Model, self).__init__()
        self.W1 = tf.Variable(tf.random.uniform([2, 10], -1., 1.))
        self.W2 = tf.Variable(tf.random.uniform([10, 3], -1., 1.))
        self.b1 = tf.Variable(tf.zeros([10]))
        self.b2 = tf.Variable(tf.zeros([3]))

    @staticmethod
    def _calc_layer(x, w, b):
        return tf.matmul(x, w) + b

    def __call__(self, x):
        # use the x that was passed in, not the global x_data
        layer1 = tf.nn.relu(Model._calc_layer(x, self.W1, self.b1))
        return Model._calc_layer(layer1, self.W2, self.b2)


def cost(model, inputs, targets):
    return tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=targets,
                                                logits=model(inputs))
    )


model = Model()

def cost_tominimize():
    return cost(model, x_data, y_data)

optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
for i in range(100):
    optimizer.minimize(loss=cost_tominimize,
                       var_list=model.trainable_variables)
    # print(cost_tominimize().numpy())

# test
prediction = tf.argmax(model(x_data), 1)
target = tf.argmax(y_data, 1)
print("prediction : ", prediction.numpy())
print("real : ", target.numpy())
is_correct = tf.equal(prediction, target)
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
print('accuracy: %.2f%%' % (accuracy * 100))
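
By the way, the eager execution guide mostly uses an explicit tf.GradientTape rather than optimizer.minimize. For reference, here is a minimal sketch of what the training loop above would look like with a tape (an equivalent alternative I'm adding for illustration, assuming the same model, cost, x_data, y_data and optimizer defined above; it is not from the tutorial):

for i in range(100):
    with tf.GradientTape() as tape:
        # the forward pass must run inside the tape so it can record the ops
        loss = cost(model, x_data, y_data)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))

This also makes the original 'No gradients provided' error clearer: the loss has to be recomputed from the variables inside the tape (or inside the callable passed to minimize), not evaluated once up front.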
And here is the version without a class:
import tensorflow as tf
import numpy as np
x_data = np.array(
    [[0, 0], [1, 0], [1, 1], [0, 0], [0, 0], [0, 1]])
y_data = np.array([
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1],
    [1, 0, 0],
    [1, 0, 0],
    [0, 0, 1]
])
x_data = tf.cast(x_data, tf.float32)
y_data = tf.cast(y_data, tf.float32)
W1 = tf.Variable(tf.random.uniform([2, 10], -1., 1.))
W2 = tf.Variable(tf.random.uniform([10, 3], -1., 1.))
b1 = tf.Variable(tf.zeros([10]))
b2 = tf.Variable(tf.zeros([3]))
def calc_layer(x, w, b):
    return tf.matmul(x, w) + b

def model(x):
    layer1 = tf.nn.relu(calc_layer(x, W1, b1))
    return calc_layer(layer1, W2, b2)

def cost(model, inputs, targets):
    return tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=targets,
                                                logits=model(inputs))
    )

def cost_tominimize():
    return cost(model, x_data, y_data)

optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
for i in range(100):
    optimizer.minimize(loss=cost_tominimize,
                       var_list=[W1, W2, b1, b2])
    print(cost_tominimize().numpy())
#...and test part here...
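
For completeness, here is the same two-layer network written with built-in Keras layers, which is the more idiomatic TF2 style (this is my own sketch for comparison, not from the tutorial; Dense(10, relu) and Dense(3) replace the hand-written W1/b1 and W2/b2 matmuls):

import tensorflow as tf
import numpy as np

x_data = np.array([[0, 0], [1, 0], [1, 1], [0, 0], [0, 0], [0, 1]], dtype=np.float32)
y_data = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1],
                   [1, 0, 0], [1, 0, 0], [0, 0, 1]], dtype=np.float32)

model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation='relu', input_shape=(2,)),
    tf.keras.layers.Dense(3)   # logits; softmax is applied inside the loss
])
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.fit(x_data, y_data, epochs=100, verbose=0)

print("prediction : ", tf.argmax(model(x_data), 1).numpy())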