Attempting to use uninitialized value InceptionV3/Mixed_6d/Branch_3/Conv2d_0b_1x1
I modified the Inception V3 network (removed some layer modules) and created training data for 6 classes, one image per class. When I run training, I get this error:

tensorflow.python.framework.errors_impl.FailedPreconditionError:
Attempting to use uninitialized value InceptionV3/Mixed_6d/Branch_3/Conv2d_0b_1x1/weights
[[Node: InceptionV3/Mixed_6d/Branch_3/Conv2d_0b_1x1/weights/read = Identity[T=DT_FLOAT, _class=["loc:@InceptionV3/Mixed_6d/Branch_3/Conv2d_0
Training code:
import tensorflow as tf
import inception
import create_record
import numpy as np
import inception_utils

width, height = 299, 299
classes = 6
batch_size = 6
learning_rate = 0.01
max_step = 1
image_dir = '/home/xzy/test/images/'
path = '/home/xzy/test/train.tfrecords'
logs_dir = '/home/xzy/test/logs/'


# %% Training
def train():
    filename_queue = tf.train.string_input_producer([path])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'label': tf.FixedLenFeature([], tf.int64),
                                           'img_raw': tf.FixedLenFeature([], tf.string),
                                       })
    image = tf.decode_raw(features['img_raw'], tf.uint8)
    image = tf.reshape(image, [299, 299, 3])
    label = tf.cast(features['label'], tf.int32)
    image_batch, label_batch = tf.train.batch([image, label],
                                              batch_size=6, num_threads=64, capacity=300)
    label_batch = tf.one_hot(label_batch, depth=classes)
    label_batch = tf.cast(label_batch, dtype=tf.int32)
    label_batch = tf.reshape(label_batch, [batch_size, classes])

    x = tf.placeholder(tf.float32, shape=[batch_size, width, height, 3])
    y_ = tf.placeholder(tf.int16, shape=[batch_size, classes])

    init_op = tf.initialize_all_variables()

    logits = inception.inference(x, num_classes=classes)
    loss = inception.loss(logits, y_)
    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train_op = optimizer.minimize(loss, global_step=my_global_step)

    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()

    with tf.Session() as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        train_summary_writer = tf.summary.FileWriter(logs_dir, sess.graph)
        try:
            for step in np.arange(max_step):
                if coord.should_stop():
                    break
                example, lab = sess.run([image_batch, label_batch])
                example = tf.to_float(example)
                _, train_loss = sess.run([train_op, loss], feed_dict={x: example.eval(), y_: lab})
                if step == 0 or (step + 1) == max_step:
                    print('Step: %d, loss: %.4f' % (step, train_loss))
                    summary_str = sess.run(summary_op)
                    train_summary_writer.add_summary(summary_str, step)
                if step % 2000 == 0 or (step + 1) == max_step:
                    checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        coord.request_stop()
        coord.join(threads)
        sess.close()


train()
Error stack trace:
Traceback (most recent call last):
  File "/home/xzy/PycharmProjects/network/train_inception.py", line 89, in <module>
    train()
  File "/home/xzy/PycharmProjects/network/train_inception.py", line 71, in train
    _, train_loss = sess.run([train_op, loss], feed_dict={x: example.eval(), y_: lab})
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 889, in run
    run_metadata_ptr)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 1120, in _run
    feed_dict_tensor, options, run_metadata)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 1317, in _do_run
    options, run_metadata)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 1336, in _do_call
    raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.FailedPreconditionError: Attempting to use uninitialized value InceptionV3/Mixed_6d/Branch_3/Conv2d_0b_1x1/weights
  [[Node: InceptionV3/Mixed_6d/Branch_3/Conv2d_0b_1x1/weights/read = Identity[T=DT_FLOAT, _class=["loc:@InceptionV3/Mixed_6d/Branch_3/Conv2d_0
What is going wrong here? Could anyone give me some ideas? Thanks.
TensorFlow version: 1.5.0-dev20171206, Python 2.7, Ubuntu 16.04.
Your init_op is defined too early:
init_op = tf.initialize_all_variables()
# BAD! All the ops below won't get initialized!
logits = inception.inference(x, num_classes=classes)
loss = inception.loss(logits, y_)
my_global_step = tf.Variable(0, name='global_step', trainable=False)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=my_global_step)
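You can confirm that this ordering is the cause by asking the session which variables it still considers uninitialized right after running init_op (a small TF 1.x debugging sketch, dropped into the session block):

# Debugging sketch (TF 1.x): list the variables the session still sees as
# uninitialized after running the too-early init_op.
with tf.Session() as sess:
    sess.run(init_op)
    print(sess.run(tf.report_uninitialized_variables()))
    # Prints the names of the Inception weights and global_step created
    # after init_op, e.g. 'InceptionV3/Mixed_6d/Branch_3/Conv2d_0b_1x1/weights'.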
Solution:
logits = inception.inference(x, num_classes=classes)
loss = inception.loss(logits, y_)
my_global_step = tf.Variable(0, name='global_step', trainable=False)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=my_global_step)
# Now it's OK.
init_op = tf.global_variables_initializer()
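The same rule holds in isolation: an initializer op only covers the variables that already exist in the graph when it is created, so anything defined afterwards stays uninitialized. A minimal standalone sketch (TF 1.x, with illustrative variable names, not the original model):

import tensorflow as tf

a = tf.Variable(1.0, name='a')
early_init = tf.global_variables_initializer()  # covers only 'a'
b = tf.Variable(2.0, name='b')                  # created after early_init

with tf.Session() as sess:
    sess.run(early_init)
    print(sess.run(a))       # 1.0
    # sess.run(b)            # FailedPreconditionError: uninitialized value b
    sess.run(tf.global_variables_initializer())  # rebuilt init now covers 'a' and 'b'
    print(sess.run(b))       # 2.0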