Tensorflow:通过数据集(tfrecord)读取可变长度数据
Tensorflow: read variable length data, via Dataset (tfrecord)
最佳答案
我想读取一些TF记录数据
这有效,但仅适用于固定长度数据,但现在我想对可变长度数据做同样的事情 VarLenFeature
def load_tfrecord_fixed(serialized_example):
    """Parse one serialized SequenceExample whose features are fixed length.

    Returns a (context, sequence) pair of feature dicts: 'length' (int64
    scalar) and 'type' (string scalar) from the context, and 'values'
    (1-D int64 tensor) from the feature lists.
    """
    ctx, seq = tf.parse_single_sequence_example(
        serialized=serialized_example,
        context_features={
            'length': tf.FixedLenFeature([], dtype=tf.int64),
            'type': tf.FixedLenFeature([], dtype=tf.string),
        },
        sequence_features={
            "values": tf.FixedLenSequenceFeature([], dtype=tf.int64),
        },
    )
    return ctx, seq
和
# Build a tf.data pipeline over the TFRecord file and print three batches.
tf.reset_default_graph()
with tf.Session() as sess:
    filenames = [fp.name]
    ds = tf.data.TFRecordDataset(filenames)
    ds = ds.map(load_tfrecord_fixed).repeat().batch(2)
    it = ds.make_initializable_iterator()
    next_batch = it.get_next()
    sess.run(it.initializer)
    for _ in range(3):
        batch = sess.run(next_batch)
        print(batch)
结果:
({'length': array([3, 3], dtype=int64), 'type': array([b'FIXED_length', b'FIXED_length'], dtype=object)}, {'values': array([[82, 2, 2],
[42, 5, 1]], dtype=int64)}) ({'length': array([3, 3], dtype=int64), 'type': array([b'FIXED_length', b'FIXED_length'], dtype=object)}, {'values': array([[2, 3, 1],
[1, 2, 3]], dtype=int64)}) ({'length': array([3, 3], dtype=int64), 'type': array([b'FIXED_length', b'FIXED_length'], dtype=object)}, {'values': array([[ 1, 100, 200],
[123, 12, 12]], dtype=int64)})
这是我正在尝试使用的 map 函数,但最后它给了我一些错误:'(
def load_tfrecord_variable(serialized_example):
    # Parse a single SequenceExample whose "values" sequence is variable
    # length; VarLenFeature yields a SparseTensor.
    context_features = {
        'length':tf.FixedLenFeature([],dtype=tf.int64),
        'batch_size':tf.FixedLenFeature([],dtype=tf.int64),
        'type':tf.FixedLenFeature([],dtype=tf.string)
    }
    sequence_features = {
        "values":tf.VarLenFeature(tf.int64)
    }
    context_parsed, sequence_parsed = tf.parse_single_sequence_example(
        serialized=serialized_example,
        context_features=context_features,
        sequence_features=sequence_features
    )
    #return context_parsed, sequence_parsed (which is sparse)
    # return context_parsed, sequence_parsed
    # NOTE(review): tf.train.batch is a queue-runner op; inside a Dataset.map
    # function no queue runners are started, so this pipeline never produces
    # data -- presumably the cause of the OutOfRangeError shown below.
    # Batching/padding belongs in Dataset.padded_batch instead.
    batched_data = tf.train.batch(
        tensors=[sequence_parsed['values']],
        batch_size= 2,
        dynamic_pad=True
    )
    # make dense data
    dense_data = tf.sparse_tensor_to_dense(batched_data)
    return context_parsed, dense_data
错误:
OutOfRangeError: Attempted to repeat an empty dataset infinitely.
[[Node: IteratorGetNext = IteratorGetNext[output_shapes=[[], [], [], [?,?,?]], output_types=[DT_INT64, DT_INT64, DT_STRING, DT_INT64], _device="/job:localhost/replica:0/task:0/device:CPU:0"](Iterator)]]
During handling of the above exception, another exception occurred:
所以有人可以帮助我吗?另外,我每晚都在使用 tensorflow。
我不认为我错过了很多......
def load_tfrecord_variable(serialized_example):
    """Parse one SequenceExample with a variable-length 'values' sequence.

    Returns tf.tuple([length, batch_size, type, values]) where the first
    three come from the context (int64, int64, string scalars) and 'values'
    is the dense 1-D int64 `.values` tensor of the parsed SparseTensor,
    ready for Dataset.padded_batch.
    """
    context_features = {
        'length': tf.FixedLenFeature([], dtype=tf.int64),
        'batch_size': tf.FixedLenFeature([], dtype=tf.int64),
        'type': tf.FixedLenFeature([], dtype=tf.string),
    }
    sequence_features = {
        "values": tf.VarLenFeature(tf.int64),
    }
    context_parsed, sequence_parsed = tf.parse_single_sequence_example(
        serialized=serialized_example,
        context_features=context_features,
        sequence_features=sequence_features,
    )
    length = context_parsed['length']
    batch_size = context_parsed['batch_size']
    # Renamed local: the original used `type`, shadowing the builtin.
    example_type = context_parsed['type']
    # VarLenFeature parses to a SparseTensor; .values is its dense 1-D data.
    values = sequence_parsed['values'].values
    return tf.tuple([length, batch_size, example_type, values])
#
filenames = [fp.name]
dataset = tf.data.TFRecordDataset(filenames)
# BUG FIX: must map the variable-length parser, not load_tfrecord_fixed --
# the 4-element padded_shapes/padding_values below match the 4-tuple
# (length, batch_size, type, values) returned by load_tfrecord_variable.
dataset = dataset.map(load_tfrecord_variable)
dataset = dataset.repeat()
# NOTE(review): `batch_size` here must be a Python int defined before this
# point; it is unrelated to the per-example 'batch_size' context feature.
dataset = dataset.padded_batch(
    batch_size,
    padded_shapes=(
        tf.TensorShape([]),      # length: scalar
        tf.TensorShape([]),      # batch_size: scalar
        tf.TensorShape([]),      # type: scalar string
        tf.TensorShape([None])   # values: padded to the longest in the batch;
                                 # if you reshape 'values' in load_tfrecord_variable,
                                 # add the added dims after None, e.g. [None, 3]
    ),
    padding_values=(
        tf.constant(0, dtype=tf.int64),
        tf.constant(0, dtype=tf.int64),
        tf.constant(""),
        tf.constant(0, dtype=tf.int64)
    )
)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
with tf.Session() as sess:
    sess.run(iterator.initializer)
    for i in range(3):
        [length_vals, batch_size_vals, type_vals, values_vals] = sess.run(next_element)
我遇到了同样的问题。我为 Voxceleb 音频数据集创建了一个 TFRecord 文件。数据集由 1-20 秒不等的音频文件组成。
1) 读取音频文件 - audio = tf.io.read_file(audio_file_name)
2) 已解码 waveform, sr = tf.audio.decode_wav(audio)
3) 将其存储为 waveform.numpy().flatten()
但是在尝试读取数据时,我最初在特征描述(feature description)中使用了 tf.io.FixedLenFeature
,它引发了一个错误:
InvalidArgumentError: Key: waveform. Can't parse serialized Example.
Tensorflow 2.x引入了一项专门用于处理可变长度数据的新功能:RaggedTensor
要从 TFRecord 文件中读取可变长度数据,您只需在特征描述字典中使用 tf.io.RaggedFeature(dtype)
例如:
# FIX: a bare `...` is not valid inside a dict literal; the placeholder is
# now a comment so the snippet actually parses.
feature_description = {
    # Variable-length float sequence -> parsed as a tf.RaggedTensor.
    'feature0': tf.io.RaggedFeature(tf.float32),
    'feature1': tf.io.FixedLenFeature([], tf.int64, default_value=0),
    # ... add further features here ...
}
使用 RaggedFeature 我能够成功读取数据
最佳答案
我想读取一些TF记录数据
这有效,但仅适用于固定长度数据,但现在我想对可变长度数据做同样的事情 VarLenFeature
def load_tfrecord_fixed(serialized_example):
    """Parse one serialized SequenceExample whose features are fixed length.

    Returns a (context, sequence) pair of feature dicts: 'length' (int64
    scalar) and 'type' (string scalar) from the context, and 'values'
    (1-D int64 tensor) from the feature lists.
    """
    ctx, seq = tf.parse_single_sequence_example(
        serialized=serialized_example,
        context_features={
            'length': tf.FixedLenFeature([], dtype=tf.int64),
            'type': tf.FixedLenFeature([], dtype=tf.string),
        },
        sequence_features={
            "values": tf.FixedLenSequenceFeature([], dtype=tf.int64),
        },
    )
    return ctx, seq
和
# Build a tf.data pipeline over the TFRecord file and print three batches.
tf.reset_default_graph()
with tf.Session() as sess:
    filenames = [fp.name]
    ds = tf.data.TFRecordDataset(filenames)
    ds = ds.map(load_tfrecord_fixed).repeat().batch(2)
    it = ds.make_initializable_iterator()
    next_batch = it.get_next()
    sess.run(it.initializer)
    for _ in range(3):
        batch = sess.run(next_batch)
        print(batch)
结果:
({'length': array([3, 3], dtype=int64), 'type': array([b'FIXED_length', b'FIXED_length'], dtype=object)}, {'values': array([[82, 2, 2],
[42, 5, 1]], dtype=int64)}) ({'length': array([3, 3], dtype=int64), 'type': array([b'FIXED_length', b'FIXED_length'], dtype=object)}, {'values': array([[2, 3, 1],
[1, 2, 3]], dtype=int64)}) ({'length': array([3, 3], dtype=int64), 'type': array([b'FIXED_length', b'FIXED_length'], dtype=object)}, {'values': array([[ 1, 100, 200],
[123, 12, 12]], dtype=int64)})
这是我正在尝试使用的 map 函数,但最后它给了我一些错误:'(
def load_tfrecord_variable(serialized_example):
    # Parse a single SequenceExample whose "values" sequence is variable
    # length; VarLenFeature yields a SparseTensor.
    context_features = {
        'length':tf.FixedLenFeature([],dtype=tf.int64),
        'batch_size':tf.FixedLenFeature([],dtype=tf.int64),
        'type':tf.FixedLenFeature([],dtype=tf.string)
    }
    sequence_features = {
        "values":tf.VarLenFeature(tf.int64)
    }
    context_parsed, sequence_parsed = tf.parse_single_sequence_example(
        serialized=serialized_example,
        context_features=context_features,
        sequence_features=sequence_features
    )
    #return context_parsed, sequence_parsed (which is sparse)
    # return context_parsed, sequence_parsed
    # NOTE(review): tf.train.batch is a queue-runner op; inside a Dataset.map
    # function no queue runners are started, so this pipeline never produces
    # data -- presumably the cause of the OutOfRangeError shown below.
    # Batching/padding belongs in Dataset.padded_batch instead.
    batched_data = tf.train.batch(
        tensors=[sequence_parsed['values']],
        batch_size= 2,
        dynamic_pad=True
    )
    # make dense data
    dense_data = tf.sparse_tensor_to_dense(batched_data)
    return context_parsed, dense_data
错误:
OutOfRangeError: Attempted to repeat an empty dataset infinitely.
[[Node: IteratorGetNext = IteratorGetNext[output_shapes=[[], [], [], [?,?,?]], output_types=[DT_INT64, DT_INT64, DT_STRING, DT_INT64], _device="/job:localhost/replica:0/task:0/device:CPU:0"](Iterator)]]
During handling of the above exception, another exception occurred:
所以有人可以帮助我吗?另外,我每晚都在使用 tensorflow。 我不认为我错过了很多......
def load_tfrecord_variable(serialized_example):
    """Parse one SequenceExample with a variable-length 'values' sequence.

    Returns tf.tuple([length, batch_size, type, values]) where the first
    three come from the context (int64, int64, string scalars) and 'values'
    is the dense 1-D int64 `.values` tensor of the parsed SparseTensor,
    ready for Dataset.padded_batch.
    """
    context_features = {
        'length': tf.FixedLenFeature([], dtype=tf.int64),
        'batch_size': tf.FixedLenFeature([], dtype=tf.int64),
        'type': tf.FixedLenFeature([], dtype=tf.string),
    }
    sequence_features = {
        "values": tf.VarLenFeature(tf.int64),
    }
    context_parsed, sequence_parsed = tf.parse_single_sequence_example(
        serialized=serialized_example,
        context_features=context_features,
        sequence_features=sequence_features,
    )
    length = context_parsed['length']
    batch_size = context_parsed['batch_size']
    # Renamed local: the original used `type`, shadowing the builtin.
    example_type = context_parsed['type']
    # VarLenFeature parses to a SparseTensor; .values is its dense 1-D data.
    values = sequence_parsed['values'].values
    return tf.tuple([length, batch_size, example_type, values])
#
filenames = [fp.name]
dataset = tf.data.TFRecordDataset(filenames)
# BUG FIX: must map the variable-length parser, not load_tfrecord_fixed --
# the 4-element padded_shapes/padding_values below match the 4-tuple
# (length, batch_size, type, values) returned by load_tfrecord_variable.
dataset = dataset.map(load_tfrecord_variable)
dataset = dataset.repeat()
# NOTE(review): `batch_size` here must be a Python int defined before this
# point; it is unrelated to the per-example 'batch_size' context feature.
dataset = dataset.padded_batch(
    batch_size,
    padded_shapes=(
        tf.TensorShape([]),      # length: scalar
        tf.TensorShape([]),      # batch_size: scalar
        tf.TensorShape([]),      # type: scalar string
        tf.TensorShape([None])   # values: padded to the longest in the batch;
                                 # if you reshape 'values' in load_tfrecord_variable,
                                 # add the added dims after None, e.g. [None, 3]
    ),
    padding_values=(
        tf.constant(0, dtype=tf.int64),
        tf.constant(0, dtype=tf.int64),
        tf.constant(""),
        tf.constant(0, dtype=tf.int64)
    )
)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
with tf.Session() as sess:
    sess.run(iterator.initializer)
    for i in range(3):
        [length_vals, batch_size_vals, type_vals, values_vals] = sess.run(next_element)
我遇到了同样的问题。我为 Voxceleb 音频数据集创建了一个 TFRecord 文件。数据集由 1-20 秒不等的音频文件组成。
1) 读取音频文件 - audio = tf.io.read_file(audio_file_name)
2) 已解码 waveform, sr = tf.audio.decode_wav(audio)
3) 将其存储为 waveform.numpy().flatten()
但是在尝试读取数据时,我最初在特征描述(feature description)中使用了 tf.io.FixedLenFeature
,它引发了一个错误:
InvalidArgumentError: Key: waveform. Can't parse serialized Example.
Tensorflow 2.x引入了一项专门用于处理可变长度数据的新功能:RaggedTensor
要从 TFRecord 文件中读取可变长度数据,您只需在特征描述字典中使用 tf.io.RaggedFeature(dtype)
例如:
# FIX: a bare `...` is not valid inside a dict literal; the placeholder is
# now a comment so the snippet actually parses.
feature_description = {
    # Variable-length float sequence -> parsed as a tf.RaggedTensor.
    'feature0': tf.io.RaggedFeature(tf.float32),
    'feature1': tf.io.FixedLenFeature([], tf.int64, default_value=0),
    # ... add further features here ...
}
使用 RaggedFeature 我能够成功读取数据