Tensorflow.image.decode_jpeg shifts values while decoding image TFRecord data
Using SequenceExample, I store a variable number of JPEG frames per example in a TFRecord:
tf.compat.as_bytes(cv2.imencode(".jpg", frame)[1].tobytes())
Then I parse those frames with:
images = tf.map_fn(lambda x: tf.image.decode_jpeg(x, channels=3), sequence_features['frames'], dtype=tf.uint8)
But the image values come out shifted somehow.
When I just parse the raw byte strings and decode them with OpenCV instead, the images look normal:
for img in images:
    img = np.frombuffer(img, dtype=np.uint8)
    img = cv2.imdecode(img, 1)  # decode the raw JPEG bytes with OpenCV
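To check whether the values are really shifted or merely reordered, the two decode paths can be compared directly. This is a minimal sketch assuming TF 1.x (to match the code above) and a synthetic frame rather than one read back from the TFRecord:

import cv2
import numpy as np
import tensorflow as tf

# Encode a dummy frame exactly the way the writer does (cv2 treats it as BGR)
frame = np.zeros((8, 8, 3), np.uint8)
frame[..., 0] = 255                                           # "blue" in BGR order
raw = cv2.imencode(".jpg", frame)[1].tobytes()

cv_img = cv2.imdecode(np.frombuffer(raw, np.uint8), 1)        # OpenCV returns BGR
with tf.Session() as sess:
    tf_img = sess.run(tf.image.decode_jpeg(raw, channels=3))  # TF returns RGB

# Roughly [255 0 0] vs [0 0 255]: the same pixel with the channel order reversed
print(cv_img[0, 0], tf_img[0, 0])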
A more complete example:
def write(videos, tfr_path):
    with tf.python_io.TFRecordWriter(tfr_path) as writer:
        for video in videos:
            label = get_label()
            frames = []
            for frame in video:
                # JPEG-encode each frame and store it as a raw byte string
                frames.append(tf.compat.as_bytes(cv2.imencode(".jpg", frame)[1].tobytes()))
            feature_list = {
                'label': _float_list_feature_list(label),
                'frames': _bytes_feature_list(frames)
            }
            feature_lists = tf.train.FeatureLists(feature_list=feature_list)
            example = tf.train.SequenceExample(feature_lists=feature_lists, context=None)
            writer.write(example.SerializeToString())
def _parse_tfr_data(example, size):
    sequence_features = {
        'label': tf.FixedLenSequenceFeature([size], dtype=tf.float32),
        'frames': tf.FixedLenSequenceFeature([], dtype=tf.string)
    }
    _, sequence_features = tf.parse_single_sequence_example(example, context_features=None,
                                                            sequence_features=sequence_features)
    # Decode every JPEG byte string in the sequence into a uint8 image tensor
    images = tf.map_fn(lambda x: tf.image.decode_jpeg(x, channels=3), sequence_features['frames'], dtype=tf.uint8)
    label = sequence_features['label']
    return images, label
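For reference, here is one way the parse function might be plugged into an input pipeline; a minimal sketch assuming TF 1.x, with a hypothetical file name videos.tfrecord and label size 4:

import tensorflow as tf

dataset = tf.data.TFRecordDataset("videos.tfrecord")           # hypothetical path
dataset = dataset.map(lambda ex: _parse_tfr_data(ex, size=4))  # hypothetical label size
images, label = dataset.make_one_shot_iterator().get_next()

with tf.Session() as sess:
    imgs, lbl = sess.run([images, label])  # imgs has shape [num_frames, height, width, 3]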
Thanks to Dan Mašek! tf.image.decode_jpeg returns RGB while cv2.imencode expects BGR, so swapping the channels beforehand works.
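A minimal sketch of that write-side swap (encode_frame is a hypothetical helper): reversing the channel order before cv2.imencode makes the bytes that tf.image.decode_jpeg later decodes come back in the order the rest of the pipeline expects. COLOR_RGB2BGR and COLOR_BGR2RGB are both a plain channel reversal, so either constant performs the same swap:

import cv2
import tensorflow as tf

def encode_frame(frame):
    # Swap the channel order before encoding, since cv2.imencode interprets
    # its input as BGR while tf.image.decode_jpeg always hands back RGB
    swapped = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    return tf.compat.as_bytes(cv2.imencode(".jpg", swapped)[1].tobytes())

Alternatively, the same swap can be applied at read time on the decoded tensor (e.g. images[..., ::-1]), which avoids rewriting the TFRecords.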