How to train a CNN on a mix of images and additional data using ImageAugmentation in TFlearn

I want to train a convolutional neural network in TFlearn (TensorFlow) on a mix of images (pixel information) and additional data. Because I have very few images, I need to use image augmentation to increase the number of image samples passed to the network. But that means I can only pass the image data as the input data, and the non-image data has to be added at a later stage, presumably before the fully connected layers. I don't know how to do this, because it seems I can only tell the network which data to use when calling model.fit({'input': ...}), and I can't apply the image augmentation if I concatenate the two types of data directly in input_data. Is there any way to concatenate at an intermediate stage to add the extra data, or any other alternative that lets me use ImageAugmentation together with the non-image data needed to train the network? My code is below with some comments. Many thanks.

import numpy as np
import tensorflow as tf
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
from tflearn.data_augmentation import ImageAugmentation

#px_train:pixel data, data_train: additional data 
px_train, data_train, px_cv, data_cv, labels_train, labels_cv = prepare_data(path, filename)

img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle = 89.)
img_aug.add_random_blur(sigma_max=3.)
img_aug.add_random_flip_updown()
img_aug.add_random_90degrees_rotation(rotations = [0, 1, 2, 3])

#I can only pass image data here to apply data_augmentation 
convnet = input_data(shape = [None, 96, 96, 1], name = 'input', data_augmentation = img_aug)

convnet = conv_2d(convnet, 32, 2, activation = 'relu')
convnet = max_pool_2d(convnet, 2)                                   

convnet = conv_2d(convnet, 64, 2, activation = 'relu')
convnet = max_pool_2d(convnet, 2)                                   

convnet = tf.reshape(convnet, [-1, 24*24*64])    
#convnet = tf.concat((convnet, conv_feat), 1)
#If I concatenated data like above, where could I tell Tensorflow to assign the variable conv_feat to my 'data_train' values?

convnet = fully_connected(convnet, 1024, activation = 'relu')
convnet = dropout(convnet, 0.8)

convnet = fully_connected(convnet, 99, activation = 'softmax')
convnet = regression(convnet, optimizer = 'adam', learning_rate = 0.01, loss = 'categorical_crossentropy', name = 'labels')

model = tflearn.DNN(convnet)

#I can't add an additional named 'input' here to pass my 'data_train'; TF raises an error.
model.fit({'input': np.array(px_train).reshape(-1, 96, 96, 1)}, {'labels': labels_train}, n_epoch = 50, validation_set = ({'input': np.array(px_cv).reshape(-1, 96, 96, 1)}, {'labels': labels_cv}), snapshot_step = 500, show_metric = True, run_id = 'Test')

Take a look at the documentation for the model.fit method: http://tflearn.org/models/dnn/. To give model.fit multiple inputs, you simply pass them as a list, i.e. model.fit([X1, X2], Y). This way X1 is passed to the first input_data layer you have and X2 to the second input_data layer.

If you are looking to concatenate different layers, you can check out the merge layers in TFlearn: http://tflearn.org/layers/merge_ops/
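To make those two points concrete, here is a minimal, self-contained sketch of the pattern (not the asker's network; the layer sizes and random data are made up purely for illustration): two input_data layers are merged with merge and trained by passing one array per input as a list.

import numpy as np
import tflearn
from tflearn.layers.core import input_data, fully_connected
from tflearn.layers.merge_ops import merge
from tflearn.layers.estimator import regression

# Two independent inputs: model.fit expects one array per input_data layer,
# in the order the layers are defined.
in_a = input_data(shape = [None, 8])
in_b = input_data(shape = [None, 4])

# 'concat' joins the tensors along axis 1 (the feature axis).
net = merge([in_a, in_b], 'concat')
net = fully_connected(net, 2, activation = 'softmax')
net = regression(net, optimizer = 'adam', loss = 'categorical_crossentropy')

model = tflearn.DNN(net)

X1 = np.random.rand(10, 8)                    # goes to the first input_data layer
X2 = np.random.rand(10, 4)                    # goes to the second input_data layer
Y = np.eye(2)[np.random.randint(0, 2, 10)]    # one-hot dummy labels
model.fit([X1, X2], Y, n_epoch = 1)

The same convention carries over to the full example below, where the first array in the list feeds the image branch and the second feeds the extra-data branch.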

Edit 1:

I think the code below should run, but you may want to merge the layers in a different way than I did.

import numpy as np
import tensorflow as tf
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
from tflearn.layers.merge_ops import merge
from tflearn.data_augmentation import ImageAugmentation
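# px_train, data_train, px_cv, data_cv, labels_train and labels_cv are assumed
# to come from prepare_data(path, filename) as in the question.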

img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle = 89.)
img_aug.add_random_blur(sigma_max=3.)
img_aug.add_random_flip_updown()
img_aug.add_random_90degrees_rotation(rotations = [0, 1, 2, 3])

convnet = input_data(shape = [None, 96, 96, 1], data_augmentation = img_aug)
convfeat = input_data(shape = [None, 120])

convnet = conv_2d(convnet, 32, 2, activation = 'relu')
convnet = max_pool_2d(convnet, 2)                                   

convnet = conv_2d(convnet, 64, 2, activation = 'relu')
convnet = max_pool_2d(convnet, 2)                                   

# To merge the layers they need compatible shapes: fully_connected flattens the
# conv output to [None, 120] so it can be concatenated with convfeat
convnet = fully_connected(convnet, 120)
convnet = merge([convnet, convfeat], 'concat')

convnet = fully_connected(convnet, 1024, activation = 'relu')
convnet = dropout(convnet, 0.8)

convnet = fully_connected(convnet, 99, activation = 'softmax')
convnet = regression(convnet, optimizer = 'adam', learning_rate = 0.01, loss = 'categorical_crossentropy', name = 'labels')

model = tflearn.DNN(convnet)

# Give multiple inputs as a list
model.fit([np.array(px_train).reshape(-1, 96, 96, 1), np.array(data_train).reshape(-1, 120)], 
           labels_train, 
           n_epoch = 50, 
           validation_set = ([np.array(px_cv).reshape(-1, 96, 96, 1), np.array(data_cv).reshape(-1, 120)], labels_cv), 
           snapshot_step = 500, 
           show_metric = True, 
           run_id = 'Test')
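Note that prediction should follow the same list-of-inputs convention, e.g. model.predict([np.array(px_cv).reshape(-1, 96, 96, 1), np.array(data_cv).reshape(-1, 120)]), with the arrays ordered to match the input_data layers.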