How to combine (merge) different regression models
I am trying to train different models that each produce a different estimate for a human-pose problem. What I actually need is a separate output from a regression model for each joint of the human body. After searching around this problem, I came up with two approaches:
- Train separate models and combine their final results.
- Train the models in a chain (the input of the second model is the output of the first model, and so on).
I know Keras has a function called concatenate, which is a layer that merges the outputs of two models. But if I do not want to use Keras, is it possible to have 6 models and then merge them in such a way that the final trained model estimates all the outputs of these different models at once?
My models look like this (they vary depending on the different datasets I have):
## conv1 layer
W_conv1 = weight_func([3, 3, 1, 32])
b_conv1 = bias_func([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
# h_pool1 = max_pool_2x2(h_conv1)
#h_drop1 = tf.nn.dropout(h_conv1, keep_prob)
## conv2 layer
W_conv2 = weight_func([3, 3, 32, 64]) # patch 3x3, in size 32, out size 64
b_conv2 = bias_func([64])
h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)
#h_drop2 = tf.nn.dropout(h_conv2, keep_prob)
## conv3 layer
W_conv3 = weight_func([3, 3, 64, 128])
b_conv3 = bias_func([128])
h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3) + b_conv3)
#h_drop3 = tf.nn.dropout(h_conv3, keep_prob)
## conv4 layer
W_conv4 = weight_func([3, 3, 128, 256]) # patch 3x3, in size 128, out size 256
b_conv4 = bias_func([256])
h_conv4 = tf.nn.relu(conv2d(h_conv3, W_conv4) + b_conv4)
#h_drop4 = tf.nn.dropout(h_conv4, keep_prob)
## fc1 layer
W_fc1 = weight_func([6 * 6 * 256, 9216])
b_fc1 = bias_func([9216])
h_pool2_flat = tf.reshape(h_conv4, [-1, 6 * 6 * 256])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# fc2 layer
W_fc2 = weight_func([9216, 1])
b_fc2 = bias_func([1])
prediction = tf.add(tf.matmul(h_fc1_drop, W_fc2), b_fc2, name='output_node')
# Sum-of-squares regression loss (not a cross-entropy).
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), axis=[1]))
You can use the functional API to achieve this.
I have added a simple example; you can adapt it to a more complex model for your use case.
Code:
import tensorflow as tf
import numpy as np
# Here I have generated two different datasets, with labels, containing different numbers of features.
x1 = tf.constant(np.random.randint(50, size=(1000, 13)), dtype=tf.float32)
y1 = tf.constant(np.random.randint(2, size=(1000,)), dtype=tf.int32)
x2 = tf.constant(np.random.randint(50, size=(1000, 6)), dtype=tf.float32)
y2 = tf.constant(np.random.randint(2, size=(1000,)), dtype=tf.int32)
# Creation of the model
def create_model3():
    input1 = tf.keras.Input(shape=(13,), name='I1')
    input2 = tf.keras.Input(shape=(6,), name='I2')

    hidden1 = tf.keras.layers.Dense(units=4, activation='relu')(input1)
    hidden2 = tf.keras.layers.Dense(units=4, activation='relu')(input2)
    hidden3 = tf.keras.layers.Dense(units=3, activation='relu')(hidden1)
    hidden4 = tf.keras.layers.Dense(units=3, activation='relu')(hidden2)
    output1 = tf.keras.layers.Dense(units=2, activation='softmax', name='O1')(hidden3)
    output2 = tf.keras.layers.Dense(units=2, activation='softmax', name='O2')(hidden4)

    model = tf.keras.models.Model(inputs=[input1, input2], outputs=[output1, output2])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model

model = create_model3()
tf.keras.utils.plot_model(model, 'my_first_model.png', show_shapes=True)
Model architecture: (see the diagram that plot_model writes to my_first_model.png, showing the two parallel branches I1 → O1 and I2 → O2)
You can train this model with model.fit() like this:
history = model.fit(
    x={'I1': x1, 'I2': x2},
    y={'O1': y1, 'O2': y2},
    batch_size=32,
    epochs=10,
    verbose=1,
    callbacks=None,
    # validation_data = [(val_data, new_val_data), (val_labels, new_val_labels)]
)
Note: for training to work, the number of samples must be the same across all input data, i.e. since x1 contains 1000 rows, x2 must also contain 1000 rows.
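As a quick sanity check, one can assert that the row counts agree before calling fit (a trivial guard, sketched here):

# Every input and label tensor must contribute one row per training sample.
assert x1.shape[0] == x2.shape[0] == y1.shape[0] == y2.shape[0]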
You can make predictions with this model like this:
model.predict(x={'I1': x1, 'I2': x2})
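To map this back to the pose problem in the question, here is a minimal sketch of a single functional-API model with a shared convolutional trunk and six one-unit regression heads, one per joint. It assumes a 96x96 grayscale input and a 2x2 max-pool after each conv block (consistent with the 6 * 6 * 256 flatten in the question's code); the joint_0 to joint_5 head names are made up for illustration.

import tensorflow as tf

def create_joint_model(num_joints=6):
    # Single image input shared by all joint regressors.
    inp = tf.keras.Input(shape=(96, 96, 1), name='image')

    # Shared convolutional trunk mirroring the conv1-conv4 stack in the question;
    # with a 2x2 max-pool after each block, a 96x96 input ends up as 6x6x256.
    x = inp
    for filters in (32, 64, 128, 256):
        x = tf.keras.layers.Conv2D(filters, 3, padding='same', activation='relu')(x)
        x = tf.keras.layers.MaxPooling2D(2)(x)
    x = tf.keras.layers.Flatten()(x)
    x = tf.keras.layers.Dense(9216, activation='relu')(x)
    x = tf.keras.layers.Dropout(0.5)(x)

    # One linear regression head per joint; all heads are trained jointly.
    outputs = [tf.keras.layers.Dense(1, name='joint_%d' % i)(x)
               for i in range(num_joints)]

    model = tf.keras.models.Model(inputs=inp, outputs=outputs)
    # Squared-error loss on every head, matching the loss used in the question.
    model.compile(optimizer='adam', loss='mse')
    return model

joint_model = create_joint_model()
joint_model.summary()

Training then passes one label array per head, e.g. joint_model.fit(images, [y0, y1, y2, y3, y4, y5]). The chained variant from the question's second bullet can be expressed in the same style by feeding one head's output into the next sub-network as an extra input; a small sketch with made-up layer sizes:

# Two feature inputs; the second joint's estimate also sees the first joint's output.
feat1 = tf.keras.Input(shape=(13,), name='F1')
feat2 = tf.keras.Input(shape=(6,), name='F2')

hidden_a = tf.keras.layers.Dense(4, activation='relu')(feat1)
joint1 = tf.keras.layers.Dense(1, name='J1')(hidden_a)

# Chain: concatenate the first sub-network's output with the second one's features.
chained = tf.keras.layers.Concatenate()([feat2, joint1])
hidden_b = tf.keras.layers.Dense(4, activation='relu')(chained)
joint2 = tf.keras.layers.Dense(1, name='J2')(hidden_b)

chain_model = tf.keras.models.Model(inputs=[feat1, feat2], outputs=[joint1, joint2])
chain_model.compile(optimizer='adam', loss='mse')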