Keras:尺寸必须相等
Keras: Dimensions must be equal
我在用keras做一些分类,遇到这个错误:
InvalidArgumentError: Dimensions must be equal, but are 256 and 8 for 'dense_185/MatMul' (op: 'MatMul') with input shapes: [?,256], [8,300].
令我惊讶的是,错误中 Dense 层推断出的输入维度竟然是 8(而不是期望的 256)。
这是一个带有几个自定义层的顺序模型。不知道为什么dense layer的error里会出现8
class Residual(Layer):
    """Residual block: three Conv1D(256, kernel=4) layers with a shortcut.

    NOTE(review): the question's original code — creating Conv1D /
    ZeroPadding1D / Add layers inside ``call`` builds fresh layers on
    every invocation, so their weights are not tracked by this Layer.
    Reproduced here with the paste's broken formatting repaired: the
    bare ``(?, 125, 256)`` shape notes after each print were invalid
    syntax and are now comments.
    """

    def __init__(self, input_shape, **kwargs):
        super(Residual, self).__init__(**kwargs)
        # Kept only to forward to the first Conv1D below.
        self.input_shapes = input_shape

    def call(self, x):
        print(np.shape(x))  # (?, 128, 8)
        first_layer = Conv1D(256, 4, activation='relu',
                             input_shape=self.input_shapes)(x)
        print(np.shape(first_layer))  # (?, 125, 256)
        x = Conv1D(256, 4, activation='relu')(first_layer)
        print(np.shape(x))  # (?, 122, 256)
        x = Conv1D(256, 4, activation='relu')(x)
        print(np.shape(x))  # (?, 119, 256)
        # Pad back the 6 timesteps lost by the two later convs (3 per
        # side) so the shortcut Add() shapes match first_layer.
        x = ZeroPadding1D(padding=3)(x)
        residual = Add()([x, first_layer])
        x = Activation("relu")(residual)
        return x
class Pooling(Layer):
    """Sum of global max pooling and global average pooling over time.

    Reproduces the question's code with formatting repaired: the bare
    ``(?, 256)`` after the print was invalid syntax and is now a comment.
    """

    def __init__(self, **kwargs):
        super(Pooling, self).__init__(**kwargs)

    def call(self, x):
        first_layer = GlobalMaxPooling1D(data_format='channels_last')(x)
        second_layer = GlobalAveragePooling1D(data_format='channels_last')(x)
        pooling = Add()([first_layer, second_layer])
        print(np.shape(pooling))  # (?, 256)
        # NOTE(review): no compute_output_shape() here — downstream Dense
        # layers cannot infer this layer's output shape, which is the
        # root cause of the reported error.
        return pooling
# Model setup exactly as in the question: no InputLayer, and Pooling
# has no compute_output_shape — this is the configuration that raises
# the InvalidArgumentError at the first Dense layer.
model = Sequential()
model.add(Residual(input_shape=(128,8)))
model.add(Pooling())
model.add(Dense(300, activation='relu'))  # fails: input dim inferred as 8, not 256
model.add(Dense(150, activation='relu'))
model.add(Dense(10, activation='softmax'))  # 10-class output
model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])
model.fit(np.array(dataset_data), dataset_target, epochs=1000, validation_split=0.1, verbose=1, batch_size=8)
维度:
(1000, 128, 8) - 输入(1000 个音频,8 个特征,128 seq_length)
(1000, 10) - 目标(1000 个音频,10 类)
我认为需要进行两项编辑:
- 添加 InputLayer 作为数据的入口;
- 至少为 Pooling 层定义 compute_output_shape 方法(见文档链接)。如果未定义此方法,Dense 层就无法推断它的输入形状,我猜这就是失败的原因。
还有一处小的编辑——因为模型已经有了 InputLayer,Residual 层中不再需要 input_shape 参数。
class Residual(Layer):
    """Residual block: three Conv1D(256, kernel=4) layers plus a shortcut.

    The input shape now comes from the model's InputLayer, so the
    ``input_shape`` kwarg was removed. Indentation (lost in the paste)
    has been restored.
    """

    def __init__(self, **kwargs):  # input_shape kwarg removed
        super(Residual, self).__init__(**kwargs)

    def call(self, x):
        print(np.shape(x))
        first_layer = Conv1D(256, 4, activation='relu')(x)
        print(np.shape(first_layer))
        x = Conv1D(256, 4, activation='relu')(first_layer)
        print(np.shape(x))
        x = Conv1D(256, 4, activation='relu')(x)
        print(np.shape(x))
        # Pad back the 6 timesteps lost by the two later convs so the
        # shortcut Add() shapes match first_layer.
        x = ZeroPadding1D(padding=3)(x)
        residual = Add()([x, first_layer])
        x = Activation("relu")(residual)
        return x
class Pooling(Layer):
    """Global max pooling + global average pooling, summed element-wise.

    Defines compute_output_shape so downstream Dense layers can infer
    their input size. Indentation (lost in the paste) has been restored.
    """

    def __init__(self, **kwargs):
        super(Pooling, self).__init__(**kwargs)

    def call(self, x):
        # !!! I build model without data_format argument - my version of keras
        # doesn't support it !!!
        first_layer = GlobalMaxPooling1D(data_format='channels_last')(x)
        second_layer = GlobalAveragePooling1D(data_format='channels_last')(x)
        pooling = Add()([first_layer, second_layer])
        print(np.shape(pooling))
        self.output_dim = int(np.shape(pooling)[-1])  # save output shape
        return pooling

    def compute_output_shape(self, input_shape):
        # Report (batch, channels) so the next layer can build its weights;
        # without this method, Dense cannot determine its input dim.
        return (input_shape[0], self.output_dim)
初始化模型:
# Fixed model: an explicit InputLayer supplies the input shape, so
# every subsequent layer can be built with the correct dimensions.
model = Sequential()
model.add(InputLayer((128,8)))  # entry point: (seq_len=128, features=8)
model.add(Residual())
model.add(Pooling())  # -> (batch, 256), via compute_output_shape
model.add(Dense(300, activation='relu'))
model.add(Dense(150, activation='relu'))
model.add(Dense(10, activation='softmax'))  # 10-class output
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
Out:
(?, 128, 8)
(?, 125, 256)
(?, 122, 256)
(?, 119, 256)
(?, 256)
模型总结(不知道为什么 Residual 和 Pooling 不显示参数。我想这些自定义类还需要一些额外的方法,Keras 才能统计其内部参数):
model.summary()
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
residual_10 (Residual) (None, 128, 8) 0
_________________________________________________________________
pooling_8 (Pooling) (None, 256) 0
_________________________________________________________________
dense_15 (Dense) (None, 300) 77100
_________________________________________________________________
dense_16 (Dense) (None, 150) 45150
_________________________________________________________________
dense_17 (Dense) (None, 10) 1510
=================================================================
Total params: 123,760
Trainable params: 123,760
Non-trainable params: 0
_________________________________________________________________
创建假数据并检查训练过程:
# Fake data matching the stated dimensions: 1000 samples of shape
# (128, 8), with every sample labelled as class 0 (one-hot).
dataset_data = np.random.randn(1000, 128, 8)
dataset_target = np.zeros((1000, 10))
dataset_target[:, 0] = 1
model.fit(np.array(dataset_data), dataset_target, epochs=1000,
validation_split=0.1, verbose=1, batch_size=8)
Train on 900 samples, validate on 100 samples
Epoch 1/1000
900/900 [==============================] - 2s 2ms/step - loss: 0.0235 - acc: 0.9911 - val_loss: 9.4426e-05 - val_acc: 1.0000
Epoch 2/1000
900/900 [==============================] - 1s 1ms/step - loss: 4.2552e-05 - acc: 1.0000 - val_loss: 1.7458e-05 - val_acc: 1.0000
Epoch 3/1000
900/900 [==============================] - 1s 1ms/step - loss: 1.1342e-05 - acc: 1.0000 - val_loss: 7.3141e-06 - val_acc: 1.0000
... and so on
看起来有效。
我在用keras做一些分类,遇到这个错误:
InvalidArgumentError: Dimensions must be equal, but are 256 and 8 for 'dense_185/MatMul' (op: 'MatMul') with input shapes: [?,256], [8,300].
令我惊讶的是,错误中 Dense 层推断出的输入维度竟然是 8(而不是期望的 256)。
这是一个带有几个自定义层的顺序模型。不知道为什么dense layer的error里会出现8
class Residual(Layer):
    """Residual block: three Conv1D(256, kernel=4) layers with a shortcut.

    NOTE(review): the question's original code — creating Conv1D /
    ZeroPadding1D / Add layers inside ``call`` builds fresh layers on
    every invocation, so their weights are not tracked by this Layer.
    Reproduced here with the paste's broken formatting repaired: the
    bare ``(?, 125, 256)`` shape notes after each print were invalid
    syntax and are now comments.
    """

    def __init__(self, input_shape, **kwargs):
        super(Residual, self).__init__(**kwargs)
        # Kept only to forward to the first Conv1D below.
        self.input_shapes = input_shape

    def call(self, x):
        print(np.shape(x))  # (?, 128, 8)
        first_layer = Conv1D(256, 4, activation='relu',
                             input_shape=self.input_shapes)(x)
        print(np.shape(first_layer))  # (?, 125, 256)
        x = Conv1D(256, 4, activation='relu')(first_layer)
        print(np.shape(x))  # (?, 122, 256)
        x = Conv1D(256, 4, activation='relu')(x)
        print(np.shape(x))  # (?, 119, 256)
        # Pad back the 6 timesteps lost by the two later convs (3 per
        # side) so the shortcut Add() shapes match first_layer.
        x = ZeroPadding1D(padding=3)(x)
        residual = Add()([x, first_layer])
        x = Activation("relu")(residual)
        return x
class Pooling(Layer):
    """Sum of global max pooling and global average pooling over time.

    Reproduces the question's code with formatting repaired: the bare
    ``(?, 256)`` after the print was invalid syntax and is now a comment.
    """

    def __init__(self, **kwargs):
        super(Pooling, self).__init__(**kwargs)

    def call(self, x):
        first_layer = GlobalMaxPooling1D(data_format='channels_last')(x)
        second_layer = GlobalAveragePooling1D(data_format='channels_last')(x)
        pooling = Add()([first_layer, second_layer])
        print(np.shape(pooling))  # (?, 256)
        # NOTE(review): no compute_output_shape() here — downstream Dense
        # layers cannot infer this layer's output shape, which is the
        # root cause of the reported error.
        return pooling
# Model setup exactly as in the question: no InputLayer, and Pooling
# has no compute_output_shape — this is the configuration that raises
# the InvalidArgumentError at the first Dense layer.
model = Sequential()
model.add(Residual(input_shape=(128,8)))
model.add(Pooling())
model.add(Dense(300, activation='relu'))  # fails: input dim inferred as 8, not 256
model.add(Dense(150, activation='relu'))
model.add(Dense(10, activation='softmax'))  # 10-class output
model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])
model.fit(np.array(dataset_data), dataset_target, epochs=1000, validation_split=0.1, verbose=1, batch_size=8)
维度:
(1000, 128, 8) - 输入(1000 个音频,8 个特征,128 seq_length)
(1000, 10) - 目标(1000 个音频,10 类)
我认为需要进行两项编辑:
- 添加 InputLayer 作为数据的入口;
- 至少为 Pooling 层定义 compute_output_shape 方法(见文档链接)。如果未定义此方法,Dense 层就无法推断它的输入形状,我猜这就是失败的原因。
还有一处小的编辑——因为模型已经有了 InputLayer,Residual 层中不再需要 input_shape 参数。
class Residual(Layer):
    """Residual block: three Conv1D(256, kernel=4) layers plus a shortcut.

    The input shape now comes from the model's InputLayer, so the
    ``input_shape`` kwarg was removed. Indentation (lost in the paste)
    has been restored.
    """

    def __init__(self, **kwargs):  # input_shape kwarg removed
        super(Residual, self).__init__(**kwargs)

    def call(self, x):
        print(np.shape(x))
        first_layer = Conv1D(256, 4, activation='relu')(x)
        print(np.shape(first_layer))
        x = Conv1D(256, 4, activation='relu')(first_layer)
        print(np.shape(x))
        x = Conv1D(256, 4, activation='relu')(x)
        print(np.shape(x))
        # Pad back the 6 timesteps lost by the two later convs so the
        # shortcut Add() shapes match first_layer.
        x = ZeroPadding1D(padding=3)(x)
        residual = Add()([x, first_layer])
        x = Activation("relu")(residual)
        return x
class Pooling(Layer):
    """Global max pooling + global average pooling, summed element-wise.

    Defines compute_output_shape so downstream Dense layers can infer
    their input size. Indentation (lost in the paste) has been restored.
    """

    def __init__(self, **kwargs):
        super(Pooling, self).__init__(**kwargs)

    def call(self, x):
        # !!! I build model without data_format argument - my version of keras
        # doesn't support it !!!
        first_layer = GlobalMaxPooling1D(data_format='channels_last')(x)
        second_layer = GlobalAveragePooling1D(data_format='channels_last')(x)
        pooling = Add()([first_layer, second_layer])
        print(np.shape(pooling))
        self.output_dim = int(np.shape(pooling)[-1])  # save output shape
        return pooling

    def compute_output_shape(self, input_shape):
        # Report (batch, channels) so the next layer can build its weights;
        # without this method, Dense cannot determine its input dim.
        return (input_shape[0], self.output_dim)
初始化模型:
# Fixed model: an explicit InputLayer supplies the input shape, so
# every subsequent layer can be built with the correct dimensions.
model = Sequential()
model.add(InputLayer((128,8)))  # entry point: (seq_len=128, features=8)
model.add(Residual())
model.add(Pooling())  # -> (batch, 256), via compute_output_shape
model.add(Dense(300, activation='relu'))
model.add(Dense(150, activation='relu'))
model.add(Dense(10, activation='softmax'))  # 10-class output
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
Out:
(?, 128, 8)
(?, 125, 256)
(?, 122, 256)
(?, 119, 256)
(?, 256)
模型总结(不知道为什么 Residual 和 Pooling 不显示参数。我想这些自定义类还需要一些额外的方法,Keras 才能统计其内部参数):
model.summary()
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
residual_10 (Residual) (None, 128, 8) 0
_________________________________________________________________
pooling_8 (Pooling) (None, 256) 0
_________________________________________________________________
dense_15 (Dense) (None, 300) 77100
_________________________________________________________________
dense_16 (Dense) (None, 150) 45150
_________________________________________________________________
dense_17 (Dense) (None, 10) 1510
=================================================================
Total params: 123,760
Trainable params: 123,760
Non-trainable params: 0
_________________________________________________________________
创建假数据并检查训练过程:
# Fake data matching the stated dimensions: 1000 samples of shape
# (128, 8), with every sample labelled as class 0 (one-hot).
dataset_data = np.random.randn(1000, 128, 8)
dataset_target = np.zeros((1000, 10))
dataset_target[:, 0] = 1
model.fit(np.array(dataset_data), dataset_target, epochs=1000,
validation_split=0.1, verbose=1, batch_size=8)
Train on 900 samples, validate on 100 samples
Epoch 1/1000
900/900 [==============================] - 2s 2ms/step - loss: 0.0235 - acc: 0.9911 - val_loss: 9.4426e-05 - val_acc: 1.0000
Epoch 2/1000
900/900 [==============================] - 1s 1ms/step - loss: 4.2552e-05 - acc: 1.0000 - val_loss: 1.7458e-05 - val_acc: 1.0000
Epoch 3/1000
900/900 [==============================] - 1s 1ms/step - loss: 1.1342e-05 - acc: 1.0000 - val_loss: 7.3141e-06 - val_acc: 1.0000
... and so on
看起来有效。