Error while implementing SegNet using MaxPoolingWithArgmax2D and MaxUnpooling2D
I am implementing the SegNet segmentation network in Python, and I am getting the following error:
```
Traceback (most recent call last):
  File "/scratch/pkasar.dbatu/training/NEW_SEGNET_updated_on_16_11_20.py", line 370, in <module>
    model=segnet(input_shape=(256,256,3),n_labels=1)
  File "/scratch/pkasar.dbatu/training/NEW_SEGNET_updated_on_16_11_20.py", line 161, in segnet
    conv_14 = Convolution2D(512, (kernel, kernel), padding="same")(unpool_1)
  File "/home/pkasar.dbatu/.conda/envs/dl/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py", line 897, in __call__
    self._maybe_build(inputs)
  File "/home/pkasar.dbatu/.conda/envs/dl/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py", line 2416, in _maybe_build
    self.build(input_shapes)  # pylint:disable=not-callable
  File "/home/pkasar.dbatu/.conda/envs/dl/lib/python3.7/site-packages/tensorflow/python/keras/layers/convolutional.py", line 153, in build
    input_channel = self._get_input_channel(input_shape)
  File "/home/pkasar.dbatu/.conda/envs/dl/lib/python3.7/site-packages/tensorflow/python/keras/layers/convolutional.py", line 293, in _get_input_channel
    raise ValueError('The channel dimension of the inputs '
ValueError: The channel dimension of the inputs should be defined. Found `None`.
```
The TensorFlow environment is:
tensorflow 2.2.0
tensorflow-gpu 2.2.0
keras-base 2.4.3
keras-gpu 2.4.3
python 3.7.9
Please help me. Thanks in advance.
The code snippet is as follows:
```
from keras.layers.convolutional import Convolution2D
from keras.layers.core import Activation, Reshape
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras import backend as K
from keras.layers import Layer, Input


class MaxPoolingWithArgmax2D(Layer):
    def __init__(self, pool_size=(2, 2), strides=(2, 2), padding="same", **kwargs):
        super(MaxPoolingWithArgmax2D, self).__init__(**kwargs)
        self.padding = padding
        self.pool_size = pool_size
        self.strides = strides

    def call(self, inputs, **kwargs):
        padding = self.padding
        pool_size = self.pool_size
        strides = self.strides
        if K.backend() == "tensorflow":
            ksize = [1, pool_size[0], pool_size[1], 1]
            padding = padding.upper()
            strides = [1, strides[0], strides[1], 1]
            output, argmax = K.tf.nn.max_pool_with_argmax(
                inputs, ksize=ksize, strides=strides, padding=padding
            )
        else:
            errmsg = "{} backend is not supported for layer {}".format(
                K.backend(), type(self).__name__
            )
            raise NotImplementedError(errmsg)
        argmax = K.cast(argmax, K.floatx())
        return [output, argmax]

    def compute_output_shape(self, input_shape):
        ratio = (1, 2, 2, 1)
        output_shape = [
            dim // ratio[idx] if dim is not None else None
            for idx, dim in enumerate(input_shape)
        ]
        output_shape = tuple(output_shape)
        return [output_shape, output_shape]

    def compute_mask(self, inputs, mask=None):
        return 2 * [None]
class MaxUnpooling2D(Layer):
    def __init__(self, size=(2, 2), **kwargs):
        super(MaxUnpooling2D, self).__init__(**kwargs)
        self.size = size

    def call(self, inputs, output_shape=None):
        updates, mask = inputs[0], inputs[1]
        with K.tf.variable_scope(self.name):
            mask = K.cast(mask, "int32")
            input_shape = K.tf.shape(updates, out_type="int32")
            # calculation new shape
            if output_shape is None:
                output_shape = (
                    input_shape[0],
                    input_shape[1] * self.size[0],
                    input_shape[2] * self.size[1],
                    input_shape[3],
                )
            self.output_shape1 = output_shape

            # calculation indices for batch, height, width and feature maps
            one_like_mask = K.ones_like(mask, dtype="int32")
            batch_shape = K.concatenate([[input_shape[0]], [1], [1], [1]], axis=0)
            batch_range = K.reshape(
                K.tf.range(output_shape[0], dtype="int32"), shape=batch_shape
            )
            b = one_like_mask * batch_range
            y = mask // (output_shape[2] * output_shape[3])
            x = (mask // output_shape[3]) % output_shape[2]
            feature_range = K.tf.range(output_shape[3], dtype="int32")
            f = one_like_mask * feature_range

            # transpose indices & reshape update values to one dimension
            updates_size = K.tf.size(updates)
            indices = K.transpose(K.reshape(K.stack([b, y, x, f]), [4, updates_size]))
            values = K.reshape(updates, [updates_size])
            ret = K.tf.scatter_nd(indices, values, output_shape)
            return ret

    def compute_output_shape(self, input_shape):
        mask_shape = input_shape[1]
        return (
            mask_shape[0],
            mask_shape[1] * self.size[0],
            mask_shape[2] * self.size[1],
            mask_shape[3],
        )
def segnet(input_shape, n_labels, kernel=3, pool_size=(2, 2), output_mode="softmax"):
    # encoder
    inputs = Input(shape=input_shape)
    conv_1 = Convolution2D(64, (kernel, kernel), padding="same")(inputs)
    conv_1 = BatchNormalization()(conv_1)
    conv_1 = Activation("relu")(conv_1)
    conv_2 = Convolution2D(64, (kernel, kernel), padding="same")(conv_1)
    conv_2 = BatchNormalization()(conv_2)
    conv_2 = Activation("relu")(conv_2)
    pool_1, mask_1 = MaxPoolingWithArgmax2D(pool_size)(conv_2)
    conv_3 = Convolution2D(128, (kernel, kernel), padding="same")(pool_1)
    conv_3 = BatchNormalization()(conv_3)
    conv_3 = Activation("relu")(conv_3)
    conv_4 = Convolution2D(128, (kernel, kernel), padding="same")(conv_3)
    conv_4 = BatchNormalization()(conv_4)
    conv_4 = Activation("relu")(conv_4)
    pool_2, mask_2 = MaxPoolingWithArgmax2D(pool_size)(conv_4)
    conv_5 = Convolution2D(256, (kernel, kernel), padding="same")(pool_2)
    conv_5 = BatchNormalization()(conv_5)
    conv_5 = Activation("relu")(conv_5)
    conv_6 = Convolution2D(256, (kernel, kernel), padding="same")(conv_5)
    conv_6 = BatchNormalization()(conv_6)
    conv_6 = Activation("relu")(conv_6)
    conv_7 = Convolution2D(256, (kernel, kernel), padding="same")(conv_6)
    conv_7 = BatchNormalization()(conv_7)
    conv_7 = Activation("relu")(conv_7)
    pool_3, mask_3 = MaxPoolingWithArgmax2D(pool_size)(conv_7)
    conv_8 = Convolution2D(512, (kernel, kernel), padding="same")(pool_3)
    conv_8 = BatchNormalization()(conv_8)
    conv_8 = Activation("relu")(conv_8)
    conv_9 = Convolution2D(512, (kernel, kernel), padding="same")(conv_8)
    conv_9 = BatchNormalization()(conv_9)
    conv_9 = Activation("relu")(conv_9)
    conv_10 = Convolution2D(512, (kernel, kernel), padding="same")(conv_9)
    conv_10 = BatchNormalization()(conv_10)
    conv_10 = Activation("relu")(conv_10)
    pool_4, mask_4 = MaxPoolingWithArgmax2D(pool_size)(conv_10)
    conv_11 = Convolution2D(512, (kernel, kernel), padding="same")(pool_4)
    conv_11 = BatchNormalization()(conv_11)
    conv_11 = Activation("relu")(conv_11)
    conv_12 = Convolution2D(512, (kernel, kernel), padding="same")(conv_11)
    conv_12 = BatchNormalization()(conv_12)
    conv_12 = Activation("relu")(conv_12)
    conv_13 = Convolution2D(512, (kernel, kernel), padding="same")(conv_12)
    conv_13 = BatchNormalization()(conv_13)
    conv_13 = Activation("relu")(conv_13)
    pool_5, mask_5 = MaxPoolingWithArgmax2D(pool_size)(conv_13)
    print("Build encoder done..")

    # decoder
    unpool_1 = MaxUnpooling2D(pool_size)([pool_5, mask_5])
    conv_14 = Convolution2D(512, (kernel, kernel), padding="same")(unpool_1)
    conv_14 = BatchNormalization()(conv_14)
    conv_14 = Activation("relu")(conv_14)
    conv_15 = Convolution2D(512, (kernel, kernel), padding="same")(conv_14)
    conv_15 = BatchNormalization()(conv_15)
    conv_15 = Activation("relu")(conv_15)
    conv_16 = Convolution2D(512, (kernel, kernel), padding="same")(conv_15)
    conv_16 = BatchNormalization()(conv_16)
    conv_16 = Activation("relu")(conv_16)
    unpool_2 = MaxUnpooling2D(pool_size)([conv_16, mask_4])
    conv_17 = Convolution2D(512, (kernel, kernel), padding="same")(unpool_2)
    conv_17 = BatchNormalization()(conv_17)
    conv_17 = Activation("relu")(conv_17)
    conv_18 = Convolution2D(512, (kernel, kernel), padding="same")(conv_17)
    conv_18 = BatchNormalization()(conv_18)
    conv_18 = Activation("relu")(conv_18)
    conv_19 = Convolution2D(256, (kernel, kernel), padding="same")(conv_18)
    conv_19 = BatchNormalization()(conv_19)
    conv_19 = Activation("relu")(conv_19)
    unpool_3 = MaxUnpooling2D(pool_size)([conv_19, mask_3])
    conv_20 = Convolution2D(256, (kernel, kernel), padding="same")(unpool_3)
    conv_20 = BatchNormalization()(conv_20)
    conv_20 = Activation("relu")(conv_20)
    conv_21 = Convolution2D(256, (kernel, kernel), padding="same")(conv_20)
    conv_21 = BatchNormalization()(conv_21)
    conv_21 = Activation("relu")(conv_21)
    conv_22 = Convolution2D(128, (kernel, kernel), padding="same")(conv_21)
    conv_22 = BatchNormalization()(conv_22)
    conv_22 = Activation("relu")(conv_22)
    unpool_4 = MaxUnpooling2D(pool_size)([conv_22, mask_2])
    conv_23 = Convolution2D(128, (kernel, kernel), padding="same")(unpool_4)
    conv_23 = BatchNormalization()(conv_23)
    conv_23 = Activation("relu")(conv_23)
    conv_24 = Convolution2D(64, (kernel, kernel), padding="same")(conv_23)
    conv_24 = BatchNormalization()(conv_24)
    conv_24 = Activation("relu")(conv_24)
    unpool_5 = MaxUnpooling2D(pool_size)([conv_24, mask_1])
    conv_25 = Convolution2D(64, (kernel, kernel), padding="same")(unpool_5)
    conv_25 = BatchNormalization()(conv_25)
    conv_25 = Activation("relu")(conv_25)
    conv_26 = Convolution2D(n_labels, (1, 1), padding="valid")(conv_25)
    conv_26 = BatchNormalization()(conv_26)
    conv_26 = Reshape(
        (input_shape[0] * input_shape[1], n_labels),
        input_shape=(input_shape[0], input_shape[1], n_labels),
    )(conv_26)
    outputs = Activation(output_mode)(conv_26)
    print("Build decoder done..")
    model = Model(inputs=inputs, outputs=outputs, name="SegNet")
    return model
```
You have input_shape=(256,256,3); the data must be shaped (number_of_samples, nb_channels, rows, cols). You need to make the following changes in the MaxUnpooling2D class definition:
```
import tensorflow as tf  # needed: the fixed call() uses tf directly


class MaxUnpooling2D(Layer):
    def __init__(self, size=(2, 2), **kwargs):
        super(MaxUnpooling2D, self).__init__(**kwargs)
        self.size = size

    def call(self, inputs, output_shape=None):
        updates, mask = inputs[0], inputs[1]
        with tf.compat.v1.variable_scope(self.name):
            mask = K.cast(mask, 'int32')
            input_shape = tf.shape(updates, out_type='int32')
            # compute the dynamic output shape if none was given
            if output_shape is None:
                output_shape = (
                    input_shape[0],
                    input_shape[1] * self.size[0],
                    input_shape[2] * self.size[1],
                    input_shape[3])
            # scatter the pooled values back to their argmax positions
            # in a flattened tensor
            ret = tf.scatter_nd(K.expand_dims(K.flatten(mask)),
                                K.flatten(updates),
                                [K.prod(output_shape)])
            # reshape using the *static* shape of `updates`, so the
            # height/width/channel dimensions of the result are defined;
            # this lets the following Conv2D infer its input channels
            input_shape = updates.shape
            out_shape = [-1,
                         input_shape[1] * self.size[0],
                         input_shape[2] * self.size[1],
                         input_shape[3]]
            return K.reshape(ret, out_shape)

    def get_config(self):
        config = super().get_config().copy()
        config.update({
            'size': self.size
        })
        return config

    def compute_output_shape(self, input_shape):
        mask_shape = input_shape[1]
        return (
            mask_shape[0],
            mask_shape[1] * self.size[0],
            mask_shape[2] * self.size[1],
            mask_shape[3]
        )
```
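With this change in place, a quick sanity check (a sketch, assuming the segnet() and layer definitions above are in one script) is to build the model and print its summary; the decoder convolutions can now infer their input channels:
```
# Build the model from the question; this previously raised
# "ValueError: The channel dimension of the inputs should be defined."
model = segnet(input_shape=(256, 256, 3), n_labels=1)
model.summary()  # the MaxUnpooling2D outputs now have defined H/W/C dims
```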
Just an addition to the answer: because of the get_config method I was getting a JSON serialization warning and therefore could not save the model. If anyone runs into a similar problem, fix it as follows:
```
    def get_config(self):
        config = super(MaxUnpooling2D, self).get_config()
        config.update({'size': self.size})
        return config
```
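For completeness, a sketch of saving and reloading such a model (the filename is only illustrative; custom_objects is the standard Keras mechanism for deserializing custom layers from their get_config() output):
```
from keras.models import load_model

# Save the model (HDF5 here; the filename is just an example).
model.save("segnet.h5")

# When reloading, Keras must be told about the custom layers so it can
# reconstruct them from their get_config() dictionaries.
model = load_model(
    "segnet.h5",
    custom_objects={
        "MaxPoolingWithArgmax2D": MaxPoolingWithArgmax2D,
        "MaxUnpooling2D": MaxUnpooling2D,
    },
)
```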