caffe reshape / upsample fully connected layer

Suppose we have a layer like this:

layer {
  name: "fully-connected"
  type: "InnerProduct"
  bottom: "bottom"
  top: "top"
  inner_product_param {
    num_output: 1
  }
}

The output is batch_size x 1. In several papers (for example link1, page 3, picture at the top, or link2, page 4 at the top) I have seen that such a layer is used at the end to produce a 2D image for pixel-wise prediction. How can this output be converted into a 2D image? I was thinking of reshape or deconvolution, but I don't understand how they work. A simple example would be helpful.

Update: my input image is 304x228 and my ground_truth (depth image) is 75x55.

################# Main net ##################

layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 96
    kernel_size: 11
    stride: 4
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "norm1"
  type: "LRN"
  bottom: "conv1"
  top: "norm1"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "norm1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 2
    kernel_size: 5
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "norm2"
  type: "LRN"
  bottom: "conv2"
  top: "norm2"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "norm2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "pool2"
  top: "conv3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "conv4"
  top: "conv4"
}
layer {
  name: "conv5"
  type: "Convolution"
  bottom: "conv4"
  top: "conv5"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu5"
  type: "ReLU"
  bottom: "conv5"
  top: "conv5"
}
layer {
  name: "pool5"
  type: "Pooling"
  bottom: "conv5"
  top: "pool5"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "fc6"
  type: "InnerProduct"
  bottom: "pool5"
  top: "fc6"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 4096
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relufc6"
  type: "ReLU"
  bottom: "fc6"
  top: "fc6"
}
layer {
  name: "drop6"
  type: "Dropout"
  bottom: "fc6"
  top: "fc6"
  dropout_param {
    dropout_ratio: 0.5
  }
}

layer {
  name: "fc7"
  type: "InnerProduct"
  bottom: "fc6"
  top: "fc7"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 4070
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}

layer {
  type: "Reshape"
  name: "reshape"
  bottom: "fc7"
  top: "fc7_reshaped"
  reshape_param {
    shape { dim:  1  dim: 1  dim:  55 dim: 74 }
  }
}

layer {
  name: "deconv1"
  type: "Deconvolution"
  bottom: "fc7_reshaped"
  top: "deconv1"
  convolution_param {
    num_output: 64
    kernel_size: 5
    pad: 2
    stride: 1
    #group: 256
    weight_filler {
      type: "bilinear"
    }
    bias_term: false
  }
}

#########################

layer {
  name: "conv6"
  type: "Convolution"
  bottom: "data"
  top: "conv6"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 63
    kernel_size: 9
    stride: 2
    pad: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu6"
  type: "ReLU"
  bottom: "conv6"
  top: "conv6"
}

layer {
  name: "pool6"
  type: "Pooling"
  bottom: "conv6"
  top: "pool6"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}

########################
layer {
  name: "concat"
  type: "Concat"
  bottom: "deconv1"
  bottom: "pool6"
  top: "concat"
  concat_param {
    concat_dim: 1
  }
}

layer {
  name: "conv7"
  type: "Convolution"
  bottom: "concat"
  top: "conv7"
  convolution_param {
    num_output: 64
    kernel_size: 5
    pad: 2
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.011
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "relu7"
  type: "ReLU"
  bottom: "conv7"
  top: "conv7"
  relu_param {
    negative_slope: 0.01
    engine: CUDNN
  }
}

layer {
  name: "conv8"
  type: "Convolution"
  bottom: "conv7"
  top: "conv8"
  convolution_param {
    num_output: 64
    kernel_size: 5
    pad: 2
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.011
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "relu8"
  type: "ReLU"
  bottom: "conv8"
  top: "conv8"
  relu_param {
    negative_slope: 0.01
    engine: CUDNN
  }
}

layer {
  name: "conv9"
  type: "Convolution"
  bottom: "conv8"
  top: "conv9"
  convolution_param {
    num_output: 1
    kernel_size: 5
    pad: 2
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.011
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
  name: "relu9"
  type: "ReLU"
  bottom: "conv9"
  top: "result"
  relu_param {
    negative_slope: 0.01
    engine: CUDNN
  }
}

The log:

I1108 19:34:57.239722  4277 data_layer.cpp:41] output data size: 1,1,228,304
I1108 19:34:57.243340  4277 data_layer.cpp:41] output data size: 1,1,55,74
I1108 19:34:57.247392  4277 net.cpp:150] Setting up conv1
I1108 19:34:57.247407  4277 net.cpp:157] Top shape: 1 96 55 74 (390720)
I1108 19:34:57.248191  4277 net.cpp:150] Setting up pool1
I1108 19:34:57.248196  4277 net.cpp:157] Top shape: 1 96 27 37 (95904)
I1108 19:34:57.253263  4277 net.cpp:150] Setting up conv2
I1108 19:34:57.253276  4277 net.cpp:157] Top shape: 1 256 27 37 (255744)
I1108 19:34:57.254202  4277 net.cpp:150] Setting up pool2
I1108 19:34:57.254220  4277 net.cpp:157] Top shape: 1 256 13 18 (59904)
I1108 19:34:57.269943  4277 net.cpp:150] Setting up conv3
I1108 19:34:57.269961  4277 net.cpp:157] Top shape: 1 384 13 18 (89856)
I1108 19:34:57.285303  4277 net.cpp:150] Setting up conv4
I1108 19:34:57.285338  4277 net.cpp:157] Top shape: 1 384 13 18 (89856)
I1108 19:34:57.294801  4277 net.cpp:150] Setting up conv5
I1108 19:34:57.294841  4277 net.cpp:157] Top shape: 1 256 13 18 (59904)
I1108 19:34:57.295207  4277 net.cpp:150] Setting up pool5
I1108 19:34:57.295210  4277 net.cpp:157] Top shape: 1 256 6 9 (13824)
I1108 19:34:57.743222  4277 net.cpp:150] Setting up fc6
I1108 19:34:57.743259  4277 net.cpp:157] Top shape: 1 4096 (4096)
I1108 19:34:57.881680  4277 net.cpp:150] Setting up fc7
I1108 19:34:57.881718  4277 net.cpp:157] Top shape: 1 4070 (4070)

I1108 19:34:57.881826  4277 net.cpp:150] Setting up reshape
I1108 19:34:57.881846  4277 net.cpp:157] Top shape: 1 1 55 74 (4070)

I1108 19:34:57.884768  4277 net.cpp:150] Setting up conv6
I1108 19:34:57.885309  4277 net.cpp:150] Setting up pool6
I1108 19:34:57.885327  4277 net.cpp:157] Top shape: 1 63 55 74 (256410)

I1108 19:34:57.885395  4277 net.cpp:150] Setting up concat
I1108 19:34:57.885412  4277 net.cpp:157] Top shape: 1 64 55 74 (260480)

I1108 19:34:57.886759  4277 net.cpp:150] Setting up conv7
I1108 19:34:57.886786  4277 net.cpp:157] Top shape: 1 64 55 74 (260480)

I1108 19:34:57.897269  4277 net.cpp:150] Setting up conv8
I1108 19:34:57.897303  4277 net.cpp:157] Top shape: 1 64 55 74 (260480)
I1108 19:34:57.899129  4277 net.cpp:150] Setting up conv9
I1108 19:34:57.899138  4277 net.cpp:157] Top shape: 1 1 55 74 (4070)

If you just need a fully connected network like the traditional multilayer perceptron, then use 2D blobs (shape (N, D)) and call the InnerProductLayer.

For pixel-wise prediction, the num_output value of the last fully connected layer will not be 1. It will be equal to the w*h of the image being predicted.

What made you think that this value would be 1?
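
As a minimal sketch of that pattern (layer and blob names here are illustrative, not taken from your net): a final InnerProduct layer predicts all w*h values at once, and a Reshape layer turns the flat vector back into a 2D map. In Reshape, dim: 0 copies the corresponding dimension (here the batch size) from the bottom blob:

layer {
  name: "fc_pred"
  type: "InnerProduct"
  bottom: "features"
  top: "fc_pred"
  inner_product_param {
    num_output: 4070   # w*h of the target map, e.g. 55*74
  }
}
layer {
  name: "reshape_pred"
  type: "Reshape"
  bottom: "fc_pred"
  top: "pred_2d"
  reshape_param {
    shape { dim: 0  dim: 1  dim: 55  dim: 74 }  # N x 1 x h x w
  }
}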

EDIT 1:

Below are the dimensions of each layer mentioned in the figure on page 3 of link1:

LAYER        OUTPUT DIM [c*h*w]
coarse1     96*h1*w1     conv layer
coarse2     256*h2*w2    conv layer
coarse3     384*h3*w3    conv layer
coarse4     384*h4*w4    conv layer
coarse5     256*h5*w5    conv layer
coarse6     4096*1*1     fc layer
coarse7     X*1*1        fc layer    where 'X' could be interpreted as w*h

To understand this further, let's assume we have a net that predicts the pixels of an image. The images are of size 10*10. The final output of the fc layer will then have a dimension of 100*1*1 (as in coarse7), which can be interpreted as 10*10.
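
For that hypothetical 10*10 case, a Reshape layer such as the following sketch (blob names made up) turns the 100-element fc output into a 1x1x10x10 blob without copying or changing any data:

layer {
  name: "reshape_10x10"
  type: "Reshape"
  bottom: "coarse7"      # the 100*1*1 fc output
  top: "coarse7_2d"
  reshape_param {
    shape { dim: 0  dim: 1  dim: 10  dim: 10 }  # batch, channels, h, w
  }
}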

The question now is how a 1D array can correctly predict a 2D image. For this, note that the loss for this output is computed against labels that correspond to the pixel data. During training, the weights therefore learn to predict the pixel data.
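
Concretely, EuclideanLoss in caffe only requires its two bottoms to contain the same number of elements per example, so the prediction (flat or reshaped) can be matched directly against a pixel-wise label blob. A sketch, assuming the blob names from the examples above:

layer {
  name: "loss"
  type: "EuclideanLoss"
  bottom: "pred_2d"   # or the flat fc output; only the element count must match
  bottom: "label"     # ground-truth pixel data with the same number of elements
  top: "loss"
}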

EDIT 2:

Trying to draw the net using draw_net.py in caffe gives you this:

The relu layers connected with conv6 and fc6 have the same name, which makes the connectivity in the drawn image confusing. I am not sure whether this causes problems during training, but I suggest renaming one of the relu layers to a unique name to avoid unforeseen issues.

Coming back to your question, there doesn't seem to be any upsampling happening after the fully connected layers. As seen in the log:

I1108 19:34:57.881680  4277 net.cpp:150] Setting up fc7
I1108 19:34:57.881718  4277 net.cpp:157] Top shape: 1 4070 (4070)

I1108 19:34:57.881826  4277 net.cpp:150] Setting up reshape
I1108 19:34:57.881846  4277 net.cpp:157] Top shape: 1 1 55 74 (4070)

I1108 19:34:57.884768  4277 net.cpp:150] Setting up conv6
I1108 19:34:57.885309  4277 net.cpp:150] Setting up pool6
I1108 19:34:57.885327  4277 net.cpp:157] Top shape: 1 63 55 74 (256410)

The output of fc7 has a dimension of 4070*1*1. This is reshaped to 1*55*74 so that it can be passed on as input to the deconv1 layer.

The output of the whole net is produced by conv9, whose output dimension is 1*55*74, exactly matching the dimension of the labels (the depth data).

If my answer is still not clear, please point out where exactly you think the upsampling is happening.
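
If actual upsampling is what you are after, the usual caffe idiom (as in FCN-style nets) is a Deconvolution layer with stride > 1 and a frozen bilinear kernel; your deconv1 uses stride: 1, so it keeps the 55x74 resolution. A sketch that would double it to 110x148 (layer and top names are illustrative):

layer {
  name: "upsample2x"
  type: "Deconvolution"
  bottom: "fc7_reshaped"
  top: "fc7_up2x"
  param { lr_mult: 0 }   # freeze the bilinear weights
  convolution_param {
    num_output: 1        # must equal the input channel count here
    kernel_size: 4       # out = stride*(in-1) + kernel - 2*pad = 2*in
    stride: 2
    pad: 1
    weight_filler { type: "bilinear" }
    bias_term: false
  }
}

With more than one input channel you would additionally set group equal to num_output so that each channel is upsampled independently.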