Accuracy of a U-Net for image segmentation is not rising when trained in fp16

I'm doing deep-learning image segmentation. When I train in fp32 I get a validation accuracy of about 97%, but when I try to train in fp16 the accuracy gets stuck at around 21%. I have changed the learning rate and the optimizer's epsilon; sometimes I get a different accuracy value, but it always plateaus like this.

Here is my code:

import tensorflow as tf
from tensorflow import keras  # needed for keras.Input / keras.Model below
from tensorflow.keras import layers, optimizers
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from tensorflow_examples.models.pix2pix import pix2pix

# Let the GPU allocator grow instead of reserving all memory up front
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)

# Mixed precision: float16 compute, float32 variables
# (on TF >= 2.4 this is tf.keras.mixed_precision.set_global_policy)
tf.keras.mixed_precision.experimental.set_policy('mixed_float16')


def get_model(img_size, num_classes):
    inputs = keras.Input(shape=img_size + (3,))

    ### [First half of the network: downsampling inputs] ###

    # Entry block
    x = layers.Conv2D(32, 3, strides=2, padding="same")(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    previous_block_activation = x  # Set aside residual

    # Blocks 1, 2, 3 are identical apart from the feature depth.
    for filters in [64, 128, 256]:
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.MaxPooling2D(3, strides=2, padding="same")(x)

        # Project residual
        residual = layers.Conv2D(filters, 1, strides=2, padding="same")(
            previous_block_activation
        )
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    ### [Second half of the network: upsampling inputs] ###

    for filters in [256, 128, 64, 32]:
        x = layers.Activation("relu")(x)
        x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.UpSampling2D(2)(x)

        # Project residual
        residual = layers.UpSampling2D(2)(previous_block_activation)
        residual = layers.Conv2D(filters, 1, padding="same")(residual)
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    # Add a per-pixel classification layer
    outputs = layers.Conv2D(num_classes, 3, activation="softmax", padding="same")(x)

    # Define the model
    model = keras.Model(inputs, outputs)
    return model


# Free up RAM in case the model definition cells were run multiple times
keras.backend.clear_session()

# Build model
model = get_model(img_size, num_classes)
model.compile(
    loss="categorical_crossentropy",
    optimizer=optimizers.RMSprop(learning_rate=5e-5),  # `lr` is deprecated
    metrics=["acc"],
)



# Train the model, doing validation at the end of each epoch.
epochs = 30
model_history = model.fit(train_gen, epochs=epochs, validation_data=val_gen)

What could be going wrong? Thanks in advance.

When training in fp16 (mixed precision), you need the last layer to produce fp32 outputs. Softmax followed by cross-entropy is numerically unstable in float16: its narrow range makes small probabilities underflow to zero, which is why the accuracy gets stuck. Keep the convolutions in float16 but cast the final activation back to float32.

Change this:

outputs = layers.Conv2D(num_classes, 3, activation="softmax", padding="same")(x)

to this:

outputs = layers.Conv2D(num_classes, 3, padding="same")(x)
outputs = layers.Activation("softmax", dtype="float32")(outputs)  # softmax computed in float32
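As a minimal self-contained sketch of the pattern (the 160x160 input size, the num_classes value, and the TF >= 2.4 tf.keras.mixed_precision.set_global_policy API are my assumptions, not from the question), you can check that only the final activation runs in float32:

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# TF >= 2.4 equivalent of the experimental set_policy call above (assumption)
tf.keras.mixed_precision.set_global_policy("mixed_float16")

num_classes = 4  # hypothetical value for illustration
inputs = keras.Input(shape=(160, 160, 3))
x = layers.Conv2D(32, 3, strides=2, padding="same")(inputs)
x = layers.Conv2D(num_classes, 3, padding="same")(x)  # float16 logits under the policy
outputs = layers.Activation("softmax", dtype="float32")(x)  # softmax computed in float32

model = keras.Model(inputs, outputs)
print(x.dtype)        # float16
print(outputs.dtype)  # float32

Note that with the mixed_float16 policy Keras applies dynamic loss scaling for you when you train with model.fit, so the output cast is the only change you need to make by hand.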