按层评估 U-Net

Evaluate U-Net by layer

我来自医学背景,是机器学习领域的新手。我正在尝试使用 keras 和 tensorflow 训练我的 U-Net 模型以进行图像分割。然而,我的损失值全是NaN,预测全黑。

我想逐层检查U-Net,但我不知道如何提供数据以及从哪里开始。我检查每一层的意思是,我想将我的图像提供给第一层,然后查看第一层的输出,然后移动到第二层,直到最后一层。只是想看看每一层的输出是如何产生的,并检查 nan 值是从哪里开始的。非常感谢您的帮助。

这些是我的代码。

import os

import matplotlib.pyplot as plt
import tensorflow as tf
# BUG FIX: this import was split across two lines
# ("from keras_preprocessing.image" / "import ImageDataGenerator"),
# which is a SyntaxError; it must be a single statement.
from keras_preprocessing.image import ImageDataGenerator
from tensorflow import keras


#Constants
SEED = 42  # shared seed so image and mask generators stay in sync
BATCH_SIZE_TRAIN = 16
BATCH_SIZE_TEST = 16

# All images/masks are resized to this resolution by the data generators.
IMAGE_HEIGHT = 512
IMAGE_WIDTH = 512
IMG_SIZE = (IMAGE_HEIGHT, IMAGE_WIDTH)

# Expected layout: data/{training,test}/{img,mask}/<class subdir>/<files>
data_dir = 'data'
data_dir_train = os.path.join(data_dir, 'training')
data_dir_train_image = os.path.join(data_dir_train, 'img')
data_dir_train_mask = os.path.join(data_dir_train, 'mask')

data_dir_test = os.path.join(data_dir, 'test')
data_dir_test_image = os.path.join(data_dir_test, 'img')
data_dir_test_mask = os.path.join(data_dir_test, 'mask')

# Dataset sizes, used below to derive steps per epoch.
NUM_TRAIN = 1413
NUM_TEST = 210

NUM_OF_EPOCHS = 10

def create_segmentation_generator_train(img_path, mask_path, BATCH_SIZE):
    """Yield synchronized (image_batch, mask_batch) pairs for training.

    Both generators share the same SEED so each image batch lines up with
    its mask batch. Images and masks are grayscale and rescaled to [0, 1].
    """
    data_gen_args = dict(rescale=1./255)
    img_datagen = ImageDataGenerator(**data_gen_args)
    # BUG FIX: this was ImageDataGenerator(*data_gen_args) — a single `*`
    # unpacks the dict's KEYS positionally (featurewise_center='rescale'),
    # so masks were never rescaled and stayed in [0, 255]. Targets outside
    # [0, 1] drive binary_crossentropy to NaN — the likely cause of the
    # NaN loss and all-black predictions.
    mask_datagen = ImageDataGenerator(**data_gen_args)

    img_generator = img_datagen.flow_from_directory(img_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    mask_generator = mask_datagen.flow_from_directory(mask_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    return zip(img_generator, mask_generator)


def create_segmentation_generator_test(img_path, mask_path, BATCH_SIZE):
    """Yield synchronized (image_batch, mask_batch) pairs for evaluation.

    Mirrors create_segmentation_generator_train: same SEED keeps image and
    mask batches aligned; both are grayscale and rescaled to [0, 1].
    """
    data_gen_args = dict(rescale=1./255)
    img_datagen = ImageDataGenerator(**data_gen_args)
    # BUG FIX: was ImageDataGenerator(*data_gen_args) — `*` unpacks the
    # dict's keys positionally, so the rescale was silently dropped and
    # masks stayed in [0, 255]. Use `**` to pass rescale=1./255 by keyword.
    mask_datagen = ImageDataGenerator(**data_gen_args)

    img_generator = img_datagen.flow_from_directory(img_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    mask_generator = mask_datagen.flow_from_directory(mask_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    return zip(img_generator, mask_generator)


def display(display_list):
    """Render the given arrays side by side in one matplotlib figure.

    Expects up to three entries, shown as 'Input Image', 'True Mask' and
    'Predicted Mask' in that order, each converted to a grayscale image.
    """
    titles = ['Input Image', 'True Mask', 'Predicted Mask']

    plt.figure(figsize=(15,15))
    for idx, item in enumerate(display_list):
        plt.subplot(1, len(display_list), idx + 1)
        plt.title(titles[idx])
        plt.imshow(tf.keras.preprocessing.image.array_to_img(item), cmap='gray')
    plt.show()


def show_dataset(datagen, num=1):
    """Draw `num` batches from `datagen` and display the first image/mask pair of each."""
    for _ in range(num):
        batch_images, batch_masks = next(datagen)
        display([batch_images[0], batch_masks[0]])


def unet(n_levels, initial_features=32, n_blocks=2, kernel_size=3, pooling_size=2, in_channels=1, out_channels=1):
    """Build a symmetric U-Net for IMAGE_HEIGHT x IMAGE_WIDTH inputs.

    Args:
        n_levels: number of resolution levels (encoder depth).
        initial_features: filters at the top level; doubled per level.
        n_blocks: Conv2D layers per level (how many conv in each level).
        kernel_size: convolution kernel size.
        pooling_size: pool/upsample factor between levels.
        in_channels: input channels (1 = grayscale).
        out_channels: output channels (1 = binary mask).

    Returns:
        A keras.Model named 'UNET-L{n_levels}-F{initial_features}'.
    """
    inputs = keras.layers.Input(shape=(IMAGE_HEIGHT, IMAGE_WIDTH, in_channels))
    x = inputs

    convpars = dict(kernel_size=kernel_size, activation='relu', padding='same')

    #downstream: conv blocks, saving each level's output for the skip
    #connections before halving the resolution
    skips = {}
    for level in range(n_levels):
        for _ in range(n_blocks):
            x = keras.layers.Conv2D(initial_features * 2 ** level, **convpars)(x)
        if level < n_levels - 1:
            skips[level] = x
            x = keras.layers.MaxPool2D(pooling_size)(x)

    #upstream: upsample, concatenate the matching skip, then conv blocks
    for level in reversed(range(n_levels-1)):
        x = keras.layers.Conv2DTranspose(initial_features * 2 ** level, strides=pooling_size, **convpars)(x)
        x = keras.layers.Concatenate()([x, skips[level]])
        for _ in range(n_blocks):
            x = keras.layers.Conv2D(initial_features * 2 ** level, **convpars)(x)

    #output head
    # BUG FIX: `activation` was computed but then ignored — the output layer
    # hard-coded 'sigmoid', so the softmax path for out_channels > 1 was dead.
    # Behavior is unchanged for the binary case (out_channels == 1).
    activation = 'sigmoid' if out_channels == 1 else 'softmax'
    x = keras.layers.Conv2D(out_channels, kernel_size=1, activation=activation, padding='same')(x)

    return keras.Model(inputs=[inputs], outputs=[x], name=f'UNET-L{n_levels}-F{initial_features}')


# Steps per epoch = dataset size // batch size.
EPOCH_STEP_TRAIN = NUM_TRAIN // BATCH_SIZE_TRAIN
# BUG FIX: was NUM_TEST // BATCH_SIZE_TRAIN — the test step count must use
# the test batch size (same value here, but wrong if the constants diverge).
EPOCH_STEP_TEST = NUM_TEST // BATCH_SIZE_TEST

# BUG FIX: train_generator and test_generator were referenced below but
# never created, which raises NameError before training even starts.
train_generator = create_segmentation_generator_train(data_dir_train_image, data_dir_train_mask, BATCH_SIZE_TRAIN)
test_generator = create_segmentation_generator_test(data_dir_test_image, data_dir_test_mask, BATCH_SIZE_TEST)

model = unet(4)
model.compile(optimizer="adam", loss='binary_crossentropy', metrics=['accuracy'])

# fit_generator is deprecated; Model.fit accepts generators directly.
model.fit(train_generator, steps_per_epoch=EPOCH_STEP_TRAIN, validation_data=test_generator, validation_steps=EPOCH_STEP_TEST, epochs=NUM_OF_EPOCHS)


def show_prediction(datagen, num=1):
    """Predict on `num` batches and display image, true mask and thresholded prediction."""
    for _ in range(num):
        images, masks = next(datagen)
        # Binarize the sigmoid output at 0.5 to get a mask.
        predicted = model.predict(images)[0] > 0.5
        display([images[0], masks[0], predicted])


show_prediction(test_generator, 2)

要逐层研究您的模型,请查看如何显示模型摘要以及如何保存模型的示例:

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# Minimal example of a functional-API model you can inspect layer by layer.

# Create the input (original Finnish comment: "luodaan input").
inputs=keras.Input(shape=(1,))

# Create the layers ("luodaan kerros").
dense=layers.Dense(64,activation="relu")
x=dense(inputs)
x=layers.Dense(64,activation="relu")(x)
outputs=layers.Dense(10)(x)

# Assemble the model ("Koostetaa").
model=keras.Model(inputs=inputs,outputs=outputs,name="Spesiaali")

# Inspect: prints every layer with its output shape and parameter count
# ("Tarkastellaan").
model.summary()

# Save the model to a folder so someone else can load and inspect it
# ("Tallennellaan").
model.save(".\model_to_be_investigated_by_someone_else_to_help_you")

...这让您可以看到整个模型结构,从而"调试您的 AI"。如果您自己没有找到解决方案,可以将示例中的最后一行添加到您自己的代码中,然后将生成的文件夹上传到例如 GitHub,并请其他人查看您的模型结构,以帮助您解决问题。

蓝色图说明了命令 model.summary() 的输出,红线说明了第一个密集层的输出形状。