U-Net semantic segmentation model fails when tested on new images
I have a U-Net model with pretrained weights from an autoencoder that was trained on an image dataset of 1400 images. I am trying to perform semantic segmentation with 1400 labeled images from a clinical dataset. The model performs well on my test image dataset, with iou_score=0.97, but when I test it on random images outside my dataset I get a very bad segmentation result. I don't understand why. Please review my code and tell me where I went wrong.
Training on my dataset and labels:
#########################################################################
# Load data for U-Net training.
#########################################################################
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ["SM_FRAMEWORK"] = "tf.keras"  # must be set before importing segmentation_models

import glob
import cv2
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
train_images = []
#Resizing images is optional, CNNs are ok with large images
SIZE_X = 256 #Resize images (height = X, width = Y)
SIZE_Y = 256
#Capture training image info as a list
directory_path = '/content/drive/MyDrive/Colab Notebooks/semantic/images/'
list_of_files = sorted(filter(os.path.isfile, glob.glob(directory_path + '*.jpg', recursive=True)))
for img_path in list_of_files:
    print(img_path)
    img = cv2.imread(img_path, cv2.IMREAD_COLOR)  # OpenCV loads images as BGR
    img = cv2.resize(img, (SIZE_Y, SIZE_X))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)    # convert BGR to RGB
    train_images.append(img)
#Convert list to array for machine learning processing
train_images = np.array(train_images)
train_masks = []
labels_path = '/content/drive/MyDrive/Colab Notebooks/semantic/lables/'
list_of_labels = sorted(filter(os.path.isfile, glob.glob(labels_path + '*.png', recursive=True)))
for mask_path in list_of_labels:
    print(mask_path)
    mask = cv2.imread(mask_path, 0)
    mask = cv2.resize(mask, (SIZE_Y, SIZE_X))
    train_masks.append(mask)
#Convert list to array for machine learning processing
train_masks = np.array(train_masks)
#Normalize images
image_dataset = np.array(train_images)/255.
#Do not normalize masks, just rescale to 0 to 1.
mask_dataset = np.expand_dims(np.array(train_masks), 3) / 255.
X_train, X_test, y_train, y_test = train_test_split(image_dataset, mask_dataset, test_size = 0.20, random_state = 0)
#Load unet model and load pretrained weights
from models import build_autoencoder, build_encoder, build_unet
from tensorflow.keras.optimizers import Adam
import segmentation_models as sm  # provides the loss and metric used below
input_shape = (256, 256, 3)
pre_trained_unet_model = build_unet(input_shape)
pre_trained_unet_model.load_weights('/content/drive/MyDrive/Colab Notebooks/semantic/unet_clinical_model_weights.h5')
pre_trained_unet_model_weights = pre_trained_unet_model.get_weights()[0][1]
pretrained_encoder_wts = np.load('/content/drive/MyDrive/Colab Notebooks/semantic/pretrained_clinical_encoder-weights_300e.npy')
# Compare element-wise; comparing .all() to .all() would only compare two booleans
if np.array_equal(pre_trained_unet_model_weights, pretrained_encoder_wts):
    print("Both weights are identical")
else:
    print("Something wrong, weights are different")
pre_trained_unet_model.compile('Adam', loss=sm.losses.binary_focal_jaccard_loss, metrics=[sm.metrics.iou_score])
####################################################################
#Train the model
batch_size=16
pre_trained_unet_model_history = pre_trained_unet_model.fit(
    X_train, y_train,
    verbose=1,
    batch_size=batch_size,
    validation_data=(X_test, y_test),
    shuffle=False,
    epochs=300)
pre_trained_unet_model.save('/content/drive/MyDrive/Colab Notebooks/semantic/pre_trained_unet_model_300epochs.h5')
Testing my model:
from keras.models import load_model
pre_trained_unet_model = load_model('/content/drive/MyDrive/Colab Notebooks/semantic/pre_trained_unet_model_300epochs.h5', compile=False)
my_model = pre_trained_unet_model
import random
test_img_number = random.randint(0, X_test.shape[0]-1)
#test_img_number = 119
test_img = X_test[test_img_number]
ground_truth = y_test[test_img_number]
test_img_input = np.expand_dims(test_img, 0)
prediction = (my_model.predict(test_img_input)[0, :, :, 0] > 0.5).astype(np.uint8)
plt.figure(figsize=(16, 8))
plt.subplot(231)
plt.title('Testing Image')
plt.imshow(test_img, cmap='gray')
plt.subplot(232)
plt.title('Testing Label')
plt.imshow(ground_truth[:,:,0], cmap='gray')
plt.subplot(233)
plt.title('Prediction on test image')
plt.imshow(prediction, cmap='gray')
plt.show()
When I test the same model on a random clinical ulcer image, I get a very bad segmentation result. Here is the code used to test the model on a random image:
from keras.models import load_model
import segmentation_models as sm
import numpy as np
import matplotlib.pyplot as plt
model = load_model('/content/drive/MyDrive/Colab Notebooks/semantic/pre_trained_unet_model_300epochs.h5', compile=False) # load the model
model.compile(loss=sm.losses.binary_focal_jaccard_loss, optimizer='Adam', metrics=[sm.metrics.iou_score])
from keras.preprocessing import image
test_image = image.load_img('/content/drive/MyDrive/Colab Notebooks/semantic/images/Foot ulcer 3-3.jpg', target_size=(256, 256))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
result = model.predict(test_image)
result_img = result.reshape(256,256)
plt.imshow(result_img, cmap='gray')
Please suggest if there is anything wrong with my testing procedure.
You are normalizing the data before training and validation in this line:
image_dataset = np.array(train_images) / 255.
So you must do the same thing at test time:
from keras.models import load_model
import segmentation_models as sm
import numpy as np
import matplotlib.pyplot as plt
model = load_model('/content/drive/MyDrive/Colab Notebooks/semantic/pre_trained_unet_model_300epochs.h5', compile=False) # load the model
model.compile(loss=sm.losses.binary_focal_jaccard_loss, optimizer='Adam', metrics=[sm.metrics.iou_score])
from keras.preprocessing import image
test_image = image.load_img('/content/drive/MyDrive/Colab Notebooks/semantic/images/Foot ulcer 3-3.jpg', target_size=(256, 256))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0) / 255.0  # normalize exactly as during training
result = model.predict(test_image)
result_img = result.reshape(256,256)
plt.imshow(result_img, cmap='gray')
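As a follow-up, one way to avoid this class of bug entirely is to route both training and inference through a single preprocessing helper, so the resize, channel order, and scaling can never drift apart. Here is a minimal sketch based on the training code above (the helper name preprocess_input_image is hypothetical; it assumes the cv2-based BGR-to-RGB, 256x256, /255 pipeline your training loop uses):

import cv2
import numpy as np

SIZE_X, SIZE_Y = 256, 256  # must match the training-time resize

def preprocess_input_image(img_path):
    """Load one image with the exact preprocessing used at training time."""
    img = cv2.imread(img_path, cv2.IMREAD_COLOR)  # OpenCV loads BGR
    img = cv2.resize(img, (SIZE_Y, SIZE_X))       # same (width, height) order as training
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)    # same channel order as training
    return np.expand_dims(img, 0) / 255.0         # same batch dim and normalization

# Usage (hypothetical path from the question):
# test_image = preprocess_input_image('/content/drive/MyDrive/Colab Notebooks/semantic/images/Foot ulcer 3-3.jpg')
# result = model.predict(test_image)

Note also that your in-distribution test thresholds the prediction with > 0.5, while this snippet plots the raw sigmoid output; applying the same threshold makes the two results directly comparable.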