这段代码是一个基于U-Net的图像分割模型,主要实现了以下功能:

  1. 读取训练集和验证集的图像和掩模数据,对图像进行预处理(resize到512x512,归一化)。
  2. 利用ImageDataGenerator进行数据增强,生成训练集和验证集的批数据。
  3. 基于resnet34的U-Net模型进行训练,得到分割模型。
  4. 对测试集的图像进行预测,得到分割结果,计算分割模型性能指标(Dice系数、IoU、召回率、精确率)。
  5. 可视化分割结果和掩模数据,保存评估结果。

代码中的网络结构是U-Net,输入为一张512x512的RGB图像,输出为一张512x512的概率图(sigmoid激活,每个像素的取值在0到1之间,表示该像素属于感兴趣区域的概率),经阈值化后得到二值分割结果。U-Net由对称的编码器和解码器组成,通过跳跃连接将编码器的输出与解码器的输入相结合,从而使得解码器能够更好地恢复细节信息。具体网络结构可以通过调用segmentation_models库中的Unet函数实现。

代码详细解析如下:

import os
import sys
import random

import numpy as np
import cv2
import matplotlib.pyplot as plt


from tensorflow import keras
import tensorflow as tf
import cv2
import os
import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from tqdm import tqdm
#gpus = tf.config.list_physical_devices(device_type='GPU')
#for gpu in gpus:
#    tf.config.experimental.set_memory_growth(device=gpu, enable=True)
# Restrict TensorFlow to GPU index 2.
os.environ['CUDA_VISIBLE_DEVICES'] = '2'

# Training image / mask directories; a mask shares its filename with its image.
dir_img = "Train_Data/Images/"
dir_mask = "Train_Data/Vts/"

# Target spatial size and channel count of the network input.
width = 512
height = 512
channels = 3

tab_img = []
tab_mask = []

# List the directory once (the original called os.listdir twice per tick and
# never used the enumerate index).
train_files = os.listdir(dir_img)
for fname in tqdm(train_files, total=len(train_files)):
    # OpenCV loads BGR; resize to the network size and scale to [0, 1].
    tab_img.append(cv2.resize(cv2.imread(os.path.join(dir_img, fname)), (width, height)) / 255)

    # The mask encodes the region of interest as 255 in channel 2 (red in BGR).
    # NOTE(review): the default bilinear resize blurs mask edges before the
    # == 255 test; cv2.INTER_NEAREST may be intended — confirm before changing.
    img_mask = cv2.resize(cv2.imread(os.path.join(dir_mask, fname)), (width, height))[:, :, 2]
    img_mask_result = np.zeros(shape=(height, width, 1), dtype=np.float32)
    img_mask_result[:, :, 0][img_mask == 255] = 1.
    tab_mask.append(img_mask_result)

tab_img = np.array(tab_img)
tab_mask = np.array(tab_mask)


from keras.preprocessing import image

# NOTE(review): this entire section is superseded below, where image_datagen,
# mask_datagen, x, y, x_val, y_val, train_generator and val_generator are all
# rebuilt from the full training arrays and the separate Valid_Data folder.
# As written it is dead code; kept byte-identical pending confirmation.

# Creating the training Image and Mask generator
# Identical augmentation parameters for images and masks; the shared seed below
# keeps each augmented image aligned with its augmented mask.
image_datagen = image.ImageDataGenerator(shear_range=0.5, rotation_range=50, zoom_range=0.2, width_shift_range=0.2, height_shift_range=0.2, fill_mode='reflect')
mask_datagen = image.ImageDataGenerator(shear_range=0.5, rotation_range=50, zoom_range=0.2, width_shift_range=0.2, height_shift_range=0.2, fill_mode='reflect')

seed = 42
# NOTE(review): ImageDataGenerator.fit only computes featurewise statistics;
# with no featurewise options enabled these calls are effectively no-ops.
image_datagen.fit(tab_img[:int(tab_img.shape[0]*0.9)], augment=True, seed=seed)
mask_datagen.fit(tab_mask[:int(tab_mask.shape[0]*0.9)], augment=True, seed=seed)

BATCH_SIZE = 16
# First 90% of the arrays as an in-memory training split.
x=image_datagen.flow(tab_img[:int(tab_img.shape[0]*0.9)],batch_size=BATCH_SIZE,shuffle=True, seed=seed)
y=mask_datagen.flow(tab_mask[:int(tab_mask.shape[0]*0.9)],batch_size=BATCH_SIZE,shuffle=True, seed=seed)

# Creating the validation Image and Mask generator
# No augmentation for validation data.
image_datagen_val = image.ImageDataGenerator()
mask_datagen_val = image.ImageDataGenerator()

image_datagen_val.fit(tab_img[int(tab_img.shape[0]*0.9):], augment=True, seed=seed)
mask_datagen_val.fit(tab_mask[int(tab_mask.shape[0]*0.9):], augment=True, seed=seed)

# Last 10% of the arrays as the validation split.
x_val=image_datagen_val.flow(tab_img[int(tab_img.shape[0]*0.9):],batch_size=BATCH_SIZE,shuffle=True, seed=seed)
y_val=mask_datagen_val.flow(tab_mask[int(tab_mask.shape[0]*0.9):],batch_size=BATCH_SIZE,shuffle=True, seed=seed)

#creating a training and validation generator that generate masks and images
train_generator = zip(x, y)
val_generator = zip(x_val, y_val)

# Validation image / mask directories; a mask shares its filename with its image.
Valid_dir_img = "Valid_Data/Images/"
Valid_dir_mask = "Valid_Data/Vts/"

# Same network input geometry as the training data.
width = 512
height = 512
channels = 3

Valid_tab_img = []
Valid_tab_mask = []

# List the directory once (the original called os.listdir twice per tick and
# never used the enumerate index).
valid_files = os.listdir(Valid_dir_img)
for fname in tqdm(valid_files, total=len(valid_files)):
    # OpenCV loads BGR; resize to the network size and scale to [0, 1].
    Valid_tab_img.append(cv2.resize(cv2.imread(os.path.join(Valid_dir_img, fname)), (width, height)) / 255)

    # The mask encodes the region of interest as 255 in channel 2 (red in BGR).
    img_mask = cv2.resize(cv2.imread(os.path.join(Valid_dir_mask, fname)), (width, height))[:, :, 2]
    img_mask_result = np.zeros(shape=(height, width, 1), dtype=np.float32)
    img_mask_result[:, :, 0][img_mask == 255] = 1.
    Valid_tab_mask.append(img_mask_result)

Valid_tab_img = np.array(Valid_tab_img)
Valid_tab_mask = np.array(Valid_tab_mask)

from keras.preprocessing import image

# Creating the training Image and Mask generator
# Identical augmentation parameters for images and masks; the shared seed below
# keeps each augmented image aligned with its augmented mask.
# NOTE(review): shear/rotation with interpolation and fill_mode='reflect' can
# produce non-binary mask pixels after augmentation — presumably acceptable for
# a BCE loss, but confirm.
image_datagen = image.ImageDataGenerator(shear_range=0.5, rotation_range=50, zoom_range=0.2, width_shift_range=0.2, height_shift_range=0.2, fill_mode='reflect')
mask_datagen = image.ImageDataGenerator(shear_range=0.5, rotation_range=50, zoom_range=0.2, width_shift_range=0.2, height_shift_range=0.2, fill_mode='reflect')

seed = 42
# NOTE(review): ImageDataGenerator.fit only computes featurewise statistics;
# with no featurewise options enabled these calls are effectively no-ops.
image_datagen.fit(tab_img, augment=True, seed=seed)
mask_datagen.fit(tab_mask, augment=True, seed=seed)

BATCH_SIZE = 16
# Infinite, seed-synchronized batch streams over the full training arrays.
x=image_datagen.flow(tab_img,batch_size=BATCH_SIZE,shuffle=True, seed=seed)
y=mask_datagen.flow(tab_mask,batch_size=BATCH_SIZE,shuffle=True, seed=seed)

# Creating the validation Image and Mask generator
# No augmentation for validation data.
image_datagen_val = image.ImageDataGenerator()
mask_datagen_val = image.ImageDataGenerator()

image_datagen_val.fit(Valid_tab_img, augment=True, seed=seed)
mask_datagen_val.fit(Valid_tab_mask, augment=True, seed=seed)

x_val=image_datagen_val.flow(Valid_tab_img,batch_size=BATCH_SIZE,shuffle=True, seed=seed)
y_val=mask_datagen_val.flow(Valid_tab_mask,batch_size=BATCH_SIZE,shuffle=True, seed=seed)

#creating a training and validation generator that generate masks and images
# zip of two infinite iterators yields (image_batch, mask_batch) pairs forever.
train_generator = zip(x, y)
val_generator = zip(x_val, y_val)

#pip install segmentation-models

from segmentation_models import Unet
import segmentation_models as sm

# segmentation_models must be told which Keras implementation to target.
sm.set_framework('tf.keras')
sm.framework()

# U-Net with an ImageNet-pretrained ResNet-34 encoder; a single sigmoid output
# channel gives a per-pixel foreground probability for a 512x512 RGB input.
model = Unet('resnet34', encoder_weights='imagenet', classes=1, input_shape=(512, 512, 3), activation='sigmoid')

model.compile('Adam', loss="binary_crossentropy", metrics=["acc"])
model.summary()
from keras.models import load_model
#model.load_weights('Unet_weights.h5')

# Model.fit_generator is deprecated and removed in recent TF 2.x; Model.fit
# accepts generators directly with identical semantics.
results = model.fit(train_generator, validation_data=val_generator, validation_steps=500, steps_per_epoch=100, epochs=100)
model.save_weights('Unet_weights.h5')



def SEG_EVAL(Seg, GT):
    """Compute overlap metrics between a binary segmentation and ground truth.

    Seg : segmented image, binary (1 = region of interest, 0 = background).
    GT  : ground truth, binary (1 = region of interest, 0 = background).

    Returns [dice_coefficient, IoU, recall, precision].
    """
    # Bug fix: the original called Seg.astype(np.bool) and discarded the result
    # (astype returns a copy, it does not mutate), and the np.bool alias was
    # removed in NumPy 1.24, so that line raises AttributeError on modern
    # NumPy. Convert explicitly and keep the converted arrays.
    Seg = np.asarray(Seg).astype(bool)
    GT = np.asarray(GT).astype(bool)

    # True positives: pixels marked foreground in both masks.
    TP = np.logical_and(Seg, GT)
    tp = TP.sum()
    seg_sum = Seg.sum()
    gt_sum = GT.sum()

    # Guard the 0/0 cases the original left as nan/warning: by convention two
    # empty masks agree perfectly.
    dice_coefficient = 2. * tp / (seg_sum + gt_sum) if (seg_sum + gt_sum) else 1.0

    union = gt_sum + seg_sum - tp
    IoU = tp / union if union else 1.0

    recall = tp / gt_sum if gt_sum else 1.0
    precision = tp / seg_sum if seg_sum else 1.0

    return [dice_coefficient, IoU, recall, precision]
    
def VIS_EVAL(Image, Seg, GT, option = 'contour'):
    """Overlay the segmentation and the ground truth on a gray image.

    Image  : gray image (single channel).
    Seg    : segmented image, binary (1 = region of interest, 0 = background).
    GT     : ground truth, binary (1 = region of interest, 0 = background).
    option : 'contour' to draw region outlines, 'region' to tint whole regions.

    Returns (VIS_Seg, VIS_GT): the image blended with the segmentation overlay
    and with the ground-truth overlay, respectively.

    Raises ValueError for an unknown option (the original silently fell
    through and crashed with UnboundLocalError at the return).
    """
    # Promote everything to 3-channel uint8 so addWeighted can blend them.
    Image = cv2.cvtColor(np.array(Image, np.uint8), cv2.COLOR_GRAY2RGB)
    GT = np.array(255*GT, dtype=np.uint8)
    Seg = np.array(255*Seg, dtype=np.uint8)
    GT = cv2.cvtColor(GT, cv2.COLOR_GRAY2RGB)
    Seg = cv2.cvtColor(Seg, cv2.COLOR_GRAY2RGB)

    if option == 'region':
        # Color the overlays differently: GT keeps channels 0 and 2, Seg keeps
        # only channel 1. NOTE(review): `1:2` clears a single channel of GT —
        # if a pure one-channel tint was intended this may be a bug; the
        # original channel logic is preserved as-is.
        GT[:, :, 1:2] = 0*GT[:, :, 1:2]
        Seg[:, :, 0] = 0*Seg[:, :, 0]
        Seg[:, :, 2] = 0*Seg[:, :, 2]

        VIS_Seg = cv2.addWeighted(Image, 1, Seg, 0.5, 0)
        VIS_GT = cv2.addWeighted(Image, 1, GT, 0.5, 0)

    elif option == 'contour':
        # Canny on the binary masks yields thin outlines of each region.
        contours_GT = cv2.Canny(GT, 250, 260)
        contours_Seg = cv2.Canny(Seg, 250, 260)

        contours_GT = cv2.cvtColor(contours_GT, cv2.COLOR_GRAY2RGB)
        contours_Seg = cv2.cvtColor(contours_Seg, cv2.COLOR_GRAY2RGB)

        # Same channel scheme as the 'region' branch.
        contours_GT[:, :, 1:2] = 0*contours_GT[:, :, 1:2]
        contours_Seg[:, :, 0] = 0*contours_Seg[:, :, 0]
        contours_Seg[:, :, 2] = 0*contours_Seg[:, :, 2]

        VIS_Seg = cv2.addWeighted(Image, 1, contours_Seg, 0.5, 0)
        VIS_GT = cv2.addWeighted(Image, 1, contours_GT, 0.5, 0)

    else:
        raise ValueError("option must be 'contour' or 'region', got %r" % (option,))

    return VIS_Seg, VIS_GT

# Evaluate the trained model on the held-out test set and save overlay images.
dir_img = "Test_Data/Images/"
dir_mask = "Test_Data/Vts/"
Eval = []

# Same network input geometry as during training.
width = 512
height = 512
channels = 3

for i in os.listdir(dir_img):
    # Network input: BGR image scaled to [0, 1]. `Image` keeps channel 2
    # (red in BGR) as an 8-bit gray image for visualization only.
    Image_Test = cv2.resize(cv2.imread(dir_img+i), (width, height))/255
    Image = cv2.resize(cv2.imread(dir_img+i), (width, height))[:,:,2]
    VT = cv2.resize(cv2.imread(dir_mask+i), (width, height))[:,:,2]
    VT = (VT == 255).astype(int)

    # Add the batch dimension expected by model.predict.
    Image_Test = np.expand_dims(Image_Test, axis=0)
    Seg = model.predict(Image_Test)
    # Binarize the sigmoid output. NOTE(review): (p*255) > 5 is a very low
    # threshold (~0.02 probability), not the usual 0.5 — preserved as-is,
    # confirm it is intentional.
    Seg = ((Seg[0,:,:,0]*255) > 5).astype(int)

    Eval.append(SEG_EVAL(Seg, VT))

    # Save contour overlays for the prediction and the ground truth.
    VIS_Seg, VIS_GT = VIS_EVAL(Image, Seg, VT, option = 'contour')
    cv2.imwrite('CNN_Results/Seg_'+i, VIS_Seg)
    cv2.imwrite('CNN_Results/GT_'+i, VIS_GT)

# One CSV row per test image: dice, IoU, recall, precision.
# (The unused Dice/TP/FP accumulators from the original were removed.)
np.savetxt("CNN_Results.csv", Eval, delimiter=",")
    

代码解读

  1. 数据加载与预处理

    • 代码首先定义了训练集和验证集的图像和掩模数据路径,以及图像尺寸和通道数。
    • 然后读取图像文件并进行resize和归一化处理,将图像数据存储到tab_img和tab_mask列表中。
    • 接着将列表转换为numpy数组,方便后续操作。
  2. 数据增强

    • 使用ImageDataGenerator进行数据增强,对图像进行随机剪切、旋转、缩放、平移等操作,从而扩充训练数据,提高模型泛化能力。
    • 通过fit方法让ImageDataGenerator计算数据的统计量(仅在启用featurewise选项时生效),再使用flow方法按批次生成增强后的数据。
  3. 模型构建与训练

    • 使用segmentation_models库中的Unet函数构建U-Net模型,选择ResNet34作为编码器,并设置输入尺寸和输出类别数。
    • 使用Adam优化器和binary_crossentropy损失函数进行模型训练,训练过程中记录模型的准确率和损失值。
    • 训练结束后,将模型权重保存到Unet_weights.h5文件中。
  4. 模型评估

    • 读取测试集的图像和掩模数据,进行resize和预处理。
    • 使用训练好的模型对测试集图像进行预测,得到分割结果。
    • 计算分割结果的Dice系数、IoU、召回率、精确率等指标,并将结果保存到CNN_Results.csv文件中。
  5. 结果可视化

    • 使用VIS_EVAL函数将分割结果、掩模数据和原图进行可视化,并保存到CNN_Results文件夹中。

总结

这段代码完整地展示了使用U-Net进行图像分割的过程,涵盖了数据预处理、数据增强、模型训练、模型评估和结果可视化等多个步骤。通过代码解析和注释,可以帮助用户理解U-Net网络结构和图像分割的基本原理,并为用户进行图像分割相关的开发提供参考。

基于U-Net的图像分割模型代码解析与性能评估

原文地址: https://www.cveoy.top/t/topic/nN2v 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录