# 导入必要的库
import os
import random
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D, BatchNormalization, GlobalAveragePooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import VGG16

# Dataset directory paths (each contains one subfolder per class).
train_dir = 'C:/Users/chaofan/Desktop/class/class/archive/seg_train/seg_train'  # training set
val_dir = 'C:/Users/chaofan/Desktop/class/class/archive/seg_test/seg_test'      # validation/test set

# Class names, sorted alphanumerically to match the index order that
# Keras' flow_from_directory assigns to class subdirectories.
# Without sorting, os.listdir order is filesystem-dependent, and
# classes[predicted_index] could map predictions to the wrong label.
classes = sorted(os.listdir(train_dir))

# Hyperparameters
batch_size = 64   # images per batch
IMG_HEIGHT = 150  # input image height (pixels)
IMG_WIDTH = 150   # input image width (pixels)
epochs = 10       # number of training epochs

# Data pipelines: rescale pixel values to [0, 1]; augment the training
# set with random horizontal flips only (validation data gets no
# augmentation beyond rescaling).
train_image_generator = ImageDataGenerator(rescale=1. / 255,
                                           horizontal_flip=True)
val_image_generator = ImageDataGenerator(rescale=1. / 255)

# Batched iterators that read images from the class-labeled subfolders,
# resize them to the model's input size, and emit one-hot labels.
train_data_gen = train_image_generator.flow_from_directory(
    directory=train_dir,
    batch_size=batch_size,
    target_size=(IMG_HEIGHT, IMG_WIDTH),
    shuffle=True,
    class_mode='categorical',
)

val_data_gen = val_image_generator.flow_from_directory(
    directory=val_dir,
    batch_size=batch_size,
    target_size=(IMG_HEIGHT, IMG_WIDTH),
    shuffle=True,  # NOTE(review): validation data is usually not shuffled; kept as original
    class_mode='categorical',
)

# Batches per epoch: len() of a DirectoryIterator is the number of
# *batches*, not the number of images. These values feed
# steps_per_epoch / validation_steps in model.fit below.
total_train, total_val = len(train_data_gen), len(val_data_gen)

# Report the batch counts (printed labels read "total ... batches").
print('总训练数据批次数:', total_train)
print('总验证数据批次数: ', total_val)

# Transfer-learning backbone: VGG16 convolutional base pre-trained on
# ImageNet, without its fully-connected classifier head.
base_model = VGG16(weights='imagenet',
                   include_top=False,
                   input_shape=(IMG_HEIGHT, IMG_WIDTH, 3))

# Freeze the entire backbone so only the new head below is trained.
for layer in base_model.layers:
    layer.trainable = False

# New classification head: flatten the conv features, funnel them
# through progressively narrower ReLU layers, end in a softmax over
# all classes.
model = Sequential([
    base_model,
    Flatten(),
    Dense(1024, activation='relu'),
    Dense(512, activation='relu'),
    Dense(64, activation='relu'),
    Dense(len(classes), activation='softmax'),
])

# Compile with Adam and categorical cross-entropy (matches the one-hot
# labels produced by class_mode='categorical'), tracking accuracy.
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Print a layer-by-layer summary of the assembled model.
model.summary()

# Train the new head on the augmented training data, evaluating on the
# validation generator at the end of every epoch.
history = model.fit(train_data_gen,
                    epochs=epochs,
                    steps_per_epoch=total_train,
                    validation_data=val_data_gen,
                    validation_steps=total_val)

# Pull the per-epoch metric curves out of the History object.
hist = history.history
acc, val_acc = hist['accuracy'], hist['val_accuracy']
loss, val_loss = hist['loss'], hist['val_loss']

# Two stacked panels: accuracy on top, loss underneath.
plt.figure(figsize=(8, 8))

plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])  # keep the top of the axis pinned at 1.0
plt.legend(loc='lower right')

plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.title('Training and Validation Loss')
plt.ylabel('Cross Entropy')
plt.ylim([0, 1.0])
plt.xlabel('epoch')
plt.legend(loc='upper right')

plt.show()

# Visualize the model's predictions on a few random validation images.
#
# BUG FIX: the validation directory contains one subfolder per class, so
# sampling os.listdir(val_dir) directly yields *directory* names and
# image.load_img then fails on them. Instead, pick a random class folder
# first, then a random image file inside it.
#
# The prediction index is mapped back to a label via the generator's own
# class_indices, so the mapping is correct regardless of os.listdir order.
index_to_class = {idx: name for name, idx in train_data_gen.class_indices.items()}

sample_paths = []
for _ in range(5):
    class_dir = os.path.join(val_dir, random.choice(os.listdir(val_dir)))
    sample_paths.append(os.path.join(class_dir, random.choice(os.listdir(class_dir))))

for image_path in sample_paths:
    # Load and resize the image to the network's expected input size.
    img = image.load_img(image_path, target_size=(IMG_HEIGHT, IMG_WIDTH))

    # HWC array -> batch of one, rescaled to [0, 1] to match training.
    img_array = image.img_to_array(img)
    img_array = np.expand_dims(img_array, axis=0)
    img_array = img_array / 255.0

    # Predict class probabilities and take the most likely label.
    predictions = model.predict(img_array)
    predicted_class = index_to_class[np.argmax(predictions[0])]

    # Display the image titled with its predicted class name.
    plt.imshow(img)
    plt.title(predicted_class)
    plt.axis('off')
    plt.show()

# Original source: https://www.cveoy.top/t/topic/pica