# Importing the required libraries
import os
import random
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D, BatchNormalization, GlobalAveragePooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import VGG16

# Paths to the training and validation dataset directories
train_dir = 'C:/Users/chaofan/Desktop/class/class/archive/seg_train/seg_train'  # Training dataset directory
val_dir = 'C:/Users/chaofan/Desktop/class/class/archive/seg_test/seg_test'  # Validation dataset directory

# Class names, sorted alphabetically so they match the class-index ordering
# that ImageDataGenerator.flow_from_directory assigns (it sorts the class
# subdirectory names). With a plain os.listdir the order is OS-dependent and
# the label names shown at prediction time could be mismatched with the
# model's output indices.
classes = sorted(os.listdir(train_dir))

# Hyperparameters: batch size, input image size, and number of training epochs
batch_size = 64
IMG_HEIGHT = 150
IMG_WIDTH = 150
epochs = 10

# Image generator for the training data: rescale pixels to [0, 1] and apply
# random horizontal flips as light augmentation.
train_image_generator = ImageDataGenerator(
    rescale=1./255,
    horizontal_flip=True
)

# Image generator for the validation data: rescaling only (no augmentation).
val_image_generator = ImageDataGenerator(
    rescale=1./255
)

# Training batches, read from the class-subdirectory layout.
train_data_gen = train_image_generator.flow_from_directory(
    batch_size=batch_size,
    directory=train_dir,
    shuffle=True,
    target_size=(IMG_HEIGHT, IMG_WIDTH),
    class_mode='categorical'  # one-hot labels, to pair with categorical_crossentropy
)

# Validation batches. shuffle=False keeps the batch order deterministic:
# later code indexes val_data_gen[i] to fetch specific images for
# prediction/display, and a shuffled generator would re-order its index
# mapping every epoch, breaking that reproducibility. (Validation metrics
# are unaffected by shuffling either way.)
val_data_gen = val_image_generator.flow_from_directory(
    batch_size=batch_size,
    directory=val_dir,
    shuffle=False,
    target_size=(IMG_HEIGHT, IMG_WIDTH),
    class_mode='categorical'
)

# len(generator) is the number of batches per epoch, not the sample count.
total_train = len(train_data_gen)
total_val = len(val_data_gen)

print('Total training data batches:', total_train)
print('Total validation data batches:', total_val)

# Load the VGG16 convolutional base with ImageNet weights, dropping its
# fully-connected classifier head so we can attach our own.
base_model = VGG16(weights='imagenet', include_top=False, input_shape=(IMG_HEIGHT, IMG_WIDTH, 3))

# Freeze every layer of the convolutional base: only the new dense head
# will be trained (transfer learning / feature extraction).
for frozen_layer in base_model.layers:
    frozen_layer.trainable = False

# Assemble the full classifier: frozen VGG16 base followed by a stack of
# dense layers that narrows down to one softmax unit per class.
model = Sequential([
    base_model,
    Flatten(),
    Dense(1024, activation='relu'),
    Dense(512, activation='relu'),
    Dense(64, activation='relu'),
    Dense(len(classes), activation='softmax'),
])

# Adam optimizer + categorical cross-entropy, matching the one-hot labels
# produced by class_mode='categorical'.
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Print a layer-by-layer summary of the architecture.
model.summary()

# Train the dense head; the generators yield batches, and steps per epoch
# equal the batch counts computed earlier.
history = model.fit(
    train_data_gen,
    steps_per_epoch=total_train,
    epochs=epochs,
    validation_data=val_data_gen,
    validation_steps=total_val,
)

# Extract the per-epoch metric curves recorded during training.
metrics = history.history
acc = metrics['accuracy']
val_acc = metrics['val_accuracy']
loss = metrics['loss']
val_loss = metrics['val_loss']

plt.figure(figsize=(8, 8))

# Top panel: training vs. validation accuracy.
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy')

# Bottom panel: training vs. validation loss.
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 1.0])
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')

plt.show()

# Randomly select 5 images from the validation set and predict their classes.
num_images = 5

# Pick 5 distinct batch indices, then take one random image out of each batch.
# NOTE: indexing the generator (val_data_gen[i]) returns a whole *batch*
# (x, y). The original code appended entire batches, producing a 5-D array
# that model.predict cannot consume and making val_images[i] a batch rather
# than a single image — so we must select one image per batch here.
batch_indices = random.sample(range(total_val), num_images)
val_images = []  # selected images, each of shape (IMG_HEIGHT, IMG_WIDTH, 3)
val_labels = []  # matching one-hot label vectors

for batch_idx in batch_indices:
    x, y = val_data_gen[batch_idx]      # one batch of images and labels
    img_idx = random.randrange(len(x))  # random image within that batch
    val_images.append(x[img_idx])
    val_labels.append(y[img_idx])

# Stack into arrays: (num_images, H, W, 3) and (num_images, n_classes).
val_images = np.array(val_images)
val_labels = np.array(val_labels)

# Predict class probabilities for the selected images.
predictions = model.predict(val_images)

# Display each image with its predicted and actual class names.
for i in range(num_images):
    image_array = val_images[i]  # single image array
    predicted_class = np.argmax(predictions[i])  # index of the highest probability
    actual_class = np.argmax(val_labels[i])      # index of the true class

    # Map class indices back to class names.
    predicted_label = classes[predicted_class]
    actual_label = classes[actual_class]

    # Show the image with its predicted vs. actual class in the title.
    # (The original title literal was split across a raw newline in the
    # source, which is a SyntaxError; use an escaped \n instead.)
    plt.imshow(image_array)
    plt.title(f'Predicted Class: {predicted_label}\nActual Class: {actual_label}')
    plt.axis('off')
    plt.show()

# The section above randomly selects 5 images from the validation set,
# predicts their classes with the trained model, and displays each image
# together with its predicted and actual class names.


# Source: https://www.cveoy.top/t/topic/pib8 — copyright belongs to the
# original author; do not repost or scrape.