车辆识别模型训练与测试:基于 VGG16 的图像分类
这段代码实现了一个基于 VGG16 迁移学习的车辆识别深度学习模型的完整训练和测试流程。
import os
import random
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D, BatchNormalization, GlobalAveragePooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import VGG16
from sklearn.metrics import accuracy_score
# Hyper-parameters and dataset locations.
train_dir = 'D:/BS/class/archive/seg_train/seg_train' # training-set root: one sub-folder per class
val_dir = 'D:/BS/class/archive/seg_test/seg_test' # validation-set root, same layout
classes = os.listdir(train_dir) # class folder names; only len(classes) is used (output layer size)
# NOTE(review): os.listdir also returns stray non-directory entries —
# confirm train_dir contains only class folders, or the class count is wrong.
batch_size = 64 # samples per gradient step
IMG_HEIGHT = 150 # target image height after resizing
IMG_WIDTH = 150 # target image width after resizing
epochs = 10 # number of training epochs
# Pixel values are rescaled to [0, 1]; the training stream additionally
# gets random horizontal-flip augmentation, the validation stream does not.
train_image_generator = ImageDataGenerator(
    rescale=1./255, # normalize pixels to [0, 1]
    horizontal_flip=True # random horizontal-flip augmentation
)
val_image_generator = ImageDataGenerator(
    rescale=1./255 # normalization only — no augmentation for validation
)
# Batched image streams built directly from the class-per-folder layout.
train_data_gen = train_image_generator.flow_from_directory(
    batch_size=batch_size, # images per batch
    directory=train_dir, # training-set root
    shuffle=True, # reshuffle training order each epoch
    target_size=(IMG_HEIGHT, IMG_WIDTH), # resize every image to 150x150
    class_mode='categorical' # one-hot labels for categorical_crossentropy
)
val_data_gen = val_image_generator.flow_from_directory(
    batch_size=batch_size, # images per batch
    directory=val_dir, # validation-set root
    shuffle=True, # NOTE(review): shuffling validation data is unnecessary (harmless for aggregate metrics)
    target_size=(IMG_HEIGHT, IMG_WIDTH), # resize every image to 150x150
    class_mode='categorical' # one-hot labels
)
# len() of a directory iterator is the number of BATCHES per epoch,
# not the number of samples (the original comments claimed sample counts;
# the print labels below correctly say "批次数"/batch count).
total_train = len(train_data_gen) # training batches per epoch
total_val = len(val_data_gen) # validation batches per epoch
print("总训练数据批次数:", total_train)
print("总验证数据批次数: ", total_val)
# Transfer learning: take the VGG16 convolutional base pre-trained on
# ImageNet (without its fully-connected top) and freeze every layer so
# that only the newly added classification head is trained.
base_model = VGG16(weights='imagenet', include_top=False, input_shape=(IMG_HEIGHT, IMG_WIDTH, 3))
for layer in base_model.layers:
    layer.trainable = False

# Classification head: flatten the conv feature maps, then funnel them
# through progressively narrower dense layers (with dropout for
# regularization) down to one softmax unit per class.
model = Sequential([
    base_model,
    Flatten(),
    Dense(1024, activation='relu'),
    Dropout(0.3),
    Dense(512, activation='relu'),
    Dropout(0.3),
    Dense(64, activation='relu'),
    Dense(len(classes), activation='softmax'),
])

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
# Train the classification head (the frozen VGG16 base is not updated).
history = model.fit(
    train_data_gen, # training batch generator
    steps_per_epoch=total_train, # = len(train_data_gen); matches Keras' default for a generator
    epochs=epochs, # number of passes over the training data
    validation_data=val_data_gen, # validation batch generator
    validation_steps=total_val # evaluate on every validation batch each epoch
)
# Visualize the training curves recorded by model.fit: accuracy on top,
# cross-entropy loss underneath, each with train and validation lines.
hist = history.history
train_acc, valid_acc = hist['accuracy'], hist['val_accuracy']
train_loss, valid_loss = hist['loss'], hist['val_loss']

plt.figure(figsize=(8, 8))

plt.subplot(2, 1, 1)
plt.plot(train_acc, label='Training Accuracy')
plt.plot(valid_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy')

plt.subplot(2, 1, 2)
plt.plot(train_loss, label='Training Loss')
plt.plot(valid_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')

plt.show()
# Directory whose per-class sub-folders provide candidate display images
# (same root as the training set above).
img_dir = 'D:/BS/class/archive/seg_train/seg_train'
classes = os.listdir(img_dir)

# Flatten every '.jpg' under each class folder into one path list,
# preserving class order and os.listdir file order.
img_paths = [
    os.path.join(img_dir, class_name, img_name)
    for class_name in classes
    for img_name in os.listdir(os.path.join(img_dir, class_name))
    if img_name.endswith('.jpg')
]

# Pick 10 distinct images at random (random.sample raises ValueError if
# fewer than 10 exist).
# NOTE(review): selected_img_paths is never used later in this script.
selected_img_paths = random.sample(img_paths, 10)
# Evaluation generator. NOTE(review): this re-reads the validation
# directory (val_dir), so the "test" accuracy computed below is really
# validation accuracy — point this at a held-out test set if one exists.
test_image_generator = ImageDataGenerator(rescale=1. / 255)
test_data_gen = test_image_generator.flow_from_directory(
    batch_size=1, # one image per batch, so batch index == sample index
    directory=val_dir, # see NOTE above: this is the validation set
    shuffle=False, # keep file order so predictions align with .classes
    target_size=(IMG_HEIGHT, IMG_WIDTH),
    class_mode='categorical'
)
# With batch_size=1 the batch count equals the sample count.
total_test = len(test_data_gen)
print("Total testing data batches: ", total_test)
# Predict every sample, then compare argmax predictions against the
# generator's ground-truth class indices (valid because shuffle=False).
y_pred = model.predict(test_data_gen) # shape (total_test, n_classes)
y_pred_class = np.argmax(y_pred, axis=1) # predicted class index per sample
y_true_class = test_data_gen.classes # true class index per sample, in file order
class_names = list(test_data_gen.class_indices.keys()) # index -> class name
accuracy = accuracy_score(y_true_class, y_pred_class)
print('Accuracy: {:.2f}%'.format(accuracy * 100))
# Show five randomly chosen evaluation images with their true and
# predicted labels. Because batch_size=1 and shuffle=False, batch index
# `idx` addresses the same sample in both the generator and y_pred_class.
for _ in range(5):
    idx = random.randint(0, total_test - 1)
    batch_imgs, batch_labels = test_data_gen[idx]
    sample = batch_imgs[0]
    true_name = class_names[np.argmax(batch_labels)]
    pred_name = class_names[y_pred_class[idx]]
    plt.title('True Label: {}, Predicted Label: {}'.format(true_name, pred_name))
    plt.imshow(sample)
    plt.show()
要将其链接到HTML网页中,你可以使用以下步骤:
- 在HTML文件中添加一个Canvas元素,用于显示预测的车辆图片:
<canvas id='canvas'></canvas>
- 在HTML文件中引入TensorFlow.js库。你可以在
<head>标签中添加以下代码:
<script src='https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@3.8.0'></script>
- 创建一个JavaScript文件,并将以下代码添加到文件中:
// 注意:await 只能在 async 函数(或 ES 模块顶层)中使用,
// 因此将整个流程包装在一个 async 函数里再调用。
async function run() {
  // 加载模型
  const model = await tf.loadLayersModel('path/to/model.json');
  // 获取Canvas元素
  const canvas = document.getElementById('canvas');
  const ctx = canvas.getContext('2d');
  // 加载图像
  const img = new Image();
  img.src = 'path/to/image.jpg';
  // 图像加载完成后进行预测
  img.onload = function() {
    ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
    const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
    // 模型训练时输入为 150x150,因此先缩放到该尺寸再归一化
    const tensor = tf.image.resizeBilinear(tf.browser.fromPixels(imageData), [150, 150])
      .div(255)
      .expandDims();
    const prediction = model.predict(tensor);
    const predictedClass = tf.argMax(prediction, 1).dataSync()[0];
    const classNames = ['class1', 'class2', 'class3']; // 替换为你的类别名称
    const result = classNames[predictedClass];
    console.log('预测结果:', result);
  };
}
run();
请确保将'path/to/model.json'替换为实际的模型路径,将'path/to/image.jpg'替换为实际的图像路径,并将'classNames'替换为你的类别名称。
- 将JavaScript文件链接到HTML文件中。在HTML文件的
<body>标签中添加以下代码:
<script src='path/to/javascript.js'></script>
请将'path/to/javascript.js'替换为实际的JavaScript文件路径。
- 在浏览器中打开HTML文件,你将能够看到预测的结果在控制台中输出,并在Canvas元素中显示图像。
请注意,这只是一个基本的示例,具体实现可能会根据你的项目需求而有所不同。此外,确保在网页上使用模型时遵循适当的法律和隐私规定。
原文地址: https://www.cveoy.top/t/topic/bKv7 著作权归作者所有。请勿转载和采集!