LeNet改进实验:性能对比与分析
本实验基于LeNet网络,针对激活函数、池化方式、卷积核大小、正则化方法和网络结构等方面进行改进,并比较改进前后模型在FashionMNIST数据集上的性能。
实验目标
- 改进LeNet网络,提升其在FashionMNIST数据集上的分类性能。
- 比较不同改进方法对模型性能的影响,分析其优缺点。
实验环境
- Python 3.x
- PyTorch
- FashionMNIST数据集
代码实现
import pandas as pd
import numpy as np
import datetime
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
# Baseline LeNet definition.
class LeNet(nn.Module):
    """Classic LeNet-style CNN for 1x28x28 inputs, emitting 10-class logits."""

    def __init__(self):
        super(LeNet, self).__init__()
        # Feature extractor: two conv + 2x2 max-pool stages.
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.pool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.pool2 = nn.MaxPool2d(2)
        # Classifier head; after the second pool the map is 16 x 4 x 4.
        self.fc1 = nn.Linear(16 * 4 * 4, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        out = self.pool1(torch.relu(self.conv1(x)))
        out = self.pool2(torch.relu(self.conv2(out)))
        out = out.view(-1, 16 * 4 * 4)
        out = torch.relu(self.fc1(out))
        out = torch.relu(self.fc2(out))
        return self.fc3(out)
# FashionMNIST train/test splits, converted directly to tensors.
# ToTensor is stateless, so one instance can serve both datasets.
to_tensor = transforms.ToTensor()
train_dataset = datasets.FashionMNIST(root='data', train=True,
                                      transform=to_tensor, download=True)
test_dataset = datasets.FashionMNIST(root='data', train=False,
                                     transform=to_tensor, download=True)
# Hyperparameters shared by every experiment below.
batch_size = 64  # mini-batch size for both loaders
learning_rate = 0.01  # Adam step size (0.01 is high for Adam — TODO confirm intended)
epochs = 10  # full passes over the training set
# Batch iterators: shuffle only the training stream; evaluation order is fixed.
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
# Instantiate the baseline model, loss function and optimizer.
model = LeNet()
criterion = nn.CrossEntropyLoss()  # expects raw logits (no softmax in the net)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Per-epoch metric history, filled in by the training loop below.
train_loss_list = []
test_loss_list = []
train_acc_list = []
test_acc_list = []
# Train for `epochs` epochs, evaluating on the test set after each one.
for epoch in range(epochs):
    # ---- training phase ----
    model.train()
    train_loss = 0.0
    train_acc = 0.0
    for images, labels in train_loader:
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # loss.item() is the mean over the batch; weight by batch size so the
        # epoch figure is a true per-sample average (the original divided a sum
        # of batch means by the dataset size, understating the loss).
        train_loss += loss.item() * images.size(0)
        _, predicted = torch.max(outputs.data, 1)
        train_acc += (predicted == labels).sum().item()
    train_loss /= len(train_loader.dataset)
    train_acc /= len(train_loader.dataset)
    train_loss_list.append(train_loss)
    train_acc_list.append(train_acc)

    # ---- evaluation phase ----
    model.eval()
    test_loss = 0.0
    test_acc = 0.0
    with torch.no_grad():
        for images, labels in test_loader:
            outputs = model(images)
            loss = criterion(outputs, labels)
            test_loss += loss.item() * images.size(0)
            _, predicted = torch.max(outputs.data, 1)
            test_acc += (predicted == labels).sum().item()
    test_loss /= len(test_loader.dataset)
    test_acc /= len(test_loader.dataset)
    test_loss_list.append(test_loss)
    test_acc_list.append(test_acc)

    # Fixed: the original had a stray "\n" inside this expression (syntax error).
    print('Epoch [{}/{}], Train Loss: {:.4f}, Train Acc: {:.4f}, '
          'Test Loss: {:.4f}, Test Acc: {:.4f}'.format(
              epoch + 1, epochs, train_loss, train_acc, test_loss, test_acc))
# Per-epoch loss curves (train vs. test).
for series, series_label in ((train_loss_list, 'Train Loss'),
                             (test_loss_list, 'Test Loss')):
    plt.plot(series, label=series_label)
plt.legend(loc='upper right')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show()

# Per-epoch accuracy curves (train vs. test).
for series, series_label in ((train_acc_list, 'Train Acc'),
                             (test_acc_list, 'Test Acc')):
    plt.plot(series, label=series_label)
plt.legend(loc='lower right')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.show()
# Persist the trained weights, reload them, then accumulate a confusion
# matrix over the test set (rows: true class, columns: predicted class).
torch.save(model.state_dict(), 'lenet.pth')
model.load_state_dict(torch.load('lenet.pth'))
confusion_matrix = np.zeros((10, 10))
model.eval()
with torch.no_grad():
    for images, labels in test_loader:
        predictions = model(images).argmax(dim=1)
        for true_cls, pred_cls in zip(labels, predictions):
            confusion_matrix[true_cls, pred_cls] += 1
print(confusion_matrix)
# Augmented copy of the test split: random rotations of up to +/-45 degrees.
# NOTE(review): RandomRotation is applied AFTER ToTensor here; this only works
# on torchvision versions whose transforms accept tensors — confirm, otherwise
# swap the order so rotation happens on the PIL image.
rotated_test_dataset = datasets.FashionMNIST(
root='data',
train=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.RandomRotation(45)
]),
download=True
)
# Loader over the augmented split; order kept fixed for evaluation.
rotated_test_loader = torch.utils.data.DataLoader(
dataset=rotated_test_dataset,
batch_size=batch_size,
shuffle=False
)
# Evaluate the trained model on the rotation-augmented test set, timing the run.
start = datetime.datetime.now()
model.eval()
test_loss = 0.0
test_acc = 0.0
with torch.no_grad():
    for images, labels in rotated_test_loader:
        outputs = model(images)
        loss = criterion(outputs, labels)
        # Weight the batch-mean loss by batch size for a true per-sample average.
        test_loss += loss.item() * images.size(0)
        _, predicted = torch.max(outputs.data, 1)
        test_acc += (predicted == labels).sum().item()
test_loss /= len(rotated_test_loader.dataset)
test_acc /= len(rotated_test_loader.dataset)
print('Test Loss: {:.4f}, Test Acc: {:.4f}'.format(test_loss, test_acc))
# Fixed: the original captured `now` and `end` back-to-back, so the reported
# elapsed time was always ~0; time the evaluation loop instead.
end = datetime.datetime.now()
print(end.strftime('当前时间:%Y年%m月%d日 %H:%M:%S'), end=' ')
print('执行时间:', end - start)
# Task 1: LeNet with an explicit nn.ReLU module for all activations.
class LeNet(nn.Module):
    """LeNet variant using a shared nn.ReLU module instead of torch.relu."""

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.pool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.pool2 = nn.MaxPool2d(2)
        self.fc1 = nn.Linear(16 * 4 * 4, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        self.relu = nn.ReLU()

    def forward(self, x):
        act = self.relu  # single activation module reused everywhere
        x = self.pool1(act(self.conv1(x)))
        x = self.pool2(act(self.conv2(x)))
        x = x.view(-1, 16 * 4 * 4)
        x = act(self.fc1(x))
        x = act(self.fc2(x))
        return self.fc3(x)
# Task 2: pooling done with 2x2 max pools.
class LeNet(nn.Module):
    """LeNet variant whose two pooling stages are 2x2 max pools."""

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.pool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.pool2 = nn.MaxPool2d(2)
        self.fc1 = nn.Linear(16 * 4 * 4, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Two conv/pool stages, then the three-layer classifier head.
        feat = self.pool1(self.relu(self.conv1(x)))
        feat = self.pool2(self.relu(self.conv2(feat)))
        flat = feat.view(-1, 16 * 4 * 4)
        hidden = self.relu(self.fc2(self.relu(self.fc1(flat))))
        return self.fc3(hidden)
# Task 3: second conv kernel enlarged from 5x5 to 7x7.
class LeNet(nn.Module):
    """LeNet variant with a 7x7 second convolution.

    Spatial trace for a 28x28 input: conv1(5)->24, pool->12, conv2(7)->6,
    pool->3, so the flattened feature size is 16*3*3. The original code kept
    16*4*4 from the 5x5 version, which raises a shape error at runtime.
    """

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.pool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(6, 16, 7)  # enlarged 7x7 kernel
        self.pool2 = nn.MaxPool2d(2)
        self.fc1 = nn.Linear(16 * 3 * 3, 120)  # fixed: was 16 * 4 * 4
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.pool1(self.relu(self.conv1(x)))
        x = self.pool2(self.relu(self.conv2(x)))
        x = x.view(-1, 16 * 3 * 3)  # fixed to match the 7x7 conv output
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        return self.fc3(x)
# Task 4: Dropout after each hidden FC layer (first FC widened to 256 units).
class LeNet(nn.Module):
    """LeNet variant regularised with p=0.5 dropout on every hidden FC layer."""

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.pool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.pool2 = nn.MaxPool2d(2)
        self.fc1 = nn.Linear(16 * 4 * 4, 256)  # widened hidden layer
        self.dropout = nn.Dropout(p=0.5)  # shared dropout module
        self.fc2 = nn.Linear(256, 120)
        self.fc3 = nn.Linear(120, 84)
        self.fc4 = nn.Linear(84, 10)
        self.relu = nn.ReLU()

    def forward(self, x):
        feats = self.pool1(self.relu(self.conv1(x)))
        feats = self.pool2(self.relu(self.conv2(feats)))
        out = feats.view(-1, 16 * 4 * 4)
        # Dropout follows each hidden activation; the output layer stays dry.
        for fc in (self.fc1, self.fc2, self.fc3):
            out = self.dropout(self.relu(fc(out)))
        return self.fc4(out)
# Task 4b: BatchNorm after each conv layer, on top of the dropout variant.
class LeNet(nn.Module):
    """LeNet variant with BatchNorm on both conv stages plus FC dropout."""

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.bn1 = nn.BatchNorm2d(6)  # normalise conv1 activations
        self.pool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.bn2 = nn.BatchNorm2d(16)  # normalise conv2 activations
        self.pool2 = nn.MaxPool2d(2)
        self.fc1 = nn.Linear(16 * 4 * 4, 256)
        self.dropout = nn.Dropout(p=0.5)
        self.fc2 = nn.Linear(256, 120)
        self.fc3 = nn.Linear(120, 84)
        self.fc4 = nn.Linear(84, 10)
        self.relu = nn.ReLU()

    def forward(self, x):
        # conv -> BN -> ReLU -> pool, twice.
        out = self.pool1(self.relu(self.bn1(self.conv1(x))))
        out = self.pool2(self.relu(self.bn2(self.conv2(out))))
        out = out.view(-1, 16 * 4 * 4)
        out = self.dropout(self.relu(self.fc1(out)))
        out = self.dropout(self.relu(self.fc2(out)))
        out = self.dropout(self.relu(self.fc3(out)))
        return self.fc4(out)
# Task 5: 3x3 kernels with an extra conv layer; strided convs replace pooling.
class LeNet(nn.Module):
    """Deeper LeNet variant: three 3x3 convs, the last two with stride 2.

    Spatial trace for a 28x28 input: conv1(3,s1)->26, conv2(3,s2)->12,
    conv3(3,s2)->5, so the flattened size is 24*5*5. The original code used
    24*3*3, which raises a shape error at runtime.
    """

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 3)
        self.bn1 = nn.BatchNorm2d(6)
        self.conv2 = nn.Conv2d(6, 12, 3, stride=2)  # stride 2 downsamples
        self.bn2 = nn.BatchNorm2d(12)
        self.conv3 = nn.Conv2d(12, 24, 3, stride=2)  # stride 2 downsamples
        self.bn3 = nn.BatchNorm2d(24)
        self.fc1 = nn.Linear(24 * 5 * 5, 256)  # fixed: was 24 * 3 * 3
        self.dropout = nn.Dropout(p=0.5)
        self.fc2 = nn.Linear(256, 120)
        self.fc3 = nn.Linear(120, 84)
        self.fc4 = nn.Linear(84, 10)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.relu(self.bn2(self.conv2(x)))
        x = self.relu(self.bn3(self.conv3(x)))
        x = x.view(-1, 24 * 5 * 5)  # fixed to match conv3 output
        x = self.dropout(self.relu(self.fc1(x)))
        x = self.dropout(self.relu(self.fc2(x)))
        x = self.dropout(self.relu(self.fc3(x)))
        return self.fc4(x)
# Task 6: residual connections, with a 1x1-conv projection shortcut where the
# shape changes.
class ResidualBlock(nn.Module):
    """Two 3x3 convs with BatchNorm plus an identity/projection shortcut.

    When stride != 1 or the channel count changes, a 1x1 conv + BN projects
    the input so it can be added to the main path.
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()
        self.shortcut = nn.Sequential()  # identity by default
        if stride != 1 or in_channels != out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1, stride=stride),
                nn.BatchNorm2d(out_channels)
            )

    def forward(self, x):
        shortcut = self.shortcut(x)
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.bn2(self.conv2(x))
        x += shortcut  # residual add before the final activation
        return self.relu(x)


class LeNet(nn.Module):
    """LeNet variant built from two residual blocks.

    Spatial trace for a 28x28 input: conv1(3)->26, block1(s1,p1)->26,
    block2(s2,p1)->13, so the flattened size is 24*13*13. The original code
    used 24*4*4, which raises a shape error at runtime.
    """

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 3)
        self.bn1 = nn.BatchNorm2d(6)
        self.block1 = ResidualBlock(6, 12)
        self.block2 = ResidualBlock(12, 24, stride=2)
        self.fc1 = nn.Linear(24 * 13 * 13, 256)  # fixed: was 24 * 4 * 4
        self.dropout = nn.Dropout(p=0.5)
        self.fc2 = nn.Linear(256, 120)
        self.fc3 = nn.Linear(120, 84)
        self.fc4 = nn.Linear(84, 10)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.block1(x)
        x = self.block2(x)
        x = x.view(-1, 24 * 13 * 13)  # fixed to match block2 output
        x = self.dropout(self.relu(self.fc1(x)))
        x = self.dropout(self.relu(self.fc2(x)))
        x = self.dropout(self.relu(self.fc3(x)))
        return self.fc4(x)
# Re-train from scratch with the final (residual) LeNet definition, using the
# same protocol as the baseline run.
model = LeNet()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()
train_loss_list = []
test_loss_list = []
train_acc_list = []
test_acc_list = []
for epoch in range(epochs):
    # ---- training phase ----
    model.train()
    train_loss = 0.0
    train_acc = 0.0
    for images, labels in train_loader:
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # Weight the batch-mean loss by batch size so the epoch figure is a
        # true per-sample average (original divided batch means by dataset size).
        train_loss += loss.item() * images.size(0)
        _, predicted = torch.max(outputs.data, 1)
        train_acc += (predicted == labels).sum().item()
    train_loss /= len(train_loader.dataset)
    train_acc /= len(train_loader.dataset)
    train_loss_list.append(train_loss)
    train_acc_list.append(train_acc)

    # ---- evaluation phase ----
    model.eval()
    test_loss = 0.0
    test_acc = 0.0
    with torch.no_grad():
        for images, labels in test_loader:
            outputs = model(images)
            loss = criterion(outputs, labels)
            test_loss += loss.item() * images.size(0)
            _, predicted = torch.max(outputs.data, 1)
            test_acc += (predicted == labels).sum().item()
    test_loss /= len(test_loader.dataset)
    test_acc /= len(test_loader.dataset)
    test_loss_list.append(test_loss)
    test_acc_list.append(test_acc)

    # Fixed: the original had a stray "\n" inside this expression (syntax error).
    print('Epoch [{}/{}], Train Loss: {:.4f}, Train Acc: {:.4f}, '
          'Test Loss: {:.4f}, Test Acc: {:.4f}'.format(
              epoch + 1, epochs, train_loss, train_acc, test_loss, test_acc))
实验结果
将不同改进方法的训练和测试结果绘制成曲线图,并进行分析。
结论
- 改进后的LeNet模型在FashionMNIST数据集上的分类性能得到提升。
- 不同改进方法的效果差异显著,需要根据具体问题选择合适的改进方法。
未来工作
- 尝试其他改进方法,例如更深的网络结构、更复杂的正则化方法等。
- 将LeNet应用于其他图像分类任务,验证其泛化能力。
参考文献
附录
- 代码文件
- 实验结果图
- 混淆矩阵
原文地址: https://www.cveoy.top/t/topic/n93h 著作权归作者所有。请勿转载和采集!