Objective: Master techniques for improving the performance of convolutional neural networks, including Dropout, BatchNorm, and residual connections, and learn to implement these improvements on top of the existing LeNet using the PyTorch framework.

Tasks:

Starting from LeNet, implement each of the following improvements and compare the performance of the model before and after the change. Items 6 and 7 are extension tasks.

  1. **Activation function:** replace the activation functions in LeNet with ReLU.
  2. **Pooling:** replace average pooling with max pooling.
  3. **Kernel size:** change one of the 5×5 convolution kernels to 7×7 (see the LeNet7x7 sketch in the code below).
  4. **Regularization method 1:** add Dropout layers after the fully connected layers (the hidden fully connected layers may be widened; see the LeNetDropoutWide sketch below).
  5. **Regularization method 2:** add BatchNorm layers after the convolutional layers.
  6. Change the convolution kernels from 5×5 to 3×3 while increasing the number of layers (remember to adjust the strides; see the LeNet3x3Stride sketch below).
  7. **Residual connection:** choose a path that skips one or more layers and add a residual connection. Note that a 1×1 convolution is needed to match the dimensions.

Code implementation: the following PyTorch code implements the LeNet improvements:

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets

# Baseline LeNet: sigmoid activations and average pooling, as in the original network
class LeNet(nn.Module):

    def __init__(self, num_classes=10):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, kernel_size=5)
        self.sigmoid1 = nn.Sigmoid()
        self.avgpool1 = nn.AvgPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
        self.sigmoid2 = nn.Sigmoid()
        self.avgpool2 = nn.AvgPool2d(kernel_size=2, stride=2)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.sigmoid3 = nn.Sigmoid()
        self.fc2 = nn.Linear(120, 84)
        self.sigmoid4 = nn.Sigmoid()
        self.fc3 = nn.Linear(84, num_classes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.sigmoid1(x)
        x = self.avgpool1(x)
        x = self.conv2(x)
        x = self.sigmoid2(x)
        x = self.avgpool2(x)
        x = x.view(-1, 16 * 5 * 5)
        x = self.fc1(x)
        x = self.sigmoid3(x)
        x = self.fc2(x)
        x = self.sigmoid4(x)
        x = self.fc3(x)
        return x

# LeNet-ReLU: sigmoid activations replaced with ReLU (task 1); pooling stays average so only the activation changes
class LeNetReLU(nn.Module):

    def __init__(self, num_classes=10):
        super(LeNetReLU, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, kernel_size=5)
        self.relu1 = nn.ReLU()
        self.avgpool1 = nn.AvgPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
        self.relu2 = nn.ReLU()
        self.avgpool2 = nn.AvgPool2d(kernel_size=2, stride=2)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.relu3 = nn.ReLU()
        self.fc2 = nn.Linear(120, 84)
        self.relu4 = nn.ReLU()
        self.fc3 = nn.Linear(84, num_classes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.avgpool1(x)
        x = self.conv2(x)
        x = self.relu2(x)
        x = self.avgpool2(x)
        x = x.view(-1, 16 * 5 * 5)
        x = self.fc1(x)
        x = self.relu3(x)
        x = self.fc2(x)
        x = self.relu4(x)
        x = self.fc3(x)
        return x

# LeNet-MaxPool: average pooling replaced with max pooling (task 2)
class LeNetMaxPool(nn.Module):

    def __init__(self, num_classes=10):
        super(LeNetMaxPool, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, kernel_size=5)
        self.relu1 = nn.ReLU()
        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
        self.relu2 = nn.ReLU()
        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.relu3 = nn.ReLU()
        self.fc2 = nn.Linear(120, 84)
        self.relu4 = nn.ReLU()
        self.fc3 = nn.Linear(84, num_classes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.relu2(x)
        x = self.maxpool2(x)
        x = x.view(-1, 16 * 5 * 5)
        x = self.fc1(x)
        x = self.relu3(x)
        x = self.fc2(x)
        x = self.relu4(x)
        x = self.fc3(x)
        return x
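
# LeNet-7x7: a variant for task 3 that enlarges the second convolution kernel
# from 5x5 to 7x7. This class is an added sketch, not part of the original
# listing; the 16 * 4 * 4 flatten size assumes the 32x32 padded MNIST input
# used throughout this experiment.
class LeNet7x7(nn.Module):

    def __init__(self, num_classes=10):
        super(LeNet7x7, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, kernel_size=5)
        self.relu1 = nn.ReLU()
        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        # A 7x7 kernel on 14x14 feature maps gives 8x8 outputs, 4x4 after pooling
        self.conv2 = nn.Conv2d(6, 16, kernel_size=7)
        self.relu2 = nn.ReLU()
        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.fc1 = nn.Linear(16 * 4 * 4, 120)
        self.relu3 = nn.ReLU()
        self.fc2 = nn.Linear(120, 84)
        self.relu4 = nn.ReLU()
        self.fc3 = nn.Linear(84, num_classes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.relu2(x)
        x = self.maxpool2(x)
        x = x.view(-1, 16 * 4 * 4)
        x = self.fc1(x)
        x = self.relu3(x)
        x = self.fc2(x)
        x = self.relu4(x)
        x = self.fc3(x)
        return x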

# LeNet-Dropout: Dropout added after the fully connected layers (task 4)
class LeNetDropout(nn.Module):

    def __init__(self, num_classes=10):
        super(LeNetDropout, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, kernel_size=5)
        self.relu1 = nn.ReLU()
        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
        self.relu2 = nn.ReLU()
        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.relu3 = nn.ReLU()
        self.dropout1 = nn.Dropout(p=0.5)
        self.fc2 = nn.Linear(120, 84)
        self.relu4 = nn.ReLU()
        self.dropout2 = nn.Dropout(p=0.5)
        self.fc3 = nn.Linear(84, num_classes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.relu2(x)
        x = self.maxpool2(x)
        x = x.view(-1, 16 * 5 * 5)
        x = self.fc1(x)
        x = self.relu3(x)
        x = self.dropout1(x)
        x = self.fc2(x)
        x = self.relu4(x)
        x = self.dropout2(x)
        x = self.fc3(x)
        return x
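
# LeNet-Dropout-Wide: an added sketch of the optional tweak mentioned in task 4,
# where the hidden fully connected layers are widened before applying Dropout.
# The 256/128 widths below are illustrative assumptions, not values from the
# original listing; the model can be passed to train_process() like the others.
class LeNetDropoutWide(nn.Module):

    def __init__(self, num_classes=10):
        super(LeNetDropoutWide, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, kernel_size=5)
        self.relu1 = nn.ReLU()
        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
        self.relu2 = nn.ReLU()
        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        # Wider hidden layers (120 -> 256, 84 -> 128) give Dropout more units to work with
        self.fc1 = nn.Linear(16 * 5 * 5, 256)
        self.relu3 = nn.ReLU()
        self.dropout1 = nn.Dropout(p=0.5)
        self.fc2 = nn.Linear(256, 128)
        self.relu4 = nn.ReLU()
        self.dropout2 = nn.Dropout(p=0.5)
        self.fc3 = nn.Linear(128, num_classes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.relu2(x)
        x = self.maxpool2(x)
        x = x.view(-1, 16 * 5 * 5)
        x = self.fc1(x)
        x = self.relu3(x)
        x = self.dropout1(x)
        x = self.fc2(x)
        x = self.relu4(x)
        x = self.dropout2(x)
        x = self.fc3(x)
        return x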

# LeNet-BatchNorm: BatchNorm added after the convolutional layers (task 5),
# with BatchNorm also kept on the fully connected layers
class LeNetBatchNorm(nn.Module):

    def __init__(self, num_classes=10):
        super(LeNetBatchNorm, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, kernel_size=5)
        self.batchnorm1 = nn.BatchNorm2d(6)
        self.relu1 = nn.ReLU()
        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
        self.batchnorm2 = nn.BatchNorm2d(16)
        self.relu2 = nn.ReLU()
        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.batchnorm3 = nn.BatchNorm1d(120)
        self.relu3 = nn.ReLU()
        self.fc2 = nn.Linear(120, 84)
        self.batchnorm4 = nn.BatchNorm1d(84)
        self.relu4 = nn.ReLU()
        self.fc3 = nn.Linear(84, num_classes)

    def forward(self, x):
        # Conventional ordering: conv/linear -> BatchNorm -> ReLU
        x = self.conv1(x)
        x = self.batchnorm1(x)
        x = self.relu1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.batchnorm2(x)
        x = self.relu2(x)
        x = self.maxpool2(x)
        x = x.view(-1, 16 * 5 * 5)
        x = self.fc1(x)
        x = self.batchnorm3(x)
        x = self.relu3(x)
        x = self.fc2(x)
        x = self.batchnorm4(x)
        x = self.relu4(x)
        x = self.fc3(x)
        return x

# LeNet-3x3: 3x3 kernels with a deeper network (task 6)
class LeNet3x3(nn.Module):

    def __init__(self, num_classes=10):
        super(LeNet3x3, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3)
        self.relu1 = nn.ReLU()
        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3)
        self.relu2 = nn.ReLU()
        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv3 = nn.Conv2d(32, 64, kernel_size=3)
        self.relu3 = nn.ReLU()
        self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.fc1 = nn.Linear(64 * 2 * 2, 256)
        self.relu4 = nn.ReLU()
        self.fc2 = nn.Linear(256, num_classes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.relu2(x)
        x = self.maxpool2(x)
        x = self.conv3(x)
        x = self.relu3(x)
        x = self.maxpool3(x)
        x = x.view(-1, 64 * 2 * 2)
        x = self.fc1(x)
        x = self.relu4(x)
        x = self.fc2(x)
        return x
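
# LeNet-3x3-Stride: an added sketch of the "adjust the strides" note in task 6,
# where stride-2 convolutions take over the downsampling instead of pooling
# layers. The channel widths and padding below are illustrative assumptions;
# the 64 * 4 * 4 flatten size again assumes the 32x32 padded MNIST input.
class LeNet3x3Stride(nn.Module):

    def __init__(self, num_classes=10):
        super(LeNet3x3Stride, self).__init__()
        # Each stride-2 convolution halves the spatial size: 32 -> 16 -> 8 -> 4
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1)
        self.relu1 = nn.ReLU()
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1)
        self.relu2 = nn.ReLU()
        self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1)
        self.relu3 = nn.ReLU()
        self.fc1 = nn.Linear(64 * 4 * 4, 256)
        self.relu4 = nn.ReLU()
        self.fc2 = nn.Linear(256, num_classes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.conv2(x)
        x = self.relu2(x)
        x = self.conv3(x)
        x = self.relu3(x)
        x = x.view(-1, 64 * 4 * 4)
        x = self.fc1(x)
        x = self.relu4(x)
        x = self.fc2(x)
        return x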

# LeNet-Residual: a residual connection that skips the conv3/conv4 block (task 7).
# A 1x1 convolution on the shortcut path matches the channel dimensions, and
# padding=1 keeps the spatial size unchanged so the two paths can be added.
class LeNetResidual(nn.Module):

    def __init__(self, num_classes=10):
        super(LeNetResidual, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, kernel_size=5)
        self.relu1 = nn.ReLU()
        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
        self.relu2 = nn.ReLU()
        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        # Main path of the residual block: two 3x3 convolutions, 16 -> 32 -> 64 channels
        self.conv3 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
        self.relu3 = nn.ReLU()
        self.conv4 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        # Shortcut path: 1x1 convolution so the channel count (16 -> 64) matches the main path
        self.shortcut = nn.Conv2d(16, 64, kernel_size=1)
        self.relu4 = nn.ReLU()
        self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.fc1 = nn.Linear(64 * 2 * 2, 256)
        self.relu5 = nn.ReLU()
        self.fc2 = nn.Linear(256, num_classes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.relu2(x)
        x = self.maxpool2(x)
        # Residual block: output = main path + shortcut, followed by ReLU
        identity = self.shortcut(x)
        x = self.conv3(x)
        x = self.relu3(x)
        x = self.conv4(x)
        x = x + identity
        x = self.relu4(x)
        x = self.maxpool3(x)
        x = x.view(-1, 64 * 2 * 2)
        x = self.fc1(x)
        x = self.relu5(x)
        x = self.fc2(x)
        return x

# Load the MNIST dataset, padding the 28x28 images to 32x32 so that the
# feature-map sizes match the fully connected layers defined above
transform = transforms.Compose([transforms.Pad(2), transforms.ToTensor()])
train_dataset = datasets.MNIST(root='data', train=True, transform=transform, download=True)
test_dataset = datasets.MNIST(root='data', train=False, transform=transform, download=True)

# Training hyperparameters
batch_size = 64
learning_rate = 0.1
num_epochs = 10

# Use the GPU if one is available, otherwise fall back to the CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Training function: one pass over the training set
def train(model, train_loader, criterion, optimizer):
    model.train()
    train_loss = 0.0
    train_acc = 0.0
    total = 0
    for images, labels in train_loader:
        images = images.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        train_loss += loss.item() * labels.size(0)
        _, predicted = torch.max(outputs.data, 1)
        train_acc += (predicted == labels).sum().item()
        total += labels.size(0)
    train_loss = train_loss / total
    train_acc = train_acc / total * 100
    return train_loss, train_acc

# Test function: one pass over the test set
def test(model, test_loader, criterion):
    model.eval()
    test_loss = 0.0
    test_acc = 0.0
    total = 0
    with torch.no_grad():
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            loss = criterion(outputs, labels)
            test_loss += loss.item() * labels.size(0)
            _, predicted = torch.max(outputs.data, 1)
            test_acc += (predicted == labels).sum().item()
            total += labels.size(0)
    test_loss = test_loss / total
    test_acc = test_acc / total * 100
    return test_loss, test_acc

# Full training loop for one model: train, evaluate each epoch, then save the weights
def train_process(model_name, model):
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
    model.to(device)
    for epoch in range(num_epochs):
        train_loss, train_acc = train(model, train_loader, criterion, optimizer)
        test_loss, test_acc = test(model, test_loader, criterion)
        print('Epoch [{}/{}], Train Loss: {:.4f}, Train Acc: {:.2f}%, Test Loss: {:.4f}, Test Acc: {:.2f}%'.format(
            epoch + 1, num_epochs, train_loss, train_acc, test_loss, test_acc))
    torch.save(model.state_dict(), model_name + '.pt')

# Train the LeNet baseline
train_process('LeNet', LeNet())

# Train the LeNet-ReLU model
train_process('LeNet-ReLU', LeNetReLU())

# Train the LeNet-MaxPool model
train_process('LeNet-MaxPool', LeNetMaxPool())
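
# Train the LeNet-7x7 model (the kernel-size variant sketched above for task 3)
train_process('LeNet-7x7', LeNet7x7())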

# Train the LeNet-Dropout model
train_process('LeNet-Dropout', LeNetDropout())

# Train the LeNet-BatchNorm model
train_process('LeNet-BatchNorm', LeNetBatchNorm())

# Train the LeNet-3x3 model
train_process('LeNet-3x3', LeNet3x3())

# Train the LeNet-Residual model
train_process('LeNet-Residual', LeNetResidual())