(注:以下代码仅为参考,不一定完全正确)

import torch
import torch.nn as nn
import torch.nn.functional as F

class LeNet(nn.Module):
    """Classic LeNet-5 for single-channel 28x28 inputs (e.g. MNIST).

    conv1 uses padding=2 so a 28x28 input produces the 5x5 final feature
    maps that ``fc1`` (16*5*5 inputs) expects; without the padding the
    flatten size is 16*4*4 and the forward pass raises a shape error.
    Spatial trace: 28 -> conv1 -> 28 -> pool -> 14 -> conv2 -> 10 -> pool -> 5.
    """

    def __init__(self):
        # Original text had `init` / `super().init()` (extraction stripped
        # the dunder underscores); restored to `__init__`.
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5, padding=2)  # padding=2: keep 28x28
        self.pool1 = nn.AvgPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.pool2 = nn.AvgPool2d(2, 2)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return (N, 10) class logits for x of shape (N, 1, 28, 28)."""
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool2(F.relu(self.conv2(x)))
        # Flatten per-sample; using x.size(0) (not -1) makes a wrong input
        # size fail loudly instead of silently misshaping the batch.
        x = x.view(x.size(0), 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)

class ImprovedLeNet1(nn.Module):
    """LeNet variant: max pooling, a 7x7 second conv, deeper FC head + Dropout.

    Spatial trace for a 28x28 input: conv1(5x5) -> 24, pool -> 12,
    conv2(7x7) -> 6, pool -> 3, so ``fc1`` correctly sees 16*3*3 features.
    Dropout(0.5) after fc1 and fc2 regularizes the enlarged FC head.
    """

    def __init__(self):
        # Restored `__init__` (source had dunder-stripped `init`).
        super(ImprovedLeNet1, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 7)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(16 * 3 * 3, 240)
        self.dropout1 = nn.Dropout(0.5)
        self.fc2 = nn.Linear(240, 120)
        self.dropout2 = nn.Dropout(0.5)
        self.fc3 = nn.Linear(120, 84)
        self.fc4 = nn.Linear(84, 10)

    def forward(self, x):
        """Return (N, 10) class logits for x of shape (N, 1, 28, 28)."""
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool2(F.relu(self.conv2(x)))
        x = x.view(x.size(0), 16 * 3 * 3)
        x = F.relu(self.fc1(x))
        x = self.dropout1(x)
        x = F.relu(self.fc2(x))
        x = self.dropout2(x)
        x = F.relu(self.fc3(x))
        return self.fc4(x)

class ImprovedLeNet2(nn.Module):
    """LeNet variant with BatchNorm after each conv layer.

    Same geometry as LeNet, so conv1 needs padding=2 for 28x28 inputs:
    28 -> conv1 -> 28 -> pool -> 14 -> conv2 -> 10 -> pool -> 5, matching
    ``fc1``'s 16*5*5 input. Without the padding the flatten is 16*4*4 and
    the forward pass crashes.
    """

    def __init__(self):
        # Restored `__init__` (source had dunder-stripped `init`).
        super(ImprovedLeNet2, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5, padding=2)  # padding=2: keep 28x28
        self.bn1 = nn.BatchNorm2d(6)
        self.pool1 = nn.AvgPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.bn2 = nn.BatchNorm2d(16)
        self.pool2 = nn.AvgPool2d(2, 2)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return (N, 10) class logits for x of shape (N, 1, 28, 28)."""
        x = self.pool1(F.relu(self.bn1(self.conv1(x))))
        x = self.pool2(F.relu(self.bn2(self.conv2(x))))
        x = x.view(x.size(0), 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)

class ImprovedLeNet3(nn.Module):
    """Deeper LeNet variant with BatchNorm and one residual connection.

    Spatial trace for a 28x28 input:
      conv1(3) -> 26, conv2(3) -> 24, pool1 -> 12,
      conv3(3) -> 10, conv4(3, padding=1) -> 10, pool2 -> 5,
      conv5(3) -> 3, conv6(3, padding=1) -> 3  =>  flatten = 32*3*3.

    conv4 and conv6 use padding=1 so that (a) the residual addition around
    conv4 sees matching spatial sizes — without padding conv4 shrinks the
    map from 10 to 8 and the add raises a shape error — and (b) the final
    map is 3x3, matching ``fc1``'s declared 32*3*3 input.
    """

    def __init__(self):
        # Restored `__init__` (source had dunder-stripped `init`).
        super(ImprovedLeNet3, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 3)
        self.bn1 = nn.BatchNorm2d(6)
        self.conv2 = nn.Conv2d(6, 6, 3)
        self.bn2 = nn.BatchNorm2d(6)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv3 = nn.Conv2d(6, 16, 3)
        self.bn3 = nn.BatchNorm2d(16)
        # padding=1 keeps H/W so the residual add in forward() is legal.
        self.conv4 = nn.Conv2d(16, 16, 3, padding=1)
        self.bn4 = nn.BatchNorm2d(16)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.conv5 = nn.Conv2d(16, 32, 3)
        self.bn5 = nn.BatchNorm2d(32)
        self.conv6 = nn.Conv2d(32, 32, 3, padding=1)
        self.bn6 = nn.BatchNorm2d(32)
        self.fc1 = nn.Linear(32 * 3 * 3, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return (N, 10) class logits for x of shape (N, 1, 28, 28)."""
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = self.pool1(x)
        x = F.relu(self.bn3(self.conv3(x)))
        # Residual connection: conv4 preserves both channel count (16 -> 16)
        # and spatial size (padding=1), so the shortcut is the identity.
        x = F.relu(self.bn4(self.conv4(x) + self._shortcut(x)))
        x = self.pool2(x)
        x = F.relu(self.bn5(self.conv5(x)))
        x = F.relu(self.bn6(self.conv6(x)))
        x = x.view(x.size(0), 32 * 3 * 3)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)

    def _shortcut(self, x):
        """Identity shortcut for the conv4 residual branch.

        The original referenced undefined ``self.in_channels`` /
        ``self.out_channels`` and, in the mismatched-channels case,
        constructed a brand-new (untrained) ``nn.Conv2d`` on every call
        without even applying it to x — returning a Module, not a tensor.
        Since conv4 maps 16 -> 16 channels at unchanged resolution, the
        correct shortcut here is simply the identity.
        """
        return x

if __name__ == '__main__':
    # torchvision provides the MNIST dataset and ToTensor transform; the
    # original script used `datasets` / `transforms` without importing them.
    # Imported at script scope since only the training driver needs it.
    from torchvision import datasets, transforms

    # Load data.
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(root='./data', train=True,
                       transform=transforms.ToTensor(), download=True),
        batch_size=64, shuffle=True)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(root='./data', train=False,
                       transform=transforms.ToTensor(), download=True),
        batch_size=64, shuffle=False)  # evaluation order doesn't matter

    # Train and evaluate each model variant in turn.
    models = [LeNet(), ImprovedLeNet1(), ImprovedLeNet2(), ImprovedLeNet3()]
    num_epochs = 10
    for model in models:
        optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
        criterion = nn.CrossEntropyLoss()

        # Training mode enables Dropout and BatchNorm batch statistics.
        model.train()
        for epoch in range(num_epochs):
            for i, (images, labels) in enumerate(train_loader):
                optimizer.zero_grad()
                outputs = model(images)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
                if (i + 1) % 100 == 0:
                    print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                          .format(epoch + 1, num_epochs, i + 1,
                                  len(train_loader), loss.item()))

        # Evaluation: switch Dropout/BatchNorm to inference mode — the
        # original never called eval(), skewing the accuracy measurement.
        model.eval()
        correct = 0
        total = 0
        with torch.no_grad():
            for images, labels in test_loader:
                outputs = model(images)
                _, predicted = torch.max(outputs, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
        # Original line was missing its closing parenthesis.
        print('Accuracy of the network on the 10000 test images: {} %'
              .format(100 * correct / total))
实验目的:掌握增强卷积神经网络性能的方法,包括 Dropout、BatchNorm 以及残差连接;学会使用 PyTorch 框架,以现有的 LeNet 为基础实现相应的改进。实验内容:以 LeNet 为基础,分别实现如下几种改进,并比较改进前与改进后模型的性能(其中第 6 与第 7 项为扩展任务):1. 激活函数的改进:将 LeNet 中的激活函数替换为 ReLU;2. 池化方式:将平均池化改为最大池化;3. 卷积核大小:将其中一个 5×5 的卷积核修改为 7×7;4. 正则化(原文在此处截断)。

原文地址: https://www.cveoy.top/t/topic/eYLs 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录