1. 激活函数的改进:将LeNet中的激活函数替换为ReLU。

class LeNet_ReLU(nn.Module):
    """LeNet variant 1: classic activations replaced with ReLU.

    Expects 1x28x28 inputs (MNIST-style) and returns raw logits of
    shape (N, 10).
    """

    def __init__(self):
        # Bug fix: the original defined `def init` and called
        # `super(...).init()`, so nn.Module was never initialized.
        super(LeNet_ReLU, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)        # 1x28x28 -> 6x24x24
        self.pool1 = nn.MaxPool2d(2, 2)        # -> 6x12x12
        self.conv2 = nn.Conv2d(6, 16, 5)       # -> 16x8x8
        self.pool2 = nn.MaxPool2d(2, 2)        # -> 16x4x4
        self.fc1 = nn.Linear(16 * 4 * 4, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Forward pass; ReLU after every conv/hidden fc layer."""
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool2(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 4 * 4)             # flatten to (N, 256)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)                        # logits, no softmax here
        return x

2. 池化方式:平均池化改为最大池化。

class LeNet_MaxPool(nn.Module):
    """LeNet variant 2: average pooling replaced with max pooling.

    Expects 1x28x28 inputs and returns raw logits of shape (N, 10).
    """

    def __init__(self):
        # Bug fix: the original defined `def init` and called
        # `super(...).init()`, so nn.Module was never initialized.
        super(LeNet_MaxPool, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)        # 1x28x28 -> 6x24x24
        self.pool1 = nn.MaxPool2d(2, 2)        # max pooling, -> 6x12x12
        self.conv2 = nn.Conv2d(6, 16, 5)       # -> 16x8x8
        self.pool2 = nn.MaxPool2d(2, 2)        # max pooling, -> 16x4x4
        self.fc1 = nn.Linear(16 * 4 * 4, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Forward pass using max pooling after each conv block."""
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool2(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 4 * 4)             # flatten to (N, 256)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

3. 卷积核大小:将其中一个5×5的卷积核修改为7×7。

class LeNet_7x7(nn.Module):
    """LeNet variant 3: the second 5x5 kernel is enlarged to 7x7.

    Expects 1x28x28 inputs and returns raw logits of shape (N, 10).
    """

    def __init__(self):
        # Bug fixes vs. the original:
        #  * `def init` / `super(...).init()` -> proper `__init__`.
        #  * With a 7x7 conv2 the feature map is 16x3x3 at the flatten
        #    point (28->24->12->6->3), not 16x4x4; the original fc1
        #    size would have raised a runtime shape error.
        super(LeNet_7x7, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)        # 1x28x28 -> 6x24x24
        self.pool1 = nn.MaxPool2d(2, 2)        # -> 6x12x12
        self.conv2 = nn.Conv2d(6, 16, 7)       # 7x7 kernel, -> 16x6x6
        self.pool2 = nn.MaxPool2d(2, 2)        # -> 16x3x3
        self.fc1 = nn.Linear(16 * 3 * 3, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Forward pass with the enlarged 7x7 second convolution."""
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool2(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 3 * 3)             # flatten to (N, 144)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

4. 正则化方法1:在全连接层后加入Dropout层(中间的全连接层可增加维度)

class LeNet_Dropout(nn.Module):
    """LeNet variant 4: wider fully-connected layers plus Dropout.

    Dropout (p=0.5) is applied after the first fc layer; the hidden fc
    dimensions are enlarged (256->240->120->84->10).  Expects 1x28x28
    inputs and returns raw logits of shape (N, 10).
    """

    def __init__(self):
        # Bug fix: the original defined `def init` and called
        # `super(...).init()`, so nn.Module was never initialized.
        super(LeNet_Dropout, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)        # 1x28x28 -> 6x24x24
        self.pool1 = nn.MaxPool2d(2, 2)        # -> 6x12x12
        self.conv2 = nn.Conv2d(6, 16, 5)       # -> 16x8x8
        self.pool2 = nn.MaxPool2d(2, 2)        # -> 16x4x4
        self.fc1 = nn.Linear(16 * 4 * 4, 240)  # widened hidden layer
        self.fc2 = nn.Linear(240, 120)         # widened hidden layer
        self.fc3 = nn.Linear(120, 84)
        self.fc4 = nn.Linear(84, 10)
        self.dropout = nn.Dropout(p=0.5)       # regularization

    def forward(self, x):
        """Forward pass; dropout is active only in training mode."""
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool2(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 4 * 4)             # flatten to (N, 256)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)                    # dropout after first fc
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = self.fc4(x)
        return x

5. 正则化方法2:卷积层后加入BatchNorm层

class LeNet_BatchNorm(nn.Module):
    """LeNet variant 5: BatchNorm2d after each convolution.

    Expects 1x28x28 inputs and returns raw logits of shape (N, 10).
    """

    def __init__(self):
        # Bug fix: the original defined `def init` and called
        # `super(...).init()`, so nn.Module was never initialized.
        super(LeNet_BatchNorm, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)        # 1x28x28 -> 6x24x24
        self.batchnorm1 = nn.BatchNorm2d(6)    # normalize conv1 output
        self.pool1 = nn.MaxPool2d(2, 2)        # -> 6x12x12
        self.conv2 = nn.Conv2d(6, 16, 5)       # -> 16x8x8
        self.batchnorm2 = nn.BatchNorm2d(16)   # normalize conv2 output
        self.pool2 = nn.MaxPool2d(2, 2)        # -> 16x4x4
        self.fc1 = nn.Linear(16 * 4 * 4, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Forward pass: conv -> BatchNorm -> ReLU -> max pool."""
        x = self.pool1(F.relu(self.batchnorm1(self.conv1(x))))
        x = self.pool2(F.relu(self.batchnorm2(self.conv2(x))))
        x = x.view(-1, 16 * 4 * 4)             # flatten to (N, 256)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

6. 将卷积核从5×5修改为3×3,但增加网络的层数(注意调整步长)

class LeNet_3x3(nn.Module):
    """LeNet variant 6: three 3x3 convolutions instead of two 5x5.

    Spatial trace for a 1x28x28 input:
    28 -> conv3 -> 26 -> pool -> 13 -> conv3 -> 11 -> pool -> 5 -> conv3 -> 3.
    Returns raw logits of shape (N, 10).
    """

    def __init__(self):
        # Bug fixes vs. the original:
        #  * `def init` / `super(...).init()` -> proper `__init__`.
        #  * The flattened feature map is 24x3x3 (see class docstring),
        #    not 24x4x4; the original fc1 size would have raised a
        #    runtime shape error.
        super(LeNet_3x3, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 3)        # 3x3 kernel, -> 6x26x26
        self.conv2 = nn.Conv2d(6, 12, 3)       # 3x3 kernel, -> 12x11x11
        self.conv3 = nn.Conv2d(12, 24, 3)      # 3x3 kernel, -> 24x3x3
        self.fc1 = nn.Linear(24 * 3 * 3, 240)
        self.fc2 = nn.Linear(240, 120)
        self.fc3 = nn.Linear(120, 84)
        self.fc4 = nn.Linear(84, 10)

    def forward(self, x):
        """Forward pass; max pooling after the first two conv layers."""
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2)                 # 6x26x26 -> 6x13x13
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2)                 # 12x11x11 -> 12x5x5
        x = F.relu(self.conv3(x))              # -> 24x3x3
        x = x.view(-1, 24 * 3 * 3)             # flatten to (N, 216)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = self.fc4(x)
        return x

7. 残差连接:选择一条跨层的路径(跨一层或跨多层均可),加入残差连接。注意需要用1×1卷积使维度相匹配。

class LeNet_Residual(nn.Module):
    """LeNet variant 7: residual (skip) connection across conv3.

    The skip path uses a 1x1 convolution to project the 16-channel
    tensor to 32 channels so it can be added to the conv3 output, and
    conv3 uses padding=1 so spatial sizes match.  Expects 1x28x28
    inputs and returns raw logits of shape (N, 10).
    """

    def __init__(self):
        # Bug fixes vs. the original:
        #  * `def init` / `super(...).init()` -> proper `__init__`.
        #  * The original applied `conv1x1` (in_channels=16) to a
        #    6-channel tensor and added tensors of different spatial
        #    size (24x24 vs 10x10) — both would raise at runtime.
        #    The residual path is restructured to span conv3 only.
        #  * conv3 gets padding=1 so the skip add is shape-compatible.
        #  * fc1 input matches the actual 32x4x4 flatten size.
        #  * forward ended with a bare `return` (returned None).
        super(LeNet_Residual, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)               # 1x28x28 -> 6x24x24
        self.batchnorm1 = nn.BatchNorm2d(6)
        self.conv2 = nn.Conv2d(6, 16, 5)              # 6x12x12 -> 16x8x8
        self.batchnorm2 = nn.BatchNorm2d(16)
        self.conv3 = nn.Conv2d(16, 32, 3, padding=1)  # 16x4x4 -> 32x4x4
        self.batchnorm3 = nn.BatchNorm2d(32)
        self.conv1x1 = nn.Conv2d(16, 32, 1)           # skip path: 16 -> 32 ch
        self.fc1 = nn.Linear(32 * 4 * 4, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Forward pass with a residual connection around conv3."""
        x = F.max_pool2d(F.relu(self.batchnorm1(self.conv1(x))), 2)  # 6x12x12
        x = F.max_pool2d(F.relu(self.batchnorm2(self.conv2(x))), 2)  # 16x4x4
        residual = self.conv1x1(x)                  # project skip to 32x4x4
        x = F.relu(self.batchnorm3(self.conv3(x)))  # main path, 32x4x4
        x = x + residual                            # residual connection
        x = x.view(-1, 32 * 4 * 4)                  # flatten to (N, 512)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
在下面代码的基础上用 Python 编程完成:以 LeNet 为基础实现如下几种改进(其中 6 与 7 为扩展任务),并给出最终完整的代码。1. 激活函数的改进:将 LeNet 中的激活函数替换为 ReLU。2. 池化方式:平均池化改为最大池化。3. 卷积核大小:将其中一个 5×5 的卷积核修改为 7×7。4. 正则化方法 1:在全连接层后加入 Dropout 层(中间的全连接层可增加维度)。5. 正则化方法 2:卷积层后加入 BatchNorm 层。6. 将卷积核从 5×5 修改为 3×3。

原文地址: https://www.cveoy.top/t/topic/ffjW 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录