Cat vs. Dog Classification: Training and Optimizing a Convolutional Neural Network on a Small Dataset
Below is example code that shows how to train a convolutional neural network from scratch for cat vs. dog classification, and how to use data augmentation to improve its accuracy.
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
# Set the random seed for reproducibility
torch.manual_seed(42)
# Define the data preprocessing transforms
transform = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
# Load the training and test sets
train_dataset = ImageFolder(root='train/', transform=transform)
test_dataset = ImageFolder(root='test/', transform=transform)
# Create the data loaders
train_loader = DataLoader(dataset=train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=32, shuffle=False)
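ImageFolder derives the class labels from the names of the subdirectories under the given root, so the code above assumes a directory layout along the following lines (the subdirectory and file names here are illustrative, not taken from the original):

train/
    cats/
        cat001.jpg
        ...
    dogs/
        dog001.jpg
        ...
test/
    cats/
        ...
    dogs/
        ...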
# Define the convolutional neural network model
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1)
        # Two 2x2 poolings reduce a 32x32 input to 8x8, hence 32 * 8 * 8 input features
        self.fc = nn.Linear(32 * 8 * 8, 2)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.conv2(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = x.view(x.size(0), -1)  # flatten to (batch_size, 32 * 8 * 8)
        x = self.fc(x)
        return x
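As a quick sanity check (not part of the original code), passing a dummy batch through the model confirms that the flattened feature size matches the fully connected layer and that the model produces two-class logits:

# Illustrative check: one fake 3x32x32 RGB image should yield an output of shape (1, 2).
with torch.no_grad():
    dummy = torch.randn(1, 3, 32, 32)
    print(CNN()(dummy).shape)  # torch.Size([1, 2])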
# Create the model instance
model = CNN()
# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
# Train the network
for epoch in range(10):
    running_loss = 0.0
    for i, data in enumerate(train_loader):
        inputs, labels = data
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 100 == 99:  # print the average loss every 100 mini-batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 100))
            running_loss = 0.0
# Evaluate the network on the test set
model.eval()  # switch to evaluation mode before testing
correct = 0
total = 0
with torch.no_grad():
    for data in test_loader:
        images, labels = data
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy: %.2f %%' % (100 * correct / total))
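The example above runs on the CPU. A common addition (not in the original code) is to move the model and each batch onto a GPU when one is available, which speeds up both training and evaluation:

# Hypothetical addition: pick a device and move the model onto it.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
# Inside the training and test loops, each batch must be moved to the same device:
# inputs, labels = inputs.to(device), labels.to(device)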
Next is example code that uses data augmentation to improve accuracy:
# Modify the preprocessing transforms; the random augmentations are applied to the training set only,
# while the test set keeps a deterministic pipeline
train_transform = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(10),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
test_transform = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
# Reload the training and test sets
train_dataset = ImageFolder(root='train/', transform=train_transform)
test_dataset = ImageFolder(root='test/', transform=test_transform)
# Create new data loaders
train_loader = DataLoader(dataset=train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=32, shuffle=False)
# The rest of the code is the same as above: retrain and re-evaluate the network
...
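Because the random transforms are re-applied on every access, reading the same training sample twice yields two different tensors. A small check such as the following (not in the original code) makes the effect of the augmentation visible:

# Illustrative check: with augmentation enabled, repeated reads of the same sample differ.
img_a, _ = train_dataset[0]
img_b, _ = train_dataset[0]
print(torch.equal(img_a, img_b))  # typically False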
Next is example code that uses a pretrained convolutional neural network together with data augmentation:
# Load a pretrained model (newer torchvision versions replace pretrained=True with the weights= argument)
model = torchvision.models.resnet18(pretrained=True)
# Freeze the parameters of the pretrained model
for param in model.parameters():
    param.requires_grad = False
# Replace the classifier layer (the final fc layer of resnet18 has 512 input features)
model.fc = nn.Linear(512, 2)
# Define the loss function and optimizer; only the new fc layer is trained
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.fc.parameters(), lr=0.001, momentum=0.9)
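To confirm that only the new classifier will be updated, you can count the trainable parameters (a check that is not part of the original code):

# Only the new fc layer requires gradients: 512 * 2 weights + 2 biases = 1026 parameters.
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(trainable)  # 1026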
# Modify the preprocessing transforms (resize to the 224x224 input size expected by resnet18).
# Note: pretrained torchvision models are conventionally normalized with the ImageNet statistics
# mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225); (0.5, 0.5, 0.5) works but is less standard.
train_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(10),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
test_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
# Reload the training and test sets
train_dataset = ImageFolder(root='train/', transform=train_transform)
test_dataset = ImageFolder(root='test/', transform=test_transform)
# Create new data loaders
train_loader = DataLoader(dataset=train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=32, shuffle=False)
# Train and evaluate the network
...
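One detail that matters more for resnet18 than for the small CNN above: resnet18 contains batch-normalization layers, so the elided training and evaluation code should switch modes explicitly:

model.train()  # before the training loop: batch norm uses per-batch statistics
# ... training loop as before ...
model.eval()   # before evaluation: batch norm uses its running statistics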
Finally, example code that fine-tunes the model together with data augmentation:
# Unfreeze the parameters of the last residual block of the pretrained model
for param in model.layer4.parameters():
    param.requires_grad = True
# Define the loss function and optimizer, passing only the parameters that require gradients
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=0.001, momentum=0.9)
# Modify the preprocessing transforms (augmentation on the training set only, as before)
train_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(10),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
test_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
# Reload the training and test sets
train_dataset = ImageFolder(root='train/', transform=train_transform)
test_dataset = ImageFolder(root='test/', transform=test_transform)
# Create new data loaders
train_loader = DataLoader(dataset=train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=32, shuffle=False)
# Train and evaluate the network
...
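When fine-tuning, it is also common (though not done in the original code) to give the unfrozen backbone block a smaller learning rate than the freshly initialized classifier, for example via optimizer parameter groups; the rates below are illustrative:

# Hypothetical variant: per-group learning rates for the unfrozen block and the new classifier.
optimizer = optim.SGD([
    {'params': model.layer4.parameters(), 'lr': 0.0001},  # smaller rate for the pretrained block
    {'params': model.fc.parameters()},                    # uses the default lr below
], lr=0.001, momentum=0.9)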
Note that this is only example code; in a real application it will likely need to be adapted and tuned to the specific situation.