使用PyTorch构建三层神经网络模型进行训练

本代码展示了如何使用PyTorch构建由三个级联子模型(Model1 → Model2 → Model3)组成的神经网络,并使用自定义数据集逐级进行训练:前一级模型的输出作为下一级模型的输入特征。

1. 导入所需库

import torch
import torch.nn as nn
import pandas as pd
from sklearn import preprocessing

2. 数据预处理

# Read the Excel sheet.
# Bug fix: the original path was a plain string; '\U' in 'C:\Users' is an
# invalid escape sequence and raises a SyntaxError in Python 3. A raw
# string (r'...') keeps the backslashes literal.
data = pd.read_excel(r'C:\Users\lenovo\Desktop\HIV\GSE6740GSE50011基因降低\output_data.xlsx')

# Standardize the feature columns (zero mean, unit variance per column).
data.iloc[:, 1:] = preprocessing.scale(data.iloc[:, 1:])

# Build tensors: column 0 holds the class label, the remaining columns
# are the (presumably 16 — see Model1.fc1) input features. TODO confirm
# the sheet layout against the actual file.
X = torch.tensor(data.iloc[:, 1:].values, dtype=torch.float32)
y = torch.tensor(data.iloc[:, 0].values, dtype=torch.long)
train_data = torch.utils.data.TensorDataset(X, y)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)

3. 定义模型

3.1 第一个模型

class Model1(nn.Module):
    """First-stage MLP: maps 16 input features to an 8-dim representation.

    Architecture: 16 -> 128 -> 64 -> 8, ReLU after each hidden layer,
    dropout (p=0.5) after each hidden activation. The final layer emits
    raw (un-activated) outputs.
    """

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(16, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 8)
        self.dropout = nn.Dropout(p=0.5)

    def forward(self, x):
        """Transform a (batch, 16) tensor into (batch, 8) outputs."""
        for hidden in (self.fc1, self.fc2):
            x = self.dropout(torch.relu(hidden(x)))
        return self.fc3(x)

3.2 第二个模型

class Model2(nn.Module):
    """Second-stage MLP: maps Model1's 8-dim output to a 4-dim representation.

    Architecture: 8 -> 32 -> 16 -> 4, ReLU after each hidden layer,
    dropout (p=0.5) after each hidden activation. The final layer emits
    raw (un-activated) outputs.
    """

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(8, 32)
        self.fc2 = nn.Linear(32, 16)
        self.fc3 = nn.Linear(16, 4)
        self.dropout = nn.Dropout(p=0.5)

    def forward(self, x):
        """Transform a (batch, 8) tensor into (batch, 4) outputs."""
        for hidden in (self.fc1, self.fc2):
            x = self.dropout(torch.relu(hidden(x)))
        return self.fc3(x)

3.3 第三个模型

class Model3(nn.Module):
    """Final stage: a single linear layer mapping 4 features to 2 class logits."""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(4, 2)

    def forward(self, x):
        """Return (batch, 2) logits for a (batch, 4) input."""
        return self.fc1(x)

4. 初始化模型

# Instantiate the three cascaded stages in one statement.
model1, model2, model3 = Model1(), Model2(), Model3()

5. 定义损失函数和优化器

# Shared loss function; one Adam optimizer (lr=0.001) per stage.
criterion = nn.CrossEntropyLoss()
optimizer1, optimizer2, optimizer3 = (
    torch.optim.Adam(m.parameters(), lr=0.001)
    for m in (model1, model2, model3)
)

6. 模型训练

6.1 训练第一个模型

# Train the first stage for 100 epochs, reporting mean batch loss and
# training accuracy once per epoch.
for epoch in range(100):
    epoch_loss = 0.0
    seen = 0
    hits = 0
    for inputs, labels in train_loader:
        optimizer1.zero_grad()
        outputs = model1(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer1.step()

        epoch_loss += loss.item()
        # Predicted class = index of the largest logit per row.
        predicted = outputs.data.argmax(dim=1)
        seen += labels.size(0)
        hits += (predicted == labels).sum().item()

    print('Model 1 - Epoch: %d, Loss: %.3f, Accuracy: %.3f' % (epoch + 1, epoch_loss / len(train_loader), hits / seen))

6.2 训练第二个模型

# --- Build model2's training set from model1's outputs ---
# Bug fix: model1 must be switched to eval() mode while extracting
# features; otherwise its Dropout(p=0.5) layers stay active and randomly
# zero half of the activations, so model2 would be trained on corrupted,
# non-deterministic inputs. Training mode is restored afterwards.
train_X = []
train_y = []
model1.eval()
with torch.no_grad():
    for inputs, labels in train_loader:
        train_X.append(model1(inputs))
        train_y.append(labels)
model1.train()  # restore training mode in case model1 is tuned later
train_X = torch.cat(train_X, 0)
train_y = torch.cat(train_y, 0)

# Wrap the extracted 8-dim features as the dataset for the second stage.
train_data2 = torch.utils.data.TensorDataset(train_X, train_y)
train_loader2 = torch.utils.data.DataLoader(train_data2, batch_size=64, shuffle=True)

# Train the second stage for 100 epochs on model1's extracted features.
for epoch in range(100):
    epoch_loss = 0.0
    seen = 0
    hits = 0
    for inputs, labels in train_loader2:
        optimizer2.zero_grad()
        outputs = model2(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer2.step()

        epoch_loss += loss.item()
        # Predicted class = index of the largest logit per row.
        predicted = outputs.data.argmax(dim=1)
        seen += labels.size(0)
        hits += (predicted == labels).sum().item()

    print('Model 2 - Epoch: %d, Loss: %.3f, Accuracy: %.3f' % (epoch + 1, epoch_loss / len(train_loader2), hits / seen))

6.3 训练第三个模型

# --- Build model3's training set from model2's outputs ---
# Bug fix: same as the previous stage — model2 must be in eval() mode
# during feature extraction so its dropout layers are disabled; otherwise
# model3 trains on randomly corrupted features. Training mode is restored
# afterwards.
train_X2 = []
train_y2 = []
model2.eval()
with torch.no_grad():
    for inputs, labels in train_loader2:
        train_X2.append(model2(inputs))
        train_y2.append(labels)
model2.train()  # restore training mode in case model2 is tuned later
train_X2 = torch.cat(train_X2, 0)
train_y2 = torch.cat(train_y2, 0)

# Wrap the extracted 4-dim features as the dataset for the final stage.
train_data3 = torch.utils.data.TensorDataset(train_X2, train_y2)
train_loader3 = torch.utils.data.DataLoader(train_data3, batch_size=64, shuffle=True)

# Train the final linear classifier for 100 epochs on model2's features.
for epoch in range(100):
    epoch_loss = 0.0
    seen = 0
    hits = 0
    for inputs, labels in train_loader3:
        optimizer3.zero_grad()
        outputs = model3(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer3.step()

        epoch_loss += loss.item()
        # Predicted class = index of the largest logit per row.
        predicted = outputs.data.argmax(dim=1)
        seen += labels.size(0)
        hits += (predicted == labels).sum().item()

    print('Model 3 - Epoch: %d, Loss: %.3f, Accuracy: %.3f' % (epoch + 1, epoch_loss / len(train_loader3), hits / seen))

代码示例中,三个模型分别进行了100轮训练,并在每轮训练结束后输出训练的损失值和准确率。可以通过修改代码中的参数,例如训练轮数、学习率等,来调整模型训练过程。

总结

本代码演示了使用PyTorch构建一个简单神经网络模型,并使用自定义数据集进行训练的过程。代码展示了数据预处理、模型定义、损失函数和优化器等重要环节,并输出每个模型训练过程中的损失值和准确率,可以作为学习PyTorch进行神经网络模型训练的入门示例。

使用PyTorch构建三层神经网络模型进行训练

原文地址: https://www.cveoy.top/t/topic/mz8g 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录