Building and Training Multi-Layer Neural Network Models with PyTorch

Import the required libraries
import torch
import torch.nn as nn
import torch.utils.data  # TensorDataset / DataLoader are used below
import pandas as pd
from sklearn import preprocessing
Data preparation

Read in the Excel spreadsheet
# Use a raw string so the backslashes in the Windows path are not treated as escape sequences
data = pd.read_excel(r'C:\Users\lenovo\Desktop\HIV\GSE6740GSE50011基因降低\output_data.xlsx')
Standardize the data
# Column 0 holds the labels; standardize only the feature columns
data.iloc[:, 1:] = preprocessing.scale(data.iloc[:, 1:])
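preprocessing.scale standardizes each feature column to zero mean and unit variance; the label column is excluded above. A quick sanity check, as a sketch:

# Each feature column should now have mean ~0 and std ~1
# (ddof=0 matches the population std used by sklearn).
print(data.iloc[:, 1:].mean().round(3))
print(data.iloc[:, 1:].std(ddof=0).round(3))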
Build the dataset and DataLoader
X = torch.tensor(data.iloc[:, 1:].values, dtype=torch.float32)  # features
y = torch.tensor(data.iloc[:, 0].values, dtype=torch.long)      # labels from column 0
train_data = torch.utils.data.TensorDataset(X, y)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
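The code above trains on every row. If a held-out test set is wanted, one option is torch.utils.data.random_split; a minimal sketch assuming an 80/20 split (test_loader is a name introduced here, not part of the original code):

# Optionally hold out 20% of the rows for testing
full_dataset = torch.utils.data.TensorDataset(X, y)
n_test = int(0.2 * len(full_dataset))
train_set, test_set = torch.utils.data.random_split(
    full_dataset, [len(full_dataset) - n_test, n_test])
train_loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=64)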
Model definitions

Define the first model
class Model1(nn.Module):
    def __init__(self):
        super(Model1, self).__init__()
        self.fc1 = nn.Linear(16, 128)   # 16 input features
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 8)     # 8-dimensional output, fed to Model2
        self.dropout = nn.Dropout(p=0.5)

    def forward(self, x):
        x = torch.relu(self.fc1(x))
        x = self.dropout(x)
        x = torch.relu(self.fc2(x))
        x = self.dropout(x)
        x = self.fc3(x)
        return x
Define the second model
class Model2(nn.Module):
    def __init__(self):
        super(Model2, self).__init__()
        self.fc1 = nn.Linear(8, 32)    # takes Model1's 8-dimensional output
        self.fc2 = nn.Linear(32, 16)
        self.fc3 = nn.Linear(16, 4)    # 4-dimensional output, fed to Model3
        self.dropout = nn.Dropout(p=0.5)

    def forward(self, x):
        x = torch.relu(self.fc1(x))
        x = self.dropout(x)
        x = torch.relu(self.fc2(x))
        x = self.dropout(x)
        x = self.fc3(x)
        return x
Define the third model
class Model3(nn.Module):
    def __init__(self):
        super(Model3, self).__init__()
        self.fc1 = nn.Linear(4, 2)     # takes Model2's 4-dimensional output, 2 output classes

    def forward(self, x):
        x = self.fc1(x)
        return x
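The three models are meant to be stacked: 16 input features → 8 → 4 → 2 output classes. A quick shape check with a random dummy batch (a sketch, not part of the training pipeline):

# Pass a dummy batch of 5 samples through the chain to confirm the dimensions line up
dummy = torch.randn(5, 16)
out = Model3()(Model2()(Model1()(dummy)))
print(out.shape)  # expected: torch.Size([5, 2])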
Model initialization

Instantiate the three models
model1 = Model1()
model2 = Model2()
model3 = Model3()
Define the loss function and optimizers
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=0.001)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=0.001)
optimizer3 = torch.optim.Adam(model3.parameters(), lr=0.001)
Model training

Train the first model
for epoch in range(100):
    running_loss = 0.0
    total = 0
    correct = 0
    for inputs, labels in train_loader:
        optimizer1.zero_grad()
        outputs = model1(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer1.step()

        running_loss += loss.item()
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Epoch: %d, Loss: %.3f, Accuracy: %.3f' % (epoch + 1, running_loss / len(train_loader), correct / total))
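The accuracy printed inside the loop is computed with dropout active, since the model is in training mode. A cleaner training-set accuracy can be read off after training with the model switched to eval mode; a minimal sketch:

# Evaluate model1 on the training data with dropout disabled
model1.eval()
correct, total = 0, 0
with torch.no_grad():
    for inputs, labels in train_loader:
        preds = model1(inputs).argmax(dim=1)
        correct += (preds == labels).sum().item()
        total += labels.size(0)
print('Model1 train accuracy (eval mode): %.3f' % (correct / total))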
Train the second model
# Generate the second model's training inputs from the first model's outputs
model1.eval()  # disable dropout while extracting features
train_X = []
train_y = []
with torch.no_grad():
    for inputs, labels in train_loader:
        outputs = model1(inputs)
        train_X.append(outputs)
        train_y.append(labels)
train_X = torch.cat(train_X, 0)
train_y = torch.cat(train_y, 0)

train_data2 = torch.utils.data.TensorDataset(train_X, train_y)
train_loader2 = torch.utils.data.DataLoader(train_data2, batch_size=64, shuffle=True)
for epoch in range(100):
    running_loss = 0.0
    total = 0
    correct = 0
    for inputs, labels in train_loader2:
        optimizer2.zero_grad()
        outputs = model2(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer2.step()

        running_loss += loss.item()
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Epoch: %d, Loss: %.3f, Accuracy: %.3f' % (epoch + 1, running_loss / len(train_loader2), correct / total))
Train the third model
# Generate the third model's training inputs from the second model's outputs
model2.eval()  # disable dropout while extracting features
train_X2 = []
train_y2 = []
with torch.no_grad():
    for inputs, labels in train_loader2:
        outputs = model2(inputs)
        train_X2.append(outputs)
        train_y2.append(labels)
train_X2 = torch.cat(train_X2, 0)
train_y2 = torch.cat(train_y2, 0)

train_data3 = torch.utils.data.TensorDataset(train_X2, train_y2)
train_loader3 = torch.utils.data.DataLoader(train_data3, batch_size=64, shuffle=True)
for epoch in range(100):
    running_loss = 0.0
    total = 0
    correct = 0
    for inputs, labels in train_loader3:
        optimizer3.zero_grad()
        outputs = model3(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer3.step()

        running_loss += loss.item()
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Epoch: %d, Loss: %.3f, Accuracy: %.3f' % (epoch + 1, running_loss / len(train_loader3), correct / total))
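To obtain final predictions, the three trained models are chained end to end. A sketch that reports the stacked pipeline's accuracy on the training data (no test set is held out in the original code):

# Chain model1 -> model2 -> model3 and score the combined pipeline
model1.eval(); model2.eval(); model3.eval()
correct, total = 0, 0
with torch.no_grad():
    for inputs, labels in train_loader:
        logits = model3(model2(model1(inputs)))
        preds = logits.argmax(dim=1)
        correct += (preds == labels).sum().item()
        total += labels.size(0)
print('Stacked pipeline accuracy: %.3f' % (correct / total))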
Output

The code above does print the per-epoch accuracy and loss for each of the three models: the first model's metrics are printed inside the first training loop, the second model's inside the second loop, and the third model's inside the third loop.