Import the required libraries

import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
from sklearn.preprocessing import StandardScaler

Read the Excel spreadsheet

# Use a raw string so the backslashes in the Windows path are not treated as escape sequences
data = pd.read_excel(r"C:\Users\lenovo\Desktop\HIV\DNN神经网络测试\data1.xlsx")

Standardize the data

scaler = StandardScaler()
data.iloc[:, 1:] = scaler.fit_transform(data.iloc[:, 1:])
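StandardScaler learns each feature column's mean and standard deviation during fit_transform and stores them on the fitted object, so the same scaling can be reapplied later with transform. A minimal sketch of that reuse, where "new_data.xlsx" is a placeholder file name used only for illustration:

# Reuse the statistics learned above instead of refitting (placeholder file name).
new_data = pd.read_excel(r"new_data.xlsx")
new_data.iloc[:, 1:] = scaler.transform(new_data.iloc[:, 1:])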

Define the first model

class Model1(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(Model1, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=0.5)
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.dropout(out)
        out = self.fc2(out)
        return out
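A quick way to sanity-check Model1's forward pass is to run a batch of random numbers through it; the sizes below are placeholders, not the values used later in this script:

demo_model = Model1(input_size=10, hidden_size=32, num_classes=4)
demo_batch = torch.randn(8, 10)      # 8 samples, 10 features each
print(demo_model(demo_batch).shape)  # torch.Size([8, 4]) -- one raw logit per class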

Define the second model

class Model2(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(Model2, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=0.5)
        self.fc2 = nn.Linear(hidden_size, num_classes)
        # Note: nn.CrossEntropyLoss applies log-softmax internally, so keeping this
        # softmax inside forward() means the loss sees already-normalized probabilities.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.dropout(out)
        out = self.fc2(out)
        out = self.softmax(out)
        return out
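One caveat with Model2: nn.CrossEntropyLoss applies log-softmax internally, so a model trained with it normally returns raw logits, and adding nn.Softmax in forward() means the loss effectively softmaxes the outputs twice, which tends to slow training. A small sketch of the relationship, with placeholder tensors:

import torch.nn.functional as F

logits = torch.randn(4, 2)                 # placeholder scores: 4 samples, 2 classes
labels = torch.tensor([0, 1, 1, 0])
ce = nn.CrossEntropyLoss()(logits, labels)
nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), labels)
print(torch.allclose(ce, nll))             # True: CrossEntropyLoss == log-softmax + NLLLoss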

Set the model hyperparameters

input_size = len(data.columns) - 1   # the first column holds the labels
hidden_size = 256
num_classes1 = 4
num_classes2 = 2
learning_rate = 0.001
num_epochs = 100

Instantiate the first model

model1 = Model1(input_size, hidden_size, num_classes1)

Define the loss function and optimizer

criterion1 = nn.CrossEntropyLoss()
optimizer1 = optim.Adam(model1.parameters(), lr=learning_rate)

Train the first model

for epoch in range(num_epochs):
    inputs = torch.Tensor(data.iloc[:, 1:].values)
    targets = torch.Tensor(data.iloc[:, 0].values).long()
    optimizer1.zero_grad()
    outputs = model1(inputs)
    loss = criterion1(outputs, targets)
    loss.backward()
    optimizer1.step()
    if (epoch + 1) % 100 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))

# Compute the accuracy of the first model
_, predicted = torch.max(outputs.data, 1)
total = targets.size(0)
correct = (predicted == targets).sum().item()
accuracy = correct / total
print('Accuracy of the first model: {:.2f}%'.format(accuracy * 100))
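The loop above performs full-batch gradient descent, rebuilding the tensors and updating on the entire table once per epoch. For larger tables a mini-batch variant is common; here is a minimal sketch assuming the same column layout (labels in column 0, features afterwards), with an illustrative batch size:

from torch.utils.data import TensorDataset, DataLoader

features = torch.Tensor(data.iloc[:, 1:].values)
labels = torch.Tensor(data.iloc[:, 0].values).long()
loader = DataLoader(TensorDataset(features, labels), batch_size=32, shuffle=True)

for epoch in range(num_epochs):
    for batch_x, batch_y in loader:
        optimizer1.zero_grad()
        batch_loss = criterion1(model1(batch_x), batch_y)
        batch_loss.backward()
        optimizer1.step()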

Use the first model's outputs as the input to the second model

model1.eval()  # disable dropout so the features passed to the second model are deterministic
inputs = model1(inputs).detach()

Instantiate the second model

model2 = Model2(num_classes1, hidden_size, num_classes2)

Define the loss function and optimizer

criterion2 = nn.CrossEntropyLoss()
optimizer2 = optim.Adam(model2.parameters(), lr=learning_rate)

Train the second model

for epoch in range(num_epochs):
    optimizer2.zero_grad()
    outputs = model2(inputs)
    loss = criterion2(outputs, targets)
    loss.backward()
    optimizer2.step()
    if (epoch + 1) % 100 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))

# Compute the accuracy of the second model
_, predicted = torch.max(outputs.data, 1)
total = targets.size(0)
correct = (predicted == targets).sum().item()
accuracy = correct / total
print('Accuracy of the second model: {:.2f}%'.format(accuracy * 100))
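Both accuracy numbers above are computed on the same rows the models were trained on, so they are training accuracy and will usually be optimistic. A less biased estimate requires holding rows out before any training; a minimal sketch using scikit-learn's train_test_split with an illustrative 80/20 split (the models would then be trained on train_df only):

from sklearn.model_selection import train_test_split

train_df, test_df = train_test_split(data, test_size=0.2, random_state=42)

# After training on train_df, evaluate the first model on the held-out rows.
test_inputs = torch.Tensor(test_df.iloc[:, 1:].values)
test_targets = torch.Tensor(test_df.iloc[:, 0].values).long()
with torch.no_grad():
    test_acc = (model1(test_inputs).argmax(dim=1) == test_targets).float().mean().item()
print('Held-out accuracy of the first model: {:.2f}%'.format(test_acc * 100))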

Output the class probabilities for each sample from the final trained model

model2.eval()  # disable dropout for the final inference pass
outputs = model2(inputs)
probabilities = outputs.detach().numpy()

print(probabilities)
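If the probabilities need to be turned into hard predictions or stored next to the original rows, something along these lines works; the column names below are placeholders:

predicted_labels = probabilities.argmax(axis=1)   # index of the highest probability per row
data['model2_pred'] = predicted_labels            # placeholder column names
data['model2_prob_class1'] = probabilities[:, 1]
print(data[['model2_pred', 'model2_prob_class1']].head())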


