import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd

Read the Excel spreadsheet

data = pd.read_excel(r'C:\Users\lenovo\Desktop\HIV\DNN神经网络测试\output_data.xlsx')  # raw string: otherwise '\U' in the Windows path is an invalid escape

Extract the input features and labels

features = data.iloc[:, 1:].values  # every row, columns 1 onward (the features)
labels = data.iloc[:, 0].values     # every row, column 0 (the labels)
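A quick sanity check (a minimal sketch; the expected column counts follow from in_dim = 16 below) that the spreadsheet really has the label in column 0 and the features in the remaining columns:

print(data.shape)      # expected (n_samples, 17): 1 label column + 16 feature columns
print(features.shape)  # expected (n_samples, 16)
print(labels[:5])      # labels should be 0/1 for BCELoss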

Convert the labels to a tensor

labels = torch.from_numpy(labels).float()  # float targets, as required by BCELoss

Split the data into training and test sets

train_features = torch.Tensor(features[:80])
train_labels = labels[:80]
test_features = torch.Tensor(features[80:])
test_labels = labels[80:]
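The fixed cut at row 80 assumes the spreadsheet rows are already shuffled and that both classes appear on each side of the cut. A minimal sketch of a shuffled, stratified split instead (assuming scikit-learn is available; random_state=42 is an arbitrary seed):

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    features, labels.numpy(), test_size=0.2, stratify=labels.numpy(), random_state=42)
train_features = torch.Tensor(X_train)
test_features = torch.Tensor(X_test)
train_labels = torch.from_numpy(y_train).float()
test_labels = torch.from_numpy(y_test).float()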

Define the attention mechanism class

class Attention(nn.Module):
    def __init__(self, in_dim):
        super(Attention, self).__init__()
        self.embed = nn.Linear(in_dim, in_dim)
        # Added: project the pooled (avg, max) statistics back to in_dim so the
        # attention weights match the input feature dimension. Without this, the
        # original code raised a shape error at torch.cat / torch.mul.
        self.proj = nn.Linear(2, in_dim)

    def forward(self, x):
        # Embed each input feature vector
        embed_x = self.embed(x)
        # Mean of the embedded features (keepdim keeps the result 2-D)
        avg_x = torch.mean(embed_x, dim=1, keepdim=True)
        # Max of the embedded features
        max_x = torch.max(embed_x, dim=1, keepdim=True)[0]
        # Concatenate the mean and max statistics -> shape (batch, 2)
        concat_x = torch.cat((avg_x, max_x), dim=1)
        # Project to in_dim and squash to (0, 1) with a sigmoid
        out = torch.sigmoid(self.proj(concat_x))
        # Return the attention weights, shape (batch, in_dim)
        return out
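A quick shape check of the attention module (a minimal sketch; batch size 4 is arbitrary):

attn = Attention(16)
weights = attn(torch.randn(4, 16))
print(weights.shape)  # torch.Size([4, 16]) -- one weight per input feature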

Define the neural network class

class DNN(nn.Module):
    def __init__(self, in_dim, hidden1_dim, hidden2_dim, hidden3_dim):
        super(DNN, self).__init__()
        # Three hidden layers and one output layer
        self.hidden1 = nn.Linear(in_dim, hidden1_dim)
        self.hidden2 = nn.Linear(hidden1_dim, hidden2_dim)
        self.hidden3 = nn.Linear(hidden2_dim, hidden3_dim)
        self.output = nn.Linear(hidden3_dim, 1)
        # Attention mechanism over the input features
        self.attention = Attention(in_dim)

    def forward(self, x):
        # Compute attention weights and rescale the input features with them
        attention_weight = self.attention(x)
        attention_x = torch.mul(x, attention_weight)
        # Pass through the three hidden layers with ReLU activations
        hidden1_out = torch.relu(self.hidden1(attention_x))
        hidden2_out = torch.relu(self.hidden2(hidden1_out))
        hidden3_out = torch.relu(self.hidden3(hidden2_out))
        # Sigmoid on the output layer yields a probability in (0, 1)
        out = torch.sigmoid(self.output(hidden3_out))
        return out
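A matching smoke test for the full network (a minimal sketch with random input):

net = DNN(16, 8, 4, 8)
probs = net(torch.randn(4, 16))
print(probs.shape)  # torch.Size([4, 1]), values in (0, 1)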

Define the model parameters

in_dim = 16       # dimensionality of the input features
hidden1_dim = 8   # neurons in the first hidden layer
hidden2_dim = 4   # neurons in the second hidden layer
hidden3_dim = 8   # neurons in the third hidden layer
lr = 0.01         # learning rate
epochs = 1000     # number of training epochs

Instantiate the model

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = DNN(in_dim, hidden1_dim, hidden2_dim, hidden3_dim).to(device)  # the original left the model on the CPU while moving inputs to the GPU, causing a device mismatch

Define the loss function and optimizer

criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=lr)
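A numerically safer alternative, shown here only as a sketch: nn.BCEWithLogitsLoss folds the sigmoid into the loss, so the final torch.sigmoid in DNN.forward would be dropped and applied only when probabilities are needed:

criterion = nn.BCEWithLogitsLoss()  # expects raw logits rather than sigmoid outputs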

Train the model

for epoch in range(epochs):
    # Move the inputs and labels to the same device as the model
    train_features = train_features.to(device)
    train_labels = train_labels.to(device)
    # Forward pass
    output = model(train_features)
    # Compute the loss
    loss = criterion(output, train_labels.unsqueeze(1))
    # Backward pass and parameter update
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # Compute the training accuracy
    predict_labels = (output >= 0.5).squeeze().float()
    accuracy = torch.mean((predict_labels == train_labels).float())
    # Report the loss and accuracy for this epoch
    print('Epoch [{}/{}], Loss: {:.4f}, Accuracy: {:.4f}'.format(epoch+1, epochs, loss.item(), accuracy.item()))

Test the model

model.eval()  # good practice before evaluation, even without dropout/batchnorm layers
with torch.no_grad():
    # Move the test inputs and labels to the model's device
    test_features = test_features.to(device)
    test_labels = test_labels.to(device)
    # Forward pass
    output = model(test_features)
    # Compute the test loss
    test_loss = criterion(output, test_labels.unsqueeze(1))
    # Compute the test accuracy
    predict_labels = (output >= 0.5).squeeze().float()
    test_accuracy = torch.mean((predict_labels == test_labels).float())
    # Report the loss and accuracy on the test set
    print('Test Loss: {:.4f}, Test Accuracy: {:.4f}'.format(test_loss.item(), test_accuracy.item()))
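With only the rows after index 80 as a test set, accuracy alone can be misleading if the classes are imbalanced. A minimal sketch (assuming scikit-learn is available) of adding precision and recall:

from sklearn.metrics import precision_score, recall_score

y_true = test_labels.cpu().numpy()
y_pred = predict_labels.cpu().numpy()
print('Precision: {:.4f}, Recall: {:.4f}'.format(
    precision_score(y_true, y_pred), recall_score(y_true, y_pred)))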

Output each sample's predicted probability from the final trained model

with torch.no_grad():
    # Move all of the features to the model's device
    features = torch.Tensor(features).to(device)
    # Forward pass
    output = model(features)
    # Print the predicted probability for every sample
    print('Output Probability: ', output.squeeze().tolist())
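To reuse the trained weights later, a minimal sketch of saving and restoring the state dict (the filename dnn_attention.pt is hypothetical):

torch.save(model.state_dict(), 'dnn_attention.pt')

restored = DNN(in_dim, hidden1_dim, hidden2_dim, hidden3_dim)
restored.load_state_dict(torch.load('dnn_attention.pt', map_location='cpu'))
restored.eval()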


