import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from bayes_opt import BayesianOptimization
from sklearn.metrics import roc_curve, auc
from torch.utils.data import Dataset, DataLoader

# Load the data

# Load the spreadsheet of samples (column 'state' = label, rest = features).
# BUG FIX: the path must be a raw string — in a plain string literal "\U"
# begins a \UXXXXXXXX unicode escape, so the original line was a SyntaxError.
data = pd.read_excel(r'C:\Users\lenovo\Desktop\HIV\DNN神经网络测试\data1.xlsx')

# Extract the labels

# Binary disease-state labels as float32 (required by BCEWithLogitsLoss).
label = data['state'].to_numpy(dtype=np.float32)

# Extract the feature matrix

# Drop the first column (labels) and keep the gene-expression features
# as a float32 matrix; rebinds `data` from DataFrame to ndarray.
data = data.iloc[:, 1:].to_numpy(dtype=np.float32)

# Dataset class

class MyDataset(Dataset):
    """Pairs a feature array with a label array for use with a DataLoader."""

    def __init__(self, data, label):
        # BUG FIX: the original defined `init` (no underscores), so the
        # constructor was never invoked and instances had no attributes.
        self.data = data
        self.label = label

    def __getitem__(self, index):
        """Return the (features, label) pair at `index`."""
        return self.data[index], self.label[index]

    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.data)

# Define the DNN model

class DNN(nn.Module):
    """Four-layer fully connected network with a self-attention front end.

    Args:
        input_size: number of input features per sample.
        hidden_size: width of the three hidden layers.
        num_classes: output width (1 for binary logits).
    """

    def __init__(self, input_size, hidden_size, num_classes):
        # BUG FIX: the original defined `init` and called `super().init()`,
        # so no layers were ever registered.
        super(DNN, self).__init__()

        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(hidden_size, hidden_size)
        self.relu3 = nn.ReLU()
        self.fc4 = nn.Linear(hidden_size, num_classes)

        # Single-head self-attention applied to the raw input in forward().
        # BUG FIX: embed_dim must match the width of the tensor it receives
        # (input_size); the original used hidden_size, which raises a
        # runtime error whenever hidden_size != input_size.
        self.att = nn.MultiheadAttention(input_size, 1)

    def forward(self, x):
        # Self-attention over the input. NOTE(review): for a 2-D x,
        # nn.MultiheadAttention treats it as an unbatched (seq_len, embed)
        # sequence, i.e. samples in the batch attend to each other — unusual;
        # confirm this cross-sample attention is actually intended.
        x, _ = self.att(x, x, x)

        out = self.fc1(x)
        out = self.relu1(out)
        out = self.fc2(out)
        out = self.relu2(out)
        out = self.fc3(out)
        out = self.relu3(out)
        out = self.fc4(out)

        return out

# Define the training function

def train(hidden_size, learning_rate):
    """Train a DNN on the module-level `data`/`label` arrays.

    Args:
        hidden_size: hidden-layer width (may arrive as a float from the
            Bayesian optimizer; truncated to int).
        learning_rate: Adam learning rate.

    Returns:
        1-D numpy array of per-sample predicted probabilities.
    """
    # Hyperparameters.
    input_size = 16
    num_classes = 1
    batch_size = len(data)  # full-batch training
    num_epochs = 1000

    # Dataset and loader.
    dataset = MyDataset(data, label)
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

    # Model, loss, and optimizer.
    model = DNN(input_size, int(hidden_size), num_classes)
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Tensor views of the full dataset, built once for evaluation.
    # BUG FIX: the original called torch.from_numpy(data) after the batch
    # loop, where `data` had been rebound to a tensor by the loop variable.
    data_t = torch.from_numpy(data)
    label_t = torch.from_numpy(label).view(-1, 1)

    for epoch in range(num_epochs):
        # BUG FIX: the original unpacked batches into `data, label`, which
        # made those names locals for the whole function — the earlier
        # `MyDataset(data, label)` line then raised UnboundLocalError.
        for batch_x, batch_y in loader:
            # Forward pass.
            output = model(batch_x)

            # Loss against column-vector targets (matches output shape).
            loss = criterion(output, batch_y.view(-1, 1))

            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # Report full-dataset accuracy once per epoch.
        with torch.no_grad():
            output = torch.sigmoid(model(data_t))
            predict = (output > 0.5).float()
            acc = torch.mean((predict == label_t).float())
            print('Epoch [{}/{}], Loss: {:.4f}, Accuracy: {:.4f}'.format(
                epoch + 1, num_epochs, loss.item(), acc.item()))

    # Per-sample probabilities on the full dataset.
    with torch.no_grad():
        prob = torch.sigmoid(model(data_t)).numpy().flatten()

    return prob

# Define the objective function for optimization

def optimize(hidden_size, learning_rate):
    """Bayesian-optimization objective: train once, score by ROC AUC.

    Args:
        hidden_size: hidden-layer width proposed by the optimizer.
        learning_rate: learning rate proposed by the optimizer.

    Returns:
        ROC AUC of the trained model's probabilities against the
        module-level `label` array (higher is better).
    """
    prob = train(hidden_size, learning_rate)
    fpr, tpr, _ = roc_curve(label, prob)
    roc_auc = auc(fpr, tpr)
    return roc_auc

# Run Bayesian optimization

# Search bounds for the two tuned hyperparameters.
pbounds = {'hidden_size': (5, 50), 'learning_rate': (0.001, 0.1)}

# Maximize ROC AUC over `pbounds`; verbose=2 prints every evaluation.
optimizer = BayesianOptimization(f=optimize, pbounds=pbounds, verbose=2)
optimizer.maximize(n_iter=10)

# Print the best parameters and the best score

# Best hyperparameter set found and its target (ROC AUC) value.
print(optimizer.max)

# Building a DNN in Python to predict patient disease state:
# based on gene expression levels and Bayesian optimization.
#
# Original article: https://www.cveoy.top/t/topic/ndlA — copyright belongs
# to the author; do not repost or scrape.
#
# (Site footer: "Free AI — click me, no registration or login required.")