基于贝叶斯优化的双层神经网络模型用于HIV预测

该代码使用贝叶斯优化方法对双层神经网络模型进行超参数优化,实现对HIV感染状态的预测。

1. 数据预处理

import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch.optim import lr_scheduler
from bayes_opt import BayesianOptimization


# 读取Excel文件,并进行标准化处理。

# Load the expression matrix: column 0 is the label, remaining columns are features.
# BUG FIX: the original used a normal string literal for the Windows path; '\U'
# starts a unicode escape, so the line is a SyntaxError in Python 3. A raw
# string preserves the backslashes literally.
data = pd.read_excel(r'C:\Users\lenovo\Desktop\HIV\GSE6740GSE50011基因降低\output_data.xlsx')
X = data.iloc[:, 1:].values
y = data.iloc[:, 0].values
# Standardize features once here; from this point on X is already scaled.
scaler = StandardScaler()
X = scaler.fit_transform(X)

2. 定义数据集类

# 定义数据集类,继承自torch.utils.data.Dataset,重载__getitem__和__len__方法。

class MyDataset(Dataset):
    """Wrap a feature matrix X and label vector y as a torch Dataset.

    Each item is a (float32 feature tensor, int64 label tensor) pair.
    """

    def __init__(self, X, y):
        self.X = X
        self.y = y

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        features = torch.tensor(self.X[idx]).float()
        label = torch.tensor(self.y[idx]).long()
        return features, label

3. 定义第一个模型

# 定义第一个模型,使用贝叶斯优化对其进行超参数优化。

class Model1(nn.Module):
    """First-stage classifier: a 4-layer MLP mapping features to `output_size` logits.

    Three hidden Linear layers, each followed by ReLU and Dropout, then a
    final Linear projection. The forward pass returns raw (unactivated) logits.
    """

    def __init__(self, input_size, hidden_size, output_size, dropout=0.5):
        super(Model1, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, hidden_size)
        self.fc4 = nn.Linear(hidden_size, output_size)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # Each hidden layer: linear -> ReLU -> dropout.
        for hidden in (self.fc1, self.fc2, self.fc3):
            x = self.dropout(self.relu(hidden(x)))
        # Output layer emits raw logits (no activation).
        return self.fc4(x)


def train_model1(hidden_size, dropout):
    """Objective function for Bayesian optimization of Model1.

    Builds a fresh Model1, trains it on the module-level (X, y) for 100
    epochs with Adam + a plateau LR scheduler, and returns the final-epoch
    training accuracy (the quantity the optimizer maximizes).

    NOTE(review): this optimizes *training* accuracy — there is no
    validation split, so the search rewards overfitting.
    """
    net = Model1(input_size=X.shape[1], hidden_size=int(hidden_size),
                 output_size=4, dropout=dropout)
    loss_fn = nn.CrossEntropyLoss()
    opt = torch.optim.Adam(net.parameters(), lr=0.001)
    sched = lr_scheduler.ReduceLROnPlateau(opt, mode='min', factor=0.1,
                                           patience=10, verbose=True)
    loader = DataLoader(MyDataset(X, y), batch_size=32, shuffle=True)
    n_samples = len(loader.dataset)
    net.train()
    for epoch in range(100):
        loss_sum, correct = 0.0, 0
        for batch_x, batch_y in loader:
            opt.zero_grad()
            logits = net(batch_x)
            _, predicted = torch.max(logits, 1)
            batch_loss = loss_fn(logits, batch_y)
            batch_loss.backward()
            opt.step()
            loss_sum += batch_loss.item() * batch_x.size(0)
            correct += torch.sum(predicted == batch_y.data)
        # Scheduler is keyed to the *summed* epoch loss, matching the original.
        sched.step(loss_sum)
        epoch_loss = loss_sum / n_samples
        epoch_acc = correct.double() / n_samples
        print('Epoch {}/{} - Loss: {:.4f} Acc: {:.4f}'.format(epoch + 1, 100, epoch_loss, epoch_acc))
    return epoch_acc.item()


# 调用贝叶斯优化对模型进行超参数优化。


# Hyperparameter search space and Bayesian-optimization driver for Model1.
pbounds = dict(hidden_size=(100, 500), dropout=(0.3, 0.7))
optimizer = BayesianOptimization(f=train_model1, pbounds=pbounds,
                                 verbose=2, random_state=1)
optimizer.maximize(n_iter=10)


# 得到最优的超参数,使用其训练第一个模型。

# Retrain Model1 from scratch using the best hyperparameters found by the search.
best_params = optimizer.max['params']
hidden_size = int(best_params['hidden_size'])
dropout = best_params['dropout']
model1 = Model1(input_size=X.shape[1], hidden_size=hidden_size, output_size=4, dropout=dropout)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model1.parameters(), lr=0.001)
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10, verbose=True)
dataset = MyDataset(X, y)
dataloader = DataLoader(dataset, batch_size=32, shuffle=True)
model1.train()
num_epochs = 100
for epoch in range(num_epochs):
    loss_sum = 0.0
    correct = 0
    for batch_x, batch_y in dataloader:
        optimizer.zero_grad()
        logits = model1(batch_x)
        _, batch_preds = torch.max(logits, 1)
        batch_loss = criterion(logits, batch_y)
        batch_loss.backward()
        optimizer.step()
        loss_sum += batch_loss.item() * batch_x.size(0)
        correct += torch.sum(batch_preds == batch_y.data)
    # Plateau scheduler is keyed to the summed epoch loss, as in the search.
    scheduler.step(loss_sum)
    epoch_loss = loss_sum / len(dataset)
    epoch_acc = correct.double() / len(dataset)
    print('Epoch {}/{} - Loss: {:.4f} Acc: {:.4f}'.format(epoch + 1, num_epochs, epoch_loss, epoch_acc))

4. 定义第二个模型

# 定义第二个模型,使用贝叶斯优化对其进行超参数优化。

class Model2(nn.Module):
    """Second-stage classifier: a 4-layer MLP over Model1's logits.

    Same architecture shape as Model1 — three hidden Linear layers with
    ReLU + Dropout, then a final Linear projection. Returns raw logits
    (a single logit per sample when output_size=1, for BCEWithLogitsLoss).
    """

    def __init__(self, input_size, hidden_size, output_size, dropout=0.5):
        super(Model2, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, hidden_size)
        self.fc4 = nn.Linear(hidden_size, output_size)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        h = self.dropout(self.relu(self.fc1(x)))
        h = self.dropout(self.relu(self.fc2(h)))
        h = self.dropout(self.relu(self.fc3(h)))
        return self.fc4(h)


def train_model2(hidden_size, dropout):
    """Objective function for Bayesian optimization of Model2.

    Trains a fresh Model2 on the stacked features (module-level
    `model1_outputs`) against the binary labels `y` for 100 epochs and
    returns the final-epoch training accuracy.

    NOTE(review): relies on the module-level `model1_outputs`, which is only
    assigned further down the script — this works because the function is not
    called until after that assignment.
    """
    net = Model2(input_size=4, hidden_size=int(hidden_size), output_size=1, dropout=dropout)
    loss_fn = nn.BCEWithLogitsLoss()
    opt = torch.optim.Adam(net.parameters(), lr=0.001)
    sched = lr_scheduler.ReduceLROnPlateau(opt, mode='min', factor=0.1,
                                           patience=10, verbose=True)
    loader = DataLoader(MyDataset(model1_outputs, y), batch_size=32, shuffle=True)
    n_samples = len(loader.dataset)
    net.train()
    for epoch in range(100):
        loss_sum, correct = 0.0, 0
        for batch_x, batch_y in loader:
            opt.zero_grad()
            logits = net(batch_x)
            targets = batch_y.float().unsqueeze(1)
            batch_loss = loss_fn(logits, targets)
            batch_loss.backward()
            opt.step()
            loss_sum += batch_loss.item() * batch_x.size(0)
            correct += torch.sum(torch.round(torch.sigmoid(logits)) == targets)
        # Scheduler is keyed to the summed epoch loss, matching the original.
        sched.step(loss_sum)
        epoch_loss = loss_sum / n_samples
        epoch_acc = correct.double() / n_samples
        print('Epoch {}/{} - Loss: {:.4f} Acc: {:.4f}'.format(epoch + 1, 100, epoch_loss, epoch_acc))
    return epoch_acc.item()


# 将第一个模型的输出作为第二个模型的输入,调用贝叶斯优化对第二个模型进行超参数优化。


# Extract Model1's logits as Model2's input features.
# BUG FIXES versus the original:
#   1. model1 was still in train() mode, so active dropout corrupted the
#      extracted features (and graph tracking wasted memory) — switch to
#      eval() and wrap in no_grad().
#   2. the reused `dataloader` had shuffle=True, so the extracted rows were
#      in shuffled order while the label vector `y` (paired with them later
#      via MyDataset(model1_outputs, y)) stayed in original order — use a
#      dedicated shuffle=False loader to keep features and labels aligned.
model1.eval()
feature_loader = DataLoader(MyDataset(X, y), batch_size=32, shuffle=False)
model1_outputs = []
with torch.no_grad():
    for inputs, _ in feature_loader:
        model1_outputs.append(model1(inputs).numpy())
model1_outputs = np.concatenate(model1_outputs, axis=0)


# Hyperparameter search space and Bayesian-optimization driver for Model2.
pbounds = dict(hidden_size=(100, 500), dropout=(0.3, 0.7))
optimizer = BayesianOptimization(f=train_model2, pbounds=pbounds,
                                 verbose=2, random_state=1)
optimizer.maximize(n_iter=10)


# 得到最优的超参数,使用其训练第二个模型。

# Retrain Model2 from scratch using the best hyperparameters found by the search.
best_params = optimizer.max['params']
hidden_size = int(best_params['hidden_size'])
dropout = best_params['dropout']
model2 = Model2(input_size=4, hidden_size=hidden_size, output_size=1, dropout=dropout)
criterion = nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model2.parameters(), lr=0.001)
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10, verbose=True)
dataset = MyDataset(model1_outputs, y)
dataloader = DataLoader(dataset, batch_size=32, shuffle=True)
model2.train()
total_epochs = 100
for epoch in range(total_epochs):
    loss_sum = 0.0
    correct = 0
    for batch_x, batch_y in dataloader:
        optimizer.zero_grad()
        logits = model2(batch_x)
        targets = batch_y.float().unsqueeze(1)
        batch_loss = criterion(logits, targets)
        batch_loss.backward()
        optimizer.step()
        loss_sum += batch_loss.item() * batch_x.size(0)
        correct += torch.sum(torch.round(torch.sigmoid(logits)) == targets)
    # Plateau scheduler is keyed to the summed epoch loss, as in the search.
    scheduler.step(loss_sum)
    epoch_loss = loss_sum / len(dataset)
    epoch_acc = correct.double() / len(dataset)
    print('Epoch {}/{} - Loss: {:.4f} Acc: {:.4f}'.format(epoch + 1, total_epochs, epoch_loss, epoch_acc))

5. 结果输出

# 最后,将结果输出。


# Final evaluation of the stacked model (model1 -> model2) on the full dataset.
# BUG FIX: X was already standardized via scaler.fit_transform at load time,
# so the original's scaler.transform(X) standardized the data *twice* and the
# models were evaluated on inputs they were never trained on. Use X directly.
model1.eval()
model2.eval()
inputs = torch.tensor(X).float()
with torch.no_grad():  # inference only — no graph needed
    outputs1 = model1(inputs)
    outputs2 = model2(outputs1)
# Threshold sigmoid probabilities at 0.5 to get hard 0/1 predictions.
outputs = torch.round(torch.sigmoid(outputs2)).squeeze().numpy()
accuracy = np.sum(outputs == y) / len(y)
loss = nn.BCEWithLogitsLoss()(outputs2, torch.tensor(y).float().unsqueeze(1)).item()
print('Accuracy: {:.4f} Loss: {:.4f}'.format(accuracy, loss))

6. 代码修改

在最后输出结果时,需要先对 outputs2 调用 detach() 将其从计算图中分离,再调用 numpy() 方法,以避免出现 RuntimeError。

修改后的代码如下:

outputs = torch.round(torch.sigmoid(outputs2.detach())).squeeze().numpy()

总结

该代码使用贝叶斯优化方法对双层神经网络模型进行超参数优化,实现对HIV感染状态的预测。通过对模型进行训练和测试,可以得到模型的准确率和损失值,以此评估模型的性能。

基于贝叶斯优化的双层神经网络模型用于HIV预测

原文地址: https://www.cveoy.top/t/topic/m8ck 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录