# Import the required libraries

# BUG FIX: all imports were fused onto a single line (SyntaxError);
# one import statement per line, third-party imports grouped together.
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as Data
from sklearn.preprocessing import StandardScaler
from skopt import gp_minimize
from skopt.space import Real, Integer
from skopt.utils import use_named_args

# Load the Excel spreadsheet

# Load the gene-expression spreadsheet.
# BUG FIX: the path must be a raw string -- in a plain string the '\U' in
# 'C:\Users' starts a unicode escape and raises a SyntaxError.
data = pd.read_excel(r'C:\Users\lenovo\Desktop\HIV\GSE6740GSE50011基因降低\output_data.xlsx')

# Standardize the data

# Standardize every feature column (all columns except the label in
# column 0) to zero mean and unit variance, in place.
scaler = StandardScaler()
feature_block = data.iloc[:, 1:]
data.iloc[:, 1:] = scaler.fit_transform(feature_block)

# Prepare the training arrays (no held-out test set here)

# Features: every column after the first, as float32 for torch.
# Labels: the first column, as int64 class indices for CrossEntropyLoss.
feature_frame = data.iloc[:, 1:]
label_column = data.iloc[:, 0]
train_data = feature_frame.to_numpy(dtype=np.float32)
train_label = label_column.to_numpy(dtype=np.int64)

# Model hyperparameters

# Model hyperparameters.
input_size = train_data.shape[1]  # one input per gene/feature column
hidden_size1, hidden_size2, hidden_size3 = 128, 64, 32
output_size1 = 4  # 4-way classification head (model 1)
output_size2 = 1  # single logit for binary classification (model 2)
lr = 0.01
batch_size = 16
epochs = 50

# Define the first model

class Model1(nn.Module):
    """Four-class classifier: three ReLU hidden layers with dropout.

    Maps ``input_size`` gene-expression features to ``output_size1`` raw
    class logits (no softmax -- pair with ``nn.CrossEntropyLoss``).
    """

    def __init__(self, input_size, hidden_size1, hidden_size2, hidden_size3, output_size1):
        # BUG FIX: the constructor must be named __init__ and must call
        # super().__init__() -- the original lost the double underscores.
        super(Model1, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size1)
        self.relu1 = nn.ReLU(inplace=True)
        self.dropout1 = nn.Dropout(0.2)
        self.fc2 = nn.Linear(hidden_size1, hidden_size2)
        self.relu2 = nn.ReLU(inplace=True)
        self.dropout2 = nn.Dropout(0.2)
        self.fc3 = nn.Linear(hidden_size2, hidden_size3)
        self.relu3 = nn.ReLU(inplace=True)
        self.fc4 = nn.Linear(hidden_size3, output_size1)

    def forward(self, x):
        """Return raw logits of shape (batch, output_size1)."""
        x = self.fc1(x)
        x = self.relu1(x)
        x = self.dropout1(x)
        x = self.fc2(x)
        x = self.relu2(x)
        x = self.dropout2(x)
        x = self.fc3(x)
        x = self.relu3(x)
        x = self.fc4(x)
        return x

# Define the second model

class Model2(nn.Module):
    """Binary classifier stacked on top of a fresh ``Model1``.

    The input first passes through an internal ``Model1`` (producing
    ``output_size1`` logits), then through three ReLU hidden layers with
    dropout, ending in a single logit for ``nn.BCEWithLogitsLoss``.
    """

    def __init__(self, input_size, hidden_size1, hidden_size2, hidden_size3,
                 output_size1, output_size2):
        # BUG FIX: the constructor must be named __init__ and must call
        # super().__init__() -- the original lost the double underscores.
        super(Model2, self).__init__()
        self.model1 = Model1(input_size, hidden_size1, hidden_size2, hidden_size3, output_size1)
        self.fc1 = nn.Linear(output_size1, hidden_size1)
        self.relu1 = nn.ReLU(inplace=True)
        self.dropout1 = nn.Dropout(0.2)
        self.fc2 = nn.Linear(hidden_size1, hidden_size2)
        self.relu2 = nn.ReLU(inplace=True)
        self.dropout2 = nn.Dropout(0.2)
        self.fc3 = nn.Linear(hidden_size2, hidden_size3)
        self.relu3 = nn.ReLU(inplace=True)
        self.fc4 = nn.Linear(hidden_size3, output_size2)

    def forward(self, x):
        """Return a single raw logit per sample, shape (batch, output_size2)."""
        x = self.model1(x)
        x = self.fc1(x)
        x = self.relu1(x)
        x = self.dropout1(x)
        x = self.fc2(x)
        x = self.relu2(x)
        x = self.dropout2(x)
        x = self.fc3(x)
        x = self.relu3(x)
        x = self.fc4(x)
        return x

# Loss functions, training loop, and the two-stage training pipeline

# Loss functions: cross-entropy for the 4-class head, BCE-with-logits for
# the binary head.
criterion1 = nn.CrossEntropyLoss()
criterion2 = nn.BCEWithLogitsLoss()


def train(model, criterion, optimizer, data, label, batch_size, epochs):
    """Train ``model`` with plain mini-batch gradient descent.

    Args:
        model: the nn.Module to optimize (put into train mode here).
        criterion: loss callable taking (outputs, targets).
        optimizer: optimizer built over ``model.parameters()``.
        data: numpy array of inputs, shape (n_samples, n_features).
        label: numpy array of targets aligned with ``data``.
        batch_size: mini-batch size (the last batch may be smaller).
        epochs: number of full passes over ``data``.

    Prints the average training loss after each epoch.
    """
    model.train()
    for epoch in range(epochs):
        running_loss = 0.0
        for i in range(0, len(data), batch_size):
            inputs = torch.from_numpy(data[i:i + batch_size, :])
            targets = torch.from_numpy(label[i:i + batch_size])
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            # BUG FIX: weight by the actual batch length, not batch_size --
            # the final partial batch was being over-counted in the average.
            running_loss += loss.item() * inputs.size(0)
        print('Epoch: %d, Training Loss: %.3f' % (epoch + 1, running_loss / len(data)))


# Stage 1: train Model1 on the raw gene features (4-class output).
# BUG FIX: the optimizers were originally created before model1/model2
# existed (NameError); each optimizer is now created right after its model.
model1 = Model1(input_size, hidden_size1, hidden_size2, hidden_size3, output_size1)
optimizer1 = optim.Adam(model1.parameters(), lr=lr)
train(model1, criterion1, optimizer1, train_data, train_label, batch_size, epochs)

# Stage 2: Model2 is a binary classifier fed with Model1's 4-dim output.
# NOTE(review): model1 is still in train mode (dropout active) when
# producing train_data2 -- kept as in the original; confirm if intended.
train_data1 = torch.from_numpy(train_data).float()
train_data2 = model1(train_data1).detach().numpy()
train_label2 = train_label.reshape(-1, 1).astype(np.float32)

model2 = Model2(output_size1, hidden_size1, hidden_size2, hidden_size3, output_size1, output_size2)
optimizer2 = optim.Adam(model2.parameters(), lr=lr)
train(model2, criterion2, optimizer2, train_data2, train_label2, batch_size, epochs)

# Bayesian optimization of the network hyperparameters

# Hyperparameter search space explored by gp_minimize.
space = [
    Real(0.001, 0.1, name='lr'),
    Integer(32, 256, name='hidden_size1'),
    Integer(16, 128, name='hidden_size2'),
    Integer(8, 64, name='hidden_size3'),
    Integer(8, 16, name='batch_size'),
    Integer(10, 100, name='epochs'),
]

@use_named_args(space)
def objective(**params):
    """gp_minimize objective: retrain both models with the sampled
    hyperparameters and return the final binary training loss
    (lower is better).
    """
    lr = params['lr']
    hidden_size1 = params['hidden_size1']
    hidden_size2 = params['hidden_size2']
    hidden_size3 = params['hidden_size3']
    batch_size = int(params['batch_size'])  # skopt yields numpy ints
    epochs = int(params['epochs'])

    # Stage 1: 4-class model on the raw features.
    model1 = Model1(input_size, hidden_size1, hidden_size2, hidden_size3, output_size1)
    optimizer1 = optim.Adam(model1.parameters(), lr=lr)
    train(model1, criterion1, optimizer1, train_data, train_label, batch_size, epochs)

    # Stage 2: binary model on model1's 4-dim outputs.
    train_data1 = torch.from_numpy(train_data).float()
    train_data2 = model1(train_data1).detach().numpy()
    model2 = Model2(output_size1, hidden_size1, hidden_size2, hidden_size3, output_size1, output_size2)
    optimizer2 = optim.Adam(model2.parameters(), lr=lr)
    train(model2, criterion2, optimizer2, train_data2, train_label2, batch_size, epochs)

    # BUG FIX: score in eval mode with gradients disabled -- the original
    # evaluated with dropout still active, making the reported accuracy
    # and loss noisy.
    model2.eval()
    with torch.no_grad():
        outputs = model2(torch.from_numpy(train_data2).float())
        accuracy = ((outputs > 0).int().numpy().flatten() == train_label).mean()
        loss = criterion2(outputs, torch.from_numpy(train_label2).float()).item()
    print('Accuracy: %.3f, Loss: %.3f' % (accuracy, loss))
    return loss

# Run the Bayesian optimization (20 evaluations, fixed seed).
result = gp_minimize(objective, space, n_calls=20, random_state=0)
# BUG FIX: the original printed -result.fun labelled "Best Accuracy";
# result.fun is the minimized loss and its negation is not an accuracy.
print('Best Loss: %.3f' % result.fun)
print('Best hyperparameters: %s' % (result.x,))

# A deep neural network model that predicts patient disease status from gene-expression levels.

# Source: http://www.cveoy.top/t/topic/mOHT (copyright remains with the original author).