使用 Python 编写 DNN 神经网络根据基因表达量预测患者患病状态
本文介绍使用 Python 编写 DNN 神经网络,根据基因表达量预测患者是否患病。模型包含两个部分,第一个模型进行 4 分类,第二个模型进行二分类。使用贝叶斯优化对模型进行超参数优化,并加入 Dropout 层。
1. 导入库和读取数据
首先,导入需要的库,包括 pandas、numpy、sklearn、torch 等。
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch.optim import lr_scheduler
from bayes_opt import BayesianOptimization
读取 Excel 文件,并进行标准化处理。
# Load the gene-expression spreadsheet: column 0 is the disease-state
# label, the remaining columns are per-gene expression values.
# BUG FIX: the Windows path must be a raw string — in a normal string
# literal '\U' starts a unicode escape and raises a SyntaxError.
data = pd.read_excel(r'C:\Users\lenovo\Desktop\HIV\GSE6740GSE50011基因降低\output_data.xlsx')
X = data.iloc[:, 1:].values  # features: gene expression levels
y = data.iloc[:, 0].values   # targets: disease-state labels
# Standardize each gene to zero mean / unit variance (in place: X is
# replaced by its scaled version).
scaler = StandardScaler()
X = scaler.fit_transform(X)
2. 定义数据集类
定义数据集类,继承自 torch.utils.data.Dataset,重载 __getitem__ 和 __len__ 方法。
class MyDataset(Dataset):
    """Wrap a feature matrix and a label vector as a torch Dataset.

    Each item is a ``(features, label)`` pair where features are
    float32 tensors and labels are int64 tensors, as expected by
    ``nn.CrossEntropyLoss``.
    """

    def __init__(self, X, y):
        self.X = X
        self.y = y

    def __len__(self):
        return len(self.X)

    def __getitem__(self, index):
        features = torch.tensor(self.X[index], dtype=torch.float32)
        label = torch.tensor(self.y[index], dtype=torch.long)
        return features, label
3. 定义第一个模型
定义第一个模型,使用贝叶斯优化对其进行超参数优化。
class Model1(nn.Module):
    """Fully connected 4-way classifier.

    Three hidden layers of equal width, each followed by ReLU and
    Dropout; the output layer emits raw logits (no softmax — the
    cross-entropy loss applies it internally).
    """

    def __init__(self, input_size, hidden_size, output_size, dropout=0.5):
        super(Model1, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, hidden_size)
        self.fc4 = nn.Linear(hidden_size, output_size)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # Hidden stack: linear -> ReLU -> dropout, three times.
        for hidden in (self.fc1, self.fc2, self.fc3):
            x = self.dropout(self.relu(hidden(x)))
        # Output layer: raw logits.
        return self.fc4(x)
def train_model1(hidden_size, dropout):
    """Train a fresh Model1 for 100 epochs and return the final training accuracy.

    Objective function for BayesianOptimization: *hidden_size* and
    *dropout* arrive as floats, so the width is truncated to int.
    Reads the module-level arrays ``X`` and ``y``.

    NOTE(review): the returned score is *training* accuracy, so the
    hyper-parameter search rewards overfitting; a held-out validation
    split would be more robust.
    """
    model = Model1(input_size=X.shape[1], hidden_size=int(hidden_size),
                   output_size=4, dropout=dropout)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    # `verbose` was deprecated and then removed from ReduceLROnPlateau
    # in recent PyTorch releases, so it is no longer passed here.
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                               factor=0.1, patience=10)
    dataset = MyDataset(X, y)
    dataloader = DataLoader(dataset, batch_size=32, shuffle=True)

    model.train()
    epoch_acc = torch.tensor(0.0)
    for epoch in range(100):
        running_loss = 0.0
        running_corrects = 0
        for inputs, labels in dataloader:
            optimizer.zero_grad()
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels.data)
        epoch_loss = running_loss / len(dataset)
        epoch_acc = running_corrects.double() / len(dataset)
        # Step the plateau scheduler once per epoch on the mean epoch
        # loss (stepping on the raw running sum would scale the metric
        # with the dataset size).
        scheduler.step(epoch_loss)
        print('Epoch {}/{} - Loss: {:.4f} Acc: {:.4f}'.format(
            epoch + 1, 100, epoch_loss, epoch_acc))
    return epoch_acc.item()
4. 调用贝叶斯优化
调用贝叶斯优化对模型进行超参数优化。
# Search space for Model1's hyper-parameters: hidden-layer width and
# dropout probability.
pbounds = {'hidden_size': (100, 500), 'dropout': (0.3, 0.7)}

# Maximize the training accuracy returned by train_model1 over pbounds.
# NOTE(review): this binds the name `optimizer`, which is later reused
# for the Adam optimizer — read `optimizer.max` before that rebinding.
optimizer = BayesianOptimization(
    f=train_model1,
    pbounds=pbounds,
    verbose=2,
    random_state=1,
)
optimizer.maximize(n_iter=10)
5. 训练第一个模型
得到最优的超参数,使用其训练第一个模型。
# Retrieve the best hyper-parameters found by the Bayesian search.
# (These must be read before `optimizer` is rebound to the Adam
# optimizer below.)
hidden_size = int(optimizer.max['params']['hidden_size'])
dropout = optimizer.max['params']['dropout']

model1 = Model1(input_size=X.shape[1], hidden_size=hidden_size,
                output_size=4, dropout=dropout)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model1.parameters(), lr=0.001)
# `verbose` was removed from ReduceLROnPlateau in recent PyTorch.
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                           factor=0.1, patience=10)
dataset = MyDataset(X, y)
dataloader = DataLoader(dataset, batch_size=32, shuffle=True)

# Final training run for the 4-class model with the tuned settings.
model1.train()
for epoch in range(100):
    running_loss = 0.0
    running_corrects = 0
    for inputs, labels in dataloader:
        optimizer.zero_grad()
        outputs = model1(inputs)
        _, preds = torch.max(outputs, 1)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item() * inputs.size(0)
        running_corrects += torch.sum(preds == labels.data)
    epoch_loss = running_loss / len(dataset)
    epoch_acc = running_corrects.double() / len(dataset)
    # Step the plateau scheduler once per epoch on the mean epoch loss.
    scheduler.step(epoch_loss)
    print('Epoch {}/{} - Loss: {:.4f} Acc: {:.4f}'.format(
        epoch + 1, 100, epoch_loss, epoch_acc))
6. 定义第二个模型
定义第二个模型,使用贝叶斯优化对其进行超参数优化。
class Model2(nn.Module):
    """Fully connected binary classification head.

    Same topology as Model1 (three equal-width hidden layers with ReLU
    and Dropout), but intended for a single-logit output consumed by
    ``nn.BCEWithLogitsLoss``.
    """

    def __init__(self, input_size, hidden_size, output_size, dropout=0.5):
        super(Model2, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, hidden_size)
        self.fc4 = nn.Linear(hidden_size, output_size)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # Hidden stack: linear -> ReLU -> dropout, three times.
        for hidden in (self.fc1, self.fc2, self.fc3):
            x = self.dropout(self.relu(hidden(x)))
        # Output layer: a raw logit per sample (no sigmoid here).
        return self.fc4(x)
def train_model2(hidden_size, dropout):
    """Train a fresh Model2 for 100 epochs and return the final training accuracy.

    Objective function for BayesianOptimization.  Inputs are the 4
    logits produced by model1 (module-level ``model1_outputs``);
    targets are the module-level labels ``y`` interpreted as 0/1.

    NOTE(review): ``model1_outputs`` is assigned later in the file —
    this function must only be *called* after that assignment runs.
    """
    model2 = Model2(input_size=4, hidden_size=int(hidden_size),
                    output_size=1, dropout=dropout)
    criterion = nn.BCEWithLogitsLoss()  # expects raw logits + float targets
    optimizer = torch.optim.Adam(model2.parameters(), lr=0.001)
    # `verbose` was removed from ReduceLROnPlateau in recent PyTorch.
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                               factor=0.1, patience=10)
    dataset = MyDataset(model1_outputs, y)
    dataloader = DataLoader(dataset, batch_size=32, shuffle=True)

    model2.train()
    epoch_acc = torch.tensor(0.0)
    for epoch in range(100):
        running_loss = 0.0
        running_corrects = 0
        for inputs, labels in dataloader:
            optimizer.zero_grad()
            outputs = model2(inputs)
            # BCEWithLogitsLoss needs float targets shaped (batch, 1).
            loss = criterion(outputs, labels.float().unsqueeze(1))
            loss.backward()
            optimizer.step()
            running_loss += loss.item() * inputs.size(0)
            # Threshold the sigmoid at 0.5 to count correct predictions.
            running_corrects += torch.sum(
                torch.round(torch.sigmoid(outputs)) == labels.unsqueeze(1).float())
        epoch_loss = running_loss / len(dataset)
        epoch_acc = running_corrects.double() / len(dataset)
        # Step the plateau scheduler once per epoch on the mean epoch loss.
        scheduler.step(epoch_loss)
        print('Epoch {}/{} - Loss: {:.4f} Acc: {:.4f}'.format(
            epoch + 1, 100, epoch_loss, epoch_acc))
    return epoch_acc.item()
7. 训练第二个模型
将第一个模型的输出作为第二个模型的输入,调用贝叶斯优化对第二个模型进行超参数优化。
# Feature extraction: run the trained 4-class model over the data in
# the ORIGINAL row order so each output row still lines up with y.
# BUG FIX (two issues in the original):
#  1. It iterated the shuffle=True training dataloader, which scrambles
#     the rows relative to the labels before MyDataset(model1_outputs, y).
#  2. model1 was still in train mode, so dropout randomly zeroed
#     activations during feature extraction.
model1.eval()
with torch.no_grad():
    model1_outputs = model1(torch.tensor(X).float()).numpy()

# Second Bayesian search: tune the binary head on model1's logits.
pbounds = {'hidden_size': (100, 500), 'dropout': (0.3, 0.7)}
optimizer = BayesianOptimization(
    f=train_model2,
    pbounds=pbounds,
    verbose=2,
    random_state=1,
)
optimizer.maximize(n_iter=10)
得到最优的超参数,使用其训练第二个模型。
# Retrieve the best hyper-parameters from the second Bayesian search
# (read before `optimizer` is rebound to the Adam optimizer below).
hidden_size = int(optimizer.max['params']['hidden_size'])
dropout = optimizer.max['params']['dropout']

model2 = Model2(input_size=4, hidden_size=hidden_size,
                output_size=1, dropout=dropout)
criterion = nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model2.parameters(), lr=0.001)
# `verbose` was removed from ReduceLROnPlateau in recent PyTorch.
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                           factor=0.1, patience=10)
dataset = MyDataset(model1_outputs, y)
dataloader = DataLoader(dataset, batch_size=32, shuffle=True)

# Final training run for the binary head with the tuned settings.
model2.train()
for epoch in range(100):
    running_loss = 0.0
    running_corrects = 0
    for inputs, labels in dataloader:
        optimizer.zero_grad()
        outputs = model2(inputs)
        # BCEWithLogitsLoss needs float targets shaped (batch, 1).
        loss = criterion(outputs, labels.float().unsqueeze(1))
        loss.backward()
        optimizer.step()
        running_loss += loss.item() * inputs.size(0)
        running_corrects += torch.sum(
            torch.round(torch.sigmoid(outputs)) == labels.unsqueeze(1).float())
    epoch_loss = running_loss / len(dataset)
    epoch_acc = running_corrects.double() / len(dataset)
    # Step the plateau scheduler once per epoch on the mean epoch loss.
    scheduler.step(epoch_loss)
    print('Epoch {}/{} - Loss: {:.4f} Acc: {:.4f}'.format(
        epoch + 1, 100, epoch_loss, epoch_acc))
8. 评估模型
最后,将结果输出。
# Evaluate the stacked models on the full dataset.
# BUG FIX: X was already standardized in place by scaler.fit_transform
# earlier, so calling scaler.transform(X) again would standardize the
# data twice and feed the models inputs they were never trained on.
inputs = torch.tensor(X).float()
model1.eval()
model2.eval()
with torch.no_grad():  # inference only — no gradients needed
    outputs1 = model1(inputs)
    outputs2 = model2(outputs1)
# Threshold the sigmoid at 0.5 to get hard 0/1 predictions.
predictions = torch.round(torch.sigmoid(outputs2)).squeeze().numpy()
accuracy = np.sum(predictions == y) / len(y)
loss = nn.BCEWithLogitsLoss()(outputs2, torch.tensor(y).float().unsqueeze(1)).item()
print('Accuracy: {:.4f} Loss: {:.4f}'.format(accuracy, loss))
总结
本文介绍了如何使用 Python 编写 DNN 神经网络,根据基因表达量预测患者是否患病。模型使用了贝叶斯优化进行超参数优化,并加入了 Dropout 层。最终的模型能够有效地预测患者的患病状态。
注意:
- 本文仅供参考,实际应用中需要根据具体情况进行调整。
- 模型的性能受数据质量和模型结构的影响。
- 为了提高模型的泛化能力,可以考虑使用交叉验证等方法。
- 为了确保模型的可靠性,建议使用更多的数据进行训练和评估。
- 使用该模型进行预测时,需要注意其局限性,不要将其用于诊断或治疗。
希望本文对您有所帮助!
原文地址: https://www.cveoy.top/t/topic/m8bM 著作权归作者所有。请勿转载和采集!