import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve, auc
import matplotlib.pyplot as plt

Read the Excel spreadsheet

data = pd.read_excel('gene_expression_data.xlsx')
y = data.iloc[:, 0].values   # ground-truth labels in the first column
X = data.iloc[:, 1:].values  # gene expression values in the remaining columns

Split into training and test sets

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
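Gene-expression values can differ in scale by orders of magnitude, which makes fully connected networks harder to train. An optional preprocessing step, sketched here as an assumption rather than part of the original pipeline, is to standardize the features with scikit-learn's StandardScaler, fitting on the training split only:

from sklearn.preprocessing import StandardScaler

# Optional standardization (assumed addition): fit on the training split only,
# then apply the same transform to the test split to avoid data leakage.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)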

Convert to PyTorch tensors

X_train = torch.from_numpy(X_train).float()
y_train = torch.from_numpy(y_train).long()
X_test = torch.from_numpy(X_test).float()
y_test = torch.from_numpy(y_test).long()
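If a GPU is available, the tensors (and, later, each model) can be moved onto it. This is an optional sketch, not part of the original script; the plotting code below calls .numpy() on model outputs, which would additionally require .cpu() when running on a GPU.

# Optional (assumed addition): select a device and move the data onto it.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
X_train, y_train = X_train.to(device), y_train.to(device)
X_test, y_test = X_test.to(device), y_test.to(device)
# Each model would also need model.to(device) before training.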

Define the first model

class Model1(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(Model1, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)
        # Note: nn.CrossEntropyLoss applies log-softmax internally, so this explicit
        # Softmax mainly keeps the outputs in [0, 1] for the stacked models below.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        out = self.softmax(out)
        return out

Define the second model

class Model2(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(Model2, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)
        self.softmax = nn.Softmax(dim=1)  # same Softmax caveat as Model1

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        out = self.softmax(out)
        return out

Define the third model

class Model3(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(Model3, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=0.5)
        self.fc2 = nn.Linear(hidden_size, num_classes)
        self.sigmoid = nn.Sigmoid()  # single-output binary head, paired with nn.BCELoss

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.dropout(out)
        out = self.fc2(out)
        out = self.sigmoid(out)
        return out

Define the training function

def train(model, criterion, optimizer, X_train, y_train):
    model.train()
    optimizer.zero_grad()
    outputs = model(X_train)
    loss = criterion(outputs, y_train)
    loss.backward()
    optimizer.step()
    return loss.item()

Define the evaluation function

def test(model, X_test, y_test):
    model.eval()
    with torch.no_grad():
        outputs = model(X_test)
        if outputs.shape[1] == 1:
            # Single sigmoid output (Model3): threshold at 0.5 for the class label.
            prob = outputs.squeeze(1).numpy()
            predicted = (outputs.squeeze(1) > 0.5).long()
        else:
            # Multi-class softmax output: use column 1 as the positive-class score.
            _, predicted = torch.max(outputs.data, 1)
            prob = outputs[:, 1].numpy()
        accuracy = accuracy_score(y_test, predicted)
        fpr, tpr, _ = roc_curve(y_test, prob)
        roc_auc = auc(fpr, tpr)
    return accuracy, roc_auc

Train the first model

model1 = Model1(input_size=X_train.shape[1], hidden_size=64, num_classes=8)
criterion1 = nn.CrossEntropyLoss()
optimizer1 = optim.Adam(model1.parameters(), lr=0.001)
losses1 = []
for epoch in range(200):
    loss = train(model1, criterion1, optimizer1, X_train, y_train)
    losses1.append(loss)
    if epoch % 10 == 0:
        print('Epoch: {}, Loss: {:.4f}'.format(epoch+1, loss))
accuracy1, roc_auc1 = test(model1, X_test, y_test)
print('Accuracy of Model1 on test set: {:.2f}%'.format(accuracy1*100))
print('AUC of Model1 on test set: {:.4f}'.format(roc_auc1))
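Note that each call to train() performs one full-batch gradient step on the whole training set. For larger datasets, a mini-batch loop built on torch.utils.data.DataLoader is a common alternative; the sketch below is an assumed variant that would replace the Model1 loop above, not part of the original script.

from torch.utils.data import TensorDataset, DataLoader

# Hypothetical mini-batch variant of the Model1 training loop.
train_loader = DataLoader(TensorDataset(X_train, y_train), batch_size=32, shuffle=True)
for epoch in range(200):
    epoch_loss = 0.0
    for xb, yb in train_loader:
        epoch_loss += train(model1, criterion1, optimizer1, xb, yb)
    losses1.append(epoch_loss / len(train_loader))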

Train the second model

model2 = Model2(input_size=8, hidden_size=32, num_classes=4)
criterion2 = nn.CrossEntropyLoss()
optimizer2 = optim.Adam(model2.parameters(), lr=0.001)
losses2 = []
for epoch in range(200):
    X_train_ = model1(X_train).detach()  # Model2 trains on Model1's (frozen) outputs
    loss = train(model2, criterion2, optimizer2, X_train_, y_train)
    losses2.append(loss)
    if epoch % 10 == 0:
        print('Epoch: {}, Loss: {:.4f}'.format(epoch+1, loss))
X_test_ = model1(X_test).detach()
accuracy2, roc_auc2 = test(model2, X_test_, y_test)
print('Accuracy of Model2 on test set: {:.2f}%'.format(accuracy2*100))
print('AUC of Model2 on test set: {:.4f}'.format(roc_auc2))

Train the third model

model3 = Model3(input_size=4, hidden_size=16, num_classes=1)
criterion3 = nn.BCELoss()
optimizer3 = optim.Adam(model3.parameters(), lr=0.001)
losses3 = []
for epoch in range(200):
    X_train_ = model1(X_train).detach()
    X_train_ = model2(X_train_).detach()  # Model3 trains on Model2's (frozen) outputs
    # nn.BCELoss expects float targets with the same (N, 1) shape as the sigmoid output.
    loss = train(model3, criterion3, optimizer3, X_train_, y_train.float().unsqueeze(1))
    losses3.append(loss)
    if epoch % 10 == 0:
        print('Epoch: {}, Loss: {:.4f}'.format(epoch+1, loss))
X_test_ = model1(X_test).detach()
X_test_ = model2(X_test_).detach()
accuracy3, roc_auc3 = test(model3, X_test_, y_test)
print('Accuracy of Model3 on test set: {:.2f}%'.format(accuracy3*100))
print('AUC of Model3 on test set: {:.4f}'.format(roc_auc3))
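Because Model2 and Model3 consume the outputs of the earlier models, reusing the pipeline later requires all three sets of weights. A minimal sketch of saving and restoring them (the file name is a placeholder, not from the original):

# Save the stacked pipeline; all three models are needed at inference time.
torch.save({'model1': model1.state_dict(),
            'model2': model2.state_dict(),
            'model3': model3.state_dict()}, 'stacked_models.pt')

# Restore later into freshly constructed models with the same arguments.
checkpoint = torch.load('stacked_models.pt')
model1.load_state_dict(checkpoint['model1'])
model2.load_state_dict(checkpoint['model2'])
model3.load_state_dict(checkpoint['model3'])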

Plot the training loss curves

plt.figure()
plt.plot(losses1, label='Model1')
plt.plot(losses2, label='Model2')
plt.plot(losses3, label='Model3')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()

Plot the test accuracy of each model

accuracies = [accuracy1, accuracy2, accuracy3]
models = ['Model1', 'Model2', 'Model3']
plt.figure()
plt.bar(models, accuracies)
plt.ylim([0.5, 1])
plt.ylabel('Accuracy')
plt.show()

Plot ROC curves and compute AUC values

probs1 = model1(X_test).detach()[:, 1].numpy()
fpr1, tpr1, _ = roc_curve(y_test, probs1)
roc_auc1 = auc(fpr1, tpr1)

probs2 = model2(model1(X_test).detach()).detach()[:, 1].numpy()
fpr2, tpr2, _ = roc_curve(y_test, probs2)
roc_auc2 = auc(fpr2, tpr2)

probs3 = model3(model2(model1(X_test).detach()).detach()).detach().numpy().flatten()
fpr3, tpr3, _ = roc_curve(y_test, probs3)
roc_auc3 = auc(fpr3, tpr3)

plt.figure()
plt.plot(fpr1, tpr1, label='Model1 (AUC = {:.4f})'.format(roc_auc1))
plt.plot(fpr2, tpr2, label='Model2 (AUC = {:.4f})'.format(roc_auc2))
plt.plot(fpr3, tpr3, label='Model3 (AUC = {:.4f})'.format(roc_auc3))
plt.plot([0, 1], [0, 1], linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend()
plt.show()

Plot feature-importance charts

# Approximate per-input importance with the mean absolute first-layer weight,
# since only the first layer of each model is connected directly to its inputs.
weights1 = model1.fc1.weight.detach().abs().mean(dim=0).numpy()
weights2 = model2.fc1.weight.detach().abs().mean(dim=0).numpy()
weights3 = model3.fc1.weight.detach().abs().mean(dim=0).numpy()
plt.figure()
plt.bar(range(X.shape[1]), weights1)
plt.xlabel('Gene')
plt.ylabel('Weight')
plt.title('Model1 Feature Importance')
plt.show()
plt.figure()
plt.bar(range(8), weights2)
plt.xlabel('Model1 Prediction')
plt.ylabel('Weight')
plt.title('Model2 Feature Importance')
plt.show()
plt.figure()
plt.bar(range(4), weights3)
plt.xlabel('Model2 Prediction')
plt.ylabel('Weight')
plt.title('Model3 Feature Importance')
plt.show()
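First-layer weights are only a rough proxy for importance. A model-agnostic alternative is permutation importance, which shuffles one gene column at a time and measures the drop in test accuracy; this sketch for Model1 is an assumed addition, not part of the original analysis.

# Hypothetical permutation importance for Model1 on the test set.
baseline, _ = test(model1, X_test, y_test)
importances = []
for j in range(X_test.shape[1]):
    X_perm = X_test.clone()
    X_perm[:, j] = X_perm[torch.randperm(X_perm.shape[0]), j]  # shuffle gene j
    acc, _ = test(model1, X_perm, y_test)
    importances.append(baseline - acc)
# importances can then be plotted with plt.bar, as in the charts above.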

Plot a correlation heatmap

corr = np.corrcoef(X.T)  # gene-by-gene correlation matrix
plt.figure()
plt.imshow(corr, cmap='coolwarm')
plt.colorbar()
plt.show()

Plot a t-SNE embedding

from sklearn.manifold import TSNE

X_embedded = TSNE(n_components=2).fit_transform(X)
plt.figure()
plt.scatter(X_embedded[:, 0], X_embedded[:, 1], c=y)
plt.colorbar()
plt.show()
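The same embedding can also be coloured by the stacked pipeline's predicted probability rather than the true labels, as a visual check of how well the three models separate the classes. This is an assumed variant, not in the original.

# Hypothetical variant: colour the embedding by Model3's predicted probability.
with torch.no_grad():
    X_all = torch.from_numpy(X).float()
    probs = model3(model2(model1(X_all))).numpy().flatten()
plt.figure()
plt.scatter(X_embedded[:, 0], X_embedded[:, 1], c=probs, cmap='viridis')
plt.colorbar()
plt.show()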

Building a DNN with PyTorch to predict patient conditions from gene expression levels
