Using Python, write a DNN neural network that predicts whether a patient is diseased from gene expression levels, satisfying the following requirements:
1. Read an Excel spreadsheet whose first row contains the patient state flag (state: 1 = diseased, 0 = normal) and the gene names; column 0 holds the ground-truth disease label, and the remaining columns hold each gene and its expression level.
2. Define two models.
3. All model parameters should be adjustable.
4. The models are called in two stages: the first call is to the first model, whose input size is the number of genes and whose output is a 4-class classification.
5. The second call is to the second model, a binary classifier whose input is the output of the first model.
7. Provide detailed comments.
8. Model [truncated in the original]
# Import the required libraries
import torch
import torch.nn as nn
import pandas as pd
from sklearn import preprocessing
import numpy as np
# Read the Excel spreadsheet
# read_excel uses the first row (state flag and gene names) as the header;
# column 0 is the ground-truth disease label, the remaining columns are gene expression levels.
data = pd.read_excel(r'C:\Users\lenovo\Desktop\HIV\DNN神经网络测试\data1.xlsx')  # raw string so backslashes are not treated as escapes
x_data = data.iloc[:, 1:].values.astype(float)  # drop the label column, keep the gene expression values
y_data = data.iloc[:, 0].values.astype(float)   # column 0: disease label (1 = diseased, 0 = normal)
# Standardize the data (zero mean, unit variance per gene)
scaler = preprocessing.StandardScaler().fit(x_data)
x_data = scaler.transform(x_data)
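As a side note, the fitted scaler can be reused to preprocess new samples at prediction time. A minimal sketch; new_samples is a hypothetical array with the same gene columns as the training data:

# Hypothetical new samples (same number of gene columns as the training data)
new_samples = np.random.rand(5, x_data.shape[1])
# Apply the scaler fitted on the training data; do NOT refit it on new data
new_samples = scaler.transform(new_samples)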
# Define the first model: a 4-class classifier over the gene expression input
class Model1(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(Model1, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)   # input layer -> hidden layer
        self.relu = nn.ReLU()                           # non-linear activation
        self.dropout = nn.Dropout(p=0.5)                # dropout for regularization
        self.fc2 = nn.Linear(hidden_size, num_classes)  # hidden layer -> 4-class output

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.dropout(out)
        out = self.fc2(out)
        return out
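A quick sanity check of the first model's interface, as a minimal sketch with hypothetical sizes (not part of the training script):

# Hypothetical dimensions: 100 genes, hidden width 32, 4 output classes
demo_model = Model1(input_size=100, hidden_size=32, num_classes=4)
demo_out = demo_model(torch.randn(8, 100))  # batch of 8 samples
print(demo_out.shape)                       # torch.Size([8, 4])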
# Define the second model: a binary classifier that takes the first model's output as input
class Model2(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(Model2, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)   # input layer (4-dim) -> hidden layer
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=0.5)
        self.fc2 = nn.Linear(hidden_size, num_classes)  # hidden layer -> single output unit
        self.sigmoid = nn.Sigmoid()                     # squash the output to a probability in (0, 1)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.dropout(out)
        out = self.fc2(out)
        out = self.sigmoid(out)
        return out
# Define the model hyperparameters (all adjustable)
input_size = x_data.shape[1]   # number of genes
hidden_size = 256              # hidden layer width
num_classes1 = 4               # first model: 4-class output
num_classes2 = 1               # second model: single sigmoid output (binary)
learning_rate = 0.001
num_epochs = 1000
# Instantiate the models
model1 = Model1(input_size, hidden_size, num_classes1)
model2 = Model2(num_classes1, hidden_size, num_classes2)  # its input size is the first model's output size
# Define the loss functions and optimizers
criterion1 = nn.CrossEntropyLoss()  # multi-class loss for the first model
criterion2 = nn.BCELoss()           # binary cross-entropy for the second model
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)
# Convert the data to PyTorch tensors
x_data = torch.Tensor(x_data)      # float features
y_data = torch.LongTensor(y_data)  # integer class labels (0 or 1)
# Train the first model
for epoch in range(num_epochs):
    # Forward pass
    outputs = model1(x_data)
    loss = criterion1(outputs, y_data)

    # Backward pass and parameter update
    optimizer1.zero_grad()
    loss.backward()
    optimizer1.step()

    # Compute the training accuracy
    _, predicted = torch.max(outputs.data, 1)
    total = predicted.size(0)
    correct = predicted.eq(y_data.data).cpu().sum()
    accuracy = 100.0 * correct / total

    # Print progress every 100 epochs
    if (epoch + 1) % 100 == 0:
        print('Epoch [%d/%d], Loss: %.4f, Accuracy: %.2f %%' % (epoch + 1, num_epochs, loss.item(), accuracy))
# Use the first model's output as the input to the second model
# detach() cuts the computation graph of model1, so training model2 does not try to
# backpropagate through model1's already-freed graph
x_data2 = model1(x_data).detach()
y_data2 = y_data.view(-1, 1).float()  # reshape labels to (N, 1) floats for BCELoss
# Train the second model
for epoch in range(num_epochs):
    # Forward pass
    outputs = model2(x_data2)
    loss = criterion2(outputs, y_data2)

    # Backward pass and parameter update
    optimizer2.zero_grad()
    loss.backward()
    optimizer2.step()

    # Compute the training accuracy
    predicted = outputs.round()  # threshold the sigmoid output at 0.5
    total = predicted.size(0)
    correct = predicted.eq(y_data2).cpu().sum()
    accuracy = 100.0 * correct / total

    # Print progress every 100 epochs
    if (epoch + 1) % 100 == 0:
        print('Epoch [%d/%d], Loss: %.4f, Accuracy: %.2f %%' % (epoch + 1, num_epochs, loss.item(), accuracy))
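After training, the two models can be chained for prediction on new samples. A minimal sketch, assuming new samples are standardized with the same fitted scaler; new_x is a hypothetical batch:

# Switch both models to evaluation mode so dropout is disabled
model1.eval()
model2.eval()

with torch.no_grad():
    # Hypothetical new samples, standardized with the scaler fitted above
    new_x = torch.Tensor(scaler.transform(np.random.rand(3, input_size)))
    probs = model2(model1(new_x))           # chained prediction: genes -> 4-dim features -> disease probability
    predictions = probs.round().squeeze(1)  # 1 = predicted diseased, 0 = predicted normal
    print(predictions)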