在第二次和第三次训练中,代码中使用了变量 running_loss,但是该变量没有在训练前初始化,导致在训练时出现错误。可以在每次训练前将 running_loss 初始化为 0,例如:

# Second training stage: model2 is trained on model1's hard predictions.
model2 = Model2()
criterion2 = nn.CrossEntropyLoss()
optimizer2 = torch.optim.SGD(model2.parameters(), lr=lr2)

# Model 1's class predictions are fixed for the whole stage.
# NOTE(review): feeding integer class indices into model2 is unusual —
# presumably Model2 expects that shape; confirm against its definition.
outputs1 = model1(torch.tensor(X_train, dtype=torch.float32))
_, preds1 = torch.max(outputs1, 1)

# Convert the targets once, outside the loop.
targets2 = torch.tensor(y_train, dtype=torch.long)
for epoch in range(epochs2):
    # BUG FIX: reset the accumulators every epoch. The original kept
    # running_corrects growing across epochs (`+=` with a single init
    # before the loop), so reported accuracy exceeded 1.0 after epoch 1.
    running_loss = 0.0
    running_corrects = 0

    optimizer2.zero_grad()
    outputs2 = model2(preds1)
    loss = criterion2(outputs2, targets2)
    loss.backward()
    optimizer2.step()

    # Full-batch training: one "batch" per epoch, weighted by sample count.
    running_loss += loss.item() * X_train.shape[0]
    # BUG FIX: the original compared an undefined `preds2` here; derive
    # this epoch's predictions from outputs2 before counting corrects.
    _, preds2 = torch.max(outputs2, 1)
    running_corrects += torch.sum(preds2 == targets2)

    epoch_loss = running_loss / len(X_train)
    epoch_acc = running_corrects.double() / len(X_train)
    print('Epoch [{}/{}], Loss: {:.4f}, Accuracy: {:.4f}'.format(epoch+1, epochs2, epoch_loss, epoch_acc))

# Third training stage: model3 is trained on model2's hard predictions.
model3 = Model3()
criterion3 = nn.BCELoss()
optimizer3 = torch.optim.SGD(model3.parameters(), lr=lr3)

# Model 2's class predictions are fixed for the whole stage.
outputs2 = model2(preds1)
_, preds2 = torch.max(outputs2, 1)

# BCELoss needs float targets; convert once, outside the loop.
targets3 = torch.tensor(y_train, dtype=torch.float32)
for epoch in range(epochs3):
    # BUG FIX: reset the accumulators every epoch. The original kept
    # running_corrects growing across epochs (`+=` with a single init
    # before the loop), inflating the reported accuracy past 1.0.
    running_loss = 0.0
    running_corrects = 0

    optimizer3.zero_grad()
    outputs3 = model3(preds2.float())
    loss = criterion3(outputs3, targets3)
    loss.backward()
    optimizer3.step()

    # Full-batch training: one "batch" per epoch, weighted by sample count.
    running_loss += loss.item() * X_train.shape[0]
    # Binary decision at 0.5 via round(); outputs3 assumed in [0, 1]
    # (BCELoss requires that — TODO confirm model3 ends with a sigmoid).
    running_corrects += torch.sum(torch.round(outputs3) == targets3)

    epoch_loss = running_loss / len(X_train)
    epoch_acc = running_corrects.double() / len(X_train)
    print('Epoch [{}/{}], Loss: {:.4f}, Accuracy: {:.4f}'.format(epoch+1, epochs3, epoch_loss, epoch_acc))
import torch
import torch.nn as nn
import pandas as pd
from sklearn.model_selection import train_test_split

# 读入Excel表格
data = pd.read_excel(r'C:\Users\lenovo\Desktop\HIV\DNN神经网络测试\data1.xlsx')
# 获取基因名称
gene_names = data.co…（原文在此处被截断）

原文地址: https://www.cveoy.top/t/topic/9da 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录