PyTorch 神经网络:使用两个全连接层和权重衰减
好的,根据你的要求,将网络改为两个隐藏全连接层(外加一个输出层,共三个 nn.Linear),并且使用权重衰减(weight decay)来正则化模型。以下是修改后的代码:
import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
# Load the label data: one whitespace-separated numeric vector per line,
# each converted to a 1-D float tensor.
tensor_list = []
with open('biaoqian.txt', 'r') as file:
    for raw_line in file:
        fields = raw_line.strip().split()
        tensor_list.append(torch.tensor([float(field) for field in fields]))
class MyNetwork(nn.Module):
    """Simple MLP classifier: flatten -> 512 -> 256 -> 3 logits.

    Expects inputs whose non-batch dimensions multiply to 312 * 256
    (e.g. shape (batch, 312, 256) — assumed from fc1's in_features;
    TODO confirm against the actual data).
    """

    def __init__(self):
        super().__init__()
        # Collapse everything after the batch dimension into one vector.
        self.flatten = nn.Flatten()
        self.fc1 = nn.Linear(312 * 256, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 3)

    def forward(self, x):
        # Two ReLU hidden layers, raw logits out (no softmax here).
        h = F.relu(self.fc1(self.flatten(x)))
        h = F.relu(self.fc2(h))
        return self.fc3(h)
# Instantiate the network.
network = MyNetwork()

# Path to the .pt file holding the training samples.
pt_file_path = 'expanded1.pt'
data = torch.load(pt_file_path)

# Convert each item to a tensor. Fix: torch.tensor(existing_tensor) is the
# discouraged copy-construct pattern (PyTorch warns and recommends
# sourceTensor.clone().detach()); non-tensor items still go through
# torch.tensor().
input_tensors = [
    item.clone().detach() if torch.is_tensor(item) else torch.tensor(item)
    for item in data
]

# First 18000 samples train, the rest validate.
train_tensors = input_tensors[0:18000]
val_tensors = input_tensors[18000:]

# AdamW applies decoupled weight decay for regularization.
optimizer = optim.AdamW(network.parameters(), lr=0.001, weight_decay=0.1)
def custom_loss(output, label):
    """Cross-entropy loss between network logits and a one-hot label.

    Fixes the original stub, which returned the undefined name ``loss``
    and therefore raised ``NameError`` on every call.

    Args:
        output: logits of shape (1, C) or (C,).
        label: one-hot target vector of shape (C,).

    Returns:
        Scalar loss tensor.
    """
    # Normalize logits to (1, C) so a bare (C,) output also works.
    logits = output if output.dim() == 2 else output.unsqueeze(0)
    # Convert the one-hot vector to a class index of shape (1,).
    target = label.argmax().unsqueeze(0)
    return F.cross_entropy(logits, target)
num_epochs = 5

# The three possible one-hot labels; hoisted out of the loops (the original
# rebuilt this list on every sample).
label_list = [torch.tensor([1, 0, 0]), torch.tensor([0, 1, 0]), torch.tensor([0, 0, 1])]


def _cosine_correct(output, label):
    """Return True when ``output`` is strictly closer (cosine similarity)
    to its own ``label`` than to either of the other two one-hot labels."""
    target_similarity = F.cosine_similarity(output, label.unsqueeze(0), dim=1)
    others = [cand for cand in label_list if not torch.all(torch.eq(label, cand))]
    other_best = torch.max(
        torch.stack([F.cosine_similarity(output, other.unsqueeze(0), dim=1)
                     for other in others]),
        dim=0,
    ).values
    return bool(target_similarity > other_best)


for epoch in range(num_epochs):
    running_loss = 0.0
    train_correct_total = 0
    train_total = 0

    # --- training phase ---
    network.train()
    for i, input_tensor in enumerate(train_tensors):
        optimizer.zero_grad()
        output = network(input_tensor)
        loss = custom_loss(output, tensor_list[i])
        loss.backward()
        optimizer.step()

        if _cosine_correct(output, tensor_list[i]):
            train_correct_total += 1
        train_total += 1
        running_loss += loss.item()

    train_accuracy = train_correct_total / train_total

    # Persist the final weights after the last epoch.
    if epoch == num_epochs - 1:
        torch.save(network.state_dict(), 'final_model.pt')

    print('Train Accuracy: %.2f%%' % (100 * train_accuracy))
    print('Epoch: %d, Loss: %.3f' % (epoch + 1, running_loss))

    # --- validation phase ---
    network.eval()
    val_correct_total = 0
    val_total = 0
    with torch.no_grad():
        for j, val_input_tensor in enumerate(val_tensors):
            val_output = network(val_input_tensor)
            # BUG FIX: validation samples are input_tensors[18000:], so their
            # labels live at tensor_list[18000 + j]; the original used
            # tensor_list[j], scoring validation outputs against TRAINING labels.
            if _cosine_correct(val_output, tensor_list[18000 + j]):
                val_correct_total += 1
            val_total += 1

    # NOTE(review): divides by zero if the validation split is empty, same as
    # the original — confirm len(input_tensors) > 18000.
    val_accuracy = val_correct_total / val_total
    print('Validation Accuracy: %.2f%%' % (100 * val_accuracy))
    print('Epoch: %d, Loss: %.3f' % (epoch + 1, running_loss))
在这个修改后的代码中,我将网络改为两个隐藏全连接层加一个输出层(共三个 nn.Linear),并且使用 weight_decay 参数来设置权重衰减的系数,以正则化模型。
你可以根据实际情况修改自定义的损失函数 custom_loss 的实现,并调整优化器的学习率和权重衰减系数。
希望这个修改后的代码对你有所帮助!如果还有其他问题,请随时提问。
原文地址: https://www.cveoy.top/t/topic/b9J2 著作权归作者所有。请勿转载和采集!