import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn

# Load the label data (one whitespace-separated label vector per line)
file_path = r'C:\Users\18105\Desktop\MVSA-multiple\MVSA\biaoqian.txt'
tensor_list = []
with open(file_path, 'r') as file:
    for line in file:
        line = line.strip()
        numbers = line.split()
        tensor = torch.tensor([float(num) for num in numbers])
        tensor_list.append(tensor)

# Define the network architecture
class MyNetwork(nn.Module):
    def __init__(self):
        super(MyNetwork, self).__init__()
        self.flatten = nn.Flatten()
        # Maps a flattened 312x256 feature tensor to 3 class scores
        self.fc = nn.Linear(312 * 256, 3)

    def forward(self, x):
        # x: (batch, 312, 256) -> (batch, 312*256) -> (batch, 3)
        x = self.flatten(x)
        x = self.fc(x)
        return x

network = MyNetwork()

# Load the preprocessed input data
pt_file_path = r'C:\Users\18105\PycharmProjects\tuwenqingganfenxi\expanded1.pt'
data = torch.load(pt_file_path)

# Split into training and validation sets (torch.as_tensor avoids the
# copy-construction warning that torch.tensor raises on existing tensors)
input_tensors = [torch.as_tensor(tensor) for tensor in data]
train_tensors = input_tensors[0:18000]
val_tensors = input_tensors[18000:]

# Candidate one-hot labels for the three classes
label_list = [torch.tensor([1.0, 0.0, 0.0]), torch.tensor([0.0, 1.0, 0.0]), torch.tensor([0.0, 0.0, 1.0])]

# Custom loss: push the cosine similarity with the true label above the best
# similarity with any competing one-hot label
def custom_loss(output, label):
    target_similarity = F.cosine_similarity(output, label.unsqueeze(0), dim=1)
    other_similarities = torch.cat([
        F.cosine_similarity(output, other.unsqueeze(0), dim=1)
        for other in label_list
        if not torch.equal(other, label)
    ])
    diff = target_similarity - torch.max(other_similarities)
    loss = -torch.log((1 + diff) / 2)
    return loss
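
Note that both cosine similarities lie in [-1, 1], so diff ranges over [-2, 2] and (1 + diff) / 2 can become non-positive, in which case the log is undefined and the loss turns into NaN. A minimal safeguard is to clamp the log argument to a small positive floor; the variant below sketches this (the name custom_loss_stable and the eps value are illustrative additions, not part of the original code):

# Numerically safer variant of the loss above (illustrative sketch):
# clamping keeps the log argument positive even when diff <= -1
def custom_loss_stable(output, label, eps=1e-6):
    target_similarity = F.cosine_similarity(output, label.unsqueeze(0), dim=1)
    other_similarities = torch.cat([
        F.cosine_similarity(output, other.unsqueeze(0), dim=1)
        for other in label_list
        if not torch.equal(other, label)
    ])
    diff = target_similarity - torch.max(other_similarities)
    return -torch.log(torch.clamp((1 + diff) / 2, min=eps))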

# Define the optimizer
optimizer = optim.AdamW(network.parameters(), lr=0.001, weight_decay=0.02)

# Number of training epochs
num_epochs = 5

# Model training and validation
for epoch in range(num_epochs):
    running_loss = 0.0
    train_correct_total = 0
    train_total = 0

    # Training phase
    network.train()
    for i, input_tensor in enumerate(train_tensors):
        optimizer.zero_grad()
        output = network(input_tensor)
        loss = custom_loss(output, tensor_list[i])
        loss.backward()
        optimizer.step()

        # Training accuracy: count a prediction as correct when it is closer
        # (by cosine similarity) to the true label than to every other label
        target_similarity = F.cosine_similarity(output, tensor_list[i].unsqueeze(0), dim=1)
        other_list = [label_tensor for label_tensor in label_list if not torch.equal(tensor_list[i], label_tensor)]
        other_similarities = torch.cat([F.cosine_similarity(output, other.unsqueeze(0), dim=1) for other in other_list])
        if target_similarity > torch.max(other_similarities):
            train_correct_total += 1
        train_total += 1
        running_loss += loss.item()

    train_accuracy = train_correct_total / train_total
    print('Train Accuracy: %.2f%%' % (100 * train_accuracy))
    print('Epoch: %d, Loss: %.3f' % (epoch + 1, running_loss))

    # Save the model parameters after the final epoch
    if epoch == num_epochs - 1:
        torch.save(network.state_dict(), 'final_model.pt')

    # Validation phase
    network.eval()
    val_correct_total = 0
    val_total = 0
    with torch.no_grad():
        for j, val_input_tensor in enumerate(val_tensors):
            val_output = network(val_input_tensor)
            val_target_similarity = F.cosine_similarity(val_output, tensor_list[j + 18000].unsqueeze(0), dim=1)
            other_list = [label_tensor for label_tensor in label_list if not torch.equal(tensor_list[j + 18000], label_tensor)]
            other_similarities = torch.cat([F.cosine_similarity(val_output, other.unsqueeze(0), dim=1) for other in other_list])
            if val_target_similarity > torch.max(other_similarities):
                val_correct_total += 1
            val_total += 1
        val_accuracy = val_correct_total / val_total
        print('Validation Accuracy: %.2f%%' % (100 * val_accuracy))

Notes on the code improvements:

  • More descriptive variable and function names are used.
  • Data loading and preprocessing are separated from model training, which improves readability.
  • Accuracy and loss are computed and printed for both the training and validation phases, making it easier to track model performance.
  • Parts of the logic are simplified for conciseness.

Optimization suggestions:

  1. Data preprocessing: normalize or standardize the input data, e.g. with functions from torchvision.transforms (a standardization sketch follows this list).
  2. Model complexity: adjust the number of layers and the number of neurons per layer to match the amount of data and the difficulty of the task (the dropout sketch below also adds a hidden layer).
  3. Learning rate scheduling: a learning rate decay strategy, e.g. from torch.optim.lr_scheduler, can help the model converge better (see the scheduler sketch below).
  4. Regularization and dropout: add a Dropout layer (nn.Dropout) after the fully connected layer, and try L1 or L2 regularization to reduce overfitting (see the dropout sketch below).
  5. Data augmentation: apply random rotations, scaling, translations, flips, etc. to the training data to increase its diversity and improve generalization.
  6. Optimizer choice: try different optimizers, such as SGD or Adam, and tune their parameters, e.g. learning rate and momentum (see the last sketch below).
  7. Longer training: increase the number of epochs and check whether the model converges further.
  8. More training data: if possible, collect more data for training to improve generalization.
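
For suggestion 1: since the inputs here are 312x256 feature tensors rather than raw images, a simple per-tensor standardization may be more direct than torchvision.transforms. A minimal sketch (the standardize helper and the eps value are illustrative):

# Per-tensor standardization: zero mean, unit variance.
# eps guards against division by zero for constant tensors.
def standardize(t, eps=1e-8):
    return (t - t.mean()) / (t.std() + eps)

train_tensors = [standardize(t) for t in train_tensors]
val_tensors = [standardize(t) for t in val_tensors]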
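
For suggestion 3: a minimal sketch of learning rate decay using torch.optim.lr_scheduler.StepLR (the step_size and gamma values are illustrative):

from torch.optim.lr_scheduler import StepLR

# Multiply the learning rate by 0.1 every 2 epochs
scheduler = StepLR(optimizer, step_size=2, gamma=0.1)

for epoch in range(num_epochs):
    # ... training and validation as above ...
    scheduler.step()  # advance the schedule once per epoch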
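
For suggestions 2 and 4: a variant of MyNetwork with one hidden layer and dropout. The hidden width of 128 and the dropout probability of 0.5 are illustrative choices, not taken from the original; the existing network.train() / network.eval() calls in the loop already toggle dropout correctly:

class MyNetworkWithDropout(nn.Module):
    def __init__(self, dropout_p=0.5):
        super().__init__()
        self.flatten = nn.Flatten()
        self.fc1 = nn.Linear(312 * 256, 128)  # hidden width is illustrative
        self.dropout = nn.Dropout(p=dropout_p)
        self.fc2 = nn.Linear(128, 3)

    def forward(self, x):
        x = self.flatten(x)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)  # active in train() mode, disabled by eval()
        return self.fc2(x)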
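
For suggestion 6: swapping the optimizer is a one-line change, e.g. SGD with momentum (the values shown are illustrative):

# Drop-in replacement for the AdamW optimizer above
optimizer = optim.SGD(network.parameters(), lr=0.01, momentum=0.9, weight_decay=0.02)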