import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pickle
from matplotlib import pyplot as plt
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
from torch.utils.data import Dataset, DataLoader
from rdkit.Chem import Draw
from rdkit.Chem import MolFromSmiles
import os,sys
import time
# Hyperparameters
input_dim = 128  # dimensionality of the input vector
hidden_dim = 64  # dimensionality of the hidden layers
output_dim = 128  # output vector has the same dimensionality as the input
learning_rate = 1e-3  # Adam learning rate
num_epochs = 100  # total number of training epochs
def smiles_to_one_hot(smiles):
    """Convert a SMILES string into a per-atom one-hot matrix.

    Each row corresponds to one atom of the parsed molecule (RDKit atom
    order); each column corresponds to one entry of the token table below.
    Atoms whose element symbol is not in the table produce an all-zero row.

    Args:
        smiles: SMILES string describing a molecule.

    Returns:
        numpy.ndarray of shape (num_atoms, 34).

    Raises:
        ValueError: if RDKit cannot parse the SMILES string.
    """
    mol = Chem.MolFromSmiles(smiles)
    if mol is None:
        # MolFromSmiles returns None on parse failure; fail loudly here
        # instead of raising an opaque AttributeError below.
        raise ValueError('invalid SMILES string: {!r}'.format(smiles))
    # NOTE(review): only single-letter element symbols (H, C, N, O, F, ...)
    # can ever match atom.GetSymbol(); the remaining entries (ring digits,
    # brackets, bond symbols, multi-character tokens) only widen the one-hot
    # dimension to 34 and are never set.
    # Bug fix vs. original: '[NH2+]' was listed twice (values 33 and 34);
    # the duplicate silently overwrote the first and mapped to column 34,
    # one past the 34-column matrix width. A single entry keeps the same
    # 34-column output shape with all indices in range.
    atom_dict = {'H': 0, 'C': 1, 'N': 2, 'O': 3, 'F': 4,
                 'n': 5, 'c': 6, 'o': 7,
                 '1': 8, '2': 9, '3': 10, '4': 11, '5': 12,
                 '(': 13, ')': 14, '[': 15, ']': 16,
                 '-': 17, '=': 18, '#': 19, '+': 20, 'X': 21,
                 'Y': 22, '/': 23, 'C1': 24, 'C2': 25, 'N3': 26,
                 'C4': 27, 'C3': 28, 'O1': 29, '[nH]': 30,
                 '[NH3+]': 31, '[O-]': 32, '[NH2+]': 33}
    one_hot = np.zeros((mol.GetNumAtoms(), len(atom_dict)))
    for atom in mol.GetAtoms():
        symbol = atom.GetSymbol()
        if symbol in atom_dict:
            one_hot[atom.GetIdx()][atom_dict[symbol]] = 1
    return one_hot
class MyEncoder(nn.Module):
    """Three-layer fully connected encoder.

    Maps a flattened input of input_dim * input_dim features through two
    ReLU hidden layers to a code of output_dim * input_dim features.
    """

    def __init__(self, input_dim, hidden_dim, output_dim):
        super(MyEncoder, self).__init__()
        # NOTE(review): fc1 expects the input already flattened to
        # input_dim * input_dim features -- confirm against the caller.
        self.fc1 = nn.Linear(input_dim * input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, output_dim * input_dim)

    def forward(self, x):
        # Two ReLU-activated hidden layers; the final projection is linear.
        hidden = torch.relu(self.fc1(x))
        hidden = torch.relu(self.fc2(hidden))
        return self.fc3(hidden)
class MyDecoder(nn.Module):
    """Three-layer fully connected decoder with a sigmoid output.

    Maps a code of input_dim features through two ReLU hidden layers to
    output_dim features squashed into (0, 1) by a final sigmoid.
    """

    def __init__(self, input_dim, hidden_dim, output_dim):
        super(MyDecoder, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # Two ReLU-activated hidden layers, then a sigmoid so every output
        # element lies in (0, 1).
        hidden = torch.relu(self.fc1(x))
        hidden = torch.relu(self.fc2(hidden))
        return torch.sigmoid(self.fc3(hidden))
# Load SMILES strings (one per line) from the input file.
with open('qm9_3200smiles.txt', 'r') as f:
    smiles_list = [line.strip() for line in f]

# Encode every SMILES string as a per-atom one-hot matrix.
one_hot_matrices = [smiles_to_one_hot(s) for s in smiles_list]

# Zero-pad every matrix (along the atom axis) to the longest molecule so
# they all share the same shape.
max_len = max(len(m) for m in one_hot_matrices)
one_hot_matrices = [
    np.pad(m, ((0, max_len - len(m)), (0, 0)), 'constant')
    for m in one_hot_matrices
]

# Persist the padded matrices for later reuse.
np.save('one_hot_matrices.npy', one_hot_matrices)
class MyDataset(Dataset):
    """Serves (input, target) pairs as float tensors.

    `data` is a sequence of 2-tuples; both elements of each pair are
    converted to torch.Tensor (float32) on access.
    """

    def __init__(self, data):
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # Unpack the pair and convert each side to a float tensor.
        source, target = self.data[index]
        return torch.Tensor(source), torch.Tensor(target)
    
# Build the data loader: the autoencoder target equals its input.
batch_size = 128
#dataset = MyDataset([(np.random.rand(128), np.random.rand(128)) for i in range(128)])
dataset = MyDataset([(one_hot_matrix, one_hot_matrix) for one_hot_matrix in one_hot_matrices])
train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
# NOTE(review): MyEncoder.fc1 expects input_dim * input_dim = 16384 input
# features, but each batch element is a (max_len, 34) one-hot matrix and is
# never flattened here -- confirm the shapes actually line up before running.
encoder = MyEncoder(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim)
decoder = MyDecoder(input_dim=output_dim, hidden_dim=hidden_dim, output_dim=input_dim)
# One Adam optimizer jointly updates both halves of the autoencoder.
optimizer = optim.Adam(list(encoder.parameters()) + list(decoder.parameters()), lr=learning_rate)
criterion = nn.L1Loss()
# Training loop

train_losses = []

for epoch in range(num_epochs):
    epoch_loss = 0.0

    for data, target in train_loader:
        optimizer.zero_grad()
        #data = data.float()
        encoded = encoder(data)
        decoded = decoder(encoded)
        #loss = criterion(decoded, target.float() / 256)
        # NOTE(review): targets are one-hot (0/1) values, so dividing by 256
        # rescales them to 0 or 1/256 -- verify this is intentional, given the
        # decoder's sigmoid output spans (0, 1).
        loss = criterion(decoded, target / 256.0)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
        #generated_smiles.extend([smile for smile in decoded.detach().numpy()])
    # Record and report the mean batch loss for this epoch.
    train_losses.append(epoch_loss / len(train_loader))
    print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, epoch_loss / len(train_loader)))

# Plot the training-loss curve.
plt.plot(train_losses)
plt.title('Training Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show()

修改说明:

MyEncoder 类的 __init__ 方法中,将 self.fc1 的输入维度修改为 input_dim * input_dim,并将 self.fc3 的输出维度修改为 output_dim * input_dim

修正后的代码如下:

class MyEncoder(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim):
        super(MyEncoder, self).__init__()
        self.fc1 = nn.Linear(input_dim * input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, output_dim * input_dim)

    def forward(self, x):
        x = self.fc1(x)
        x = nn.ReLU()(x)
        x = self.fc2(x)
        x = nn.ReLU()(x)
        x = self.fc3(x)
        return x

修改原因:

原始代码中,self.fc1 的输入维度为 128*64,而 self.fc3 的输出维度为 128。由于 data 的形状为 (batch_size, 128, 64),因此在 self.fc1 中进行矩阵乘法时,会出现维度不匹配的错误。

self.fc1 的输入维度修改为 input_dim * input_dim,并将 self.fc3 的输出维度修改为 output_dim * input_dim,可以确保在矩阵乘法过程中维度匹配,从而解决错误。


原文地址: https://www.cveoy.top/t/topic/mFgj 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录