导入相关库

# Standard deep-learning stack: torch for tensors/autograd, nn for layers,
# numpy for random batch-offset sampling and categorical sampling.
import torch
import torch.nn as nn
import numpy as np

读取拼音数据

# Load the pinyin corpus; each element of pinyin_data is one line of the file
# (splitlines drops the trailing newline characters).
with open('/kaggle/input/pinyin-data/pinyin.txt', 'r', encoding='utf-8') as f:
    pinyin_data = f.read().splitlines()

定义数据集

首先把所有的拼音字符放到一个列表中

# Build the character vocabulary: every distinct character appearing anywhere
# in the corpus, sorted so index assignment is deterministic across runs.
pinyin_chars = sorted({char for pinyin in pinyin_data for char in pinyin})

构建字符对应的索引字典和反向索引字典

# Forward and inverse vocabulary lookups: character -> integer id and back.
char2idx = {char: idx for idx, char in enumerate(pinyin_chars)}
idx2char = {idx: char for char, idx in char2idx.items()}

构建训练数据

# Training hyperparameters.
seq_length = 25   # length of each training subsequence (time steps)
batch_size = 128  # number of sequences per batch

随机采样

def random_sample():
    """Draw one random training batch of (input, target) index sequences.

    Returns a pair of LongTensors of shape (seq_length, batch_size); targets
    are the inputs shifted one position to the right (next-character
    prediction).

    NOTE(review): this slices ``pinyin_data`` (a list of *lines*) and then
    looks each sliced element up in ``char2idx`` (keyed by single
    *characters*). Unless every corpus line is exactly one character long,
    the lookup raises KeyError — the corpus probably needs flattening into a
    single string first. TODO: confirm against the data file.
    """
    inputs = []
    targets = []
    for _ in range(batch_size):
        # Random start offset, leaving room for a full window plus the
        # one-step-shifted target window.
        start_idx = np.random.randint(0, len(pinyin_data) - seq_length)
        input_str = pinyin_data[start_idx:start_idx + seq_length]
        target_str = pinyin_data[start_idx + 1:start_idx + seq_length + 1]
        inputs.append([char2idx[char] for char in input_str])
        targets.append([char2idx[char] for char in target_str])
    # Transpose (batch, time) -> (time, batch) so the training loop can index
    # one time step at a time.
    inputs = torch.LongTensor(inputs).transpose(0, 1)
    targets = torch.LongTensor(targets).transpose(0, 1)
    return inputs, targets

序列划分

def seq_partition():
    """Partition the corpus into consecutive, non-overlapping windows.

    Returns (inputs, targets) LongTensors transposed to (time, batch)
    layout; targets are the inputs shifted one position to the right.

    NOTE(review): shares the line-vs-character lookup issue described on
    random_sample. Also, the final target window (``i+1 : i+seq_length+1``)
    runs one element past the trimmed data, producing a ragged last row that
    torch.LongTensor() will reject — verify before using this path.
    """
    # Keep only as many elements as fill whole (batch * seq_length) chunks.
    num_batches = int(len(pinyin_data) / (batch_size * seq_length))
    pinyin_data_trimmed = pinyin_data[:num_batches * batch_size * seq_length]
    inputs = []
    targets = []
    for i in range(0, len(pinyin_data_trimmed), seq_length):
        input_str = pinyin_data_trimmed[i:i + seq_length]
        target_str = pinyin_data_trimmed[i + 1:i + seq_length + 1]
        inputs.append([char2idx[char] for char in input_str])
        targets.append([char2idx[char] for char in target_str])
    # Transpose (batch, time) -> (time, batch) to match the training loop.
    inputs = torch.LongTensor(inputs).transpose(0, 1)
    targets = torch.LongTensor(targets).transpose(0, 1)
    return inputs, targets

定义模型

class RNNModel(nn.Module):
    """Character-level recurrent language model: Embedding -> RNN/GRU -> Linear.

    input_size and output_size are the vocabulary size; the embedding
    dimension is tied to hidden_size.
    """

    def __init__(self, input_size, hidden_size, output_size, num_layers, rnn_type):
        # BUG FIX: the scraped source had ``def init`` / ``self).init()`` —
        # the ``__`` dunders were stripped by markdown formatting.
        super(RNNModel, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layers = num_layers
        self.rnn_type = rnn_type  # 'RNN' or 'GRU'; any other value leaves self.rnn undefined

        self.encoder = nn.Embedding(input_size, hidden_size)
        if rnn_type == 'RNN':
            self.rnn = nn.RNN(hidden_size, hidden_size, num_layers, batch_first=True)
        elif rnn_type == 'GRU':
            self.rnn = nn.GRU(hidden_size, hidden_size, num_layers, batch_first=True)
        self.decoder = nn.Linear(hidden_size, output_size)

    def forward(self, input, hidden):
        """Embed token ids, run the recurrent core, project to vocab logits.

        With batch_first=True the RNN expects (batch, seq, feature) input
        after embedding; hidden stays (num_layers, batch, hidden_size).
        Returns (logits, new_hidden).
        """
        input = self.encoder(input)
        output, hidden = self.rnn(input, hidden)
        output = self.decoder(output)
        return output, hidden

    def init_hidden(self, batch_size):
        """Return a zeroed initial state of shape (num_layers, batch, hidden)."""
        # RNN and GRU share the same hidden-state shape; the explicit branch
        # is kept so an unknown rnn_type still falls through to None, as in
        # the original.
        if self.rnn_type == 'RNN':
            return torch.zeros(self.num_layers, batch_size, self.hidden_size)
        elif self.rnn_type == 'GRU':
            return torch.zeros(self.num_layers, batch_size, self.hidden_size)

训练模型

# Run on GPU when available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Vocabulary-sized input/output layers, 128 hidden units, 2 stacked GRU layers.
model = RNNModel(len(pinyin_chars), 128, len(pinyin_chars), 2, 'GRU').to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# Train for a fixed number of epochs; each epoch consumes one random batch.
num_epochs = 100
for epoch in range(num_epochs):
    hidden = model.init_hidden(batch_size).to(device)
    inputs, targets = random_sample()  # alternatively: seq_partition()
    inputs, targets = inputs.to(device), targets.to(device)

    # Step the model one time slice at a time and average the per-step loss.
    # NOTE(review): inputs[i] is a 1-D (batch,) tensor, so the embedded input
    # is 2-D, while the model was built with batch_first=True and a 3-D
    # hidden state — verify the intended shapes before relying on this loop.
    loss = 0
    for i in range(seq_length):
        output, hidden = model(inputs[i], hidden)
        loss += criterion(output.view(batch_size, -1), targets[i])
    loss /= seq_length

    optimizer.zero_grad()
    loss.backward()
    torch.nn.utils.clip_grad_norm_(model.parameters(), 1)  # gradient clipping
    optimizer.step()

    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

预测

def predict(prefix, num_predictions):
    """Continue *prefix* by sampling num_predictions characters from the model.

    Each step feeds one character id through the network and samples the next
    character from the softmax distribution over the vocabulary. Returns the
    prefix plus the sampled continuation as a single string.

    NOTE(review): only the *last* prefix character is ever fed to the model —
    the earlier prefix characters never warm up the hidden state. Confirm
    whether the prefix should be consumed one step at a time first.
    """
    hidden = model.init_hidden(1).to(device)
    prefix_input = torch.LongTensor([char2idx[char] for char in prefix]).unsqueeze(1).to(device)
    predicted_chars = list(prefix)
    for _ in range(num_predictions):
        output, hidden = model(prefix_input[-1], hidden)
        # Turn logits into a probability vector and sample the next char id.
        probabilities = nn.functional.softmax(output.squeeze(), dim=0).cpu().detach().numpy()
        predicted_idx = np.random.choice(len(pinyin_chars), p=probabilities)
        predicted_chars.append(idx2char[predicted_idx])
        # Feed the sampled character back in as the next step's input.
        prefix_input = torch.LongTensor([predicted_idx]).unsqueeze(1).to(device)
    return ''.join(predicted_chars)

# BUG FIX: in the scraped source the second print sat inside the first
# line's trailing comment and never ran; split onto separate lines.
print(predict('shi', 10))  # short continuation
print(predict('shi', 20))  # longer, K-step continuation

使用循环神经网络学习汉语拼音拼写:数据准备和模型构建

原文地址: https://www.cveoy.top/t/topic/oi21 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录