Complete in Python: use a recurrent neural network to learn the spelling of Hanyu Pinyin. This experiment focuses on preparing the data and the model.

The pinyin data carries no tones and is read with: with open('pinyin.txt', 'r', encoding='utf-8') as f.

Defining the dataset: a character-level model is used, so one character is one sample, and each sample is one-hot encoded. Because the samples are time-dependent, implement both random sampling of sequences and sequential partitioning of sequences (a random-sampling sketch follows the data-splitting code below). The labels Y have the same shape as X but run one time step ahead.

Preparing the data: the data consumed by one gradient update has shape (batch size, time steps, vocabulary size).
import torch
import torch.nn as nn
import numpy as np
Read the pinyin data
with open('pinyin.txt', 'r', encoding='utf-8') as f:
    pinyin = f.read()  # keep the raw text; '\n' separates syllables and stays in the vocabulary
Build the character-to-index mappings
char_to_ix = {char: i for i, char in enumerate(sorted(set(pinyin)))}
ix_to_char = {i: char for char, i in char_to_ix.items()}
Convert the pinyin text to one-hot encoding
def one_hot_encode(pinyin):
    encoded = np.zeros((len(pinyin), len(char_to_ix)))
    for i, char in enumerate(pinyin):
        encoded[i][char_to_ix[char]] = 1
    return encoded
encoded_pinyin = one_hot_encode(pinyin)
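As a quick sanity check (a sketch added here, not part of the original listing), decoding the argmax of every one-hot row should round-trip to the original text:

# Sanity check (sketch): inverting the one-hot rows reproduces the text.
decoded = ''.join(ix_to_char[ix] for ix in encoded_pinyin.argmax(axis=1))
assert decoded == pinyin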
Split the dataset
def split_data(data, batch_size):
    # Sequential partitioning: trim the text so it divides evenly into
    # batch_size streams, then give each batch row one contiguous stream.
    # Input is (N, vocab_size); output is (batch_size, steps, vocab_size).
    n_steps = len(data) // batch_size
    data = data[:n_steps * batch_size]
    return data.reshape(batch_size, n_steps, -1)

seq_len = 50
batch_size = 16
encoded_pinyin = split_data(encoded_pinyin, batch_size)
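The task statement also asks for random sampling of sequences, which the code above does not implement. Here is a minimal sketch of that variant, assuming it is applied to the flat (N, vocab_size) one-hot array before split_data reshapes it; it yields (X, Y) minibatches whose labels lead the inputs by one step:

def random_sample_iter(data, seq_len, batch_size):
    # Random sampling: each minibatch draws batch_size windows whose start
    # positions are shuffled, so consecutive minibatches are not adjacent
    # in the original text.
    n_windows = (len(data) - 1) // seq_len
    starts = np.random.permutation(n_windows) * seq_len
    for i in range(0, len(starts) - batch_size + 1, batch_size):
        batch = starts[i:i + batch_size]
        X = np.stack([data[s:s + seq_len] for s in batch])
        Y = np.stack([data[s + 1:s + seq_len + 1] for s in batch])
        yield X, Y

With random sampling the hidden state should be re-initialized for every minibatch, since adjacent minibatches are no longer contiguous in time.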
Define the RNN model
class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x, hidden):
        output, hidden = self.rnn(x, hidden)
        output = self.fc(output)
        return output, hidden

    def init_hidden(self, batch_size):
        # One RNN layer, so the shape is (num_layers, batch, hidden_size).
        return torch.zeros(1, batch_size, self.hidden_size)
input_size = len(char_to_ix)
hidden_size = 100
output_size = len(char_to_ix)
rnn = RNN(input_size, hidden_size, output_size)
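As a quick shape check (a sketch added here, not part of the original listing), one forward pass should consume a (batch_size, seq_len, vocab_size) tensor, the shape named in the task statement, and return logits with the same batch and time dimensions:

# Smoke test (sketch): feed one dummy batch through the untrained model.
x = torch.zeros(batch_size, seq_len, input_size)
h = rnn.init_hidden(batch_size)
out, h = rnn(x, h)
print(out.shape)  # torch.Size([16, 50, vocab_size])
print(h.shape)    # torch.Size([1, 16, 100])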
Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(rnn.parameters(), lr=0.001)
Define the training function
def train(rnn, encoded_pinyin, criterion, optimizer, epochs=10, seq_len=50, batch_size=16, clip=5):
    rnn.train()
    for epoch in range(epochs):
        hidden = rnn.init_hidden(batch_size)
        for i in range(0, encoded_pinyin.shape[1] - seq_len, seq_len):
            # Y has the same shape as X but leads it by one time step.
            inputs = encoded_pinyin[:, i:i+seq_len, :]
            targets = encoded_pinyin[:, i+1:i+seq_len+1, :]

            inputs = torch.tensor(inputs, dtype=torch.float32)
            # CrossEntropyLoss expects class indices, so convert the
            # one-hot targets back to index form.
            targets = torch.tensor(targets.argmax(axis=-1), dtype=torch.long)

            # Detach the hidden state so gradients stop at the window boundary.
            hidden = hidden.detach()
            rnn.zero_grad()
            output, hidden = rnn(inputs, hidden)
            loss = criterion(output.view(-1, output_size), targets.view(-1))
            loss.backward()
            nn.utils.clip_grad_norm_(rnn.parameters(), clip)
            optimizer.step()
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, epochs, loss.item()))
Train the model
train(rnn, encoded_pinyin, criterion, optimizer)
Test the model
def predict(rnn, prefix, hidden=None, k=1):
    rnn.eval()
    encoded = one_hot_encode(prefix).reshape(1, -1, len(char_to_ix))
    if hidden is not None:
        hidden = hidden.detach()
    else:
        hidden = rnn.init_hidden(batch_size=1)
    with torch.no_grad():
        output, hidden = rnn(torch.tensor(encoded, dtype=torch.float32), hidden)
    # Only the logits of the last time step predict the next character.
    logits = output[0, -1]
    if k == 1:
        _, top_ix = torch.topk(logits, k=1)
        return ix_to_char[top_ix.item()], hidden
    else:
        probs = torch.softmax(logits, dim=0)
        top_k_probs, top_k_ixs = torch.topk(probs, k=k)
        top_k_chars = [ix_to_char[ix.item()] for ix in top_k_ixs]
        return top_k_chars, top_k_probs.numpy(), hidden
prefix = 'ni'
k = 5
for i in range(10):
    chars, probs, hidden = predict(rnn, prefix, k=k)
    char = chars[0]  # greedily take the most probable of the top-k
    print(prefix + char)
    prefix += char