Learning Chinese Pinyin Spelling with a Recurrent Neural Network

This experiment focuses on preparing the data and the model. The pinyin data carries no tones: /kaggle/input/pinyin-data/pinyin.txt. Defining the dataset: a character-level model is used, so each character is one sample, and each sample is one-hot encoded (in the code below, nn.Embedding plays the equivalent role of a one-hot encoding followed by a learned linear layer). Because the samples are time-dependent, the sequence can be served either by random sampling or by sequential partitioning; a sketch of the random-sampling variant appears just below, while the preprocessing code in this write-up uses sequential partitioning. The label Y has the same shape as X but runs one time step ahead. Preparing the data: the data consumed by one gradient update has shape (time steps, batch, number of classes). A basic recurrent neural network model is implemented; its recurrent unit is a GRU.
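For completeness, here is a minimal sketch of the random-sampling variant, assuming the corpus has already been flattened into one long list of character indices (the name random_sample_iter and the seq_len parameter are illustrative, not part of the original code):

import random
import torch

def random_sample_iter(indices, batch_size, seq_len):
    # Each sample is a window of seq_len characters; Y is X shifted one step ahead.
    n_samples = (len(indices) - 1) // seq_len
    starts = [i * seq_len for i in range(n_samples)]
    random.shuffle(starts)  # random sampling: windows are visited in random order
    for i in range(0, n_samples - n_samples % batch_size, batch_size):
        batch = starts[i:i + batch_size]
        X = torch.tensor([indices[s:s + seq_len] for s in batch])
        Y = torch.tensor([indices[s + 1:s + seq_len + 1] for s in batch])
        yield X, Y  # each of shape (batch_size, seq_len)

With random sampling, adjacent minibatches are not contiguous in the text, so the hidden state must be re-initialized for every batch rather than carried over.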
Import the necessary libraries
import torch
import torch.nn as nn
import numpy as np
import random
Set the random seeds to make the experiment reproducible
SEED = 42
torch.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)
Define the dataset path
data_path = './pinyin.txt'
Define the character set and the character-to-index mapping
# character set for toneless pinyin: lowercase letters, space, and an
# unknown token (assumed; adjust if the data contains other symbols)
char_set = list('abcdefghijklmnopqrstuvwxyz ') + ['<UNK>']
char2idx = {c: i for i, c in enumerate(char_set)}
idx2char = {i: c for i, c in enumerate(char_set)}
Define the hyperparameters
batch_size = 128
n_epochs = 50
lr = 0.001
hidden_size = 128
n_layers = 2
dropout = 0.5
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
Define the data preprocessing function, which converts the pinyin strings into index sequences
def preprocess_data(data_path, batch_size):
    with open(data_path, 'r', encoding='utf-8') as f:
        pinyin_data = f.read().split('\n')[:-1]
    # Convert the pinyin strings into one long index sequence; the lines
    # have different lengths, so they are concatenated before reshaping,
    # with a space kept between entries as a separator
    indices = []
    for pinyin in pinyin_data:
        for char in pinyin:
            indices.append(char2idx.get(char, char2idx['<UNK>']))
        indices.append(char2idx[' '])
    # Sequential partitioning: cut the sequence into batch_size parallel streams
    n_steps = len(indices) // batch_size
    data = np.array(indices[:n_steps * batch_size]).reshape(batch_size, -1)
    data = torch.LongTensor(data).transpose(0, 1).contiguous()  # (time, batch)
    return data.to(device)
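With the hyperparameters above, preprocess_data returns a LongTensor of shape (T, 128), where T is the total number of character indices divided by the batch size; each column is one contiguous stream of text.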
Define the splitting function that divides the data into x and y
def divide_data(data):
    x = data[:-1, :]
    y = data[1:, :]
    return x, y
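A quick sanity check of divide_data on a toy time-major tensor (values arbitrary): y is x shifted forward by one time step, as described above.

toy = torch.arange(8).reshape(4, 2)  # (time=4, batch=2)
x, y = divide_data(toy)
# x: tensor([[0, 1], [2, 3], [4, 5]])
# y: tensor([[2, 3], [4, 5], [6, 7]])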
Define the model
class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, n_layers=1, dropout=0):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.dropout = dropout
        self.embedding = nn.Embedding(input_size, hidden_size)
        # the input is time-major (time, batch), so the GRU is kept in its
        # default seq-first layout rather than batch_first=True
        self.rnn = nn.GRU(hidden_size, hidden_size, n_layers, dropout=dropout)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x, hidden):
        embedded = self.embedding(x)              # (time, batch, hidden)
        output, hidden = self.rnn(embedded, hidden)
        output = self.fc(output)                  # (time, batch, vocab)
        return output, hidden

    def init_hidden(self, batch_size):
        return torch.zeros(self.n_layers, batch_size, self.hidden_size, device=device)
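A shape-only sanity check of the model (the tensors here are illustrative): with time-major input of shape (time, batch), the output should be (time, batch, vocabulary size).

m = RNN(len(char_set), hidden_size, len(char_set), n_layers, dropout).to(device)
h = m.init_hidden(4)
x = torch.zeros(10, 4, dtype=torch.long, device=device)  # (time=10, batch=4)
out, h = m(x, h)
print(out.shape)  # torch.Size([10, 4, 28]) with the 28-character set assumed above
print(h.shape)    # torch.Size([2, 4, 128])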
Define the training function
def train(model, data, criterion, optimizer):
    model.train()
    total_loss = 0
    n_batches = 0
    hidden = model.init_hidden(batch_size)
    # note: batch_size is reused here as the length of each BPTT chunk
    for i in range(0, data.size(0) - 1, batch_size):
        x, y = divide_data(data[i:i+batch_size])
        hidden = hidden.detach()  # truncated BPTT: carry the state, cut the gradient graph
        output, hidden = model(x, hidden)
        loss = criterion(output.view(-1, len(char_set)), y.view(-1))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        n_batches += 1
    return total_loss / n_batches
Define the evaluation function
def test(model, data, criterion):
    model.eval()
    total_loss = 0
    n_batches = 0
    hidden = model.init_hidden(batch_size)
    with torch.no_grad():
        for i in range(0, data.size(0) - 1, batch_size):
            x, y = divide_data(data[i:i+batch_size])
            output, hidden = model(x, hidden)
            loss = criterion(output.view(-1, len(char_set)), y.view(-1))
            total_loss += loss.item()
            n_batches += 1
    return total_loss / n_batches
Define the prediction function: it runs the prefix through the model to warm up the hidden state, then generates characters greedily
def predict(model, prefix, k):
    model.eval()
    hidden = model.init_hidden(1)
    with torch.no_grad():
        # feed the prefix through the model to build up the hidden state
        for char in prefix:
            x = torch.LongTensor([[char2idx[char]]]).to(device)
            output, hidden = model(x, hidden)
        # greedily generate k more characters
        for i in range(k):
            _, top_idx = output.topk(1)
            char = idx2char[top_idx.item()]
            print(char, end='')
            x = torch.LongTensor([[top_idx.item()]]).to(device)
            output, hidden = model(x, hidden)
Preprocess the data
data = preprocess_data(data_path, batch_size)
Define the model, loss function, and optimizer
model = RNN(len(char_set), hidden_size, len(char_set), n_layers, dropout).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
Train the model
for epoch in range(n_epochs):
    train_loss = train(model, data, criterion, optimizer)
    test_loss = test(model, data, criterion)  # evaluated on the training data here
    print('Epoch [{}/{}], Train Loss: {:.4f}, Test Loss: {:.4f}'.format(epoch+1, n_epochs, train_loss, test_loss))
Try generating from a prefix
prefix = 'ni hao wo shi'
k = 5
predict(model, prefix, k)