PyTorch Transformer Implementation: Building a Sequence-to-Sequence Model
Below is a simple PyTorch implementation of a Transformer model:
```python
import math

import torch
import torch.nn as nn
import torch.nn.functional as F
class Transformer(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim, num_heads, num_layers, dropout):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.num_heads = num_heads
        self.num_layers = num_layers
        self.dropout = dropout
        # A single embedding table is shared by source and target sequences,
        # so input_dim must cover both vocabularies.
        self.embedding = nn.Embedding(input_dim, hidden_dim)
        self.pos_encoding = PositionalEncoding(hidden_dim, dropout)
        self.encoder_layers = nn.ModuleList([EncoderLayer(hidden_dim, num_heads, dropout) for _ in range(num_layers)])
        self.decoder_layers = nn.ModuleList([DecoderLayer(hidden_dim, num_heads, dropout) for _ in range(num_layers)])
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, src, trg, src_mask=None, trg_mask=None):
        # src: (batch, src_len), trg: (batch, trg_len), token index tensors
        src_emb = self.embedding(src)
        trg_emb = self.embedding(trg)
        src_emb = self.pos_encoding(src_emb)
        trg_emb = self.pos_encoding(trg_emb)
        for layer in self.encoder_layers:
            src_emb = layer(src_emb, src_mask)
        for layer in self.decoder_layers:
            trg_emb = layer(trg_emb, src_emb, trg_mask, src_mask)
        output = self.fc(trg_emb)  # (batch, trg_len, output_dim)
        return output
class PositionalEncoding(nn.Module):
    def __init__(self, hidden_dim, dropout, max_len=5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Precompute sinusoidal encodings for up to max_len positions.
        pe = torch.zeros(max_len, hidden_dim)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, hidden_dim, 2).float() * (-math.log(10000.0) / hidden_dim))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        # Shape (1, max_len, hidden_dim) so it broadcasts over batch-first inputs.
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)

    def forward(self, x):
        # x: (batch, seq_len, hidden_dim)
        x = x + self.pe[:, :x.size(1), :]
        return self.dropout(x)
class EncoderLayer(nn.Module):
    def __init__(self, hidden_dim, num_heads, dropout):
        super().__init__()
        self.self_attn = MultiHeadAttention(hidden_dim, num_heads, dropout)
        self.feed_forward = FeedForward(hidden_dim, dropout)
        self.norm1 = nn.LayerNorm(hidden_dim)
        self.norm2 = nn.LayerNorm(hidden_dim)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x, mask):
        attn_output, _ = self.self_attn(x, x, x, mask)
        x = self.norm1(x + self.dropout(attn_output))
        ff_output = self.feed_forward(x)
        x = self.norm2(x + self.dropout(ff_output))
        return x
class DecoderLayer(nn.Module):
    def __init__(self, hidden_dim, num_heads, dropout):
        super().__init__()
        self.self_attn = MultiHeadAttention(hidden_dim, num_heads, dropout)
        self.enc_attn = MultiHeadAttention(hidden_dim, num_heads, dropout)
        self.feed_forward = FeedForward(hidden_dim, dropout)
        self.norm1 = nn.LayerNorm(hidden_dim)
        self.norm2 = nn.LayerNorm(hidden_dim)
        self.norm3 = nn.LayerNorm(hidden_dim)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x, enc_output, trg_mask, src_mask):
        attn_output, _ = self.self_attn(x, x, x, trg_mask)
        x = self.norm1(x + self.dropout(attn_output))
        attn_output, _ = self.enc_attn(x, enc_output, enc_output, src_mask)
        x = self.norm2(x + self.dropout(attn_output))
        ff_output = self.feed_forward(x)
        x = self.norm3(x + self.dropout(ff_output))
        return x
class MultiHeadAttention(nn.Module):
    def __init__(self, hidden_dim, num_heads, dropout):
        super().__init__()
        # hidden_dim must be divisible by num_heads.
        self.hidden_dim = hidden_dim
        self.num_heads = num_heads
        self.dropout = nn.Dropout(p=dropout)
        self.q_linear = nn.Linear(hidden_dim, hidden_dim)
        self.k_linear = nn.Linear(hidden_dim, hidden_dim)
        self.v_linear = nn.Linear(hidden_dim, hidden_dim)
        self.fc = nn.Linear(hidden_dim, hidden_dim)

    def forward(self, q, k, v, mask=None):
        bs = q.size(0)
        # Linear projections, then split into (bs, num_heads, seq_len, head_dim)
        q = self.q_linear(q).view(bs, -1, self.num_heads, self.hidden_dim // self.num_heads).transpose(1, 2)
        k = self.k_linear(k).view(bs, -1, self.num_heads, self.hidden_dim // self.num_heads).transpose(1, 2)
        v = self.v_linear(v).view(bs, -1, self.num_heads, self.hidden_dim // self.num_heads).transpose(1, 2)
        # Scaled dot-product attention
        scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.hidden_dim // self.num_heads)
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e9)
        attn = F.softmax(scores, dim=-1)
        attn = self.dropout(attn)
        # Concatenate heads and apply the final linear projection
        output = torch.matmul(attn, v).transpose(1, 2).contiguous().view(bs, -1, self.hidden_dim)
        output = self.fc(output)
        return output, attn
class FeedForward(nn.Module):
    def __init__(self, hidden_dim, dropout):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.dropout = nn.Dropout(p=dropout)
        self.fc1 = nn.Linear(hidden_dim, 4 * hidden_dim)
        self.fc2 = nn.Linear(4 * hidden_dim, hidden_dim)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x
```
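The attention module can be exercised on its own to verify the head-splitting and reshaping logic. A minimal shape check, continuing from the definitions above (the tensor sizes are illustrative):

```python
# Quick shape check of MultiHeadAttention in isolation (sizes are illustrative).
mha = MultiHeadAttention(hidden_dim=64, num_heads=8, dropout=0.1)
x = torch.randn(2, 10, 64)   # (batch, seq_len, hidden_dim)
out, attn = mha(x, x, x)     # self-attention, no mask
print(out.shape)             # torch.Size([2, 10, 64])
print(attn.shape)            # torch.Size([2, 8, 10, 10]) -- one weight matrix per head
```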
This implementation covers the core Transformer components: encoder and decoder layers, multi-head attention, and a position-wise feed-forward network. It can be used for sequence-to-sequence tasks such as machine translation and text summarization, as in the usage sketch below.
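To make the interface concrete, here is a minimal usage sketch that continues from the code above. The vocabulary size, hyperparameters, padding index, and mask-building helpers are illustrative assumptions, not part of the original implementation:

```python
# Illustrative usage sketch: hyperparameters, PAD_IDX, and the mask helpers
# below are assumptions, not part of the original code.
PAD_IDX = 0

def make_src_mask(src):
    # (batch, 1, 1, src_len): True for real tokens, False for padding.
    return (src != PAD_IDX).unsqueeze(1).unsqueeze(2)

def make_trg_mask(trg):
    # Combine a padding mask with a lower-triangular (causal) mask so each
    # target position can only attend to itself and earlier positions.
    pad_mask = (trg != PAD_IDX).unsqueeze(1).unsqueeze(2)   # (batch, 1, 1, trg_len)
    trg_len = trg.size(1)
    causal = torch.tril(torch.ones(trg_len, trg_len, dtype=torch.bool, device=trg.device))
    return pad_mask & causal                                # (batch, 1, trg_len, trg_len)

model = Transformer(input_dim=1000, hidden_dim=64, output_dim=1000,
                    num_heads=8, num_layers=2, dropout=0.1)

src = torch.randint(1, 1000, (2, 12))   # (batch, src_len)
trg = torch.randint(1, 1000, (2, 10))   # (batch, trg_len)

# Teacher forcing: feed trg[:, :-1] to the decoder and predict trg[:, 1:].
logits = model(src, trg[:, :-1], make_src_mask(src), make_trg_mask(trg[:, :-1]))
loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), trg[:, 1:].reshape(-1))
loss.backward()
print(logits.shape)                     # torch.Size([2, 9, 1000])
```

Both masks broadcast against the (batch, num_heads, query_len, key_len) attention scores: the source mask hides padding tokens, and the target mask additionally blocks attention to future positions.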