PyTorch TextRCNN Multi-Label Text Classification: A Hands-On Example
This is a hands-on example of text classification with a TextRCNN model implemented in PyTorch. Note that the training loop below uses CrossEntropyLoss with integer labels, i.e. single-label multi-class classification; a note in section 4 sketches how to adapt it to a true multi-label setup.
1. Import Libraries
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
2. Define the Dataset Class
class TextDataset(Dataset):
    def __init__(self, data, transform=None):
        # data: a pandas DataFrame with 'text' and 'label' columns
        self.data = data
        self.transform = transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        text = self.data.iloc[idx]['text']
        label = self.data.iloc[idx]['label']
        if self.transform:
            # The transform should turn the raw string into a fixed-length
            # LongTensor of token ids (see the sketch below).
            text = self.transform(text)
        return text, label
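Without a transform, __getitem__ returns a raw string, which the default DataLoader collate cannot batch into the LongTensor that nn.Embedding expects. Below is a minimal sketch of such a transform, assuming whitespace tokenization, a word2idx vocabulary built elsewhere, and padding/truncation to a fixed max_len; these names and choices are assumptions, not part of the original example.
# Hypothetical text-to-tensor transform: whitespace tokenization,
# vocabulary lookup with an <unk> fallback, pad/truncate to max_len.
class TextToTensor:
    def __init__(self, word2idx, max_len=50, pad_idx=0, unk_idx=1):
        self.word2idx = word2idx
        self.max_len = max_len
        self.pad_idx = pad_idx
        self.unk_idx = unk_idx

    def __call__(self, text):
        ids = [self.word2idx.get(tok, self.unk_idx) for tok in text.split()]
        ids = ids[:self.max_len]                            # truncate
        ids += [self.pad_idx] * (self.max_len - len(ids))   # pad
        return torch.tensor(ids, dtype=torch.long)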
3. Define the TextRCNN Model
Strictly speaking, the original TextRCNN concatenates each token's embedding with left and right recurrent context and max-pools over time; the simplified variant below classifies from the final hidden states of a (bi)directional RNN.
class Textrcnn(nn.Module):
    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim,
                 num_layers, bidirectional, dropout):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.rnn = nn.RNN(embedding_dim, hidden_dim, num_layers=num_layers,
                          bidirectional=bidirectional, batch_first=True)
        self.bidirectional = bidirectional
        self.fc = nn.Linear(hidden_dim * 2 if bidirectional else hidden_dim, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, text):
        # text: (batch, seq_len) LongTensor of token ids
        embedded = self.dropout(self.embedding(text))
        # hidden: (num_layers * num_directions, batch, hidden_dim)
        output, hidden = self.rnn(embedded)
        if self.bidirectional:
            # Concatenate the last layer's forward and backward final states.
            hidden = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
        else:
            hidden = hidden[-1, :, :]
        return self.fc(self.dropout(hidden))  # (batch, output_dim) raw logits
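A quick shape check with random token ids helps catch wiring mistakes before training; the batch size and sequence length here are arbitrary:
# Smoke test: expect logits of shape (4, 5).
m = Textrcnn(vocab_size=10000, embedding_dim=100, hidden_dim=128,
             output_dim=5, num_layers=2, bidirectional=True, dropout=0.5)
dummy = torch.randint(0, 10000, (4, 50))
print(m(dummy).shape)  # torch.Size([4, 5])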
4. Hyperparameters and Training Code
# Hyperparameters
vocab_size = 10000
embedding_dim = 100
hidden_dim = 128
output_dim = 5  # number of classes; adjust to your dataset
num_layers = 2
bidirectional = True
dropout = 0.5
lr = 0.001
batch_size = 64
num_epochs = 10
# Load the data ('data.csv' must contain 'text' and 'label' columns)
data = pd.read_csv('data.csv')
train_data, test_data = train_test_split(data, test_size=0.2, random_state=123)
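# Assumption: build a simple whitespace vocabulary from the training split
# (capped at vocab_size) and reuse the TextToTensor transform sketched in
# section 2, so that DataLoader batches come out as LongTensors.
vocab = {'<pad>': 0, '<unk>': 1}
for tok in ' '.join(train_data['text']).split():
    if tok not in vocab and len(vocab) < vocab_size:
        vocab[tok] = len(vocab)
transform = TextToTensor(vocab)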
train_dataset = TextDataset(train_data, transform=transform)
test_dataset = TextDataset(test_data, transform=transform)
# Define the data loaders
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
# Define the model, optimizer, and loss
model = Textrcnn(vocab_size, embedding_dim, hidden_dim, output_dim, num_layers, bidirectional, dropout)
optimizer = optim.Adam(model.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss()  # single-label multi-class; see the multi-label note below
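Despite the "multi-label" framing in the title, CrossEntropyLoss with integer targets performs single-label multi-class classification. A genuine multi-label setup, where one sample can carry several labels at once, typically swaps in BCEWithLogitsLoss, multi-hot float targets of shape (batch, output_dim), and a sigmoid threshold instead of argmax at evaluation time. A minimal sketch under those assumptions:
# Hypothetical multi-label variant: targets are multi-hot float vectors,
# e.g. [1., 0., 1., 0., 0.] marks classes 0 and 2 as present.
multilabel_criterion = nn.BCEWithLogitsLoss()

def multilabel_predict(logits, threshold=0.5):
    # Decide each class independently via a sigmoid threshold
    # instead of picking a single argmax class.
    return (torch.sigmoid(logits) > threshold).long()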
# Training function
def train(model, iterator, optimizer, criterion):
    model.train()
    epoch_loss = 0.0
    for batch_text, batch_label in iterator:
        optimizer.zero_grad()
        predictions = model(batch_text)  # (batch, output_dim) logits
        loss = criterion(predictions, batch_label)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
    return epoch_loss / len(iterator)
# Evaluation function
def evaluate(model, iterator):
    model.eval()
    all_predictions = []
    all_labels = []
    with torch.no_grad():
        for batch_text, batch_label in iterator:
            logits = model(batch_text)
            all_predictions.extend(torch.argmax(logits, dim=1).tolist())
            all_labels.extend(batch_label.tolist())
    return classification_report(all_labels, all_predictions)
# Run training
for epoch in range(num_epochs):
    train_loss = train(model, train_loader, optimizer, criterion)
    print(f'Epoch {epoch + 1}/{num_epochs}, train loss: {train_loss:.4f}')
    print(evaluate(model, test_loader))
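Once training looks reasonable, you will usually want to persist the learned weights for later inference; a minimal sketch (the filename is an assumption):
# Save the trained weights (hypothetical filename).
torch.save(model.state_dict(), 'textrcnn.pt')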
5. Usage Notes
- Save your dataset as 'data.csv' with two columns, 'text' and 'label' (a hypothetical sample is shown below).
- Make sure the 'label' column holds integer class ids, and adjust the 'output_dim' hyperparameter to match the number of classes.
- Run the script to train and evaluate.
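For illustration only, data.csv might look like this; the rows are made up:
text,label
"the plot was gripping from start to finish",0
"battery life is disappointing",1
"delivery arrived two days early",2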
Notes:
- This example is for reference only; real applications will likely need adjustments.
- The model's performance depends on the dataset and the choice of hyperparameters.
- Pretrained word embeddings or a more expressive architecture (for example, nn.LSTM instead of nn.RNN) can improve performance.
- For best results, train on a larger dataset.