When doing natural language processing (NLP) with PyTorch, you typically combine libraries from its ecosystem (such as torchtext and transformers) with custom models. The steps and example code below walk through a simple NLP project, covering data preprocessing, model construction, and the training loop.
1. Install Dependencies
Make sure the following libraries are installed:
pip install torch torchtext spacy
python -m spacy download en_core_web_sm  # English tokenization model
2. Data Preprocessing
Use torchtext to load and preprocess the text data. Text classification on the IMDB dataset serves as the example:
Code example: loading and preprocessing the data
import torch
from torchtext.datasets import IMDB
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
# Define the tokenizer (spaCy English model)
tokenizer = get_tokenizer("spacy", language="en_core_web_sm")

# Load the dataset and materialize it into lists: the raw splits are
# one-shot iterators, so building the vocab would otherwise exhaust
# train_iter before the DataLoaders are created
train_iter, test_iter = IMDB(split=("train", "test"))
train_data, test_data = list(train_iter), list(test_iter)

# Build the vocabulary from the training split
def yield_tokens(data_iter):
    for _, text in data_iter:
        yield tokenizer(text)

vocab = build_vocab_from_iterator(yield_tokens(train_data), specials=["<unk>", "<pad>"])
vocab.set_default_index(vocab["<unk>"])

# Convert text and labels to tensors
text_pipeline = lambda x: vocab(tokenizer(x))
label_pipeline = lambda x: int(x) - 1  # IMDB labels are 1/2; shift them to start at 0

# Batching function: pad each batch to its longest sequence
def collate_batch(batch):
    text_list, label_list = [], []
    for label, text in batch:
        text_list.append(torch.tensor(text_pipeline(text), dtype=torch.int64))
        label_list.append(label_pipeline(label))
    padded = torch.nn.utils.rnn.pad_sequence(
        text_list, batch_first=True, padding_value=vocab["<pad>"]
    )  # batch_first matches the LSTM below; pad with the <pad> index, not 1.0
    return padded, torch.tensor(label_list)
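A quick smoke test confirms the pipelines and collate function produce the expected shapes. This is a minimal sketch; the two (label, text) pairs below are made up for illustration and simply mimic the IMDB format:
sample_batch = [(1, "This movie was great"), (2, "Terrible plot and acting")]
texts, labels = collate_batch(sample_batch)
print(texts.shape)  # (batch_size, max_seq_len), e.g. torch.Size([2, 4])
print(labels)       # tensor([0, 1])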
3. Define the Model
Build a simple LSTM-based text classifier:
Code example: model definition
import torch.nn as nn

class TextClassifier(nn.Module):
    def __init__(self, vocab_size, embed_dim, hidden_dim, num_class):
        super().__init__()
        # padding_idx=1 matches vocab["<pad>"], so pad embeddings stay at zero
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=1)
        self.lstm = nn.LSTM(embed_dim, hidden_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, num_class)

    def forward(self, text):
        # text: (batch_size, seq_len) -> embedded: (batch_size, seq_len, embed_dim)
        embedded = self.embedding(text)
        # hidden: (num_layers, batch_size, hidden_dim); classify from the final hidden state
        output, (hidden, _) = self.lstm(embedded)
        return self.fc(hidden.squeeze(0))
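Before training, a dummy forward pass is a cheap way to sanity-check the tensor shapes. A minimal sketch, assuming the hyperparameters used in the next section:
model = TextClassifier(vocab_size=len(vocab), embed_dim=128, hidden_dim=256, num_class=2)
dummy = torch.randint(0, len(vocab), (4, 10))  # fake batch: 4 sequences of length 10
print(model(dummy).shape)  # torch.Size([4, 2]): one logit per class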
4. Training Loop
Code example: training and evaluation
from torch.utils.data import DataLoader

# Hyperparameters
BATCH_SIZE = 64
EMBED_DIM = 128
HIDDEN_DIM = 256
NUM_CLASS = 2
EPOCHS = 5

# Data loaders, built from the materialized lists (the raw iterators are already exhausted)
train_loader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch)
test_loader = DataLoader(test_data, batch_size=BATCH_SIZE, collate_fn=collate_batch)

# Initialize the model, loss function, and optimizer
model = TextClassifier(len(vocab), EMBED_DIM, HIDDEN_DIM, NUM_CLASS)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# Training loop
for epoch in range(EPOCHS):
    model.train()
    for texts, labels in train_loader:
        optimizer.zero_grad()
        outputs = model(texts)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

    # Evaluate on the test set after each epoch
    model.eval()
    total, correct = 0, 0
    with torch.no_grad():
        for texts, labels in test_loader:
            outputs = model(texts)
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print(f"Epoch {epoch+1}, Accuracy: {correct / total:.4f}")
5. Advanced Techniques
Using a pretrained model (e.g., BERT)
Combine with the Hugging Face transformers library:
pip install transformers
Code example: BERT text classification
from transformers import BertTokenizer, BertModel

# Load the pretrained model and tokenizer
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
bert_model = BertModel.from_pretrained("bert-base-uncased")

class BertClassifier(nn.Module):
    def __init__(self, num_class):
        super().__init__()
        self.bert = bert_model
        # bert-base has a hidden size of 768
        self.classifier = nn.Linear(768, num_class)

    def forward(self, input_ids, attention_mask):
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        pooled_output = outputs.pooler_output  # (batch_size, 768) [CLS] representation
        return self.classifier(pooled_output)
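The Hugging Face tokenizer handles padding, truncation, and the attention mask in one call. A minimal usage sketch (the sentences and max_length value are made up for illustration; the classifier head is untrained here):
sentences = ["This movie was great", "Terrible plot and acting"]
encoded = tokenizer(sentences, padding=True, truncation=True, max_length=128, return_tensors="pt")

clf = BertClassifier(num_class=2)
logits = clf(encoded["input_ids"], encoded["attention_mask"])
print(logits.shape)  # torch.Size([2, 2])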
6. Key Takeaways
Data preprocessing: tokenization, vocabulary construction, padding/truncating sequences.
Model architecture: embedding layer, RNN/LSTM/Transformer, fully connected layer.
Training optimization: learning-rate scheduling, gradient clipping, early stopping (see the sketch below).
Libraries: torchtext for data processing, transformers for pretrained models.
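These optimization techniques slot directly into the training loop from Section 4. A hedged sketch: the patience value is an illustrative assumption, and the test set stands in for a proper validation split, which you would normally hold out separately:
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.5)  # halve LR every 2 epochs
best_loss, patience, bad_epochs = float("inf"), 2, 0  # early-stopping bookkeeping (illustrative)

for epoch in range(EPOCHS):
    model.train()
    for texts, labels in train_loader:
        optimizer.zero_grad()
        loss = criterion(model(texts), labels)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)  # gradient clipping
        optimizer.step()
    scheduler.step()  # learning-rate adjustment

    # Early stopping: track loss on the held-out data
    model.eval()
    val_loss = 0.0
    with torch.no_grad():
        for texts, labels in test_loader:
            val_loss += criterion(model(texts), labels).item()
    if val_loss < best_loss:
        best_loss, bad_epochs = val_loss, 0
    else:
        bad_epochs += 1
        if bad_epochs >= patience:
            break  # no improvement for `patience` epochs; stop training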