# Stop Memorizing the Attention Formula! Build a Hierarchical Attention Network (HAN) by Hand with Python + PyTorch
Implementing a hierarchical attention network from scratch: building an interpretable text-analysis model with PyTorch.

In natural language processing, understanding the hierarchical structure of long documents has always been a challenge. Traditional attention mechanisms are powerful, but they often struggle with nested text hierarchies such as word → sentence → paragraph. This is where the Hierarchical Attention Network (HAN) comes in: much like a human reader, it first understands the words, then grasps the sentences, and finally integrates the meaning of the whole passage. This article walks you through building this architecture from scratch in PyTorch. Along the way you will see that:

- the attention mechanism is no longer a black box: visualizing the weights shows what the model is focusing on;
- GRU units pass and distill information between the different levels;
- HAN is particularly well suited to structured-text tasks such as code-change analysis and medical-report parsing.

## 1. Environment Setup and Data Preprocessing

### 1.1 Installing the Dependencies

Python 3.8 and a recent version of PyTorch are recommended. After creating a clean virtual environment, install:

```bash
pip install torch==1.12.0 torchtext==0.13.0 matplotlib numpy
```

### 1.2 Building a Sample Dataset

To simulate a code-review scenario, we construct dummy data with three levels:

```python
import torch
from collections import defaultdict

# Sample data structure: hunk → lines → words
sample_data = [
    {
        "hunk_id": 1,
        "lines": [
            {"line_id": 1, "text": "fix null pointer exception"},
            {"line_id": 2, "text": "add input validation"}
        ]
    },
    {
        "hunk_id": 2,
        "lines": [
            {"line_id": 3, "text": "optimize database query"},
            {"line_id": 4, "text": "remove redundant joins"}
        ]
    }
]

# Build the vocabulary
word_vocab = defaultdict(lambda: len(word_vocab))
word_vocab["<pad>"] = 0  # padding token
word_vocab["<unk>"] = 1  # unknown word
for hunk in sample_data:
    for line in hunk["lines"]:
        for word in line["text"].split():
            _ = word_vocab[word.lower()]

print(f"Vocabulary size: {len(word_vocab)}")
```

## 2. Word-Level Encoding and Attention

### 2.1 Bidirectional GRU Encoder

The word-level encoder captures the context around each word:

```python
import torch.nn as nn

class WordLevelEncoder(nn.Module):
    def __init__(self, vocab_size, embed_dim=100, hidden_size=50):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
        self.gru = nn.GRU(
            input_size=embed_dim,
            hidden_size=hidden_size,
            bidirectional=True,
            batch_first=True
        )

    def forward(self, x):
        # x shape: (batch_size, seq_len)
        embedded = self.embedding(x)      # (batch_size, seq_len, embed_dim)
        outputs, _ = self.gru(embedded)   # (batch_size, seq_len, 2*hidden_size)
        return outputs
```

### 2.2 Word-Level Attention

The attention layer lets the model learn to focus on the key words:

```python
class WordAttention(nn.Module):
    def __init__(self, hidden_size):
        super().__init__()
        self.linear = nn.Linear(2 * hidden_size, hidden_size)
        self.context_vector = nn.Parameter(torch.randn(hidden_size))

    def forward(self, encoder_outputs):
        # encoder_outputs shape: (batch_size, seq_len, 2*hidden_size)
        u = torch.tanh(self.linear(encoder_outputs))    # (batch_size, seq_len, hidden_size)
        scores = torch.matmul(u, self.context_vector)   # (batch_size, seq_len)
        alphas = torch.softmax(scores, dim=1)           # attention weights
        return torch.sum(encoder_outputs * alphas.unsqueeze(-1), dim=1), alphas
```

## 3. Line-Level Encoding and Attention

### 3.1 Line Encoder

The line encoder processes the sequence of outputs from the word-level stage:

```python
class LineLevelEncoder(nn.Module):
    def __init__(self, input_size, hidden_size=50):
        super().__init__()
        self.gru = nn.GRU(
            input_size=input_size,
            hidden_size=hidden_size,
            bidirectional=True,
            batch_first=True
        )

    def forward(self, x):
        # x shape: (batch_size, num_lines, 2*word_hidden_size)
        outputs, _ = self.gru(x)   # (batch_size, num_lines, 2*hidden_size)
        return outputs
```

### 3.2 Line-Level Attention

Line-level attention identifies the key lines in a document:

```python
class LineAttention(nn.Module):
    def __init__(self, hidden_size):
        super().__init__()
        self.linear = nn.Linear(2 * hidden_size, hidden_size)
        self.context_vector = nn.Parameter(torch.randn(hidden_size))

    def forward(self, encoder_outputs):
        # Same computation as WordAttention, but over line-level representations
        u = torch.tanh(self.linear(encoder_outputs))
        scores = torch.matmul(u, self.context_vector)
        alphas = torch.softmax(scores, dim=1)
        return torch.sum(encoder_outputs * alphas.unsqueeze(-1), dim=1), alphas
```
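Before moving on to the hunk level, it can help to sanity-check the word-level components on a single line. The snippet below is a minimal sketch assuming the classes and `word_vocab` defined above; the token ids are made up purely for illustration:

```python
# Quick shape check for the word-level encoder + attention (illustrative token ids)
word_encoder = WordLevelEncoder(len(word_vocab), embed_dim=100, hidden_size=50)
word_attention = WordAttention(hidden_size=50)

token_ids = torch.tensor([[2, 3, 4, 5]])           # one line of 4 tokens, batch_size=1
word_outputs = word_encoder(token_ids)             # (1, 4, 100)
line_rep, alphas = word_attention(word_outputs)    # (1, 100), (1, 4)

print(line_rep.shape, alphas.shape)
print(alphas.sum(dim=1))   # the attention weights of each line sum to 1
```

If the shapes match and the weights sum to 1, the two modules compose correctly, and the same pattern repeats at the line and hunk levels below.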
## 4. Hunk-Level Encoding and the Full HAN

### 4.1 Hunk Encoder

The hunk-level encoder processes the sequence of line-level representations:

```python
class HunkLevelEncoder(nn.Module):
    def __init__(self, input_size, hidden_size=50):
        super().__init__()
        self.gru = nn.GRU(
            input_size=input_size,
            hidden_size=hidden_size,
            bidirectional=True,
            batch_first=True
        )

    def forward(self, x):
        # x shape: (batch_size, num_hunks, 2*line_hidden_size)
        outputs, _ = self.gru(x)
        return outputs
```

### 4.2 The Complete HAN Architecture

Putting all the components together into an end-to-end model:

```python
class HierarchicalAttentionNetwork(nn.Module):
    def __init__(self, vocab_size, word_embed_dim=100, word_hidden_size=50,
                 line_hidden_size=50, hunk_hidden_size=50):
        super().__init__()
        self.word_encoder = WordLevelEncoder(vocab_size, word_embed_dim, word_hidden_size)
        self.word_attention = WordAttention(word_hidden_size)
        self.line_encoder = LineLevelEncoder(2 * word_hidden_size, line_hidden_size)
        self.line_attention = LineAttention(line_hidden_size)
        self.hunk_encoder = HunkLevelEncoder(2 * line_hidden_size, hunk_hidden_size)
        self.hunk_attention = LineAttention(hunk_hidden_size)  # reuse the LineAttention structure

    def forward(self, hunks):
        # hunks: preprocessed input for one document, a list of hunks,
        # each hunk a list of word-id tensors shaped (1, seq_len)

        # Word-level processing
        line_representations = []
        word_attentions = []
        for lines in hunks:
            line_reps = []
            word_atts = []
            for words in lines:
                word_outputs = self.word_encoder(words)
                line_rep, word_att = self.word_attention(word_outputs)
                line_reps.append(line_rep)
                word_atts.append(word_att)
            line_representations.append(torch.cat(line_reps, dim=0))  # (num_lines, 2*word_hidden)
            word_attentions.append(word_atts)

        # Line-level processing
        hunk_representations = []
        line_attentions = []
        for lines in line_representations:
            line_outputs = self.line_encoder(lines.unsqueeze(0))
            hunk_rep, line_att = self.line_attention(line_outputs)
            hunk_representations.append(hunk_rep)
            line_attentions.append(line_att)

        # Hunk-level processing
        hunk_inputs = torch.cat(hunk_representations, dim=0).unsqueeze(0)  # (1, num_hunks, 2*line_hidden)
        hunk_outputs = self.hunk_encoder(hunk_inputs)
        final_output, hunk_attentions = self.hunk_attention(hunk_outputs)

        return {
            "output": final_output,
            "word_attentions": word_attentions,
            "line_attentions": line_attentions,
            "hunk_attentions": hunk_attentions
        }
```

## 5. Training the Model and Visualizing Attention

### 5.1 A Custom Training Loop

The training loop below also monitors the attention weights:

```python
def train_model(model, data_loader, criterion, optimizer, epochs=10):
    model.train()
    for epoch in range(epochs):
        total_loss = 0
        for batch in data_loader:
            optimizer.zero_grad()
            outputs = model(batch["input"])
            loss = criterion(outputs["output"], batch["label"])
            loss.backward()
            optimizer.step()
            total_loss += loss.item()

        print(f"Epoch {epoch + 1}, Loss: {total_loss / len(data_loader):.4f}")

        # Visualize the attention weights of the first sample in the last batch
        visualize_attention(batch["input"][0],
                            outputs["word_attentions"][0][0],  # first line of the first hunk
                            outputs["line_attentions"][0],
                            outputs["hunk_attentions"][0])
```

### 5.2 Visualizing the Attention Weights

Understanding what the model is paying attention to:

```python
import matplotlib.pyplot as plt

def visualize_attention(sample, word_att, line_att, hunk_att):
    fig, axes = plt.subplots(3, 1, figsize=(10, 12))

    # Drop any leading batch dimension so each weight tensor is 1-D
    word_att, line_att, hunk_att = word_att.squeeze(0), line_att.squeeze(0), hunk_att.squeeze(0)

    # Word-level attention
    words = ["<pad>"] * len(word_att)  # replace with the actual word sequence
    axes[0].bar(range(len(word_att)), word_att.detach().numpy())
    axes[0].set_title("Word-level Attention")

    # Line-level attention
    lines = ["Line 1", "Line 2"]  # example line labels
    axes[1].bar(range(len(line_att)), line_att.detach().numpy())
    axes[1].set_xticks(range(len(lines)))
    axes[1].set_xticklabels(lines, rotation=45)
    axes[1].set_title("Line-level Attention")

    # Hunk-level attention
    hunks = ["Hunk 1", "Hunk 2"]  # example hunk labels
    axes[2].bar(range(len(hunk_att)), hunk_att.detach().numpy())
    axes[2].set_xticks(range(len(hunks)))
    axes[2].set_xticklabels(hunks)
    axes[2].set_title("Hunk-level Attention")

    plt.tight_layout()
    plt.show()
```
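The training loop above assumes a `data_loader` that yields batches with `"input"` and `"label"` fields. As a quick end-to-end check before wiring up a real data pipeline, the following sketch (the `encode_hunks` helper is an assumption for illustration, not part of the pipeline above) converts `sample_data` into word-id tensors and runs one forward pass through the HAN:

```python
# Convert the nested sample_data structure into lists of (1, seq_len) word-id tensors
def encode_hunks(data, vocab):
    hunks = []
    for hunk in data:
        lines = [torch.tensor([[vocab[w.lower()] for w in line["text"].split()]])
                 for line in hunk["lines"]]
        hunks.append(lines)
    return hunks

model = HierarchicalAttentionNetwork(vocab_size=len(word_vocab))
hunks = encode_hunks(sample_data, word_vocab)   # one document = a list of hunks
outputs = model(hunks)

print(outputs["output"].shape)         # (1, 2*hunk_hidden_size): the document representation
print(outputs["hunk_attentions"])      # which hunk the model weights most
print(outputs["line_attentions"][0])   # line weights inside the first hunk
```

For actual training, a small classification head (for example, an `nn.Linear` mapping `outputs["output"]` to class logits) is still needed before the loss in `train_model` can be computed; the head is left to the reader.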
## 6. Optimization Tips for Real-World Use

When deploying a HAN in a real project, a few optimizations are worth considering:

- Batched processing: the reference implementation processes samples one at a time, which is slow; `pack_padded_sequence` can be used to handle variable-length sequences in batches (see the sketch at the end of this section).
- Faster attention: for long sequences, scaled dot-product attention is cheaper to compute.
- Multi-task learning: predict several related labels in the output layer, e.g. defect type and severity at the same time in code review.
- Hierarchical dropout: apply different dropout rates at the word, line, and hunk levels.

An example of the optimized attention computation:

```python
class EfficientAttention(nn.Module):
    def __init__(self, hidden_size):
        super().__init__()
        self.query = nn.Linear(hidden_size, hidden_size)
        self.key = nn.Linear(hidden_size, hidden_size)

    def forward(self, encoder_outputs):
        # encoder_outputs: (batch_size, seq_len, hidden_size)
        q = self.query(encoder_outputs)   # (batch_size, seq_len, hidden_size)
        k = self.key(encoder_outputs)     # (batch_size, seq_len, hidden_size)
        scores = torch.bmm(q, k.transpose(1, 2)) / (encoder_outputs.size(-1) ** 0.5)
        alphas = torch.softmax(scores, dim=-1)
        return torch.bmm(alphas, encoder_outputs), alphas
```
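To complement the batching tip above, here is a minimal sketch, not from the original walkthrough, of how `pack_padded_sequence` can feed a padded batch of lines through the word-level GRU so that padded positions are skipped; the helper name and tensors are illustrative:

```python
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

def encode_packed(word_encoder, padded_ids, lengths):
    # Embed a padded batch, pack it so the GRU ignores <pad> steps, then re-pad the outputs
    embedded = word_encoder.embedding(padded_ids)                    # (batch, max_len, embed_dim)
    packed = pack_padded_sequence(embedded, lengths,
                                  batch_first=True, enforce_sorted=False)
    packed_out, _ = word_encoder.gru(packed)
    outputs, _ = pad_packed_sequence(packed_out, batch_first=True)   # (batch, max_len, 2*hidden)
    return outputs

# Example: two lines of different lengths padded to the same width (0 is the <pad> index)
padded_ids = torch.tensor([[2, 3, 4, 5],
                           [6, 7, 8, 0]])
lengths = torch.tensor([4, 3])
outputs = encode_packed(WordLevelEncoder(len(word_vocab)), padded_ids, lengths)
print(outputs.shape)   # torch.Size([2, 4, 100])
```

When batching this way, the attention scores at padded positions should also be masked (set to a large negative value) before the softmax so that padding never receives weight.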