Based on the GitHub project: https://github.com/datawhalechina/llms-from-scratch-cn
Designing the LLM Architecture
The GPT model is based on the Transformer's decoder-only architecture. Its main characteristics are:

- Generates text sequentially, one token at a time
- A very large number of parameters (the complexity lies in scale, not in the amount of code)
- Many repeated, modular components
Taking the GPT-2 small model (124M parameters) as an example, the configuration is as follows:
```python
GPT_CONFIG_124M = {
    "vocab_size": 50257,   # vocabulary size of the BPE tokenizer
    "ctx_len": 1024,       # maximum context length
    "emb_dim": 768,        # embedding dimension
    "n_heads": 12,         # number of attention heads
    "n_layers": 12,        # number of Transformer blocks
    "drop_rate": 0.1,      # dropout rate
    "qkv_bias": False      # whether the QKV projections use a bias term
}
```
Basic Structure of the GPT Model
`cfg` is the configuration dictionary defined above; the `TransformerBlock` and `LayerNorm` classes used here are implemented in the sections that follow.
```python
import torch
import torch.nn as nn

class GPTModel(nn.Module):
    def __init__(self, cfg):
        super().__init__()
        # Token embedding layer
        self.tok_emb = nn.Embedding(cfg["vocab_size"], cfg["emb_dim"])
        # Positional embedding layer
        self.pos_emb = nn.Embedding(cfg["ctx_len"], cfg["emb_dim"])
        # Dropout applied to the combined embeddings
        self.drop_emb = nn.Dropout(cfg["drop_rate"])
        # Stack of n_layers identical Transformer blocks
        self.trf_blocks = nn.Sequential(
            *[TransformerBlock(cfg) for _ in range(cfg["n_layers"])])
        # Final layer normalization
        self.final_norm = LayerNorm(cfg["emb_dim"])
        # Output projection to vocabulary logits
        self.out_head = nn.Linear(cfg["emb_dim"], cfg["vocab_size"], bias=False)

    def forward(self, in_idx):
        batch_size, seq_len = in_idx.shape
        # Token embeddings
        tok_embeds = self.tok_emb(in_idx)
        # Positional embeddings
        pos_embeds = self.pos_emb(torch.arange(seq_len, device=in_idx.device))
        # Combine embeddings
        x = tok_embeds + pos_embeds
        x = self.drop_emb(x)
        # Pass through the Transformer blocks
        x = self.trf_blocks(x)
        # Final normalization
        x = self.final_norm(x)
        # Output logits
        logits = self.out_head(x)
        return logits
```
Layer Normalization
Layer normalization rescales the activations to zero mean and unit variance, which stabilizes and speeds up convergence:
```python
class LayerNorm(nn.Module):
    def __init__(self, emb_dim):
        super().__init__()
        self.eps = 1e-5  # small constant to prevent division by zero
        self.scale = nn.Parameter(torch.ones(emb_dim))   # learnable scale, initialized to ones
        self.shift = nn.Parameter(torch.zeros(emb_dim))  # learnable shift, initialized to zeros

    def forward(self, x):
        mean = x.mean(dim=-1, keepdim=True)                # mean μ along the last dimension
        var = x.var(dim=-1, keepdim=True, unbiased=False)  # biased variance σ² (denominator n)
        norm_x = (x - mean) / torch.sqrt(var + self.eps)   # standardize; ε keeps the denominator nonzero
        return self.scale * norm_x + self.shift            # affine transform restores expressive power
```
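As a quick sanity check (a minimal sketch, not part of the original listing), applying the layer to random data should yield activations with mean ≈ 0 and variance ≈ 1 along the last dimension:

```python
torch.manual_seed(123)
batch = torch.randn(2, 5)                # 2 samples with 5 features (illustrative sizes)
ln = LayerNorm(emb_dim=5)
out = ln(batch)
print(out.mean(dim=-1))                  # ≈ 0 for each sample
print(out.var(dim=-1, unbiased=False))   # ≈ 1 for each sample
```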
GELU Activation Function and Feed-Forward Network
GPT uses the GELU (Gaussian Error Linear Unit) activation function:
| Scenario | ReLU behavior | GELU behavior |
|---|---|---|
| Weak negative signal | Discarded outright (details may be lost) | Partially retained (e.g., roughly 30% of the signal strength) |
| Strong positive signal | Passed through fully | Passed through almost fully (over 95% retained) |
| Training stability | Tends to stall at the kink point | Smooth transition, less oscillation during training |
| Handling complex patterns | Needs more stacked layers | A single layer captures subtler variation |
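For reference, the implementation below uses the widely used tanh approximation of GELU (the exact definition is $x \cdot \Phi(x)$, where $\Phi$ is the standard normal CDF):

$$\text{GELU}(x) \approx 0.5\,x\left(1 + \tanh\left[\sqrt{\tfrac{2}{\pi}}\,\bigl(x + 0.044715\,x^{3}\bigr)\right]\right)$$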
```python
class GELU(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x):
        return 0.5 * x * (1 + torch.tanh(
            torch.sqrt(torch.tensor(2.0 / torch.pi)) *
            (x + 0.044715 * torch.pow(x, 3))
        ))
```
Feed-forward network implementation:
```python
class FeedForward(nn.Module):
    def __init__(self, cfg):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(cfg["emb_dim"], 4 * cfg["emb_dim"]),
            GELU(),
            nn.Linear(4 * cfg["emb_dim"], cfg["emb_dim"]),
            nn.Dropout(cfg["drop_rate"])
        )

    def forward(self, x):
        return self.layers(x)
```
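The feed-forward network expands the embedding dimension by a factor of 4 (768 → 3072) and projects it back, so the input and output shapes are identical. A quick check (a sketch with made-up batch and sequence sizes):

```python
ffn = FeedForward(GPT_CONFIG_124M)
x = torch.rand(2, 3, 768)     # (batch, sequence length, emb_dim) -- example sizes
print(ffn(x).shape)           # torch.Size([2, 3, 768])
```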
Shortcut Connections
Shortcut (residual) connections add a layer's input back to its output, which helps mitigate the vanishing-gradient problem in deep networks. In the Transformer block assembled in the next section, a shortcut wraps both the attention sublayer and the feed-forward sublayer; a minimal sketch of the idea follows.
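The sketch below is not from the original text (`ResidualSublayer` is a hypothetical name); it only illustrates the idea: the sublayer's output is added back to its input, so the identity path keeps gradients flowing even when the sublayer's gradients are small.

```python
class ResidualSublayer(nn.Module):
    """Hypothetical wrapper that adds a shortcut connection around any sublayer."""
    def __init__(self, sublayer):
        super().__init__()
        self.sublayer = sublayer

    def forward(self, x):
        return x + self.sublayer(x)   # identity path + transformed path

# Example: shortcut around a small Linear + GELU sublayer
layer = ResidualSublayer(nn.Sequential(nn.Linear(768, 768), GELU()))
x = torch.rand(2, 4, 768)
print(layer(x).shape)   # torch.Size([2, 4, 768]); shapes must match for the addition
```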
Assembling the Transformer Block
Multi-head attention and the feed-forward network are combined into a Transformer block (the `MultiHeadAttention` class is assumed to be the one implemented earlier in the project):
```python
class TransformerBlock(nn.Module):
    def __init__(self, cfg):
        super().__init__()
        self.att = MultiHeadAttention(
            d_in=cfg["emb_dim"],
            d_out=cfg["emb_dim"],
            block_size=cfg["ctx_len"],
            num_heads=cfg["n_heads"],
            dropout=cfg["drop_rate"],
            qkv_bias=cfg["qkv_bias"])
        self.ff = FeedForward(cfg)
        self.norm1 = LayerNorm(cfg["emb_dim"])
        self.norm2 = LayerNorm(cfg["emb_dim"])
        self.drop_resid = nn.Dropout(cfg["drop_rate"])

    def forward(self, x):
        # Shortcut connection around the attention sublayer
        shortcut = x
        x = self.norm1(x)
        x = self.att(x)
        x = self.drop_resid(x)
        x = x + shortcut

        # Shortcut connection around the feed-forward sublayer
        shortcut = x
        x = self.norm2(x)
        x = self.ff(x)
        x = self.drop_resid(x)
        x = x + shortcut
        return x
```
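Because each Transformer block preserves the shape of its input, the blocks can be stacked with `nn.Sequential` in the model below. A quick check (a sketch with illustrative sizes, assuming the `MultiHeadAttention` implementation from the earlier attention chapter is available):

```python
block = TransformerBlock(GPT_CONFIG_124M)
x = torch.rand(2, 4, 768)     # (batch, sequence length, emb_dim)
print(block(x).shape)         # torch.Size([2, 4, 768])
```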
Complete GPT Model Implementation
```python
class GPTModel(nn.Module):
    def __init__(self, cfg):
        super().__init__()
        self.tok_emb = nn.Embedding(cfg["vocab_size"], cfg["emb_dim"])
        self.pos_emb = nn.Embedding(cfg["ctx_len"], cfg["emb_dim"])
        self.drop_emb = nn.Dropout(cfg["drop_rate"])
        self.trf_blocks = nn.Sequential(
            *[TransformerBlock(cfg) for _ in range(cfg["n_layers"])])
        self.final_norm = LayerNorm(cfg["emb_dim"])
        self.out_head = nn.Linear(cfg["emb_dim"], cfg["vocab_size"], bias=False)

    def forward(self, in_idx):
        batch_size, seq_len = in_idx.shape
        tok_embeds = self.tok_emb(in_idx)
        pos_embeds = self.pos_emb(torch.arange(seq_len, device=in_idx.device))
        x = tok_embeds + pos_embeds
        x = self.drop_emb(x)
        x = self.trf_blocks(x)
        x = self.final_norm(x)
        logits = self.out_head(x)
        return logits
```
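As a sanity check (a sketch, not part of the original listing), counting the parameters of this implementation gives roughly 163M rather than 124M: the "124M" figure for GPT-2 small assumes weight tying, i.e. reusing the token-embedding matrix as the output head, which this implementation does not do.

```python
torch.manual_seed(123)
model = GPTModel(GPT_CONFIG_124M)

total_params = sum(p.numel() for p in model.parameters())
print(f"Total parameters: {total_params:,}")      # ≈ 163M with the reference MultiHeadAttention

# Subtracting the untied output head recovers the familiar 124M figure
tied = total_params - sum(p.numel() for p in model.out_head.parameters())
print(f"Parameters with weight tying: {tied:,}")  # ≈ 124M
```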
Text Generation
Text is generated with greedy decoding, appending the most probable next token at each step:
```python
def generate_text_simple(model, idx, max_new_tokens, context_size):
    for _ in range(max_new_tokens):
        # Truncate the context to the supported context length
        idx_cond = idx[:, -context_size:]
        with torch.no_grad():
            logits = model(idx_cond)
        # Keep only the logits for the last token
        logits = logits[:, -1, :]
        probas = torch.softmax(logits, dim=-1)
        # Greedy decoding: pick the most likely next token
        idx_next = torch.argmax(probas, dim=-1, keepdim=True)
        # Append it to the running sequence
        idx = torch.cat((idx, idx_next), dim=1)
    return idx
```
Usage example (the tokenizer is assumed to be tiktoken's GPT-2 BPE encoding, which matches `vocab_size = 50257`; the original snippet does not show how it is constructed):
```python
import tiktoken

# BPE tokenizer (assumed: GPT-2 encoding from tiktoken)
tokenizer = tiktoken.get_encoding("gpt2")

# Initialize the model
model = GPTModel(GPT_CONFIG_124M)
# Switch to evaluation mode (disables dropout)
model.eval()

# Generate text
start_context = "Every effort moves you"
encoded = tokenizer.encode(start_context)
encoded_tensor = torch.tensor(encoded).unsqueeze(0)   # add a batch dimension

generated = generate_text_simple(
    model=model,
    idx=encoded_tensor,
    max_new_tokens=10,
    context_size=GPT_CONFIG_124M["ctx_len"]
)

decoded_text = tokenizer.decode(generated.squeeze(0).tolist())
print(decoded_text)
```
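Note that at this point the weights are still randomly initialized, so the generated continuation will be incoherent; producing sensible text requires training the model first.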