Topic: Transformer-LSTM-SVM Regression
Table of Contents
- Topic: Transformer-LSTM-SVM Regression
- Preface 1: Transformer
- 1. Principles of the Transformer
- 1.1 Core Structure of the Transformer
- 1.2 The Attention Mechanism
- 1.4 Positional Encoding
- 1.5 Loss Function
- 2. Complete Example
- LSTM
- SVM
Preface 1: Transformer
1. Principles of the Transformer
The Transformer is a deep learning model built on the attention mechanism, first proposed by Vaswani et al. in the 2017 paper "Attention Is All You Need". It is mainly used for sequence modeling tasks such as natural language processing and time-series forecasting.
1.1 Core Structure of the Transformer
The Transformer consists of two main parts:
- Encoder: processes the input data and transforms it into a high-dimensional vector representation.
- Decoder: generates the output based on the encoder's representation and the target sequence.
Encoder
Contains N layers, each consisting of two sub-layers:
- Multi-head self-attention.
- Position-wise feed-forward network.
Residual connections and layer normalization are applied around each sub-layer to improve training stability.
Decoder
Contains N layers, each consisting of three sub-layers:
- Masked multi-head self-attention.
- Encoder-decoder attention.
- Position-wise feed-forward network.
Residual connections and layer normalization are applied around each sub-layer to improve training stability. A minimal sketch of this encoder-decoder layout follows.
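Before building the model from scratch in Section 2, the same layout can be sketched with PyTorch's built-in nn.Transformer; the dimensions below are illustrative only and are not taken from the original article.
import torch
import torch.nn as nn

# N = 6 encoder layers and 6 decoder layers, each with multi-head attention,
# a feed-forward sub-layer, residual connections, and layer normalization
sketch = nn.Transformer(d_model=512, nhead=8,
                        num_encoder_layers=6, num_decoder_layers=6,
                        dim_feedforward=2048, batch_first=True)

src = torch.randn(2, 10, 512)  # [batch, src_len, d_model]
tgt = torch.randn(2, 7, 512)   # [batch, tgt_len, d_model]
out = sketch(src, tgt)
print(out.shape)               # torch.Size([2, 7, 512])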
1.2 The Attention Mechanism
Attention scores are computed with scaled dot-product attention:
$$\text{Attention}(Q,K,V)=\text{softmax}\left(\frac{QK^T}{\sqrt{d_k}}\right)V$$
- $Q$: the query vectors.
- $K$: the key vectors.
- $V$: the value vectors.
- $d_k$: the dimension of the key vectors.
Multi-Head Attention
Multi-head attention splits the input into several groups (heads), computes attention for each head independently, and concatenates the results:
$$\text{MultiHead}(Q,K,V)=\text{Concat}(\text{head}_1,\text{head}_2,\dots,\text{head}_n)W^O$$
Each head is computed as:
$$\text{head}_i=\text{Attention}\left(QW_{i}^{Q},KW_{i}^{K},VW_{i}^{V}\right)$$
- $W_{i}^{Q},W_{i}^{K},W_{i}^{V}$: learned projection matrices.
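The attention formula above can be checked directly on small random tensors. This is a minimal sketch (toy shapes, no learned projections), not part of the original article's code:
import torch
import torch.nn.functional as F

# Toy shapes: batch = 2, seq_len = 4, d_k = 8
Q = torch.randn(2, 4, 8)
K = torch.randn(2, 4, 8)
V = torch.randn(2, 4, 8)

d_k = Q.size(-1)
scores = Q @ K.transpose(-1, -2) / d_k ** 0.5  # [2, 4, 4] raw attention scores
weights = F.softmax(scores, dim=-1)            # each row sums to 1
output = weights @ V                           # [2, 4, 8] weighted sum of the values
print(output.shape)                            # torch.Size([2, 4, 8])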
1.4 Positional Encoding
Because the Transformer has no recurrent structure (unlike an RNN), it cannot explicitly capture the order of the sequence, so positional encodings are added:
$$PE(pos,2i)=\sin\left(\frac{pos}{10000^{\frac{2i}{d}}}\right)$$
$$PE(pos,2i+1)=\cos\left(\frac{pos}{10000^{\frac{2i}{d}}}\right)$$
- $pos$: position in the sequence.
- $d$: dimension of the embedding vectors.
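As a quick worked example of these formulas (the toy values here are chosen for illustration, not taken from the article), the encoding of position 1 with a 4-dimensional embedding is:
import numpy as np

d = 4    # toy embedding dimension
pos = 1  # position to encode
pe = np.zeros(d)
for i in range(d // 2):
    pe[2 * i] = np.sin(pos / 10000 ** (2 * i / d))      # even dimensions use sine
    pe[2 * i + 1] = np.cos(pos / 10000 ** (2 * i / d))  # odd dimensions use cosine
print(pe)  # approximately [0.8415, 0.5403, 0.0100, 1.0000]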
1.5 Loss Function
The Transformer is usually trained with the cross-entropy loss:
$$\mathcal{L}=-\frac{1}{N}\sum_{i=1}^{N}\sum_{j=1}^{C}y_{ij}\ln\left(\hat{y}_{ij}\right)$$
- $y_{ij}$: the ground-truth label.
- $\hat{y}_{ij}$: the predicted probability.
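In PyTorch this corresponds to nn.CrossEntropyLoss, which takes raw logits and integer class labels; the ignore_index=0 setting below mirrors how the training code later skips <pad> tokens (the tensor values are made up for illustration):
import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss(ignore_index=0)  # class id 0 (<pad>) is excluded from the loss

logits = torch.randn(5, 30)               # 5 tokens, 30-class vocabulary (toy sizes)
targets = torch.tensor([3, 7, 0, 12, 5])  # the third target is padding and is ignored
print(criterion(logits, targets))         # scalar loss averaged over the non-pad tokens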
2. Complete Example
Importing packages
import torch
import torch.nn as nn
import torch.utils.data as Data
import numpy as np
from torch import optim
import random
from tqdm import tqdm
import matplotlib.pyplot as plt
Dataset generation
# Dataset generation
soundmark = ['ei', 'bi:', 'si:', 'di:', 'i:', 'ef', 'dʒi:', 'eit∫', 'ai', 'dʒei', 'kei', 'el', 'em', 'en', 'əu', 'pi:', 'kju:',
             'ɑ:', 'es', 'ti:', 'ju:', 'vi:', 'dʌblju:', 'eks', 'wai', 'zi:']
alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']

t = 1000     # total number of samples
r = 0.9      # with probability r the target letter matches the source; otherwise it is perturbed
seq_len = 6

src_tokens, tgt_tokens = [], []  # source-sequence and target-sequence lists
for i in range(t):
    src, tgt = [], []
    for j in range(seq_len):
        ind = random.randint(0, 25)
        src.append(soundmark[ind])
        if random.random() < r:
            tgt.append(alphabet[ind])
        else:
            tgt.append(alphabet[random.randint(0, 25)])
    src_tokens.append(src)
    tgt_tokens.append(tgt)
src_tokens[:2], tgt_tokens[:2]
from collections import Counter  # frequency counter

flatten = lambda l: [item for sublist in l for item in sublist]  # flatten a 2-D list

# Build the vocabulary
class Vocab:
    def __init__(self, tokens):
        self.tokens = tokens  # tokens is a 2-D list of token strings
        self.token2index = {'<pad>': 0, '<bos>': 1, '<eos>': 2, '<unk>': 3}  # special tokens first
        # Sort the remaining tokens by frequency and append them to the vocabulary
        self.token2index.update({
            token: index + 4
            for index, (token, freq) in enumerate(
                sorted(Counter(flatten(self.tokens)).items(), key=lambda x: x[1], reverse=True))
        })
        # Reverse mapping from id to token
        self.index2token = {index: token for token, index in self.token2index.items()}

    def __getitem__(self, query):
        # Single index
        if isinstance(query, (str, int)):
            if isinstance(query, str):
                return self.token2index.get(query, 3)
            elif isinstance(query, int):
                return self.index2token.get(query, '<unk>')
        # List of indices
        elif isinstance(query, (list, tuple)):
            return [self.__getitem__(item) for item in query]

    def __len__(self):
        return len(self.index2token)
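A quick sanity check of the Vocab class on a tiny hand-made token list (not part of the original article):
demo_vocab = Vocab([['ei', 'bi:'], ['bi:', 'si:']])
print(len(demo_vocab))            # 7: four special tokens plus three distinct tokens
print(demo_vocab['bi:'])          # 4: the most frequent token gets the first free id
print(demo_vocab[['ei', 'zzz']])  # [5, 3]: unseen tokens map to <unk> (id 3)
print(demo_vocab[4])              # 'bi:': ids map back to tokens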
Constructing the dataset
from torch.utils.data import DataLoader

# Instantiate the source and target vocabularies
src_vocab, tgt_vocab = Vocab(src_tokens), Vocab(tgt_tokens)
src_vocab_size = len(src_vocab)  # source vocabulary size
tgt_vocab_size = len(tgt_vocab)  # target vocabulary size

# Add the start token <bos> and the end token <eos>
encoder_input = torch.tensor([src_vocab[line + ['<pad>']] for line in src_tokens])
decoder_input = torch.tensor([tgt_vocab[['<bos>'] + line] for line in tgt_tokens])
decoder_output = torch.tensor([tgt_vocab[line + ['<eos>']] for line in tgt_tokens])

# 80/20 train/test split, batch_size = 16
train_size = int(len(encoder_input) * 0.8)
test_size = len(encoder_input) - train_size
batch_size = 16

# Custom dataset
class MyDataSet(Data.Dataset):
    def __init__(self, enc_inputs, dec_inputs, dec_outputs):
        super(MyDataSet, self).__init__()
        self.enc_inputs = enc_inputs
        self.dec_inputs = dec_inputs
        self.dec_outputs = dec_outputs

    def __len__(self):
        return self.enc_inputs.shape[0]

    def __getitem__(self, idx):
        return self.enc_inputs[idx], self.dec_inputs[idx], self.dec_outputs[idx]

train_loader = DataLoader(MyDataSet(encoder_input[:train_size], decoder_input[:train_size],
                                    decoder_output[:train_size]), batch_size=batch_size)
test_loader = DataLoader(MyDataSet(encoder_input[-test_size:], decoder_input[-test_size:],
                                   decoder_output[-test_size:]), batch_size=1)
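It can help to peek at one batch and confirm the tensor shapes before building the model (seq_len of 6 plus one special token gives length 7); this check is an addition, not part of the original listing:
enc_b, dec_in_b, dec_out_b = next(iter(train_loader))
print(enc_b.shape, dec_in_b.shape, dec_out_b.shape)  # each is torch.Size([16, 7])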
Positional encoding
def get_sinusoid_encoding_table(n_position, d_model):
    def cal_angle(position, hid_idx):
        return position / np.power(10000, 2 * (hid_idx // 2) / d_model)

    def get_posi_angle_vec(position):
        return [cal_angle(position, hid_j) for hid_j in range(d_model)]

    sinusoid_table = np.array([get_posi_angle_vec(pos_i) for pos_i in range(n_position)])
    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # even dimensions use sine
    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # odd dimensions use cosine
    return torch.FloatTensor(sinusoid_table)
print(get_sinusoid_encoding_table(30, 512))
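Optionally, the table can be visualized as a heat map, where each column is a sinusoid of a different frequency (this plotting snippet is an addition, not from the original article):
plt.imshow(get_sinusoid_encoding_table(30, 512).numpy(), aspect='auto', cmap='viridis')
plt.xlabel('Embedding dimension')
plt.ylabel('Position')
plt.colorbar()
plt.show()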
Mask operations
# Mask out the meaningless padding positions
def get_attn_pad_mask(seq_q, seq_k):  # seq_q: [batch_size, seq_len], seq_k: [batch_size, seq_len]
    batch_size, len_q = seq_q.size()
    batch_size, len_k = seq_k.size()
    pad_attn_mask = seq_k.data.eq(0).unsqueeze(1)  # mark key positions equal to 0 (<pad>) with True, [batch_size, 1, len_k]
    return pad_attn_mask.expand(batch_size, len_q, len_k)

# Mask out future positions so the decoder cannot look ahead
def get_attn_subsequence_mask(seq):  # seq: [batch_size, tgt_len]
    attn_shape = [seq.size(0), seq.size(1), seq.size(1)]
    subsequence_mask = np.triu(np.ones(attn_shape), k=1)  # upper-triangular matrix, [batch_size, tgt_len, tgt_len]
    subsequence_mask = torch.from_numpy(subsequence_mask).byte()  # [batch_size, tgt_len, tgt_len]
    return subsequence_mask
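Both masks can be illustrated on a tiny made-up batch (the token ids below are arbitrary; 0 is <pad>):
toy = torch.tensor([[5, 7, 0]])
print(get_attn_pad_mask(toy, toy))     # True wherever the key position is padding
print(get_attn_subsequence_mask(toy))  # 1s above the diagonal mark future positions to hide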
Attention computation
# Scaled dot-product attention
class ScaledDotProductAttention(nn.Module):
    def __init__(self):
        super(ScaledDotProductAttention, self).__init__()

    def forward(self, Q, K, V, attn_mask):
        '''
        Q: [batch_size, n_heads, len_q, d_k]
        K: [batch_size, n_heads, len_k, d_k]
        V: [batch_size, n_heads, len_v(=len_k), d_v]
        attn_mask: [batch_size, n_heads, seq_len, seq_len]
        '''
        scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(d_k)  # scores: [batch_size, n_heads, len_q, len_k]
        scores.masked_fill_(attn_mask, -1e9)  # fill masked positions with a large negative value before softmax
        attn = nn.Softmax(dim=-1)(scores)
        context = torch.matmul(attn, V)  # [batch_size, n_heads, len_q, d_v]
        return context, attn

# Multi-head attention
class MultiHeadAttention(nn.Module):
    def __init__(self):
        super(MultiHeadAttention, self).__init__()
        self.W_Q = nn.Linear(d_model, d_k * n_heads, bias=False)
        self.W_K = nn.Linear(d_model, d_k * n_heads, bias=False)
        self.W_V = nn.Linear(d_model, d_v * n_heads, bias=False)
        self.fc = nn.Linear(n_heads * d_v, d_model, bias=False)
        self.layer_norm = nn.LayerNorm(d_model)  # defined here so its parameters are registered and trained

    def forward(self, input_Q, input_K, input_V, attn_mask):
        '''
        input_Q: [batch_size, len_q, d_model]
        input_K: [batch_size, len_k, d_model]
        input_V: [batch_size, len_v(=len_k), d_model]
        attn_mask: [batch_size, seq_len, seq_len]
        '''
        residual, batch_size = input_Q, input_Q.size(0)
        # (B, S, D) -proj-> (B, S, D_new) -split-> (B, S, H, W) -trans-> (B, H, S, W)
        Q = self.W_Q(input_Q).view(batch_size, -1, n_heads, d_k).transpose(1, 2)  # Q: [batch_size, n_heads, len_q, d_k]
        K = self.W_K(input_K).view(batch_size, -1, n_heads, d_k).transpose(1, 2)  # K: [batch_size, n_heads, len_k, d_k]
        V = self.W_V(input_V).view(batch_size, -1, n_heads, d_v).transpose(1, 2)  # V: [batch_size, n_heads, len_v(=len_k), d_v]
        attn_mask = attn_mask.unsqueeze(1).repeat(1, n_heads, 1, 1)  # attn_mask: [batch_size, n_heads, seq_len, seq_len]
        # context: [batch_size, n_heads, len_q, d_v], attn: [batch_size, n_heads, len_q, len_k]
        context, attn = ScaledDotProductAttention()(Q, K, V, attn_mask)
        context = context.transpose(1, 2).reshape(batch_size, -1, n_heads * d_v)  # context: [batch_size, len_q, n_heads * d_v]
        output = self.fc(context)  # [batch_size, len_q, d_model]
        return self.layer_norm(output + residual), attn  # residual connection + LayerNorm
Feed-forward network
class PoswiseFeedForwardNet(nn.Module):
    def __init__(self):
        super(PoswiseFeedForwardNet, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(d_model, d_ff, bias=False),
            nn.ReLU(),
            nn.Linear(d_ff, d_model, bias=False)
        )
        self.layer_norm = nn.LayerNorm(d_model)  # defined here so its parameters are registered and trained

    def forward(self, inputs):  # inputs: [batch_size, seq_len, d_model]
        residual = inputs
        output = self.fc(inputs)
        return self.layer_norm(output + residual)  # residual connection + LayerNorm
Encoder and decoder
# Encoder layer
class EncoderLayer(nn.Module):
    def __init__(self):
        super(EncoderLayer, self).__init__()
        self.enc_self_attn = MultiHeadAttention()  # multi-head self-attention
        self.pos_ffn = PoswiseFeedForwardNet()     # feed-forward network

    def forward(self, enc_inputs, enc_self_attn_mask):
        '''
        enc_inputs: [batch_size, src_len, d_model]
        enc_self_attn_mask: [batch_size, src_len, src_len]
        '''
        # enc_outputs: [batch_size, src_len, d_model], attn: [batch_size, n_heads, src_len, src_len]
        enc_outputs, attn = self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs, enc_self_attn_mask)  # enc_inputs serves as Q, K and V
        enc_outputs = self.pos_ffn(enc_outputs)  # enc_outputs: [batch_size, src_len, d_model]
        return enc_outputs, attn

# Encoder module
class Encoder(nn.Module):
    def __init__(self):
        super(Encoder, self).__init__()
        self.src_emb = nn.Embedding(src_vocab_size, d_model)
        self.pos_emb = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(src_vocab_size, d_model), freeze=True)
        self.layers = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])

    def forward(self, enc_inputs):
        '''
        enc_inputs: [batch_size, src_len]
        '''
        word_emb = self.src_emb(enc_inputs)  # [batch_size, src_len, d_model]
        # Look up positional encodings by position index (0, 1, 2, ...) rather than by token id
        pos = torch.arange(enc_inputs.size(1), device=enc_inputs.device).unsqueeze(0).expand_as(enc_inputs)
        pos_emb = self.pos_emb(pos)  # [batch_size, src_len, d_model]
        enc_outputs = word_emb + pos_emb
        enc_self_attn_mask = get_attn_pad_mask(enc_inputs, enc_inputs)  # [batch_size, src_len, src_len]
        enc_self_attns = []
        for layer in self.layers:
            # enc_outputs: [batch_size, src_len, d_model], enc_self_attn: [batch_size, n_heads, src_len, src_len]
            enc_outputs, enc_self_attn = layer(enc_outputs, enc_self_attn_mask)
            enc_self_attns.append(enc_self_attn)
        return enc_outputs, enc_self_attns
# Decoder layer
class DecoderLayer(nn.Module):
    def __init__(self):
        super(DecoderLayer, self).__init__()
        self.dec_self_attn = MultiHeadAttention()
        self.dec_enc_attn = MultiHeadAttention()
        self.pos_ffn = PoswiseFeedForwardNet()

    def forward(self, dec_inputs, enc_outputs, dec_self_attn_mask, dec_enc_attn_mask):
        '''
        dec_inputs: [batch_size, tgt_len, d_model]
        enc_outputs: [batch_size, src_len, d_model]
        dec_self_attn_mask: [batch_size, tgt_len, tgt_len]
        dec_enc_attn_mask: [batch_size, tgt_len, src_len]
        '''
        # dec_outputs: [batch_size, tgt_len, d_model], dec_self_attn: [batch_size, n_heads, tgt_len, tgt_len]
        dec_outputs, dec_self_attn = self.dec_self_attn(dec_inputs, dec_inputs, dec_inputs, dec_self_attn_mask)
        # dec_outputs: [batch_size, tgt_len, d_model], dec_enc_attn: [batch_size, n_heads, tgt_len, src_len]
        dec_outputs, dec_enc_attn = self.dec_enc_attn(dec_outputs, enc_outputs, enc_outputs, dec_enc_attn_mask)
        dec_outputs = self.pos_ffn(dec_outputs)  # [batch_size, tgt_len, d_model]
        return dec_outputs, dec_self_attn, dec_enc_attn

# Decoder module
class Decoder(nn.Module):
    def __init__(self):
        super(Decoder, self).__init__()
        self.tgt_emb = nn.Embedding(tgt_vocab_size, d_model)
        self.pos_emb = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(tgt_vocab_size, d_model), freeze=True)
        self.layers = nn.ModuleList([DecoderLayer() for _ in range(n_layers)])

    def forward(self, dec_inputs, enc_inputs, enc_outputs):
        '''
        dec_inputs: [batch_size, tgt_len]
        enc_inputs: [batch_size, src_len]
        enc_outputs: [batch_size, src_len, d_model]
        '''
        word_emb = self.tgt_emb(dec_inputs)  # [batch_size, tgt_len, d_model]
        # Look up positional encodings by position index (0, 1, 2, ...) rather than by token id
        pos = torch.arange(dec_inputs.size(1), device=dec_inputs.device).unsqueeze(0).expand_as(dec_inputs)
        pos_emb = self.pos_emb(pos)  # [batch_size, tgt_len, d_model]
        dec_outputs = word_emb + pos_emb
        dec_self_attn_pad_mask = get_attn_pad_mask(dec_inputs, dec_inputs)  # [batch_size, tgt_len, tgt_len]
        dec_self_attn_subsequent_mask = get_attn_subsequence_mask(dec_inputs)  # [batch_size, tgt_len, tgt_len]
        dec_self_attn_mask = torch.gt((dec_self_attn_pad_mask + dec_self_attn_subsequent_mask), 0)  # [batch_size, tgt_len, tgt_len]
        dec_enc_attn_mask = get_attn_pad_mask(dec_inputs, enc_inputs)  # [batch_size, tgt_len, src_len]
        dec_self_attns, dec_enc_attns = [], []
        for layer in self.layers:
            # dec_outputs: [batch_size, tgt_len, d_model], dec_self_attn: [batch_size, n_heads, tgt_len, tgt_len],
            # dec_enc_attn: [batch_size, n_heads, tgt_len, src_len]
            dec_outputs, dec_self_attn, dec_enc_attn = layer(dec_outputs, enc_outputs, dec_self_attn_mask, dec_enc_attn_mask)
            dec_self_attns.append(dec_self_attn)
            dec_enc_attns.append(dec_enc_attn)
        return dec_outputs, dec_self_attns, dec_enc_attns
Transformer model
class Transformer(nn.Module):
    def __init__(self):
        super(Transformer, self).__init__()
        self.encoder = Encoder()
        self.decoder = Decoder()
        self.projection = nn.Linear(d_model, tgt_vocab_size, bias=False)

    def forward(self, enc_inputs, dec_inputs):
        '''
        enc_inputs: [batch_size, src_len]
        dec_inputs: [batch_size, tgt_len]
        '''
        # enc_outputs: [batch_size, src_len, d_model], enc_self_attns: [n_layers, batch_size, n_heads, src_len, src_len]
        enc_outputs, enc_self_attns = self.encoder(enc_inputs)
        # dec_outputs: [batch_size, tgt_len, d_model], dec_self_attns: [n_layers, batch_size, n_heads, tgt_len, tgt_len],
        # dec_enc_attns: [n_layers, batch_size, n_heads, tgt_len, src_len]
        dec_outputs, dec_self_attns, dec_enc_attns = self.decoder(dec_inputs, enc_inputs, enc_outputs)
        dec_logits = self.projection(dec_outputs)  # dec_logits: [batch_size, tgt_len, tgt_vocab_size]
        return dec_logits.view(-1, dec_logits.size(-1)), enc_self_attns, dec_self_attns, dec_enc_attns
Model training
d_model = 512    # embedding dimension
d_ff = 2048      # hidden dimension of the feed-forward network
d_k = d_v = 64   # dimensions of K (=Q) and V
n_layers = 6     # number of encoder and decoder layers
n_heads = 8      # number of attention heads
num_epochs = 50  # number of training epochs

# Record the loss over training
loss_history = []

model = Transformer()
criterion = nn.CrossEntropyLoss(ignore_index=0)  # ignore <pad> (id 0) when computing the loss
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.99)

for epoch in tqdm(range(num_epochs)):
    total_loss = 0
    for enc_inputs, dec_inputs, dec_outputs in train_loader:
        '''
        enc_inputs: [batch_size, src_len]
        dec_inputs: [batch_size, tgt_len]
        dec_outputs: [batch_size, tgt_len]
        '''
        # outputs: [batch_size * tgt_len, tgt_vocab_size]
        outputs, enc_self_attns, dec_self_attns, dec_enc_attns = model(enc_inputs, dec_inputs)
        loss = criterion(outputs, dec_outputs.view(-1))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    avg_loss = total_loss / len(train_loader)
    loss_history.append(avg_loss)
    print('Epoch:', '%d' % (epoch + 1), 'loss =', '{:.6f}'.format(avg_loss))
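Since loss_history is recorded and matplotlib is already imported, a minimal sketch to visualize the training curve (an addition, not part of the original listing) is:
plt.plot(range(1, num_epochs + 1), loss_history)
plt.xlabel('Epoch')
plt.ylabel('Average training loss')
plt.title('Transformer training loss')
plt.show()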