- 🍨 This article is a learning-record blog for the 🔗365天深度學習訓練營 (365-Day Deep Learning Training Camp)
- 🍖 Original author: K同學啊 | tutoring and custom projects available
🏡 My environment:
- Language: Python 3.11.4
- Editor: Jupyter Notebook
- torch version: 2.0.1
1. Data Preparation
from torchtext.datasets import WikiText2
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
from torch.utils.data import dataset
from torch import nn, Tensor
import math, os, torch
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from tempfile import TemporaryDirectory

# Global device object
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the training split and build the vocabulary
train_iter = WikiText2(split='train', root='.')
tokenizer = get_tokenizer('basic_english')
vocab = build_vocab_from_iterator(map(tokenizer, train_iter), specials=['<unk>'])
vocab.set_default_index(vocab['<unk>'])  # map out-of-vocabulary tokens to <unk>

def data_process(raw_text_iter: dataset.IterableDataset) -> Tensor:
    """Convert raw text into a flat tensor of token ids."""
    data = [torch.tensor(vocab(tokenizer(item)), dtype=torch.long) for item in raw_text_iter]
    return torch.cat(tuple(filter(lambda t: t.numel() > 0, data)))

def batchify(data: Tensor, bsz: int) -> Tensor:
    """Split the data into bsz separate sequences, dropping the trailing
    elements that do not fit evenly.

    Args:
        data: Tensor, shape ``[N]``
        bsz: int, batch size

    Returns:
        Tensor of shape ``[N // bsz, bsz]``
    """
    seq_len = data.size(0) // bsz
    data = data[:seq_len * bsz]
    data = data.view(bsz, seq_len).t().contiguous()
    return data.to(device)

# Create the datasets
train_iter, val_iter, test_iter = WikiText2(root='.')
train_data = data_process(train_iter)
val_data = data_process(val_iter)
test_data = data_process(test_iter)

batch_size = 20
eval_batch_size = 10

# Batchify all three splits into fixed-width sequences
train_data = batchify(train_data, batch_size)
val_data = batchify(val_data, eval_batch_size)
test_data = batchify(test_data, eval_batch_size)

# Batch-fetching function (analogous to a data_loader in CV)
bptt = 35

def get_batch(source: Tensor, i: int) -> tuple[Tensor, Tensor]:
    """Fetch one batch.

    Args:
        source: Tensor, shape ``[full_seq_len, batch_size]``
        i: int, index of the current batch

    Returns:
        tuple (data, target), where
        - data has shape [seq_len, batch_size]
        - target has shape [seq_len * batch_size]
    """
    # Sequence length for this batch: at most bptt, never past the end of source
    seq_len = min(bptt, len(source) - 1 - i)
    # data starts at i and spans seq_len rows
    data = source[i:i + seq_len]
    # target starts at i + 1, spans seq_len rows, and is flattened to 1-D
    target = source[i + 1:i + 1 + seq_len].reshape(-1)
    return data, target
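To make the shapes concrete, here is a small sketch of how batchify and get_batch slice the token stream. The toy tensor below is made-up illustration data, not part of the original code:

# Toy illustration (hypothetical data): 26 token ids batchified with bsz=4
toy = torch.arange(26)
toy_batches = batchify(toy, 4)      # shape [6, 4]; the last 2 ids are dropped
x, y = get_batch(toy_batches, 0)    # x: [5, 4], y: [20] (x shifted by one step)
print(toy_batches.shape, x.shape, y.shape)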
2. Building the Model
# Positional encoding
class PositionalEncoding(nn.Module):
    def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Position indices [0, max_len)
        position = torch.arange(max_len).unsqueeze(1)
        # Divisor term of the positional encoding
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        # Positional encoding tensor
        pe = torch.zeros(max_len, 1, d_model)
        # Even embedding dimensions use sine
        pe[:, 0, 0::2] = torch.sin(position * div_term)
        # Odd embedding dimensions use cosine
        pe[:, 0, 1::2] = torch.cos(position * div_term)
        self.register_buffer('pe', pe)

    def forward(self, x: Tensor) -> Tensor:
        """
        Arguments:
            x: Tensor, shape [seq_len, batch_size, embedding_dim]
        """
        # Add the positional encoding to the input
        x = x + self.pe[:x.size(0)]
        # Apply dropout
        return self.dropout(x)

# Transformer model
class TransformerModel(nn.Module):
    def __init__(self, ntoken: int, d_model: int, nhead: int, d_hid: int,
                 nlayers: int, dropout: float = 0.5):
        super().__init__()
        # Positional encoding
        self.pos_encoder = PositionalEncoding(d_model, dropout)
        # Encoder layer
        encoder_layers = TransformerEncoderLayer(d_model, nhead, d_hid, dropout)
        # Encoder stack
        self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
        self.embedding = nn.Embedding(ntoken, d_model)
        self.d_model = d_model
        self.linear = nn.Linear(d_model, ntoken)
        self.init_weights()

    def init_weights(self) -> None:
        initrange = 0.1
        self.embedding.weight.data.uniform_(-initrange, initrange)
        self.linear.bias.data.zero_()
        self.linear.weight.data.uniform_(-initrange, initrange)

    def forward(self, src: Tensor, src_mask: Tensor = None) -> Tensor:
        """
        Arguments:
            src: Tensor, shape [seq_len, batch_size]
            src_mask: Tensor, shape [seq_len, seq_len]

        Returns:
            output Tensor, shape [seq_len, batch_size, ntoken]
        """
        src = self.embedding(src) * math.sqrt(self.d_model)
        src = self.pos_encoder(src)
        output = self.transformer_encoder(src, src_mask)
        output = self.linear(output)
        return output
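Note that the training loop below calls model(data) without a src_mask, so every position can attend to every other position. For strictly left-to-right (causal) attention you would pass a square subsequent mask; the helper below is only a sketch of one way to build it (make_causal_mask is a name I made up, not something from the original code):

# Optional sketch: additive causal mask (-inf above the diagonal, 0 elsewhere)
def make_causal_mask(sz: int) -> Tensor:
    return torch.triu(torch.full((sz, sz), float('-inf'), device=device), diagonal=1)

# Usage inside the training/eval loop would look like:
# data, targets = get_batch(train_data, i)
# src_mask = make_causal_mask(data.size(0))
# output = model(data, src_mask)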
3. Creating the Model
ntokens = len(vocab)  # size of the vocabulary
emsize = 200          # embedding dimension
d_hid = 200           # dimension of the feed-forward network in the TransformerEncoder
nlayers = 2           # number of EncoderLayers in the TransformerEncoder
nhead = 2             # number of attention heads
dropout = 0.2         # dropout probability

model = TransformerModel(ntokens, emsize, nhead, d_hid, nlayers, dropout).to(device)
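As a quick sanity check (an extra sketch, not part of the original code), you can count the trainable parameters and push a batch of made-up token ids through the model to confirm the output shape:

# Hypothetical sanity check with fake token ids
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'trainable parameters: {n_params:,}')

dummy = torch.randint(0, ntokens, (bptt, batch_size), device=device)
with torch.no_grad():
    out = model(dummy)
print(out.shape)  # expected: [bptt, batch_size, ntokens] = [35, 20, len(vocab)]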
4. Training the Model
import time

# Cross-entropy loss
criterion = nn.CrossEntropyLoss()
# Learning rate
lr = 5.0
# Stochastic gradient descent (SGD) optimizer over the model parameters
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
# Learning-rate scheduler: decay the learning rate by a factor of 0.95 every epoch
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)
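For reference, StepLR with step_size 1 and gamma 0.95 simply multiplies the learning rate by 0.95 after every epoch, so it decays 5.0 → 4.75 → 4.5125 → …; the tiny illustration below computes those values analytically without touching the real scheduler:

# Illustration only (not part of the training code)
for k in range(4):
    print(f'after epoch {k}: lr = {lr * 0.95 ** k:.4f}')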
def train(model: nn.Module) -> None:
    """Train for one epoch."""
    model.train()
    total_loss = 0.
    log_interval = 200  # print a log line every 200 batches
    start_time = time.time()
    # total number of batches
    num_batches = len(train_data)

    for batch, i in enumerate(range(0, train_data.size(0) - 1, bptt)):
        data, targets = get_batch(train_data, i)
        output = model(data)
        output_flat = output.view(-1, ntokens)
        loss = criterion(output_flat, targets)

        optimizer.zero_grad()
        loss.backward()
        # Gradient clipping to prevent exploding gradients
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
        optimizer.step()

        total_loss += loss.item()
        if batch % log_interval == 0 and batch > 0:
            lr = scheduler.get_last_lr()[0]
            ms_per_batch = (time.time() - start_time) * 1000 / log_interval
            cur_loss = total_loss / log_interval
            ppl = math.exp(cur_loss)  # perplexity

            print(f'| epoch {epoch:3d} | {batch:5d}/{num_batches:5d} batches | '
                  f'lr {lr:02.2f} | ms/batch {ms_per_batch:5.2f} | '
                  f'loss {cur_loss:5.2f} | ppl {ppl:8.2f}')

            total_loss = 0
            start_time = time.time()

def evaluate(model: nn.Module, eval_data: Tensor) -> float:
    """Evaluate over one dataset."""
    model.eval()
    total_loss = 0.
    with torch.no_grad():
        for i in range(0, eval_data.size(0) - 1, bptt):
            data, targets = get_batch(eval_data, i)
            seq_len = data.size(0)
            output = model(data)
            output_flat = output.view(-1, ntokens)
            total_loss += seq_len * criterion(output_flat, targets).item()
    return total_loss / (len(eval_data) - 1)
best_val_loss = float('inf')
epochs = 1

with TemporaryDirectory() as tempdir:  # temporary directory for the best model weights
    # actual save path for the best model parameters
    best_model_params_path = os.path.join(tempdir, 'best_model_params.pth')

    for epoch in range(1, epochs + 1):
        epoch_start_time = time.time()
        train(model)
        val_loss = evaluate(model, val_data)
        val_ppl = math.exp(val_loss)
        elapsed = time.time() - epoch_start_time  # time spent on this epoch
        print('-' * 89)
        print(f'| end of epoch {epoch:3d} | time: {elapsed:5.2f}s | '
              f'valid loss {val_loss:5.2f} | valid ppl {val_ppl:8.2f}')
        print('-' * 89)

        if val_loss < best_val_loss:
            best_val_loss = val_loss
            torch.save(model.state_dict(), best_model_params_path)

        scheduler.step()

    # Load the best-performing weights before the temporary directory is removed
    model.load_state_dict(torch.load(best_model_params_path))
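With the best weights loaded, you can also try sampling text from the model. The following is only a sketch under assumptions (simple greedy decoding; generate and the seed prompt are hypothetical names/values, not from the original post):

# Hypothetical greedy-decoding sketch
def generate(prompt: str, steps: int = 20) -> str:
    model.eval()
    ids = torch.tensor(vocab(tokenizer(prompt)), dtype=torch.long, device=device).unsqueeze(1)  # [seq_len, 1]
    with torch.no_grad():
        for _ in range(steps):
            logits = model(ids)               # [seq_len, 1, ntokens]
            next_id = logits[-1, 0].argmax()  # most likely next token
            ids = torch.cat([ids, next_id.view(1, 1)], dim=0)
    return ' '.join(vocab.lookup_tokens(ids.squeeze(1).tolist()))

print(generate('the history of'))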
5. Training Process
|epoch 1 | 200/102499 batches | lr 5.00 | ms/batch 6.75 | loss 8.14 | ppl 3426.82
|epoch 1 | 400/102499 batches | lr 5.00 | ms/batch 5.60 | loss 6.25 | ppl 517.82
|epoch 1 | 600/102499 batches | lr 5.00 | ms/batch 6.05 | loss 5.61 | ppl 272.82
|epoch 1 | 800/102499 batches | lr 5.00 | ms/batch 5.80 | loss 5.27 | ppl 194.54
|epoch 1 | 1000/102499 batches | lr 5.00 | ms/batch 6.78 | loss 4.90 | ppl 133.77
|epoch 1 | 1200/102499 batches | lr 5.00 | ms/batch 7.06 | loss 4.51 | ppl 91.22
|epoch 1 | 1400/102499 batches | lr 5.00 | ms/batch 6.35 | loss 4.20 | ppl 66.74
|epoch 1 | 1600/102499 batches | lr 5.00 | ms/batch 6.72 | loss 4.00 | ppl 54.55
|epoch 1 | 1800/102499 batches | lr 5.00 | ms/batch 5.59 | loss 3.76 | ppl 42.92
|epoch 1 | 2000/102499 batches | lr 5.00 | ms/batch 6.63 | loss 3.63 | ppl 37.74
|epoch 1 | 2200/102499 batches | lr 5.00 | ms/batch 6.46 | loss 3.47 | ppl 32.27
|epoch 1 | 2400/102499 batches | lr 5.00 | ms/batch 6.50 | loss 3.45 | ppl 31.41
|epoch 1 | 2600/102499 batches | lr 5.00 | ms/batch 6.94 | loss 3.41 | ppl 30.35
|epoch 1 | 2800/102499 batches | lr 5.00 | ms/batch 6.64 | loss 3.27 | ppl 26.42
-----------------------------------------------------------------------------------------
| end of epoch 1 | time: 19.20s | valid loss 1.95 | valid ppl 7.01
-----------------------------------------------------------------------------------------
6. Model Performance
test_loss = evaluate(model, test_data)
test_ppl = math.exp(test_loss)
print('='*89)
print(f'| End of training | test loss {test_loss:5.2f} | '
      f'test ppl {test_ppl:8.2f}')
print('='*89)
7. Test Results
=========================================================================================
| End of training | test loss 1.93 | test ppl 6.88
=========================================================================================
8. Summary

The data-import step kept throwing errors. It is recommended to first download the dataset from https://aistudio.baidu.com/datasetdetail/230431 and only then run the code.
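If the automatic download keeps failing, one possible workaround is to drop the manually downloaded archive into the folder WikiText2 expects. This is only a sketch under assumptions: the exact cache folder depends on your torchtext version, so verify it against the path shown in your own download error before copying.

# Assumption: torchtext looks for the archive under <root>/WikiText2/;
# check the path in your download error message if this does not match.
import os, shutil

root = '.'
cache_dir = os.path.join(root, 'WikiText2')
os.makedirs(cache_dir, exist_ok=True)
shutil.copy('wikitext-2-v1.zip', cache_dir)       # archive downloaded manually from the link above

train_iter = WikiText2(split='train', root=root)  # should now pick up the local copy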