54 Implementation of Recurrent Neural Networks from Scratch
import math
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l
import matplotlib.pyplot as plt
import liliPytorch as lp  # the author's local helper module (provides Accumulator and Animator)

# Load H. G. Wells's "The Time Machine" dataset
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)

# Inspect the dataset
# for X, Y in train_iter:
# print('X:', X.shape)
# print('Y:', Y.shape)
# print(vocab.token_freqs)
# print(vocab.idx_to_token)
# print(vocab.token_to_idx)

# One-hot encoding
# Map each index to a distinct unit vector: suppose the number of different tokens in the
# vocabulary is N (i.e., len(vocab)) and the token indices range from 0 to N-1. If a token's
# index is the integer i, we create an all-zero vector of length N and set its i-th element
# to 1. This vector is the one-hot vector of the original token.
# print(F.one_hot(torch.tensor([0,3,6]), len(vocab)))
"""
tensor([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
"""# 每次采樣的小批量數據形狀是二維張量: (批量大小,時間步數)。
# one_hot函數將這樣一個小批量數據轉換成三維張量, 張量的最后一個維度等于詞表大小(len(vocab))。
# 我們經常轉換輸入的維度,以便獲得形狀為 (時間步數,批量大小,詞表大小)的輸出。
# 這將使我們能夠更方便地通過最外層的維度, 一步一步地更新小批量數據的隱狀態。# X = torch.arange(10).reshape((2, 5))
# print(X)
# tensor([[0, 1, 2, 3, 4],
# [5, 6, 7, 8, 9]])
# print(X.T)
# tensor([[0, 5],
# [1, 6],
# [2, 7],
# [3, 8],
# [4, 9]])
# print(F.one_hot(X.T, 28).shape) # torch.Size([5, 2, 28])
# print(F.one_hot(X.T, 28))
"""
tensor([[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],

        [[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],

        [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],

        [[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],

        [[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]])
"""# 初始化模型參數
# Initialize model parameters
def get_params(vocab_size, num_hiddens, device):
    # The numbers of inputs and outputs both equal the vocabulary size
    num_inputs = num_outputs = vocab_size

    # Helper that initializes weights from a normal distribution
    def normal(shape):
        return torch.randn(size=shape, device=device) * 0.01

    # Hidden-layer parameters
    W_xh = normal((num_inputs, num_hiddens))       # input-to-hidden weights
    W_hh = normal((num_hiddens, num_hiddens))      # hidden-to-hidden (recurrent) weights
    b_h = torch.zeros(num_hiddens, device=device)  # hidden-layer bias
    # Output-layer parameters
    W_hq = normal((num_hiddens, num_outputs))      # hidden-to-output weights
    b_q = torch.zeros(num_outputs, device=device)  # output-layer bias

    # Collect all parameters in a list
    params = [W_xh, W_hh, b_h, W_hq, b_q]
    # Enable gradients so they are computed during backpropagation
    for param in params:
        param.requires_grad_(True)
    return params  # return the parameter list

# The recurrent neural network model
# Return the hidden state at initialization
def init_rnn_state(batch_size, num_hiddens, device):
    # batch_size: number of sequences fed to the RNN at a time
    # num_hiddens: number of hidden units, i.e., the dimension of the hidden state
    # Return a tuple containing a single all-zero tensor
    return (torch.zeros((batch_size, num_hiddens), device=device), )

def rnn(inputs, state, params):
    # inputs has shape (number of time steps, batch size, vocabulary size)
    # state: the initial hidden state, a tuple holding the hidden-layer state
    # params: the RNN parameters (weights and biases)
    W_xh, W_hh, b_h, W_hq, b_q = params
    H, = state  # the current hidden state
    outputs = []
    # X has shape (batch size, vocabulary size)
    for X in inputs:
        H = torch.tanh(torch.mm(X, W_xh) + torch.mm(H, W_hh) + b_h)
        Y = torch.mm(H, W_hq) + b_q
        outputs.append(Y)
    return torch.cat(outputs, dim=0), (H,)
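"""
For reference, the loop body above is the book's RNN recurrence: at each time step t,

    H_t = tanh(X_t W_xh + H_{t-1} W_hh + b_h)
    O_t = H_t W_hq + b_q

and the outputs of all time steps are concatenated along dim 0.
"""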
# A class that stores the parameters of the from-scratch RNN model
class RNNModelScratch: #@save
    """A recurrent neural network model implemented from scratch"""
    def __init__(self, vocab_size, num_hiddens, device,
                 get_params, init_state, forward_fn):
        self.vocab_size, self.num_hiddens = vocab_size, num_hiddens
        self.params = get_params(vocab_size, num_hiddens, device)
        self.init_state, self.forward_fn = init_state, forward_fn

    def __call__(self, X, state):  # forward pass
        X = F.one_hot(X.T, self.vocab_size).type(torch.float32)
        return self.forward_fn(X, state, self.params)

    def begin_state(self, batch_size, device):  # initialize the hidden state
        return self.init_state(batch_size, self.num_hiddens, device)

# X = torch.arange(10).reshape((2, 5))
num_hiddens = 512
# net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params,
#                       init_rnn_state, rnn)
# state = net.begin_state(X.shape[0], d2l.try_gpu())  # initialize the hidden state
# Calling the model instance invokes its __call__ method, i.e., the forward pass.
# Y, new_state = net(X.to(d2l.try_gpu()), state)
# Y: the model output.
# new_state: the updated hidden state.

# print(Y.shape, len(new_state), new_state[0].shape)
# torch.Size([10, 28]) 1 torch.Size([2, 512])
# The output shape is (number of time steps * batch size, vocabulary size), here 5 * 2 = 10 rows,
# while the hidden state keeps its shape (batch size, number of hidden units).

def predict_ch8(prefix, num_preds, net, vocab, device):  #@save
    """Generate new characters following the prefix.

    prefix: the initial input character sequence.
    num_preds: the number of characters to predict.
    net: the trained recurrent neural network model.
    vocab: the vocabulary, mapping characters to indices and back.
    """
    state = net.begin_state(batch_size=1, device=device)
    outputs = [vocab[prefix[0]]]  # outputs: list of the indices of generated characters
    get_input = lambda: torch.tensor([outputs[-1]], device=device).reshape((1, 1))
    for y in prefix[1:]:  # warm-up period: iterate over the remaining prefix characters
        _, state = net(get_input(), state)  # forward pass just to update the hidden state
        outputs.append(vocab[y])  # append the current character's index to outputs
    for _ in range(num_preds):  # predict num_preds steps
        # Forward pass: obtain the prediction y and the updated hidden state
        y, state = net(get_input(), state)
        # Greedily take the most likely character (argmax) and append its index
        outputs.append(int(y.argmax(dim=1).reshape(1)))
    return ''.join([vocab.idx_to_token[i] for i in outputs])

# print(predict_ch8('time traveller ', 10, net, vocab, d2l.try_gpu()))
# time traveller cfjwsthaqc  (gibberish, since the model has not been trained yet)

# Gradient clipping
"""
When training deep neural networks (recurrent neural networks in particular), exploding gradients
can make the gradient values extremely large, leaving the model unstable or even causing training
to fail. To prevent this, the gradients can be clipped so that their norm never exceeds a preset
threshold.
"""
def grad_clipping(net, theta):  #@save
    """Clip the gradient.

    net: the neural network model.
    theta: the clipping threshold.
    """
    if isinstance(net, nn.Module):
        params = [p for p in net.parameters() if p.requires_grad]
    else:
        params = net.params
    # Compute the L2 norm over all parameter gradients
    norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params))
    if norm > theta:
        for param in params:
            # Rescale each gradient in place so the new overall norm equals theta
            param.grad[:] *= theta / norm
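# Quick sanity check (my own sketch, not from the book): a stand-in object exposing a
# .params list, holding a gradient of L2 norm 20, should be rescaled to norm theta.
# p = torch.zeros(4, requires_grad=True)
# p.grad = torch.full((4,), 10.0)      # ||grad|| = sqrt(4 * 100) = 20
# class FakeNet: params = [p]
# grad_clipping(FakeNet(), theta=1.0)
# print(torch.norm(p.grad))            # tensor(1.0000) -- clipped to theta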
# Training
def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):
    """Train the network for one epoch (defined in Chapter 8)."""
    state, timer = None, d2l.Timer()
    metric = lp.Accumulator(2)  # sum of training loss, number of tokens
    for X, Y in train_iter:
        if state is None or use_random_iter:
            # Initialize state on the first iteration or when using random sampling
            state = net.begin_state(batch_size=X.shape[0], device=device)
        else:
            if isinstance(net, nn.Module) and not isinstance(state, tuple):
                # state is a tensor for nn.GRU
                state.detach_()
            else:
                # state is a tuple for nn.LSTM and for our from-scratch model
                for s in state:
                    s.detach_()
        y = Y.T.reshape(-1)
        X, y = X.to(device), y.to(device)
        y_hat, state = net(X, state)
        l = loss(y_hat, y.long()).mean()
        if isinstance(updater, torch.optim.Optimizer):
            updater.zero_grad()
            l.backward()
            grad_clipping(net, 1)
            updater.step()
        else:
            l.backward()
            grad_clipping(net, 1)
            # mean() has already been applied, so use batch_size=1
            updater(batch_size=1)
        metric.add(l * y.numel(), y.numel())
    return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()
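"""
Note: the first return value above is the perplexity, the exponential of the average
cross-entropy loss over all n tokens,

    perplexity = exp( -(1/n) * sum_t log P(x_t | x_{t-1}, ..., x_1) ),

which is exactly math.exp(metric[0] / metric[1]).
"""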
#@save
def train_ch8(net, train_iter, vocab, lr, num_epochs, device,
              use_random_iter=False):
    """Train the model (defined in Chapter 8)."""
    loss = nn.CrossEntropyLoss()
    animator = lp.Animator(xlabel='epoch', ylabel='perplexity',
                           legend=['train'], xlim=[10, num_epochs])
    # Initialization
    if isinstance(net, nn.Module):
        updater = torch.optim.SGD(net.parameters(), lr)
    else:
        updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)
    predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)
    # Training and prediction
    for epoch in range(num_epochs):
        ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)
        if (epoch + 1) % 10 == 0:
            print(predict('time traveller'))
            animator.add(epoch + 1, [ppl])
    print(f'perplexity {ppl:.1f}, {speed:.1f} tokens/sec on {str(device)}')
    print(predict('time traveller '))
    print(predict('traveller '))

# Sequential sampling
num_epochs, lr = 500, 1
# train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu())
# plt.show()
"""
perplexity 1.0, 95138.3 tokens/sec on cuda:0
time traveller you can show black is white by argument said filby
traveller you can show black is white by argument said filby
"""# 隨機抽樣方法
net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params,
                      init_rnn_state, rnn)
train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(),
          use_random_iter=True)
plt.show()
"""
perplexity 1.3, 109268.9 tokens/sec on cuda:0
time traveller held in his hand was a glitteringmetallic framewor
traveller held in his hand was a glitteringmetallic framewor
"""
Sequential sampling: [perplexity curve plot]
Random sampling: [perplexity curve plot]

With sequential sampling, the hidden state is carried across adjacent minibatches (and merely
detached); with random sampling, it is reinitialized for every minibatch, as the two branches
in train_epoch_ch8 show.
55 Concise Implementation of Recurrent Neural Networks
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l
import matplotlib.pyplot as plt

# Load The Time Machine dataset and set the batch size and sequence length
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)

# Define the RNN model
num_hiddens = 256
rnn_layer = nn.RNN(len(vocab), num_hiddens)

# Initialize the hidden state with a tensor of zeros
state = torch.zeros((1, batch_size, num_hiddens))
# print(state.shape)  # torch.Size([1, 32, 256])

# X = torch.rand(size=(num_steps, batch_size, len(vocab)))
# Y, state_new = rnn_layer(X, state)
# print(Y.shape, state_new.shape, X.shape)
# torch.Size([35, 32, 256]) torch.Size([1, 32, 256]) torch.Size([35, 32, 28])
# Note: rnn_layer's output Y holds the hidden state for every time step; it does not
# perform the output-layer computation, which is why the class below adds nn.Linear.

# The complete recurrent neural network model: an RNNModel class
#@save
class RNNModel(nn.Module):
    """The recurrent neural network model."""
    def __init__(self, rnn_layer, vocab_size, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        self.rnn = rnn_layer
        self.vocab_size = vocab_size
        self.num_hiddens = self.rnn.hidden_size
        # num_directions is 2 if the RNN is bidirectional, otherwise 1
        if not self.rnn.bidirectional:
            self.num_directions = 1
            self.linear = nn.Linear(self.num_hiddens, self.vocab_size)
        else:
            self.num_directions = 2
            self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)

    def forward(self, inputs, state):
        X = F.one_hot(inputs.T.long(), self.vocab_size)
        X = X.to(torch.float32)
        Y, state = self.rnn(X, state)
        # The fully connected layer first reshapes Y to
        # (number of time steps * batch size, number of hidden units);
        # its output has shape (number of time steps * batch size, vocabulary size).
        output = self.linear(Y.reshape((-1, Y.shape[-1])))
        return output, state

    def begin_state(self, device, batch_size=1):
        if not isinstance(self.rnn, nn.LSTM):
            # nn.GRU (and nn.RNN) take a tensor as the hidden state
            return torch.zeros((self.num_directions * self.rnn.num_layers,
                                batch_size, self.num_hiddens),
                               device=device)
        else:
            # nn.LSTM takes a tuple of tensors as the hidden state
            return (torch.zeros((self.num_directions * self.rnn.num_layers,
                                 batch_size, self.num_hiddens), device=device),
                    torch.zeros((self.num_directions * self.rnn.num_layers,
                                 batch_size, self.num_hiddens), device=device))
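# Optional shape check (my own sketch, not from the book): a forward pass with dummy
# token indices should yield logits of shape (num_steps * batch_size, len(vocab)).
# check_net = RNNModel(rnn_layer, vocab_size=len(vocab))
# check_state = check_net.begin_state(torch.device('cpu'), batch_size=batch_size)
# X_check = torch.zeros((batch_size, num_steps), dtype=torch.long)
# output, check_state = check_net(X_check, check_state)
# print(output.shape)  # torch.Size([1120, 28]), i.e., (35 * 32, 28)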
# Training and prediction
device = d2l.try_gpu()
net = RNNModel(rnn_layer, vocab_size=len(vocab))
net = net.to(device)
num_epochs, lr = 500, 1
d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)
"""
perplexity 1.3, 236379.1 tokens/sec on cuda:0
time traveller held in his hand was a glitteringmetallic framewo
traveller fith a slan but move anotle bothe thon st stagee
"""
plt.show()
print(d2l.predict_ch8('time traveller', 10, net, vocab, device))
# time traveller held in h