This post walks through the official PyTorch source file transformer.py (version 1.9.1+cu111).
First, see the companion annotated walkthrough of MultiheadAttention in Torch
(《Torch中多頭注意力MultiheadAttention的中文注釋》);
then come back to the transformer walkthrough below.
Without further ado, on to the code!
import copy
from typing import Optional, Any

import torch
from torch import Tensor
from .. import functional as F
from .module import Module
from .activation import MultiheadAttention
from .container import ModuleList
from ..init import xavier_uniform_
from .dropout import Dropout
from .linear import Linear
from .normalization import LayerNorm


class Transformer(Module):
    r"""A transformer model. Users can modify its attributes as needed. The architecture is based on
    the paper "Attention Is All You Need" (Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin, 2017, in Advances in Neural
    Information Processing Systems, pages 6000-6010). Users can build a BERT model with the
    corresponding parameters (see https://arxiv.org/abs/1810.04805).

    Args:
        d_model: the number of expected features in the encoder/decoder inputs (default=512).
        nhead: the number of heads in the multi-head attention models (default=8).
        num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6).
        num_decoder_layers: the number of sub-decoder-layers in the decoder (default=6).
        dim_feedforward: the dimension of the feedforward network model (default=2048).
        dropout: the dropout value (default=0.1).
        activation: the activation function of the encoder/decoder intermediate layer, relu or gelu (default=relu).
        custom_encoder: custom encoder (default=None).
        custom_decoder: custom decoder (default=None).
        layer_norm_eps: the eps value in layer normalization components (default=1e-5).
        batch_first: if ``True``, the input and output tensors are provided as (batch, seq, feature).
            Default: ``False`` (seq, batch, feature).

    Examples::
        >>> transformer_model = nn.Transformer(nhead=16, num_encoder_layers=12)
        >>> src = torch.rand((10, 32, 512))
        >>> tgt = torch.rand((20, 32, 512))
        >>> out = transformer_model(src, tgt)

    Note: A full example to apply nn.Transformer module for the word language model is available in
    https://github.com/pytorch/examples/tree/master/word_language_model

    __init__:
        batch_first: if True, the input and output tensors are provided as (batch, seq, feature);
            otherwise the order is (seq, batch, feature).
        Encoder and decoder components:
            self.encoder: if custom_encoder is provided, that custom encoder is used directly;
                otherwise a standard TransformerEncoder instance is created, built by stacking
                TransformerEncoderLayer layers.
            self.decoder: likewise, if custom_decoder is provided it is used directly; otherwise a
                standard TransformerDecoder instance is created from TransformerDecoderLayer layers.
    """

    def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6,
                 num_decoder_layers: int = 6, dim_feedforward: int = 2048, dropout: float = 0.1,
                 activation: str = "relu", custom_encoder: Optional[Any] = None, custom_decoder: Optional[Any] = None,
                 layer_norm_eps: float = 1e-5, batch_first: bool = False,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super(Transformer, self).__init__()

        if custom_encoder is not None:
            self.encoder = custom_encoder
        else:
            encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout,
                                                    activation, layer_norm_eps, batch_first,
                                                    **factory_kwargs)
            encoder_norm = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
            self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)

        if custom_decoder is not None:
            self.decoder = custom_decoder
        else:
            decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout,
                                                    activation, layer_norm_eps, batch_first,
                                                    **factory_kwargs)
            decoder_norm = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
            self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)

        self._reset_parameters()

        self.d_model = d_model
        self.nhead = nhead
        self.batch_first = batch_first

    def forward(self, src: Tensor, tgt: Tensor, src_mask: Optional[Tensor] = None, tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:
        r"""Take in and process masked source and target sequences.

        Args:
            src: the sequence to the encoder (required).
            tgt: the sequence to the decoder (required).
            src_mask: the additive mask for the src sequence (optional).
            tgt_mask: the additive mask for the tgt sequence (optional).
            memory_mask: the additive mask for the encoder output (optional).
            src_key_padding_mask: the ByteTensor mask for src keys per batch (optional).
            tgt_key_padding_mask: the ByteTensor mask for tgt keys per batch (optional).
            memory_key_padding_mask: the ByteTensor mask for memory keys per batch (optional).

        Shape:
            - src: :math:`(S, N, E)`, :math:`(N, S, E)` if `batch_first` is True.
            - tgt: :math:`(T, N, E)`, :math:`(N, T, E)` if `batch_first` is True.
            - src_mask: :math:`(S, S)`.
            - tgt_mask: :math:`(T, T)`.
            - memory_mask: :math:`(T, S)`.
            - src_key_padding_mask: :math:`(N, S)`.
            - tgt_key_padding_mask: :math:`(N, T)`.
            - memory_key_padding_mask: :math:`(N, S)`.

            Note: [src/tgt/memory]_mask ensures that position i can attend to the unmasked positions.
            If a ByteTensor is provided, non-zero positions are not allowed to attend while zero
            positions are unchanged. If a BoolTensor is provided, positions with ``True`` are not
            allowed to attend while ``False`` values are unchanged. If a FloatTensor is provided, it
            is added to the attention weight. [src/tgt/memory]_key_padding_mask specifies elements in
            the key that should be ignored by the attention. If a ByteTensor is provided, non-zero
            positions are ignored while zero positions are unchanged. If a BoolTensor is provided,
            positions with the value ``True`` are ignored while positions with the value ``False``
            are unchanged.

            - output: :math:`(T, N, E)`, :math:`(N, T, E)` if `batch_first` is True.

            Note: Due to the multi-head attention architecture in the transformer model, the output
            sequence length of the transformer is the same as the input sequence (i.e. the target)
            length of the decoder. Here S is the source sequence length, T is the target sequence
            length, N is the batch size, and E is the feature number.

        Examples:
            >>> output = transformer_model(src, tgt, src_mask=src_mask, tgt_mask=tgt_mask)
        """
        if not self.batch_first and src.size(1) != tgt.size(1):
            raise RuntimeError("the batch number of src and tgt must be equal")
        elif self.batch_first and src.size(0) != tgt.size(0):
            raise RuntimeError("the batch number of src and tgt must be equal")

        if src.size(2) != self.d_model or tgt.size(2) != self.d_model:
            raise RuntimeError("the feature number of src and tgt must be equal to d_model")

        memory = self.encoder(src, mask=src_mask, src_key_padding_mask=src_key_padding_mask)
        output = self.decoder(tgt, memory, tgt_mask=tgt_mask, memory_mask=memory_mask,
                              tgt_key_padding_mask=tgt_key_padding_mask,
                              memory_key_padding_mask=memory_key_padding_mask)
        return output

    def generate_square_subsequent_mask(self, sz: int) -> Tensor:
        r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
        Unmasked positions are filled with float(0.0).
        """
        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask

    def _reset_parameters(self):
        r"""Initiate parameters in the transformer model."""
        for p in self.parameters():
            if p.dim() > 1:
                xavier_uniform_(p)
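To make the masking semantics above concrete, here is a minimal, self-contained sketch (the shapes and the padding positions are chosen arbitrarily) that builds the causal target mask with generate_square_subsequent_mask, adds a boolean padding mask for the source, and runs one forward pass:

import torch
import torch.nn as nn

model = nn.Transformer(d_model=512, nhead=8, num_encoder_layers=6, num_decoder_layers=6)

S, T, N, E = 10, 20, 32, 512          # source length, target length, batch, features
src = torch.rand(S, N, E)             # (S, N, E) because batch_first=False
tgt = torch.rand(T, N, E)             # (T, N, E)

# Causal (subsequent) mask: float('-inf') above the diagonal, 0.0 elsewhere,
# so position i may only attend to positions <= i.
tgt_mask = model.generate_square_subsequent_mask(T)          # (T, T)

# Boolean padding mask: True marks padded key positions to be ignored.
src_key_padding_mask = torch.zeros(N, S, dtype=torch.bool)
src_key_padding_mask[:, -2:] = True   # pretend the last 2 source tokens are padding

out = model(src, tgt, tgt_mask=tgt_mask, src_key_padding_mask=src_key_padding_mask)
print(out.shape)                      # torch.Size([20, 32, 512]), same length as tgt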
class TransformerEncoder(Module):
    r"""TransformerEncoder is a stack of N encoder layers.

    Args:
        encoder_layer: an instance of the TransformerEncoderLayer() class (required).
        num_layers: the number of sub-encoder-layers in the encoder (required).
        norm: the layer normalization component (optional).

    __init__:
        encoder_layer: a single encoder layer instance, usually constructed with TransformerEncoderLayer.
        num_layers: the number of encoder layers to stack.
        norm: an optional layer normalization component.
        During initialization, the _get_clones function copies encoder_layer the specified number of
        times to build the list of encoder layers, which is stored in self.layers. self.num_layers
        stores the number of layers, and self.norm keeps the provided layer normalization component
        (if any).

    Examples::
        >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
        >>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
        >>> src = torch.rand(10, 32, 512)
        >>> out = transformer_encoder(src)
    """
    __constants__ = ['norm']

    def __init__(self, encoder_layer, num_layers, norm=None):
        super(TransformerEncoder, self).__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, src: Tensor, mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
        r"""Pass the input through the encoder layers in turn.

        Args:
            src: the sequence to the encoder (required).
            mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the src keys per batch (optional).

        Shape:
            see the docs in Transformer class.

        Flow:
            src is the required input sequence; mask and src_key_padding_mask are optional masks.
            During the forward pass, src is passed through each encoder layer in turn, where mask and
            src_key_padding_mask control which parts should be ignored. Finally, if a layer
            normalization component norm was provided, the output of the whole encoder is normalized
            by it.
        """
        output = src

        for mod in self.layers:
            output = mod(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask)

        if self.norm is not None:
            output = self.norm(output)

        return output
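One detail worth stressing: self.layers is built by _get_clones, which deep-copies the prototype layer, so the stacked layers do not share parameters. A small illustrative sketch to check this:

import torch
import torch.nn as nn

encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)

# Each clone owns its own parameter tensors...
p0 = encoder.layers[0].linear1.weight
p1 = encoder.layers[1].linear1.weight
print(p0 is p1)             # False: separate tensors, updated independently during training

# ...although right after construction they still hold equal values,
# because deepcopy duplicates the prototype's initial weights.
# (Inside nn.Transformer, _reset_parameters re-initializes them afterwards.)
print(torch.equal(p0, p1))  # True, until training makes them diverge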
class TransformerDecoder(Module):
    r"""TransformerDecoder is a stack of N decoder layers.

    Args:
        decoder_layer: an instance of the TransformerDecoderLayer() class (required). It defines the
            behavior and structure of a single decoder layer.
        num_layers: the number of sub-decoder-layers in the decoder (required); it determines the
            depth of the decoder.
        norm: the layer normalization component (optional), applied after the output of all decoder
            layers; it helps stabilize training and speed up convergence.

    Examples::
        >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
        >>> transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
        >>> memory = torch.rand(10, 32, 512)
        >>> tgt = torch.rand(20, 32, 512)
        >>> out = transformer_decoder(tgt, memory)
    """
    __constants__ = ['norm']

    def __init__(self, decoder_layer, num_layers, norm=None):
        super(TransformerDecoder, self).__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:
        r"""Pass the inputs (and masks) through the decoder layers in turn.

        Args:
            tgt: the sequence to the decoder (required).
            memory: the sequence from the last layer of the encoder (required).
            tgt_mask: the mask for the tgt sequence (optional).
            memory_mask: the mask for the memory sequence (optional).
            tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
            memory_key_padding_mask: the mask for the memory keys per batch (optional).

        Shape:
            see the docs in Transformer class.
        """
        output = tgt

        for mod in self.layers:
            output = mod(output, memory, tgt_mask=tgt_mask,
                         memory_mask=memory_mask,
                         tgt_key_padding_mask=tgt_key_padding_mask,
                         memory_key_padding_mask=memory_key_padding_mask)

        if self.norm is not None:
            output = self.norm(output)

        return output
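At inference time the decoder is typically run autoregressively: the encoder memory is computed once, and the target sequence grows one token per step under a causal mask. A minimal greedy-decoding sketch of that control flow; the embedding, output projection, vocabulary size, and start token below are made up for illustration, and the weights are untrained, so the decoded ids are meaningless:

import torch
import torch.nn as nn

vocab, d_model = 1000, 512                    # hypothetical vocabulary size
embed = nn.Embedding(vocab, d_model)          # hypothetical target embedding
proj = nn.Linear(d_model, vocab)              # hypothetical output projection
model = nn.Transformer(d_model=d_model, nhead=8)

src = torch.rand(10, 1, d_model)              # (S, N, E): random features standing in for embedded source
memory = model.encoder(src)                   # encode once, reuse at every step

ys = torch.full((1, 1), 1, dtype=torch.long)  # (T=1, N=1), token id 1 as a stand-in <bos>
for _ in range(5):                            # decode 5 steps greedily
    tgt = embed(ys)                           # (T, 1, E)
    tgt_mask = model.generate_square_subsequent_mask(ys.size(0))
    out = model.decoder(tgt, memory, tgt_mask=tgt_mask)
    next_token = proj(out[-1]).argmax(dim=-1)            # most likely next token
    ys = torch.cat([ys, next_token.unsqueeze(0)], dim=0)

print(ys.squeeze(1))                          # the greedily decoded token ids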
class TransformerEncoderLayer(Module):
    r"""TransformerEncoderLayer is made up of self-attention (self-attn) and a feedforward network.
    This standard encoder layer is based on the paper "Attention Is All You Need". Ashish Vaswani,
    Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and
    Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Processing
    Systems, pages 6000-6010. Users may modify it or implement it in a different way during
    application.

    Args:
        d_model: the number of expected features in the input (required).
        nhead: the number of heads in the multi-head attention models (required).
        dim_feedforward: the dimension of the feedforward network model (default=2048).
        dropout: the dropout value (default=0.1).
        activation: the activation function of the intermediate layer, relu or gelu (default=relu).
        layer_norm_eps: the eps value in layer normalization components (default=1e-5).
        batch_first: if ``True``, the input and output tensors are provided as (batch, seq, feature).
            Default: ``False``.

    Examples::
        >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
        >>> src = torch.rand(10, 32, 512)
        >>> out = encoder_layer(src)

    Alternatively, when ``batch_first`` is ``True``:
        >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True)
        >>> src = torch.rand(32, 10, 512)
        >>> out = encoder_layer(src)

    __init__:
        self_attn: self-attention implemented with MultiheadAttention.
        linear1, linear2: the linear layers of the feedforward network.
        dropout: the Dropout used inside the feedforward network.
        norm1, norm2: layer normalization layers, applied after self-attention and the feedforward network.
        dropout1, dropout2: Dropout applied after the residual connections.
        activation: the activation function, chosen according to the activation argument.
    """
    __constants__ = ['batch_first']

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu",
                 layer_norm_eps=1e-5, batch_first=False,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super(TransformerEncoderLayer, self).__init__()
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,
                                            **factory_kwargs)
        # Implementation of Feedforward model
        self.linear1 = Linear(d_model, dim_feedforward, **factory_kwargs)
        self.dropout = Dropout(dropout)
        self.linear2 = Linear(dim_feedforward, d_model, **factory_kwargs)

        self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
        self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
        self.dropout1 = Dropout(dropout)
        self.dropout2 = Dropout(dropout)

        self.activation = _get_activation_fn(activation)

    def __setstate__(self, state):
        if 'activation' not in state:
            state['activation'] = F.relu
        super(TransformerEncoderLayer, self).__setstate__(state)

    def forward(self, src: Tensor, src_mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
        r"""Pass the input through the encoder layer.

        Args:
            src: the sequence to the encoder layer (required).
            src_mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the src keys per batch (optional).

        Shape:
            see the docs in Transformer class.

        Forward flow:
            First, the input src is processed by the self-attention layer (self_attn) to produce src2.
            src is added to the Dropout of src2, then passed through layer normalization (norm1).
            Next, src goes through the feedforward network: linear1, the activation function, dropout,
            linear2, and dropout2, is added to src again, and finally passes through layer
            normalization (norm2).
        """
        src2 = self.self_attn(src, src, src, attn_mask=src_mask,
                              key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
        src = src + self.dropout2(src2)
        src = self.norm2(src)
        return src
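To tie the forward flow above back to the submodules created in __init__, the following sketch reproduces one encoder-layer pass by calling the layer's own submodules by hand (in eval() mode so dropout is inert) and checks it against the module's output:

import torch
import torch.nn as nn

layer = nn.TransformerEncoderLayer(d_model=512, nhead=8).eval()
src = torch.rand(10, 32, 512)                 # (S, N, E)

with torch.no_grad():
    # Sublayer 1: self-attention + residual connection + post-LayerNorm
    attn_out = layer.self_attn(src, src, src)[0]
    x = layer.norm1(src + layer.dropout1(attn_out))
    # Sublayer 2: feedforward network + residual connection + post-LayerNorm
    ff_out = layer.linear2(layer.dropout(layer.activation(layer.linear1(x))))
    manual = layer.norm2(x + layer.dropout2(ff_out))

    reference = layer(src)

print(torch.allclose(manual, reference, atol=1e-6))   # True: same computation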
class TransformerDecoderLayer(Module):
    r"""TransformerDecoderLayer is made up of self-attention (self-attn), multi-head attention
    (multi-head-attn) and a feedforward network. This standard decoder layer is based on the paper
    "Attention Is All You Need". Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need.
    In Advances in Neural Information Processing Systems, pages 6000-6010. Users may modify it or
    implement it in a different way during application.

    Args:
        d_model: the number of expected features in the input (required).
        nhead: the number of heads in the multi-head attention models (required).
        dim_feedforward: the dimension of the feedforward network model (default=2048).
        dropout: the dropout value (default=0.1).
        activation: the activation function of the intermediate layer, relu or gelu (default=relu).
        layer_norm_eps: the eps value in layer normalization components (default=1e-5).
        batch_first: if ``True``, the input and output tensors are provided as (batch, seq, feature).
            Default: ``False``.

    Examples::
        >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
        >>> memory = torch.rand(10, 32, 512)
        >>> tgt = torch.rand(20, 32, 512)
        >>> out = decoder_layer(tgt, memory)

    Alternatively, when ``batch_first`` is ``True``:
        >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=True)
        >>> memory = torch.rand(32, 10, 512)
        >>> tgt = torch.rand(32, 20, 512)
        >>> out = decoder_layer(tgt, memory)

    __init__:
        self_attn: self-attention, which handles the relations inside the target sequence.
        multihead_attn: multi-head attention, which handles the relations between the target sequence
            and the memory sequence.
        linear1, linear2: the two linear layers of the feedforward network.
        dropout: the Dropout used inside the feedforward network.
        norm1, norm2, norm3: three layer normalization layers, applied after self-attention,
            multi-head attention, and the feedforward network respectively.
        dropout1, dropout2, dropout3: Dropout applied after the residual connections.
        activation: the activation function, determined by the activation argument.
    """
    __constants__ = ['batch_first']

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu",
                 layer_norm_eps=1e-5, batch_first=False, device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super(TransformerDecoderLayer, self).__init__()
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,
                                            **factory_kwargs)
        self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,
                                                 **factory_kwargs)
        # Implementation of Feedforward model
        self.linear1 = Linear(d_model, dim_feedforward, **factory_kwargs)
        self.dropout = Dropout(dropout)
        self.linear2 = Linear(dim_feedforward, d_model, **factory_kwargs)

        self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
        self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
        self.norm3 = LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
        self.dropout1 = Dropout(dropout)
        self.dropout2 = Dropout(dropout)
        self.dropout3 = Dropout(dropout)

        self.activation = _get_activation_fn(activation)

    def __setstate__(self, state):
        if 'activation' not in state:
            state['activation'] = F.relu
        super(TransformerDecoderLayer, self).__setstate__(state)

    def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:
        r"""Pass the inputs (and masks) through the decoder layer.

        Args:
            tgt: the sequence to the decoder layer (required).
            memory: the sequence from the last layer of the encoder (required).
            tgt_mask: the mask for the tgt sequence (optional).
            memory_mask: the mask for the memory sequence (optional).
            tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
            memory_key_padding_mask: the mask for the memory keys per batch (optional).

        Shape:
            see the docs in Transformer class.

        Forward flow:
            First, the target sequence is processed by self-attention; the result is combined with
            the original target sequence through a residual connection and then layer-normalized.
            Next, the processed target sequence interacts with the memory sequence through multi-head
            attention; the result is combined with the target sequence through a residual connection
            and layer-normalized. Finally, the target sequence goes through the feedforward network
            (a linear layer, the activation function, Dropout, and another linear layer); the result
            is combined with the target sequence through a residual connection and layer-normalized.
        """
        tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt = self.norm1(tgt)
        tgt2 = self.multihead_attn(tgt, memory, memory, attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt


def _get_clones(module, N):
    return ModuleList([copy.deepcopy(module) for i in range(N)])


def _get_activation_fn(activation):
    if activation == "relu":
        return F.relu
    elif activation == "gelu":
        return F.gelu

    raise RuntimeError("activation should be relu/gelu, not {}".format(activation))
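Finally, a quick numerical illustration of why the decoder output keeps the target length: in the cross-attention call multihead_attn(tgt, memory, memory, ...), tgt supplies the queries and memory supplies the keys and values, so the output length follows the queries. A short sketch with arbitrary shapes:

import torch
import torch.nn as nn

decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8).eval()
memory = torch.rand(10, 32, 512)      # (S, N, E) from the encoder
tgt = torch.rand(20, 32, 512)         # (T, N, E)

with torch.no_grad():
    out = decoder_layer(tgt, memory)
    # Inspect the cross-attention weights directly on the layer's submodule.
    _, attn_weights = decoder_layer.multihead_attn(tgt, memory, memory)

print(out.shape)            # torch.Size([20, 32, 512]): the target length is preserved
print(attn_weights.shape)   # torch.Size([32, 20, 10]): (N, T, S), averaged over heads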