The previous post, a paper reading of Restormer: Efficient Transformer for High-Resolution Image Restoration, introduced the technical highlights of the Restormer architecture. This post implements its main network modules in PyTorch.
1. MDTA (Multi-Dconv Head Transposed Attention): the multi-head attention module
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange


## Multi-DConv Head Transposed Self-Attention (MDTA)
class Attention(nn.Module):
    def __init__(self, dim, num_heads, bias):
        super(Attention, self).__init__()
        self.num_heads = num_heads  # number of attention heads
        self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))  # learnable scaling coefficient
        # 1x1 conv to expand the channel dimension
        self.qkv = nn.Conv2d(dim, dim*3, kernel_size=1, bias=bias)
        # 3x3 depth-wise (grouped) convolution
        self.qkv_dwconv = nn.Conv2d(dim*3, dim*3, kernel_size=3, stride=1, padding=1, groups=dim*3, bias=bias)
        # 1x1 output convolution
        self.project_out = nn.Conv2d(dim, dim, kernel_size=1, bias=bias)

    def forward(self, x):
        b, c, h, w = x.shape  # input shape: batch size, channels, height, width
        qkv = self.qkv_dwconv(self.qkv(x))
        q, k, v = qkv.chunk(3, dim=1)  # split into 3 chunks along the channel dimension
        # reshape q, k, v to b head c (h w), flattening each 2D plane
        q = rearrange(q, 'b (head c) h w -> b head c (h w)', head=self.num_heads)
        k = rearrange(k, 'b (head c) h w -> b head c (h w)', head=self.num_heads)
        v = rearrange(v, 'b (head c) h w -> b head c (h w)', head=self.num_heads)
        # L2-normalize q and k along the flattened spatial dimension, so q @ k^T
        # behaves like a cosine similarity between channels
        q = torch.nn.functional.normalize(q, dim=-1)
        k = torch.nn.functional.normalize(k, dim=-1)
        attn = (q @ k.transpose(-2, -1)) * self.temperature  # @ is matrix multiplication; yields a C x C attention map
        attn = attn.softmax(dim=-1)
        out = (attn @ v)  # apply the channel attention to v
        # restore the flattened result to its spatial shape
        out = rearrange(out, 'b head c (h w) -> b (head c) h w', head=self.num_heads, h=h, w=w)
        # final 1x1 output projection
        out = self.project_out(out)
        return out
```
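As a quick sanity check, MDTA preserves the feature shape. The `dim=48` and `num_heads=8` values below are arbitrary example settings, not a configuration quoted from the paper:

```python
# Arbitrary example sizes, only to verify that MDTA keeps the (b, c, h, w) shape
attn = Attention(dim=48, num_heads=8, bias=False)
feat = torch.randn(1, 48, 64, 64)
print(attn(feat).shape)  # torch.Size([1, 48, 64, 64])
```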
2. GDFN (Gated-Dconv Feed-Forward Network)
```python
## Gated-Dconv Feed-Forward Network (GDFN)
class FeedForward(nn.Module):
    def __init__(self, dim, ffn_expansion_factor, bias):
        super(FeedForward, self).__init__()
        # hidden feature dimension = input dimension * expansion factor
        hidden_features = int(dim*ffn_expansion_factor)
        # 1x1 conv to expand channels (producing both gating branches at once)
        self.project_in = nn.Conv2d(dim, hidden_features*2, kernel_size=1, bias=bias)
        # 3x3 depth-wise (grouped) convolution
        self.dwconv = nn.Conv2d(hidden_features*2, hidden_features*2, kernel_size=3, stride=1, padding=1, groups=hidden_features*2, bias=bias)
        # 1x1 conv to project back down to dim
        self.project_out = nn.Conv2d(hidden_features, dim, kernel_size=1, bias=bias)

    def forward(self, x):
        x = self.project_in(x)
        x1, x2 = self.dwconv(x).chunk(2, dim=1)  # split into 2 chunks along the channel dimension
        x = F.gelu(x1) * x2  # gating: the GELU-activated branch modulates the other branch element-wise
        x = self.project_out(x)
        return x
```
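A similar shape check for GDFN; the expansion factor 2.66 below is only an example value, which gives `int(48 * 2.66) = 127` hidden channels per gating branch:

```python
# Arbitrary example sizes; each gating branch has int(48 * 2.66) = 127 channels
ffn = FeedForward(dim=48, ffn_expansion_factor=2.66, bias=False)
feat = torch.randn(1, 48, 64, 64)
print(ffn(feat).shape)  # torch.Size([1, 48, 64, 64])
```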
3. TransformerBlock
```python
## A standard (pre-norm) Transformer block, assembled from MDTA and GDFN
class TransformerBlock(nn.Module):
    def __init__(self, dim, num_heads, ffn_expansion_factor, bias, LayerNorm_type):
        super(TransformerBlock, self).__init__()
        self.norm1 = LayerNorm(dim, LayerNorm_type)  # layer normalization
        self.attn = Attention(dim, num_heads, bias)  # self-attention (MDTA)
        self.norm2 = LayerNorm(dim, LayerNorm_type)  # layer normalization
        self.ffn = FeedForward(dim, ffn_expansion_factor, bias)  # feed-forward network (GDFN)

    def forward(self, x):
        x = x + self.attn(self.norm1(x))  # residual connection
        x = x + self.ffn(self.norm2(x))   # residual connection
        return x
```
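The `LayerNorm` used in the block above is not defined in this post. The following is a minimal sketch that matches the `LayerNorm(dim, LayerNorm_type)` call: a channel-wise layer norm applied to `(b, c, h, w)` tensors with a 'BiasFree' and a 'WithBias' variant. The epsilon value and parameter shapes are assumptions based on the official Restormer code, not taken from this post:

```python
## Channel-wise LayerNorm over (b, c, h, w) tensors -- a minimal sketch of the
## helper that TransformerBlock expects (details are assumptions)
class LayerNorm(nn.Module):
    def __init__(self, dim, LayerNorm_type):
        super(LayerNorm, self).__init__()
        self.with_bias = (LayerNorm_type == 'WithBias')
        self.weight = nn.Parameter(torch.ones(dim))
        self.bias = nn.Parameter(torch.zeros(dim)) if self.with_bias else None

    def forward(self, x):
        h, w = x.shape[-2:]
        # flatten the spatial dims so statistics are computed over the channel axis
        y = rearrange(x, 'b c h w -> b (h w) c')
        sigma = y.var(-1, keepdim=True, unbiased=False)
        if self.with_bias:
            mu = y.mean(-1, keepdim=True)
            y = (y - mu) / torch.sqrt(sigma + 1e-5) * self.weight + self.bias
        else:
            y = y / torch.sqrt(sigma + 1e-5) * self.weight
        return rearrange(y, 'b (h w) c -> b c h w', h=h, w=w)
```

With this helper in place, a single block can be exercised on its own (again with arbitrary example sizes):

```python
block = TransformerBlock(dim=48, num_heads=2, ffn_expansion_factor=2.66,
                         bias=False, LayerNorm_type='WithBias')
print(block(torch.randn(1, 48, 64, 64)).shape)  # torch.Size([1, 48, 64, 64])
```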
4. Test example
```python
model = Restormer()                 # the full Restormer model (from the official repository)
print(model)                        # print the network structure
x = torch.randn((1, 3, 512, 512))   # random input image
x = model(x)                        # forward pass through the network
print(x.shape)                      # print the shape of the network output
```
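`Restormer()` itself comes from the official implementation and is not defined in this post. To run the modules above end to end without it, here is a hypothetical stand-in (the name `TinyRestormer`, its depth, and its width are invented for illustration) that chains a convolutional embedding, a few TransformerBlocks, and an output projection with a global residual:

```python
# Hypothetical stand-in, NOT the full Restormer: just enough to exercise the
# modules defined in this post on an image-shaped tensor
class TinyRestormer(nn.Module):
    def __init__(self, dim=48, num_blocks=4, num_heads=1):
        super(TinyRestormer, self).__init__()
        self.embed = nn.Conv2d(3, dim, kernel_size=3, padding=1, bias=False)
        self.blocks = nn.Sequential(*[
            TransformerBlock(dim, num_heads, ffn_expansion_factor=2.66,
                             bias=False, LayerNorm_type='WithBias')
            for _ in range(num_blocks)])
        self.output = nn.Conv2d(dim, 3, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        return self.output(self.blocks(self.embed(x))) + x  # global residual

tiny = TinyRestormer()
y = tiny(torch.randn(1, 3, 128, 128))
print(y.shape)  # torch.Size([1, 3, 128, 128])
```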