Module Source
[TITS 23] [link] [code] Lightweight Real-Time Semantic Segmentation Network With Efficient Transformer and CNN
Module Name
Lightweight Dilated Bottleneck (LDB)
Module Function
An improved encoder block
Module Structure
Module Code
import torch
import torch.nn as nn
import torch.nn.functional as F


class Conv(nn.Module):
    def __init__(self, nIn, nOut, kSize, stride, padding, dilation=(1, 1), groups=1, bn_acti=False, bias=False):
        super().__init__()
        self.bn_acti = bn_acti
        self.conv = nn.Conv2d(nIn, nOut, kernel_size=kSize,
                              stride=stride, padding=padding,
                              dilation=dilation, groups=groups, bias=bias)
        if self.bn_acti:
            self.bn_prelu = BNPReLU(nOut)

    def forward(self, input):
        output = self.conv(input)
        if self.bn_acti:
            output = self.bn_prelu(output)
        return output


class BNPReLU(nn.Module):
    def __init__(self, nIn):
        super().__init__()
        self.bn = nn.BatchNorm2d(nIn, eps=1e-3)
        self.acti = nn.PReLU(nIn)

    def forward(self, input):
        output = self.bn(input)
        output = self.acti(output)
        return output


class ShuffleBlock(nn.Module):
    def __init__(self, groups):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        """Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]"""
        N, C, H, W = x.size()
        g = self.groups
        return x.view(N, g, C // g, H, W).permute(0, 2, 1, 3, 4).contiguous().view(N, C, H, W)


class eca_layer(nn.Module):
    """Constructs an ECA module.

    Args:
        channel: number of channels of the input feature map
        k_size: adaptively selected kernel size
    """

    def __init__(self, channel, k_size=3):
        super(eca_layer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        b, c, h, w = x.size()
        # Feature descriptor on the global spatial information
        y = self.avg_pool(x)
        # 1-D convolution over the channel descriptor (local cross-channel interaction)
        y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)
        # Channel attention weights in (0, 1)
        y = self.sigmoid(y)
        return x * y.expand_as(x)


class LDB(nn.Module):
    def __init__(self, nIn, d=1, kSize=3, dkSize=3):
        super().__init__()
        self.bn_relu_1 = BNPReLU(nIn)
        # Bottleneck: halve the channel count with a 1x1 convolution
        self.conv1x1_in = Conv(nIn, nIn // 2, 1, 1, padding=0, bn_acti=False)
        # Factorized 3x1 / 1x3 convolutions to enlarge the receptive field
        self.conv3x1 = Conv(nIn // 2, nIn // 2, (kSize, 1), 1, padding=(1, 0), bn_acti=True)
        self.conv1x3 = Conv(nIn // 2, nIn // 2, (1, kSize), 1, padding=(0, 1), bn_acti=True)
        # Branch 1: depth-wise factorized convolutions for local, short-range features
        self.dconv3x1 = Conv(nIn // 2, nIn // 2, (dkSize, 1), 1, padding=(1, 0), groups=nIn // 2, bn_acti=True)
        self.dconv1x3 = Conv(nIn // 2, nIn // 2, (1, dkSize), 1, padding=(0, 1), groups=nIn // 2, bn_acti=True)
        self.ca11 = eca_layer(nIn // 2)
        # Branch 2: depth-wise dilated convolutions for longer-range context
        self.ddconv3x1 = Conv(nIn // 2, nIn // 2, (dkSize, 1), 1, padding=(1 * d, 0), dilation=(d, 1), groups=nIn // 2, bn_acti=True)
        self.ddconv1x3 = Conv(nIn // 2, nIn // 2, (1, dkSize), 1, padding=(0, 1 * d), dilation=(1, d), groups=nIn // 2, bn_acti=True)
        self.ca22 = eca_layer(nIn // 2)
        self.bn_relu_2 = BNPReLU(nIn // 2)
        # Point-wise convolution restores the original channel count
        self.conv1x1 = Conv(nIn // 2, nIn, 1, 1, padding=0, bn_acti=False)
        self.shuffle = ShuffleBlock(nIn // 2)

    def forward(self, input):
        output = self.bn_relu_1(input)
        output = self.conv1x1_in(output)
        output = self.conv3x1(output)
        output = self.conv1x3(output)

        br1 = self.dconv3x1(output)
        br1 = self.dconv1x3(br1)
        br1 = self.ca11(br1)

        br2 = self.ddconv3x1(output)
        br2 = self.ddconv1x3(br2)
        br2 = self.ca22(br2)

        # Fuse the two branches with the intermediate features
        output = br1 + br2 + output
        output = self.bn_relu_2(output)
        output = self.conv1x1(output)
        # Residual connection followed by channel shuffle
        output = self.shuffle(output + input)
        return output


if __name__ == '__main__':
    x = torch.randn([3, 256, 32, 32])
    ldb = LDB(nIn=256)
    out = ldb(x)
    print(out.shape)  # torch.Size([3, 256, 32, 32])
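As a quick sanity check (illustrative, not part of the original code; run after the code above so torch and ShuffleBlock are in scope), the snippet below verifies that ShuffleBlock is a pure channel permutation: a shuffle with g groups followed by a shuffle with C // g groups restores the input exactly.

# Sanity check: channel shuffle with g groups, then with C // g groups,
# is the identity permutation on channels.
x = torch.arange(8.).view(1, 8, 1, 1)
s1 = ShuffleBlock(groups=4)
s2 = ShuffleBlock(groups=2)        # 8 // 4 = 2
print(s1(x).flatten().tolist())    # [0.0, 2.0, 4.0, 6.0, 1.0, 3.0, 5.0, 7.0]
assert torch.equal(s2(s1(x)), x)   # round trip recovers the original channel order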
Original Description
The overall structure of the LDB draws on the idea of ResNet, designing the module as a residual block so that more feature information can be collected with as few network layers as possible. Specifically, at the bottleneck, a 1×1 convolution halves the channel count of the input features; with fewer channels, the parameter count and computational cost drop sharply. Although this sacrifices some accuracy, stacking two extra modules at this point is more advantageous than trying to recover the loss. At the same time, since a 1×1 convolution is used, the network must be deepened to obtain a larger receptive field; therefore, 3×1 and 1×3 factorized convolutions are added after the 1×1 convolution to expand the receptive field and capture contextual information over a wider range. The factorized convolutions are likewise chosen with parameter count and computation in mind. Similarly, both branches of the subsequent two-branch structure use factorized convolutions: one handles local, short-range feature information, while the other uses dilated convolutions to extract feature information from a larger receptive field under different dilation rates. Each branch is followed by a channel attention mechanism inspired by ECA-Net, which builds an attention matrix along the channel dimension to enhance feature representation and suppress noise, since for a CNN most feature information is carried in the channels. The two low-dimensional branches are then fused with the intermediate features and fed into the following 1×1 point-wise convolution, which restores the channel count of the feature map to match that of the input. Finally, a channel shuffle strategy is adopted to avoid the drawback of depth-wise convolution, namely that information remains isolated and channels lack inter-channel correlation, thereby promoting the exchange of semantic information across channels.
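To make the parameter-saving claim concrete, here is a minimal sketch (an illustrative comparison, not from the paper) that counts the parameters of one LDB block against a hypothetical non-bottleneck baseline of two full 3×3 convolutions at the same width, assuming the LDB class defined above is in scope.

import torch.nn as nn

def count_params(m: nn.Module) -> int:
    return sum(p.numel() for p in m.parameters())

nIn = 256
# Hypothetical baseline: two full-width 3x3 convolutions with BN + PReLU
plain_block = nn.Sequential(
    nn.Conv2d(nIn, nIn, 3, padding=1, bias=False),
    nn.BatchNorm2d(nIn), nn.PReLU(nIn),
    nn.Conv2d(nIn, nIn, 3, padding=1, bias=False),
)
print(count_params(LDB(nIn=nIn)))  # roughly 0.17M parameters
print(count_params(plain_block))   # roughly 1.18M parameters

The bottleneck halving, factorized kernels, and depth-wise grouping together account for the gap of roughly 7× in this comparison.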