-
Notifications
You must be signed in to change notification settings - Fork 2
/
selective_kernel.py
52 lines (35 loc) · 1.36 KB
/
selective_kernel.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import torch
from torch import nn
from .vanilla_conv2d import Conv2d1x1, Conv2d1x1Block
from .depthwise_separable_conv2d import DepthwiseBlock
from .channel import Combine
from ..functional import make_divisible
class SelectiveKernelBlock(nn.Module):
    r"""Selective-kernel channel attention over two receptive-field branches.

    Paper: Selective Kernel Networks, https://arxiv.org/abs/1903.06586
    """

    def __init__(
        self,
        in_channels,
        rd_ratio: float = 1/8,
        rd_divisor: int = 8,
    ) -> None:
        super().__init__()
        self.in_channels = in_channels

        # Bottleneck width of the attention branch, floored at 32 channels.
        rd_channels = max(make_divisible(in_channels * rd_ratio, rd_divisor), 32)

        # Two depthwise branches: plain 3x3, and 3x3 with dilation 2
        # (an effective 5x5 receptive field at 3x3 cost).
        self.conv3x3 = DepthwiseBlock(in_channels, in_channels, kernel_size=3, dilation=1)
        self.conv5x5 = DepthwiseBlock(in_channels, in_channels, kernel_size=3, dilation=2)

        self.fuse = Combine('ADD')
        self.pool = nn.AdaptiveAvgPool2d((1, 1))
        self.reduce = Conv2d1x1Block(in_channels, rd_channels)
        # Emits 2*C logits: one weight per (branch, channel) pair.
        self.qk = Conv2d1x1(rd_channels, in_channels * 2, bias=True)
        # Normalize across the branch axis so the two weights sum to one.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        branch_a = self.conv3x3(x)
        branch_b = self.conv5x5(x)

        # Fuse the branches, squeeze spatially, reduce, then predict
        # per-branch channel attention weights of shape (N, 2, C, 1, 1).
        pooled = self.pool(self.fuse([branch_a, branch_b]))
        logits = self.qk(self.reduce(pooled))
        weights = self.softmax(logits.view(-1, 2, self.in_channels, 1, 1))

        # Weighted sum of the stacked branches along the branch axis.
        return (torch.stack([branch_a, branch_b], dim=1) * weights).sum(dim=1)