-
Notifications
You must be signed in to change notification settings - Fork 135
/
net.py
129 lines (103 loc) · 3.78 KB
/
net.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
from pathlib import Path
import torch
import torch.nn as nn
from timm.models.efficientnet import mobilenetv2_100
from timm.models.efficientnet_builder import efficientnet_init_weights
from mylib.pytorch_lightning.base_module import load_pretrained_dict
class ConvBNReLU(nn.Sequential):
    """Conv2d -> norm -> ReLU6, packaged as a single sequential unit.

    The convolution is bias-free because the following norm layer supplies
    its own affine shift; padding is (kernel_size - 1) // 2, which keeps
    the spatial size unchanged at stride 1 for odd kernel sizes.
    """

    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1, norm_layer=None):
        norm_cls = nn.BatchNorm2d if norm_layer is None else norm_layer
        same_pad = (kernel_size - 1) // 2
        conv = nn.Conv2d(
            in_planes, out_planes, kernel_size, stride, same_pad,
            groups=groups, bias=False,
        )
        super().__init__(conv, norm_cls(out_planes), nn.ReLU6(inplace=True))
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block.

    Expands the channel count by ``expand_ratio`` with a pointwise conv
    (skipped when the ratio is 1), applies a depthwise conv at ``stride``,
    then projects back down with a linear (activation-free) pointwise
    conv. A residual shortcut is added only when stride is 1 and the
    input/output channel counts match.
    """

    def __init__(self, inp, oup, stride, expand_ratio, norm_layer=None):
        super().__init__()
        self.stride = stride
        assert stride in (1, 2)

        norm_cls = nn.BatchNorm2d if norm_layer is None else norm_layer
        expanded = int(round(inp * expand_ratio))
        self.use_res_connect = self.stride == 1 and inp == oup

        stages = []
        if expand_ratio != 1:
            # pointwise expansion
            stages.append(ConvBNReLU(inp, expanded, kernel_size=1, norm_layer=norm_cls))
        stages += [
            # depthwise (one filter per channel)
            ConvBNReLU(expanded, expanded, stride=stride, groups=expanded, norm_layer=norm_cls),
            # linear pointwise projection — deliberately no activation
            nn.Conv2d(expanded, oup, 1, 1, 0, bias=False),
            norm_cls(oup),
        ]
        self.conv = nn.Sequential(*stages)

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out
class UpSampleBlock(nn.Module):
    """Decoder stage: 2x upsample, concat with the skip feature, fuse.

    A transposed conv (kernel 4, stride 2, padding 1) exactly doubles the
    spatial size of ``x1``; the result is concatenated channel-wise with
    the same-resolution encoder feature ``x0`` and fused back down to
    ``out_channels`` by an InvertedResidual block.
    """

    def __init__(
            self,
            in_channels: int,
            out_channels: int,
    ):
        super().__init__()
        self.dconv = nn.ConvTranspose2d(in_channels, out_channels, 4, padding=1, stride=2)
        self.invres = InvertedResidual(out_channels * 2, out_channels, 1, 6)

    def forward(self, x0, x1):
        upsampled = self.dconv(x1)
        merged = torch.cat([x0, upsampled], dim=1)
        return self.invres(merged)
class MobileNetV2_unet(nn.Module):
    """U-Net-style single-channel mask predictor on a timm MobileNetV2.

    Encoder: pretrained ``mobilenetv2_100`` run through ``conv_stem`` /
    ``bn1`` / ``act1`` / ``blocks`` / ``conv_head``; the classifier tail
    (``bn2``, ``act2``, ``global_pool``, ``classifier``) is removed.
    Skip features are captured at the stages whose outputs carry
    16, 24, 32 and 96 channels. Decoder: four UpSampleBlocks consume the
    skips deepest-first, then a 1x1 conv stack plus sigmoid produces a
    single-channel output in [0, 1].
    """

    def __init__(self, **kwargs):
        super().__init__()
        self.backbone = mobilenetv2_100(pretrained=True, **kwargs)

        # (in, out) channel pairs, deepest decoder stage first.
        self.up_sample_blocks = nn.ModuleList(
            UpSampleBlock(cin, cout)
            for cin, cout in [(1280, 96), (96, 32), (32, 24), (24, 16)]
        )
        self.conv_last = nn.Sequential(
            nn.Conv2d(16, 3, 1),
            nn.Conv2d(3, 1, 1),
            nn.Sigmoid(),
        )

        # Drop the backbone's unused classifier tail so the state_dict
        # only carries what forward() touches.
        del self.backbone.bn2, self.backbone.act2, self.backbone.global_pool, self.backbone.classifier

        # Re-initialize the decoder only; the encoder keeps its
        # pretrained weights.
        efficientnet_init_weights(self.up_sample_blocks)
        efficientnet_init_weights(self.conv_last)

    def forward(self, x):
        x = self.backbone.conv_stem(x)
        x = self.backbone.bn1(x)
        x = self.backbone.act1(x)

        # Collect encoder features for the skip connections by their
        # channel counts (the backbone stages producing 16/24/32/96 ch).
        skips = []
        for stage in self.backbone.blocks:
            x = stage(x)
            if x.shape[1] in (16, 24, 32, 96):
                skips.append(x)
        x = self.backbone.conv_head(x)

        # Deepest skip first: each decoder block doubles the resolution
        # and merges the matching encoder feature.
        for skip, up in zip(reversed(skips), self.up_sample_blocks):
            x = up(skip, x)

        return self.conv_last(x)
def load_trained_model(ckpt_path: Path) -> MobileNetV2_unet:
    """Build a MobileNetV2_unet and load trained weights from a checkpoint.

    ``load_pretrained_dict`` extracts a plain state dict from the
    checkpoint at ``ckpt_path``; every key must match the freshly
    constructed model (strict load).
    """
    net = MobileNetV2_unet()
    net.load_state_dict(load_pretrained_dict(ckpt_path))
    return net
# %%
if __name__ == '__main__':
    # %%
    # Smoke test: build the network, push a dummy ImageNet-sized batch
    # through it, and print the architecture and output shape.
    net = MobileNetV2_unet()
    dummy = torch.randn((2, 3, 224, 224))
    pred = net(dummy)
    print(net)
    print(pred.shape)