huqiming513
committed on
Commit 03b684c
Parent(s): e538b68
Upload 7 files
Browse files:
- models/__init__.py +2 -0
- models/losses.py +279 -0
- models/lr_scheduler.py +20 -0
- models/metrics.py +63 -0
- models/networks/__init__.py +1 -0
- models/networks/modules.py +144 -0
- models/networks/networks.py +65 -0
models/__init__.py
ADDED
@@ -0,0 +1,2 @@
+from .networks import *
+
models/losses.py
ADDED
@@ -0,0 +1,279 @@
+import torch
+import torch.nn as nn
+from pytorch_msssim import SSIM, MS_SSIM
+from torch.nn import L1Loss, MSELoss
+from torchvision.models import vgg16
+import torch.nn.functional as F
+
+
+def compute_gradient(img):
+    gradx = img[..., 1:, :] - img[..., :-1, :]
+    grady = img[..., 1:] - img[..., :-1]
+    return gradx, grady
+
+
+class GradientLoss(nn.Module):
+    def __init__(self):
+        super(GradientLoss, self).__init__()
+        self.loss = nn.L1Loss()
+
+    def forward(self, predict, target):
+        predict_gradx, predict_grady = compute_gradient(predict)
+        target_gradx, target_grady = compute_gradient(target)
+
+        return self.loss(predict_gradx, target_gradx) + self.loss(predict_grady, target_grady)
+
+
+class SSIMLoss(nn.Module):
+    def __init__(self, channels):
+        super(SSIMLoss, self).__init__()
+        self.ssim = SSIM(data_range=1., size_average=True, channel=channels)
+
+    def forward(self, output, target):
+        ssim_loss = 1 - self.ssim(output, target)
+        return ssim_loss
+
+
+class SSIML1Loss(nn.Module):
+    def __init__(self, channels):
+        super(SSIML1Loss, self).__init__()
+        self.l1_loss_func = nn.L1Loss()
+        self.ssim = SSIM(data_range=1., size_average=True, channel=channels)
+        self.alpha = 1.4
+
+    def forward(self, output, target):
+        l1_loss = self.l1_loss_func(output, target)
+        ssim_loss = 1 - self.ssim(output, target)
+        total_loss = l1_loss + self.alpha * ssim_loss
+        return total_loss
+
+
+class GradSSIML1Loss(nn.Module):
+    def __init__(self, channels):
+        super(GradSSIML1Loss, self).__init__()
+        self.l1_loss_func = nn.L1Loss()
+        self.ssim = SSIM(data_range=1., size_average=True, channel=channels)
+        self.grad_loss_func = GradientLoss()
+        self.alpha = 1.4
+
+    def forward(self, output, target):
+        l1_loss = self.l1_loss_func(output, target)
+        ssim_loss = 1 - self.ssim(output, target)
+        grad_loss = self.grad_loss_func(output, target)
+        total_loss = l1_loss + self.alpha * ssim_loss + 0.2 * grad_loss
+        return total_loss
+
+
+class SSIML2Loss(nn.Module):
+    def __init__(self, channels):
+        super(SSIML2Loss, self).__init__()
+        self.l2_loss_func = nn.MSELoss()
+        self.ssim = SSIM(data_range=1., size_average=True, channel=channels)
+        self.alpha = 1.
+
+    def forward(self, output, target):
+        l2_loss = self.l2_loss_func(output, target)
+        ssim_loss = 1 - self.ssim(output, target)
+        total_loss = l2_loss + self.alpha * ssim_loss
+        return total_loss
+
+
+class MSSSIML1Loss(nn.Module):
+    def __init__(self, channels):
+        super(MSSSIML1Loss, self).__init__()
+        self.l1_loss_func = nn.L1Loss()
+        self.ms_ssim = MS_SSIM(data_range=1., size_average=True, channel=channels)
+        self.alpha = 1.0
+
+    def forward(self, output, target):
+        ms_ssim_loss = 1 - self.ms_ssim(output, target)
+        l1_loss = self.l1_loss_func(output, target)
+        total_loss = l1_loss + self.alpha * ms_ssim_loss
+        return total_loss
+
+
+class MSSSIML2Loss(nn.Module):
+    def __init__(self, channels):
+        super(MSSSIML2Loss, self).__init__()
+        self.l2_loss_func = nn.MSELoss()
+        self.ms_ssim = MS_SSIM(data_range=1., size_average=True, channel=channels)
+        # self.alpha = 0.84
+        self.alpha = 1.2
+
+    def forward(self, output, target):
+        l2_loss = self.l2_loss_func(output, target)
+        ms_ssim_loss = 1 - self.ms_ssim(output, target)
+        total_loss = l2_loss + self.alpha * ms_ssim_loss
+        return total_loss
+
+
+class PerLoss(torch.nn.Module):
+    def __init__(self):
+        super(PerLoss, self).__init__()
+        vgg_model = vgg16(pretrained=True).features[:16]
+        vgg_model = vgg_model.to('cuda')
+        for param in vgg_model.parameters():
+            param.requires_grad = False
+
+        self.vgg_layers = vgg_model
+
+        self.layer_name_mapping = {
+            '3': "relu1_2",
+            '8': "relu2_2",
+            '15': "relu3_3"
+        }
+
+    def output_features(self, x):
+        output = {}
+        for name, module in self.vgg_layers._modules.items():
+            x = module(x)
+            if name in self.layer_name_mapping:
+                output[self.layer_name_mapping[name]] = x
+        return list(output.values())
+
+    def forward(self, data, gt):
+        loss = []
+        if data.shape[1] == 1:
+            data = data.repeat(1, 3, 1, 1)
+            gt = gt.repeat(1, 3, 1, 1)
+
+        dehaze_features = self.output_features(data)
+        gt_features = self.output_features(gt)
+        for dehaze_feature, gt_feature in zip(dehaze_features, gt_features):
+            loss.append(F.mse_loss(dehaze_feature, gt_feature))
+
+        return sum(loss) / len(loss)
+
+
+class PerL1Loss(torch.nn.Module):
+    def __init__(self):
+        super(PerL1Loss, self).__init__()
+        self.l1_loss_func = nn.L1Loss()
+        self.per_loss_func = PerLoss().to('cuda')
+
+    def forward(self, output, target):
+        l1_loss = self.l1_loss_func(output, target)
+        per_loss = self.per_loss_func(output, target)
+        # total_loss = l1_loss + 0.04 * per_loss
+        total_loss = l1_loss + 0.2 * per_loss
+        return total_loss
+
+
+class MSPerL1Loss(torch.nn.Module):
+    def __init__(self, channels):
+        super(MSPerL1Loss, self).__init__()
+        self.l1_loss_func = nn.L1Loss()
+        self.ms_ssim = MS_SSIM(data_range=1., size_average=True, channel=channels)
+        self.per_loss_func = PerLoss().to('cuda')
+
+    def forward(self, output, target):
+        ms_ssim_loss = 1 - self.ms_ssim(output, target)
+        l1_loss = self.l1_loss_func(output, target)
+        per_loss = self.per_loss_func(output, target)
+        total_loss = l1_loss + 1.2 * ms_ssim_loss + 0.04 * per_loss
+        return total_loss
+
+
+class MSPerL2Loss(torch.nn.Module):
+    def __init__(self):
+        super(MSPerL2Loss, self).__init__()
+        self.l2_loss_func = nn.MSELoss()
+        self.ms_ssim = MS_SSIM(data_range=1., size_average=True, channel=3)
+        self.per_loss_func = PerLoss().to('cuda')
+
+    def forward(self, output, target):
+        ms_ssim_loss = 1 - self.ms_ssim(output, target)
+        l2_loss = self.l2_loss_func(output, target)
+        per_loss = self.per_loss_func(output, target)
+        total_loss = l2_loss + 0.16 * ms_ssim_loss + 0.2 * per_loss
+        return total_loss
+
+
+class TVLoss(torch.nn.Module):
+    def __init__(self):
+        super(TVLoss, self).__init__()
+
+    def forward(self, data):
+        w_variance = torch.sum(torch.pow(data[:, :, :, :-1] - data[:, :, :, 1:], 2))
+        h_variance = torch.sum(torch.pow(data[:, :, :-1, :] - data[:, :, 1:, :], 2))
+
+        count_h = self._tensor_size(data[:, :, 1:, :])
+        count_w = self._tensor_size(data[:, :, :, 1:])
+
+        tv_loss = h_variance / count_h + w_variance / count_w
+        return tv_loss
+
+    def _tensor_size(self, t):
+        return t.size()[1] * t.size()[2] * t.size()[3]
+
+
+def safe_div(a, b, eps=1e-2):
+    return a / torch.clamp_min(b, eps)
+
+
+class WTVLoss(torch.nn.Module):
+    def __init__(self):
+        super(WTVLoss, self).__init__()
+        self.eps = 1e-2
+
+    def forward(self, data, aux):
+        data_dw = data[:, :, :, :-1] - data[:, :, :, 1:]
+        data_dh = data[:, :, :-1, :] - data[:, :, 1:, :]
+        aux_dw = torch.abs(aux[:, :, :, :-1] - aux[:, :, :, 1:])
+        aux_dh = torch.abs(aux[:, :, :-1, :] - aux[:, :, 1:, :])
+
+        w_variance = torch.sum(torch.pow(safe_div(data_dw, aux_dw, self.eps), 2))
+        h_variance = torch.sum(torch.pow(safe_div(data_dh, aux_dh, self.eps), 2))
+
+        count_h = self._tensor_size(data[:, :, 1:, :])
+        count_w = self._tensor_size(data[:, :, :, 1:])
+
+        tv_loss = h_variance / count_h + w_variance / count_w
+        return tv_loss
+
+    def _tensor_size(self, t):
+        return t.size()[1] * t.size()[2] * t.size()[3]
+
+
+class WTVLoss2(torch.nn.Module):
+    def __init__(self):
+        super(WTVLoss2, self).__init__()
+        self.eps = 1e-2
+        self.criterion = nn.MSELoss()
+
+    def forward(self, data, aux):
+        N, C, H, W = data.shape
+
+        data_dw = F.pad(torch.abs(data[:, :, :, :-1] - data[:, :, :, 1:]), (1, 0, 0, 0))
+        data_dh = F.pad(torch.abs(data[:, :, :-1, :] - data[:, :, 1:, :]), (0, 0, 1, 0))
+        aux_dw = F.pad(torch.abs(aux[:, :, :, :-1] - aux[:, :, :, 1:]), (1, 0, 0, 0))
+        aux_dh = F.pad(torch.abs(aux[:, :, :-1, :] - aux[:, :, 1:, :]), (0, 0, 1, 0))
+
+        data_d = data_dw + data_dh
+        aux_d = aux_dw + aux_dh
+
+        loss1 = self.criterion(data_d, aux_d)
+        # loss2 = torch.norm(data_d / (aux_d + self.eps), p=1) / (C * H * W)
+        loss2 = torch.norm(data_d / (aux_d + self.eps)) / (C * H * W)
+        return loss1 * 0.5 + loss2 * 4.0
+
+
+class MSTVPerL1Loss(torch.nn.Module):
+    def __init__(self):
+        super(MSTVPerL1Loss, self).__init__()
+        self.l1_loss_func = nn.L1Loss()
+        self.ms_ssim = MS_SSIM(data_range=1., size_average=True, channel=3)
+        self.per_loss_func = PerLoss().to('cuda')
+        self.tv_loss_func = TVLoss()
+
+    def forward(self, output, target):
+        ms_ssim_loss = 1 - self.ms_ssim(output, target)
+        l1_loss = self.l1_loss_func(output, target)
+        per_loss = self.per_loss_func(output, target)
+        tv_loss = self.tv_loss_func(output)
+        total_loss = l1_loss + 1.2 * ms_ssim_loss + 0.04 * per_loss + 1e-7 * tv_loss
+        return total_loss
+
+
+if __name__ == "__main__":
+    MSTVPerL1Loss()
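For reference, a minimal usage sketch of one of these criteria (illustrative only, not part of this commit). It assumes the repository root is importable and that inputs are 3-channel tensors scaled to [0, 1]; the perceptual and MS-SSIM variants additionally require a CUDA device, since PerLoss moves VGG-16 to 'cuda'.

# Hypothetical usage sketch; tensor shapes and the import path are assumptions.
import torch
from models.losses import SSIML1Loss

output = torch.rand(2, 3, 128, 128, requires_grad=True)  # stand-in prediction
target = torch.rand(2, 3, 128, 128)                      # stand-in ground truth

criterion = SSIML1Loss(channels=3)   # L1 + 1.4 * (1 - SSIM)
loss = criterion(output, target)
loss.backward()                      # gradients flow back to the prediction
print(loss.item())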
models/lr_scheduler.py
ADDED
@@ -0,0 +1,20 @@
+import torch
+from torch.optim.lr_scheduler import _LRScheduler
+import math
+
+
+class CosineLR(_LRScheduler):
+    def __init__(self, optimizer, init_lr, total_epochs, last_epoch=-1):
+        super(CosineLR, self).__init__(optimizer, last_epoch=-1)
+        self.optimizer = optimizer
+        self.init_lr = init_lr
+        self.total_epochs = total_epochs
+        self.last_epoch = last_epoch
+        print(f'CosineLR start from epoch(step) {last_epoch} with init_lr {init_lr} ')
+
+    def get_lr(self):
+        if self.last_epoch == 0:
+            return [group['lr'] for group in self.optimizer.param_groups]
+
+        return [0.5 * (1 + math.cos(self.last_epoch * math.pi / self.total_epochs)) * self.init_lr for group in
+                self.optimizer.param_groups]
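A short sketch of how this scheduler could be driven per epoch (illustrative only, not part of this commit); the optimizer choice and the import path are assumptions.

# Hypothetical usage sketch for CosineLR with a stand-in model and SGD.
import torch
from models.lr_scheduler import CosineLR

model = torch.nn.Linear(8, 8)                              # stand-in model
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
scheduler = CosineLR(optimizer, init_lr=1e-2, total_epochs=100)

for epoch in range(100):
    # ... one epoch of training ...
    optimizer.step()
    scheduler.step()  # lr follows 0.5 * (1 + cos(pi * epoch / total_epochs)) * init_lr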
models/metrics.py
ADDED
@@ -0,0 +1,63 @@
+import math
+from math import exp
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+from torch.autograd import Variable
+
+
+def gaussian(window_size, sigma):
+    gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])
+    return gauss / gauss.sum()
+
+
+def create_window(window_size, channel):
+    _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
+    _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
+    window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
+    return window
+
+
+def _ssim(img1, img2, window, window_size, channel, size_average=True):
+    mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
+    mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
+    mu1_sq = mu1.pow(2)
+    mu2_sq = mu2.pow(2)
+    mu1_mu2 = mu1 * mu2
+    sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
+    sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
+    sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
+    C1 = 0.01 ** 2
+    C2 = 0.03 ** 2
+    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
+
+    if size_average:
+        return ssim_map.mean()
+    else:
+        return ssim_map.mean(1).mean(1).mean(1)
+
+
+def SSIM(img1, img2, window_size=11, size_average=True):
+    img1 = torch.clamp(img1, min=0, max=1)
+    img2 = torch.clamp(img2, min=0, max=1)
+    (_, channel, _, _) = img1.size()
+    window = create_window(window_size, channel)
+    if img1.is_cuda:
+        window = window.cuda(img1.get_device())
+    window = window.type_as(img1)
+    return _ssim(img1, img2, window, window_size, channel, size_average)
+
+
+def PSNR(pred, gt):
+    pred = pred.clamp(0, 1).detach().cpu().numpy()
+    gt = gt.clamp(0, 1).detach().cpu().numpy()
+    imdff = pred - gt
+    rmse = math.sqrt(np.mean(imdff ** 2))
+    if rmse == 0:
+        return 100
+    return 20 * math.log10(1.0 / rmse)
+
+
+if __name__ == "__main__":
+    pass
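A minimal evaluation sketch for these metrics (illustrative only, not part of this commit); the tensor shapes and import path are assumptions, and inputs are expected in [0, 1].

# Hypothetical usage sketch: scoring a stand-in prediction against a reference.
import torch
from models.metrics import PSNR, SSIM

pred = torch.rand(1, 3, 64, 64)  # stand-in restored image
gt = torch.rand(1, 3, 64, 64)    # stand-in reference image

print('PSNR:', PSNR(pred, gt))        # scalar in dB; returns 100 for identical images
print('SSIM:', SSIM(pred, gt).item()) # mean SSIM over the batch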
models/networks/__init__.py
ADDED
@@ -0,0 +1 @@
+from .networks import *
models/networks/modules.py
ADDED
@@ -0,0 +1,144 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class PALayer(nn.Module):
+    def __init__(self, channel):
+        super(PALayer, self).__init__()
+        self.pa = nn.Sequential(
+            nn.Conv2d(channel, channel // 8, 1, padding=0, bias=True),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(channel // 8, 1, 1, padding=0, bias=True),
+            nn.Sigmoid()
+        )
+
+    def forward(self, x):
+        y = self.pa(x)
+        return x * y
+
+
+class CALayer(nn.Module):
+    def __init__(self, channel):
+        super(CALayer, self).__init__()
+        self.avg_pool = nn.AdaptiveAvgPool2d(1)
+        self.ca = nn.Sequential(
+            nn.Conv2d(channel, channel // 8, 1, padding=0, bias=True),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(channel // 8, channel, 1, padding=0, bias=True),
+            nn.Sigmoid()
+        )
+
+    def forward(self, x):
+        y = self.avg_pool(x)
+        y = self.ca(y)
+
+        return x * y
+
+
+class DoubleConv(nn.Module):
+    def __init__(self, in_channels, out_channels, norm=False, leaky=True):
+        super().__init__()
+        self.conv = nn.Sequential(
+            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
+            nn.BatchNorm2d(out_channels) if norm else nn.Identity(),
+            nn.LeakyReLU(0.2, inplace=True) if leaky else nn.ReLU(inplace=True),
+            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
+            nn.BatchNorm2d(out_channels) if norm else nn.Identity(),
+            nn.LeakyReLU(0.2, inplace=True) if leaky else nn.ReLU(inplace=True)
+        )
+
+    def forward(self, x):
+        return self.conv(x)
+
+
+class OutConv(nn.Module):
+    def __init__(self, in_channels, out_channels, act=True):
+        super(OutConv, self).__init__()
+        self.conv = nn.Sequential(
+            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
+            nn.Sigmoid() if act else nn.Identity()
+        )
+
+    def forward(self, x):
+        return self.conv(x)
+
+
+class Down(nn.Module):
+    """Downscaling with maxpool then double conv"""
+
+    def __init__(self, in_channels, out_channels, norm=True, leaky=True):
+        super().__init__()
+        self.maxpool_conv = nn.Sequential(
+            nn.MaxPool2d(2),
+            DoubleConv(in_channels, out_channels, norm=norm, leaky=leaky)
+        )
+
+    def forward(self, x):
+        return self.maxpool_conv(x)
+
+
+class Up(nn.Module):
+    """Upscaling then double conv"""
+
+    def __init__(self, in_channels, out_channels, bilinear=True, norm=True, leaky=True):
+        super().__init__()
+
+        # if bilinear, use the normal convolutions to reduce the number of channels
+        if bilinear:
+            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
+            self.conv = DoubleConv(in_channels, out_channels, norm=norm, leaky=leaky)
+        else:
+            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
+            self.conv = DoubleConv(in_channels, out_channels, norm=norm, leaky=leaky)
+
+    def forward(self, x1, x2):
+        x1 = self.up(x1)
+        # input is CHW
+        diffY = x2.size()[2] - x1.size()[2]
+        diffX = x2.size()[3] - x1.size()[3]
+
+        x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
+                        diffY // 2, diffY - diffY // 2])
+
+        x = torch.cat([x2, x1], dim=1)
+        return self.conv(x)
+
+
+class AttentiveDown(nn.Module):
+    def __init__(self, in_channels, out_channels, norm=False, leaky=True):
+        super().__init__()
+        self.down = Down(in_channels, out_channels, norm=norm, leaky=leaky)
+        self.attention = nn.Sequential(
+            CALayer(out_channels),
+            PALayer(out_channels)
+        )
+
+    def forward(self, x):
+        return self.attention(self.down(x))
+
+
+class AttentiveUp(nn.Module):
+    def __init__(self, in_channels, out_channels, bilinear=True, norm=False, leaky=True):
+        super().__init__()
+        self.up = Up(in_channels, out_channels, bilinear, norm=norm, leaky=leaky)
+        self.attention = nn.Sequential(
+            CALayer(out_channels),
+            PALayer(out_channels)
+        )
+
+    def forward(self, x1, x2):
+        return self.attention(self.up(x1, x2))
+
+
+class AttentiveDoubleConv(nn.Module):
+    def __init__(self, in_channels, out_channels, norm=False, leaky=False):
+        super().__init__()
+        self.conv = DoubleConv(in_channels, out_channels, norm=norm, leaky=leaky)
+        self.attention = nn.Sequential(
+            CALayer(out_channels),
+            PALayer(out_channels)
+        )
+
+    def forward(self, x):
+        return self.attention(self.conv(x))
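A quick shape check of the building blocks (illustrative only, not part of this commit); NCHW inputs and the import path are assumptions.

# Hypothetical shape check: Down halves H and W, Up upsamples and fuses a skip,
# the attentive blocks keep spatial size while applying channel/pixel attention.
import torch
from models.networks.modules import Down, Up, AttentiveDoubleConv

x = torch.rand(1, 32, 64, 64)
print(Down(32, 64)(x).shape)                 # torch.Size([1, 64, 32, 32])
print(AttentiveDoubleConv(32, 16)(x).shape)  # torch.Size([1, 16, 64, 64])

skip = torch.rand(1, 64, 64, 64)             # encoder feature at full resolution
down = torch.rand(1, 64, 32, 32)             # deeper feature to be upsampled
print(Up(128, 32)(down, skip).shape)         # torch.Size([1, 32, 64, 64])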
models/networks/networks.py
ADDED
@@ -0,0 +1,65 @@
+from models.networks.modules import *
+
+
+class BaseNet(nn.Module):
+    def __init__(self, in_channels=1, out_channels=1, norm=True):
+        super(BaseNet, self).__init__()
+        self.n_channels = in_channels
+        self.n_classes = out_channels
+
+        self.inc = DoubleConv(in_channels, 32, norm=norm)
+        self.down1 = Down(32, 64, norm=norm)
+        self.down2 = Down(64, 128, norm=norm)
+        self.down3 = Down(128, 128, norm=norm)
+
+        self.up1 = Up(256, 64, bilinear=True, norm=norm)
+        self.up2 = Up(128, 32, bilinear=True, norm=norm)
+        self.up3 = Up(64, 32, bilinear=True, norm=norm)
+        self.outc = OutConv(32, out_channels)
+
+    def forward(self, x):
+        x1 = self.inc(x)
+        x2 = self.down1(x1)
+        x3 = self.down2(x2)
+        x4 = self.down3(x3)
+        x = self.up1(x4, x3)
+        x = self.up2(x, x2)
+        x = self.up3(x, x1)
+        logits = self.outc(x)
+        return logits
+
+
+class IAN(BaseNet):
+    def __init__(self, in_channels=1, out_channels=1, norm=True):
+        super(IAN, self).__init__(in_channels, out_channels, norm)
+
+
+class ANSN(BaseNet):
+    def __init__(self, in_channels=1, out_channels=1, norm=True):
+        super(ANSN, self).__init__(in_channels, out_channels, norm)
+        self.outc = OutConv(32, out_channels, act=False)
+
+
+class FuseNet(nn.Module):
+    def __init__(self, in_channels=1, out_channels=1, norm=False):
+        super(FuseNet, self).__init__()
+        self.inc = AttentiveDoubleConv(in_channels, 32, norm=norm, leaky=False)
+        self.down1 = AttentiveDown(32, 64, norm=norm, leaky=False)
+        self.down2 = AttentiveDown(64, 64, norm=norm, leaky=False)
+        self.up1 = AttentiveUp(128, 32, bilinear=True, norm=norm, leaky=False)
+        self.up2 = AttentiveUp(64, 32, bilinear=True, norm=norm, leaky=False)
+        self.outc = OutConv(32, out_channels)
+
+    def forward(self, x):
+        x1 = self.inc(x)
+        x2 = self.down1(x1)
+        x3 = self.down2(x2)
+        x = self.up1(x3, x2)
+        x = self.up2(x, x1)
+        logits = self.outc(x)
+        return logits
+
+
+if __name__ == '__main__':
+    for key in FuseNet(4, 2).state_dict().keys():
+        print(key)
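Finally, a minimal instantiation sketch for the three networks (illustrative only, not part of this commit); the input sizes are assumptions, chosen divisible by the downsampling factors, and the import path is assumed from the repository layout.

# Hypothetical forward-pass check on stand-in single-channel inputs.
import torch
from models.networks.networks import IAN, ANSN, FuseNet

x = torch.rand(1, 1, 128, 128)

print(IAN()(x).shape)   # torch.Size([1, 1, 128, 128]) - sigmoid-activated output
print(ANSN()(x).shape)  # torch.Size([1, 1, 128, 128]) - linear (no sigmoid) output
print(FuseNet(4, 2)(torch.rand(1, 4, 128, 128)).shape)  # torch.Size([1, 2, 128, 128])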