复制代码
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
####################
class stride_OctaveConv(nn.Module):
    """Octave convolution operating on a (high-frequency, low-frequency) feature pair.

    Input/output is a tuple ``(X_h, X_l)``.  ``alpha`` is the fraction of
    channels assigned to the low-frequency branch.  When ``stride == 2`` both
    inputs are average-pooled before the four branch convolutions.
    NOTE(review): relies on helpers ``get_valid_padding``, ``act`` and ``norm``
    defined elsewhere in this file.
    """

    def __init__(self, in_nc, out_nc, kernel_size, alpha=0.5, stride=1, dilation=1, groups=1,
                 bias=True, pad_type='zero', norm_type=None, act_type='prelu', mode='CNA'):
        super(stride_OctaveConv, self).__init__()
        assert mode in ['CNA', 'NAC', 'CNAC'], 'Wrong conv mode [{:s}]'.format(mode)
        padding = get_valid_padding(kernel_size, dilation) if pad_type == 'zero' else 0
        self.h2g_pool = nn.AvgPool2d(kernel_size=(2, 2), stride=2)
        self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.stride = stride
        # Four cross-frequency branches: low->low, low->high, high->low (spatial
        # stride 2 to reach the low-frequency resolution), high->high.
        self.l2l = nn.Conv2d(int(alpha * in_nc), int(alpha * out_nc),
                             kernel_size, 1, padding, dilation, groups, bias)
        self.l2h = nn.Conv2d(int(alpha * in_nc), out_nc - int(alpha * out_nc),
                             kernel_size, 1, padding, dilation, groups, bias)
        self.h2l = nn.Conv2d(in_nc - int(alpha * in_nc), int(alpha * out_nc),
                             kernel_size, 2, padding, dilation, groups, bias)
        self.h2h = nn.Conv2d(in_nc - int(alpha * in_nc), out_nc - int(alpha * out_nc),
                             kernel_size, 1, padding, dilation, groups, bias)
        self.a = act(act_type) if act_type else None
        self.n_h = norm(norm_type, int(out_nc * (1 - alpha))) if norm_type else None
        self.n_l = norm(norm_type, int(out_nc * alpha)) if norm_type else None

    def forward(self, x):
        X_h, X_l = x
        if self.stride == 2:
            X_h, X_l = self.h2g_pool(X_h), self.h2g_pool(X_l)
        X_h2h = self.h2h(X_h)
        # l2h output is at low resolution; upsample to merge with the high branch.
        X_l2h = self.upsample(self.l2h(X_l))
        X_l2l = self.l2l(X_l)
        X_h2l = self.h2l(X_h)
        X_h = X_l2h + X_h2h
        X_l = X_h2l + X_l2l
        if self.n_h and self.n_l:
            X_h = self.n_h(X_h)
            X_l = self.n_l(X_l)
        if self.a:
            X_h = self.a(X_h)
            X_l = self.a(X_l)
        return X_h, X_l


class stride_FirstOctaveConv(nn.Module):
    """Entry octave convolution: splits a single tensor into a (high, low) pair.

    The low-frequency branch is produced by a stride-2 convolution, so
    ``X_l`` has half the spatial resolution of ``X_h``.
    """

    def __init__(self, in_nc, out_nc, kernel_size, alpha=0.5, stride=1, dilation=1, groups=1,
                 bias=True, pad_type='zero', norm_type=None, act_type='prelu', mode='CNA'):
        super(stride_FirstOctaveConv, self).__init__()
        assert mode in ['CNA', 'NAC', 'CNAC'], 'Wrong conv mode [{:s}]'.format(mode)
        padding = get_valid_padding(kernel_size, dilation) if pad_type == 'zero' else 0
        # BUGFIX: forward() references self.h2g_pool when self.stride == 2, but the
        # original had this line commented out, which made that path raise
        # AttributeError.  Restore the pooling layer.
        self.h2g_pool = nn.AvgPool2d(kernel_size=(2, 2), stride=2)
        self.stride = stride
        # h2l uses spatial stride 2 to generate the half-resolution low branch.
        self.h2l = nn.Conv2d(in_nc, int(alpha * out_nc),
                             kernel_size, 2, padding, dilation, groups, bias)
        self.h2h = nn.Conv2d(in_nc, out_nc - int(alpha * out_nc),
                             kernel_size, 1, padding, dilation, groups, bias)
        self.a = act(act_type) if act_type else None
        self.n_h = norm(norm_type, int(out_nc * (1 - alpha))) if norm_type else None
        self.n_l = norm(norm_type, int(out_nc * alpha)) if norm_type else None

    def forward(self, x):
        if self.stride == 2:
            x = self.h2g_pool(x)
        X_h = self.h2h(x)
        X_l = self.h2l(x)
        if self.n_h and self.n_l:
            X_h = self.n_h(X_h)
            X_l = self.n_l(X_l)
        if self.a:
            X_h = self.a(X_h)
            X_l = self.a(X_l)
        return X_h, X_l


class stride_LastOctaveConv(nn.Module):
    """Exit octave convolution: merges a (high, low) pair back into one tensor."""

    def __init__(self, in_nc, out_nc, kernel_size, alpha=0.5, stride=1, dilation=1, groups=1,
                 bias=True, pad_type='zero', norm_type=None, act_type='prelu', mode='CNA'):
        super(stride_LastOctaveConv, self).__init__()
        assert mode in ['CNA', 'NAC', 'CNAC'], 'Wrong conv mode [{:s}]'.format(mode)
        padding = get_valid_padding(kernel_size, dilation) if pad_type == 'zero' else 0
        self.h2g_pool = nn.AvgPool2d(kernel_size=(2, 2), stride=2)
        self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.stride = stride
        self.l2h = nn.Conv2d(int(alpha * in_nc), out_nc,
                             kernel_size, 1, padding, dilation, groups, bias)
        self.h2h = nn.Conv2d(in_nc - int(alpha * in_nc), out_nc,
                             kernel_size, 1, padding, dilation, groups, bias)
        self.a = act(act_type) if act_type else None
        self.n_h = norm(norm_type, out_nc) if norm_type else None

    def forward(self, x):
        X_h, X_l = x
        if self.stride == 2:
            X_h, X_l = self.h2g_pool(X_h), self.h2g_pool(X_l)
        X_h2h = self.h2h(X_h)
        # Bring the low branch up to high resolution before fusing.
        X_l2h = self.upsample(self.l2h(X_l))
        X_h = X_h2h + X_l2h
        if self.n_h:
            X_h = self.n_h(X_h)
        if self.a:
            X_h = self.a(X_h)
        return X_h


class stride_OctaveResBlock(nn.Module):
    '''
    ResNet Block, 3-3 style
    with extra residual scaling used in EDSR
    (Enhanced Deep Residual Networks for Single Image Super-Resolution, CVPRW 17)

    Operates on a (high, low) feature pair; both frequencies get the residual
    connection with the same ``res_scale`` factor.
    NOTE(review): ``conv0`` uses ``OctaveConv`` and the blocks are chained with
    ``sequential`` — both defined elsewhere in this file.
    '''

    def __init__(self, in_nc, mid_nc, out_nc, kernel_size=3, alpha=0.75, stride=1, dilation=1,
                 groups=1, bias=True, pad_type='zero', norm_type=None, act_type='prelu',
                 mode='CNA', res_scale=1):
        super(stride_OctaveResBlock, self).__init__()
        conv0 = OctaveConv(in_nc, mid_nc, kernel_size, alpha, stride, dilation, groups, bias,
                           pad_type, norm_type, act_type, mode)
        if mode == 'CNA':
            act_type = None
        if mode == 'CNAC':  # Residual path: |-CNAC-|
            act_type = None
            norm_type = None
        conv1 = stride_OctaveConv(mid_nc, out_nc, kernel_size, alpha, stride, dilation, groups,
                                  bias, pad_type, norm_type, act_type, mode)
        self.res = sequential(conv0, conv1)
        self.res_scale = res_scale

    def forward(self, x):
        res = self.res(x)
        # Scale the residual on both frequency branches, then add the identity.
        res = (res[0].mul(self.res_scale), res[1].mul(self.res_scale))
        x = (x[0] + res[0], x[1] + res[1])
        return x
最后
以上就是温婉香水最近收集整理的关于stride_OctaveConv的全部内容,更多相关stride_OctaveConv内容请搜索靠谱客的其他文章。
本图文内容来源于网友提供,作为学习参考使用,或来自网络收集整理,版权属于原作者所有。
发表评论 取消回复