@@ -41,3 +41,49 @@ def forward(self, x):
         if(self.acti_func is not None):
             f = self.acti_func(f)
         return f
+
+class DepthSeperableDeconvolutionLayer(nn.Module):
+    """
+    A composite layer with the following components:
+    1x1 convolution -> depthwise transposed convolution -> (batch_norm) -> activation
+    batch norm and activation are optional
+    """
+    def __init__(self, in_channels, out_channels, kernel_size,
+            dim = 3, stride = 1, padding = 0, output_padding = 0,
+            dilation = 1, groups = 1, bias = True,
+            batch_norm = True, acti_func = None):
+        super(DepthSeperableDeconvolutionLayer, self).__init__()
+        self.n_in_chns  = in_channels
+        self.n_out_chns = out_channels
+        self.batch_norm = batch_norm
+        self.acti_func  = acti_func
+        self.groups     = groups
+        assert(dim == 2 or dim == 3)
+        if(dim == 2):
+            self.conv1x1 = nn.Conv2d(in_channels, out_channels,
+                kernel_size = 1, stride = 1, padding = 0, dilation = dilation,
+                groups = self.groups, bias = bias)
+            self.conv = nn.ConvTranspose2d(out_channels, out_channels,
+                kernel_size, stride, padding, output_padding,
+                groups = out_channels, bias = bias, dilation = dilation)
+
+            if(self.batch_norm):
+                self.bn = nn.modules.BatchNorm2d(out_channels)
+        else:
+            self.conv1x1 = nn.Conv3d(in_channels, out_channels,
+                kernel_size = 1, stride = 1, padding = 0, dilation = dilation,
+                groups = self.groups, bias = bias)
+            self.conv = nn.ConvTranspose3d(out_channels, out_channels,
+                kernel_size, stride, padding, output_padding,
+                groups = out_channels, bias = bias, dilation = dilation)
+            if(self.batch_norm):
+                self.bn = nn.modules.BatchNorm3d(out_channels)
+
+    def forward(self, x):
+        f = self.conv1x1(x)
+        f = self.conv(f)
+        if(self.batch_norm):
+            f = self.bn(f)
+        if(self.acti_func is not None):
+            f = self.acti_func(f)
+        return f
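
A minimal usage sketch (not part of the commit): assuming the surrounding module already imports torch.nn as nn, the new layer can be instantiated and applied as below. The channel counts, kernel size, stride, and the nn.LeakyReLU activation are illustrative choices only; the point is that the 1x1 convolution mixes channels while the grouped transposed convolution upsamples each output channel independently, which uses fewer parameters than a full ConvTranspose3d.

import torch
import torch.nn as nn

# Hypothetical example: a 3D depth-separable deconvolution that halves the
# channel count and doubles the spatial resolution of a feature map.
layer = DepthSeperableDeconvolutionLayer(
    in_channels = 32, out_channels = 16, kernel_size = 2,
    dim = 3, stride = 2, batch_norm = True,
    acti_func = nn.LeakyReLU())

x = torch.rand(1, 32, 8, 24, 24)   # N, C, D, H, W
y = layer(x)                       # shape: (1, 16, 16, 48, 48)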