@@ -86,12 +86,12 @@ def __init__(self, num_classes: int):
         super(Proposed, self).__init__()
         resnet34 = torchvision.models.resnet34(pretrained=True)
 
-        self.encode0 = self.double_conv(3, 64)
+        self.initial_conv = self.double_conv(3, 64)
         self.encode1 = resnet34.layer1  # 64
         self.encode2 = resnet34.layer2  # 128
         self.encode3 = resnet34.layer3  # 256
         self.encode4 = resnet34.layer4  # 512
-        self.encode_end = ASPP(512, 512)
+        self.aspp = ASPP(512, 512)
 
         self.upconv3 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2)
         self.decode3 = self.double_conv(512, 256)
@@ -107,8 +107,10 @@ def __init__(self, num_classes: int):
     def double_conv(self, in_channels: int, out_channels: int):
         return nn.Sequential(
             nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1),
+            nn.BatchNorm2d(out_channels),
             nn.ReLU(inplace=True),
             nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1),
+            nn.BatchNorm2d(out_channels),
             nn.ReLU(inplace=True)
         )
 
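For reference, here is a minimal, self-contained sketch of the revised `double_conv` helper (two 3x3 convolutions, each now followed by `BatchNorm2d` and `ReLU`). The surrounding `Proposed` class and the `ASPP` module are assumed to live elsewhere in the repo; the input size below is hypothetical, chosen only to show that spatial dimensions are preserved.

```python
import torch
import torch.nn as nn

def double_conv(in_channels: int, out_channels: int) -> nn.Sequential:
    """Two 3x3 convolutions, each followed by BatchNorm and ReLU (as in the diff)."""
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1),
        nn.BatchNorm2d(out_channels),  # normalization added after the first conv
        nn.ReLU(inplace=True),
        nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1),
        nn.BatchNorm2d(out_channels),  # and after the second conv
        nn.ReLU(inplace=True),
    )

x = torch.randn(1, 3, 224, 224)        # hypothetical RGB input
print(double_conv(3, 64)(x).shape)     # torch.Size([1, 64, 224, 224])
```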
@@ -122,10 +124,10 @@ def make_layer(self, in_channels, out_channels, num_blocks):
 
     def forward(self, x):
         # Encoder
-        encode1 = self.encode1(self.encode0(x))
+        encode1 = self.encode1(self.initial_conv(x))
         encode2 = self.encode2(encode1)
         encode3 = self.encode3(encode2)
-        encode_end = self.encode_end(self.encode4(encode3))
+        encode_end = self.aspp(self.encode4(encode3))
 
         # Decoder
         out = self.decode3(torch.cat([self.upconv3(encode_end), encode3], dim=1))
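To illustrate the decoder wiring after the rename, the sketch below reproduces one step in isolation: the `ASPP` output is upsampled with the transposed convolution, concatenated with the `encode3` skip connection, and fused by `double_conv(512, 256)`. Only the channel counts come from the diff; the spatial sizes and standalone module definitions are assumptions for the shape check.

```python
import torch
import torch.nn as nn

upconv3 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2)
decode3 = nn.Sequential(  # same Conv-BN-ReLU pair as double_conv(512, 256)
    nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1),
    nn.BatchNorm2d(256),
    nn.ReLU(inplace=True),
    nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
    nn.BatchNorm2d(256),
    nn.ReLU(inplace=True),
)

encode_end = torch.randn(1, 512, 8, 8)   # stand-in for the ASPP output (hypothetical size)
encode3 = torch.randn(1, 256, 16, 16)    # stand-in for the encoder skip connection
out = decode3(torch.cat([upconv3(encode_end), encode3], dim=1))
print(out.shape)  # torch.Size([1, 256, 16, 16])
```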