@@ -12,17 +12,17 @@ def __init__(
         in_channels: int,
         out_channels: int,
         pool_size: int,
-        use_bathcnorm: bool = True,
+        use_batchnorm: bool = True,
     ):
         super().__init__()
 
         if pool_size == 1:
-            use_bathcnorm = False  # PyTorch does not support BatchNorm for 1x1 shape
+            use_batchnorm = False  # PyTorch does not support BatchNorm for 1x1 shape
 
         self.pool = nn.Sequential(
             nn.AdaptiveAvgPool2d(output_size=(pool_size, pool_size)),
             modules.Conv2dReLU(
-                in_channels, out_channels, (1, 1), use_batchnorm=use_bathcnorm
+                in_channels, out_channels, (1, 1), use_batchnorm=use_batchnorm
             ),
         )
 
@@ -38,7 +38,7 @@ def __init__(
         self,
         in_channels: int,
         sizes: Tuple[int, ...] = (1, 2, 3, 6),
-        use_bathcnorm: bool = True,
+        use_batchnorm: bool = True,
     ):
         super().__init__()
 
@@ -48,7 +48,7 @@ def __init__(
                 in_channels,
                 in_channels // len(sizes),
                 size,
-                use_bathcnorm=use_bathcnorm,
+                use_batchnorm=use_batchnorm,
             )
             for size in sizes
         ]
@@ -73,7 +73,7 @@ def __init__(
         self.psp = PSPModule(
             in_channels=encoder_channels[-1],
             sizes=(1, 2, 3, 6),
-            use_bathcnorm=use_batchnorm,
+            use_batchnorm=use_batchnorm,
         )
 
         self.conv = modules.Conv2dReLU(
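Beyond readability, this rename changes the public keyword: Python matches keyword arguments against the parameter's exact spelling, so before this commit any caller that wrote the natural spelling got a TypeError. A standalone illustration with a hypothetical Block class (not the library's code):

class Block:
    def __init__(self, use_bathcnorm=True):  # misspelled parameter, as before this fix
        self.use_batchnorm = use_bathcnorm

try:
    Block(use_batchnorm=False)  # caller uses the natural spelling
except TypeError as err:
    print(err)  # __init__() got an unexpected keyword argument 'use_batchnorm'

Note the last hunk: PSPDecoder already spelled its own parameter correctly, and only the keyword it passed down to PSPModule carried the typo, which is why only the left-hand side of that assignment changes.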
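For context on the "PyTorch does not support BatchNorm for 1x1 shape" comment in the first hunk: in training mode, nn.BatchNorm2d raises a ValueError when it receives only one value per channel to estimate batch statistics from, which is exactly what the pool_size == 1 branch of the pooling pyramid produces for a batch of size 1. A minimal standalone sketch of that failure (not part of this commit):

import torch
import torch.nn as nn

bn = nn.BatchNorm2d(8)  # modules are in training mode by default

# Batch of 1 with a 1x1 pooled map: a single value per channel, so
# batch statistics are undefined and the forward pass raises.
x = torch.randn(1, 8, 1, 1)
try:
    bn(x)
except ValueError as err:
    print(err)  # Expected more than 1 value per channel when training, ...

Disabling batchnorm for the 1x1 branch sidesteps this failure mode.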