@@ -64,7 +64,7 @@ def __init__(
6464 bias : bool, default=False,
6565 Include bias term in the convolution.
6666 attention : str, default=None
67- Attention method. One of: "se", "scse", "gc", "eca", None
67+ Attention method. One of: "se", "scse", "gc", "eca", "msca", None
6868 preattend : bool, default=False
6969 If True, Attention is applied at the beginning of forward pass.
7070 """
@@ -186,7 +186,7 @@ def __init__(
186186 bias : bool, default=False,
187187 Include bias term in the convolution.
188188 attention : str, default=None
189- Attention method. One of: "se", "scse", "gc", "eca", None
189+ Attention method. One of: "se", "scse", "gc", "eca", "msca", None
190190 preattend : bool, default=False
191191 If True, Attention is applied at the beginning of forward pass.
192192 """
@@ -336,7 +336,7 @@ def __init__(
336336 kernel_size : int, default=3
337337 The size of the convolution kernel.
338338 attention : str, default=None
339- Attention method. One of: "se", "scse", "gc", "eca", None
339+ Attention method. One of: "se", "scse", "gc", "eca", "msca", None
340340 preattend : bool, default=False
341341 If True, Attention is applied at the beginning of forward pass.
342342 """
@@ -375,7 +375,7 @@ def __init__(
375375 self.act2 = Activation(activation)
376376
377377 def forward_features(self, x: torch.Tensor) -> torch.Tensor:
378- """Forward pass with pre-activation ."""
378+ """Forward pass."""
379379 if self.preattend:
380380 x = self.att(x)
381381
@@ -394,7 +394,7 @@ def forward_features(self, x: torch.Tensor) -> torch.Tensor:
394394 return x
395395
396396 def forward_features_preact(self, x: torch.Tensor) -> torch.Tensor:
397- """Forward pass."""
397+ """Forward pass with pre-activation."""
398398 if self.preattend:
399399 x = self.att(x)
400400
@@ -459,7 +459,7 @@ def __init__(
459459 kernel_size : int, default=3
460460 The size of the convolution kernel.
461461 attention : str, default=None
462- Attention method. One of: "se", "scse", "gc", "eca", None
462+ Attention method. One of: "se", "scse", "gc", "eca", "msca", None
463463 preattend : bool, default=False
464464 If True, Attention is applied at the beginning of forward pass.
465465 """
@@ -615,7 +615,7 @@ def __init__(
615615 preactivate : bool, default=False
616616 If True, normalization will be applied before convolution.
617617 attention : str, default=None
618- Attention method. One of: "se", "scse", "gc", "eca", None
618+ Attention method. One of: "se", "scse", "gc", "eca", "msca", None
619619 preattend : bool, default=False
620620 If True, Attention is applied at the beginning of forward pass.
621621 """
@@ -750,7 +750,7 @@ def __init__(
750750 kernel_size : int, default=3
751751 The size of the convolution kernel.
752752 attention : str, default=None
753- Attention method. One of: "se", "scse", "gc", "eca", None
753+ Attention method. One of: "se", "scse", "gc", "eca", "msca", None
754754 preattend : bool, default=False
755755 If True, Attention is applied at the beginning of forward pass.
756756 """
@@ -880,7 +880,7 @@ def __init__(
880880 bias : bool, default=False,
881881 Include bias term in the convolution.
882882 attention : str, default=None
883- Attention method. One of: "se", "scse", "gc", "eca", None
883+ Attention method. One of: "se", "scse", "gc", "eca", "msca", None
884884 preattend : bool, default=False
885885 If True, Attention is applied at the beginning of forward pass.
886886 """
0 commit comments