|
6 | 6 |
|
7 | 7 |
|
8 | 8 | class MAnet(SegmentationModel):
|
9 |
| - """MAnet_ : Multi-scale Attention Net. |
10 |
| - The MA-Net can capture rich contextual dependencies based on the attention mechanism, using two blocks: |
11 |
| - Position-wise Attention Block (PAB, which captures the spatial dependencies between pixels in a global view) |
12 |
| - and Multi-scale Fusion Attention Block (MFAB, which captures the channel dependencies between any feature map by |
13 |
| - multi-scale semantic feature fusion) |
| 9 | + """MAnet_ : Multi-scale Attention Net. The MA-Net can capture rich contextual dependencies based on the attention mechanism, |
| 10 | + using two blocks: |
| 11 | + - Position-wise Attention Block (PAB), which captures the spatial dependencies between pixels in a global view |
| 12 | + - Multi-scale Fusion Attention Block (MFAB), which captures the channel dependencies between feature maps by
| 13 | + multi-scale semantic feature fusion |
14 | 14 |
|
15 | 15 | Args:
|
16 | 16 | encoder_name: Name of the classification model that will be used as an encoder (a.k.a backbone)
|
@@ -52,7 +52,7 @@ def __init__(
|
52 | 52 | self,
|
53 | 53 | encoder_name: str = "resnet34",
|
54 | 54 | encoder_depth: int = 5,
|
55 |
| - encoder_weights: str = "imagenet", |
| 55 | + encoder_weights: Optional[str] = "imagenet", |
56 | 56 | decoder_use_batchnorm: bool = True,
|
57 | 57 | decoder_channels: List[int] = (256, 128, 64, 32, 16),
|
58 | 58 | decoder_pab_channels: int = 64,
|
|
0 commit comments