3 files changed in segmentation_models_pytorch: +62 -2 lines changed
segmentation_models_pytorch: Linknet

 class Linknet(EncoderDecoder):
+    """Linknet_ is a fully convolution neural network for fast image semantic segmentation
+
+    Note:
+        This implementation by default has 4 skip connections (original - 3).
+
+    Args:
+        encoder_name: name of classification model (without last dense layers) used as feature
+            extractor to build segmentation model.
+        encoder_weights: one of ``None`` (random initialization), ``imagenet`` (pre-training on ImageNet).
+        decoder_use_batchnorm: if ``True``, ``BatchNormalisation`` layer between ``Conv2D`` and ``Activation`` layers
+            is used.
+        classes: a number of classes for output (output shape - ``(h, w, classes)``).
+        activation: one of [``sigmoid``, ``softmax``, None]
+
+    Returns:
+        ``torch.nn.Module``: **Linknet**
+
+    .. _Linknet:
+        https://arxiv.org/pdf/1707.03718.pdf
+    """

     def __init__(
             self,
@@ -13,7 +33,6 @@ def __init__(
             classes=1,
             activation='sigmoid',
     ):
-
         encoder = get_encoder(
             encoder_name,
             encoder_weights=encoder_weights
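
Since the new docstring is effectively the model's API reference, a short usage sketch may help readers of this diff. This is a minimal, hypothetical example and not part of the commit: it assumes the package imports as segmentation_models_pytorch and that resnet34 is an available encoder name; the constructor keywords mirror the docstring above.

import torch
import segmentation_models_pytorch as smp

# Hypothetical usage sketch: Linknet for binary segmentation.
# 'resnet34' is an assumed encoder name, not taken from this diff.
model = smp.Linknet(
    encoder_name='resnet34',
    encoder_weights='imagenet',
    classes=1,
    activation='sigmoid',
)
model.eval()

# Dummy batch of RGB images: (batch, channels, height, width).
x = torch.rand(2, 3, 256, 256)
with torch.no_grad():
    mask = model(x)  # expected shape (2, 1, 256, 256); depending on the
                     # library version, `activation` may only be applied
                     # by `model.predict(x)` rather than in `forward`.
print(mask.shape)
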
segmentation_models_pytorch: PSPNet

 class PSPNet(EncoderDecoder):
+    """PSPNet_ is a fully convolution neural network for image semantic segmentation
+
+    Args:
+        encoder_name: name of classification model used as feature
+            extractor to build segmentation model.
+        encoder_weights: one of ``None`` (random initialization), ``imagenet`` (pre-training on ImageNet).
+        psp_in_factor: one of 4, 8 and 16. Downsampling rate or in other words backbone depth
+            to construct PSP module on it.
+        psp_out_channels: number of filters in PSP block.
+        psp_use_batchnorm: if ``True``, ``BatchNormalisation`` layer between ``Conv2D`` and ``Activation`` layers
+            is used.
+        psp_aux_output: if ``True`` add auxiliary classification output for encoder training
+        psp_dropout: spatial dropout rate between 0 and 1.
+        classes: a number of classes for output (output shape - ``(batch, classes, h, w)``).
+        activation: one of [``sigmoid``, ``softmax``, None]
+
+    Returns:
+        ``torch.nn.Module``: **PSPNet**
+
+    .. _PSPNet:
+        https://arxiv.org/pdf/1612.01105.pdf
+    """

     def __init__(
             self,
@@ -17,7 +39,6 @@ def __init__(
             dropout=0.2,
             activation='softmax',
     ):
-
         encoder = get_encoder(
             encoder_name,
             encoder_weights=encoder_weights
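
A similar hypothetical sketch for PSPNet follows. Note that the docstring documents psp_dropout while the visible signature uses dropout=0.2, so the exact keyword may depend on the library version; the example below sticks to keywords listed in the docstring. The encoder name and class count are assumptions.

import torch
import segmentation_models_pytorch as smp

# Hypothetical usage sketch: multi-class segmentation with PSPNet.
# 'resnet34' and classes=3 are illustrative assumptions.
model = smp.PSPNet(
    encoder_name='resnet34',
    encoder_weights='imagenet',
    classes=3,
    activation='softmax',
)
model.eval()

# The PSP module pools over fixed grids, so the input size should be
# divisible by the effective downsampling factor; 384 suits the defaults.
x = torch.rand(2, 3, 384, 384)
with torch.no_grad():
    out = model(x)  # expected shape (2, 3, 384, 384)
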
segmentation_models_pytorch: Unet

 class Unet(EncoderDecoder):
+    """Unet_ is a fully convolution neural network for image semantic segmentation
+
+    Args:
+        encoder_name: name of classification model (without last dense layers) used as feature
+            extractor to build segmentation model.
+        encoder_weights: one of ``None`` (random initialization), ``imagenet`` (pre-training on ImageNet).
+        decoder_channels: list of numbers of ``Conv2D`` layer filters in decoder blocks
+        decoder_use_batchnorm: if ``True``, ``BatchNormalisation`` layer between ``Conv2D`` and ``Activation`` layers
+            is used.
+        classes: a number of classes for output (output shape - ``(h, w, classes)``).
+        activation: one of [``sigmoid``, ``softmax``, None]
+        center: if ``True`` add ``Conv2dReLU`` block on encoder head (useful for VGG models)
+
+    Returns:
+        ``torch.nn.Module``: **Unet**
+
+    .. _Unet:
+        https://arxiv.org/pdf/1505.04597
+
+    """

     def __init__(
             self,
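
Finally, a hypothetical sketch for the Unet constructor documented above. The encoder name and the decoder channel widths are illustrative values, not taken from this diff.

import torch
import segmentation_models_pytorch as smp

# Hypothetical usage sketch: Unet with explicit decoder widths.
model = smp.Unet(
    encoder_name='resnet34',                   # assumed encoder name
    encoder_weights='imagenet',
    decoder_use_batchnorm=True,
    decoder_channels=(256, 128, 64, 32, 16),   # illustrative widths
    classes=1,
    activation='sigmoid',
)
model.eval()

x = torch.rand(2, 3, 256, 256)
with torch.no_grad():
    mask = model(x)  # expected shape (2, 1, 256, 256)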