2 files changed in segmentation_models_pytorch/decoders/dpt: +3 −3 lines.
@@ -1,7 +1,7 @@
 import torch
 import torch.nn as nn
 from segmentation_models_pytorch.base.modules import Activation
-from typing import Optional, Sequence
+from typing import Optional, Sequence, Union, Callable


 class ProjectionBlock(nn.Module):
@@ -241,7 +241,7 @@ def __init__(
         self,
         in_channels: int,
         out_channels: int,
-        activation: Optional[str] = None,
+        activation: Optional[Union[str, Callable]] = None,
         kernel_size: int = 3,
         upsampling: float = 2.0,
     ):
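The widened annotation lets callers pass either a registered activation name or a callable. A minimal sketch of the kind of dispatch this permits; it is not the repository's actual Activation implementation, and build_activation and its name table are illustrative:

    from typing import Callable, Optional, Union

    import torch.nn as nn


    def build_activation(activation: Optional[Union[str, Callable]] = None) -> nn.Module:
        # None: leave the head output as raw logits.
        if activation is None:
            return nn.Identity()
        # str: resolve a known activation by name.
        if isinstance(activation, str):
            names = {"sigmoid": nn.Sigmoid, "tanh": nn.Tanh}
            return names[activation]()
        # A class such as nn.ReLU: instantiate it.
        if isinstance(activation, type):
            return activation()
        # Anything else callable (e.g. an nn.Module instance) is used as-is.
        return activation

With this, activation="sigmoid", activation=nn.Sigmoid, and activation=nn.Sigmoid() can all be handled by the same parameter.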
(second file in the diff)

@@ -25,7 +25,7 @@ class DPT(SegmentationModel):

     Note:
         Since this model uses a Vision Transformer backbone, it typically requires a fixed input image size.
-        To handle variable input sizes, you can set `dynamic_img_size=True` in the model initialization 
+        To handle variable input sizes, you can set `dynamic_img_size=True` in the model initialization
         (if supported by the specific `timm` encoder). You can check if an encoder requires fixed size
         using `model.encoder.is_fixed_input_size`, and get the required input dimensions from
         `model.encoder.input_size`, however it's no guarantee that information is available.
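The Note above lends itself to a short usage sketch. This is a hedged example that assumes the smp.DPT constructor forwards dynamic_img_size to the timm encoder as the docstring describes; the encoder name is illustrative:

    import segmentation_models_pytorch as smp

    # dynamic_img_size is only honored if the chosen timm encoder
    # supports it, per the Note above.
    model = smp.DPT(
        encoder_name="tu-vit_base_patch16_224",  # illustrative timm encoder
        dynamic_img_size=True,
    )

    # The Note warns this metadata is not guaranteed to be available.
    if getattr(model.encoder, "is_fixed_input_size", False):
        print("required input size:", model.encoder.input_size)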