Merged
6 changes: 5 additions & 1 deletion .github/workflows/mypy-type-check.yml
@@ -46,4 +46,8 @@ jobs:
 tiatoolbox/tools \
 tiatoolbox/data \
 tiatoolbox/annotation \
-tiatoolbox/cli/common.py
+tiatoolbox/cli/common.py \
+tiatoolbox/models/__init__.py \
+tiatoolbox/models/models_abc.py \
+tiatoolbox/models/architecture/__init__.py \
+tiatoolbox/models/architecture/utils.py \
7 changes: 7 additions & 0 deletions tests/test_utils.py
@@ -1614,10 +1614,17 @@ def test_fetch_pretrained_weights(tmp_path: Path) -> None:
     fetch_pretrained_weights(model_name="mobilenet_v3_small-pcam", save_path=file_path)
     assert file_path.exists()
     assert file_path.stat().st_size > 0
+    file_path.unlink()
 
     with pytest.raises(ValueError, match="does not exist"):
         fetch_pretrained_weights("abc", file_path)
+
+    # Test save_path is str
+    file_path_str = str(file_path)
+    file_path = fetch_pretrained_weights("mobilenet_v3_small-pcam", file_path_str)
+    assert Path(file_path).exists()
+    assert Path(file_path).stat().st_size > 0
 
 
 def test_imwrite(tmp_path: Path) -> NoReturn:
     """Create a temporary file path."""
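For reference, a minimal sketch of the call pattern the new assertions pin down; the model name comes from the test above, while the save location and the assumption of network access are illustrative:

    from pathlib import Path

    from tiatoolbox.models.architecture import fetch_pretrained_weights

    # A str save_path is now accepted; the resolved pathlib.Path is returned,
    # so callers can keep working with a Path object afterwards.
    weights_path = fetch_pretrained_weights(
        "mobilenet_v3_small-pcam", save_path="/tmp/weights.pth"
    )
    assert isinstance(weights_path, Path)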
37 changes: 25 additions & 12 deletions tiatoolbox/models/architecture/__init__.py
@@ -2,9 +2,9 @@
 
 from __future__ import annotations
 
-import os
+from pathlib import Path
 from pydoc import locate
-from typing import TYPE_CHECKING, Optional, Union
+from typing import TYPE_CHECKING
 
 import torch
 
@@ -13,8 +13,6 @@
 from tiatoolbox.utils import download_data
 
 if TYPE_CHECKING:  # pragma: no cover
-    from pathlib import Path
-
     from tiatoolbox.models.models_abc import IOConfigABC
 
 
@@ -53,10 +51,14 @@ def fetch_pretrained_weights(
 
     if save_path is None:
         file_name = info["url"].split("/")[-1]
-        save_path = rcParam["TIATOOLBOX_HOME"] / "models" / file_name
+        processed_save_path = rcParam["TIATOOLBOX_HOME"] / "models" / file_name
+    elif type(save_path) is str:
+        processed_save_path = Path(save_path)
+    else:
+        processed_save_path = save_path
 
-    download_data(info["url"], save_path=save_path, overwrite=overwrite)
-    return save_path
+    download_data(info["url"], save_path=processed_save_path, overwrite=overwrite)
+    return processed_save_path
 
 
 def get_pretrained_model(
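The save_path handling above is the usual mypy-friendly normalisation idiom: rather than reassigning save_path (typed str | Path | None) in place, a separately named processed_save_path carries the single narrowed type. A standalone sketch of the same idiom, with illustrative names; isinstance() narrows for mypy just as the PR's type(save_path) is str check does:

    from __future__ import annotations

    from pathlib import Path


    def resolve_save_path(save_path: str | Path | None, default: Path) -> Path:
        """Narrow a str | Path | None input down to a concrete Path."""
        if save_path is None:
            return default
        if isinstance(save_path, str):  # narrowed to str on this branch
            return Path(save_path)
        return save_path  # already a Path on this branch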
@@ -129,9 +131,15 @@ def get_pretrained_model(
     info = PRETRAINED_INFO[pretrained_model]
 
     arch_info = info["architecture"]
-    creator = locate(f"tiatoolbox.models.architecture.{arch_info['class']}")
-
-    model = creator(**arch_info["kwargs"])
+    model_class_info = arch_info["class"]
+    model_module_name = str(".".join(model_class_info.split(".")[:-1]))
+    model_name = str(model_class_info.split(".")[-1])
+
+    # Import the module containing the required model class
+    arch_module = locate(f"tiatoolbox.models.architecture.{model_module_name}")
+    # Get the model class from the module
+    model_class = getattr(arch_module, model_name)
+    model = model_class(**arch_info["kwargs"])
     # TODO(TBC): Dictionary of dataset specific or transformation?  # noqa: FIX002,TD003
     if "dataset" in info:
         # ! this is a hack currently, need another PR to clean up
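pydoc.locate() is essentially untyped (typeshed annotates its result as a bare object), so calling the old creator(...) directly does not type-check; locating the module first and pulling the class off it with getattr() sidesteps that, since getattr() yields Any. A self-contained sketch of the split-and-resolve step; the dotted path and the constructor kwargs are illustrative registry values:

    from pydoc import locate

    # Registry "class" entries look like "module.ClassName".
    model_class_info = "vanilla.CNNModel"
    model_module_name, _, model_name = model_class_info.rpartition(".")

    # Import the module, then fetch the class from it.
    arch_module = locate(f"tiatoolbox.models.architecture.{model_module_name}")
    model_class = getattr(arch_module, model_name)
    model = model_class(backbone="resnet18", num_classes=2)  # illustrative kwargs

rpartition(".") is an equivalent, slightly tighter spelling of the split/join pair used in the PR.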
@@ -152,7 +160,12 @@ def get_pretrained_model(
         # !
 
     io_info = info["ioconfig"]
-    creator = locate(f"tiatoolbox.models.engine.{io_info['class']}")
+    io_class_info = io_info["class"]
+    io_module_name = str(".".join(io_class_info.split(".")[:-1]))
+    io_class_name = str(io_class_info.split(".")[-1])
+
+    engine_module = locate(f"tiatoolbox.models.engine.{io_module_name}")
+    engine_class = getattr(engine_module, io_class_name)
 
-    iostate = creator(**io_info["kwargs"])
+    iostate = engine_class(**io_info["kwargs"])
     return model, iostate
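The returned (model, iostate) pair is unchanged, so existing callers keep working; a usage sketch (the first call downloads weights, so network access is assumed):

    from tiatoolbox.models.architecture import get_pretrained_model

    model, ioconfig = get_pretrained_model("mobilenet_v3_small-pcam")
    print(type(model).__name__, type(ioconfig).__name__)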
14 changes: 8 additions & 6 deletions tiatoolbox/models/architecture/utils.py
@@ -3,6 +3,7 @@
 from __future__ import annotations
 
 import sys
+from typing import cast
 
 import numpy as np
 import torch
@@ -45,7 +46,7 @@ def is_torch_compile_compatible() -> bool:
 
 
 def compile_model(
-    model: nn.Module | None = None,
+    model: nn.Module,
     *,
     mode: str = "default",
 ) -> nn.Module:
@@ -97,12 +98,12 @@ def compile_model(
         )
         return model
 
-    return torch.compile(model, mode=mode)  # pragma: no cover
+    return cast(nn.Module, torch.compile(model, mode=mode))  # pragma: no cover
 
 
 def centre_crop(
-    img: np.ndarray | torch.tensor,
-    crop_shape: np.ndarray | torch.tensor,
+    img: np.ndarray | torch.Tensor,
+    crop_shape: np.ndarray | torch.Tensor | tuple,
     data_format: str = "NCHW",
 ) -> np.ndarray | torch.Tensor:
     """A function to center crop image with given crop shape.
@@ -136,8 +137,8 @@
 
 
 def centre_crop_to_shape(
-    x: np.ndarray | torch.tensor,
-    y: np.ndarray | torch.tensor,
+    x: np.ndarray | torch.Tensor,
+    y: np.ndarray | torch.Tensor,
     data_format: str = "NCHW",
 ) -> np.ndarray | torch.Tensor:
     """A function to center crop image to shape.
@@ -200,6 +201,7 @@ def __init__(self: UpSample2x) -> None:
         """Initialize :class:`UpSample2x`."""
         super().__init__()
         # correct way to create constant within module
+        self.unpool_mat: torch.Tensor
         self.register_buffer(
             "unpool_mat",
             torch.from_numpy(np.ones((2, 2), dtype="float32")),
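register_buffer() creates the attribute dynamically through nn.Module's __setattr__, which mypy cannot follow, so the bare annotation self.unpool_mat: torch.Tensor declares the attribute's type without assigning anything at runtime. The idiom in isolation:

    import numpy as np
    import torch
    from torch import nn


    class UpSampleSketch(nn.Module):
        """Illustrative module using the buffer-annotation idiom."""

        def __init__(self) -> None:
            super().__init__()
            # Bare annotation: visible to mypy, no runtime assignment.
            self.unpool_mat: torch.Tensor
            # register_buffer() then creates the attribute and makes it move
            # with .to()/.cuda() without becoming a trainable parameter.
            self.register_buffer(
                "unpool_mat",
                torch.from_numpy(np.ones((2, 2), dtype="float32")),
            )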
33 changes: 22 additions & 11 deletions tiatoolbox/models/models_abc.py
@@ -7,11 +7,9 @@
 
 import torch
 import torch._dynamo
-from torch import device as torch_device
 
 torch._dynamo.config.suppress_errors = True  # skipcq: PYL-W0212  # noqa: SLF001
 
-
 if TYPE_CHECKING:  # pragma: no cover
     from pathlib import Path
 
@@ -57,8 +55,8 @@ def model_to(model: torch.nn.Module, device: str = "cpu") -> torch.nn.Module:
         # DataParallel works only for cuda
         model = torch.nn.DataParallel(model)
 
-    device = torch.device(device)
-    return model.to(device)
+    torch_device = torch.device(device)
+    return model.to(torch_device)
 
 
 class ModelABC(ABC, torch.nn.Module):
@@ -72,7 +70,9 @@ def __init__(self: ModelABC) -> None:
 
     @abstractmethod
     # This is generic abc, else pylint will complain
-    def forward(self: ModelABC, *args: tuple[Any, ...], **kwargs: dict) -> None:
+    def forward(
+        self: ModelABC, *args: tuple[Any, ...], **kwargs: dict
+    ) -> None | torch.Tensor:
         """Torch method, this contains logic for using layers defined in init."""
         ...  # pragma: no cover
 
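Widening the abstract return type to None | torch.Tensor lets concrete subclasses that return tensors type-check against the ABC. A hypothetical minimal subclass; any other abstract members of ModelABC are omitted, so this is illustrative only:

    import torch

    from tiatoolbox.models.models_abc import ModelABC


    class TinyModel(ModelABC):
        """Hypothetical subclass, used only to illustrate the signature."""

        def __init__(self) -> None:
            super().__init__()
            self.linear = torch.nn.Linear(4, 2)

        def forward(self, imgs: torch.Tensor) -> torch.Tensor:
            # Returning a Tensor now satisfies None | torch.Tensor.
            return self.linear(imgs)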
@@ -175,27 +175,38 @@ def postproc_func(self: ModelABC, func: Callable) -> None:
         else:
             self._postproc = func
 
-    def to(self: ModelABC, device: str = "cpu") -> torch.nn.Module:
+    def to(  # type: ignore[override]
+        self: ModelABC,
+        device: str = "cpu",
+        dtype: torch.dtype | None = None,
+        *,
+        non_blocking: bool = False,
+    ) -> ModelABC | torch.nn.DataParallel[ModelABC]:
         """Transfers model to cpu/gpu.
 
         Args:
             model (torch.nn.Module):
                 PyTorch defined model.
             device (str):
                 Transfers model to the specified device. Default is "cpu".
+            dtype (:class:`torch.dtype`): the desired floating point or complex dtype of
+                the parameters and buffers in this module.
+            non_blocking (bool): When set, it tries to convert/move asynchronously
+                with respect to the host if possible, e.g., moving CPU Tensors with
+                pinned memory to CUDA devices.
 
         Returns:
-            torch.nn.Module:
+            torch.nn.Module | torch.nn.DataParallel:
                 The model after being moved to cpu/gpu.
 
         """
-        device = torch_device(device)
-        model = super().to(device)
+        torch_device = torch.device(device)
+        model = super().to(torch_device, dtype=dtype, non_blocking=non_blocking)
 
         # If the target device is torch.cuda and more
         # than one GPU is available, use DataParallel
-        if device.type == "cuda" and torch.cuda.device_count() > 1:
-            model = torch.nn.DataParallel(model)  # pragma: no cover
+        if torch_device.type == "cuda" and torch.cuda.device_count() > 1:
+            return torch.nn.DataParallel(model)  # pragma: no cover
 
         return model
 
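In use, the override now accepts the same dtype and non_blocking arguments as torch.nn.Module.to(), and the widened return annotation records that a DataParallel wrapper may come back on multi-GPU hosts. A sketch, reusing the pretrained model from earlier (network access assumed for the download):

    import torch

    from tiatoolbox.models.architecture import get_pretrained_model

    model, _ = get_pretrained_model("mobilenet_v3_small-pcam")
    model = model.to("cpu")  # plain move, returns the module itself
    model = model.to("cpu", dtype=torch.float32, non_blocking=False)
    # On a host with more than one CUDA device, model.to("cuda") would instead
    # return a torch.nn.DataParallel wrapper, per the annotation above.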