diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml index 1e5e48f28..d3cfe4601 100644 --- a/.github/workflows/docker-publish.yml +++ b/.github/workflows/docker-publish.yml @@ -15,10 +15,6 @@ jobs: fail-fast: true matrix: include: - - dockerfile: ./docker/3.9/Debian/Dockerfile - mtag: py3.9-debian - - dockerfile: ./docker/3.9/Ubuntu/Dockerfile - mtag: py3.9-ubuntu - dockerfile: ./docker/3.10/Debian/Dockerfile mtag: py3.10-debian - dockerfile: ./docker/3.10/Ubuntu/Dockerfile @@ -31,7 +27,11 @@ jobs: mtag: py3.12-debian - dockerfile: ./docker/3.12/Ubuntu/Dockerfile mtag: py3.12-ubuntu - - dockerfile: ./docker/3.12/Ubuntu/Dockerfile + - dockerfile: ./docker/3.13/Debian/Dockerfile + mtag: py3.13-debian + - dockerfile: ./docker/3.13/Ubuntu/Dockerfile + mtag: py3.13-ubuntu + - dockerfile: ./docker/3.13/Ubuntu/Dockerfile mtag: latest permissions: contents: read diff --git a/.github/workflows/mypy-type-check.yml b/.github/workflows/mypy-type-check.yml index 987db45ad..7ec585482 100644 --- a/.github/workflows/mypy-type-check.yml +++ b/.github/workflows/mypy-type-check.yml @@ -16,7 +16,7 @@ jobs: strategy: matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] + python-version: ["3.10", "3.11", "3.12", "3.13"] steps: diff --git a/.github/workflows/pip-install.yml b/.github/workflows/pip-install.yml index b543cdfee..9b0e9bcd6 100644 --- a/.github/workflows/pip-install.yml +++ b/.github/workflows/pip-install.yml @@ -14,7 +14,7 @@ jobs: strategy: fail-fast: true matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] + python-version: ["3.10", "3.11", "3.12", "3.13"] os: [ubuntu-24.04, windows-latest, macos-latest] steps: - name: Set up Python ${{ matrix.python-version }} diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index 8b96f5bc0..465b0c3b8 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -17,7 +17,7 @@ jobs: strategy: fail-fast: true matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] + python-version: ["3.10", "3.11", "3.12", "3.13"] steps: - uses: actions/checkout@v3 diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 5b2d435b8..eaaf6f3d7 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -98,7 +98,7 @@ Before you submit a pull request, check that it meets these guidelines: 1. The pull request should include tests. 2. If the pull request adds functionality, the docs should be updated. Put your new functionality into a function with a docstring, and add the feature to the pull request description. -3. The pull request should work for Python 3.9, 3.10, 3.11, and 3.12, and for PyPy. Check https://github.com/TissueImageAnalytics/tiatoolbox/actions/workflows/python-package.yml and make sure that the tests pass for all supported Python versions. +3. The pull request should work for Python 3.10, 3.11, 3.12, and 3.13, and for PyPy. Check https://github.com/TissueImageAnalytics/tiatoolbox/actions/workflows/python-package.yml and make sure that the tests pass for all supported Python versions. Tips ---- diff --git a/README.md b/README.md index 1e517db0d..0745e936f 100644 --- a/README.md +++ b/README.md @@ -106,7 +106,7 @@ Prepare a computer as a convenient platform for further development of the Pytho 5. 
Create virtual environment for TIAToolbox using ```sh - $ conda create -n tiatoolbox-dev python=3.9 # select version of your choice + $ conda create -n tiatoolbox-dev python=3.10 # select version of your choice $ conda activate tiatoolbox-dev $ pip install -r requirements/requirements_dev.txt ``` diff --git a/benchmarks/annotation_store.ipynb b/benchmarks/annotation_store.ipynb index 6cf6cf8e6..4646a04e9 100644 --- a/benchmarks/annotation_store.ipynb +++ b/benchmarks/annotation_store.ipynb @@ -355,7 +355,7 @@ " capsize=capsize,\n", " **kwargs,\n", " )\n", - " for i, (runs, c) in enumerate(zip(experiments, color)):\n", + " for i, (runs, c) in enumerate(zip(experiments, color, strict=False)):\n", " plt.text(\n", " i,\n", " min(runs),\n", @@ -2418,7 +2418,7 @@ " )\n", " total = np.sum(counts)\n", " frequencies = dict.fromkeys(range(256), 0)\n", - " for v, x in zip(values, counts):\n", + " for v, x in zip(values, counts, strict=False):\n", " frequencies[v] = x / total\n", " frequency_array = np.array(list(frequencies.values()))\n", " epsilon = 1e-16\n", diff --git a/docs/installation.rst b/docs/installation.rst index 808517739..f6e39fc16 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -55,7 +55,7 @@ MacPorts Installing Stable Release ========================= -Please note that TIAToolbox is tested for Python versions 3.9, 3.10, 3.11, and 3.12. +Please note that TIAToolbox is tested for Python versions 3.10, 3.11, 3.12 and 3.13. Recommended ----------- diff --git a/examples/full-pipelines/slide-graph.ipynb b/examples/full-pipelines/slide-graph.ipynb index 9ded87d9e..965d77eec 100644 --- a/examples/full-pipelines/slide-graph.ipynb +++ b/examples/full-pipelines/slide-graph.ipynb @@ -133,7 +133,7 @@ "import warnings\n", "from collections import OrderedDict\n", "from pathlib import Path\n", - "from typing import TYPE_CHECKING, Callable\n", + "from typing import TYPE_CHECKING\n", "\n", "# Third party imports\n", "import joblib\n", @@ -192,7 +192,7 @@ ")\n", "\n", "if TYPE_CHECKING: # pragma: no cover\n", - " from collections.abc import Iterator\n", + " from collections.abc import Callable, Iterator\n", "\n", "warnings.filterwarnings(\"ignore\")\n", "mpl.rcParams[\"figure.dpi\"] = 300 # for high resolution figure in notebook" @@ -394,7 +394,7 @@ "patient_uids = patient_uids[sel]\n", "patient_labels = patient_labels_[sel]\n", "assert len(patient_uids) == len(patient_labels) # noqa: S101\n", - "clinical_info = OrderedDict(list(zip(patient_uids, patient_labels)))\n", + "clinical_info = OrderedDict(list(zip(patient_uids, patient_labels, strict=False)))\n", "\n", "# Retrieve patient code of each WSI, this is based on TCGA barcodes:\n", "# https://docs.gdc.cancer.gov/Encyclopedia/pages/TCGA_Barcode/\n", @@ -412,7 +412,7 @@ "wsi_names = np.array(wsi_names)[sel]\n", "wsi_labels = np.array(wsi_labels)[sel]\n", "\n", - "label_df = list(zip(wsi_names, wsi_labels))\n", + "label_df = list(zip(wsi_names, wsi_labels, strict=False))\n", "label_df = pd.DataFrame(label_df, columns=[\"WSI-CODE\", \"LABEL\"])" ] }, @@ -529,9 +529,9 @@ "\n", " splits.append(\n", " {\n", - " \"train\": list(zip(train_x, train_y)),\n", - " \"valid\": list(zip(valid_x, valid_y)),\n", - " \"test\": list(zip(test_x, test_y)),\n", + " \"train\": list(zip(train_x, train_y, strict=False)),\n", + " \"valid\": list(zip(valid_x, valid_y, strict=False)),\n", + " \"test\": list(zip(test_x, test_y, strict=False)),\n", " },\n", " )\n", " return splits" @@ -2025,7 +2025,7 @@ " output = [np.split(v, batch_size, axis=0) for v in 
output]\n", " # pairing such that it will be\n", " # N batch size x H head list\n", - " output = list(zip(*output))\n", + " output = list(zip(*output, strict=False))\n", " step_output.extend(output)\n", " pbar.update()\n", " pbar.close()\n", @@ -2042,7 +2042,7 @@ " ):\n", " # Expand the list of N dataset size x H heads\n", " # back to a list of H Head each with N samples.\n", - " output = list(zip(*step_output))\n", + " output = list(zip(*step_output, strict=False))\n", " logit, true = output\n", " logit = np.squeeze(np.array(logit))\n", " true = np.squeeze(np.array(true))\n", diff --git a/examples/inference-pipelines/slide-graph.ipynb b/examples/inference-pipelines/slide-graph.ipynb index 4d2c62de3..e102799d5 100644 --- a/examples/inference-pipelines/slide-graph.ipynb +++ b/examples/inference-pipelines/slide-graph.ipynb @@ -219,7 +219,7 @@ "import shutil\n", "import warnings\n", "from pathlib import Path\n", - "from typing import TYPE_CHECKING, Callable\n", + "from typing import TYPE_CHECKING\n", "\n", "# Third party imports\n", "import joblib\n", @@ -260,6 +260,8 @@ "from tiatoolbox.wsicore.wsireader import WSIReader\n", "\n", "if TYPE_CHECKING:\n", + " from collections.abc import Callable\n", + "\n", " from tiatoolbox.wsicore.wsimeta import Resolution, Units\n", "\n", "warnings.filterwarnings(\"ignore\")\n", diff --git a/pre-commit/notebook_markdown_format.py b/pre-commit/notebook_markdown_format.py index 991241b4a..4195fb36f 100644 --- a/pre-commit/notebook_markdown_format.py +++ b/pre-commit/notebook_markdown_format.py @@ -57,6 +57,7 @@ def main(files: list[Path]) -> None: for cell, formatted_cell in zip( notebook["cells"], formatted_notebook["cells"], + strict=False, ) ) if not changed: diff --git a/pre-commit/requirements_consistency.py b/pre-commit/requirements_consistency.py index 4f8d4f442..ed57f1f9e 100644 --- a/pre-commit/requirements_consistency.py +++ b/pre-commit/requirements_consistency.py @@ -220,7 +220,7 @@ def in_common_consistent(all_requirements: dict[Path, dict[str, Requirement]]) - ] # Unzip the specs to get a list of constraints and versions - _, constraints, versions = zip(*zipped_file_specs) + _, constraints, versions = zip(*zipped_file_specs, strict=False) # Check that the constraints and versions are the same across files formatted_reqs = [f"{c}{v} ({p.name})" for p, c, v in zipped_file_specs] diff --git a/pyproject.toml b/pyproject.toml index 2d29ca7c8..aec86265d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -136,8 +136,8 @@ line-length = 88 # Allow unused variables when underscore-prefixed. lint.dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$" -# Minimum Python version 3.9. -target-version = "py39" +# Minimum Python version 3.10. +target-version = "py310" [tool.ruff.lint.mccabe] # Unlike Flake8, default to a complexity level of 10. 
@@ -174,4 +174,4 @@ skip-magic-trailing-comma = false [tool.mypy] ignore_missing_imports = true -python_version = 3.9 +python_version = "3.10" diff --git a/requirements/requirements.txt b/requirements/requirements.txt index fd85350ef..6fce32280 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -1,5 +1,5 @@ # torch installation ---extra-index-url https://download.pytorch.org/whl/cu118; sys_platform != "darwin" +--extra-index-url https://download.pytorch.org/whl/cu126; sys_platform != "darwin" aiohttp>=3.8.1 albumentations>=1.3.0 bokeh>=3.1.1, <3.6.0 @@ -8,13 +8,13 @@ defusedxml>=0.7.1 filelock>=3.9.0 flask>=2.2.2 flask-cors>=4.0.0 -glymur>=0.12.7, < 0.14 # 0.14 is not compatible with python3.9 +glymur>=0.12.7 imagecodecs>=2022.9.26 joblib>=1.1.1 jupyterlab>=3.5.2 matplotlib>=3.6.2 numba>=0.57.0 -numpy>=1.23.5, <2.0.0 +numpy>=2.0.0 opencv-python>=4.6.0 openslide-bin>=4.0.0.2 openslide-python>=1.4.0 diff --git a/setup.py b/setup.py index 31d30e5e9..cbc17fda4 100644 --- a/setup.py +++ b/setup.py @@ -34,16 +34,16 @@ setup( author="TIA Centre", author_email="tia@dcs.warwick.ac.uk", - python_requires=">=3.9, <3.13", + python_requires=">=3.10, <3.14", classifiers=[ "Development Status :: 2 - Pre-Alpha", "Intended Audience :: Developers", "Natural Language :: English", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", ], description="Computational pathology toolbox developed by TIA Centre.", dependency_links=dependency_links, diff --git a/tests/conftest.py b/tests/conftest.py index 2b7de0fd6..092913ea9 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -6,7 +6,7 @@ import shutil import time from pathlib import Path -from typing import Callable +from typing import TYPE_CHECKING import pytest import torch @@ -16,6 +16,9 @@ from tiatoolbox.data import _fetch_remote_sample from tiatoolbox.utils.env_detection import has_gpu, running_on_ci +if TYPE_CHECKING: + from collections.abc import Callable + # ------------------------------------------------------------------------------------- # Generate Parameterized Tests # ------------------------------------------------------------------------------------- diff --git a/tests/models/test_arch_mapde.py b/tests/models/test_arch_mapde.py index 4ec404826..c4f40be9e 100644 --- a/tests/models/test_arch_mapde.py +++ b/tests/models/test_arch_mapde.py @@ -1,6 +1,6 @@ """Unit test package for SCCNN.""" -from typing import Callable +from collections.abc import Callable import numpy as np import torch diff --git a/tests/models/test_arch_micronet.py b/tests/models/test_arch_micronet.py index e7aa23d5b..83cc1b597 100644 --- a/tests/models/test_arch_micronet.py +++ b/tests/models/test_arch_micronet.py @@ -1,7 +1,7 @@ """Unit test package for MicroNet.""" +from collections.abc import Callable from pathlib import Path -from typing import Callable import numpy as np import pytest diff --git a/tests/models/test_arch_nuclick.py b/tests/models/test_arch_nuclick.py index b84516125..84b102134 100644 --- a/tests/models/test_arch_nuclick.py +++ b/tests/models/test_arch_nuclick.py @@ -1,7 +1,7 @@ """Unit test package for NuClick.""" +from collections.abc import Callable from pathlib import Path -from typing import Callable import numpy as np import pytest diff --git a/tests/models/test_arch_sccnn.py b/tests/models/test_arch_sccnn.py index 
16c99cc49..2629304af 100644 --- a/tests/models/test_arch_sccnn.py +++ b/tests/models/test_arch_sccnn.py @@ -1,6 +1,6 @@ """Unit test package for SCCNN.""" -from typing import Callable +from collections.abc import Callable import numpy as np import torch diff --git a/tests/models/test_arch_unet.py b/tests/models/test_arch_unet.py index 2ac231c7c..63f20c89d 100644 --- a/tests/models/test_arch_unet.py +++ b/tests/models/test_arch_unet.py @@ -1,7 +1,7 @@ """Unit test package for Unet.""" +from collections.abc import Callable from pathlib import Path -from typing import Callable import numpy as np import pytest diff --git a/tests/models/test_feature_extractor.py b/tests/models/test_feature_extractor.py index 9ceb549be..b9c5799f9 100644 --- a/tests/models/test_feature_extractor.py +++ b/tests/models/test_feature_extractor.py @@ -1,8 +1,8 @@ """Test for feature extractor.""" import shutil +from collections.abc import Callable from pathlib import Path -from typing import Callable import numpy as np import pytest diff --git a/tests/models/test_hovernet.py b/tests/models/test_hovernet.py index 2567018b8..34ddab2c2 100644 --- a/tests/models/test_hovernet.py +++ b/tests/models/test_hovernet.py @@ -1,6 +1,6 @@ """Unit test package for HoVerNet.""" -from typing import Callable +from collections.abc import Callable import numpy as np import pytest diff --git a/tests/models/test_hovernetplus.py b/tests/models/test_hovernetplus.py index 1377fdd82..f336ef14f 100644 --- a/tests/models/test_hovernetplus.py +++ b/tests/models/test_hovernetplus.py @@ -1,6 +1,6 @@ """Unit test package for HoVerNet+.""" -from typing import Callable +from collections.abc import Callable import torch diff --git a/tests/models/test_multi_task_segmentor.py b/tests/models/test_multi_task_segmentor.py index 8b234ac55..3cec30121 100644 --- a/tests/models/test_multi_task_segmentor.py +++ b/tests/models/test_multi_task_segmentor.py @@ -6,8 +6,8 @@ import gc import multiprocessing import shutil +from collections.abc import Callable from pathlib import Path -from typing import Callable import joblib import numpy as np diff --git a/tests/models/test_nucleus_instance_segmentor.py b/tests/models/test_nucleus_instance_segmentor.py index 2956849fb..f657a6347 100644 --- a/tests/models/test_nucleus_instance_segmentor.py +++ b/tests/models/test_nucleus_instance_segmentor.py @@ -5,8 +5,8 @@ # ! 
The garbage collector import gc import shutil +from collections.abc import Callable from pathlib import Path -from typing import Callable import joblib import numpy as np diff --git a/tests/models/test_patch_predictor.py b/tests/models/test_patch_predictor.py index f7da54b25..c878d64ff 100644 --- a/tests/models/test_patch_predictor.py +++ b/tests/models/test_patch_predictor.py @@ -5,7 +5,7 @@ import copy import shutil from pathlib import Path -from typing import Callable +from typing import TYPE_CHECKING import cv2 import numpy as np @@ -28,6 +28,9 @@ from tiatoolbox.utils.misc import select_device from tiatoolbox.wsicore.wsireader import WSIReader +if TYPE_CHECKING: + from collections.abc import Callable + ON_GPU = toolbox_env.has_gpu() RNG = np.random.default_rng() # Numpy Random Generator diff --git a/tests/models/test_semantic_segmentation.py b/tests/models/test_semantic_segmentation.py index 01776b800..4a6a90800 100644 --- a/tests/models/test_semantic_segmentation.py +++ b/tests/models/test_semantic_segmentation.py @@ -9,7 +9,7 @@ import multiprocessing import shutil from pathlib import Path -from typing import Callable +from typing import TYPE_CHECKING import numpy as np import pytest @@ -35,6 +35,9 @@ from tiatoolbox.utils.misc import select_device from tiatoolbox.wsicore.wsireader import WSIReader +if TYPE_CHECKING: + from collections.abc import Callable + ON_GPU = toolbox_env.has_gpu() # The value is based on 2 TitanXP each with 12GB BATCH_SIZE = 1 if not ON_GPU else 16 diff --git a/tests/test_annotation_stores.py b/tests/test_annotation_stores.py index 8d0d651c0..3c0189286 100644 --- a/tests/test_annotation_stores.py +++ b/tests/test_annotation_stores.py @@ -6,11 +6,11 @@ import pickle import sqlite3 import sys -from collections.abc import Generator +from collections.abc import Callable, Generator from itertools import repeat, zip_longest from pathlib import Path from timeit import timeit -from typing import TYPE_CHECKING, Callable, ClassVar +from typing import TYPE_CHECKING, ClassVar import numpy as np import pandas as pd diff --git a/tests/test_annotation_tilerendering.py b/tests/test_annotation_tilerendering.py index abfdb495a..4e373d16f 100644 --- a/tests/test_annotation_tilerendering.py +++ b/tests/test_annotation_tilerendering.py @@ -7,7 +7,7 @@ from __future__ import annotations from pathlib import Path -from typing import Callable +from typing import TYPE_CHECKING import matplotlib.pyplot as plt import numpy as np @@ -26,6 +26,9 @@ from tiatoolbox.utils.visualization import AnnotationRenderer, _find_minimum_mpp_sf from tiatoolbox.wsicore import wsireader +if TYPE_CHECKING: + from collections.abc import Callable + RNG = np.random.default_rng(0) # Numpy Random Generator diff --git a/tests/test_dsl.py b/tests/test_dsl.py index 53dab3630..66c49717e 100644 --- a/tests/test_dsl.py +++ b/tests/test_dsl.py @@ -5,7 +5,7 @@ import json import sqlite3 from numbers import Number -from typing import Callable, ClassVar +from typing import TYPE_CHECKING, ClassVar import pytest @@ -19,6 +19,9 @@ py_regexp, ) +if TYPE_CHECKING: + from collections.abc import Callable + BINARY_OP_STRINGS = [ "+", "-", diff --git a/tests/test_magic.py b/tests/test_magic.py index c5ecfe0c9..6bf337a9b 100644 --- a/tests/test_magic.py +++ b/tests/test_magic.py @@ -2,9 +2,10 @@ import sqlite3 import zipfile +from collections.abc import Callable from io import BytesIO from pathlib import Path -from typing import BinaryIO, Callable +from typing import BinaryIO import pytest diff --git a/tests/test_tiffreader.py 
b/tests/test_tiffreader.py index 9fd38da4e..cc956254a 100644 --- a/tests/test_tiffreader.py +++ b/tests/test_tiffreader.py @@ -1,6 +1,6 @@ """Test TIFFWSIReader.""" -from typing import Callable +from collections.abc import Callable import pytest from defusedxml import ElementTree diff --git a/tests/test_tileserver.py b/tests/test_tileserver.py index c22f31281..7c8bd2395 100644 --- a/tests/test_tileserver.py +++ b/tests/test_tileserver.py @@ -6,7 +6,7 @@ import logging import urllib from pathlib import Path, PureWindowsPath -from typing import TYPE_CHECKING, Callable, NoReturn +from typing import TYPE_CHECKING, NoReturn import joblib import numpy as np @@ -24,6 +24,8 @@ from tiatoolbox.wsicore import WSIReader if TYPE_CHECKING: + from collections.abc import Callable + from flask.testing import FlaskClient RNG = np.random.default_rng(0) # Numpy Random Generator diff --git a/tests/test_type_hints.py b/tests/test_type_hints.py new file mode 100644 index 000000000..2d8317551 --- /dev/null +++ b/tests/test_type_hints.py @@ -0,0 +1,60 @@ +"""Tests for tiatoolbox.type_hints module.""" + +from collections.abc import Callable +from typing import Literal + +import pytest + +from tiatoolbox import type_hints + + +def test_aliases_exist() -> None: + """Ensure all expected type aliases are defined in type_hints.""" + expected_aliases = [ + "JSON", + "NumPair", + "IntPair", + "Resolution", + "Units", + "Bounds", + "IntBounds", + "Geometry", + "Properties", + "QueryGeometry", + "CallablePredicate", + "CallableSelect", + "Predicate", + "Select", + "NumpyPadLiteral", + ] + for alias in expected_aliases: + assert hasattr(type_hints, alias), f"Missing alias: {alias}" + + +def test_units_is_literal() -> None: + """Check that Units alias is a Literal type.""" + assert isinstance(type_hints.Units, type(Literal["mpp"])) + + +def test_callable_predicate_signature() -> None: + """Verify CallablePredicate expects Properties and returns bool.""" + alias = type_hints.CallablePredicate + # Check that it's a typing Callable + assert getattr(alias, "__origin__", None) is Callable + # Check argument and return types + args = alias.__args__ + assert len(args) == 2 + assert args[1] is bool + + +@pytest.mark.parametrize("alias", ["Bounds", "IntBounds"]) +def test_bounds_alias_is_tuple(alias: str) -> None: + """Check that Bounds and IntBounds are tuple type hints.""" + assert "tuple" in str(getattr(type_hints, alias)) + + +def test_numpy_pad_literal_contains_expected_values() -> None: + """Ensure NumpyPadLiteral includes common numpy pad modes.""" + modes = ["constant", "reflect", "wrap"] + for mode in modes: + assert mode in type_hints.NumpyPadLiteral.__args__ diff --git a/tests/test_utils.py b/tests/test_utils.py index 4bfefa81c..0a96e2884 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -803,7 +803,7 @@ def test_fuzz_bounds2locsize() -> None: for _ in range(1000): size = (rng.integers(-1000, 1000), rng.integers(-1000, 1000)) location = (rng.integers(-1000, 1000), rng.integers(-1000, 1000)) - bounds = (*location, *(sum(x) for x in zip(size, location))) + bounds = (*location, *(sum(x) for x in zip(size, location, strict=False))) assert utils.transforms.bounds2locsize(bounds)[1] == pytest.approx(size) @@ -1137,7 +1137,7 @@ def test_parse_cv2_interpolaton() -> None: cases = [str.upper, str.lower, str.capitalize] mode_strings = ["cubic", "linear", "area", "lanczos"] mode_enums = [cv2.INTER_CUBIC, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_LANCZOS4] - for string, cv2_enum in zip(mode_strings, mode_enums): + for 
string, cv2_enum in zip(mode_strings, mode_enums, strict=False): for case in cases: assert utils.misc.parse_cv2_interpolaton(case(string)) == cv2_enum assert utils.misc.parse_cv2_interpolaton(cv2_enum) == cv2_enum diff --git a/tests/test_wsireader.py b/tests/test_wsireader.py index c8c69c84e..315d92749 100644 --- a/tests/test_wsireader.py +++ b/tests/test_wsireader.py @@ -3,13 +3,14 @@ from __future__ import annotations import copy +import itertools import json import logging import re import shutil from copy import deepcopy from pathlib import Path -from typing import TYPE_CHECKING, Callable +from typing import TYPE_CHECKING from unittest.mock import patch import cv2 @@ -50,7 +51,7 @@ ) if TYPE_CHECKING: # pragma: no cover - from collections.abc import Iterable + from collections.abc import Callable, Iterable import requests from openslide import OpenSlide @@ -135,7 +136,7 @@ def strictly_increasing(sequence: Iterable) -> bool: bool: True if strictly increasing. """ - return all(a < b for a, b in zip(sequence, sequence[1:])) + return all(a < b for a, b in itertools.pairwise(sequence)) def strictly_decreasing(sequence: Iterable) -> bool: @@ -149,7 +150,7 @@ def strictly_decreasing(sequence: Iterable) -> bool: bool: True if strictly decreasing. """ - return all(a > b for a, b in zip(sequence, sequence[1:])) + return all(a > b for a, b in itertools.pairwise(sequence)) def read_rect_objective_power(wsi: WSIReader, location: IntPair, size: IntPair) -> None: @@ -557,6 +558,7 @@ def test_find_optimal_level_and_downsample_mpp(sample_ndpi: Path) -> None: mpps, expected_levels, expected_scales, + strict=False, ): read_level, post_read_scale_factor = wsi._find_optimal_level_and_downsample( mpp, @@ -573,7 +575,9 @@ def test_find_optimal_level_and_downsample_power(sample_ndpi: Path) -> None: objective_powers = [20, 10, 5, 2.5, 1.25] expected_levels = [0, 1, 2, 3, 4] - for objective_power, expected_level in zip(objective_powers, expected_levels): + for objective_power, expected_level in zip( + objective_powers, expected_levels, strict=False + ): read_level, post_read_scale_factor = wsi._find_optimal_level_and_downsample( objective_power, "power", @@ -1498,7 +1502,7 @@ def test_tissue_mask_morphological(sample_svs: Path) -> None: resolutions = [5, 10] units = ["power", "mpp"] scale_fns = [lambda x: x * 2, lambda x: 32 / x] - for unit, scaler in zip(units, scale_fns): + for unit, scaler in zip(units, scale_fns, strict=False): for resolution in resolutions: mask = wsi.tissue_mask( method="morphological", diff --git a/tiatoolbox/annotation/dsl.py b/tiatoolbox/annotation/dsl.py index cbf9bf7d5..7caf9cba3 100644 --- a/tiatoolbox/annotation/dsl.py +++ b/tiatoolbox/annotation/dsl.py @@ -61,9 +61,9 @@ import json import operator import re +from collections.abc import Callable from dataclasses import dataclass from numbers import Number -from typing import Callable from typing_extensions import TypedDict diff --git a/tiatoolbox/annotation/storage.py b/tiatoolbox/annotation/storage.py index ae65ccede..012ea23d2 100644 --- a/tiatoolbox/annotation/storage.py +++ b/tiatoolbox/annotation/storage.py @@ -41,6 +41,7 @@ from abc import ABC, abstractmethod from collections import defaultdict from collections.abc import ( + Callable, Generator, ItemsView, Iterable, @@ -56,7 +57,6 @@ IO, TYPE_CHECKING, Any, - Callable, ClassVar, TypeVar, cast, @@ -746,7 +746,7 @@ def append_many( if keys: result.extend( self.append(annotation, key) - for key, annotation in zip(keys, annotations) + for key, annotation in zip(keys, 
annotations, strict=False) ) return result result.extend(self.append(annotation) for annotation in annotations) @@ -816,7 +816,9 @@ def patch_many( properties_iter = properties_iter or ({} for _ in keys) # pragma: no branch geometries = geometries or (None for _ in keys) # pragma: no branch # Update the store - for key, geometry, properties in zip(keys, geometries, properties_iter): + for key, geometry, properties in zip( + keys, geometries, properties_iter, strict=False + ): properties_ = cast("dict[str, Any]", copy.deepcopy(properties)) self.patch(key, geometry, properties_) @@ -2722,7 +2724,7 @@ def append_many( if self.auto_commit: cur.execute("BEGIN") result = [] - for annotation, key in zip(annotations, keys): + for annotation, key in zip(annotations, keys, strict=False): self._append(key, annotation, cur) result.append(key) if self.auto_commit: @@ -3640,7 +3642,9 @@ def patch_many( # Begin a transaction if self.auto_commit: cur.execute("BEGIN") - for key, geometry, properties in zip(keys, geometries, properties_iter): + for key, geometry, properties in zip( + keys, geometries, properties_iter, strict=False + ): # Annotation is not in DB: if key not in self: self._append(str(key), Annotation(geometry, properties), cur) @@ -3680,8 +3684,10 @@ def _patch_geometry( cur (sqlite3.Cursor): The cursor to use. """ - bounds = dict(zip(("min_x", "min_y", "max_x", "max_y"), geometry.bounds)) - xy = dict(zip("xy", np.array(geometry.centroid.coords[0]))) + bounds = dict( + zip(("min_x", "min_y", "max_x", "max_y"), geometry.bounds, strict=False) + ) + xy = dict(zip("xy", np.array(geometry.centroid.coords[0]), strict=False)) query_parameters = dict( **bounds, **xy, diff --git a/tiatoolbox/cli/common.py b/tiatoolbox/cli/common.py index c663170c9..88364fa45 100644 --- a/tiatoolbox/cli/common.py +++ b/tiatoolbox/cli/common.py @@ -3,11 +3,13 @@ from __future__ import annotations from pathlib import Path -from typing import TYPE_CHECKING, Any, Callable +from typing import TYPE_CHECKING, Any import click if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable + from tiatoolbox.models.models_abc import IOConfigABC diff --git a/tiatoolbox/cli/show_wsi.py b/tiatoolbox/cli/show_wsi.py index 6ac5856af..f3d61565f 100644 --- a/tiatoolbox/cli/show_wsi.py +++ b/tiatoolbox/cli/show_wsi.py @@ -49,7 +49,9 @@ def show_wsi( if len(name) == 0: app = TileServer("TileServer", list(img_input), renderer=renderer) elif len(name) == len(img_input): - app = TileServer("TileServer", dict(zip(name, img_input)), renderer=renderer) + app = TileServer( + "TileServer", dict(zip(name, img_input, strict=False)), renderer=renderer + ) else: msg = "if names are provided, must match the number of paths provided" raise ValueError(msg) diff --git a/tiatoolbox/models/architecture/hovernet.py b/tiatoolbox/models/architecture/hovernet.py index 9798df62a..c0be9ad47 100644 --- a/tiatoolbox/models/architecture/hovernet.py +++ b/tiatoolbox/models/architecture/hovernet.py @@ -694,7 +694,7 @@ def get_instance_info(pred_inst: np.ndarray, pred_type: np.ndarray = None) -> di inst_type = inst_type_crop[inst_map_crop] (type_list, type_pixels) = np.unique(inst_type, return_counts=True) - type_list = list(zip(type_list, type_pixels)) + type_list = list(zip(type_list, type_pixels, strict=False)) type_list = sorted(type_list, key=lambda x: x[1], reverse=True) inst_type = type_list[0][0] diff --git a/tiatoolbox/models/architecture/utils.py b/tiatoolbox/models/architecture/utils.py index bfd759b3a..8f8f2bb22 100644 --- 
a/tiatoolbox/models/architecture/utils.py +++ b/tiatoolbox/models/architecture/utils.py @@ -103,7 +103,7 @@ def compile_model( def centre_crop( img: np.ndarray | torch.Tensor, - crop_shape: np.ndarray | torch.Tensor | tuple, + crop_shape: np.ndarray | torch.Tensor | tuple[int, int], data_format: str = "NCHW", ) -> np.ndarray | torch.Tensor: """A function to center crop image with given crop shape. @@ -126,10 +126,10 @@ msg = f"Unknown input format `{data_format}`." raise ValueError(msg) - crop_t = crop_shape[0] // 2 - crop_b = crop_shape[0] - crop_t - crop_l = crop_shape[1] // 2 - crop_r = crop_shape[1] - crop_l + crop_t: int = int(crop_shape[0] // 2) + crop_b: int = int(crop_shape[0] - crop_t) + crop_l: int = int(crop_shape[1] // 2) + crop_r: int = int(crop_shape[1] - crop_l) if data_format == "NCHW": return img[:, :, crop_t:-crop_b, crop_l:-crop_r] diff --git a/tiatoolbox/models/dataset/classification.py b/tiatoolbox/models/dataset/classification.py index b73d2c9f3..359d8c52a 100644 --- a/tiatoolbox/models/dataset/classification.py +++ b/tiatoolbox/models/dataset/classification.py @@ -3,7 +3,7 @@ from __future__ import annotations from pathlib import Path -from typing import TYPE_CHECKING, Callable +from typing import TYPE_CHECKING import cv2 import numpy as np @@ -17,6 +17,8 @@ from tiatoolbox.wsicore.wsireader import VirtualWSIReader, WSIReader if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable + import torch from PIL.Image import Image diff --git a/tiatoolbox/models/dataset/dataset_abc.py b/tiatoolbox/models/dataset/dataset_abc.py index b60ecd66e..7d7160e48 100644 --- a/tiatoolbox/models/dataset/dataset_abc.py +++ b/tiatoolbox/models/dataset/dataset_abc.py @@ -4,15 +4,15 @@ from abc import ABC, abstractmethod from pathlib import Path -from typing import TYPE_CHECKING, Callable, Union +from typing import TYPE_CHECKING if TYPE_CHECKING: # pragma: no cover - from collections.abc import Iterable + from collections.abc import Callable, Iterable try: from typing import TypeGuard except ImportError: - from typing_extensions import TypeGuard # to support python <3.10 + from typing_extensions import TypeGuard # fallback, unreachable on Python >=3.10 import numpy as np @@ -20,7 +20,7 @@ from tiatoolbox.utils import imread -input_type = Union[list[Union[str, Path, np.ndarray]], np.ndarray] +input_type = list[str | Path | np.ndarray] | np.ndarray class PatchDatasetABC(ABC, torch.utils.data.Dataset): diff --git a/tiatoolbox/models/dataset/info.py b/tiatoolbox/models/dataset/info.py index 7d6bd8f28..b95a069b5 100644 --- a/tiatoolbox/models/dataset/info.py +++ b/tiatoolbox/models/dataset/info.py @@ -119,7 +119,7 @@ def __init__( paths.sort() all_paths.extend(paths) uid_name_map[label_id] = label_name - inputs, labels = list(zip(*all_paths)) + inputs, labels = list(zip(*all_paths, strict=False)) self.label_names = uid_name_map self.inputs = list(inputs) # type casting to list diff --git a/tiatoolbox/models/engine/multi_task_segmentor.py b/tiatoolbox/models/engine/multi_task_segmentor.py index 2d3df757f..55fd1a2d8 100644 --- a/tiatoolbox/models/engine/multi_task_segmentor.py +++ b/tiatoolbox/models/engine/multi_task_segmentor.py @@ -23,7 +23,7 @@ from __future__ import annotations import shutil -from typing import TYPE_CHECKING, Callable +from typing import TYPE_CHECKING # replace with the sql database once the PR in place import joblib @@ -41,6 +41,8 @@ ) if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable + import torch from tiatoolbox.type_hints import 
IntBounds @@ -115,7 +117,7 @@ def _process_tile_predictions( # skipcq: PY-R1000 (top_left_x, top_left_y, bottom_x, bottom_y). """ - locations, predictions = list(zip(*tile_output)) + locations, predictions = list(zip(*tile_output, strict=False)) # convert from WSI space to tile space tile_tl = tile_bounds[:2] @@ -344,11 +346,12 @@ def _predict_one_wsi( indices_sem = [i for i, x in enumerate(self.output_types) if x == "semantic"] for s_id in range(len(indices_sem)): + shape = tuple(map(int, np.fliplr([wsi_proc_shape])[0])) self.wsi_layers.append( np.lib.format.open_memmap( f"{cache_dir}/{s_id}.npy", mode="w+", - shape=tuple(np.fliplr([wsi_proc_shape])[0]), + shape=shape, dtype=np.uint8, ), ) diff --git a/tiatoolbox/models/engine/nucleus_instance_segmentor.py b/tiatoolbox/models/engine/nucleus_instance_segmentor.py index 6649324b1..18d795a34 100644 --- a/tiatoolbox/models/engine/nucleus_instance_segmentor.py +++ b/tiatoolbox/models/engine/nucleus_instance_segmentor.py @@ -4,7 +4,7 @@ import uuid from collections import deque -from typing import Callable +from typing import TYPE_CHECKING # replace with the sql database once the PR in place import joblib @@ -21,6 +21,9 @@ ) from tiatoolbox.tools.patchextraction import PatchExtractor +if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable + def _process_instance_predictions( inst_dict: dict, @@ -257,7 +260,7 @@ def _process_tile_predictions( the tiling process. """ - locations, predictions = list(zip(*tile_output)) + locations, predictions = list(zip(*tile_output, strict=False)) # convert from WSI space to tile space tile_tl = tile_bounds[:2] @@ -651,13 +654,13 @@ def _infer_once(self: NucleusInstanceSegmentor) -> list: # repackage so that it's a N list, each contains # L x etc. output sample_outputs = [np.split(v, batch_size, axis=0) for v in sample_outputs] - sample_outputs = list(zip(*sample_outputs)) + sample_outputs = list(zip(*sample_outputs, strict=False)) # tensor to numpy, costly? 
sample_infos = sample_infos.numpy() sample_infos = np.split(sample_infos, batch_size, axis=0) - sample_outputs = list(zip(sample_infos, sample_outputs)) + sample_outputs = list(zip(sample_infos, sample_outputs, strict=False)) cum_output.extend(sample_outputs) pbar.update() pbar.close() diff --git a/tiatoolbox/models/engine/patch_predictor.py b/tiatoolbox/models/engine/patch_predictor.py index 76d3d3bd6..820f04fe9 100644 --- a/tiatoolbox/models/engine/patch_predictor.py +++ b/tiatoolbox/models/engine/patch_predictor.py @@ -5,7 +5,7 @@ import copy from collections import OrderedDict from pathlib import Path -from typing import TYPE_CHECKING, Callable +from typing import TYPE_CHECKING import numpy as np import torch @@ -21,6 +21,8 @@ from tiatoolbox.wsicore.wsireader import VirtualWSIReader, WSIReader if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable + from tiatoolbox.type_hints import IntPair, Resolution, Units @@ -949,7 +951,7 @@ def predict( # noqa: PLR0913 ioconfig.input_resolutions, ioconfig.input_resolutions[0]["units"], ) - fx_list = zip(fx_list, ioconfig.input_resolutions) + fx_list = zip(fx_list, ioconfig.input_resolutions, strict=False) fx_list = sorted(fx_list, key=lambda x: x[0]) highest_input_resolution = fx_list[0][1] diff --git a/tiatoolbox/models/engine/semantic_segmentor.py b/tiatoolbox/models/engine/semantic_segmentor.py index f4b85e5c1..b222d0266 100644 --- a/tiatoolbox/models/engine/semantic_segmentor.py +++ b/tiatoolbox/models/engine/semantic_segmentor.py @@ -7,7 +7,7 @@ import shutil from concurrent.futures import ProcessPoolExecutor from pathlib import Path -from typing import TYPE_CHECKING, Callable +from typing import TYPE_CHECKING import cv2 import joblib @@ -30,6 +30,7 @@ from tiatoolbox.wsicore.wsireader import VirtualWSIReader, WSIMeta, WSIReader if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable from multiprocessing.managers import Namespace from tiatoolbox.type_hints import IntPair, Resolution, Units @@ -54,12 +55,12 @@ def _estimate_canvas_parameters( """ if len(sample_prediction.shape) == 3: # noqa: PLR2004 num_output_ch = sample_prediction.shape[-1] - canvas_cum_shape_ = (*tuple(canvas_shape), num_output_ch) - canvas_count_shape_ = (*tuple(canvas_shape), 1) + canvas_cum_shape_ = tuple(map(int, (*tuple(canvas_shape), num_output_ch))) + canvas_count_shape_ = tuple(map(int, (*tuple(canvas_shape), 1))) add_singleton_dim = num_output_ch == 1 else: - canvas_cum_shape_ = (*tuple(canvas_shape), 1) - canvas_count_shape_ = (*tuple(canvas_shape), 1) + canvas_cum_shape_ = tuple(map(int, (*tuple(canvas_shape), 1))) + canvas_count_shape_ = tuple(map(int, (*tuple(canvas_shape), 1))) add_singleton_dim = True return canvas_cum_shape_, canvas_count_shape_, add_singleton_dim @@ -827,13 +828,13 @@ def _predict_one_wsi( # repackage so that it's an N list, each contains # L x etc. output sample_outputs = [np.split(v, batch_size, axis=0) for v in sample_outputs] - sample_outputs = list(zip(*sample_outputs)) + sample_outputs = list(zip(*sample_outputs, strict=False)) # tensor to numpy, costly? 
sample_infos = sample_infos.numpy() sample_infos = np.split(sample_infos, batch_size, axis=0) - sample_outputs = list(zip(sample_infos, sample_outputs)) + sample_outputs = list(zip(sample_infos, sample_outputs, strict=False)) if self.process_prediction_per_batch: self._process_predictions( sample_outputs, @@ -891,7 +892,7 @@ def _process_predictions( return # assume predictions is N, each item has L output element - locations, predictions = list(zip(*cum_batch_predictions)) + locations, predictions = list(zip(*cum_batch_predictions, strict=False)) # Nx4 (N x [tl_x, tl_y, br_x, br_y), denotes the location of # output patch this can exceed the image bound at the requested # resolution remove singleton due to split. @@ -1001,7 +1002,7 @@ def index(arr: np.ndarray, tl: np.ndarray, br: np.ndarray) -> np.ndarray: """Helper to shorten indexing.""" return arr[tl[0] : br[0], tl[1] : br[1]] - patch_infos = list(zip(locations, predictions)) + patch_infos = list(zip(locations, predictions, strict=False)) for _, patch_info in enumerate(patch_infos): # position is assumed to be in XY coordinate (bound_in_wsi, prediction) = patch_info @@ -1555,7 +1556,7 @@ def _process_predictions( """ # assume prediction_list is N, each item has L output elements - location_list, prediction_list = list(zip(*cum_batch_predictions)) + location_list, prediction_list = list(zip(*cum_batch_predictions, strict=False)) # Nx4 (N x [tl_x, tl_y, br_x, br_y), denotes the location of output # patch, this can exceed the image bound at the requested resolution # remove singleton due to split. diff --git a/tiatoolbox/models/models_abc.py b/tiatoolbox/models/models_abc.py index 0c82a9216..4e1f1d755 100644 --- a/tiatoolbox/models/models_abc.py +++ b/tiatoolbox/models/models_abc.py @@ -4,7 +4,7 @@ import os from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, Any, Callable +from typing import TYPE_CHECKING, Any import torch import torch._dynamo @@ -16,6 +16,7 @@ torch._dynamo.config.suppress_errors = True # skipcq: PYL-W0212 # noqa: SLF001 if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable from pathlib import Path import numpy as np diff --git a/tiatoolbox/tools/graph.py b/tiatoolbox/tools/graph.py index 0cc036b51..203ebbfd8 100644 --- a/tiatoolbox/tools/graph.py +++ b/tiatoolbox/tools/graph.py @@ -4,7 +4,7 @@ from collections import defaultdict from numbers import Number -from typing import TYPE_CHECKING, Callable, cast +from typing import TYPE_CHECKING, cast import numpy as np import torch @@ -15,6 +15,8 @@ from scipy.spatial import Delaunay, cKDTree if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable + from matplotlib.axes import Axes from numpy.typing import ArrayLike diff --git a/tiatoolbox/tools/patchextraction.py b/tiatoolbox/tools/patchextraction.py index a6caf082c..d1b134e76 100644 --- a/tiatoolbox/tools/patchextraction.py +++ b/tiatoolbox/tools/patchextraction.py @@ -3,7 +3,7 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, Callable, TypedDict, overload +from typing import TYPE_CHECKING, TypedDict, overload import numpy as np from typing_extensions import Unpack @@ -16,6 +16,7 @@ from tiatoolbox.wsicore import wsireader if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable from pathlib import Path from pandas import DataFrame @@ -418,7 +419,7 @@ def filter_coordinates( # Scaling the coordinates_list to the `tissue_mask` array resolution scale_factors = 
np.array(tissue_mask.shape[1::-1]) / np.array(wsi_shape) - scaled_coords = coordinates_list.copy().astype(np.float32) + scaled_coords: np.ndarray = coordinates_list.copy().astype(np.float32) scaled_coords[:, [0, 2]] *= scale_factors[0] scaled_coords[:, [0, 2]] = np.clip( scaled_coords[:, [0, 2]], diff --git a/tiatoolbox/tools/pyramid.py b/tiatoolbox/tools/pyramid.py index 3483a38cb..787142492 100644 --- a/tiatoolbox/tools/pyramid.py +++ b/tiatoolbox/tools/pyramid.py @@ -546,7 +546,9 @@ def __init__( # skipcq: PYL-W0231 types = self.store.pquery(f"props[{self.renderer.score_prop!r}]") # make a random dictionary colour map colors = random_colors(len(types), bright=True) - mapper = {key: (*color, 1) for key, color in zip(types, colors)} + mapper = { + key: (*color, 1) for key, color in zip(types, colors, strict=False) + } self.renderer.mapper = lambda x: mapper[x] def get_thumb_tile(self: AnnotationTileGenerator) -> Image.Image: diff --git a/tiatoolbox/tools/registration/wsi_registration.py b/tiatoolbox/tools/registration/wsi_registration.py index fdca5200a..ef9b28893 100644 --- a/tiatoolbox/tools/registration/wsi_registration.py +++ b/tiatoolbox/tools/registration/wsi_registration.py @@ -3,7 +3,7 @@ from __future__ import annotations import itertools -from typing import TYPE_CHECKING, Callable, cast +from typing import TYPE_CHECKING, cast import cv2 import numpy as np @@ -24,6 +24,8 @@ from tiatoolbox.wsicore.wsireader import VirtualWSIReader, WSIReader if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable + from tiatoolbox.type_hints import IntBounds, Resolution, Units RGB_IMAGE_DIM = 3 @@ -796,13 +798,13 @@ def find_points_inside_boundary(mask: np.ndarray, points: np.ndarray) -> np.ndar Indices of points enclosed by a boundary. """ - kernel = np.ones((25, 25), np.uint8) + kernel: np.ndarray = np.ones((25, 25), np.uint8) mask = cv2.dilate(mask, kernel, iterations=1) mask_reader = VirtualWSIReader(mask) # convert coordinates of shape [N, 2] to [N, 4] - end_x_y = points[:, 0:2] + 1 - bbox_coord = np.c_[points, end_x_y].astype(int) + end_x_y: np.ndarray = points[:, 0:2] + 1 + bbox_coord: np.ndarray = np.c_[points, end_x_y].astype(int) return PatchExtractor.filter_coordinates( mask_reader, bbox_coord, @@ -1382,11 +1384,14 @@ def estimate_bspline_transform( for size, spacing in zip( fixed_image_inv_sitk.GetSize(), fixed_image_inv_sitk.GetSpacing(), + strict=False, ) ] mesh_size = [ int(image_size / grid_spacing + 0.5) - for image_size, grid_spacing in zip(image_physical_size, grid_physical_spacing) + for image_size, grid_spacing in zip( + image_physical_size, grid_physical_spacing, strict=False + ) ] mesh_size = [int(sz / 4 + 0.5) for sz in mesh_size] diff --git a/tiatoolbox/tools/stainnorm.py b/tiatoolbox/tools/stainnorm.py index 05c41398f..75eb4a7cb 100644 --- a/tiatoolbox/tools/stainnorm.py +++ b/tiatoolbox/tools/stainnorm.py @@ -288,7 +288,7 @@ def transform(self: ReinhardNormalizer, img: np.ndarray) -> np.ndarray: return self.merge_back(norm1, norm2, norm3) @staticmethod - def lab_split(img: np.ndarray) -> tuple[float, float, float]: + def lab_split(img: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """Convert from RGB uint8 to LAB and split into channels. 
Args: @@ -307,15 +307,17 @@ def lab_split(img: np.ndarray) -> tuple[float, float, float]: """ img = img.astype("uint8") # ensure input image is uint8 img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB) - img_float = img.astype(np.float32) + img_float: np.ndarray = img.astype(np.float32) chan1, chan2, chan3 = cv2.split(img_float) - chan1 /= 2.55 # should now be in range [0,100] - chan2 -= 128.0 # should now be in range [-127,127] - chan3 -= 128.0 # should now be in range [-127,127] + chan1 /= np.asarray(2.55) # should now be in range [0,100] + chan2 -= np.asarray(128.0) # should now be in range [-127,127] + chan3 -= np.asarray(128.0) # should now be in range [-127,127] return chan1, chan2, chan3 @staticmethod - def merge_back(chan1: float, chan2: float, chan3: float) -> np.ndarray: + def merge_back( + chan1: np.ndarray, chan2: np.ndarray, chan3: np.ndarray + ) -> np.ndarray: """Take separate LAB channels and merge back to give RGB uint8. Args: @@ -357,11 +359,11 @@ def get_mean_std( """ img = img.astype("uint8") # ensure input image is uint8 chan1, chan2, chan3 = self.lab_split(img) - m1, sd1 = cv2.meanStdDev(chan1) - m2, sd2 = cv2.meanStdDev(chan2) - m3, sd3 = cv2.meanStdDev(chan3) - means = m1, m2, m3 - stds = sd1, sd2, sd3 + m1, sd1 = cv2.meanStdDev(np.asarray(chan1)) + m2, sd2 = cv2.meanStdDev(np.asarray(chan2)) + m3, sd3 = cv2.meanStdDev(np.asarray(chan3)) + means = float(m1[0][0]), float(m2[0][0]), float(m3[0][0]) + stds = float(sd1[0][0]), float(sd2[0][0]), float(sd3[0][0]) return means, stds diff --git a/tiatoolbox/tools/tissuemask.py b/tiatoolbox/tools/tissuemask.py index 66b9719bc..3923c7e0f 100644 --- a/tiatoolbox/tools/tissuemask.py +++ b/tiatoolbox/tools/tissuemask.py @@ -265,7 +265,7 @@ def __init__( # Set min region size to kernel area if None if self.min_region_size is None: - self.min_region_size = np.sum(self.kernel) + self.min_region_size = int(np.sum(self.kernel)) def transform(self: MorphologicalMasker, images: np.ndarray) -> np.ndarray: """Create masks using the found threshold followed by morphological operations. diff --git a/tiatoolbox/type_hints.py b/tiatoolbox/type_hints.py index 20ff94284..b4b931e3e 100644 --- a/tiatoolbox/type_hints.py +++ b/tiatoolbox/type_hints.py @@ -2,8 +2,8 @@ from __future__ import annotations -from collections.abc import Sequence -from typing import Callable, Literal, SupportsFloat, Union +from collections.abc import Callable, Sequence +from typing import Literal, SupportsFloat import numpy as np from shapely.geometry import LineString, Point, Polygon # type: ignore[import-untyped] @@ -11,24 +11,24 @@ # Proper type annotations for shapely is not yet available. 
-JSON = Union[dict[str, "JSON"], list["JSON"], str, int, float, bool, None] +JSON = dict[str, "JSON"] | list["JSON"] | str | int | float | bool | None NumPair = tuple[SupportsFloat, SupportsFloat] IntPair = tuple[int, int] # WSIReader -Resolution = Union[SupportsFloat, NumPair, np.ndarray, Sequence[SupportsFloat]] +Resolution = SupportsFloat | NumPair | np.ndarray | Sequence[SupportsFloat] Units = Literal["mpp", "power", "baseline", "level"] Bounds = tuple[SupportsFloat, SupportsFloat, SupportsFloat, SupportsFloat] IntBounds = tuple[int, int, int, int] # Annotation Store -Geometry = Union[Point, LineString, Polygon] +Geometry = Point | LineString | Polygon Properties = JSON # Could define this using a TypedDict -QueryGeometry = Union[Bounds, Geometry] +QueryGeometry = Bounds | Geometry CallablePredicate = Callable[[Properties], bool] CallableSelect = Callable[[Properties], Properties] -Predicate = Union[str, bytes, CallablePredicate] -Select = Union[str, bytes, CallableSelect] +Predicate = str | bytes | CallablePredicate +Select = str | bytes | CallableSelect NumpyPadLiteral = Literal[ "constant", "edge", diff --git a/tiatoolbox/utils/env_detection.py b/tiatoolbox/utils/env_detection.py index bc6f72ca4..04e7abae1 100644 --- a/tiatoolbox/utils/env_detection.py +++ b/tiatoolbox/utils/env_detection.py @@ -240,7 +240,7 @@ def has_network( # Connect to host connection = socket.create_connection((host, 80), timeout=timeout) connection.close() - except (socket.gaierror, socket.timeout): + except (TimeoutError, socket.gaierror): return False else: return True diff --git a/tiatoolbox/utils/image.py b/tiatoolbox/utils/image.py index db22eef21..7567fbfe6 100644 --- a/tiatoolbox/utils/image.py +++ b/tiatoolbox/utils/image.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Callable +from typing import TYPE_CHECKING import numpy as np from PIL import Image @@ -18,6 +18,8 @@ ) if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable + from tiatoolbox.type_hints import IntBounds, NumpyPadLiteral PADDING_TO_BOUNDS = np.array([-1, -1, 1, 1]) @@ -647,7 +649,7 @@ def sub_pixel_read( # skipcq: PY-R1000 # noqa: C901, PLR0912, PLR0913, PLR0915 residuals = np.abs(int_read_bounds - read_bounds) read_bounds = int_read_bounds read_location, read_size = bounds2locsize(int_read_bounds) - valid_int_bounds = find_overlap( + valid_int_bounds: np.ndarray = find_overlap( read_location=read_location, read_size=read_size, image_size=image_size, diff --git a/tiatoolbox/utils/transforms.py b/tiatoolbox/utils/transforms.py index 8c2817b75..bb9f5670d 100644 --- a/tiatoolbox/utils/transforms.py +++ b/tiatoolbox/utils/transforms.py @@ -373,7 +373,7 @@ def bounds2slices( slice_array = np.stack([start[::-1], stop[::-1]], axis=1) slices = [] - for x, s in zip(slice_array, stride_array): + for x, s in zip(slice_array, stride_array, strict=False): slices.append(slice(x[0], x[1], s)) return tuple(slices) diff --git a/tiatoolbox/utils/visualization.py b/tiatoolbox/utils/visualization.py index 6317e5af3..41cb98f9b 100644 --- a/tiatoolbox/utils/visualization.py +++ b/tiatoolbox/utils/visualization.py @@ -4,7 +4,7 @@ import colorsys import random -from typing import TYPE_CHECKING, Callable, TypedDict, cast +from typing import TYPE_CHECKING, TypedDict, cast import cv2 import matplotlib as mpl @@ -18,6 +18,8 @@ from tiatoolbox.enums import GeometryType if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable + from matplotlib.axes import Axes from matplotlib.cm import 
ScalarMappable from numpy.typing import ArrayLike @@ -177,6 +179,7 @@ def overlay_prediction_mask( raise ValueError(msg) img = np.array(img * 255, dtype=np.uint8) # If `min_val` is defined, only display the overlay for areas with pred > min_val + prediction_sel: np.ndarray = np.ones_like(prediction, dtype=bool) if min_val > 0: prediction_sel = prediction >= min_val @@ -184,13 +187,13 @@ def overlay_prediction_mask( predicted_classes = sorted(np.unique(prediction).tolist()) # Generate random colours if None are given - rand_state = np.random.default_rng().__getstate__() + rand_state = np.random.default_rng().bit_generator.state rng = np.random.default_rng(123) label_info = label_info or { # Use label_info if provided OR generate label_uid: (str(label_uid), rng.integers(0, 255, 3)) for label_uid in predicted_classes } - np.random.default_rng().__setstate__(rand_state) + np.random.default_rng().bit_generator.state = rand_state # Validate label_info missing_label_uids = _validate_label_info(label_info, predicted_classes) @@ -198,7 +201,7 @@ def overlay_prediction_mask( msg = f"Missing label for: {missing_label_uids}." raise ValueError(msg) - rgb_prediction = np.zeros( + rgb_prediction: np.ndarray = np.zeros( [prediction.shape[0], prediction.shape[1], 3], dtype=np.uint8, ) @@ -217,7 +220,7 @@ def overlay_prediction_mask( return overlay # Create colorbar parameters - name_list, color_list = zip(*label_info.values()) # Unzip values + name_list, color_list = zip(*label_info.values(), strict=False) # Unzip values color_list_arr = np.array(color_list) / 255 uid_list = list(label_info.keys()) cmap = mpl.colors.ListedColormap(color_list_arr) @@ -1048,7 +1051,7 @@ def _set_mapper( if isinstance(value, list): colors = random_colors(len(value), bright=True) self.__dict__["mapper"] = { - key: (*color, 1) for key, color in zip(value, colors) + key: (*color, 1) for key, color in zip(value, colors, strict=False) } if isinstance(value, dict): self.raw_mapper = value @@ -1127,7 +1130,9 @@ def render_annotations( min_area = 0.0005 * (output_size[0] * output_size[1]) * (scale * mpp_sf) ** 2 - tile = np.zeros((output_size[0] * res, output_size[1] * res, 4), dtype=np.uint8) + tile: np.ndarray = np.zeros( + (output_size[0] * res, output_size[1] * res, 4), dtype=np.uint8 + ) if scale <= self.max_scale: # get all annotations diff --git a/tiatoolbox/visualization/bokeh_app/main.py b/tiatoolbox/visualization/bokeh_app/main.py index 81395300d..5df7acb04 100644 --- a/tiatoolbox/visualization/bokeh_app/main.py +++ b/tiatoolbox/visualization/bokeh_app/main.py @@ -9,7 +9,7 @@ from cmath import pi from pathlib import Path, PureWindowsPath from shutil import rmtree -from typing import TYPE_CHECKING, Any, Callable, SupportsFloat +from typing import TYPE_CHECKING, Any, SupportsFloat import numpy as np import requests @@ -75,6 +75,8 @@ from tiatoolbox.wsicore.wsireader import WSIReader if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable + from bokeh.document import Document rng = np.random.default_rng() @@ -1634,6 +1636,7 @@ def gather_ui_elements( # noqa: PLR0915 model_row, type_select_row, ], + strict=False, ), ) if "ui_elements_1" in doc_config: @@ -1667,6 +1670,7 @@ def gather_ui_elements( # noqa: PLR0915 edge_size_spinner, res_switch, ], + strict=False, ), ) if "ui_elements_2" in doc_config: diff --git a/tiatoolbox/visualization/tileserver.py b/tiatoolbox/visualization/tileserver.py index b7c750938..236868f17 100644 --- a/tiatoolbox/visualization/tileserver.py +++ 
b/tiatoolbox/visualization/tileserver.py @@ -422,7 +422,7 @@ def change_mapper(self: TileServer) -> str: session_id = self._get_session_id() cmap = json.loads(request.form["cmap"]) if isinstance(cmap, dict): - cmap = dict(zip(cmap["keys"], cmap["values"])) + cmap = dict(zip(cmap["keys"], cmap["values"], strict=False)) self.renderers[session_id].score_fn = lambda x: x self.renderers[session_id].mapper = cmap self.renderers[session_id].function_mapper = None diff --git a/tiatoolbox/wsicore/wsireader.py b/tiatoolbox/wsicore/wsireader.py index 46f10cc36..0791bc4fa 100644 --- a/tiatoolbox/wsicore/wsireader.py +++ b/tiatoolbox/wsicore/wsireader.py @@ -3439,7 +3439,7 @@ def __init__(self: ArrayView, array: zarr.Array, axes: str) -> None: """ self.array = array self.axes = axes - self._shape = dict(zip(self.axes, self.array.shape)) + self._shape = dict(zip(self.axes, self.array.shape, strict=False)) @property def shape(self: ArrayView) -> tuple: @@ -6253,6 +6253,7 @@ def __init__( for s_dims, t_dims in zip( self.wsi_reader.info.level_dimensions, self.target_wsi_reader.info.level_dimensions, + strict=False, ) ] self.level_pads = [ @@ -6260,6 +6261,7 @@ def __init__( for s_dims, t_dims in zip( self.wsi_reader.info.level_dimensions, self.target_wsi_reader.info.level_dimensions, + strict=False, ) ] self.get_location_array(disp_array) @@ -6316,6 +6318,7 @@ def get_location_array(self, disp_array: np.ndarray) -> None: for s_dims, t_dims in zip( self.wsi_reader.info.level_dimensions, self.target_wsi_reader.info.level_dimensions, + strict=False, ) ) wsimeta.slide_dimensions = wsimeta.level_dimensions[0]
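Reviewer note: the `strict=False` arguments added to `zip()` calls throughout this diff rely on PEP 618, which introduced the `strict` keyword in Python 3.10; with `target-version = "py310"` set in pyproject.toml, Ruff's B905 rule flags any `zip()` call that leaves it implicit. A minimal sketch of the semantics (the lists here are illustrative, not from the codebase):

```python
a = [1, 2, 3]
b = ["x", "y"]

# strict=False keeps the old implicit behaviour: silently truncate to
# the shortest iterable. This is what the diff opts for everywhere, so
# no runtime behaviour changes.
print(list(zip(a, b, strict=False)))  # [(1, 'x'), (2, 'y')]

# strict=True instead raises on a length mismatch, which is the safer
# choice where the inputs are expected to be parallel sequences.
try:
    list(zip(a, b, strict=True))
except ValueError as err:
    print(err)  # zip() argument 2 is shorter than argument 1
```

Choosing `strict=False` wholesale preserves pre-3.10 behaviour exactly; call sites that already assert equal lengths (e.g. the `patient_uids`/`patient_labels` pairing in the slide-graph notebook) could opt into `strict=True` in a follow-up.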
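The other recurring pattern is the typing cleanup: `typing.Callable` is a deprecated alias of `collections.abc.Callable` under PEP 585, and `Union[...]` gives way to PEP 604's `X | Y` syntax on Python 3.10+. Moving the `Callable` import under `TYPE_CHECKING` keeps it out of the runtime import path, which is safe when a module uses the name only in annotations. A self-contained sketch of the pattern (the `apply` function is hypothetical, not from the codebase):

```python
from __future__ import annotations  # annotations become strings, evaluated lazily

from typing import TYPE_CHECKING

if TYPE_CHECKING:  # pragma: no cover
    # Imported for type checkers only; never executed at runtime.
    from collections.abc import Callable


def apply(fn: Callable[[int], int], value: int | None) -> int | None:
    """Apply fn to value, passing None through unchanged."""
    return None if value is None else fn(value)


print(apply(lambda x: x + 1, 41))  # 42
```

Note that this only works in modules with `from __future__ import annotations` (or that otherwise restrict such names to annotations); using `Callable` in a runtime expression would raise `NameError`, which is why modules like `tests/test_type_hints.py` that inspect `Callable` at runtime import it unconditionally instead.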