From ebddfaa9a8a33ab63254d99d163a663dc90118c9 Mon Sep 17 00:00:00 2001 From: Shan E Ahmed Raza <13048456+shaneahmed@users.noreply.github.com> Date: Fri, 10 Oct 2025 10:47:03 +0100 Subject: [PATCH 01/16] :arrow_up: Upgrade Python supported versions --- .github/workflows/mypy-type-check.yml | 2 +- CONTRIBUTING.rst | 2 +- README.md | 2 +- docs/installation.rst | 2 +- pyproject.toml | 2 +- setup.py | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/mypy-type-check.yml b/.github/workflows/mypy-type-check.yml index 987db45ad..7ec585482 100644 --- a/.github/workflows/mypy-type-check.yml +++ b/.github/workflows/mypy-type-check.yml @@ -16,7 +16,7 @@ jobs: strategy: matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] + python-version: ["3.10", "3.11", "3.12", "3.13"] steps: diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 5b2d435b8..eaaf6f3d7 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -98,7 +98,7 @@ Before you submit a pull request, check that it meets these guidelines: 1. The pull request should include tests. 2. If the pull request adds functionality, the docs should be updated. Put your new functionality into a function with a docstring, and add the feature to the pull request description. -3. The pull request should work for Python 3.9, 3.10, 3.11, and 3.12, and for PyPy. Check https://github.com/TissueImageAnalytics/tiatoolbox/actions/workflows/python-package.yml and make sure that the tests pass for all supported Python versions. +3. The pull request should work for Python 3.10, 3.11, 3.12 and 3.13, and for PyPy. Check https://github.com/TissueImageAnalytics/tiatoolbox/actions/workflows/python-package.yml and make sure that the tests pass for all supported Python versions. Tips ---- diff --git a/README.md b/README.md index 1e517db0d..0745e936f 100644 --- a/README.md +++ b/README.md @@ -106,7 +106,7 @@ Prepare a computer as a convenient platform for further development of the Pytho 5. 
Create virtual environment for TIAToolbox using ```sh - $ conda create -n tiatoolbox-dev python=3.9 # select version of your choice + $ conda create -n tiatoolbox-dev python=3.10 # select version of your choice $ conda activate tiatoolbox-dev $ pip install -r requirements/requirements_dev.txt ``` diff --git a/docs/installation.rst b/docs/installation.rst index 808517739..f6e39fc16 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -55,7 +55,7 @@ MacPorts Installing Stable Release ========================= -Please note that TIAToolbox is tested for Python versions 3.9, 3.10, 3.11, and 3.12. +Please note that TIAToolbox is tested for Python versions 3.10, 3.11, 3.12 and 3.13. Recommended ----------- diff --git a/pyproject.toml b/pyproject.toml index 2d29ca7c8..b5a767709 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -174,4 +174,4 @@ skip-magic-trailing-comma = false [tool.mypy] ignore_missing_imports = true -python_version = 3.9 +python_version = 3.10 diff --git a/setup.py b/setup.py index 31d30e5e9..cbc17fda4 100644 --- a/setup.py +++ b/setup.py @@ -34,16 +34,16 @@ setup( author="TIA Centre", author_email="tia@dcs.warwick.ac.uk", - python_requires=">=3.9, <3.13", + python_requires=">=3.10, <3.14", classifiers=[ "Development Status :: 2 - Pre-Alpha", "Intended Audience :: Developers", "Natural Language :: English", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", ], description="Computational pathology toolbox developed by TIA Centre.", dependency_links=dependency_links, From 2820df94ac443164d8977719854e94cee6683d6c Mon Sep 17 00:00:00 2001 From: Shan E Ahmed Raza <13048456+shaneahmed@users.noreply.github.com> Date: Fri, 10 Oct 2025 10:52:43 +0100 Subject: [PATCH 02/16] :arrow_up: Update GitHub workflows for new Python versions --- 
.github/workflows/docker-publish.yml | 8 ++++---- .github/workflows/pip-install.yml | 2 +- .github/workflows/python-package.yml | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml index 1e5e48f28..7b495284f 100644 --- a/.github/workflows/docker-publish.yml +++ b/.github/workflows/docker-publish.yml @@ -15,10 +15,6 @@ jobs: fail-fast: true matrix: include: - - dockerfile: ./docker/3.9/Debian/Dockerfile - mtag: py3.9-debian - - dockerfile: ./docker/3.9/Ubuntu/Dockerfile - mtag: py3.9-ubuntu - dockerfile: ./docker/3.10/Debian/Dockerfile mtag: py3.10-debian - dockerfile: ./docker/3.10/Ubuntu/Dockerfile @@ -33,6 +29,10 @@ jobs: mtag: py3.12-ubuntu - dockerfile: ./docker/3.12/Ubuntu/Dockerfile mtag: latest + - dockerfile: ./docker/3.13/Ubuntu/Dockerfile + mtag: py3.13-ubuntu + - dockerfile: ./docker/3.13/Ubuntu/Dockerfile + mtag: latest permissions: contents: read packages: write diff --git a/.github/workflows/pip-install.yml b/.github/workflows/pip-install.yml index b543cdfee..9b0e9bcd6 100644 --- a/.github/workflows/pip-install.yml +++ b/.github/workflows/pip-install.yml @@ -14,7 +14,7 @@ jobs: strategy: fail-fast: true matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] + python-version: ["3.10", "3.11", "3.12", "3.13"] os: [ubuntu-24.04, windows-latest, macos-latest] steps: - name: Set up Python ${{ matrix.python-version }} diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index a09b08884..642398234 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -17,7 +17,7 @@ jobs: strategy: fail-fast: true matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] + python-version: ["3.10", "3.11", "3.12", "3.13"] steps: - uses: actions/checkout@v3 From ba31c58fd84022cd07939764271fad3ab17e6f83 Mon Sep 17 00:00:00 2001 From: Shan E Ahmed Raza <13048456+shaneahmed@users.noreply.github.com> Date: Fri, 
10 Oct 2025 10:55:30 +0100 Subject: [PATCH 03/16] :arrow_up: Update dependencies --- pyproject.toml | 4 ++-- requirements/requirements.txt | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index b5a767709..1d7addb08 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -136,8 +136,8 @@ line-length = 88 # Allow unused variables when underscore-prefixed. lint.dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$" -# Minimum Python version 3.9. -target-version = "py39" +# Minimum Python version 3.10. +target-version = "py310" [tool.ruff.lint.mccabe] # Unlike Flake8, default to a complexity level of 10. diff --git a/requirements/requirements.txt b/requirements/requirements.txt index fd85350ef..187da90c0 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -8,7 +8,7 @@ defusedxml>=0.7.1 filelock>=3.9.0 flask>=2.2.2 flask-cors>=4.0.0 -glymur>=0.12.7, < 0.14 # 0.14 is not compatible with python3.9 +glymur>=0.12.7 imagecodecs>=2022.9.26 joblib>=1.1.1 jupyterlab>=3.5.2 From 8377b6afc327334284646dc2b97c47b6c4a05b01 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 10 Oct 2025 10:04:26 +0000 Subject: [PATCH 04/16] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- benchmarks/annotation_store.ipynb | 4 ++-- examples/full-pipelines/slide-graph.ipynb | 17 ++++++++-------- .../inference-pipelines/slide-graph.ipynb | 3 ++- pre-commit/notebook_markdown_format.py | 1 + pre-commit/requirements_consistency.py | 2 +- tests/conftest.py | 2 +- tests/models/test_arch_mapde.py | 2 +- tests/models/test_arch_micronet.py | 2 +- tests/models/test_arch_nuclick.py | 2 +- tests/models/test_arch_sccnn.py | 2 +- tests/models/test_arch_unet.py | 2 +- tests/models/test_feature_extractor.py | 2 +- tests/models/test_hovernet.py | 2 +- tests/models/test_hovernetplus.py | 2 +- 
tests/models/test_multi_task_segmentor.py | 2 +- .../models/test_nucleus_instance_segmentor.py | 2 +- tests/models/test_patch_predictor.py | 2 +- tests/models/test_semantic_segmentation.py | 2 +- tests/test_annotation_stores.py | 4 ++-- tests/test_annotation_tilerendering.py | 2 +- tests/test_dsl.py | 3 ++- tests/test_magic.py | 3 ++- tests/test_tiffreader.py | 2 +- tests/test_tileserver.py | 3 ++- tests/test_utils.py | 4 ++-- tests/test_wsireader.py | 14 ++++++++----- tiatoolbox/annotation/dsl.py | 2 +- tiatoolbox/annotation/storage.py | 20 ++++++++++++------- tiatoolbox/cli/common.py | 3 ++- tiatoolbox/cli/show_wsi.py | 4 +++- tiatoolbox/models/architecture/hovernet.py | 2 +- tiatoolbox/models/dataset/classification.py | 3 ++- tiatoolbox/models/dataset/dataset_abc.py | 7 ++++--- tiatoolbox/models/dataset/info.py | 2 +- .../models/engine/multi_task_segmentor.py | 5 +++-- .../engine/nucleus_instance_segmentor.py | 8 ++++---- tiatoolbox/models/engine/patch_predictor.py | 5 +++-- .../models/engine/semantic_segmentor.py | 13 ++++++------ tiatoolbox/models/models_abc.py | 3 ++- tiatoolbox/tools/graph.py | 3 ++- tiatoolbox/tools/patchextraction.py | 3 ++- tiatoolbox/tools/pyramid.py | 4 +++- .../tools/registration/wsi_registration.py | 8 ++++++-- tiatoolbox/type_hints.py | 4 ++-- tiatoolbox/utils/env_detection.py | 2 +- tiatoolbox/utils/image.py | 3 ++- tiatoolbox/utils/transforms.py | 2 +- tiatoolbox/utils/visualization.py | 7 ++++--- tiatoolbox/visualization/bokeh_app/main.py | 5 ++++- tiatoolbox/visualization/tileserver.py | 2 +- tiatoolbox/wsicore/wsireader.py | 5 ++++- 51 files changed, 127 insertions(+), 86 deletions(-) diff --git a/benchmarks/annotation_store.ipynb b/benchmarks/annotation_store.ipynb index 6cf6cf8e6..4646a04e9 100644 --- a/benchmarks/annotation_store.ipynb +++ b/benchmarks/annotation_store.ipynb @@ -355,7 +355,7 @@ " capsize=capsize,\n", " **kwargs,\n", " )\n", - " for i, (runs, c) in enumerate(zip(experiments, color)):\n", + " for i, (runs, c) 
in enumerate(zip(experiments, color, strict=False)):\n", " plt.text(\n", " i,\n", " min(runs),\n", @@ -2418,7 +2418,7 @@ " )\n", " total = np.sum(counts)\n", " frequencies = dict.fromkeys(range(256), 0)\n", - " for v, x in zip(values, counts):\n", + " for v, x in zip(values, counts, strict=False):\n", " frequencies[v] = x / total\n", " frequency_array = np.array(list(frequencies.values()))\n", " epsilon = 1e-16\n", diff --git a/examples/full-pipelines/slide-graph.ipynb b/examples/full-pipelines/slide-graph.ipynb index 9ded87d9e..33a8a2b5f 100644 --- a/examples/full-pipelines/slide-graph.ipynb +++ b/examples/full-pipelines/slide-graph.ipynb @@ -132,8 +132,9 @@ "import shutil\n", "import warnings\n", "from collections import OrderedDict\n", + "from collections.abc import Callable\n", "from pathlib import Path\n", - "from typing import TYPE_CHECKING, Callable\n", + "from typing import TYPE_CHECKING\n", "\n", "# Third party imports\n", "import joblib\n", @@ -394,7 +395,7 @@ "patient_uids = patient_uids[sel]\n", "patient_labels = patient_labels_[sel]\n", "assert len(patient_uids) == len(patient_labels) # noqa: S101\n", - "clinical_info = OrderedDict(list(zip(patient_uids, patient_labels)))\n", + "clinical_info = OrderedDict(list(zip(patient_uids, patient_labels, strict=False)))\n", "\n", "# Retrieve patient code of each WSI, this is based on TCGA barcodes:\n", "# https://docs.gdc.cancer.gov/Encyclopedia/pages/TCGA_Barcode/\n", @@ -412,7 +413,7 @@ "wsi_names = np.array(wsi_names)[sel]\n", "wsi_labels = np.array(wsi_labels)[sel]\n", "\n", - "label_df = list(zip(wsi_names, wsi_labels))\n", + "label_df = list(zip(wsi_names, wsi_labels, strict=False))\n", "label_df = pd.DataFrame(label_df, columns=[\"WSI-CODE\", \"LABEL\"])" ] }, @@ -529,9 +530,9 @@ "\n", " splits.append(\n", " {\n", - " \"train\": list(zip(train_x, train_y)),\n", - " \"valid\": list(zip(valid_x, valid_y)),\n", - " \"test\": list(zip(test_x, test_y)),\n", + " \"train\": list(zip(train_x, train_y, 
strict=False)),\n", + " \"valid\": list(zip(valid_x, valid_y, strict=False)),\n", + " \"test\": list(zip(test_x, test_y, strict=False)),\n", " },\n", " )\n", " return splits" @@ -2025,7 +2026,7 @@ " output = [np.split(v, batch_size, axis=0) for v in output]\n", " # pairing such that it will be\n", " # N batch size x H head list\n", - " output = list(zip(*output))\n", + " output = list(zip(*output, strict=False))\n", " step_output.extend(output)\n", " pbar.update()\n", " pbar.close()\n", @@ -2042,7 +2043,7 @@ " ):\n", " # Expand the list of N dataset size x H heads\n", " # back to a list of H Head each with N samples.\n", - " output = list(zip(*step_output))\n", + " output = list(zip(*step_output, strict=False))\n", " logit, true = output\n", " logit = np.squeeze(np.array(logit))\n", " true = np.squeeze(np.array(true))\n", diff --git a/examples/inference-pipelines/slide-graph.ipynb b/examples/inference-pipelines/slide-graph.ipynb index 4d2c62de3..017adc52c 100644 --- a/examples/inference-pipelines/slide-graph.ipynb +++ b/examples/inference-pipelines/slide-graph.ipynb @@ -218,8 +218,9 @@ "import random\n", "import shutil\n", "import warnings\n", + "from collections.abc import Callable\n", "from pathlib import Path\n", - "from typing import TYPE_CHECKING, Callable\n", + "from typing import TYPE_CHECKING\n", "\n", "# Third party imports\n", "import joblib\n", diff --git a/pre-commit/notebook_markdown_format.py b/pre-commit/notebook_markdown_format.py index 991241b4a..4195fb36f 100644 --- a/pre-commit/notebook_markdown_format.py +++ b/pre-commit/notebook_markdown_format.py @@ -57,6 +57,7 @@ def main(files: list[Path]) -> None: for cell, formatted_cell in zip( notebook["cells"], formatted_notebook["cells"], + strict=False, ) ) if not changed: diff --git a/pre-commit/requirements_consistency.py b/pre-commit/requirements_consistency.py index 4f8d4f442..ed57f1f9e 100644 --- a/pre-commit/requirements_consistency.py +++ b/pre-commit/requirements_consistency.py @@ -220,7 
+220,7 @@ def in_common_consistent(all_requirements: dict[Path, dict[str, Requirement]]) - ] # Unzip the specs to get a list of constraints and versions - _, constraints, versions = zip(*zipped_file_specs) + _, constraints, versions = zip(*zipped_file_specs, strict=False) # Check that the constraints and versions are the same across files formatted_reqs = [f"{c}{v} ({p.name})" for p, c, v in zipped_file_specs] diff --git a/tests/conftest.py b/tests/conftest.py index 2b7de0fd6..104b422ef 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -5,8 +5,8 @@ import os import shutil import time +from collections.abc import Callable from pathlib import Path -from typing import Callable import pytest import torch diff --git a/tests/models/test_arch_mapde.py b/tests/models/test_arch_mapde.py index 4ec404826..c4f40be9e 100644 --- a/tests/models/test_arch_mapde.py +++ b/tests/models/test_arch_mapde.py @@ -1,6 +1,6 @@ """Unit test package for SCCNN.""" -from typing import Callable +from collections.abc import Callable import numpy as np import torch diff --git a/tests/models/test_arch_micronet.py b/tests/models/test_arch_micronet.py index e7aa23d5b..83cc1b597 100644 --- a/tests/models/test_arch_micronet.py +++ b/tests/models/test_arch_micronet.py @@ -1,7 +1,7 @@ """Unit test package for MicroNet.""" +from collections.abc import Callable from pathlib import Path -from typing import Callable import numpy as np import pytest diff --git a/tests/models/test_arch_nuclick.py b/tests/models/test_arch_nuclick.py index b84516125..84b102134 100644 --- a/tests/models/test_arch_nuclick.py +++ b/tests/models/test_arch_nuclick.py @@ -1,7 +1,7 @@ """Unit test package for NuClick.""" +from collections.abc import Callable from pathlib import Path -from typing import Callable import numpy as np import pytest diff --git a/tests/models/test_arch_sccnn.py b/tests/models/test_arch_sccnn.py index 16c99cc49..2629304af 100644 --- a/tests/models/test_arch_sccnn.py +++ 
b/tests/models/test_arch_sccnn.py @@ -1,6 +1,6 @@ """Unit test package for SCCNN.""" -from typing import Callable +from collections.abc import Callable import numpy as np import torch diff --git a/tests/models/test_arch_unet.py b/tests/models/test_arch_unet.py index 2ac231c7c..63f20c89d 100644 --- a/tests/models/test_arch_unet.py +++ b/tests/models/test_arch_unet.py @@ -1,7 +1,7 @@ """Unit test package for Unet.""" +from collections.abc import Callable from pathlib import Path -from typing import Callable import numpy as np import pytest diff --git a/tests/models/test_feature_extractor.py b/tests/models/test_feature_extractor.py index 9ceb549be..b9c5799f9 100644 --- a/tests/models/test_feature_extractor.py +++ b/tests/models/test_feature_extractor.py @@ -1,8 +1,8 @@ """Test for feature extractor.""" import shutil +from collections.abc import Callable from pathlib import Path -from typing import Callable import numpy as np import pytest diff --git a/tests/models/test_hovernet.py b/tests/models/test_hovernet.py index 2567018b8..34ddab2c2 100644 --- a/tests/models/test_hovernet.py +++ b/tests/models/test_hovernet.py @@ -1,6 +1,6 @@ """Unit test package for HoVerNet.""" -from typing import Callable +from collections.abc import Callable import numpy as np import pytest diff --git a/tests/models/test_hovernetplus.py b/tests/models/test_hovernetplus.py index 1377fdd82..f336ef14f 100644 --- a/tests/models/test_hovernetplus.py +++ b/tests/models/test_hovernetplus.py @@ -1,6 +1,6 @@ """Unit test package for HoVerNet+.""" -from typing import Callable +from collections.abc import Callable import torch diff --git a/tests/models/test_multi_task_segmentor.py b/tests/models/test_multi_task_segmentor.py index 8b234ac55..3cec30121 100644 --- a/tests/models/test_multi_task_segmentor.py +++ b/tests/models/test_multi_task_segmentor.py @@ -6,8 +6,8 @@ import gc import multiprocessing import shutil +from collections.abc import Callable from pathlib import Path -from typing import 
Callable import joblib import numpy as np diff --git a/tests/models/test_nucleus_instance_segmentor.py b/tests/models/test_nucleus_instance_segmentor.py index 2956849fb..f657a6347 100644 --- a/tests/models/test_nucleus_instance_segmentor.py +++ b/tests/models/test_nucleus_instance_segmentor.py @@ -5,8 +5,8 @@ # ! The garbage collector import gc import shutil +from collections.abc import Callable from pathlib import Path -from typing import Callable import joblib import numpy as np diff --git a/tests/models/test_patch_predictor.py b/tests/models/test_patch_predictor.py index 913d63241..8c40b11c2 100644 --- a/tests/models/test_patch_predictor.py +++ b/tests/models/test_patch_predictor.py @@ -4,8 +4,8 @@ import copy import shutil +from collections.abc import Callable from pathlib import Path -from typing import Callable import cv2 import numpy as np diff --git a/tests/models/test_semantic_segmentation.py b/tests/models/test_semantic_segmentation.py index 01776b800..bddc32492 100644 --- a/tests/models/test_semantic_segmentation.py +++ b/tests/models/test_semantic_segmentation.py @@ -8,8 +8,8 @@ import gc import multiprocessing import shutil +from collections.abc import Callable from pathlib import Path -from typing import Callable import numpy as np import pytest diff --git a/tests/test_annotation_stores.py b/tests/test_annotation_stores.py index c3879c39f..a363d46a9 100644 --- a/tests/test_annotation_stores.py +++ b/tests/test_annotation_stores.py @@ -6,11 +6,11 @@ import pickle import sqlite3 import sys -from collections.abc import Generator +from collections.abc import Callable, Generator from itertools import repeat, zip_longest from pathlib import Path from timeit import timeit -from typing import TYPE_CHECKING, Callable, ClassVar +from typing import TYPE_CHECKING, ClassVar import numpy as np import pandas as pd diff --git a/tests/test_annotation_tilerendering.py b/tests/test_annotation_tilerendering.py index abfdb495a..fceaa398f 100644 --- 
a/tests/test_annotation_tilerendering.py +++ b/tests/test_annotation_tilerendering.py @@ -6,8 +6,8 @@ from __future__ import annotations +from collections.abc import Callable from pathlib import Path -from typing import Callable import matplotlib.pyplot as plt import numpy as np diff --git a/tests/test_dsl.py b/tests/test_dsl.py index 3f246ca1b..1bdb23646 100644 --- a/tests/test_dsl.py +++ b/tests/test_dsl.py @@ -4,8 +4,9 @@ import json import sqlite3 +from collections.abc import Callable from numbers import Number -from typing import Callable, ClassVar +from typing import ClassVar import pytest diff --git a/tests/test_magic.py b/tests/test_magic.py index c5ecfe0c9..6bf337a9b 100644 --- a/tests/test_magic.py +++ b/tests/test_magic.py @@ -2,9 +2,10 @@ import sqlite3 import zipfile +from collections.abc import Callable from io import BytesIO from pathlib import Path -from typing import BinaryIO, Callable +from typing import BinaryIO import pytest diff --git a/tests/test_tiffreader.py b/tests/test_tiffreader.py index 9fd38da4e..cc956254a 100644 --- a/tests/test_tiffreader.py +++ b/tests/test_tiffreader.py @@ -1,6 +1,6 @@ """Test TIFFWSIReader.""" -from typing import Callable +from collections.abc import Callable import pytest from defusedxml import ElementTree diff --git a/tests/test_tileserver.py b/tests/test_tileserver.py index add9f4981..ddfd3dbd1 100644 --- a/tests/test_tileserver.py +++ b/tests/test_tileserver.py @@ -5,8 +5,9 @@ import json import logging import urllib +from collections.abc import Callable from pathlib import Path, PureWindowsPath -from typing import TYPE_CHECKING, Callable, NoReturn +from typing import TYPE_CHECKING, NoReturn import joblib import numpy as np diff --git a/tests/test_utils.py b/tests/test_utils.py index f8908f434..241d64842 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -803,7 +803,7 @@ def test_fuzz_bounds2locsize() -> None: for _ in range(1000): size = (rng.integers(-1000, 1000), rng.integers(-1000, 1000)) 
location = (rng.integers(-1000, 1000), rng.integers(-1000, 1000)) - bounds = (*location, *(sum(x) for x in zip(size, location))) + bounds = (*location, *(sum(x) for x in zip(size, location, strict=False))) assert utils.transforms.bounds2locsize(bounds)[1] == pytest.approx(size) @@ -1137,7 +1137,7 @@ def test_parse_cv2_interpolaton() -> None: cases = [str.upper, str.lower, str.capitalize] mode_strings = ["cubic", "linear", "area", "lanczos"] mode_enums = [cv2.INTER_CUBIC, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_LANCZOS4] - for string, cv2_enum in zip(mode_strings, mode_enums): + for string, cv2_enum in zip(mode_strings, mode_enums, strict=False): for case in cases: assert utils.misc.parse_cv2_interpolaton(case(string)) == cv2_enum assert utils.misc.parse_cv2_interpolaton(cv2_enum) == cv2_enum diff --git a/tests/test_wsireader.py b/tests/test_wsireader.py index 91aa9ad36..3d1da4804 100644 --- a/tests/test_wsireader.py +++ b/tests/test_wsireader.py @@ -7,9 +7,10 @@ import logging import re import shutil +from collections.abc import Callable from copy import deepcopy from pathlib import Path -from typing import TYPE_CHECKING, Callable +from typing import TYPE_CHECKING from unittest.mock import patch import cv2 @@ -135,7 +136,7 @@ def strictly_increasing(sequence: Iterable) -> bool: bool: True if strictly increasing. """ - return all(a < b for a, b in zip(sequence, sequence[1:])) + return all(a < b for a, b in zip(sequence, sequence[1:], strict=False)) def strictly_decreasing(sequence: Iterable) -> bool: @@ -149,7 +150,7 @@ def strictly_decreasing(sequence: Iterable) -> bool: bool: True if strictly decreasing. 
""" - return all(a > b for a, b in zip(sequence, sequence[1:])) + return all(a > b for a, b in zip(sequence, sequence[1:], strict=False)) def read_rect_objective_power(wsi: WSIReader, location: IntPair, size: IntPair) -> None: @@ -557,6 +558,7 @@ def test_find_optimal_level_and_downsample_mpp(sample_ndpi: Path) -> None: mpps, expected_levels, expected_scales, + strict=False, ): read_level, post_read_scale_factor = wsi._find_optimal_level_and_downsample( mpp, @@ -573,7 +575,9 @@ def test_find_optimal_level_and_downsample_power(sample_ndpi: Path) -> None: objective_powers = [20, 10, 5, 2.5, 1.25] expected_levels = [0, 1, 2, 3, 4] - for objective_power, expected_level in zip(objective_powers, expected_levels): + for objective_power, expected_level in zip( + objective_powers, expected_levels, strict=False + ): read_level, post_read_scale_factor = wsi._find_optimal_level_and_downsample( objective_power, "power", @@ -1498,7 +1502,7 @@ def test_tissue_mask_morphological(sample_svs: Path) -> None: resolutions = [5, 10] units = ["power", "mpp"] scale_fns = [lambda x: x * 2, lambda x: 32 / x] - for unit, scaler in zip(units, scale_fns): + for unit, scaler in zip(units, scale_fns, strict=False): for resolution in resolutions: mask = wsi.tissue_mask( method="morphological", diff --git a/tiatoolbox/annotation/dsl.py b/tiatoolbox/annotation/dsl.py index cbf9bf7d5..7caf9cba3 100644 --- a/tiatoolbox/annotation/dsl.py +++ b/tiatoolbox/annotation/dsl.py @@ -61,9 +61,9 @@ import json import operator import re +from collections.abc import Callable from dataclasses import dataclass from numbers import Number -from typing import Callable from typing_extensions import TypedDict diff --git a/tiatoolbox/annotation/storage.py b/tiatoolbox/annotation/storage.py index ae65ccede..012ea23d2 100644 --- a/tiatoolbox/annotation/storage.py +++ b/tiatoolbox/annotation/storage.py @@ -41,6 +41,7 @@ from abc import ABC, abstractmethod from collections import defaultdict from collections.abc import ( + 
Callable, Generator, ItemsView, Iterable, @@ -56,7 +57,6 @@ IO, TYPE_CHECKING, Any, - Callable, ClassVar, TypeVar, cast, @@ -746,7 +746,7 @@ def append_many( if keys: result.extend( self.append(annotation, key) - for key, annotation in zip(keys, annotations) + for key, annotation in zip(keys, annotations, strict=False) ) return result result.extend(self.append(annotation) for annotation in annotations) @@ -816,7 +816,9 @@ def patch_many( properties_iter = properties_iter or ({} for _ in keys) # pragma: no branch geometries = geometries or (None for _ in keys) # pragma: no branch # Update the store - for key, geometry, properties in zip(keys, geometries, properties_iter): + for key, geometry, properties in zip( + keys, geometries, properties_iter, strict=False + ): properties_ = cast("dict[str, Any]", copy.deepcopy(properties)) self.patch(key, geometry, properties_) @@ -2722,7 +2724,7 @@ def append_many( if self.auto_commit: cur.execute("BEGIN") result = [] - for annotation, key in zip(annotations, keys): + for annotation, key in zip(annotations, keys, strict=False): self._append(key, annotation, cur) result.append(key) if self.auto_commit: @@ -3640,7 +3642,9 @@ def patch_many( # Begin a transaction if self.auto_commit: cur.execute("BEGIN") - for key, geometry, properties in zip(keys, geometries, properties_iter): + for key, geometry, properties in zip( + keys, geometries, properties_iter, strict=False + ): # Annotation is not in DB: if key not in self: self._append(str(key), Annotation(geometry, properties), cur) @@ -3680,8 +3684,10 @@ def _patch_geometry( cur (sqlite3.Cursor): The cursor to use. 
""" - bounds = dict(zip(("min_x", "min_y", "max_x", "max_y"), geometry.bounds)) - xy = dict(zip("xy", np.array(geometry.centroid.coords[0]))) + bounds = dict( + zip(("min_x", "min_y", "max_x", "max_y"), geometry.bounds, strict=False) + ) + xy = dict(zip("xy", np.array(geometry.centroid.coords[0]), strict=False)) query_parameters = dict( **bounds, **xy, diff --git a/tiatoolbox/cli/common.py b/tiatoolbox/cli/common.py index c663170c9..bec96134e 100644 --- a/tiatoolbox/cli/common.py +++ b/tiatoolbox/cli/common.py @@ -2,8 +2,9 @@ from __future__ import annotations +from collections.abc import Callable from pathlib import Path -from typing import TYPE_CHECKING, Any, Callable +from typing import TYPE_CHECKING, Any import click diff --git a/tiatoolbox/cli/show_wsi.py b/tiatoolbox/cli/show_wsi.py index 6ac5856af..f3d61565f 100644 --- a/tiatoolbox/cli/show_wsi.py +++ b/tiatoolbox/cli/show_wsi.py @@ -49,7 +49,9 @@ def show_wsi( if len(name) == 0: app = TileServer("TileServer", list(img_input), renderer=renderer) elif len(name) == len(img_input): - app = TileServer("TileServer", dict(zip(name, img_input)), renderer=renderer) + app = TileServer( + "TileServer", dict(zip(name, img_input, strict=False)), renderer=renderer + ) else: msg = "if names are provided, must match the number of paths provided" raise ValueError(msg) diff --git a/tiatoolbox/models/architecture/hovernet.py b/tiatoolbox/models/architecture/hovernet.py index 9798df62a..c0be9ad47 100644 --- a/tiatoolbox/models/architecture/hovernet.py +++ b/tiatoolbox/models/architecture/hovernet.py @@ -694,7 +694,7 @@ def get_instance_info(pred_inst: np.ndarray, pred_type: np.ndarray = None) -> di inst_type = inst_type_crop[inst_map_crop] (type_list, type_pixels) = np.unique(inst_type, return_counts=True) - type_list = list(zip(type_list, type_pixels)) + type_list = list(zip(type_list, type_pixels, strict=False)) type_list = sorted(type_list, key=lambda x: x[1], reverse=True) inst_type = type_list[0][0] diff --git 
a/tiatoolbox/models/dataset/classification.py b/tiatoolbox/models/dataset/classification.py index b73d2c9f3..9faa6ddc9 100644 --- a/tiatoolbox/models/dataset/classification.py +++ b/tiatoolbox/models/dataset/classification.py @@ -2,8 +2,9 @@ from __future__ import annotations +from collections.abc import Callable from pathlib import Path -from typing import TYPE_CHECKING, Callable +from typing import TYPE_CHECKING import cv2 import numpy as np diff --git a/tiatoolbox/models/dataset/dataset_abc.py b/tiatoolbox/models/dataset/dataset_abc.py index b60ecd66e..dba832931 100644 --- a/tiatoolbox/models/dataset/dataset_abc.py +++ b/tiatoolbox/models/dataset/dataset_abc.py @@ -3,8 +3,9 @@ from __future__ import annotations from abc import ABC, abstractmethod +from collections.abc import Callable from pathlib import Path -from typing import TYPE_CHECKING, Callable, Union +from typing import TYPE_CHECKING, Union if TYPE_CHECKING: # pragma: no cover from collections.abc import Iterable @@ -12,7 +13,7 @@ try: from typing import TypeGuard except ImportError: - from typing_extensions import TypeGuard # to support python <3.10 + from typing import TypeGuard # to support python <3.10 import numpy as np @@ -20,7 +21,7 @@ from tiatoolbox.utils import imread -input_type = Union[list[Union[str, Path, np.ndarray]], np.ndarray] +input_type = Union[list[str | Path | np.ndarray], np.ndarray] class PatchDatasetABC(ABC, torch.utils.data.Dataset): diff --git a/tiatoolbox/models/dataset/info.py b/tiatoolbox/models/dataset/info.py index 7d6bd8f28..b95a069b5 100644 --- a/tiatoolbox/models/dataset/info.py +++ b/tiatoolbox/models/dataset/info.py @@ -119,7 +119,7 @@ def __init__( paths.sort() all_paths.extend(paths) uid_name_map[label_id] = label_name - inputs, labels = list(zip(*all_paths)) + inputs, labels = list(zip(*all_paths, strict=False)) self.label_names = uid_name_map self.inputs = list(inputs) # type casting to list diff --git a/tiatoolbox/models/engine/multi_task_segmentor.py 
b/tiatoolbox/models/engine/multi_task_segmentor.py index 2d3df757f..fccc05fb0 100644 --- a/tiatoolbox/models/engine/multi_task_segmentor.py +++ b/tiatoolbox/models/engine/multi_task_segmentor.py @@ -23,7 +23,8 @@ from __future__ import annotations import shutil -from typing import TYPE_CHECKING, Callable +from collections.abc import Callable +from typing import TYPE_CHECKING # replace with the sql database once the PR in place import joblib @@ -115,7 +116,7 @@ def _process_tile_predictions( # skipcq: PY-R1000 (top_left_x, top_left_y, bottom_x, bottom_y). """ - locations, predictions = list(zip(*tile_output)) + locations, predictions = list(zip(*tile_output, strict=False)) # convert from WSI space to tile space tile_tl = tile_bounds[:2] diff --git a/tiatoolbox/models/engine/nucleus_instance_segmentor.py b/tiatoolbox/models/engine/nucleus_instance_segmentor.py index 6649324b1..fc8b8914d 100644 --- a/tiatoolbox/models/engine/nucleus_instance_segmentor.py +++ b/tiatoolbox/models/engine/nucleus_instance_segmentor.py @@ -4,7 +4,7 @@ import uuid from collections import deque -from typing import Callable +from collections.abc import Callable # replace with the sql database once the PR in place import joblib @@ -257,7 +257,7 @@ def _process_tile_predictions( the tiling process. """ - locations, predictions = list(zip(*tile_output)) + locations, predictions = list(zip(*tile_output, strict=False)) # convert from WSI space to tile space tile_tl = tile_bounds[:2] @@ -651,13 +651,13 @@ def _infer_once(self: NucleusInstanceSegmentor) -> list: # repackage so that it's a N list, each contains # L x etc. output sample_outputs = [np.split(v, batch_size, axis=0) for v in sample_outputs] - sample_outputs = list(zip(*sample_outputs)) + sample_outputs = list(zip(*sample_outputs, strict=False)) # tensor to numpy, costly? 
sample_infos = sample_infos.numpy() sample_infos = np.split(sample_infos, batch_size, axis=0) - sample_outputs = list(zip(sample_infos, sample_outputs)) + sample_outputs = list(zip(sample_infos, sample_outputs, strict=False)) cum_output.extend(sample_outputs) pbar.update() pbar.close() diff --git a/tiatoolbox/models/engine/patch_predictor.py b/tiatoolbox/models/engine/patch_predictor.py index 76d3d3bd6..c14a04b7e 100644 --- a/tiatoolbox/models/engine/patch_predictor.py +++ b/tiatoolbox/models/engine/patch_predictor.py @@ -4,8 +4,9 @@ import copy from collections import OrderedDict +from collections.abc import Callable from pathlib import Path -from typing import TYPE_CHECKING, Callable +from typing import TYPE_CHECKING import numpy as np import torch @@ -949,7 +950,7 @@ def predict( # noqa: PLR0913 ioconfig.input_resolutions, ioconfig.input_resolutions[0]["units"], ) - fx_list = zip(fx_list, ioconfig.input_resolutions) + fx_list = zip(fx_list, ioconfig.input_resolutions, strict=False) fx_list = sorted(fx_list, key=lambda x: x[0]) highest_input_resolution = fx_list[0][1] diff --git a/tiatoolbox/models/engine/semantic_segmentor.py b/tiatoolbox/models/engine/semantic_segmentor.py index f4b85e5c1..61c98cdac 100644 --- a/tiatoolbox/models/engine/semantic_segmentor.py +++ b/tiatoolbox/models/engine/semantic_segmentor.py @@ -5,9 +5,10 @@ import copy import logging import shutil +from collections.abc import Callable from concurrent.futures import ProcessPoolExecutor from pathlib import Path -from typing import TYPE_CHECKING, Callable +from typing import TYPE_CHECKING import cv2 import joblib @@ -827,13 +828,13 @@ def _predict_one_wsi( # repackage so that it's an N list, each contains # L x etc. output sample_outputs = [np.split(v, batch_size, axis=0) for v in sample_outputs] - sample_outputs = list(zip(*sample_outputs)) + sample_outputs = list(zip(*sample_outputs, strict=False)) # tensor to numpy, costly? 
sample_infos = sample_infos.numpy() sample_infos = np.split(sample_infos, batch_size, axis=0) - sample_outputs = list(zip(sample_infos, sample_outputs)) + sample_outputs = list(zip(sample_infos, sample_outputs, strict=False)) if self.process_prediction_per_batch: self._process_predictions( sample_outputs, @@ -891,7 +892,7 @@ def _process_predictions( return # assume predictions is N, each item has L output element - locations, predictions = list(zip(*cum_batch_predictions)) + locations, predictions = list(zip(*cum_batch_predictions, strict=False)) # Nx4 (N x [tl_x, tl_y, br_x, br_y), denotes the location of # output patch this can exceed the image bound at the requested # resolution remove singleton due to split. @@ -1001,7 +1002,7 @@ def index(arr: np.ndarray, tl: np.ndarray, br: np.ndarray) -> np.ndarray: """Helper to shorten indexing.""" return arr[tl[0] : br[0], tl[1] : br[1]] - patch_infos = list(zip(locations, predictions)) + patch_infos = list(zip(locations, predictions, strict=False)) for _, patch_info in enumerate(patch_infos): # position is assumed to be in XY coordinate (bound_in_wsi, prediction) = patch_info @@ -1555,7 +1556,7 @@ def _process_predictions( """ # assume prediction_list is N, each item has L output elements - location_list, prediction_list = list(zip(*cum_batch_predictions)) + location_list, prediction_list = list(zip(*cum_batch_predictions, strict=False)) # Nx4 (N x [tl_x, tl_y, br_x, br_y), denotes the location of output # patch, this can exceed the image bound at the requested resolution # remove singleton due to split. 
diff --git a/tiatoolbox/models/models_abc.py b/tiatoolbox/models/models_abc.py index 0c82a9216..41eea2460 100644 --- a/tiatoolbox/models/models_abc.py +++ b/tiatoolbox/models/models_abc.py @@ -4,7 +4,8 @@ import os from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, Any, Callable +from collections.abc import Callable +from typing import TYPE_CHECKING, Any import torch import torch._dynamo diff --git a/tiatoolbox/tools/graph.py b/tiatoolbox/tools/graph.py index 0cc036b51..8d5bf8429 100644 --- a/tiatoolbox/tools/graph.py +++ b/tiatoolbox/tools/graph.py @@ -3,8 +3,9 @@ from __future__ import annotations from collections import defaultdict +from collections.abc import Callable from numbers import Number -from typing import TYPE_CHECKING, Callable, cast +from typing import TYPE_CHECKING, cast import numpy as np import torch diff --git a/tiatoolbox/tools/patchextraction.py b/tiatoolbox/tools/patchextraction.py index a6caf082c..e3df1740b 100644 --- a/tiatoolbox/tools/patchextraction.py +++ b/tiatoolbox/tools/patchextraction.py @@ -3,7 +3,8 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, Callable, TypedDict, overload +from collections.abc import Callable +from typing import TYPE_CHECKING, TypedDict, overload import numpy as np from typing_extensions import Unpack diff --git a/tiatoolbox/tools/pyramid.py b/tiatoolbox/tools/pyramid.py index 3483a38cb..787142492 100644 --- a/tiatoolbox/tools/pyramid.py +++ b/tiatoolbox/tools/pyramid.py @@ -546,7 +546,9 @@ def __init__( # skipcq: PYL-W0231 types = self.store.pquery(f"props[{self.renderer.score_prop!r}]") # make a random dictionary colour map colors = random_colors(len(types), bright=True) - mapper = {key: (*color, 1) for key, color in zip(types, colors)} + mapper = { + key: (*color, 1) for key, color in zip(types, colors, strict=False) + } self.renderer.mapper = lambda x: mapper[x] def get_thumb_tile(self: AnnotationTileGenerator) -> Image.Image: 
diff --git a/tiatoolbox/tools/registration/wsi_registration.py b/tiatoolbox/tools/registration/wsi_registration.py index fdca5200a..8bd505cc6 100644 --- a/tiatoolbox/tools/registration/wsi_registration.py +++ b/tiatoolbox/tools/registration/wsi_registration.py @@ -3,7 +3,8 @@ from __future__ import annotations import itertools -from typing import TYPE_CHECKING, Callable, cast +from collections.abc import Callable +from typing import TYPE_CHECKING, cast import cv2 import numpy as np @@ -1382,11 +1383,14 @@ def estimate_bspline_transform( for size, spacing in zip( fixed_image_inv_sitk.GetSize(), fixed_image_inv_sitk.GetSpacing(), + strict=False, ) ] mesh_size = [ int(image_size / grid_spacing + 0.5) - for image_size, grid_spacing in zip(image_physical_size, grid_physical_spacing) + for image_size, grid_spacing in zip( + image_physical_size, grid_physical_spacing, strict=False + ) ] mesh_size = [int(sz / 4 + 0.5) for sz in mesh_size] diff --git a/tiatoolbox/type_hints.py b/tiatoolbox/type_hints.py index 20ff94284..039d55b5e 100644 --- a/tiatoolbox/type_hints.py +++ b/tiatoolbox/type_hints.py @@ -2,8 +2,8 @@ from __future__ import annotations -from collections.abc import Sequence -from typing import Callable, Literal, SupportsFloat, Union +from collections.abc import Callable, Sequence +from typing import Literal, SupportsFloat, Union import numpy as np from shapely.geometry import LineString, Point, Polygon # type: ignore[import-untyped] diff --git a/tiatoolbox/utils/env_detection.py b/tiatoolbox/utils/env_detection.py index bc6f72ca4..04e7abae1 100644 --- a/tiatoolbox/utils/env_detection.py +++ b/tiatoolbox/utils/env_detection.py @@ -240,7 +240,7 @@ def has_network( # Connect to host connection = socket.create_connection((host, 80), timeout=timeout) connection.close() - except (socket.gaierror, socket.timeout): + except (TimeoutError, socket.gaierror): return False else: return True diff --git a/tiatoolbox/utils/image.py b/tiatoolbox/utils/image.py index 
db22eef21..26f4bc9f7 100644 --- a/tiatoolbox/utils/image.py +++ b/tiatoolbox/utils/image.py @@ -2,7 +2,8 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Callable +from collections.abc import Callable +from typing import TYPE_CHECKING import numpy as np from PIL import Image diff --git a/tiatoolbox/utils/transforms.py b/tiatoolbox/utils/transforms.py index 8c2817b75..bb9f5670d 100644 --- a/tiatoolbox/utils/transforms.py +++ b/tiatoolbox/utils/transforms.py @@ -373,7 +373,7 @@ def bounds2slices( slice_array = np.stack([start[::-1], stop[::-1]], axis=1) slices = [] - for x, s in zip(slice_array, stride_array): + for x, s in zip(slice_array, stride_array, strict=False): slices.append(slice(x[0], x[1], s)) return tuple(slices) diff --git a/tiatoolbox/utils/visualization.py b/tiatoolbox/utils/visualization.py index 6317e5af3..312f92487 100644 --- a/tiatoolbox/utils/visualization.py +++ b/tiatoolbox/utils/visualization.py @@ -4,7 +4,8 @@ import colorsys import random -from typing import TYPE_CHECKING, Callable, TypedDict, cast +from collections.abc import Callable +from typing import TYPE_CHECKING, TypedDict, cast import cv2 import matplotlib as mpl @@ -217,7 +218,7 @@ def overlay_prediction_mask( return overlay # Create colorbar parameters - name_list, color_list = zip(*label_info.values()) # Unzip values + name_list, color_list = zip(*label_info.values(), strict=False) # Unzip values color_list_arr = np.array(color_list) / 255 uid_list = list(label_info.keys()) cmap = mpl.colors.ListedColormap(color_list_arr) @@ -1048,7 +1049,7 @@ def _set_mapper( if isinstance(value, list): colors = random_colors(len(value), bright=True) self.__dict__["mapper"] = { - key: (*color, 1) for key, color in zip(value, colors) + key: (*color, 1) for key, color in zip(value, colors, strict=False) } if isinstance(value, dict): self.raw_mapper = value diff --git a/tiatoolbox/visualization/bokeh_app/main.py b/tiatoolbox/visualization/bokeh_app/main.py index 
81395300d..fac6a8058 100644 --- a/tiatoolbox/visualization/bokeh_app/main.py +++ b/tiatoolbox/visualization/bokeh_app/main.py @@ -7,9 +7,10 @@ import tempfile import urllib from cmath import pi +from collections.abc import Callable from pathlib import Path, PureWindowsPath from shutil import rmtree -from typing import TYPE_CHECKING, Any, Callable, SupportsFloat +from typing import TYPE_CHECKING, Any, SupportsFloat import numpy as np import requests @@ -1634,6 +1635,7 @@ def gather_ui_elements( # noqa: PLR0915 model_row, type_select_row, ], + strict=False, ), ) if "ui_elements_1" in doc_config: @@ -1667,6 +1669,7 @@ def gather_ui_elements( # noqa: PLR0915 edge_size_spinner, res_switch, ], + strict=False, ), ) if "ui_elements_2" in doc_config: diff --git a/tiatoolbox/visualization/tileserver.py b/tiatoolbox/visualization/tileserver.py index b7c750938..236868f17 100644 --- a/tiatoolbox/visualization/tileserver.py +++ b/tiatoolbox/visualization/tileserver.py @@ -422,7 +422,7 @@ def change_mapper(self: TileServer) -> str: session_id = self._get_session_id() cmap = json.loads(request.form["cmap"]) if isinstance(cmap, dict): - cmap = dict(zip(cmap["keys"], cmap["values"])) + cmap = dict(zip(cmap["keys"], cmap["values"], strict=False)) self.renderers[session_id].score_fn = lambda x: x self.renderers[session_id].mapper = cmap self.renderers[session_id].function_mapper = None diff --git a/tiatoolbox/wsicore/wsireader.py b/tiatoolbox/wsicore/wsireader.py index 46f10cc36..0791bc4fa 100644 --- a/tiatoolbox/wsicore/wsireader.py +++ b/tiatoolbox/wsicore/wsireader.py @@ -3439,7 +3439,7 @@ def __init__(self: ArrayView, array: zarr.Array, axes: str) -> None: """ self.array = array self.axes = axes - self._shape = dict(zip(self.axes, self.array.shape)) + self._shape = dict(zip(self.axes, self.array.shape, strict=False)) @property def shape(self: ArrayView) -> tuple: @@ -6253,6 +6253,7 @@ def __init__( for s_dims, t_dims in zip( self.wsi_reader.info.level_dimensions, 
self.target_wsi_reader.info.level_dimensions, + strict=False, ) ] self.level_pads = [ @@ -6260,6 +6261,7 @@ def __init__( for s_dims, t_dims in zip( self.wsi_reader.info.level_dimensions, self.target_wsi_reader.info.level_dimensions, + strict=False, ) ] self.get_location_array(disp_array) @@ -6316,6 +6318,7 @@ def get_location_array(self, disp_array: np.ndarray) -> None: for s_dims, t_dims in zip( self.wsi_reader.info.level_dimensions, self.target_wsi_reader.info.level_dimensions, + strict=False, ) ) wsimeta.slide_dimensions = wsimeta.level_dimensions[0] From 32d0dfec137b90fd8f56accda6e9558f96abd39f Mon Sep 17 00:00:00 2001 From: Shan E Ahmed Raza <13048456+shaneahmed@users.noreply.github.com> Date: Fri, 10 Oct 2025 11:42:08 +0100 Subject: [PATCH 05/16] :pushpin: Update `numpy` version to `>2.0.0` --- requirements/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 187da90c0..fedd80fa2 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -14,7 +14,7 @@ joblib>=1.1.1 jupyterlab>=3.5.2 matplotlib>=3.6.2 numba>=0.57.0 -numpy>=1.23.5, <2.0.0 +numpy>=2.0.0 opencv-python>=4.6.0 openslide-bin>=4.0.0.2 openslide-python>=1.4.0 From 74c8822c6887b0eb46c9048a4f0c16f46308106b Mon Sep 17 00:00:00 2001 From: Shan E Ahmed Raza <13048456+shaneahmed@users.noreply.github.com> Date: Fri, 10 Oct 2025 12:14:21 +0100 Subject: [PATCH 06/16] :pushpin: Update cuda version --- requirements/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/requirements.txt b/requirements/requirements.txt index fedd80fa2..6fce32280 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -1,5 +1,5 @@ # torch installation ---extra-index-url https://download.pytorch.org/whl/cu118; sys_platform != "darwin" +--extra-index-url https://download.pytorch.org/whl/cu126; sys_platform != "darwin" aiohttp>=3.8.1 
albumentations>=1.3.0 bokeh>=3.1.1, <3.6.0 From 6d20d4dd3b6272027ea43dc6671ea5d794b0b2b3 Mon Sep 17 00:00:00 2001 From: Shan E Ahmed Raza <13048456+shaneahmed@users.noreply.github.com> Date: Fri, 10 Oct 2025 12:23:59 +0100 Subject: [PATCH 07/16] :art: Apply `ruff` fixes --- examples/full-pipelines/slide-graph.ipynb | 3 +- .../inference-pipelines/slide-graph.ipynb | 3 +- tests/conftest.py | 5 +- tests/models/test_patch_predictor.py | 5 +- tests/models/test_semantic_segmentation.py | 5 +- tests/test_annotation_tilerendering.py | 5 +- tests/test_dsl.py | 6 ++- tests/test_tileserver.py | 3 +- tests/test_wsireader.py | 54 +++++++++---------- tiatoolbox/cli/common.py | 3 +- tiatoolbox/models/dataset/classification.py | 3 +- tiatoolbox/models/dataset/dataset_abc.py | 3 +- .../models/engine/multi_task_segmentor.py | 3 +- .../engine/nucleus_instance_segmentor.py | 5 +- tiatoolbox/models/engine/patch_predictor.py | 3 +- .../models/engine/semantic_segmentor.py | 2 +- tiatoolbox/models/models_abc.py | 2 +- tiatoolbox/tools/graph.py | 3 +- tiatoolbox/tools/patchextraction.py | 2 +- .../tools/registration/wsi_registration.py | 3 +- tiatoolbox/utils/image.py | 3 +- tiatoolbox/utils/visualization.py | 3 +- tiatoolbox/visualization/bokeh_app/main.py | 3 +- 23 files changed, 78 insertions(+), 52 deletions(-) diff --git a/examples/full-pipelines/slide-graph.ipynb b/examples/full-pipelines/slide-graph.ipynb index 33a8a2b5f..965d77eec 100644 --- a/examples/full-pipelines/slide-graph.ipynb +++ b/examples/full-pipelines/slide-graph.ipynb @@ -132,7 +132,6 @@ "import shutil\n", "import warnings\n", "from collections import OrderedDict\n", - "from collections.abc import Callable\n", "from pathlib import Path\n", "from typing import TYPE_CHECKING\n", "\n", @@ -193,7 +192,7 @@ ")\n", "\n", "if TYPE_CHECKING: # pragma: no cover\n", - " from collections.abc import Iterator\n", + " from collections.abc import Callable, Iterator\n", "\n", "warnings.filterwarnings(\"ignore\")\n", 
"mpl.rcParams[\"figure.dpi\"] = 300 # for high resolution figure in notebook" diff --git a/examples/inference-pipelines/slide-graph.ipynb b/examples/inference-pipelines/slide-graph.ipynb index 017adc52c..e102799d5 100644 --- a/examples/inference-pipelines/slide-graph.ipynb +++ b/examples/inference-pipelines/slide-graph.ipynb @@ -218,7 +218,6 @@ "import random\n", "import shutil\n", "import warnings\n", - "from collections.abc import Callable\n", "from pathlib import Path\n", "from typing import TYPE_CHECKING\n", "\n", @@ -261,6 +260,8 @@ "from tiatoolbox.wsicore.wsireader import WSIReader\n", "\n", "if TYPE_CHECKING:\n", + " from collections.abc import Callable\n", + "\n", " from tiatoolbox.wsicore.wsimeta import Resolution, Units\n", "\n", "warnings.filterwarnings(\"ignore\")\n", diff --git a/tests/conftest.py b/tests/conftest.py index 104b422ef..092913ea9 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -5,8 +5,8 @@ import os import shutil import time -from collections.abc import Callable from pathlib import Path +from typing import TYPE_CHECKING import pytest import torch @@ -16,6 +16,9 @@ from tiatoolbox.data import _fetch_remote_sample from tiatoolbox.utils.env_detection import has_gpu, running_on_ci +if TYPE_CHECKING: + from collections.abc import Callable + # ------------------------------------------------------------------------------------- # Generate Parameterized Tests # ------------------------------------------------------------------------------------- diff --git a/tests/models/test_patch_predictor.py b/tests/models/test_patch_predictor.py index ae85101ea..c878d64ff 100644 --- a/tests/models/test_patch_predictor.py +++ b/tests/models/test_patch_predictor.py @@ -4,8 +4,8 @@ import copy import shutil -from collections.abc import Callable from pathlib import Path +from typing import TYPE_CHECKING import cv2 import numpy as np @@ -28,6 +28,9 @@ from tiatoolbox.utils.misc import select_device from tiatoolbox.wsicore.wsireader import WSIReader +if 
TYPE_CHECKING: + from collections.abc import Callable + ON_GPU = toolbox_env.has_gpu() RNG = np.random.default_rng() # Numpy Random Generator diff --git a/tests/models/test_semantic_segmentation.py b/tests/models/test_semantic_segmentation.py index bddc32492..4a6a90800 100644 --- a/tests/models/test_semantic_segmentation.py +++ b/tests/models/test_semantic_segmentation.py @@ -8,8 +8,8 @@ import gc import multiprocessing import shutil -from collections.abc import Callable from pathlib import Path +from typing import TYPE_CHECKING import numpy as np import pytest @@ -35,6 +35,9 @@ from tiatoolbox.utils.misc import select_device from tiatoolbox.wsicore.wsireader import WSIReader +if TYPE_CHECKING: + from collections.abc import Callable + ON_GPU = toolbox_env.has_gpu() # The value is based on 2 TitanXP each with 12GB BATCH_SIZE = 1 if not ON_GPU else 16 diff --git a/tests/test_annotation_tilerendering.py b/tests/test_annotation_tilerendering.py index fceaa398f..4e373d16f 100644 --- a/tests/test_annotation_tilerendering.py +++ b/tests/test_annotation_tilerendering.py @@ -6,8 +6,8 @@ from __future__ import annotations -from collections.abc import Callable from pathlib import Path +from typing import TYPE_CHECKING import matplotlib.pyplot as plt import numpy as np @@ -26,6 +26,9 @@ from tiatoolbox.utils.visualization import AnnotationRenderer, _find_minimum_mpp_sf from tiatoolbox.wsicore import wsireader +if TYPE_CHECKING: + from collections.abc import Callable + RNG = np.random.default_rng(0) # Numpy Random Generator diff --git a/tests/test_dsl.py b/tests/test_dsl.py index 10697797e..66c49717e 100644 --- a/tests/test_dsl.py +++ b/tests/test_dsl.py @@ -4,9 +4,8 @@ import json import sqlite3 -from collections.abc import Callable from numbers import Number -from typing import ClassVar +from typing import TYPE_CHECKING, ClassVar import pytest @@ -20,6 +19,9 @@ py_regexp, ) +if TYPE_CHECKING: + from collections.abc import Callable + BINARY_OP_STRINGS = [ "+", "-", diff --git 
a/tests/test_tileserver.py b/tests/test_tileserver.py index 3b84456c5..7c8bd2395 100644 --- a/tests/test_tileserver.py +++ b/tests/test_tileserver.py @@ -5,7 +5,6 @@ import json import logging import urllib -from collections.abc import Callable from pathlib import Path, PureWindowsPath from typing import TYPE_CHECKING, NoReturn @@ -25,6 +24,8 @@ from tiatoolbox.wsicore import WSIReader if TYPE_CHECKING: + from collections.abc import Callable + from flask.testing import FlaskClient RNG = np.random.default_rng(0) # Numpy Random Generator diff --git a/tests/test_wsireader.py b/tests/test_wsireader.py index 96910d35a..152343436 100644 --- a/tests/test_wsireader.py +++ b/tests/test_wsireader.py @@ -3,11 +3,11 @@ from __future__ import annotations import copy +import itertools import json import logging import re import shutil -from collections.abc import Callable from copy import deepcopy from pathlib import Path from typing import TYPE_CHECKING @@ -51,7 +51,7 @@ ) if TYPE_CHECKING: # pragma: no cover - from collections.abc import Iterable + from collections.abc import Callable, Iterable import requests from openslide import OpenSlide @@ -136,7 +136,7 @@ def strictly_increasing(sequence: Iterable) -> bool: bool: True if strictly increasing. """ - return all(a < b for a, b in zip(sequence, sequence[1:], strict=False)) + return all(a < b for a, b in itertools.pairwise(sequence)) def strictly_decreasing(sequence: Iterable) -> bool: @@ -150,7 +150,7 @@ def strictly_decreasing(sequence: Iterable) -> bool: bool: True if strictly decreasing. 
""" - return all(a > b for a, b in zip(sequence, sequence[1:], strict=False)) + return all(a > b for a, b in itertools.pairwise(sequence)) def read_rect_objective_power(wsi: WSIReader, location: IntPair, size: IntPair) -> None: @@ -1900,29 +1900,29 @@ def test_command_line_jp2_read_bounds(sample_jp2: Path, tmp_path: Path) -> None: assert Path(tmp_path).joinpath("../im_region.jpg").is_file() -@pytest.mark.skipif( - utils.env_detection.running_on_ci(), - reason="No need to display image on travis.", -) -def test_command_line_jp2_read_bounds_show(sample_jp2: Path) -> None: - """Test JP2 read_bounds with mode as 'show'.""" - runner = CliRunner() - read_bounds_result = runner.invoke( - cli.main, - [ - "read-bounds", - "--img-input", - str(Path(sample_jp2)), - "--resolution", - "0", - "--units", - "level", - "--mode", - "show", - ], - ) - - assert read_bounds_result.exit_code == 0 +# @pytest.mark.skipif( +# utils.env_detection.running_on_ci(), +# reason="No need to display image on travis.", +# ) +# def test_command_line_jp2_read_bounds_show(sample_jp2: Path) -> None: +# """Test JP2 read_bounds with mode as 'show'.""" +# runner = CliRunner() +# read_bounds_result = runner.invoke( +# cli.main, +# [ +# "read-bounds", +# "--img-input", +# str(Path(sample_jp2)), +# "--resolution", +# "0", +# "--units", +# "level", +# "--mode", +# "show", +# ], +# ) +# +# assert read_bounds_result.exit_code == 0 def test_command_line_unsupported_file_read_bounds(sample_svs: Path) -> None: diff --git a/tiatoolbox/cli/common.py b/tiatoolbox/cli/common.py index bec96134e..88364fa45 100644 --- a/tiatoolbox/cli/common.py +++ b/tiatoolbox/cli/common.py @@ -2,13 +2,14 @@ from __future__ import annotations -from collections.abc import Callable from pathlib import Path from typing import TYPE_CHECKING, Any import click if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable + from tiatoolbox.models.models_abc import IOConfigABC diff --git 
a/tiatoolbox/models/dataset/classification.py b/tiatoolbox/models/dataset/classification.py index 9faa6ddc9..359d8c52a 100644 --- a/tiatoolbox/models/dataset/classification.py +++ b/tiatoolbox/models/dataset/classification.py @@ -2,7 +2,6 @@ from __future__ import annotations -from collections.abc import Callable from pathlib import Path from typing import TYPE_CHECKING @@ -18,6 +17,8 @@ from tiatoolbox.wsicore.wsireader import VirtualWSIReader, WSIReader if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable + import torch from PIL.Image import Image diff --git a/tiatoolbox/models/dataset/dataset_abc.py b/tiatoolbox/models/dataset/dataset_abc.py index dba832931..0afd8523c 100644 --- a/tiatoolbox/models/dataset/dataset_abc.py +++ b/tiatoolbox/models/dataset/dataset_abc.py @@ -3,12 +3,11 @@ from __future__ import annotations from abc import ABC, abstractmethod -from collections.abc import Callable from pathlib import Path from typing import TYPE_CHECKING, Union if TYPE_CHECKING: # pragma: no cover - from collections.abc import Iterable + from collections.abc import Callable, Iterable try: from typing import TypeGuard diff --git a/tiatoolbox/models/engine/multi_task_segmentor.py b/tiatoolbox/models/engine/multi_task_segmentor.py index fccc05fb0..ca9d1d3e7 100644 --- a/tiatoolbox/models/engine/multi_task_segmentor.py +++ b/tiatoolbox/models/engine/multi_task_segmentor.py @@ -23,7 +23,6 @@ from __future__ import annotations import shutil -from collections.abc import Callable from typing import TYPE_CHECKING # replace with the sql database once the PR in place @@ -42,6 +41,8 @@ ) if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable + import torch from tiatoolbox.type_hints import IntBounds diff --git a/tiatoolbox/models/engine/nucleus_instance_segmentor.py b/tiatoolbox/models/engine/nucleus_instance_segmentor.py index fc8b8914d..a822e9806 100644 --- a/tiatoolbox/models/engine/nucleus_instance_segmentor.py +++ 
b/tiatoolbox/models/engine/nucleus_instance_segmentor.py @@ -4,7 +4,7 @@ import uuid from collections import deque -from collections.abc import Callable +from typing import TYPE_CHECKING # replace with the sql database once the PR in place import joblib @@ -21,6 +21,9 @@ ) from tiatoolbox.tools.patchextraction import PatchExtractor +if TYPE_CHECKING: + from collections.abc import Callable + def _process_instance_predictions( inst_dict: dict, diff --git a/tiatoolbox/models/engine/patch_predictor.py b/tiatoolbox/models/engine/patch_predictor.py index c14a04b7e..820f04fe9 100644 --- a/tiatoolbox/models/engine/patch_predictor.py +++ b/tiatoolbox/models/engine/patch_predictor.py @@ -4,7 +4,6 @@ import copy from collections import OrderedDict -from collections.abc import Callable from pathlib import Path from typing import TYPE_CHECKING @@ -22,6 +21,8 @@ from tiatoolbox.wsicore.wsireader import VirtualWSIReader, WSIReader if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable + from tiatoolbox.type_hints import IntPair, Resolution, Units diff --git a/tiatoolbox/models/engine/semantic_segmentor.py b/tiatoolbox/models/engine/semantic_segmentor.py index 61c98cdac..2db26114f 100644 --- a/tiatoolbox/models/engine/semantic_segmentor.py +++ b/tiatoolbox/models/engine/semantic_segmentor.py @@ -5,7 +5,6 @@ import copy import logging import shutil -from collections.abc import Callable from concurrent.futures import ProcessPoolExecutor from pathlib import Path from typing import TYPE_CHECKING @@ -31,6 +30,7 @@ from tiatoolbox.wsicore.wsireader import VirtualWSIReader, WSIMeta, WSIReader if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable from multiprocessing.managers import Namespace from tiatoolbox.type_hints import IntPair, Resolution, Units diff --git a/tiatoolbox/models/models_abc.py b/tiatoolbox/models/models_abc.py index 41eea2460..4e1f1d755 100644 --- a/tiatoolbox/models/models_abc.py +++ b/tiatoolbox/models/models_abc.py @@ -4,7 
+4,6 @@ import os from abc import ABC, abstractmethod -from collections.abc import Callable from typing import TYPE_CHECKING, Any import torch @@ -17,6 +16,7 @@ torch._dynamo.config.suppress_errors = True # skipcq: PYL-W0212 # noqa: SLF001 if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable from pathlib import Path import numpy as np diff --git a/tiatoolbox/tools/graph.py b/tiatoolbox/tools/graph.py index 8d5bf8429..203ebbfd8 100644 --- a/tiatoolbox/tools/graph.py +++ b/tiatoolbox/tools/graph.py @@ -3,7 +3,6 @@ from __future__ import annotations from collections import defaultdict -from collections.abc import Callable from numbers import Number from typing import TYPE_CHECKING, cast @@ -16,6 +15,8 @@ from scipy.spatial import Delaunay, cKDTree if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable + from matplotlib.axes import Axes from numpy.typing import ArrayLike diff --git a/tiatoolbox/tools/patchextraction.py b/tiatoolbox/tools/patchextraction.py index e3df1740b..53ae61077 100644 --- a/tiatoolbox/tools/patchextraction.py +++ b/tiatoolbox/tools/patchextraction.py @@ -3,7 +3,6 @@ from __future__ import annotations from abc import ABC, abstractmethod -from collections.abc import Callable from typing import TYPE_CHECKING, TypedDict, overload import numpy as np @@ -17,6 +16,7 @@ from tiatoolbox.wsicore import wsireader if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable from pathlib import Path from pandas import DataFrame diff --git a/tiatoolbox/tools/registration/wsi_registration.py b/tiatoolbox/tools/registration/wsi_registration.py index 8bd505cc6..c8ea8c8b5 100644 --- a/tiatoolbox/tools/registration/wsi_registration.py +++ b/tiatoolbox/tools/registration/wsi_registration.py @@ -3,7 +3,6 @@ from __future__ import annotations import itertools -from collections.abc import Callable from typing import TYPE_CHECKING, cast import cv2 @@ -25,6 +24,8 @@ from tiatoolbox.wsicore.wsireader import 
VirtualWSIReader, WSIReader if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable + from tiatoolbox.type_hints import IntBounds, Resolution, Units RGB_IMAGE_DIM = 3 diff --git a/tiatoolbox/utils/image.py b/tiatoolbox/utils/image.py index 26f4bc9f7..3ddd0bcd0 100644 --- a/tiatoolbox/utils/image.py +++ b/tiatoolbox/utils/image.py @@ -2,7 +2,6 @@ from __future__ import annotations -from collections.abc import Callable from typing import TYPE_CHECKING import numpy as np @@ -19,6 +18,8 @@ ) if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable + from tiatoolbox.type_hints import IntBounds, NumpyPadLiteral PADDING_TO_BOUNDS = np.array([-1, -1, 1, 1]) diff --git a/tiatoolbox/utils/visualization.py b/tiatoolbox/utils/visualization.py index 312f92487..f7e0b21ee 100644 --- a/tiatoolbox/utils/visualization.py +++ b/tiatoolbox/utils/visualization.py @@ -4,7 +4,6 @@ import colorsys import random -from collections.abc import Callable from typing import TYPE_CHECKING, TypedDict, cast import cv2 @@ -19,6 +18,8 @@ from tiatoolbox.enums import GeometryType if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable + from matplotlib.axes import Axes from matplotlib.cm import ScalarMappable from numpy.typing import ArrayLike diff --git a/tiatoolbox/visualization/bokeh_app/main.py b/tiatoolbox/visualization/bokeh_app/main.py index fac6a8058..5df7acb04 100644 --- a/tiatoolbox/visualization/bokeh_app/main.py +++ b/tiatoolbox/visualization/bokeh_app/main.py @@ -7,7 +7,6 @@ import tempfile import urllib from cmath import pi -from collections.abc import Callable from pathlib import Path, PureWindowsPath from shutil import rmtree from typing import TYPE_CHECKING, Any, SupportsFloat @@ -76,6 +75,8 @@ from tiatoolbox.wsicore.wsireader import WSIReader if TYPE_CHECKING: # pragma: no cover + from collections.abc import Callable + from bokeh.document import Document rng = np.random.default_rng() From 
cc2f8012e4dd3e0949ae169f68afffc21d291ea1 Mon Sep 17 00:00:00 2001 From: Shan E Ahmed Raza <13048456+shaneahmed@users.noreply.github.com> Date: Fri, 10 Oct 2025 12:55:11 +0100 Subject: [PATCH 08/16] :art: Apply `ruff` fixes --- tests/test_wsireader.py | 46 ++++++++++++++++++++-------------------- tiatoolbox/type_hints.py | 14 ++++++------ 2 files changed, 30 insertions(+), 30 deletions(-) diff --git a/tests/test_wsireader.py b/tests/test_wsireader.py index 152343436..315d92749 100644 --- a/tests/test_wsireader.py +++ b/tests/test_wsireader.py @@ -1900,29 +1900,29 @@ def test_command_line_jp2_read_bounds(sample_jp2: Path, tmp_path: Path) -> None: assert Path(tmp_path).joinpath("../im_region.jpg").is_file() -# @pytest.mark.skipif( -# utils.env_detection.running_on_ci(), -# reason="No need to display image on travis.", -# ) -# def test_command_line_jp2_read_bounds_show(sample_jp2: Path) -> None: -# """Test JP2 read_bounds with mode as 'show'.""" -# runner = CliRunner() -# read_bounds_result = runner.invoke( -# cli.main, -# [ -# "read-bounds", -# "--img-input", -# str(Path(sample_jp2)), -# "--resolution", -# "0", -# "--units", -# "level", -# "--mode", -# "show", -# ], -# ) -# -# assert read_bounds_result.exit_code == 0 +@pytest.mark.skipif( + utils.env_detection.running_on_ci(), + reason="No need to display image on travis.", +) +def test_command_line_jp2_read_bounds_show(sample_jp2: Path) -> None: + """Test JP2 read_bounds with mode as 'show'.""" + runner = CliRunner() + read_bounds_result = runner.invoke( + cli.main, + [ + "read-bounds", + "--img-input", + str(Path(sample_jp2)), + "--resolution", + "0", + "--units", + "level", + "--mode", + "show", + ], + ) + + assert read_bounds_result.exit_code == 0 def test_command_line_unsupported_file_read_bounds(sample_svs: Path) -> None: diff --git a/tiatoolbox/type_hints.py b/tiatoolbox/type_hints.py index 039d55b5e..b4b931e3e 100644 --- a/tiatoolbox/type_hints.py +++ b/tiatoolbox/type_hints.py @@ -3,7 +3,7 @@ from __future__ 
import annotations from collections.abc import Callable, Sequence -from typing import Literal, SupportsFloat, Union +from typing import Literal, SupportsFloat import numpy as np from shapely.geometry import LineString, Point, Polygon # type: ignore[import-untyped] @@ -11,24 +11,24 @@ # Proper type annotations for shapely is not yet available. -JSON = Union[dict[str, "JSON"], list["JSON"], str, int, float, bool, None] +JSON = dict[str, "JSON"] | list["JSON"] | str | int | float | bool | None NumPair = tuple[SupportsFloat, SupportsFloat] IntPair = tuple[int, int] # WSIReader -Resolution = Union[SupportsFloat, NumPair, np.ndarray, Sequence[SupportsFloat]] +Resolution = SupportsFloat | NumPair | np.ndarray | Sequence[SupportsFloat] Units = Literal["mpp", "power", "baseline", "level"] Bounds = tuple[SupportsFloat, SupportsFloat, SupportsFloat, SupportsFloat] IntBounds = tuple[int, int, int, int] # Annotation Store -Geometry = Union[Point, LineString, Polygon] +Geometry = Point | LineString | Polygon Properties = JSON # Could define this using a TypedDict -QueryGeometry = Union[Bounds, Geometry] +QueryGeometry = Bounds | Geometry CallablePredicate = Callable[[Properties], bool] CallableSelect = Callable[[Properties], Properties] -Predicate = Union[str, bytes, CallablePredicate] -Select = Union[str, bytes, CallableSelect] +Predicate = str | bytes | CallablePredicate +Select = str | bytes | CallableSelect NumpyPadLiteral = Literal[ "constant", "edge", From 31fe38a046102e295a513677852dec93fb632ba2 Mon Sep 17 00:00:00 2001 From: Shan E Ahmed Raza <13048456+shaneahmed@users.noreply.github.com> Date: Fri, 10 Oct 2025 12:56:37 +0100 Subject: [PATCH 09/16] :art: Apply `ruff` fixes --- tiatoolbox/models/dataset/dataset_abc.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tiatoolbox/models/dataset/dataset_abc.py b/tiatoolbox/models/dataset/dataset_abc.py index 0afd8523c..7d7160e48 100644 --- a/tiatoolbox/models/dataset/dataset_abc.py +++ 
b/tiatoolbox/models/dataset/dataset_abc.py @@ -4,7 +4,7 @@ from abc import ABC, abstractmethod from pathlib import Path -from typing import TYPE_CHECKING, Union +from typing import TYPE_CHECKING if TYPE_CHECKING: # pragma: no cover from collections.abc import Callable, Iterable @@ -20,7 +20,7 @@ from tiatoolbox.utils import imread -input_type = Union[list[str | Path | np.ndarray], np.ndarray] +input_type = list[str | Path | np.ndarray] | np.ndarray class PatchDatasetABC(ABC, torch.utils.data.Dataset): From bb19c32b7c6be037680f7013d8847207210d4615 Mon Sep 17 00:00:00 2001 From: Shan E Ahmed Raza <13048456+shaneahmed@users.noreply.github.com> Date: Fri, 10 Oct 2025 13:29:53 +0100 Subject: [PATCH 10/16] :bug: Fix multi-task segmentor --- tests/models/test_multi_task_segmentor.py | 2 +- tiatoolbox/models/engine/multi_task_segmentor.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/models/test_multi_task_segmentor.py b/tests/models/test_multi_task_segmentor.py index 3cec30121..c92dfb409 100644 --- a/tests/models/test_multi_task_segmentor.py +++ b/tests/models/test_multi_task_segmentor.py @@ -113,7 +113,7 @@ def test_functionality_hovernetplus(remote_sample: Callable, tmp_path: Path) -> multi_segmentor = MultiTaskSegmentor( pretrained_model="hovernetplus-oed", batch_size=BATCH_SIZE, - num_postproc_workers=NUM_POSTPROC_WORKERS, + num_postproc_workers=0, ) output = multi_segmentor.predict( [mini_wsi_svs], diff --git a/tiatoolbox/models/engine/multi_task_segmentor.py b/tiatoolbox/models/engine/multi_task_segmentor.py index ca9d1d3e7..e1b6a17c1 100644 --- a/tiatoolbox/models/engine/multi_task_segmentor.py +++ b/tiatoolbox/models/engine/multi_task_segmentor.py @@ -346,15 +346,17 @@ def _predict_one_wsi( indices_sem = [i for i, x in enumerate(self.output_types) if x == "semantic"] for s_id in range(len(indices_sem)): + shape = tuple(map(int, np.fliplr([wsi_proc_shape])[0])) self.wsi_layers.append( np.lib.format.open_memmap( 
f"{cache_dir}/{s_id}.npy", mode="w+", - shape=tuple(np.fliplr([wsi_proc_shape])[0]), + shape=shape, dtype=np.uint8, ), ) self.wsi_layers[s_id][:] = 0 + self.wsi_layers[s_id].flush() indices_inst = [i for i, x in enumerate(self.output_types) if x == "instance"] From a8773e6322cf0ccdc6849ebf3b3703df5acb35c2 Mon Sep 17 00:00:00 2001 From: Jiaqi Lv Date: Fri, 10 Oct 2025 15:58:33 +0100 Subject: [PATCH 11/16] fix mypy errors --- pyproject.toml | 2 +- tiatoolbox/models/architecture/utils.py | 10 +++++----- tiatoolbox/tools/stainnorm.py | 22 ++++++++++++---------- tiatoolbox/tools/tissuemask.py | 2 +- 4 files changed, 19 insertions(+), 17 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 1d7addb08..aec86265d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -174,4 +174,4 @@ skip-magic-trailing-comma = false [tool.mypy] ignore_missing_imports = true -python_version = 3.10 +python_version = "3.10" diff --git a/tiatoolbox/models/architecture/utils.py b/tiatoolbox/models/architecture/utils.py index bfd759b3a..8f8f2bb22 100644 --- a/tiatoolbox/models/architecture/utils.py +++ b/tiatoolbox/models/architecture/utils.py @@ -103,7 +103,7 @@ def compile_model( def centre_crop( img: np.ndarray | torch.Tensor, - crop_shape: np.ndarray | torch.Tensor | tuple, + crop_shape: np.ndarray | torch.Tensor | tuple[int, int], data_format: str = "NCHW", ) -> np.ndarray | torch.Tensor: """A function to center crop image with given crop shape. @@ -126,10 +126,10 @@ def centre_crop( msg = f"Unknown input format `{data_format}`." 
raise ValueError(msg) - crop_t = crop_shape[0] // 2 - crop_b = crop_shape[0] - crop_t - crop_l = crop_shape[1] // 2 - crop_r = crop_shape[1] - crop_l + crop_t: int = int(crop_shape[0] // 2) + crop_b: int = int(crop_shape[0] - crop_t) + crop_l: int = int(crop_shape[1] // 2) + crop_r: int = int(crop_shape[1] - crop_l) if data_format == "NCHW": return img[:, :, crop_t:-crop_b, crop_l:-crop_r] diff --git a/tiatoolbox/tools/stainnorm.py b/tiatoolbox/tools/stainnorm.py index 05c41398f..ae23141ae 100644 --- a/tiatoolbox/tools/stainnorm.py +++ b/tiatoolbox/tools/stainnorm.py @@ -288,7 +288,7 @@ def transform(self: ReinhardNormalizer, img: np.ndarray) -> np.ndarray: return self.merge_back(norm1, norm2, norm3) @staticmethod - def lab_split(img: np.ndarray) -> tuple[float, float, float]: + def lab_split(img: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """Convert from RGB uint8 to LAB and split into channels. Args: @@ -309,13 +309,15 @@ def lab_split(img: np.ndarray) -> tuple[float, float, float]: img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB) img_float = img.astype(np.float32) chan1, chan2, chan3 = cv2.split(img_float) - chan1 /= 2.55 # should now be in range [0,100] - chan2 -= 128.0 # should now be in range [-127,127] - chan3 -= 128.0 # should now be in range [-127,127] + chan1 /= np.asarray(2.55) # should now be in range [0,100] + chan2 -= np.asarray(128.0) # should now be in range [-127,127] + chan3 -= np.asarray(128.0) # should now be in range [-127,127] return chan1, chan2, chan3 @staticmethod - def merge_back(chan1: float, chan2: float, chan3: float) -> np.ndarray: + def merge_back( + chan1: np.ndarray, chan2: np.ndarray, chan3: np.ndarray + ) -> np.ndarray: """Take separate LAB channels and merge back to give RGB uint8. 
Args: @@ -357,11 +359,11 @@ def get_mean_std( """ img = img.astype("uint8") # ensure input image is uint8 chan1, chan2, chan3 = self.lab_split(img) - m1, sd1 = cv2.meanStdDev(chan1) - m2, sd2 = cv2.meanStdDev(chan2) - m3, sd3 = cv2.meanStdDev(chan3) - means = m1, m2, m3 - stds = sd1, sd2, sd3 + m1, sd1 = cv2.meanStdDev(np.asarray(chan1)) + m2, sd2 = cv2.meanStdDev(np.asarray(chan2)) + m3, sd3 = cv2.meanStdDev(np.asarray(chan3)) + means = float(m1[0][0]), float(m2[0][0]), float(m3[0][0]) + stds = float(sd1[0][0]), float(sd2[0][0]), float(sd3[0][0]) return means, stds diff --git a/tiatoolbox/tools/tissuemask.py b/tiatoolbox/tools/tissuemask.py index 66b9719bc..3923c7e0f 100644 --- a/tiatoolbox/tools/tissuemask.py +++ b/tiatoolbox/tools/tissuemask.py @@ -265,7 +265,7 @@ def __init__( # Set min region size to kernel area if None if self.min_region_size is None: - self.min_region_size = np.sum(self.kernel) + self.min_region_size = int(np.sum(self.kernel)) def transform(self: MorphologicalMasker, images: np.ndarray) -> np.ndarray: """Create masks using the found threshold followed by morphological operations. 
From 65f55baaa4e0f49c5bd81a31f0ea2773454f4d5c Mon Sep 17 00:00:00 2001 From: Jiaqi Lv Date: Fri, 10 Oct 2025 16:32:24 +0100 Subject: [PATCH 12/16] fix more mypy errors --- tiatoolbox/tools/patchextraction.py | 2 +- tiatoolbox/tools/registration/wsi_registration.py | 6 +++--- tiatoolbox/tools/stainnorm.py | 2 +- tiatoolbox/utils/image.py | 2 +- tiatoolbox/utils/visualization.py | 7 +++++-- 5 files changed, 11 insertions(+), 8 deletions(-) diff --git a/tiatoolbox/tools/patchextraction.py b/tiatoolbox/tools/patchextraction.py index 53ae61077..d1b134e76 100644 --- a/tiatoolbox/tools/patchextraction.py +++ b/tiatoolbox/tools/patchextraction.py @@ -419,7 +419,7 @@ def filter_coordinates( # Scaling the coordinates_list to the `tissue_mask` array resolution scale_factors = np.array(tissue_mask.shape[1::-1]) / np.array(wsi_shape) - scaled_coords = coordinates_list.copy().astype(np.float32) + scaled_coords: np.ndarray = coordinates_list.copy().astype(np.float32) scaled_coords[:, [0, 2]] *= scale_factors[0] scaled_coords[:, [0, 2]] = np.clip( scaled_coords[:, [0, 2]], diff --git a/tiatoolbox/tools/registration/wsi_registration.py b/tiatoolbox/tools/registration/wsi_registration.py index c8ea8c8b5..ef9b28893 100644 --- a/tiatoolbox/tools/registration/wsi_registration.py +++ b/tiatoolbox/tools/registration/wsi_registration.py @@ -798,13 +798,13 @@ def find_points_inside_boundary(mask: np.ndarray, points: np.ndarray) -> np.ndar Indices of points enclosed by a boundary. 
""" - kernel = np.ones((25, 25), np.uint8) + kernel: np.ndarray = np.ones((25, 25), np.uint8) mask = cv2.dilate(mask, kernel, iterations=1) mask_reader = VirtualWSIReader(mask) # convert coordinates of shape [N, 2] to [N, 4] - end_x_y = points[:, 0:2] + 1 - bbox_coord = np.c_[points, end_x_y].astype(int) + end_x_y: np.ndarray = points[:, 0:2] + 1 + bbox_coord: np.ndarray = np.c_[points, end_x_y].astype(int) return PatchExtractor.filter_coordinates( mask_reader, bbox_coord, diff --git a/tiatoolbox/tools/stainnorm.py b/tiatoolbox/tools/stainnorm.py index ae23141ae..75eb4a7cb 100644 --- a/tiatoolbox/tools/stainnorm.py +++ b/tiatoolbox/tools/stainnorm.py @@ -307,7 +307,7 @@ def lab_split(img: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """ img = img.astype("uint8") # ensure input image is uint8 img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB) - img_float = img.astype(np.float32) + img_float: np.ndarray = img.astype(np.float32) chan1, chan2, chan3 = cv2.split(img_float) chan1 /= np.asarray(2.55) # should now be in range [0,100] chan2 -= np.asarray(128.0) # should now be in range [-127,127] diff --git a/tiatoolbox/utils/image.py b/tiatoolbox/utils/image.py index 3ddd0bcd0..7567fbfe6 100644 --- a/tiatoolbox/utils/image.py +++ b/tiatoolbox/utils/image.py @@ -649,7 +649,7 @@ def sub_pixel_read( # skipcq: PY-R1000 # noqa: C901, PLR0912, PLR0913, PLR0915 residuals = np.abs(int_read_bounds - read_bounds) read_bounds = int_read_bounds read_location, read_size = bounds2locsize(int_read_bounds) - valid_int_bounds = find_overlap( + valid_int_bounds: np.ndarray = find_overlap( read_location=read_location, read_size=read_size, image_size=image_size, diff --git a/tiatoolbox/utils/visualization.py b/tiatoolbox/utils/visualization.py index f7e0b21ee..10887d03a 100644 --- a/tiatoolbox/utils/visualization.py +++ b/tiatoolbox/utils/visualization.py @@ -179,6 +179,7 @@ def overlay_prediction_mask( raise ValueError(msg) img = np.array(img * 255, dtype=np.uint8) # If `min_val` is 
defined, only display the overlay for areas with pred > min_val + prediction_sel: np.ndarray = np.ones_like(prediction, dtype=bool) if min_val > 0: prediction_sel = prediction >= min_val @@ -200,7 +201,7 @@ def overlay_prediction_mask( msg = f"Missing label for: {missing_label_uids}." raise ValueError(msg) - rgb_prediction = np.zeros( + rgb_prediction: np.ndarray = np.zeros( [prediction.shape[0], prediction.shape[1], 3], dtype=np.uint8, ) @@ -1129,7 +1130,9 @@ def render_annotations( min_area = 0.0005 * (output_size[0] * output_size[1]) * (scale * mpp_sf) ** 2 - tile = np.zeros((output_size[0] * res, output_size[1] * res, 4), dtype=np.uint8) + tile: np.ndarray = np.zeros( + (output_size[0] * res, output_size[1] * res, 4), dtype=np.uint8 + ) if scale <= self.max_scale: # get all annotations From 7506faa8a72e552096ed25d9bf14bb5aaafd01f6 Mon Sep 17 00:00:00 2001 From: Jiaqi Lv Date: Fri, 10 Oct 2025 16:47:16 +0100 Subject: [PATCH 13/16] fix mypy error --- tiatoolbox/utils/visualization.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tiatoolbox/utils/visualization.py b/tiatoolbox/utils/visualization.py index 10887d03a..41cb98f9b 100644 --- a/tiatoolbox/utils/visualization.py +++ b/tiatoolbox/utils/visualization.py @@ -187,13 +187,13 @@ def overlay_prediction_mask( predicted_classes = sorted(np.unique(prediction).tolist()) # Generate random colours if None are given - rand_state = np.random.default_rng().__getstate__() + rand_state = np.random.default_rng().bit_generator.state rng = np.random.default_rng(123) label_info = label_info or { # Use label_info if provided OR generate label_uid: (str(label_uid), rng.integers(0, 255, 3)) for label_uid in predicted_classes } - np.random.default_rng().__setstate__(rand_state) + np.random.default_rng().bit_generator.state = rand_state # Validate label_info missing_label_uids = _validate_label_info(label_info, predicted_classes) From a98629fc56d24fd508763a31f14a9522ae5c61e0 Mon Sep 17 00:00:00 2001 From: 
Shan E Ahmed Raza <13048456+shaneahmed@users.noreply.github.com> Date: Wed, 15 Oct 2025 11:11:02 +0100 Subject: [PATCH 14/16] :bug: Fix semantic segmentor npy load --- tests/models/test_multi_task_segmentor.py | 2 +- tiatoolbox/models/engine/multi_task_segmentor.py | 1 - tiatoolbox/models/engine/semantic_segmentor.py | 8 ++++---- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/tests/models/test_multi_task_segmentor.py b/tests/models/test_multi_task_segmentor.py index c92dfb409..3cec30121 100644 --- a/tests/models/test_multi_task_segmentor.py +++ b/tests/models/test_multi_task_segmentor.py @@ -113,7 +113,7 @@ def test_functionality_hovernetplus(remote_sample: Callable, tmp_path: Path) -> multi_segmentor = MultiTaskSegmentor( pretrained_model="hovernetplus-oed", batch_size=BATCH_SIZE, - num_postproc_workers=0, + num_postproc_workers=NUM_POSTPROC_WORKERS, ) output = multi_segmentor.predict( [mini_wsi_svs], diff --git a/tiatoolbox/models/engine/multi_task_segmentor.py b/tiatoolbox/models/engine/multi_task_segmentor.py index e1b6a17c1..55fd1a2d8 100644 --- a/tiatoolbox/models/engine/multi_task_segmentor.py +++ b/tiatoolbox/models/engine/multi_task_segmentor.py @@ -356,7 +356,6 @@ def _predict_one_wsi( ), ) self.wsi_layers[s_id][:] = 0 - self.wsi_layers[s_id].flush() indices_inst = [i for i, x in enumerate(self.output_types) if x == "instance"] diff --git a/tiatoolbox/models/engine/semantic_segmentor.py b/tiatoolbox/models/engine/semantic_segmentor.py index 2db26114f..b222d0266 100644 --- a/tiatoolbox/models/engine/semantic_segmentor.py +++ b/tiatoolbox/models/engine/semantic_segmentor.py @@ -55,12 +55,12 @@ def _estimate_canvas_parameters( """ if len(sample_prediction.shape) == 3: # noqa: PLR2004 num_output_ch = sample_prediction.shape[-1] - canvas_cum_shape_ = (*tuple(canvas_shape), num_output_ch) - canvas_count_shape_ = (*tuple(canvas_shape), 1) + canvas_cum_shape_ = tuple(map(int, (*tuple(canvas_shape), num_output_ch))) + canvas_count_shape_ = 
tuple(map(int, (*tuple(canvas_shape), 1))) add_singleton_dim = num_output_ch == 1 else: - canvas_cum_shape_ = (*tuple(canvas_shape), 1) - canvas_count_shape_ = (*tuple(canvas_shape), 1) + canvas_cum_shape_ = tuple(map(int, (*tuple(canvas_shape), 1))) + canvas_count_shape_ = tuple(map(int, (*tuple(canvas_shape), 1))) add_singleton_dim = True return canvas_cum_shape_, canvas_count_shape_, add_singleton_dim From 80730f280d5355c57fe2d099a0e3606fc7c8342d Mon Sep 17 00:00:00 2001 From: Shan E Ahmed Raza <13048456+shaneahmed@users.noreply.github.com> Date: Wed, 15 Oct 2025 11:48:45 +0100 Subject: [PATCH 15/16] :bug: Fix coverage --- tiatoolbox/models/engine/nucleus_instance_segmentor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tiatoolbox/models/engine/nucleus_instance_segmentor.py b/tiatoolbox/models/engine/nucleus_instance_segmentor.py index a822e9806..18d795a34 100644 --- a/tiatoolbox/models/engine/nucleus_instance_segmentor.py +++ b/tiatoolbox/models/engine/nucleus_instance_segmentor.py @@ -21,7 +21,7 @@ ) from tiatoolbox.tools.patchextraction import PatchExtractor -if TYPE_CHECKING: +if TYPE_CHECKING: # pragma: no cover from collections.abc import Callable From f071c53fee34884547a9abe2fc7a0396a3d7a169 Mon Sep 17 00:00:00 2001 From: Shan E Ahmed Raza <13048456+shaneahmed@users.noreply.github.com> Date: Thu, 16 Oct 2025 10:25:18 +0100 Subject: [PATCH 16/16] :white_check_mark: Add type hints tests --- .github/workflows/docker-publish.yml | 4 +- tests/test_type_hints.py | 60 ++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 2 deletions(-) create mode 100644 tests/test_type_hints.py diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml index 7b495284f..d3cfe4601 100644 --- a/.github/workflows/docker-publish.yml +++ b/.github/workflows/docker-publish.yml @@ -27,8 +27,8 @@ jobs: mtag: py3.12-debian - dockerfile: ./docker/3.12/Ubuntu/Dockerfile mtag: py3.12-ubuntu - - dockerfile: 
./docker/3.12/Ubuntu/Dockerfile - mtag: latest + - dockerfile: ./docker/3.13/Debian/Dockerfile + mtag: py3.13-debian - dockerfile: ./docker/3.13/Ubuntu/Dockerfile mtag: py3.13-ubuntu - dockerfile: ./docker/3.13/Ubuntu/Dockerfile diff --git a/tests/test_type_hints.py b/tests/test_type_hints.py new file mode 100644 index 000000000..2d8317551 --- /dev/null +++ b/tests/test_type_hints.py @@ -0,0 +1,60 @@ +"""Tests for tiatoolbox.type_hints module.""" + +from collections.abc import Callable +from typing import Literal + +import pytest + +from tiatoolbox import type_hints + + +def test_aliases_exist() -> None: + """Ensure all expected type aliases are defined in type_hints.""" + expected_aliases = [ + "JSON", + "NumPair", + "IntPair", + "Resolution", + "Units", + "Bounds", + "IntBounds", + "Geometry", + "Properties", + "QueryGeometry", + "CallablePredicate", + "CallableSelect", + "Predicate", + "Select", + "NumpyPadLiteral", + ] + for alias in expected_aliases: + assert hasattr(type_hints, alias), f"Missing alias: {alias}" + + +def test_units_is_literal() -> None: + """Check that Units alias is a Literal type.""" + assert isinstance(type_hints.Units, type(Literal["mpp"])) + + +def test_callable_predicate_signature() -> None: + """Verify CallablePredicate expects Properties and returns bool.""" + alias = type_hints.CallablePredicate + # Check that it's a typing Callable + assert getattr(alias, "__origin__", None) is Callable + # Check argument and return types + args = alias.__args__ + assert len(args) == 2 + assert args[1] is bool + + +@pytest.mark.parametrize("alias", ["Bounds", "IntBounds"]) +def test_bounds_alias_is_tuple(alias: str) -> None: + """Check that Bounds and IntBounds are tuple type hints.""" + assert "tuple" in str(getattr(type_hints, alias)) + + +def test_numpy_pad_literal_contains_expected_values() -> None: + """Ensure NumpyPadLiteral includes common numpy pad modes.""" + modes = ["constant", "reflect", "wrap"] + for mode in modes: + assert mode in 
type_hints.NumpyPadLiteral.__args__