
Commit 050e6a8

initial update
1 parent bca25eb commit 050e6a8

9 files changed (+1199 lines, -87 lines)


examples/nucleus-detection.ipynb

Lines changed: 166 additions & 0 deletions
@@ -0,0 +1,166 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fa584486",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "|2025-12-17|18:50:54.872| [WARNING] /home/u1910100/miniconda3/envs/tiatoolbox-dev/lib/python3.12/site-packages/albumentations/__init__.py:28: UserWarning: A new version of Albumentations is available: '2.0.8' (you have '2.0.4'). Upgrade using: pip install -U albumentations. To disable automatic update checks, set the environment variable NO_ALBUMENTATIONS_UPDATE to 1.\n",
      " check_for_updates()\n",
      "\n"
     ]
    },
    {
     "ename": "",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31mThe Kernel crashed while executing code in the current cell or a previous cell. \n",
      "\u001b[1;31mPlease review the code in the cell(s) to identify a possible cause of the failure. \n",
      "\u001b[1;31mClick <a href='https://aka.ms/vscodeJupyterKernelCrash'>here</a> for more info. \n",
      "\u001b[1;31mView Jupyter <a href='command:jupyter.viewOutput'>log</a> for further details."
     ]
    }
   ],
   "source": [
    "import sys\n",
    "sys.path.append(\"../\")\n",
    "import pathlib\n",
    "from tiatoolbox.models.engine.nucleus_detector import NucleusDetector\n",
    "from tiatoolbox.models.architecture.kongnet import KongNet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "78f694c7",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "|2025-12-17|18:50:57.923| [WARNING] GPU is not compatible with torch.compile. Compatible GPUs include NVIDIA V100, A100, and H100. Speedup numbers may be lower than expected.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "|2025-12-17|18:50:58.126| [INFO] When providing multiple whole slide images, the outputs will be saved and the locations of outputs will be returned to the calling function when `run()` finishes successfully.\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "c6892643281f4d72844868ee40e1375d",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Processing WSIs: 0%| | 0/1 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "|2025-12-17|18:50:58.311| [WARNING] Read: Scale > 1.This means that the desired resolution is higher than the WSI baseline (maximum encoded resolution). Interpolation of read regions may occur.\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "3342b87637924ddcaa77bb121f27f4e7",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Inferring patches: 0%| | 0/1004 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Current Memory usage: 75.52868374109333 % exceeds specified threshold: 75. Saving intermediate results to disk.\n"
     ]
    }
   ],
   "source": [
    "detector = NucleusDetector(model='KongNet_CoNIC_1')\n",
    "\n",
    "wsi_path = \"/media/u1910100/data/slides/TUM1.svs\"\n",
    "\n",
    "out = detector.run(\n",
    "    images=[pathlib.Path(wsi_path)],\n",
    "    patch_mode=False,\n",
    "    device=\"cuda\",\n",
    "    save_dir=pathlib.Path(\"/media/u1910100/data/overlays/test\"),\n",
    "    overwrite=True,\n",
    "    output_type=\"annotationstore\",\n",
    "    auto_get_mask=True,\n",
    "    memory_threshold=75,\n",
    "    num_workers=1,\n",
    "    batch_size=8,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b559e6d0",
   "metadata": {},
   "outputs": [],
   "source": [
    "from tiatoolbox.annotation.storage import SQLiteStore\n",
    "\n",
    "store_path = \"/media/u1910100/data/overlays/test/wsi1_2k_2k.db\"\n",
    "store = SQLiteStore.open(store_path)\n",
    "\n",
    "for ann in store.values():\n",
    "    print(ann)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e0db521a",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "tiatoolbox-dev",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}

test.py

Lines changed: 24 additions & 0 deletions
@@ -0,0 +1,24 @@
import pathlib

from tiatoolbox.models.engine.nucleus_detector import NucleusDetector


detector = NucleusDetector(model='KongNet_CoNIC_1')

# wsi_path = "/media/u1910100/data/slides/TCGA-AO-A0J2-01Z-00-DX1.7C9FEC7B-6040-4C58-9563-D10C0D7AC72E.svs"
wsi_path = "/media/u1910100/data/slides/CMU-1-Small-Region.svs"

out = detector.run(
    images=[pathlib.Path(wsi_path)],
    patch_mode=False,
    device="cuda",
    save_dir=pathlib.Path("/media/u1910100/data/overlays/test"),
    overwrite=True,
    output_type="annotationstore",
    auto_get_mask=True,
    memory_threshold=30,
    num_workers=1,
    batch_size=8,
    cache_dir=pathlib.Path("/media/u1910100/data/cache"),
)

tests/models/test_arch_kongnet.py

Lines changed: 50 additions & 0 deletions
@@ -0,0 +1,50 @@
"""Unit test package for KongNet Model."""

from collections.abc import Callable
from pathlib import Path

import numpy as np
import pytest
import torch
from torch import nn

from tiatoolbox.annotation.storage import SQLiteStore
from tiatoolbox.models.architecture import (
    fetch_pretrained_weights,
    get_pretrained_model,
)
from tiatoolbox.models.architecture.kongnet import KongNet
from tiatoolbox.utils import env_detection as toolbox_env
from tiatoolbox.wsicore.wsireader import VirtualWSIReader

device = "cuda" if toolbox_env.has_gpu() else "cpu"


def test_KongNet_Modeling() -> None:
    """Test for KongNet model."""

    # test creation
    model = KongNet(
        num_heads=6,
        num_channels_per_head=[3, 3, 3, 3, 3, 3],
        target_channels=[2, 5, 8, 11, 14, 17],
        min_distance=5,
        threshold_abs=0.5,
        wide_decoder=False,
    )

    # ckp_path = "/media/u1910100/data/Monkey/conic_models/efficientnetv2_l_eq_loss/KongNet_CoNIC_1.pth"
    # state_dict = torch.load(ckp_path, map_location=device)["model"]
    # model.load_state_dict(state_dict)
    model = model.to(device)
    # assert model is not None

    model.eval()
    with torch.no_grad():
        input_tensor = torch.randn(1, 3, 256, 256).to(device)
        output = model(input_tensor)
        assert output.shape == (1, 6, 256, 256)

        batch_tensor = torch.randn(4, 3, 256, 256).to(device)
        output = model(batch_tensor)
        assert output.shape == (4, 6, 256, 256)

tests/models/test_arch_utils.py

Lines changed: 39 additions & 0 deletions
@@ -7,9 +7,11 @@

 from tiatoolbox.models.architecture.utils import (
     UpSample2x,
+    Attention,
     centre_crop,
     centre_crop_to_shape,
     peak_detection_map_overlap,
+    nms_on_detection_maps,
 )


@@ -160,3 +162,40 @@ def test_peak_detection_map_overlap() -> None:
     assert peak_map[0, 0, 0] == 1.0
     assert peak_map[3, 3, 0] == 1.0
     assert np.sum(peak_map) == 2.0
+
+
+def test_nms_on_detection_maps() -> None:
+    """Test for NMS on detection maps."""
+    heatmap = np.zeros((7, 7, 3), dtype=np.float32)
+    nms_map = nms_on_detection_maps(heatmap, min_distance=3)
+    assert np.sum(nms_map) == 0.0  # No peaks
+
+    heatmap[0, 0, 0] = 0.9  # Peak in channel 0 (valid)
+    heatmap[0, 1, 0] = 0.6  # Peak in channel 0 (suppressed)
+    heatmap[0, 0, 1] = 0.8  # Peak in channel 1 (suppressed)
+
+    heatmap[5, 5, 2] = 0.9  # Peak in channel 2 (valid)
+    heatmap[4, 4, 1] = 0.7  # Peak in channel 1 (suppressed)
+
+    nms_map = nms_on_detection_maps(heatmap, min_distance=3)
+    assert nms_map[0, 0, 0] == 0.9
+    assert nms_map[5, 5, 2] == 0.9
+
+
+def test_attention_module() -> None:
+    """Test for Attention module."""
+
+    test_input = torch.zeros((1, 16, 32, 32), dtype=torch.float32)
+
+    # Default to identity
+    attention = Attention(name=None, in_channels=16)
+    output = attention(test_input)
+    assert torch.sum(output - test_input) == 0
+
+    attention = Attention(name="scse", in_channels=16, reduction=4)
+    output = attention(test_input)
+    assert output.shape == test_input.shape
+
+    with pytest.raises(ValueError, match=r"Attention random_name is not implemented"):
+        attention = Attention(name="random_name", in_channels=16)

tiatoolbox/data/pretrained_model.yaml

Lines changed: 33 additions & 0 deletions
@@ -964,3 +964,36 @@ grandqc_tissue_detection:
       patch_output_shape: [512, 512]
       stride_shape: [256, 256]
       save_resolution: {'units': 'mpp', 'resolution': 10.0}
+
+KongNet_CoNIC_1:
+  hf_repo_id: TIACentre/KongNet_pretrained_weights
+  architecture:
+    class: kongnet.KongNet
+    kwargs:
+      num_heads: 6
+      num_channels_per_head: [3, 3, 3, 3, 3, 3]
+      target_channels: [2, 5, 8, 11, 14, 17]
+      wide_decoder: False
+      min_distance: 5
+      threshold_abs: 0.5
+      class_dict: {
+        0: "Neutrophil",
+        1: "Epithelial",
+        2: "Lymphocyte",
+        3: "Plasma",
+        4: "Eosinophil",
+        5: "Connective",
+      }
+      postproc_tile_shape: [2048, 2048]
+  ioconfig:
+    class: io_config.IOSegmentorConfig
+    kwargs:
+      input_resolutions:
+        - {'units': 'mpp', 'resolution': 0.5}
+      output_resolutions:
+        - {'units': 'mpp', 'resolution': 0.5}
+      patch_input_shape: [256, 256]
+      patch_output_shape: [256, 256]
+      stride_shape: [248, 248]
+      save_resolution: {'units': 'baseline', 'resolution': 1.0}
+
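
Note on the new pretrained_model.yaml entry: registering KongNet_CoNIC_1 here is what lets the notebook and test.py refer to the model purely by name via NucleusDetector(model='KongNet_CoNIC_1'). As a minimal sketch only (not part of this commit), the entry can presumably also be resolved directly with get_pretrained_model, which the new KongNet test already imports; the (model, ioconfig) return shape and automatic weight download from the hf_repo_id above are assumptions based on how other registered architectures behave.

# Sketch, assuming get_pretrained_model resolves "KongNet_CoNIC_1" from
# pretrained_model.yaml and returns a (model, ioconfig) pair.
import torch

from tiatoolbox.models.architecture import get_pretrained_model

model, ioconfig = get_pretrained_model("KongNet_CoNIC_1")
model.eval()

# One 256 x 256 RGB patch at 0.5 mpp, matching patch_input_shape above.
patch = torch.randn(1, 3, 256, 256)
with torch.no_grad():
    output = model(patch)  # expected shape (1, 6, 256, 256), as in the new unit test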
