Commit 50958e2

📝 Update docstring formatting.
1 parent 7ae99aa commit 50958e2

File tree: 2 files changed (+50, -25 lines)

tiatoolbox/models/architecture/kongnet.py

Lines changed: 5 additions & 5 deletions
@@ -297,11 +297,11 @@ def __init__(
         Args:
             in_channels (int):
-                Number of input channels
+                Number of input channels.
             skip_channels (int):
-                Number of channels from skip connection
+                Number of channels from skip connection.
             out_channels (int):
-                Number of output channels
+                Number of output channels.
             attention_type (str):
                 Type of attention mechanism. Default: 'scse'.
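For readers skimming the diff, a minimal sketch of how a decoder block with these arguments is typically wired (upsample, concatenate the skip features, convolve, then apply attention) may help; the layer choices below are assumptions for illustration, not the actual KongNet `DecoderBlock` code.

```python
# Hypothetical sketch of a U-Net-style decoder block matching the documented
# arguments; bilinear upsampling and two 3x3 convolutions are assumed choices.
import torch
import torch.nn.functional as F
from torch import nn


class DecoderBlockSketch(nn.Module):
    def __init__(self, in_channels: int, skip_channels: int, out_channels: int) -> None:
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels + skip_channels, out_channels, 3, padding=1)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)
        # attention_type would select a module such as SCSE here (sketched further below).

    def forward(self, x: torch.Tensor, skip: torch.Tensor) -> torch.Tensor:
        x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=False)
        x = torch.cat([x, skip], dim=1)  # fuse encoder features via the skip connection
        x = F.relu(self.conv1(x))
        return F.relu(self.conv2(x))
```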
@@ -399,7 +399,7 @@ class KongNetDecoder(nn.Module):
     attention mechanisms, and optional center block at the bottleneck.
 
     Args:
-        encoder_channels (List[int]):
+        encoder_channels (list[int]):
             Number of channels at each encoder level
         decoder_channels (Tuple[int, ...]):
             Number of channels at each decoder level
@@ -428,7 +428,7 @@ def __init__(
         """Initialize KongNetDecoder.
 
         Args:
-            encoder_channels (List[int]):
+            encoder_channels (list[int]):
                 Number of channels at each encoder level.
             decoder_channels (Tuple[int, ...]):
                 Number of channels at each decoder level.
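As a quick illustration of how `encoder_channels` and `decoder_channels` usually pair up level by level in such a decoder, here is a small bookkeeping sketch; the example channel counts are made up and do not come from KongNet.

```python
# Hypothetical channel bookkeeping for a U-Net-style decoder; values are illustrative.
encoder_channels = [64, 256, 512, 1024, 2048]   # channels at each encoder level
decoder_channels = (256, 128, 64, 32, 16)       # channels at each decoder level

# The deepest encoder feature feeds the first decoder block; shallower encoder
# features arrive as skip connections, deepest first.
in_chs = [encoder_channels[-1], *decoder_channels[:-1]]
skip_chs = [*reversed(encoder_channels[:-1]), 0]

for in_ch, skip_ch, out_ch in zip(in_chs, skip_chs, decoder_channels):
    print(f"DecoderBlock(in_channels={in_ch}, skip_channels={skip_ch}, out_channels={out_ch})")
```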

tiatoolbox/models/architecture/utils.py

Lines changed: 45 additions & 20 deletions
@@ -310,10 +310,14 @@ def __init__(self, name: str | None, in_channels: int, reduction: int = 16) -> N
         """Initialize the Attention module.
 
         Args:
-            name (str | None): Name of the attention mechanism.
+            name (str | None):
+                Name of the attention mechanism.
                 Only "scse" is implemented. If None, identity is used.
-            in_channels (int): Number of input channels.
-            reduction (int): Reduction ratio for channel attention.
+            in_channels (int):
+                Number of input channels.
+            reduction (int):
+                Reduction ratio for channel attention.
+
         """
         super().__init__()

@@ -329,10 +333,13 @@ def forward(self: AttentionModule, x: torch.Tensor) -> torch.Tensor:
         """Forward pass of the Attention module.
 
         Args:
-            x (torch.Tensor): Input feature map of shape (N, C, H, W).
+            x (torch.Tensor):
+                Input feature map of shape (N, C, H, W).
 
         Returns:
-            torch.Tensor: Output feature map after applying attention.
+            torch.Tensor:
+                Output feature map after applying attention.
+
         """
         return self.attention(x)
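The `name` argument acts as a small dispatch between a no-op and the SCSE block. Below is a self-contained sketch of that pattern, assuming `nn.Identity` as the fallback and using a placeholder where the real SCSE module would go (it is sketched after the next hunk).

```python
# Sketch of the name-based attention dispatch described above; not the exact
# tiatoolbox code. The "scse" branch uses a placeholder so this snippet runs alone.
from __future__ import annotations

import torch
from torch import nn


class AttentionSketch(nn.Module):
    def __init__(self, name: str | None, in_channels: int, reduction: int = 16) -> None:
        super().__init__()
        if name is None:
            self.attention = nn.Identity()  # pass-through, feature map unchanged
        elif name == "scse":
            self.attention = nn.Identity()  # placeholder for SCSE(in_channels, reduction)
        else:
            msg = f"Attention type '{name}' is not implemented."
            raise ValueError(msg)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.attention(x)
```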

@@ -344,8 +351,11 @@ def __init__(self, in_channels: int, reduction: int = 16) -> None:
         """Initialize the SCSE module.
 
         Args:
-            in_channels (int): Number of input channels.
-            reduction (int): Reduction ratio for channel attention.
+            in_channels (int):
+                Number of input channels.
+            reduction (int):
+                Reduction ratio for channel attention.
+
         """
         super().__init__()
         self.cSE = nn.Sequential(
@@ -361,10 +371,13 @@ def forward(self: SCSEModule, x: torch.Tensor) -> torch.Tensor:
         """Forward pass of the SCSE module.
 
         Args:
-            x (torch.Tensor): Input feature map of shape (N, C, H, W).
+            x (torch.Tensor):
+                Input feature map of shape (N, C, H, W).
 
         Returns:
-            torch.Tensor: Output feature map after applying SCSE attention.
+            torch.Tensor:
+                Output feature map after applying SCSE attention.
+
         """
         return x * self.cSE(x) + x * self.sSE(x)
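The forward pass shown here combines a channel-squeeze branch (cSE) and a spatial-squeeze branch (sSE). A minimal self-contained sketch of such a module follows; the exact layer stack is an assumption based on the standard concurrent spatial and channel squeeze-and-excitation design, not necessarily the tiatoolbox layers.

```python
# Minimal sketch of a concurrent spatial & channel squeeze-and-excitation (SCSE) block.
# Layer choices follow the common SCSE formulation and are assumptions for illustration.
import torch
from torch import nn


class SCSEModuleSketch(nn.Module):
    def __init__(self, in_channels: int, reduction: int = 16) -> None:
        super().__init__()
        # Channel squeeze-and-excitation: global pool -> bottleneck -> per-channel gate.
        self.cSE = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, in_channels // reduction, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels // reduction, in_channels, 1),
            nn.Sigmoid(),
        )
        # Spatial squeeze-and-excitation: 1x1 conv -> per-pixel gate.
        self.sSE = nn.Sequential(nn.Conv2d(in_channels, 1, 1), nn.Sigmoid())

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Re-weight the input with both gates and sum the two branches.
        return x * self.cSE(x) + x * self.sSE(x)
```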

@@ -410,21 +423,30 @@ def peak_detection_map_overlap(
     Returns same spatial shape as the input block
 
     Args:
-        block: NumPy array (H, W, C).
-        min_distance: Minimum number of pixels separating peaks.
-        threshold_abs: Minimum intensity of peaks. By default, None.
-        threshold_rel: Minimum relative intensity of peaks. By default, None.
-        block_info: Dask block info dict.
+        block:
+            NumPy array (H, W, C).
+        min_distance:
+            Minimum number of pixels separating peaks.
+        threshold_abs:
+            Minimum intensity of peaks. By default, None.
+        threshold_rel:
+            Minimum relative intensity of peaks. By default, None.
+        block_info:
+            Dask block info dict.
             Only used when called from dask.array.map_overlap.
-        depth_h: Halo size in pixels for height (rows).
+        depth_h:
+            Halo size in pixels for height (rows).
             Only used when called from dask.array.map_overlap.
-        depth_w: Halo size in pixels for width (cols).
+        depth_w:
+            Halo size in pixels for width (cols).
             Only used when it's called from dask.array.map_overlap.
-        return_probability: If True, returns the confidence scores at peak
+        return_probability:
+            If True, returns the confidence scores at peak
             locations instead of binary peak map.
 
     Returns:
-        out: NumPy array (H, W, C) with 1.0 at peaks, 0 elsewhere.
+        out:
+            NumPy array (H, W, C) with 1.0 at peaks, 0 elsewhere.
 
     """
     block_height, block_width, block_channels = block.shape
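Since several of these parameters only matter when the function is driven by `dask.array.map_overlap`, a hedged usage sketch follows; the array shape, chunking, halo depths, boundary mode, and `min_distance` value are illustrative assumptions, not values used by the toolbox.

```python
# Hypothetical wiring of peak_detection_map_overlap through dask.array.map_overlap.
# Array shape, chunking, halo depths and thresholds are assumptions for illustration.
import dask.array as da
import numpy as np

from tiatoolbox.models.architecture.utils import peak_detection_map_overlap

depth_h, depth_w = 32, 32  # halo so peaks near chunk borders are still detected
prob_maps = da.random.random((4096, 4096, 3), chunks=(1024, 1024, 3))  # stand-in model output

peak_maps = prob_maps.map_overlap(
    peak_detection_map_overlap,
    depth={0: depth_h, 1: depth_w, 2: 0},  # overlap rows and cols, never channels
    boundary=0,                            # pad the outer border with zeros
    min_distance=11,
    depth_h=depth_h,                       # tell the function how wide its halo is
    depth_w=depth_w,
    dtype=np.float32,
)
result = peak_maps.compute()  # same (4096, 4096, 3) shape, 1.0 at detected peaks
```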
@@ -487,10 +509,13 @@ def nms_on_detection_maps(
     Args:
         detection_maps (np.ndarray):
             (H, W, C) where pixels are already local peaks.
-        min_distance (int): Minimum distance required between ANY detections.
+        min_distance (int):
+            Minimum distance required between ANY detections.
 
     Returns:
-        np.ndarray: The filtered maps with cross-channel suppression applied.
+        np.ndarray:
+            The filtered maps with cross-channel suppression applied.
+
     """
     # 1. Collapse channels to find the "Global Best" at every spatial location
     # Contains the highest probability found across all classes at each pixel.
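The leading comments hint at the strategy: collapse channels to a per-pixel "global best" score, then enforce `min_distance` between detections regardless of class. A rough, self-contained sketch of that idea (not the actual tiatoolbox implementation):

```python
# Rough sketch of cross-channel NMS on an (H, W, C) peak map; illustration only.
import numpy as np


def nms_sketch(detection_maps: np.ndarray, min_distance: int) -> np.ndarray:
    best = detection_maps.max(axis=-1)       # highest score across all classes per pixel
    ys, xs = np.nonzero(best)                # candidate detections from any channel
    order = np.argsort(best[ys, xs])[::-1]   # visit strongest candidates first
    kept = np.zeros_like(detection_maps)
    kept_points = []                         # (y, x) of detections already accepted
    for idx in order:
        y, x = int(ys[idx]), int(xs[idx])
        # Drop this candidate if a stronger detection already sits within min_distance.
        if any((y - ky) ** 2 + (x - kx) ** 2 < min_distance**2 for ky, kx in kept_points):
            continue
        kept_points.append((y, x))
        channel = int(detection_maps[y, x].argmax())  # keep only the winning class here
        kept[y, x, channel] = detection_maps[y, x, channel]
    return kept
```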
