Skip to content

Commit 17e0bcb

Browse files
committed
core/refac: #98 nullable sum-to-one reg
- make the sum-to-one regularizer scaling factor nullable
1 parent 4c37aab commit 17e0bcb

File tree

9 files changed

+58
-61
lines changed

9 files changed

+58
-61
lines changed

core/pyproject.toml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
[project]
22
description = "Framework for handling image segmentation in the context of multiple annotators"
33
name = "seg_tgce"
4-
version = "0.3.14"
4+
version = "0.3.16"
55
readme = "README.md"
66
authors = [{ name = "Brandon Lotero", email = "blotero@gmail.com" }]
77
maintainers = [{ name = "Brandon Lotero", email = "blotero@gmail.com" }]
@@ -15,7 +15,7 @@ Issues = "https://github.com/blotero/seg_tgce/issues"
1515

1616
[tool.poetry]
1717
name = "seg_tgce"
18-
version = "0.3.14"
18+
version = "0.3.16"
1919
authors = ["Brandon Lotero <blotero@gmail.com>"]
2020
description = "A package for the SEG TGCE project"
2121
readme = "README.md"

core/seg_tgce/data/crowd_seg/__retrieve.py renamed to core/seg_tgce/data/crowd_seg/_retrieve.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,8 @@
1313

1414
_TARGET_DIR = "__data__/crowd_seg"
1515
_BUCKET_NAME = "crowd-seg-data"
16-
PATCHES_OBJECT_NAME = "patches_refined.zip"
17-
MASKS_OBJECT_NAME = "masks_refined.zip"
16+
PATCHES_OBJECT_NAME = "patches_refined_v2.zip"
17+
MASKS_OBJECT_NAME = "masks_refined_v2.zip"
1818

1919

2020
s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED))

core/seg_tgce/data/crowd_seg/tfds_builder.py

Lines changed: 12 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
from matplotlib import pyplot as plt
1010
from matplotlib.colors import ListedColormap, to_rgb
1111

12-
from seg_tgce.data.crowd_seg.__retrieve import (
12+
from seg_tgce.data.crowd_seg._retrieve import (
1313
_BUCKET_NAME,
1414
MASKS_OBJECT_NAME,
1515
PATCHES_OBJECT_NAME,
@@ -20,12 +20,9 @@
2020

2121

2222
CLASSES_DEFINITION = {
23-
0: "Ignore",
24-
1: "Other",
25-
2: "Tumor",
26-
3: "Stroma",
27-
4: "B. Inflammation", # Benign Inflammation
28-
5: "Necrosis",
23+
0: "Other",
24+
1: "Tumor",
25+
2: "Stroma",
2926
}
3027

3128
REAL_SCORERS = [
@@ -151,8 +148,9 @@ def map_sample(sample: SampleData, image_size: tuple[int, int]) -> ProcessedSamp
151148
class CrowdSegDataset(tfds.core.GeneratorBasedBuilder):
152149
"""DatasetBuilder for crowd segmentation dataset."""
153150

154-
VERSION = tfds.core.Version("1.0.0")
151+
VERSION = tfds.core.Version("1.1.0")
155152
RELEASE_NOTES = {
153+
"1.1.0": "Use further refined patches and masks",
156154
"1.0.0": "Initial release.",
157155
}
158156

@@ -184,8 +182,8 @@ def _info(self) -> tfds.core.DatasetInfo:
184182
}
185183
),
186184
supervised_keys=("image", "masks"),
187-
homepage="https://github.com/your-repo/crowd-seg",
188-
citation="""@article{your-citation}""",
185+
homepage="https://github.com/blotero/seg_tgce",
186+
# citation="""@article{your-citation}""",
189187
)
190188

191189
def _split_generators(
@@ -559,7 +557,7 @@ def print_sample_info(data: dict) -> None:
559557
print("-" * 50)
560558

561559

562-
def main_processed() -> None:
560+
def main() -> None:
563561
target_size = (128, 128)
564562
batch_size = 16
565563

@@ -568,26 +566,15 @@ def main_processed() -> None:
568566
)
569567
print(f"Dataset train length: {len(train)}")
570568

571-
fig = visualize_sample(
569+
_ = visualize_sample(
572570
validation,
573571
target_size,
574572
batch_index=1,
575573
sample_indexes=[0, 1, 3, 4],
576574
sparse_labelers=False,
577575
)
578-
fig.savefig(
579-
"/home/brandon/unal/maestria/master_thesis/Cap1/Figures/multiannotator-segmentation.png"
580-
)
581-
582-
583-
def main_raw():
584-
train, validation, test = get_crowd_seg_dataset_tfds(
585-
image_size=(64, 64),
586-
)
587-
for i, data in enumerate(train):
588-
print(f"\nSample {i}:")
589-
print_sample_info(data)
576+
plt.show()
590577

591578

592579
if __name__ == "__main__":
593-
main_processed()
580+
main()

core/seg_tgce/experiments/histology/baseline.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,8 @@
1111

1212
from ..utils import handle_training
1313

14-
TARGET_SHAPE = (256, 256)
15-
BATCH_SIZE = 32
14+
TARGET_SHAPE = (128, 128)
15+
BATCH_SIZE = 16
1616
TRAIN_EPOCHS = 20
1717
TUNER_EPOCHS = 2
1818
MAX_TRIALS = 5

core/seg_tgce/experiments/histology/features.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -24,9 +24,9 @@
2424
OBJECTIVE = "val_segmentation_output_dice_coefficient"
2525

2626
DEFAULT_HPARAMS = {
27-
"initial_learning_rate": 1e-3,
28-
"q": 0.5,
29-
"noise_tolerance": 0.5,
27+
"initial_learning_rate": 6.5e-4,
28+
"q": 0.66,
29+
"noise_tolerance": 0.62,
3030
"a": 0.5,
3131
"b": 0.5,
3232
"lambda_reg_weight": 0.1,
@@ -52,9 +52,9 @@ def build_model_from_trial(trial: HpTunerTrial | None) -> Model:
5252
)
5353

5454
return build_features_model_from_hparams(
55-
learning_rate=trial.suggest_float("learning_rate", 1e-5, 1e-2, log=True),
56-
q=trial.suggest_float("q", 0.1, 0.9, step=0.01),
57-
noise_tolerance=trial.suggest_float("noise_tolerance", 0.1, 0.9, step=0.01),
55+
learning_rate=DEFAULT_HPARAMS["initial_learning_rate"],
56+
q=DEFAULT_HPARAMS["q"],
57+
noise_tolerance=DEFAULT_HPARAMS["noise_tolerance"],
5858
a=trial.suggest_float("a", 0.1, 10.0, step=0.1),
5959
b=trial.suggest_float("b", 0.1, 0.99, step=0.01),
6060
lambda_reg_weight=trial.suggest_float("lambda_reg_weight", 0.0, 10.0, step=0.1),

core/seg_tgce/experiments/histology/pixel.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -23,9 +23,9 @@
2323
OBJECTIVE = "val_segmentation_output_dice_coefficient"
2424

2525
DEFAULT_HPARAMS = {
26-
"initial_learning_rate": 1e-3,
27-
"q": 0.7,
28-
"noise_tolerance": 0.5,
26+
"initial_learning_rate": 6.5e-4,
27+
"q": 0.66,
28+
"noise_tolerance": 0.62,
2929
"a": 0.2,
3030
"b": 0.7,
3131
"lambda_reg_weight": 0.1,
@@ -51,9 +51,9 @@ def build_model_from_trial(trial: HpTunerTrial | None) -> Model:
5151
)
5252

5353
return build_pixel_model_from_hparams(
54-
learning_rate=trial.suggest_float("learning_rate", 1e-5, 1e-2, log=True),
55-
q=trial.suggest_float("q", 0.1, 0.9, step=0.01),
56-
noise_tolerance=trial.suggest_float("noise_tolerance", 0.1, 0.9, step=0.01),
54+
learning_rate=DEFAULT_HPARAMS["initial_learning_rate"],
55+
q=DEFAULT_HPARAMS["q"],
56+
noise_tolerance=DEFAULT_HPARAMS["noise_tolerance"],
5757
a=trial.suggest_float("a", 0.1, 10.0, step=0.1),
5858
b=trial.suggest_float("b", 0.1, 0.99, step=0.01),
5959
lambda_reg_weight=trial.suggest_float("lambda_reg_weight", 0.0, 10.0, step=0.1),

core/seg_tgce/experiments/histology/scalar.py

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -23,9 +23,9 @@
2323
OBJECTIVE = "val_segmentation_output_dice_coefficient"
2424

2525
DEFAULT_HPARAMS = {
26-
"initial_learning_rate": 1e-3,
27-
"q": 0.5,
28-
"noise_tolerance": 0.5,
26+
"initial_learning_rate": 6.5e-4,
27+
"q": 0.66,
28+
"noise_tolerance": 0.62,
2929
"a": 0.3,
3030
"b": 0.7,
3131
"lambda_reg_weight": 0.1,
@@ -51,9 +51,9 @@ def build_model_from_trial(trial: HpTunerTrial | None) -> Model:
5151
)
5252

5353
return build_scalar_model_from_hparams(
54-
learning_rate=trial.suggest_float("learning_rate", 1e-5, 1e-2, log=True),
55-
q=trial.suggest_float("q", 0.1, 0.9, step=0.01),
56-
noise_tolerance=trial.suggest_float("noise_tolerance", 0.1, 0.9, step=0.01),
54+
learning_rate=DEFAULT_HPARAMS["initial_learning_rate"],
55+
q=DEFAULT_HPARAMS["q"],
56+
noise_tolerance=DEFAULT_HPARAMS["noise_tolerance"],
5757
a=trial.suggest_float("a", 0.1, 10.0, step=0.1),
5858
b=trial.suggest_float("b", 0.1, 0.99, step=0.01),
5959
lambda_reg_weight=trial.suggest_float("lambda_reg_weight", 0.0, 10.0, step=0.1),
@@ -93,6 +93,7 @@ def build_model_from_trial(trial: HpTunerTrial | None) -> Model:
9393
tuner_epochs=TUNER_EPOCHS,
9494
objective=OBJECTIVE,
9595
tuner_max_trials=MAX_TRIALS,
96+
study_name=STUDY_NAME,
9697
)
9798

9899
vis_callback = ScalarVisualizationCallback(

core/seg_tgce/loss/tgce.py

Lines changed: 18 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ def __init__( # pylint: disable=too-many-arguments
4040
b: float = 0.7,
4141
lambda_reg_weight: float = 0.1,
4242
lambda_entropy_weight: float = 0.1,
43-
lambda_sum_weight: float = 0.1,
43+
lambda_sum_weight: float | None = None,
4444
epsilon: float = 1e-8,
4545
) -> None:
4646
self.q = q
@@ -98,8 +98,11 @@ def call(
9898
+ (1 - valid_lambda_r) * tf.math.log1p(1 - valid_lambda_r)
9999
)
100100

101-
lambda_sum = self.lambda_sum_weight * tf.reduce_mean(
102-
tf.square(tf.reduce_sum(valid_lambda_r, axis=-1) - 1.0)
101+
lambda_sum = (
102+
self.lambda_sum_weight
103+
* tf.reduce_mean(tf.square(tf.reduce_sum(valid_lambda_r, axis=-1) - 1.0))
104+
if self.lambda_sum_weight is not None
105+
else 0.0
103106
)
104107

105108
total_loss = (
@@ -153,7 +156,7 @@ def __init__( # pylint: disable=too-many-arguments
153156
b: float = 0.7,
154157
lambda_reg_weight: float = 0.1,
155158
lambda_entropy_weight: float = 0.1,
156-
lambda_sum_weight: float = 0.1,
159+
lambda_sum_weight: float | None = None,
157160
epsilon: float = 1e-8,
158161
) -> None:
159162
self.a = a
@@ -217,8 +220,11 @@ def call(
217220
+ (1 - valid_lambda_r) * tf.math.log1p(1 - valid_lambda_r)
218221
)
219222

220-
lambda_sum = self.lambda_sum_weight * tf.reduce_mean(
221-
tf.square(tf.reduce_sum(valid_lambda_r, axis=-1) - 1.0)
223+
lambda_sum = (
224+
self.lambda_sum_weight
225+
* tf.reduce_mean(tf.square(tf.reduce_sum(valid_lambda_r, axis=-1) - 1.0))
226+
if self.lambda_sum_weight is not None
227+
else 0.0
222228
)
223229

224230
total_loss = (
@@ -271,7 +277,7 @@ def __init__( # pylint: disable=too-many-arguments
271277
b: float = 0.7,
272278
lambda_reg_weight: float = 0.1,
273279
lambda_entropy_weight: float = 0.1,
274-
lambda_sum_weight: float = 0.1,
280+
lambda_sum_weight: float | None = None,
275281
epsilon: float = 1e-8,
276282
) -> None:
277283
self.a = a
@@ -330,8 +336,11 @@ def call(
330336
+ (1 - valid_lambda_r) * tf.math.log1p(1 - valid_lambda_r)
331337
)
332338

333-
lambda_sum = self.lambda_sum_weight * tf.reduce_mean(
334-
tf.square(tf.reduce_sum(valid_lambda_r, axis=-1) - 1.0)
339+
lambda_sum = (
340+
self.lambda_sum_weight
341+
* tf.reduce_mean(tf.square(tf.reduce_sum(valid_lambda_r, axis=-1) - 1.0))
342+
if self.lambda_sum_weight is not None
343+
else 0.0
335344
)
336345

337346
total_loss = (

core/seg_tgce/models/builders.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ def build_scalar_model_from_hparams(
6363
b: float,
6464
lambda_reg_weight: float,
6565
lambda_entropy_weight: float,
66-
lambda_sum_weight: float,
66+
lambda_sum_weight: float | None,
6767
num_classes: int,
6868
target_shape: tuple,
6969
n_scorers: int,
@@ -116,7 +116,7 @@ def build_features_model_from_hparams(
116116
b: float,
117117
lambda_reg_weight: float,
118118
lambda_entropy_weight: float,
119-
lambda_sum_weight: float,
119+
lambda_sum_weight: float | None,
120120
num_classes: int,
121121
target_shape: tuple,
122122
n_scorers: int,
@@ -184,7 +184,7 @@ def build_pixel_model_from_hparams(
184184
b: float,
185185
lambda_reg_weight: float,
186186
lambda_entropy_weight: float,
187-
lambda_sum_weight: float,
187+
lambda_sum_weight: float | None,
188188
num_classes: int,
189189
target_shape: tuple,
190190
n_scorers: int,

0 commit comments

Comments (0)