Skip to content

Commit d67aa02

Browse files
committed
core\refac: #98 refac hp names
- remove useless hp - support python 312
1 parent bfffb4c commit d67aa02

File tree

11 files changed

+148
-160
lines changed

11 files changed

+148
-160
lines changed

.vscode/settings.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,5 +16,5 @@
1616
},
1717
"workbench.colorTheme": "Gruvbox Dark Hard",
1818
"mypy-type-checker.cwd": "${nearestConfig}",
19-
"python.defaultInterpreterPath": "${workspaceFolder}/core/.venv/bin/python"
19+
"python.defaultInterpreterPath": "${workspaceFolder}/core/.env/bin/python"
2020
}

core/poetry.lock

Lines changed: 10 additions & 5 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

core/pyproject.toml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
[project]
22
description = "Framework for handling image segmentation in the context of multiple annotators"
33
name = "seg_tgce"
4-
version = "0.3.9"
4+
version = "0.3.12"
55
readme = "README.md"
66
authors = [{ name = "Brandon Lotero", email = "blotero@gmail.com" }]
77
maintainers = [{ name = "Brandon Lotero", email = "blotero@gmail.com" }]
@@ -15,7 +15,7 @@ Issues = "https://github.com/blotero/seg_tgce/issues"
1515

1616
[tool.poetry]
1717
name = "seg_tgce"
18-
version = "0.3.9"
18+
version = "0.3.12"
1919
authors = ["Brandon Lotero <blotero@gmail.com>"]
2020
description = "A package for the SEG TGCE project"
2121
readme = "README.md"
@@ -28,7 +28,7 @@ repository = "https://github.com/blotero/seg_tgce"
2828

2929

3030
[tool.poetry.dependencies]
31-
python = ">=3.10,<3.12"
31+
python = ">=3.10,<3.13"
3232
numpy = "2.0.2"
3333
keras = "3.8.0"
3434
tensorflow = "2.18.0"

core/seg_tgce/experiments/histology/features.py

Lines changed: 37 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import argparse
22

3+
from keras import Model
34
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
45

56
from seg_tgce.data.crowd_seg.tfds_builder import (
@@ -8,65 +9,59 @@
89
get_processed_data,
910
)
1011
from seg_tgce.experiments.plot_utils import plot_training_history, print_test_metrics
12+
from seg_tgce.experiments.types import HpTunerTrial
1113
from seg_tgce.models.builders import build_features_model_from_hparams
1214
from seg_tgce.models.ma_model import FeatureVisualizationCallback
1315

14-
from ..utils import handle_training
16+
from ..utils import handle_training_optuna
1517

16-
TARGET_SHAPE = (128, 128)
18+
TARGET_SHAPE = (256, 256)
1719
BATCH_SIZE = 4
1820
TRAIN_EPOCHS = 20
1921
TUNER_EPOCHS = 1
22+
TUNER_MAX_TRIALS = 10
23+
STUDY_NAME = "histology_features_tuning"
24+
OBJECTIVE = "val_segmentation_output_dice_coefficient"
2025

2126
DEFAULT_HPARAMS = {
2227
"initial_learning_rate": 1e-3,
2328
"q": 0.5,
2429
"noise_tolerance": 0.5,
2530
"a": 0.5,
2631
"b": 0.5,
27-
"c": 1.0,
2832
"lambda_reg_weight": 0.1,
2933
"lambda_entropy_weight": 0.1,
3034
"lambda_sum_weight": 0.1,
3135
}
3236

3337

34-
def build_model(hp=None):
35-
if hp is None:
36-
params = DEFAULT_HPARAMS
37-
else:
38-
params = {
39-
"initial_learning_rate": hp.Float(
40-
"learning_rate", min_value=1e-5, max_value=1e-2, sampling="LOG"
41-
),
42-
"q": hp.Float("q", min_value=0.1, max_value=0.9, step=0.1),
43-
"noise_tolerance": hp.Float(
44-
"noise_tolerance", min_value=0.1, max_value=0.9, step=0.1
45-
),
46-
"lambda_reg_weight": hp.Float(
47-
"lambda_reg_weight", min_value=0.01, max_value=0.5, step=0.01
48-
),
49-
"lambda_entropy_weight": hp.Float(
50-
"lambda_entropy_weight", min_value=0.01, max_value=0.5, step=0.01
51-
),
52-
"lambda_sum_weight": hp.Float(
53-
"lambda_sum_weight", min_value=0.01, max_value=0.5, step=0.01
54-
),
55-
"a": hp.Float("a", min_value=0.0, max_value=1.0, step=0.1),
56-
"b": hp.Float("b", min_value=0.0, max_value=1.0, step=0.1),
57-
"c": hp.Float("c", min_value=0.0, max_value=1.0, step=0.1),
58-
}
38+
def build_model_from_trial(trial: HpTunerTrial | None) -> Model:
39+
if trial is None:
40+
return build_features_model_from_hparams(
41+
learning_rate=DEFAULT_HPARAMS["initial_learning_rate"],
42+
q=DEFAULT_HPARAMS["q"],
43+
noise_tolerance=DEFAULT_HPARAMS["noise_tolerance"],
44+
b=DEFAULT_HPARAMS["b"],
45+
a=DEFAULT_HPARAMS["a"],
46+
lambda_reg_weight=DEFAULT_HPARAMS["lambda_reg_weight"],
47+
lambda_entropy_weight=DEFAULT_HPARAMS["lambda_entropy_weight"],
48+
lambda_sum_weight=DEFAULT_HPARAMS["lambda_sum_weight"],
49+
num_classes=N_CLASSES,
50+
target_shape=TARGET_SHAPE,
51+
n_scorers=N_REAL_SCORERS,
52+
)
5953

6054
return build_features_model_from_hparams(
61-
learning_rate=params["initial_learning_rate"],
62-
q=params["q"],
63-
noise_tolerance=params["noise_tolerance"],
64-
a=params["a"],
65-
b=params["b"],
66-
c=params["c"],
67-
lambda_reg_weight=params["lambda_reg_weight"],
68-
lambda_entropy_weight=params["lambda_entropy_weight"],
69-
lambda_sum_weight=params["lambda_sum_weight"],
55+
learning_rate=trial.suggest_float("learning_rate", 1e-5, 1e-2, log=True),
56+
q=trial.suggest_float("q", 0.1, 0.9, step=0.01),
57+
noise_tolerance=trial.suggest_float("noise_tolerance", 0.1, 0.9, step=0.01),
58+
a=trial.suggest_float("a", 0.1, 10.0, step=0.1),
59+
b=trial.suggest_float("b", 0.1, 0.99, step=0.01),
60+
lambda_reg_weight=trial.suggest_float("lambda_reg_weight", 0.0, 10.0, step=0.1),
61+
lambda_entropy_weight=trial.suggest_float(
62+
"lambda_entropy_weight", 0.0, 10.0, step=0.1
63+
),
64+
lambda_sum_weight=trial.suggest_float("lambda_sum_weight", 0.0, 10.0, step=0.1),
7065
num_classes=N_CLASSES,
7166
target_shape=TARGET_SHAPE,
7267
n_scorers=N_REAL_SCORERS,
@@ -88,21 +83,21 @@ def build_model(hp=None):
8883
image_size=TARGET_SHAPE, batch_size=BATCH_SIZE, use_augmentation=False
8984
)
9085

91-
model = handle_training(
86+
model = handle_training_optuna(
9287
processed_train,
9388
processed_validation,
94-
model_builder=build_model,
89+
model_builder=build_model_from_trial,
9590
use_tuner=args.use_tuner,
9691
tuner_epochs=TUNER_EPOCHS,
97-
objective="val_segmentation_output_dice_coefficient",
92+
objective=OBJECTIVE,
9893
)
9994

10095
vis_callback = FeatureVisualizationCallback(
10196
processed_validation, save_dir="vis/histology/features"
10297
)
10398

10499
lr_scheduler = ReduceLROnPlateau(
105-
monitor="val_segmentation_output_dice_coefficient",
100+
monitor=OBJECTIVE,
106101
factor=0.5,
107102
patience=3,
108103
min_lr=1e-6,
@@ -120,7 +115,7 @@ def build_model(hp=None):
120115
vis_callback,
121116
lr_scheduler,
122117
EarlyStopping(
123-
monitor="val_segmentation_output_dice_coefficient",
118+
monitor=OBJECTIVE,
124119
patience=5,
125120
mode="max",
126121
restore_best_weights=True,
Lines changed: 51 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import argparse
22

3+
from keras import Model
34
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
45

56
from seg_tgce.data.crowd_seg.tfds_builder import (
@@ -8,41 +9,58 @@
89
get_processed_data,
910
)
1011
from seg_tgce.experiments.plot_utils import plot_training_history, print_test_metrics
12+
from seg_tgce.experiments.types import HpTunerTrial
13+
from seg_tgce.experiments.utils import handle_training_optuna
1114
from seg_tgce.models.builders import build_pixel_model_from_hparams
1215
from seg_tgce.models.ma_model import PixelVisualizationCallback
1316

14-
from ..utils import handle_training
15-
1617
TARGET_SHAPE = (256, 256)
1718
BATCH_SIZE = 8
1819
TRAIN_EPOCHS = 50
1920
TUNER_EPOCHS = 10
2021
MAX_TRIALS = 10
21-
22-
23-
def build_model(hp):
24-
learning_rate = hp.Float(
25-
"learning_rate", min_value=1e-5, max_value=1e-2, sampling="LOG"
26-
)
27-
q = hp.Float("q", min_value=0.1, max_value=0.9, step=0.1)
28-
noise_tolerance = hp.Float("noise_tolerance", min_value=0.1, max_value=0.9, step=0.1)
29-
lambda_reg_weight = hp.Float(
30-
"lambda_reg_weight", min_value=0.01, max_value=0.5, step=0.01
31-
)
32-
lambda_entropy_weight = hp.Float(
33-
"lambda_entropy_weight", min_value=0.01, max_value=0.5, step=0.01
34-
)
35-
lambda_sum_weight = hp.Float(
36-
"lambda_sum_weight", min_value=0.01, max_value=0.5, step=0.01
37-
)
22+
STUDY_NAME = "histology_pixel_tuning"
23+
OBJECTIVE = "val_segmentation_output_dice_coefficient"
24+
25+
DEFAULT_HPARAMS = {
26+
"initial_learning_rate": 1e-3,
27+
"q": 0.7,
28+
"noise_tolerance": 0.5,
29+
"a": 0.2,
30+
"b": 0.7,
31+
"lambda_reg_weight": 0.1,
32+
"lambda_entropy_weight": 0.1,
33+
"lambda_sum_weight": 0.1,
34+
}
35+
36+
37+
def build_model_from_trial(trial: HpTunerTrial | None) -> Model:
38+
if trial is None:
39+
return build_pixel_model_from_hparams(
40+
learning_rate=DEFAULT_HPARAMS["initial_learning_rate"],
41+
q=DEFAULT_HPARAMS["q"],
42+
noise_tolerance=DEFAULT_HPARAMS["noise_tolerance"],
43+
b=DEFAULT_HPARAMS["b"],
44+
a=DEFAULT_HPARAMS["a"],
45+
lambda_reg_weight=DEFAULT_HPARAMS["lambda_reg_weight"],
46+
lambda_entropy_weight=DEFAULT_HPARAMS["lambda_entropy_weight"],
47+
lambda_sum_weight=DEFAULT_HPARAMS["lambda_sum_weight"],
48+
num_classes=N_CLASSES,
49+
target_shape=TARGET_SHAPE,
50+
n_scorers=N_REAL_SCORERS,
51+
)
3852

3953
return build_pixel_model_from_hparams(
40-
learning_rate=learning_rate,
41-
q=q,
42-
noise_tolerance=noise_tolerance,
43-
lambda_reg_weight=lambda_reg_weight,
44-
lambda_entropy_weight=lambda_entropy_weight,
45-
lambda_sum_weight=lambda_sum_weight,
54+
learning_rate=trial.suggest_float("learning_rate", 1e-5, 1e-2, log=True),
55+
q=trial.suggest_float("q", 0.1, 0.9, step=0.01),
56+
noise_tolerance=trial.suggest_float("noise_tolerance", 0.1, 0.9, step=0.01),
57+
a=trial.suggest_float("a", 0.1, 10.0, step=0.1),
58+
b=trial.suggest_float("b", 0.1, 0.99, step=0.01),
59+
lambda_reg_weight=trial.suggest_float("lambda_reg_weight", 0.0, 10.0, step=0.1),
60+
lambda_entropy_weight=trial.suggest_float(
61+
"lambda_entropy_weight", 0.0, 10.0, step=0.1
62+
),
63+
lambda_sum_weight=trial.suggest_float("lambda_sum_weight", 0.0, 10.0, step=0.1),
4664
num_classes=N_CLASSES,
4765
target_shape=TARGET_SHAPE,
4866
n_scorers=N_REAL_SCORERS,
@@ -67,22 +85,23 @@ def build_model(hp):
6785
augmentation_factor=2,
6886
)
6987

70-
model = handle_training(
88+
model = handle_training_optuna(
7189
processed_train,
7290
processed_validation,
73-
model_builder=build_model,
91+
model_builder=build_model_from_trial,
7492
use_tuner=args.use_tuner,
7593
tuner_epochs=TUNER_EPOCHS,
76-
objective="val_segmentation_output_dice_coefficient",
94+
objective=OBJECTIVE,
7795
tuner_max_trials=MAX_TRIALS,
96+
study_name=STUDY_NAME,
7897
)
7998

8099
vis_callback = PixelVisualizationCallback(
81100
processed_validation, save_dir="vis/histology/features"
82101
)
83102

84103
lr_scheduler = ReduceLROnPlateau(
85-
monitor="val_segmentation_output_dice_coefficient",
104+
monitor=OBJECTIVE,
86105
factor=0.5,
87106
patience=3,
88107
min_lr=1e-6,
@@ -100,13 +119,13 @@ def build_model(hp):
100119
vis_callback,
101120
lr_scheduler,
102121
EarlyStopping(
103-
monitor="val_segmentation_output_dice_coefficient",
122+
monitor=OBJECTIVE,
104123
patience=5,
105124
mode="max",
106125
restore_best_weights=True,
107126
),
108127
],
109128
)
110129

111-
plot_training_history(history, "Histology Features Model Training History")
112-
print_test_metrics(model, processed_test, "Histology Features")
130+
plot_training_history(history, "Histology Pixel Model Training History")
131+
print_test_metrics(model, processed_test, "Histology Pixel")

0 commit comments

Comments (0)