Skip to content

Commit b12345c

Browse files
committed
core/refac: #98 missing hps
- missing hps for features and pixel experiments
1 parent 42a9beb commit b12345c

File tree

6 files changed

+219
-104
lines changed

6 files changed

+219
-104
lines changed

core/pyproject.toml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
[project]
22
description = "Framework for handling image segmentation in the context of multiple annotators"
33
name = "seg_tgce"
4-
version = "0.3.7"
4+
version = "0.3.9"
55
readme = "README.md"
66
authors = [{ name = "Brandon Lotero", email = "blotero@gmail.com" }]
77
maintainers = [{ name = "Brandon Lotero", email = "blotero@gmail.com" }]
@@ -15,7 +15,7 @@ Issues = "https://github.com/blotero/seg_tgce/issues"
1515

1616
[tool.poetry]
1717
name = "seg_tgce"
18-
version = "0.3.7"
18+
version = "0.3.9"
1919
authors = ["Brandon Lotero <blotero@gmail.com>"]
2020
description = "A package for the SEG TGCE project"
2121
readme = "README.md"

core/seg_tgce/experiments/pets/features.py

Lines changed: 85 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,15 @@
1-
import keras_tuner as kt
2-
import tensorflow as tf
1+
import argparse
2+
3+
from keras import Model
4+
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
35

46
from seg_tgce.data.oxford_pet.oxford_pet import (
57
fetch_models,
68
get_data_multiple_annotators,
79
)
810
from seg_tgce.experiments.plot_utils import plot_training_history, print_test_metrics
11+
from seg_tgce.experiments.types import HpTunerTrial
12+
from seg_tgce.experiments.utils import handle_training_optuna
913
from seg_tgce.models.builders import build_features_model_from_hparams
1014
from seg_tgce.models.ma_model import FeatureVisualizationCallback
1115

@@ -16,39 +20,68 @@
1620
NUM_SCORERS = len(NOISE_LEVELS)
1721
TRAIN_EPOCHS = 50
1822
TUNER_EPOCHS = 1
19-
TUNER_TRIALS = 1
23+
TUNER_MAX_TRIALS = 1
24+
STUDY_NAME = "pets_features_tuning"
25+
OBJECTIVE = "val_segmentation_output_dice_coefficient"
2026

27+
DEFAULT_HPARAMS = {
28+
"initial_learning_rate": 1e-3,
29+
"q": 0.7,
30+
"noise_tolerance": 0.5,
31+
"a": 0.2,
32+
"b": 0.7,
33+
"c": 1.0,
34+
"lambda_reg_weight": 0.1,
35+
"lambda_entropy_weight": 0.1,
36+
"lambda_sum_weight": 0.1,
37+
}
2138

22-
def build_model(hp: kt.HyperParameters) -> tf.keras.Model:
23-
learning_rate = hp.Float(
24-
"learning_rate", min_value=1e-5, max_value=1e-2, sampling="LOG"
25-
)
26-
q = hp.Float("q", min_value=0.1, max_value=0.9, step=0.1)
27-
noise_tolerance = hp.Float("noise_tolerance", min_value=0.1, max_value=0.9, step=0.1)
28-
lambda_reg_weight = hp.Float(
29-
"lambda_reg_weight", min_value=0.01, max_value=0.5, step=0.01
30-
)
31-
lambda_entropy_weight = hp.Float(
32-
"lambda_entropy_weight", min_value=0.01, max_value=0.5, step=0.01
33-
)
34-
lambda_sum_weight = hp.Float(
35-
"lambda_sum_weight", min_value=0.01, max_value=0.5, step=0.01
36-
)
39+
40+
def build_model_from_trial(trial: HpTunerTrial | None) -> Model:
41+
if trial is None:
42+
return build_features_model_from_hparams(
43+
learning_rate=DEFAULT_HPARAMS["initial_learning_rate"],
44+
q=DEFAULT_HPARAMS["q"],
45+
noise_tolerance=DEFAULT_HPARAMS["noise_tolerance"],
46+
b=DEFAULT_HPARAMS["b"],
47+
c=DEFAULT_HPARAMS["c"],
48+
a=DEFAULT_HPARAMS["a"],
49+
lambda_reg_weight=DEFAULT_HPARAMS["lambda_reg_weight"],
50+
lambda_entropy_weight=DEFAULT_HPARAMS["lambda_entropy_weight"],
51+
lambda_sum_weight=DEFAULT_HPARAMS["lambda_sum_weight"],
52+
num_classes=NUM_CLASSES,
53+
target_shape=TARGET_SHAPE,
54+
n_scorers=NUM_SCORERS,
55+
)
3756

3857
return build_features_model_from_hparams(
39-
learning_rate=learning_rate,
40-
q=q,
41-
noise_tolerance=noise_tolerance,
42-
lambda_reg_weight=lambda_reg_weight,
43-
lambda_entropy_weight=lambda_entropy_weight,
44-
lambda_sum_weight=lambda_sum_weight,
58+
learning_rate=trial.suggest_float("learning_rate", 1e-5, 1e-2, log=True),
59+
q=trial.suggest_float("q", 0.1, 0.9, step=0.01),
60+
noise_tolerance=trial.suggest_float("noise_tolerance", 0.1, 0.9, step=0.01),
61+
b=trial.suggest_float("b", 0.1, 1.0, step=0.01),
62+
a=trial.suggest_float("a", 0.1, 1.0, step=0.01),
63+
c=trial.suggest_float("c", 0.1, 10.0, step=0.1),
64+
lambda_reg_weight=trial.suggest_float("lambda_reg_weight", 0.0, 10.0, step=0.1),
65+
lambda_entropy_weight=trial.suggest_float(
66+
"lambda_entropy_weight", 0.0, 10.0, step=0.1
67+
),
68+
lambda_sum_weight=trial.suggest_float("lambda_sum_weight", 0.0, 10.0, step=0.1),
4569
num_classes=NUM_CLASSES,
4670
target_shape=TARGET_SHAPE,
4771
n_scorers=NUM_SCORERS,
4872
)
4973

5074

5175
if __name__ == "__main__":
76+
parser = argparse.ArgumentParser(
77+
description="Train pets features model with or without hyperparameter tuning"
78+
)
79+
parser.add_argument(
80+
"--use-tuner",
81+
action="store_true",
82+
help="Use Keras Tuner for hyperparameter optimization",
83+
)
84+
args = parser.parse_args()
5285
disturbance_models = fetch_models(NOISE_LEVELS)
5386
train, val, test = get_data_multiple_annotators(
5487
annotation_models=disturbance_models,
@@ -57,37 +90,43 @@ def build_model(hp: kt.HyperParameters) -> tf.keras.Model:
5790
labeling_rate=1.0,
5891
)
5992

60-
tuner = kt.BayesianOptimization(
61-
build_model,
62-
objective=kt.Objective(
63-
"val_segmentation_output_dice_coefficient", direction="max"
64-
),
65-
max_trials=TUNER_TRIALS,
66-
directory="tuner_results",
67-
project_name="features_tuning",
93+
model = handle_training_optuna(
94+
train.take(10).cache(),
95+
val.take(10).cache(),
96+
model_builder=build_model_from_trial,
97+
use_tuner=args.use_tuner,
98+
tuner_epochs=TUNER_EPOCHS,
99+
objective=OBJECTIVE,
100+
tuner_max_trials=TUNER_MAX_TRIALS,
101+
study_name=STUDY_NAME,
68102
)
69103

70-
print("Starting hyperparameter search...")
71-
tuner.search(
72-
train.take(16).cache(),
73-
epochs=TUNER_EPOCHS,
74-
validation_data=val.take(8).cache(),
75-
)
76-
77-
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
78-
print("\nBest hyperparameters:")
79-
for param, value in best_hps.values.items():
80-
print(f"{param}: {value}")
104+
vis_callback = FeatureVisualizationCallback(val, save_dir="vis/pets/features")
81105

82-
model = build_model(best_hps)
83-
vis_callback = FeatureVisualizationCallback(val)
106+
lr_scheduler = ReduceLROnPlateau(
107+
monitor=OBJECTIVE,
108+
factor=0.5,
109+
patience=3,
110+
min_lr=1e-6,
111+
mode="max",
112+
verbose=1,
113+
)
84114

85115
print("\nTraining with best hyperparameters...")
86116
history = model.fit(
87117
train.take(16).cache(),
88118
epochs=TRAIN_EPOCHS,
89119
validation_data=val.take(8).cache(),
90-
callbacks=[vis_callback],
120+
callbacks=[
121+
vis_callback,
122+
lr_scheduler,
123+
EarlyStopping(
124+
monitor=OBJECTIVE,
125+
patience=5,
126+
mode="max",
127+
restore_best_weights=True,
128+
),
129+
],
91130
)
92131

93132
plot_training_history(history, "Features Model Training History")
Lines changed: 93 additions & 53 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,15 @@
1-
import keras_tuner as kt
2-
import tensorflow as tf
1+
import argparse
2+
3+
from keras import Model
4+
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
35

46
from seg_tgce.data.oxford_pet.oxford_pet import (
57
fetch_models,
68
get_data_multiple_annotators,
79
)
810
from seg_tgce.experiments.plot_utils import plot_training_history, print_test_metrics
11+
from seg_tgce.experiments.types import HpTunerTrial
12+
from seg_tgce.experiments.utils import handle_training_optuna
913
from seg_tgce.models.builders import build_pixel_model_from_hparams
1014
from seg_tgce.models.ma_model import PixelVisualizationCallback
1115

@@ -16,80 +20,116 @@
1620
NUM_SCORERS = len(NOISE_LEVELS)
1721
TRAIN_EPOCHS = 50
1822
TUNER_EPOCHS = 1
19-
TUNER_TRIALS = 1
23+
TUNER_MAX_TRIALS = 1
24+
STUDY_NAME = "pets_pixel_tuning"
25+
OBJECTIVE = "val_segmentation_output_dice_coefficient"
26+
LABELING_RATE = 1.0
2027

28+
DEFAULT_HPARAMS = {
29+
"initial_learning_rate": 1e-3,
30+
"q": 0.7,
31+
"noise_tolerance": 0.5,
32+
"a": 0.2,
33+
"b": 0.7,
34+
"c": 1.0,
35+
"lambda_reg_weight": 0.1,
36+
"lambda_entropy_weight": 0.1,
37+
"lambda_sum_weight": 0.1,
38+
}
2139

22-
def build_model(hp: kt.HyperParameters) -> tf.keras.Model:
23-
learning_rate = hp.Float(
24-
"learning_rate", min_value=1e-5, max_value=1e-2, sampling="LOG"
25-
)
26-
q = hp.Float("q", min_value=0.1, max_value=0.9, step=0.1)
27-
noise_tolerance = hp.Float("noise_tolerance", min_value=0.1, max_value=0.9, step=0.1)
28-
lambda_reg_weight = hp.Float(
29-
"lambda_reg_weight", min_value=0.01, max_value=0.5, step=0.01
30-
)
31-
lambda_entropy_weight = hp.Float(
32-
"lambda_entropy_weight", min_value=0.01, max_value=0.5, step=0.01
33-
)
34-
lambda_sum_weight = hp.Float(
35-
"lambda_sum_weight", min_value=0.01, max_value=0.5, step=0.01
36-
)
40+
41+
def build_model_from_trial(trial: HpTunerTrial | None) -> Model:
42+
if trial is None:
43+
return build_pixel_model_from_hparams(
44+
learning_rate=DEFAULT_HPARAMS["initial_learning_rate"],
45+
q=DEFAULT_HPARAMS["q"],
46+
noise_tolerance=DEFAULT_HPARAMS["noise_tolerance"],
47+
b=DEFAULT_HPARAMS["b"],
48+
c=DEFAULT_HPARAMS["c"],
49+
a=DEFAULT_HPARAMS["a"],
50+
lambda_reg_weight=DEFAULT_HPARAMS["lambda_reg_weight"],
51+
lambda_entropy_weight=DEFAULT_HPARAMS["lambda_entropy_weight"],
52+
lambda_sum_weight=DEFAULT_HPARAMS["lambda_sum_weight"],
53+
num_classes=NUM_CLASSES,
54+
target_shape=TARGET_SHAPE,
55+
n_scorers=NUM_SCORERS,
56+
)
3757

3858
return build_pixel_model_from_hparams(
39-
learning_rate=learning_rate,
40-
q=q,
41-
noise_tolerance=noise_tolerance,
42-
lambda_reg_weight=lambda_reg_weight,
43-
lambda_entropy_weight=lambda_entropy_weight,
44-
lambda_sum_weight=lambda_sum_weight,
59+
learning_rate=trial.suggest_float("learning_rate", 1e-5, 1e-2, log=True),
60+
q=trial.suggest_float("q", 0.1, 0.9, step=0.01),
61+
noise_tolerance=trial.suggest_float("noise_tolerance", 0.1, 0.9, step=0.01),
62+
b=trial.suggest_float("b", 0.1, 1.0, step=0.01),
63+
a=trial.suggest_float("a", 0.1, 1.0, step=0.01),
64+
c=trial.suggest_float("c", 0.1, 10.0, step=0.1),
65+
lambda_reg_weight=trial.suggest_float("lambda_reg_weight", 0.0, 10.0, step=0.1),
66+
lambda_entropy_weight=trial.suggest_float(
67+
"lambda_entropy_weight", 0.0, 10.0, step=0.1
68+
),
69+
lambda_sum_weight=trial.suggest_float("lambda_sum_weight", 0.0, 10.0, step=0.1),
4570
num_classes=NUM_CLASSES,
4671
target_shape=TARGET_SHAPE,
4772
n_scorers=NUM_SCORERS,
4873
)
4974

5075

5176
if __name__ == "__main__":
77+
parser = argparse.ArgumentParser(
78+
description="Train pets pixel model with or without hyperparameter tuning"
79+
)
80+
parser.add_argument(
81+
"--use-tuner",
82+
action="store_true",
83+
help="Use Keras Tuner for hyperparameter optimization",
84+
)
85+
args = parser.parse_args()
86+
5287
disturbance_models = fetch_models(NOISE_LEVELS)
5388
train, val, test = get_data_multiple_annotators(
5489
annotation_models=disturbance_models,
5590
target_shape=TARGET_SHAPE,
5691
batch_size=BATCH_SIZE,
57-
labeling_rate=0.5,
92+
labeling_rate=LABELING_RATE,
5893
)
5994

60-
tuner = kt.BayesianOptimization(
61-
build_model,
62-
objective=kt.Objective(
63-
"val_segmentation_output_dice_coefficient", direction="max"
64-
),
65-
max_trials=TUNER_TRIALS,
66-
directory="tuner_results",
67-
project_name="pixel_tuning",
95+
model = handle_training_optuna(
96+
train.take(10).cache(),
97+
val.take(10).cache(),
98+
model_builder=build_model_from_trial,
99+
use_tuner=args.use_tuner,
100+
tuner_epochs=TUNER_EPOCHS,
101+
objective=OBJECTIVE,
102+
tuner_max_trials=TUNER_MAX_TRIALS,
103+
study_name=STUDY_NAME,
68104
)
69105

70-
print("Starting hyperparameter search...")
71-
tuner.search(
72-
train.take(16).cache(),
73-
epochs=TUNER_EPOCHS,
74-
validation_data=val.take(8).cache(),
75-
)
106+
vis_callback = PixelVisualizationCallback(val, save_dir="vis/pets/pixel")
76107

77-
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
78-
print("\nBest hyperparameters:")
79-
for param, value in best_hps.values.items():
80-
print(f"{param}: {value}")
81-
82-
model = build_model(best_hps)
83-
vis_callback = PixelVisualizationCallback(val)
108+
lr_scheduler = ReduceLROnPlateau(
109+
monitor=OBJECTIVE,
110+
factor=0.5,
111+
patience=3,
112+
min_lr=1e-6,
113+
mode="max",
114+
verbose=1,
115+
)
84116

85-
print("\nTraining with best hyperparameters...")
117+
print("\nTraining final model...")
86118
history = model.fit(
87-
train.take(16).cache(),
119+
train,
88120
epochs=TRAIN_EPOCHS,
89-
validation_data=val.take(8).cache(),
90-
callbacks=[vis_callback],
121+
validation_data=val.cache(),
122+
callbacks=[
123+
vis_callback,
124+
lr_scheduler,
125+
EarlyStopping(
126+
monitor=OBJECTIVE,
127+
patience=5,
128+
mode="max",
129+
restore_best_weights=True,
130+
),
131+
],
91132
)
92133

93-
plot_training_history(history, "Pixel Model Training History")
94-
95-
print_test_metrics(model, test, "Pixel")
134+
plot_training_history(history, "Pets Pixel Model Training History")
135+
print_test_metrics(model, test, "Pets Pixel")

core/seg_tgce/experiments/pets/scalar.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
from seg_tgce.models.builders import build_scalar_model_from_hparams
1414
from seg_tgce.models.ma_model import ScalarVisualizationCallback
1515

16-
TARGET_SHAPE = (128, 128)
16+
TARGET_SHAPE = (256, 256)
1717
BATCH_SIZE = 16
1818
NUM_CLASSES = 3
1919
NOISE_LEVELS = [-20.0, 10.0]
@@ -24,6 +24,7 @@
2424
TUNER_MAX_TRIALS = 3
2525
STUDY_NAME = "pets_scalar_tuning"
2626
OBJECTIVE = "val_segmentation_output_dice_coefficient"
27+
2728
DEFAULT_HPARAMS = {
2829
"initial_learning_rate": 1e-3,
2930
"q": 0.7,

0 commit comments

Comments (0)