Skip to content

Commit 67af4b2

Browse files
committed
fix metrics for estimators; improve error handling
1 parent 31808db commit 67af4b2

File tree

3 files changed

+137
-151
lines changed

3 files changed

+137
-151
lines changed

examples/ml_visualization_example.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -19,7 +19,7 @@ def main():
1919
print("=" * 40)
2020

2121
# Create KNN classifier function
22-
knn_func = KNeighborsClassifierFunction(metric="score")
22+
knn_func = KNeighborsClassifierFunction(metric="accuracy")
2323

2424
# Override search space with smaller values for faster demo
2525
def reduced_search_space(**kwargs):

scripts/generate_ml_plots.py

Lines changed: 83 additions & 93 deletions
Original file line number | Diff line number | Diff line change
@@ -68,8 +68,11 @@
6868

6969
def create_reduced_ml_function(func_class, reduced_space):
7070
"""Create ML function instance with reduced search space for faster evaluation."""
71-
# Create instance
72-
func = func_class(metric="score")
71+
# Create instance with appropriate metric
72+
if 'Classifier' in func_class.__name__:
73+
func = func_class(metric="accuracy") # Use accuracy for classification
74+
else:
75+
func = func_class(metric="neg_mean_squared_error") # Use MSE for regression
7376

7477
# Override search_space method with reduced space
7578
original_search_space = func.search_space
@@ -92,71 +95,58 @@ def generate_ml_plots():
9295
print(f"Processing {func_name}...")
9396
print(f"{'='*50}")
9497

95-
try:
96-
# Create function instance with reduced search space
97-
func_class = config['class']
98-
reduced_space = config['reduced_search_space']
99-
ml_func = create_reduced_ml_function(func_class, reduced_space)
98+
# Create function instance with reduced search space
99+
func_class = config['class']
100+
reduced_space = config['reduced_search_space']
101+
ml_func = create_reduced_ml_function(func_class, reduced_space)
102+
103+
# Create function-specific output directory
104+
func_output_dir = os.path.join(ml_output_dir, ml_func._name_)
105+
os.makedirs(func_output_dir, exist_ok=True)
106+
107+
print(f"Search space: {ml_func.search_space()}")
108+
109+
# 1. Generate hyperparameter vs hyperparameter plots
110+
print("\n1. Generating hyperparameter interaction plots...")
111+
for param1, param2 in config['hyperparameter_pairs']:
112+
print(f" Creating {param1} vs {param2} plot...")
100113

101-
# Create function-specific output directory
102-
func_output_dir = os.path.join(ml_output_dir, ml_func._name_)
103-
os.makedirs(func_output_dir, exist_ok=True)
114+
fig = plotly_ml_hyperparameter_heatmap(
115+
ml_func, param1, param2,
116+
title=f"{ml_func.name} - {param1} vs {param2}"
117+
)
104118

105-
print(f"Search space: {ml_func.search_space()}")
119+
# Save as image
120+
output_path = os.path.join(func_output_dir, f"{param1}_vs_{param2}_heatmap.jpg")
121+
fig.write_image(output_path, format="jpeg", width=900, height=700)
106122

107-
# 1. Generate hyperparameter vs hyperparameter plots
108-
print("\n1. Generating hyperparameter interaction plots...")
109-
for param1, param2 in config['hyperparameter_pairs']:
110-
print(f" Creating {param1} vs {param2} plot...")
111-
112-
try:
113-
fig = plotly_ml_hyperparameter_heatmap(
114-
ml_func, param1, param2,
115-
title=f"{ml_func.name} - {param1} vs {param2}"
116-
)
117-
118-
# Save as image
119-
output_path = os.path.join(func_output_dir, f"{param1}_vs_{param2}_heatmap.jpg")
120-
fig.write_image(output_path, format="jpeg", width=900, height=700)
121-
122-
# Also save to main images directory for README
123-
main_output_path = os.path.join(output_dir, f"{ml_func._name_}_{param1}_vs_{param2}_heatmap.jpg")
124-
fig.write_image(main_output_path, format="jpeg", width=900, height=700)
125-
126-
print(f" ✓ Saved {param1} vs {param2} heatmap")
127-
128-
except Exception as e:
129-
print(f" ✗ Error creating {param1} vs {param2} plot: {e}")
123+
# Also save to main images directory for README
124+
main_output_path = os.path.join(output_dir, f"{ml_func._name_}_{param1}_vs_{param2}_heatmap.jpg")
125+
fig.write_image(main_output_path, format="jpeg", width=900, height=700)
126+
127+
print(f" ✓ Saved {param1} vs {param2} heatmap")
128+
129+
# 2. Generate dataset vs hyperparameter plots
130+
print("\n2. Generating dataset analysis plots...")
131+
for hyperparameter in config['dataset_analyses']:
132+
print(f" Creating dataset vs {hyperparameter} plot...")
133+
134+
fig = plotly_dataset_hyperparameter_analysis(
135+
ml_func, hyperparameter,
136+
title=f"{ml_func.name} - Dataset vs {hyperparameter}"
137+
)
138+
139+
# Save as image
140+
output_path = os.path.join(func_output_dir, f"dataset_vs_{hyperparameter}_analysis.jpg")
141+
fig.write_image(output_path, format="jpeg", width=1000, height=700)
142+
143+
# Also save to main images directory for README
144+
main_output_path = os.path.join(output_dir, f"{ml_func._name_}_dataset_vs_{hyperparameter}_analysis.jpg")
145+
fig.write_image(main_output_path, format="jpeg", width=1000, height=700)
130146

131-
# 2. Generate dataset vs hyperparameter plots
132-
print("\n2. Generating dataset analysis plots...")
133-
for hyperparameter in config['dataset_analyses']:
134-
print(f" Creating dataset vs {hyperparameter} plot...")
147+
print(f" ✓ Saved dataset vs {hyperparameter} analysis")
135148

136-
try:
137-
fig = plotly_dataset_hyperparameter_analysis(
138-
ml_func, hyperparameter,
139-
title=f"{ml_func.name} - Dataset vs {hyperparameter}"
140-
)
141-
142-
# Save as image
143-
output_path = os.path.join(func_output_dir, f"dataset_vs_{hyperparameter}_analysis.jpg")
144-
fig.write_image(output_path, format="jpeg", width=1000, height=700)
145-
146-
# Also save to main images directory for README
147-
main_output_path = os.path.join(output_dir, f"{ml_func._name_}_dataset_vs_{hyperparameter}_analysis.jpg")
148-
fig.write_image(main_output_path, format="jpeg", width=1000, height=700)
149-
150-
print(f" ✓ Saved dataset vs {hyperparameter} analysis")
151-
152-
except Exception as e:
153-
print(f" ✗ Error creating dataset vs {hyperparameter} plot: {e}")
154-
155-
print(f"✓ Completed {func_name} analysis")
156-
157-
except Exception as e:
158-
print(f"✗ Error processing {func_name}: {e}")
159-
continue
149+
print(f"✓ Completed {func_name} analysis")
160150

161151
print(f"\n✓ ML plot generation complete! Images saved to:")
162152
print(f" - Individual function plots: {ml_output_dir}")
@@ -166,37 +156,37 @@ def generate_sample_plots():
166156
"""Generate a few sample plots for testing."""
167157
print("Generating sample ML plots for testing...")
168158

169-
# Test with KNeighborsClassifier (smallest/fastest)
170-
try:
171-
ml_func = create_reduced_ml_function(
172-
KNeighborsClassifierFunction,
173-
{
174-
'n_neighbors': [3, 10, 20], # Very small for testing
175-
'algorithm': ['auto', 'ball_tree'],
176-
'cv': [3],
177-
}
178-
)
179-
180-
print("Creating sample hyperparameter plot...")
181-
fig1 = plotly_ml_hyperparameter_heatmap(
182-
ml_func, 'n_neighbors', 'algorithm',
183-
title="Sample: KNN Hyperparameter Analysis"
184-
)
185-
fig1.write_image(os.path.join(output_dir, "sample_knn_hyperparams.jpg"),
186-
format="jpeg", width=900, height=700)
187-
188-
print("Creating sample dataset analysis plot...")
189-
fig2 = plotly_dataset_hyperparameter_analysis(
190-
ml_func, 'n_neighbors',
191-
title="Sample: Dataset vs n_neighbors"
192-
)
193-
fig2.write_image(os.path.join(output_dir, "sample_knn_datasets.jpg"),
194-
format="jpeg", width=1000, height=700)
195-
196-
print("✓ Sample plots generated successfully!")
197-
198-
except Exception as e:
199-
print(f"✗ Error generating sample plots: {e}")
159+
# Create with proper metric
160+
ml_func = KNeighborsClassifierFunction(metric="accuracy")
161+
162+
# Override search space with small values for testing
163+
def reduced_search_space_method(**kwargs):
164+
return {
165+
'n_neighbors': [3, 10, 20], # Very small for testing
166+
'algorithm': ['auto', 'ball_tree'],
167+
'cv': [3],
168+
'dataset': ml_func.dataset_default # Keep all datasets
169+
}
170+
171+
ml_func.search_space = reduced_search_space_method
172+
173+
print("Creating sample hyperparameter plot...")
174+
fig1 = plotly_ml_hyperparameter_heatmap(
175+
ml_func, 'n_neighbors', 'algorithm',
176+
title="Sample: KNN Hyperparameter Analysis"
177+
)
178+
fig1.write_image(os.path.join(output_dir, "sample_knn_hyperparams.jpg"),
179+
format="jpeg", width=900, height=700)
180+
181+
print("Creating sample dataset analysis plot...")
182+
fig2 = plotly_dataset_hyperparameter_analysis(
183+
ml_func, 'n_neighbors',
184+
title="Sample: Dataset vs n_neighbors"
185+
)
186+
fig2.write_image(os.path.join(output_dir, "sample_knn_datasets.jpg"),
187+
format="jpeg", width=1000, height=700)
188+
189+
print("✓ Sample plots generated successfully!")
200190

201191
if __name__ == "__main__":
202192
# Check if we should generate sample plots first (faster for testing)

src/surfaces/visualize.py

Lines changed: 53 additions & 57 deletions
Original file line number | Diff line number | Diff line change
@@ -514,59 +514,56 @@ def plotly_ml_hyperparameter_heatmap(
514514
Plotly Figure object
515515
"""
516516
search_space = ml_function.search_space()
517-
518-
# Use provided fixed params or defaults
519-
if fixed_params is None:
520-
fixed_params = {}
517+
fixed_params = fixed_params or {}
521518

522519
# Get parameter ranges
523520
param1_values = search_space[param1]
524521
param2_values = search_space[param2]
525522

526-
# Create grid for evaluation
523+
# SINGLE VALIDATION TEST - fail fast if configuration is wrong
524+
test_params = fixed_params.copy()
525+
test_params[param1] = param1_values[0]
526+
test_params[param2] = param2_values[0]
527+
528+
# Fill in missing required parameters
529+
for param_name in search_space:
530+
if param_name not in test_params:
531+
test_params[param_name] = search_space[param_name][0]
532+
533+
# Test once - if this fails, the whole configuration is wrong
534+
test_result = ml_function.objective_function(test_params)
535+
print(f"✓ ML function validation successful (test result: {test_result:.4f})")
536+
537+
# Create evaluation grid - NO TRY-CATCH, let errors surface
527538
results = []
528-
param1_grid = []
529-
param2_grid = []
530539

531540
print(f"Evaluating {len(param1_values)} x {len(param2_values)} = {len(param1_values) * len(param2_values)} combinations...")
532541
pbar = tqdm(total=len(param1_values) * len(param2_values), desc="ML Evaluation")
533542

534543
for p1_val in param1_values:
535544
row_results = []
536-
row_p1 = []
537-
row_p2 = []
538545

539546
for p2_val in param2_values:
540547
# Create parameter dict
541548
params = fixed_params.copy()
542549
params[param1] = p1_val
543550
params[param2] = p2_val
544551

545-
# Fill in any missing required parameters with defaults
552+
# Fill in missing required parameters
546553
for param_name in search_space:
547554
if param_name not in params:
548-
default_val = search_space[param_name][0] # Use first value as default
549-
params[param_name] = default_val
555+
params[param_name] = search_space[param_name][0]
550556

551-
# Evaluate
552-
try:
553-
score = ml_function.objective_function(params)
554-
row_results.append(score)
555-
except Exception as e:
556-
print(f"Error evaluating {params}: {e}")
557-
row_results.append(np.nan)
558-
559-
row_p1.append(p1_val)
560-
row_p2.append(p2_val)
557+
# Evaluate - NO TRY-CATCH, let real errors surface
558+
score = ml_function.objective_function(params)
559+
row_results.append(float(score))
561560
pbar.update(1)
562561

563562
results.append(row_results)
564-
param1_grid.append(row_p1)
565-
param2_grid.append(row_p2)
566563

567564
pbar.close()
568565

569-
# Convert to numpy arrays
566+
# Convert to numpy and create plot
570567
z_values = np.array(results)
571568

572569
# Handle categorical parameters for display
@@ -633,10 +630,21 @@ def plotly_dataset_hyperparameter_analysis(
633630
Plotly Figure object
634631
"""
635632
search_space = ml_function.search_space()
636-
637-
# Use provided fixed params or defaults
638-
if fixed_params is None:
639-
fixed_params = {}
633+
fixed_params = fixed_params or {}
634+
635+
# SINGLE VALIDATION TEST - fail fast if configuration is wrong
636+
test_params = fixed_params.copy()
637+
test_params['dataset'] = search_space['dataset'][0]
638+
test_params[hyperparameter] = search_space[hyperparameter][0]
639+
640+
# Fill in missing required parameters
641+
for param_name in search_space:
642+
if param_name not in test_params:
643+
test_params[param_name] = search_space[param_name][0]
644+
645+
# Test once - if this fails, the whole configuration is wrong
646+
test_result = ml_function.objective_function(test_params)
647+
print(f"✓ ML function validation successful (test result: {test_result:.4f})")
640648

641649
datasets = search_space['dataset']
642650
hyperparameter_values = search_space[hyperparameter]
@@ -647,7 +655,7 @@ def plotly_dataset_hyperparameter_analysis(
647655
name = dataset_func.__name__.replace('_data', '').replace('_', ' ').title()
648656
dataset_names.append(name)
649657

650-
# Evaluate across datasets and hyperparameter values
658+
# Evaluate across datasets and hyperparameter values - NO TRY-CATCH
651659
results = []
652660

653661
print(f"Evaluating {len(datasets)} datasets x {len(hyperparameter_values)} {hyperparameter} values...")
@@ -665,17 +673,11 @@ def plotly_dataset_hyperparameter_analysis(
665673
# Fill in missing required parameters with defaults
666674
for param_name in search_space:
667675
if param_name not in params:
668-
default_val = search_space[param_name][0]
669-
params[param_name] = default_val
670-
671-
# Evaluate
672-
try:
673-
score = ml_function.objective_function(params)
674-
dataset_results.append(score)
675-
except Exception as e:
676-
print(f"Error evaluating {params}: {e}")
677-
dataset_results.append(np.nan)
676+
params[param_name] = search_space[param_name][0]
678677

678+
# Evaluate - NO TRY-CATCH, let real errors surface
679+
score = ml_function.objective_function(params)
680+
dataset_results.append(float(score))
679681
pbar.update(1)
680682

681683
results.append(dataset_results)
@@ -741,7 +743,7 @@ def create_ml_function_analysis_suite(
741743
search_space = ml_function.search_space()
742744
figures = {}
743745

744-
# Get numeric hyperparameters (exclude dataset and cv)
746+
# Get hyperparameters (exclude dataset and cv)
745747
numeric_params = []
746748
categorical_params = []
747749

@@ -761,26 +763,20 @@ def create_ml_function_analysis_suite(
761763
continue
762764

763765
plot_name = f"hyperparam_{param1}_vs_{param2}"
764-
try:
765-
fig = plotly_ml_hyperparameter_heatmap(
766-
ml_function, param1, param2,
767-
title=f"{ml_function.name} - Hyperparameter Analysis"
768-
)
769-
figures[plot_name] = fig
770-
except Exception as e:
771-
print(f"Error creating plot {plot_name}: {e}")
766+
fig = plotly_ml_hyperparameter_heatmap(
767+
ml_function, param1, param2,
768+
title=f"{ml_function.name} - Hyperparameter Analysis"
769+
)
770+
figures[plot_name] = fig
772771

773772
# 2. Dataset vs Hyperparameter plots
774773
print("Creating dataset analysis plots...")
775774
for param_name in numeric_params + categorical_params:
776775
plot_name = f"dataset_vs_{param_name}"
777-
try:
778-
fig = plotly_dataset_hyperparameter_analysis(
779-
ml_function, param_name,
780-
title=f"{ml_function.name} - Dataset Analysis"
781-
)
782-
figures[plot_name] = fig
783-
except Exception as e:
784-
print(f"Error creating plot {plot_name}: {e}")
776+
fig = plotly_dataset_hyperparameter_analysis(
777+
ml_function, param_name,
778+
title=f"{ml_function.name} - Dataset Analysis"
779+
)
780+
figures[plot_name] = fig
785781

786782
return figures

0 commit comments

Comments (0)