diff --git a/.gitignore b/.gitignore index aef66ec3..74c71f53 100644 --- a/.gitignore +++ b/.gitignore @@ -160,6 +160,13 @@ cython_debug/ # option (not recommended) you can uncomment the following to ignore the entire idea folder. #.idea/ +# Visualization examples output +*.png +*.jpg +*.jpeg +*.gif +*.svg + # General .DS_Store .AppleDouble diff --git a/VISUALIZATION.md b/VISUALIZATION.md new file mode 100644 index 00000000..afcc8e97 --- /dev/null +++ b/VISUALIZATION.md @@ -0,0 +1,329 @@ +# Visualization Module + +Comprehensive visualization and stability testing tools for optimization algorithms. + +## Features + +- **Convergence Curves**: Track best fitness values over iterations +- **Trajectory Plots**: Visualize search paths through 2D solution spaces +- **Average Fitness Tracking**: Monitor population fitness with standard deviation bands +- **Stability Testing**: Run algorithms multiple times with different seeds +- **Statistical Analysis**: Generate box plots, histograms, and summary statistics +- **Multi-Optimizer Comparison**: Compare performance and stability across algorithms + +## Installation + +The visualization module requires matplotlib as an optional dependency: + +```bash +pip install useful-optimizer[visualization] +``` + +Or install manually: + +```bash +pip install matplotlib>=3.7.0 +``` + +## Quick Start + +### Basic Visualization + +```python +from opt.swarm_intelligence.particle_swarm import ParticleSwarm +from opt.benchmark.functions import shifted_ackley +from opt.visualization import Visualizer + +# Create optimizer with history tracking enabled +pso = ParticleSwarm( + func=shifted_ackley, + lower_bound=-5, + upper_bound=5, + dim=2, + max_iter=100, + track_history=True, # Enable history tracking + population_size=30, +) + +# Run optimization +best_solution, best_fitness = pso.search() + +# Create visualizer and generate plots +viz = Visualizer(pso) +viz.plot_convergence() # Show convergence curve +viz.plot_trajectory() # Show 2D search trajectory +viz.plot_average_fitness() # Show population fitness evolution +viz.plot_all() # Generate all plots in one figure +``` + +### Stability Testing + +```python +from opt.visualization import run_stability_test + +# Run optimizer multiple times with different seeds +results = run_stability_test( + optimizer_class=ParticleSwarm, + func=shifted_ackley, + lower_bound=-5, + upper_bound=5, + dim=2, + max_iter=100, + seeds=[42, 123, 456, 789, 1011], # Specific seeds + # OR use: n_runs=10 # Random seeds +) + +# Print statistical summary +results.print_summary() + +# Generate visualizations +results.plot_boxplot() +results.plot_histogram() +``` + +### Compare Multiple Optimizers + +```python +from opt.visualization import compare_optimizers_stability +from opt.swarm_intelligence.particle_swarm import ParticleSwarm +from opt.evolutionary.genetic_algorithm import GeneticAlgorithm + +# Compare two or more optimizers +results_dict, fig = compare_optimizers_stability( + optimizer_classes=[ParticleSwarm, GeneticAlgorithm], + func=shifted_ackley, + lower_bound=-5, + upper_bound=5, + dim=2, + max_iter=100, + n_runs=10, +) + +# Access individual results +for name, results in results_dict.items(): + print(f"{name}: {results.summary()}") +``` + +## API Reference + +### Visualizer Class + +The `Visualizer` class provides visualization methods for a single optimizer run. 
+ +**Constructor:** +```python +Visualizer(optimizer: AbstractOptimizer) +``` + +**Methods:** + +- `plot_convergence(log_scale=False, show=True, ax=None)`: Plot best fitness over iterations +- `plot_trajectory(show=True, ax=None, max_points=1000)`: Plot 2D search trajectory +- `plot_average_fitness(show_std=True, show=True, ax=None)`: Plot population fitness with std bands +- `plot_all(save_path=None)`: Generate comprehensive multi-panel visualization + +### StabilityResults Class + +Stores and analyzes results from multiple optimizer runs. + +**Attributes:** +- `optimizer_name`: Name of the optimizer +- `function_name`: Name of the objective function +- `solutions`: List of best solutions from each run +- `fitness_values`: Array of best fitness values +- `seeds`: List of random seeds used + +**Methods:** + +- `summary()`: Get statistical summary (mean, std, min, max, median, quartiles) +- `print_summary()`: Print formatted summary +- `plot_boxplot(show=True, save_path=None)`: Generate box plot +- `plot_histogram(bins=20, show=True, save_path=None)`: Generate histogram + +### Functions + +**run_stability_test()** + +Run stability test for an optimization algorithm. + +```python +run_stability_test( + optimizer_class: type[AbstractOptimizer], + func: Callable, + lower_bound: float, + upper_bound: float, + dim: int, + max_iter: int = 100, + seeds: Sequence[int] | None = None, + n_runs: int = 10, + verbose: bool = True, + **optimizer_kwargs +) -> StabilityResults +``` + +**compare_optimizers_stability()** + +Compare stability of multiple optimizers. + +```python +compare_optimizers_stability( + optimizer_classes: list[type[AbstractOptimizer]], + func: Callable, + lower_bound: float, + upper_bound: float, + dim: int, + max_iter: int = 100, + n_runs: int = 10, + show: bool = True, + save_path: str | None = None, +) -> tuple[dict[str, StabilityResults], Figure] +``` + +## History Tracking + +To use visualization features, optimizers must be run with `track_history=True`: + +```python +optimizer = ParticleSwarm( + func=shifted_ackley, + lower_bound=-5, + upper_bound=5, + dim=2, + max_iter=100, + track_history=True, # Required for visualization +) +``` + +**Note:** History tracking adds memory overhead proportional to `max_iter × population_size`. For very long runs or large populations, consider using it selectively. 
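+
+The recorded history also lives directly on the optimizer instance, so it can be inspected without the `Visualizer`. A minimal sketch, assuming the `ParticleSwarm` history layout added in this PR (one snapshot per iteration plus the final state, stored under the keys `best_fitness`, `best_solution`, `population_fitness`, and `population`):
+
+```python
+from opt.benchmark.functions import shifted_ackley
+from opt.swarm_intelligence.particle_swarm import ParticleSwarm
+
+pso = ParticleSwarm(
+    func=shifted_ackley,
+    lower_bound=-5,
+    upper_bound=5,
+    dim=2,
+    max_iter=100,
+    track_history=True,
+    population_size=30,
+    seed=42,
+)
+pso.search()
+
+# One best-fitness value per iteration, plus the final state (101 entries for max_iter=100)
+print(len(pso.history["best_fitness"]))
+# Per-iteration arrays holding the whole population's fitness values (shape: (population_size,))
+print(pso.history["population_fitness"][0].shape)
+# Best position recorded at the last snapshot
+print(pso.history["best_solution"][-1])
+```
+
+This is the same data the `Visualizer` consumes, so checking these lengths is a quick way to confirm history tracking was actually enabled before plotting.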
+ +## Advanced Usage + +### Custom Matplotlib Integration + +The visualization methods return matplotlib Figure objects and accept axes parameters, allowing full customization: + +```python +import matplotlib.pyplot as plt + +# Create custom layout +fig, axes = plt.subplots(2, 2, figsize=(12, 10)) + +viz = Visualizer(pso) + +# Plot to specific axes +viz.plot_convergence(show=False, ax=axes[0, 0]) +viz.plot_trajectory(show=False, ax=axes[0, 1]) +viz.plot_average_fitness(show=False, ax=axes[1, 0]) +viz.plot_convergence(log_scale=True, show=False, ax=axes[1, 1]) + +plt.tight_layout() +plt.savefig("custom_visualization.png", dpi=300) +``` + +### Log Scale Convergence + +For functions with wide fitness ranges, use log scale: + +```python +viz.plot_convergence(log_scale=True) +``` + +### Saving Plots + +All plot methods support saving: + +```python +# Individual plots +viz.plot_convergence(show=False) +plt.savefig("convergence.png", dpi=300, bbox_inches="tight") + +# Or use save_path parameter +viz.plot_all(save_path="all_plots.png") +results.plot_boxplot(save_path="stability.png") +``` + +### Reproducible Results + +Use specific seeds for reproducible stability tests: + +```python +results = run_stability_test( + optimizer_class=ParticleSwarm, + func=shifted_ackley, + lower_bound=-5, + upper_bound=5, + dim=2, + seeds=[42, 123, 456], # Same seeds = same results +) +``` + +## Examples + +See `examples_visualization.py` for complete working examples including: + +1. Basic visualization workflow +2. Stability testing with multiple seeds +3. Multi-optimizer comparison +4. Log scale convergence plots +5. Custom matplotlib integration + +Run the examples: + +```bash +python examples_visualization.py +``` + +## Supported Optimizers + +The visualization module works with **all 58+ optimizers** in the package that inherit from `AbstractOptimizer`. This includes: + +- **Swarm Intelligence**: ParticleSwarm, AntColony, FireflyAlgorithm, etc. +- **Evolutionary**: GeneticAlgorithm, DifferentialEvolution, CMAESAlgorithm, etc. +- **Gradient-Based**: AdamW, SGDMomentum, BFGS, etc. +- **Metaheuristic**: SimulatedAnnealing, TabuSearch, HarmonySearch, etc. +- And many more! + +## Performance Considerations + +- **History Tracking**: Adds `O(max_iter × population_size)` memory overhead +- **2D Trajectory**: Only available for 2D problems (dim=2) +- **Large Runs**: For `max_iter > 10000`, consider using `max_points` parameter in `plot_trajectory()` +- **Stability Tests**: Running N tests with M iterations each requires `N × M` function evaluations + +## Tips + +1. **Start Small**: Test with `max_iter=50-100` before running longer optimizations +2. **Use Seeds**: Specify seeds for reproducible results in papers/reports +3. **Compare Fairly**: Use same `max_iter`, bounds, and function for comparison +4. **Check Convergence**: Use log scale to see if optimizer is still improving +5. 
**Population Diversity**: Use `plot_average_fitness()` to monitor exploration vs exploitation + +## Troubleshooting + +**ValueError: "track_history=True"** +- Ensure optimizer is created with `track_history=True` + +**ValueError: "2D problems"** +- Trajectory plotting only works for `dim=2` +- Other plots work for any dimensionality + +**Memory Issues** +- Reduce `max_iter` or `population_size` +- Don't track history for production runs + +**Different Results** +- Ensure same `seed` value for reproducibility +- Check that function evaluations are deterministic + +## Citation + +If you use this visualization module in your research, please cite: + +```bibtex +@software{useful_optimizer, + title = {Useful Optimizer: A Collection of Optimization Algorithms}, + author = {Hahn, Anselm}, + year = {2024}, + url = {https://github.com/Anselmoo/useful-optimizer} +} +``` diff --git a/examples_visualization.py b/examples_visualization.py new file mode 100644 index 00000000..bec5c315 --- /dev/null +++ b/examples_visualization.py @@ -0,0 +1,273 @@ +"""Example usage of the visualization module for optimization algorithms. + +This script demonstrates how to use the visualization capabilities +including convergence plots, trajectory plots, average fitness tracking, +and stability testing. +""" + +from __future__ import annotations + +import matplotlib as mpl +import matplotlib.pyplot as plt + +from opt.benchmark.functions import rosenbrock +from opt.benchmark.functions import shifted_ackley +from opt.benchmark.functions import sphere +from opt.evolutionary.genetic_algorithm import GeneticAlgorithm +from opt.swarm_intelligence.particle_swarm import ParticleSwarm +from opt.visualization import Visualizer +from opt.visualization import compare_optimizers_stability +from opt.visualization import run_stability_test + + +def example_basic_visualization() -> None: + """Example 1: Basic visualization with ParticleSwarm.""" + print("\n" + "=" * 70) + print("Example 1: Basic Visualization with Particle Swarm Optimization") + print("=" * 70) + + # Create optimizer with history tracking enabled + pso = ParticleSwarm( + func=shifted_ackley, + lower_bound=-5, + upper_bound=5, + dim=2, + max_iter=100, + track_history=True, # Enable history tracking + population_size=30, + seed=42, + ) + + # Run optimization + best_solution, best_fitness = pso.search() + print("\nOptimization completed!") + print(f"Best solution: {best_solution}") + print(f"Best fitness: {best_fitness:.6f}") + + # Create visualizer + viz = Visualizer(pso) + + # Plot convergence curve + print("\nGenerating convergence plot...") + viz.plot_convergence(show=False) + plt.savefig("example_convergence.png", dpi=300, bbox_inches="tight") + print("✓ Saved to example_convergence.png") + + # Plot trajectory (2D only) + print("Generating trajectory plot...") + viz.plot_trajectory(show=False) + plt.savefig("example_trajectory.png", dpi=300, bbox_inches="tight") + print("✓ Saved to example_trajectory.png") + + # Plot average fitness + print("Generating average fitness plot...") + viz.plot_average_fitness(show=False) + plt.savefig("example_average_fitness.png", dpi=300, bbox_inches="tight") + print("✓ Saved to example_average_fitness.png") + + # Plot all visualizations in one figure + print("Generating comprehensive plot...") + viz.plot_all(save_path="example_all_plots.png") + print("✓ Saved to example_all_plots.png") + + plt.close("all") + + +def example_stability_testing() -> None: + """Example 2: Stability testing with multiple seeds.""" + print("\n" + "=" * 70) + 
print("Example 2: Stability Testing with Multiple Seeds") + print("=" * 70) + + # Run stability test with specific seeds + results = run_stability_test( + optimizer_class=ParticleSwarm, + func=shifted_ackley, + lower_bound=-5, + upper_bound=5, + dim=2, + max_iter=100, + seeds=[42, 123, 456, 789, 1011, 2022, 3033, 4044, 5055, 6066], + verbose=True, + population_size=30, + ) + + # Get statistical summary + print("\nGenerating statistical summary...") + summary = results.summary() + print(f"Mean fitness: {summary['mean']:.6f}") + print(f"Std deviation: {summary['std']:.6f}") + print(f"Coefficient of variation: {summary['std'] / summary['mean']:.4f}") + + # Generate box plot + print("\nGenerating box plot...") + results.plot_boxplot(show=False, save_path="example_stability_boxplot.png") + print("✓ Saved to example_stability_boxplot.png") + + # Generate histogram + print("Generating histogram...") + results.plot_histogram(show=False, save_path="example_stability_histogram.png") + print("✓ Saved to example_stability_histogram.png") + + plt.close("all") + + +def example_optimizer_comparison() -> None: + """Example 3: Compare stability of multiple optimizers.""" + print("\n" + "=" * 70) + print("Example 3: Compare Stability of Multiple Optimizers") + print("=" * 70) + + # Compare ParticleSwarm and GeneticAlgorithm + results_dict, _fig = compare_optimizers_stability( + optimizer_classes=[ParticleSwarm, GeneticAlgorithm], + func=sphere, + lower_bound=-10, + upper_bound=10, + dim=2, + max_iter=100, + n_runs=10, + show=False, + save_path="example_optimizer_comparison.png", + ) + + print("\nComparison Results:") + print("-" * 70) + for name, results in results_dict.items(): + summary = results.summary() + print(f"\n{name}:") + print(f" Mean: {summary['mean']:.6f} ± {summary['std']:.6f}") + print(f" Min: {summary['min']:.6f}") + print(f" Max: {summary['max']:.6f}") + + print("\n✓ Saved to example_optimizer_comparison.png") + plt.close("all") + + +def example_convergence_log_scale() -> None: + """Example 4: Convergence plot with log scale.""" + print("\n" + "=" * 70) + print("Example 4: Convergence Plot with Log Scale") + print("=" * 70) + + pso = ParticleSwarm( + func=rosenbrock, + lower_bound=-5, + upper_bound=5, + dim=2, + max_iter=200, + track_history=True, + population_size=50, + seed=42, + ) + + _best_solution, best_fitness = pso.search() + print(f"\nBest fitness: {best_fitness:.6f}") + + viz = Visualizer(pso) + + # Create side-by-side comparison + _fig, axes = plt.subplots(1, 2, figsize=(14, 5)) + + # Regular scale + viz.plot_convergence(show=False, ax=axes[0]) + axes[0].set_title("Convergence Curve - Linear Scale", fontsize=14) + + # Log scale + viz.plot_convergence(log_scale=True, show=False, ax=axes[1]) + axes[1].set_title("Convergence Curve - Log Scale", fontsize=14) + + plt.tight_layout() + plt.savefig("example_log_scale_comparison.png", dpi=300, bbox_inches="tight") + print("✓ Saved to example_log_scale_comparison.png") + + plt.close("all") + + +def example_custom_visualization() -> None: + """Example 5: Custom visualization with matplotlib integration.""" + print("\n" + "=" * 70) + print("Example 5: Custom Visualization with Matplotlib Integration") + print("=" * 70) + + # Run optimizer with history + pso = ParticleSwarm( + func=shifted_ackley, + lower_bound=-5, + upper_bound=5, + dim=2, + max_iter=100, + track_history=True, + population_size=30, + seed=42, + ) + pso.search() + + # Create custom multi-panel figure + fig = plt.figure(figsize=(16, 10)) + gs = fig.add_gridspec(3, 2, 
hspace=0.3, wspace=0.3) + + # Add visualizations to custom grid + viz = Visualizer(pso) + + ax1 = fig.add_subplot(gs[0, :]) + viz.plot_convergence(show=False, ax=ax1) + + ax2 = fig.add_subplot(gs[1, 0]) + viz.plot_trajectory(show=False, ax=ax2) + + ax3 = fig.add_subplot(gs[1, 1]) + viz.plot_average_fitness(show=False, ax=ax3) + + ax4 = fig.add_subplot(gs[2, :]) + viz.plot_convergence(log_scale=True, show=False, ax=ax4) + ax4.set_title("Convergence Curve (Log Scale)", fontsize=14) + + plt.suptitle( + "Custom Visualization Dashboard - Particle Swarm Optimization", + fontsize=16, + fontweight="bold", + ) + + plt.savefig("example_custom_dashboard.png", dpi=300, bbox_inches="tight") + print("✓ Saved to example_custom_dashboard.png") + + plt.close("all") + + +def main() -> None: + """Run all examples.""" + print("\n" + "=" * 70) + print("OPTIMIZATION VISUALIZATION EXAMPLES") + print("=" * 70) + print("\nThis script demonstrates the visualization capabilities") + print("of the useful-optimizer package.") + + # Set matplotlib to non-interactive backend + mpl.use("Agg") + + # Run all examples + example_basic_visualization() + example_stability_testing() + example_optimizer_comparison() + example_convergence_log_scale() + example_custom_visualization() + + print("\n" + "=" * 70) + print("ALL EXAMPLES COMPLETED!") + print("=" * 70) + print("\nGenerated files:") + print(" - example_convergence.png") + print(" - example_trajectory.png") + print(" - example_average_fitness.png") + print(" - example_all_plots.png") + print(" - example_stability_boxplot.png") + print(" - example_stability_histogram.png") + print(" - example_optimizer_comparison.png") + print(" - example_log_scale_comparison.png") + print(" - example_custom_dashboard.png") + print("\nFeel free to open and examine these visualizations!") + + +if __name__ == "__main__": + main() diff --git a/opt/abstract_optimizer.py b/opt/abstract_optimizer.py index 673ac33e..c612ddfb 100644 --- a/opt/abstract_optimizer.py +++ b/opt/abstract_optimizer.py @@ -24,6 +24,7 @@ class AbstractOptimizer(ABC): upper_bound (float): The upper bound of the search space. dim (int): The dimensionality of the search space. max_iter (int, optional): The maximum number of iterations for the optimization process. Defaults to 1000. + track_history (bool, optional): Whether to track optimization history for visualization. Defaults to False. Attributes: func (Callable): The objective function to be optimized. @@ -33,6 +34,8 @@ class AbstractOptimizer(ABC): max_iter (int): The maximum number of iterations for the optimization process. seed (int): The seed for the random number generator. population_size (int): The number of individuals in the population. + track_history (bool): Whether to track optimization history. + history (dict): Dictionary containing optimization history if track_history is True. 
Methods: @@ -52,6 +55,7 @@ def __init__( max_iter: int = 1000, seed: int | None = None, population_size: int = 100, + track_history: bool = False, ) -> None: """Initialize the optimizer.""" self.func = func @@ -64,6 +68,17 @@ def __init__( else: self.seed = seed self.population_size = population_size + self.track_history = track_history + self.history: dict[str, list] = ( + { + "best_fitness": [], + "best_solution": [], + "population_fitness": [], + "population": [], + } + if track_history + else {} + ) @abstractmethod def search(self) -> tuple[np.ndarray, float]: diff --git a/opt/swarm_intelligence/particle_swarm.py b/opt/swarm_intelligence/particle_swarm.py index 181792ff..8dbc0c31 100644 --- a/opt/swarm_intelligence/particle_swarm.py +++ b/opt/swarm_intelligence/particle_swarm.py @@ -80,6 +80,7 @@ def __init__( c2: float = 1.5, w: float = 0.5, seed: int | None = None, + track_history: bool = False, ) -> None: """Initialize the ParticleSwarm class. @@ -94,6 +95,7 @@ def __init__( c2 (float, optional): The social parameter (default: 1.5). w (float, optional): The inertia weight (default: 0.5). seed (int | None, optional): The seed for the random number generator (default: None). + track_history (bool, optional): Whether to track optimization history for visualization (default: False). """ super().__init__( func=func, @@ -103,6 +105,7 @@ def __init__( max_iter=max_iter, seed=seed, population_size=population_size, + track_history=track_history, ) self.c1 = c1 self.c2 = c2 @@ -129,6 +132,13 @@ def search(self) -> tuple[np.ndarray, float]: # Main loop for _ in range(self.max_iter): + # Track history if enabled + if self.track_history: + self.history["best_fitness"].append(float(best_fitness)) + self.history["best_solution"].append(best_position.copy()) + self.history["population_fitness"].append(fitness.copy()) + self.history["population"].append(population.copy()) + self.seed += 1 # Update velocity r1 = np.random.default_rng(self.seed + 1).random( @@ -158,6 +168,13 @@ def search(self) -> tuple[np.ndarray, float]: best_position = population[best_index] best_fitness = fitness[best_index] + # Track final state + if self.track_history: + self.history["best_fitness"].append(float(best_fitness)) + self.history["best_solution"].append(best_position.copy()) + self.history["population_fitness"].append(fitness.copy()) + self.history["population"].append(population.copy()) + return best_position, best_fitness diff --git a/opt/test/test_visualization.py b/opt/test/test_visualization.py new file mode 100644 index 00000000..dea256c7 --- /dev/null +++ b/opt/test/test_visualization.py @@ -0,0 +1,314 @@ +"""Unit tests for visualization module.""" + +from __future__ import annotations + +import matplotlib +import matplotlib.pyplot as plt +import numpy as np +import pytest + +from opt.benchmark.functions import shifted_ackley +from opt.swarm_intelligence.particle_swarm import ParticleSwarm +from opt.visualization import StabilityResults +from opt.visualization import Visualizer +from opt.visualization import run_stability_test + + +# Use non-interactive backend for testing +matplotlib.use("Agg") + + +class TestVisualizer: + """Tests for the Visualizer class.""" + + @pytest.fixture + def optimizer_with_history(self): + """Create an optimizer with history tracking.""" + pso = ParticleSwarm( + func=shifted_ackley, + lower_bound=-5, + upper_bound=5, + dim=2, + max_iter=20, + track_history=True, + population_size=20, + seed=42, + ) + pso.search() + return pso + + @pytest.fixture + def 
optimizer_without_history(self): + """Create an optimizer without history tracking.""" + pso = ParticleSwarm( + func=shifted_ackley, + lower_bound=-5, + upper_bound=5, + dim=2, + max_iter=10, + track_history=False, + seed=42, + ) + pso.search() + return pso + + def test_visualizer_initialization(self, optimizer_with_history) -> None: + """Test that Visualizer initializes correctly with history.""" + viz = Visualizer(optimizer_with_history) + assert viz.optimizer == optimizer_with_history + assert viz.history == optimizer_with_history.history + assert len(viz.history["best_fitness"]) > 0 + + def test_visualizer_without_history_raises_error( + self, optimizer_without_history + ) -> None: + """Test that Visualizer raises error when history is not tracked.""" + with pytest.raises(ValueError, match="track_history=True"): + Visualizer(optimizer_without_history) + + def test_plot_convergence(self, optimizer_with_history) -> None: + """Test convergence plot generation.""" + viz = Visualizer(optimizer_with_history) + fig = viz.plot_convergence(show=False) + assert fig is not None + plt.close(fig) + + def test_plot_convergence_log_scale(self, optimizer_with_history) -> None: + """Test convergence plot with log scale.""" + viz = Visualizer(optimizer_with_history) + fig = viz.plot_convergence(log_scale=True, show=False) + assert fig is not None + plt.close(fig) + + def test_plot_trajectory(self, optimizer_with_history) -> None: + """Test trajectory plot generation for 2D problems.""" + viz = Visualizer(optimizer_with_history) + fig = viz.plot_trajectory(show=False) + assert fig is not None + plt.close(fig) + + def test_plot_trajectory_non_2d_raises_error(self) -> None: + """Test that trajectory plot raises error for non-2D problems.""" + from opt.benchmark.functions import sphere + + pso = ParticleSwarm( + func=sphere, + lower_bound=-5, + upper_bound=5, + dim=3, # 3D problem + max_iter=10, + track_history=True, + seed=42, + ) + pso.search() + viz = Visualizer(pso) + + with pytest.raises(ValueError, match="2D problems"): + viz.plot_trajectory(show=False) + + def test_plot_average_fitness(self, optimizer_with_history) -> None: + """Test average fitness plot generation.""" + viz = Visualizer(optimizer_with_history) + fig = viz.plot_average_fitness(show=False) + assert fig is not None + plt.close(fig) + + def test_plot_average_fitness_without_std(self, optimizer_with_history) -> None: + """Test average fitness plot without std deviation bands.""" + viz = Visualizer(optimizer_with_history) + fig = viz.plot_average_fitness(show_std=False, show=False) + assert fig is not None + plt.close(fig) + + def test_plot_all_2d(self, optimizer_with_history) -> None: + """Test plot_all for 2D problems.""" + viz = Visualizer(optimizer_with_history) + viz.plot_all(save_path="/tmp/test_plot_all_2d.png") + plt.close("all") + + def test_plot_all_3d(self) -> None: + """Test plot_all for non-2D problems.""" + from opt.benchmark.functions import sphere + + pso = ParticleSwarm( + func=sphere, + lower_bound=-5, + upper_bound=5, + dim=3, + max_iter=10, + track_history=True, + seed=42, + ) + pso.search() + viz = Visualizer(pso) + viz.plot_all(save_path="/tmp/test_plot_all_3d.png") + plt.close("all") + + +class TestStabilityResults: + """Tests for the StabilityResults class.""" + + @pytest.fixture + def sample_results(self): + """Create sample stability results.""" + solutions = [np.array([1.0, 0.5]), np.array([1.1, 0.6]), np.array([0.9, 0.4])] + fitness_values = [0.01, 0.02, 0.015] + seeds = [42, 123, 456] + return 
StabilityResults( + optimizer_name="ParticleSwarm", + function_name="shifted_ackley", + solutions=solutions, + fitness_values=fitness_values, + seeds=seeds, + ) + + def test_stability_results_initialization(self, sample_results) -> None: + """Test StabilityResults initialization.""" + assert sample_results.optimizer_name == "ParticleSwarm" + assert sample_results.function_name == "shifted_ackley" + assert len(sample_results.solutions) == 3 + assert len(sample_results.fitness_values) == 3 + assert len(sample_results.seeds) == 3 + + def test_summary(self, sample_results) -> None: + """Test summary statistics generation.""" + summary = sample_results.summary() + assert "mean" in summary + assert "std" in summary + assert "min" in summary + assert "max" in summary + assert "median" in summary + assert "q25" in summary + assert "q75" in summary + assert summary["min"] == 0.01 + assert summary["max"] == 0.02 + + def test_print_summary(self, sample_results, capsys) -> None: + """Test print_summary output.""" + sample_results.print_summary() + captured = capsys.readouterr() + assert "Stability Test Results" in captured.out + assert "ParticleSwarm" in captured.out + assert "shifted_ackley" in captured.out + + def test_plot_boxplot(self, sample_results) -> None: + """Test box plot generation.""" + fig = sample_results.plot_boxplot(show=False) + assert fig is not None + plt.close(fig) + + def test_plot_histogram(self, sample_results) -> None: + """Test histogram generation.""" + fig = sample_results.plot_histogram(show=False) + assert fig is not None + plt.close(fig) + + +class TestStabilityTesting: + """Tests for stability testing functions.""" + + def test_run_stability_test_with_seeds(self) -> None: + """Test stability test with specific seeds.""" + results = run_stability_test( + optimizer_class=ParticleSwarm, + func=shifted_ackley, + lower_bound=-5, + upper_bound=5, + dim=2, + max_iter=10, + seeds=[42, 123], + verbose=False, + population_size=10, + ) + + assert isinstance(results, StabilityResults) + assert len(results.fitness_values) == 2 + assert len(results.solutions) == 2 + assert results.seeds == [42, 123] + + def test_run_stability_test_with_n_runs(self) -> None: + """Test stability test with n_runs parameter.""" + results = run_stability_test( + optimizer_class=ParticleSwarm, + func=shifted_ackley, + lower_bound=-5, + upper_bound=5, + dim=2, + max_iter=10, + n_runs=3, + verbose=False, + population_size=10, + ) + + assert isinstance(results, StabilityResults) + assert len(results.fitness_values) == 3 + assert len(results.solutions) == 3 + + def test_run_stability_test_reproducibility(self) -> None: + """Test that same seeds produce same results.""" + results1 = run_stability_test( + optimizer_class=ParticleSwarm, + func=shifted_ackley, + lower_bound=-5, + upper_bound=5, + dim=2, + max_iter=10, + seeds=[42], + verbose=False, + population_size=10, + ) + + results2 = run_stability_test( + optimizer_class=ParticleSwarm, + func=shifted_ackley, + lower_bound=-5, + upper_bound=5, + dim=2, + max_iter=10, + seeds=[42], + verbose=False, + population_size=10, + ) + + assert results1.fitness_values[0] == results2.fitness_values[0] + np.testing.assert_array_equal(results1.solutions[0], results2.solutions[0]) + + +class TestHistoryTracking: + """Tests for history tracking in optimizers.""" + + def test_particle_swarm_tracks_history(self) -> None: + """Test that ParticleSwarm correctly tracks history.""" + pso = ParticleSwarm( + func=shifted_ackley, + lower_bound=-5, + upper_bound=5, + dim=2, + 
max_iter=10, + track_history=True, + seed=42, + ) + + pso.search() + + assert pso.track_history is True + assert len(pso.history["best_fitness"]) == 11 # max_iter + 1 + assert len(pso.history["best_solution"]) == 11 + assert len(pso.history["population_fitness"]) == 11 + assert len(pso.history["population"]) == 11 + + def test_particle_swarm_no_history_by_default(self) -> None: + """Test that ParticleSwarm doesn't track history by default.""" + pso = ParticleSwarm( + func=shifted_ackley, + lower_bound=-5, + upper_bound=5, + dim=2, + max_iter=10, + seed=42, + ) + + pso.search() + + assert pso.track_history is False + assert pso.history == {} diff --git a/opt/visualization/__init__.py b/opt/visualization/__init__.py new file mode 100644 index 00000000..7603cc9e --- /dev/null +++ b/opt/visualization/__init__.py @@ -0,0 +1,62 @@ +"""Visualization and stability testing for optimization algorithms. + +This module provides comprehensive visualization and stability testing tools +for optimization algorithms. + +Classes: + - Visualizer: Visualize optimization algorithm behavior and performance + - StabilityResults: Store and analyze results from stability tests + +Functions: + - run_stability_test: Run an optimizer multiple times with different seeds + - compare_optimizers_stability: Compare stability of multiple optimizers + +Example: + >>> from opt.swarm_intelligence.particle_swarm import ParticleSwarm + >>> from opt.benchmark.functions import shifted_ackley + >>> from opt.visualization import Visualizer, run_stability_test + >>> + >>> # Single run with visualization + >>> pso = ParticleSwarm( + ... func=shifted_ackley, + ... lower_bound=-5, + ... upper_bound=5, + ... dim=2, + ... max_iter=100, + ... track_history=True, + ... ) + >>> best_solution, best_fitness = pso.search() + >>> + >>> viz = Visualizer(pso) + >>> viz.plot_convergence() + >>> viz.plot_trajectory() + >>> viz.plot_average_fitness() + >>> + >>> # Stability test with multiple seeds + >>> results = run_stability_test( + ... optimizer_class=ParticleSwarm, + ... func=shifted_ackley, + ... lower_bound=-5, + ... upper_bound=5, + ... dim=2, + ... max_iter=100, + ... seeds=[42, 123, 456, 789, 1011], + ... ) + >>> results.print_summary() + >>> results.plot_boxplot() +""" + +from __future__ import annotations + +from opt.visualization.stability import StabilityResults +from opt.visualization.stability import compare_optimizers_stability +from opt.visualization.stability import run_stability_test +from opt.visualization.visualizer import Visualizer + + +__all__ = [ + "StabilityResults", + "Visualizer", + "compare_optimizers_stability", + "run_stability_test", +] diff --git a/opt/visualization/stability.py b/opt/visualization/stability.py new file mode 100644 index 00000000..79dd640b --- /dev/null +++ b/opt/visualization/stability.py @@ -0,0 +1,451 @@ +"""Stability testing framework for optimization algorithms. + +This module provides tools for running optimization algorithms multiple times +with different random seeds to assess their stability and performance consistency. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + + +if TYPE_CHECKING: + from collections.abc import Callable + from collections.abc import Sequence + + from matplotlib.figure import Figure + + from opt.abstract_optimizer import AbstractOptimizer + + +class StabilityResults: + """Results from stability testing of an optimization algorithm. 
+ + This class stores and analyzes results from multiple runs of an optimizer + with different random seeds. + + Args: + optimizer_name (str): Name of the optimizer class. + function_name (str): Name of the objective function. + solutions (list[np.ndarray]): List of best solutions from each run. + fitness_values (list[float]): List of best fitness values from each run. + seeds (list[int]): List of random seeds used for each run. + + Attributes: + optimizer_name (str): Name of the optimizer class. + function_name (str): Name of the objective function. + solutions (list[np.ndarray]): List of best solutions from each run. + fitness_values (np.ndarray): Array of best fitness values from each run. + seeds (list[int]): List of random seeds used for each run. + """ + + def __init__( + self, + optimizer_name: str, + function_name: str, + solutions: list[np.ndarray], + fitness_values: list[float], + seeds: list[int], + ) -> None: + """Initialize StabilityResults.""" + self.optimizer_name = optimizer_name + self.function_name = function_name + self.solutions = solutions + self.fitness_values = np.array(fitness_values) + self.seeds = seeds + + def summary(self) -> dict[str, float]: + """Generate statistical summary of the results. + + Returns: + dict[str, float]: Dictionary containing mean, std, min, max, and median fitness values. + + Example: + >>> results.summary() + {'mean': 0.123, 'std': 0.045, 'min': 0.001, 'max': 0.234, 'median': 0.112} + """ + return { + "mean": float(np.mean(self.fitness_values)), + "std": float(np.std(self.fitness_values)), + "min": float(np.min(self.fitness_values)), + "max": float(np.max(self.fitness_values)), + "median": float(np.median(self.fitness_values)), + "q25": float(np.percentile(self.fitness_values, 25)), + "q75": float(np.percentile(self.fitness_values, 75)), + } + + def print_summary(self) -> None: + """Print a formatted summary of the results. + + Example: + >>> results.print_summary() + Stability Test Results for ParticleSwarm on shifted_ackley + ============================================================ + Number of runs: 10 + ... + """ + stats = self.summary() + print( + f"\nStability Test Results for {self.optimizer_name} on {self.function_name}" + ) + print("=" * 60) + print(f"Number of runs: {len(self.fitness_values)}") + print(f"Mean fitness: {stats['mean']:.6f}") + print(f"Std deviation: {stats['std']:.6f}") + print(f"Min fitness: {stats['min']:.6f}") + print(f"Max fitness: {stats['max']:.6f}") + print(f"Median fitness: {stats['median']:.6f}") + print(f"Q25 fitness: {stats['q25']:.6f}") + print(f"Q75 fitness: {stats['q75']:.6f}") + if stats["mean"] != 0: + cv = stats["std"] / stats["mean"] + print(f"CV (std/mean): {cv:.4f}") + else: + print("CV: N/A") + print("=" * 60) + + def plot_boxplot(self, show: bool = True, save_path: str | None = None) -> Figure: + """Generate box plot of fitness values across runs. + + Args: + show (bool, optional): Whether to display the plot. Defaults to True. + save_path (str | None, optional): Path to save the figure. If None, doesn't save. + + Returns: + Figure: The matplotlib figure object. 
+ + Example: + >>> results.plot_boxplot(save_path="stability_boxplot.png") + """ + import matplotlib.pyplot as plt + + fig, ax = plt.subplots(figsize=(8, 6)) + + # Create box plot + bp = ax.boxplot( + [self.fitness_values], + tick_labels=[self.optimizer_name], + patch_artist=True, + widths=0.6, + ) + + # Customize box plot colors + for patch in bp["boxes"]: + patch.set_facecolor("lightblue") + patch.set_alpha(0.7) + + for whisker in bp["whiskers"]: + whisker.set(linewidth=1.5) + + for median in bp["medians"]: + median.set(color="red", linewidth=2) + + # Add individual points + y_points = self.fitness_values + rng = np.random.default_rng(42) + x_points = rng.normal(1, 0.04, size=len(y_points)) + ax.scatter(x_points, y_points, alpha=0.5, color="darkblue", s=30, zorder=3) + + ax.set_ylabel("Best Fitness Value", fontsize=12) + ax.set_title( + f"Stability Analysis: {self.optimizer_name} on {self.function_name}\n" + f"(n={len(self.fitness_values)} runs)", + fontsize=13, + ) + ax.grid(True, alpha=0.3, axis="y") + + # Add statistics text + stats = self.summary() + stats_text = ( + f"Mean: {stats['mean']:.4f}\n" + f"Std: {stats['std']:.4f}\n" + f"Min: {stats['min']:.4f}\n" + f"Max: {stats['max']:.4f}" + ) + ax.text( + 0.02, + 0.98, + stats_text, + transform=ax.transAxes, + fontsize=10, + verticalalignment="top", + bbox={"boxstyle": "round", "facecolor": "wheat", "alpha": 0.5}, + ) + + plt.tight_layout() + + if save_path: + plt.savefig(save_path, dpi=300, bbox_inches="tight") + print(f"Box plot saved to {save_path}") + + if show: + plt.show() + + return fig + + def plot_histogram( + self, bins: int = 20, show: bool = True, save_path: str | None = None + ) -> Figure: + """Generate histogram of fitness values across runs. + + Args: + bins (int, optional): Number of bins for histogram. Defaults to 20. + show (bool, optional): Whether to display the plot. Defaults to True. + save_path (str | None, optional): Path to save the figure. If None, doesn't save. + + Returns: + Figure: The matplotlib figure object. + + Example: + >>> results.plot_histogram(bins=30) + """ + import matplotlib.pyplot as plt + + fig, ax = plt.subplots(figsize=(10, 6)) + + # Create histogram + ax.hist( + self.fitness_values, + bins=bins, + color="skyblue", + edgecolor="black", + alpha=0.7, + ) + + # Add vertical lines for statistics + stats = self.summary() + ax.axvline( + stats["mean"], color="red", linestyle="--", linewidth=2, label="Mean" + ) + ax.axvline( + stats["median"], color="green", linestyle="--", linewidth=2, label="Median" + ) + + ax.set_xlabel("Best Fitness Value", fontsize=12) + ax.set_ylabel("Frequency", fontsize=12) + ax.set_title( + f"Fitness Distribution: {self.optimizer_name} on {self.function_name}\n" + f"(n={len(self.fitness_values)} runs)", + fontsize=13, + ) + ax.legend() + ax.grid(True, alpha=0.3, axis="y") + + plt.tight_layout() + + if save_path: + plt.savefig(save_path, dpi=300, bbox_inches="tight") + print(f"Histogram saved to {save_path}") + + if show: + plt.show() + + return fig + + +def run_stability_test( + optimizer_class: type[AbstractOptimizer], + func: Callable[[np.ndarray], float], + lower_bound: float, + upper_bound: float, + dim: int, + max_iter: int = 100, + seeds: Sequence[int] | None = None, + n_runs: int = 10, + verbose: bool = True, + **optimizer_kwargs, +) -> StabilityResults: + """Run stability test for an optimization algorithm. + + Runs the optimizer multiple times with different random seeds to assess + performance stability and consistency. 
+ + Args: + optimizer_class (type[AbstractOptimizer]): The optimizer class to test. + func (Callable): The objective function to optimize. + lower_bound (float): Lower bound of the search space. + upper_bound (float): Upper bound of the search space. + dim (int): Dimensionality of the search space. + max_iter (int, optional): Maximum iterations per run. Defaults to 100. + seeds (Sequence[int] | None, optional): Specific seeds to use. If None, generates random seeds. + n_runs (int, optional): Number of runs if seeds not specified. Defaults to 10. + verbose (bool, optional): Whether to print progress. Defaults to True. + **optimizer_kwargs: Additional keyword arguments for the optimizer. + + Returns: + StabilityResults: Object containing results from all runs. + + Example: + >>> from opt.swarm_intelligence.particle_swarm import ParticleSwarm + >>> from opt.benchmark.functions import shifted_ackley + >>> from opt.visualization import run_stability_test + >>> results = run_stability_test( + ... optimizer_class=ParticleSwarm, + ... func=shifted_ackley, + ... lower_bound=-5, + ... upper_bound=5, + ... dim=2, + ... max_iter=100, + ... seeds=[42, 123, 456, 789, 1011], + ... ) + >>> results.print_summary() + >>> results.plot_boxplot() + """ + # Determine seeds to use + if seeds is None: + rng = np.random.default_rng(42) + test_seeds = rng.integers(0, 2**31, size=n_runs).tolist() + else: + test_seeds = list(seeds) + + if verbose: + print(f"\nRunning stability test for {optimizer_class.__name__}") + print(f"Function: {func.__name__}") + print(f"Number of runs: {len(test_seeds)}") + print(f"Max iterations per run: {max_iter}") + print("-" * 60) + + solutions = [] + fitness_values = [] + + for i, seed in enumerate(test_seeds): + if verbose: + print(f"Run {i + 1}/{len(test_seeds)} (seed={seed})...", end=" ") + + optimizer = optimizer_class( + func=func, + lower_bound=lower_bound, + upper_bound=upper_bound, + dim=dim, + max_iter=max_iter, + seed=seed, + **optimizer_kwargs, + ) + + solution, fitness = optimizer.search() + solutions.append(solution) + fitness_values.append(fitness) + + if verbose: + print(f"Fitness: {fitness:.6f}") + + results = StabilityResults( + optimizer_name=optimizer_class.__name__, + function_name=func.__name__, + solutions=solutions, + fitness_values=fitness_values, + seeds=test_seeds, + ) + + if verbose: + results.print_summary() + + return results + + +def compare_optimizers_stability( + optimizer_classes: list[type[AbstractOptimizer]], + func: Callable[[np.ndarray], float], + lower_bound: float, + upper_bound: float, + dim: int, + max_iter: int = 100, + n_runs: int = 10, + show: bool = True, + save_path: str | None = None, +) -> tuple[dict[str, StabilityResults], Figure]: + """Compare stability of multiple optimizers. + + Runs multiple optimizers on the same function and compares their stability + using box plots. + + Args: + optimizer_classes (list[type[AbstractOptimizer]]): List of optimizer classes to compare. + func (Callable): The objective function to optimize. + lower_bound (float): Lower bound of the search space. + upper_bound (float): Upper bound of the search space. + dim (int): Dimensionality of the search space. + max_iter (int, optional): Maximum iterations per run. Defaults to 100. + n_runs (int, optional): Number of runs per optimizer. Defaults to 10. + show (bool, optional): Whether to display the plot. Defaults to True. + save_path (str | None, optional): Path to save the figure. If None, doesn't save. 
+ + Returns: + tuple[dict[str, StabilityResults], Figure]: Dictionary of results and comparison figure. + + Example: + >>> from opt.swarm_intelligence.particle_swarm import ParticleSwarm + >>> from opt.evolutionary.genetic_algorithm import GeneticAlgorithm + >>> results, fig = compare_optimizers_stability( + ... optimizer_classes=[ParticleSwarm, GeneticAlgorithm], + ... func=shifted_ackley, + ... lower_bound=-5, + ... upper_bound=5, + ... dim=2, + ... ) + """ + import matplotlib.pyplot as plt + + all_results = {} + + # Run stability tests for each optimizer + for optimizer_class in optimizer_classes: + results = run_stability_test( + optimizer_class=optimizer_class, + func=func, + lower_bound=lower_bound, + upper_bound=upper_bound, + dim=dim, + max_iter=max_iter, + n_runs=n_runs, + verbose=False, + ) + all_results[optimizer_class.__name__] = results + + # Create comparison box plot + fig, ax = plt.subplots(figsize=(max(10, len(optimizer_classes) * 2), 6)) + + data = [results.fitness_values for results in all_results.values()] + labels = list(all_results.keys()) + + bp = ax.boxplot(data, tick_labels=labels, patch_artist=True, widths=0.6) + + # Customize colors + colors = plt.cm.Set3(np.linspace(0, 1, len(optimizer_classes))) + # Python 3.10+ supports strict parameter, but we ensure equal lengths + assert len(bp["boxes"]) == len(colors) + for patch, color in zip(bp["boxes"], colors): + patch.set_facecolor(color) + patch.set_alpha(0.7) + + for median in bp["medians"]: + median.set(color="red", linewidth=2) + + # Add individual points + for i, (_name, results) in enumerate(all_results.items(), 1): + y_points = results.fitness_values + rng = np.random.default_rng(42) + x_points = rng.normal(i, 0.04, size=len(y_points)) + ax.scatter(x_points, y_points, alpha=0.5, s=30, zorder=3, color="darkblue") + + ax.set_ylabel("Best Fitness Value", fontsize=12) + ax.set_xlabel("Optimizer", fontsize=12) + ax.set_title( + f"Optimizer Stability Comparison on {func.__name__}\n(n={n_runs} runs each)", + fontsize=13, + ) + ax.grid(True, alpha=0.3, axis="y") + plt.xticks(rotation=45, ha="right") + + plt.tight_layout() + + if save_path: + plt.savefig(save_path, dpi=300, bbox_inches="tight") + print(f"Comparison plot saved to {save_path}") + + if show: + plt.show() + + return all_results, fig diff --git a/opt/visualization/visualizer.py b/opt/visualization/visualizer.py new file mode 100644 index 00000000..7dcdaf5b --- /dev/null +++ b/opt/visualization/visualizer.py @@ -0,0 +1,343 @@ +"""Visualization module for optimization algorithms. + +This module provides visualization capabilities for optimization algorithms, +including convergence curves, trajectory plots, and average fitness tracking. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + + +if TYPE_CHECKING: + from matplotlib.axes import Axes + from matplotlib.figure import Figure + + from opt.abstract_optimizer import AbstractOptimizer + + +class Visualizer: + """Visualizer for optimization algorithms. + + This class provides various visualization methods for optimization algorithms, + including convergence curves, trajectory plots, and population fitness tracking. + + Args: + optimizer (AbstractOptimizer): The optimizer instance to visualize. + Must have been run with track_history=True. + + Raises: + ValueError: If optimizer doesn't have history tracked. 
+ + Example: + >>> from opt.swarm_intelligence.particle_swarm import ParticleSwarm + >>> from opt.benchmark.functions import shifted_ackley + >>> from opt.visualization import Visualizer + >>> pso = ParticleSwarm( + ... func=shifted_ackley, + ... lower_bound=-5, + ... upper_bound=5, + ... dim=2, + ... max_iter=100, + ... track_history=True, + ... ) + >>> best_solution, best_fitness = pso.search() + >>> viz = Visualizer(pso) + >>> viz.plot_convergence() + >>> viz.plot_trajectory() + """ + + def __init__(self, optimizer: AbstractOptimizer) -> None: + """Initialize the Visualizer. + + Args: + optimizer (AbstractOptimizer): The optimizer instance to visualize. + + Raises: + ValueError: If optimizer doesn't have history tracked. + """ + if not optimizer.track_history or not optimizer.history: + msg = ( + "Optimizer must be run with track_history=True to use visualization. " + "Re-run the optimizer with track_history=True." + ) + raise ValueError(msg) + + self.optimizer = optimizer + self.history = optimizer.history + + def plot_convergence( + self, log_scale: bool = False, show: bool = True, ax: Axes | None = None + ) -> Figure: + """Plot convergence curve showing best fitness over iterations. + + Args: + log_scale (bool, optional): Whether to use log scale for y-axis. Defaults to False. + show (bool, optional): Whether to display the plot. Defaults to True. + ax (Axes | None, optional): Matplotlib axes to plot on. If None, creates new figure. + + Returns: + Figure: The matplotlib figure object. + + Example: + >>> viz.plot_convergence(log_scale=True) + """ + import matplotlib.pyplot as plt + + if ax is None: + fig, ax = plt.subplots(figsize=(10, 6)) + else: + fig = ax.get_figure() + + iterations = range(len(self.history["best_fitness"])) + best_fitness = self.history["best_fitness"] + + ax.plot( + iterations, best_fitness, linewidth=2, color="blue", label="Best Fitness" + ) + ax.set_xlabel("Iteration", fontsize=12) + ax.set_ylabel("Best Fitness Value", fontsize=12) + ax.set_title( + f"Convergence Curve - {self.optimizer.__class__.__name__}", fontsize=14 + ) + ax.grid(True, alpha=0.3) + ax.legend() + + if log_scale: + ax.set_yscale("log") + + if show: + plt.tight_layout() + plt.show() + + return fig + + def plot_trajectory( + self, show: bool = True, ax: Axes | None = None, max_points: int = 1000 + ) -> Figure: + """Plot 2D trajectory of the best solution through the search space. + + This visualization shows how the best solution moves through the search space + over iterations. Only works for 2D problems. + + Args: + show (bool, optional): Whether to display the plot. Defaults to True. + ax (Axes | None, optional): Matplotlib axes to plot on. If None, creates new figure. + max_points (int, optional): Maximum number of points to plot. Defaults to 1000. + + Returns: + Figure: The matplotlib figure object. + + Raises: + ValueError: If optimizer dimensionality is not 2. 
+ + Example: + >>> viz.plot_trajectory() + """ + import matplotlib.pyplot as plt + + if self.optimizer.dim != 2: + msg = "Trajectory plotting only works for 2D problems (dim=2)" + raise ValueError(msg) + + if ax is None: + fig, ax = plt.subplots(figsize=(10, 8)) + else: + fig = ax.get_figure() + + # Extract trajectory + best_solutions = np.array(self.history["best_solution"]) + + # Subsample if too many points + if len(best_solutions) > max_points: + indices = np.linspace(0, len(best_solutions) - 1, max_points, dtype=int) + best_solutions = best_solutions[indices] + + x_coords = best_solutions[:, 0] + y_coords = best_solutions[:, 1] + + # Plot trajectory with color gradient + scatter = ax.scatter( + x_coords, + y_coords, + c=range(len(x_coords)), + cmap="viridis", + s=50, + alpha=0.6, + edgecolors="black", + linewidth=0.5, + ) + + # Plot start and end points + ax.plot( + x_coords[0], + y_coords[0], + "go", + markersize=12, + label="Start", + markeredgecolor="black", + markeredgewidth=2, + ) + ax.plot( + x_coords[-1], + y_coords[-1], + "r*", + markersize=15, + label="End", + markeredgecolor="black", + markeredgewidth=2, + ) + + # Add colorbar + cbar = plt.colorbar(scatter, ax=ax) + cbar.set_label("Iteration", fontsize=11) + + ax.set_xlabel("Dimension 1", fontsize=12) + ax.set_ylabel("Dimension 2", fontsize=12) + ax.set_title( + f"Search Trajectory - {self.optimizer.__class__.__name__}", fontsize=14 + ) + ax.legend() + ax.grid(True, alpha=0.3) + + if show: + plt.tight_layout() + plt.show() + + return fig + + def plot_average_fitness( + self, show_std: bool = True, show: bool = True, ax: Axes | None = None + ) -> Figure: + """Plot average fitness of population over iterations with standard deviation. + + This visualization shows the mean fitness of the entire population over time, + with optional standard deviation bands to show population diversity. + + Args: + show_std (bool, optional): Whether to show standard deviation bands. Defaults to True. + show (bool, optional): Whether to display the plot. Defaults to True. + ax (Axes | None, optional): Matplotlib axes to plot on. If None, creates new figure. + + Returns: + Figure: The matplotlib figure object. 
+ + Example: + >>> viz.plot_average_fitness(show_std=True) + """ + import matplotlib.pyplot as plt + + if ax is None: + fig, ax = plt.subplots(figsize=(10, 6)) + else: + fig = ax.get_figure() + + iterations = range(len(self.history["population_fitness"])) + population_fitness = self.history["population_fitness"] + + # Calculate mean and std + mean_fitness = [np.mean(f) for f in population_fitness] + std_fitness = [np.std(f) for f in population_fitness] + + # Plot mean fitness + ax.plot( + iterations, mean_fitness, linewidth=2, color="green", label="Mean Fitness" + ) + + # Plot best fitness for comparison + best_fitness = self.history["best_fitness"] + ax.plot( + iterations, + best_fitness, + linewidth=2, + color="blue", + label="Best Fitness", + linestyle="--", + ) + + # Add standard deviation bands + if show_std: + mean_arr = np.array(mean_fitness) + std_arr = np.array(std_fitness) + ax.fill_between( + iterations, + mean_arr - std_arr, + mean_arr + std_arr, + alpha=0.2, + color="green", + label="±1 Std Dev", + ) + + ax.set_xlabel("Iteration", fontsize=12) + ax.set_ylabel("Fitness Value", fontsize=12) + ax.set_title( + f"Population Fitness Over Time - {self.optimizer.__class__.__name__}", + fontsize=14, + ) + ax.grid(True, alpha=0.3) + ax.legend() + + if show: + plt.tight_layout() + plt.show() + + return fig + + def plot_all(self, save_path: str | None = None) -> None: + """Plot all available visualizations in a single figure. + + Creates a comprehensive visualization with convergence, trajectory (if 2D), + and average fitness plots. + + Args: + save_path (str | None, optional): Path to save the figure. If None, displays instead. + + Example: + >>> viz.plot_all(save_path="optimization_results.png") + """ + import matplotlib.pyplot as plt + + if self.optimizer.dim == 2: + _fig, axes = plt.subplots(2, 2, figsize=(16, 12)) + axes = axes.flatten() + + # Convergence plot + self.plot_convergence(show=False, ax=axes[0]) + + # Convergence plot (log scale) + self.plot_convergence(log_scale=True, show=False, ax=axes[1]) + axes[1].set_title( + f"Convergence Curve (Log Scale) - {self.optimizer.__class__.__name__}", + fontsize=14, + ) + + # Trajectory plot + self.plot_trajectory(show=False, ax=axes[2]) + + # Average fitness plot + self.plot_average_fitness(show=False, ax=axes[3]) + else: + _fig, axes = plt.subplots(1, 3, figsize=(18, 5)) + + # Convergence plot + self.plot_convergence(show=False, ax=axes[0]) + + # Convergence plot (log scale) + self.plot_convergence(log_scale=True, show=False, ax=axes[1]) + axes[1].set_title( + f"Convergence Curve (Log Scale) - {self.optimizer.__class__.__name__}", + fontsize=14, + ) + + # Average fitness plot + self.plot_average_fitness(show=False, ax=axes[2]) + + plt.tight_layout() + + if save_path: + plt.savefig(save_path, dpi=300, bbox_inches="tight") + print(f"Figure saved to {save_path}") + else: + plt.show() diff --git a/pyproject.toml b/pyproject.toml index 08d968ee..19021b46 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,6 +18,9 @@ dev = [ "pytest>=8.0.0", "pre-commit>=4.0.0", ] +visualization = [ + "matplotlib>=3.7.0", +] [build-system] requires = ["hatchling"] @@ -91,7 +94,10 @@ exclude = [".venv"] dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$" [tool.ruff.lint.per-file-ignores] -"opt/test/*" = ["S101", "PLR2004", "PLC0415"] +"opt/test/*" = ["S101", "PLR2004", "PLC0415", "ANN001", "ANN201", "ICN001", "S108"] +"opt/visualization/*" = ["FBT001", "FBT002", "FBT003", "PLC0415", "PLR2004", "ANN003", "S101", "B905"] 
+"opt/abstract_optimizer.py" = ["FBT001", "FBT002"] +"opt/swarm_intelligence/particle_swarm.py" = ["FBT001", "FBT002"] [tool.ruff.lint.pydocstyle]