From 2656a42d4f6ed9186daeece9e42f9ec49e360071 Mon Sep 17 00:00:00 2001 From: morrisnein Date: Thu, 26 Oct 2023 02:15:09 +0300 Subject: [PATCH 01/17] add "history" as a parameter of tuner interface --- golem/core/tuning/hyperopt_tuner.py | 8 +++- golem/core/tuning/iopt_tuner.py | 4 +- golem/core/tuning/optuna_tuner.py | 9 ++-- golem/core/tuning/sequential.py | 9 ++-- golem/core/tuning/simultaneous.py | 2 +- golem/core/tuning/tuner_interface.py | 63 +++++++++++++++++++++------- 6 files changed, 68 insertions(+), 27 deletions(-) diff --git a/golem/core/tuning/hyperopt_tuner.py b/golem/core/tuning/hyperopt_tuner.py index 153216ba9..8aa5af24d 100644 --- a/golem/core/tuning/hyperopt_tuner.py +++ b/golem/core/tuning/hyperopt_tuner.py @@ -10,6 +10,7 @@ from golem.core.adapter import BaseOptimizationAdapter from golem.core.log import default_log from golem.core.optimisers.objective import ObjectiveFunction +from golem.core.optimisers.opt_history_objects.opt_history import OptHistory from golem.core.optimisers.timer import Timer from golem.core.tuning.search_space import SearchSpace, get_node_operation_parameter_label from golem.core.tuning.tuner_interface import BaseTuner @@ -31,6 +32,7 @@ class HyperoptTuner(BaseTuner, ABC): By default, ``deviation=0.05``, which means that tuned graph will be returned if it's metric will be at least 0.05% better than the initial. algo: algorithm for hyperparameters optimization with signature similar to :obj:`hyperopt.tse.suggest` + history: object to store tuning history if needed. """ def __init__(self, objective_evaluate: ObjectiveFunction, @@ -41,7 +43,8 @@ def __init__(self, objective_evaluate: ObjectiveFunction, timeout: timedelta = timedelta(minutes=5), n_jobs: int = -1, deviation: float = 0.05, - algo: Callable = tpe.suggest): + algo: Callable = tpe.suggest, + history: Optional[OptHistory] = None): early_stopping_rounds = early_stopping_rounds or max(100, int(np.sqrt(iterations) * 10)) super().__init__(objective_evaluate, search_space, @@ -50,7 +53,8 @@ def __init__(self, objective_evaluate: ObjectiveFunction, early_stopping_rounds, timeout, n_jobs, - deviation) + deviation, + history) self.early_stop_fn = no_progress_loss(iteration_stop_count=self.early_stopping_rounds) self.max_seconds = int(timeout.seconds) if timeout is not None else None diff --git a/golem/core/tuning/iopt_tuner.py b/golem/core/tuning/iopt_tuner.py index 021baf5f3..62a37842a 100644 --- a/golem/core/tuning/iopt_tuner.py +++ b/golem/core/tuning/iopt_tuner.py @@ -1,12 +1,12 @@ from dataclasses import dataclass, field -from typing import List, Dict, Generic, Tuple, Any, Optional +from typing import Any, Dict, Generic, List, Optional, Tuple import numpy as np from iOpt.method.listener import ConsoleFullOutputListener from iOpt.problem import Problem from iOpt.solver import Solver from iOpt.solver_parametrs import SolverParameters -from iOpt.trial import Point, FunctionValue +from iOpt.trial import FunctionValue, Point from golem.core.adapter import BaseOptimizationAdapter from golem.core.optimisers.graph import OptGraph diff --git a/golem/core/tuning/optuna_tuner.py b/golem/core/tuning/optuna_tuner.py index aabcb95d0..48b164464 100644 --- a/golem/core/tuning/optuna_tuner.py +++ b/golem/core/tuning/optuna_tuner.py @@ -10,6 +10,7 @@ from golem.core.adapter import BaseOptimizationAdapter from golem.core.optimisers.graph import OptGraph from golem.core.optimisers.objective import ObjectiveFunction +from golem.core.optimisers.opt_history_objects.opt_history import OptHistory from 
golem.core.tuning.search_space import SearchSpace, get_node_operation_parameter_label from golem.core.tuning.tuner_interface import BaseTuner, DomainGraphForTune @@ -23,7 +24,8 @@ def __init__(self, objective_evaluate: ObjectiveFunction, timeout: timedelta = timedelta(minutes=5), n_jobs: int = -1, deviation: float = 0.05, - objectives_number: int = 1): + objectives_number: int = 1, + history: Optional[OptHistory] = None): super().__init__(objective_evaluate, search_space, adapter, @@ -31,7 +33,8 @@ def __init__(self, objective_evaluate: ObjectiveFunction, early_stopping_rounds, timeout, n_jobs, - deviation) + deviation, + history) self.objectives_number = objectives_number self.study = None @@ -82,7 +85,7 @@ def tune(self, graph: DomainGraphForTune, show_progress: bool = True) -> \ def objective(self, trial: Trial, graph: OptGraph) -> Union[float, Sequence[float, ]]: new_parameters = self._get_parameters_from_trial(graph, trial) new_graph = BaseTuner.set_arg_graph(graph, new_parameters) - metric_value = self.get_metric_value(new_graph) + metric_value = self.evaluate_graph(new_graph) return metric_value def _get_parameters_from_trial(self, graph: OptGraph, trial: Trial) -> dict: diff --git a/golem/core/tuning/sequential.py b/golem/core/tuning/sequential.py index fd5924549..6cfe0cac0 100644 --- a/golem/core/tuning/sequential.py +++ b/golem/core/tuning/sequential.py @@ -7,6 +7,7 @@ from golem.core.adapter import BaseOptimizationAdapter from golem.core.optimisers.graph import OptGraph from golem.core.optimisers.objective import ObjectiveFunction +from golem.core.optimisers.opt_history_objects.opt_history import OptHistory from golem.core.tuning.hyperopt_tuner import HyperoptTuner, get_node_parameters_for_hyperopt from golem.core.tuning.search_space import SearchSpace from golem.core.tuning.tuner_interface import DomainGraphForTune @@ -26,7 +27,8 @@ def __init__(self, objective_evaluate: ObjectiveFunction, n_jobs: int = -1, deviation: float = 0.05, algo: Callable = tpe.suggest, - inverse_node_order: bool = False): + inverse_node_order: bool = False, + history: Optional[OptHistory] = None): super().__init__(objective_evaluate, search_space, adapter, @@ -34,7 +36,8 @@ def __init__(self, objective_evaluate: ObjectiveFunction, early_stopping_rounds, timeout, n_jobs, deviation, - algo) + algo, + history) self.inverse_node_order = inverse_node_order @@ -191,5 +194,5 @@ def _objective(self, node_params: dict, graph: OptGraph, node_id: int) -> float: # Set hyperparameters for node graph = self.set_arg_node(graph=graph, node_id=node_id, node_params=node_params) - metric_value = self.get_metric_value(graph=graph) + metric_value = self.evaluate_graph(graph=graph) return metric_value diff --git a/golem/core/tuning/simultaneous.py b/golem/core/tuning/simultaneous.py index 465dc81a8..2ca73b585 100644 --- a/golem/core/tuning/simultaneous.py +++ b/golem/core/tuning/simultaneous.py @@ -178,6 +178,6 @@ def _objective(self, parameters_dict: dict, graph: OptGraph, unchangeable_parame # Set hyperparameters for every node graph = self.set_arg_graph(graph, parameters_dict) - metric_value = self.get_metric_value(graph=graph) + metric_value = self.evaluate_graph(graph=graph) return metric_value diff --git a/golem/core/tuning/tuner_interface.py b/golem/core/tuning/tuner_interface.py index 0d9886578..13a24a712 100644 --- a/golem/core/tuning/tuner_interface.py +++ b/golem/core/tuning/tuner_interface.py @@ -1,7 +1,7 @@ from abc import abstractmethod from copy import deepcopy from datetime import timedelta -from typing 
import TypeVar, Generic, Optional, Union, Sequence +from typing import Generic, Optional, Sequence, TypeVar, Union import numpy as np @@ -10,9 +10,12 @@ from golem.core.constants import MAX_TUNING_METRIC_VALUE from golem.core.dag.graph_utils import graph_structure from golem.core.log import default_log -from golem.core.optimisers.fitness import SingleObjFitness, MultiObjFitness +from golem.core.optimisers.fitness import Fitness, MultiObjFitness, SingleObjFitness from golem.core.optimisers.graph import OptGraph from golem.core.optimisers.objective import ObjectiveEvaluate, ObjectiveFunction +from golem.core.optimisers.opt_history_objects.individual import Individual +from golem.core.optimisers.opt_history_objects.opt_history import OptHistory +from golem.core.optimisers.opt_history_objects.parent_operator import ParentOperator from golem.core.tuning.search_space import SearchSpace, convert_parameters from golem.core.utilities.data_structures import ensure_wrapped_in_sequence @@ -34,6 +37,7 @@ class BaseTuner(Generic[DomainGraphForTune]): deviation: required improvement (in percent) of a metric to return tuned graph. By default, ``deviation=0.05``, which means that tuned graph will be returned if it's metric will be at least 0.05% better than the initial. + history: object to store tuning history if needed. """ def __init__(self, objective_evaluate: ObjectiveFunction, @@ -43,8 +47,10 @@ def __init__(self, objective_evaluate: ObjectiveFunction, early_stopping_rounds: Optional[int] = None, timeout: timedelta = timedelta(minutes=5), n_jobs: int = -1, - deviation: float = 0.05): + deviation: float = 0.05, + history: Optional[OptHistory] = None): self.iterations = iterations + self.current_iteration = 0 self.adapter = adapter or IdentityAdapter() self.search_space = search_space self.n_jobs = n_jobs @@ -61,6 +67,7 @@ def __init__(self, objective_evaluate: ObjectiveFunction, self.init_graph = None self.init_metric = None self.obtained_metric = None + self.history = history self.log = default_log(self) @abstractmethod @@ -79,7 +86,7 @@ def tune(self, graph: DomainGraphForTune) -> Union[DomainGraphForTune, Sequence[ def init_check(self, graph: OptGraph) -> None: """ - Method get metric on validation set before start optimization + Method gets metric on validation set before starting optimization Args: graph: graph to calculate objective @@ -89,7 +96,7 @@ def init_check(self, graph: OptGraph) -> None: # Train graph self.init_graph = deepcopy(graph) - self.init_metric = self.get_metric_value(graph=self.init_graph) + self.init_metric = self.evaluate_graph(graph=self.init_graph, label='tuning_start') self.log.message(f'Initial graph: {graph_structure(self.init_graph)} \n' f'Initial metric: ' f'{list(map(lambda x: round(abs(x), 3), ensure_wrapped_in_sequence(self.init_metric)))}') @@ -111,7 +118,7 @@ def final_check(self, tuned_graphs: Union[OptGraph, Sequence[OptGraph]], multi_o return self._single_obj_final_check(tuned_graphs) def _single_obj_final_check(self, tuned_graph: OptGraph): - self.obtained_metric = self.get_metric_value(graph=tuned_graph) + self.obtained_metric = self.evaluate_graph(graph=tuned_graph, label='tuning_result') prefix_tuned_phrase = 'Return tuned graph due to the fact that obtained metric' prefix_init_phrase = 'Return init graph due to the fact that obtained metric' @@ -148,7 +155,7 @@ def _multi_obj_final_check(self, tuned_graphs: Sequence[OptGraph]) -> Sequence[O self.obtained_metric = [] final_graphs = [] for tuned_graph in tuned_graphs: - obtained_metric = 
self.get_metric_value(graph=tuned_graph) + obtained_metric = self.evaluate_graph(graph=tuned_graph, label='tuning_result') for e, value in enumerate(obtained_metric): if np.isclose(value, self._default_metric_value): obtained_metric[e] = None @@ -165,30 +172,35 @@ def _multi_obj_final_check(self, tuned_graphs: Sequence[OptGraph]) -> Sequence[O final_graphs = self.init_graph return final_graphs - def get_metric_value(self, graph: OptGraph) -> Union[float, Sequence[float]]: + def evaluate_graph(self, graph: OptGraph, label: Optional[str] = None) -> Union[float, Sequence[float]]: """ - Method calculates metric for algorithm validation + Method calculates metric for algorithm validation. + Also, responsible for saving of tuning history. Args: graph: Graph to evaluate + label: Label for tuning history. Returns: value of loss function """ graph_fitness = self.objective_evaluate(graph) + self._add_to_history(graph, graph_fitness, label) + if isinstance(graph_fitness, SingleObjFitness): metric_value = graph_fitness.value if not graph_fitness.valid: - return self._default_metric_value - return metric_value + metric_value = self._default_metric_value elif isinstance(graph_fitness, MultiObjFitness): - metric_values = graph_fitness.values - for e, value in enumerate(metric_values): - if value is None: - metric_values[e] = self._default_metric_value - return metric_values + metric_value = graph_fitness.values + for e, value in enumerate(metric_value): + metric_value[e] = self._default_metric_value if value is None else value + else: + raise ValueError(f'Objective evaluation must be a Fitness instance, not {graph_fitness}.') + + return metric_value @staticmethod def set_arg_graph(graph: OptGraph, parameters: dict) -> OptGraph: @@ -235,3 +247,22 @@ def set_arg_node(graph: OptGraph, node_id: int, node_params: dict) -> OptGraph: def _stop_tuning_with_message(self, message: str): self.log.message(message) self.obtained_metric = self.init_metric + + def _add_to_history(self, graph: OptGraph, fitness: Fitness, label: Optional[str]): + if not self.history: + return + history = self.history + if history.generations: + parent_individuals = history.generations[-1] + else: + parent_individuals = [] + tuner_name = self.__class__.__name__ + parent_operator = ParentOperator(type_='tuning', operators=[tuner_name], + parent_individuals=parent_individuals) + individual = Individual(graph, parent_operator=parent_operator, fitness=fitness) + if label is None: + label = f'tuning_iteration_{self.current_iteration}' + history.add_to_history(individuals=[individual], + generation_label=label, + generation_metadata=dict(tuner=tuner_name)) + self.current_iteration += 1 From d2eb0f228fbff2fe46582de92e3351e7b7fab764 Mon Sep 17 00:00:00 2001 From: morrisnein Date: Thu, 26 Oct 2023 02:17:42 +0300 Subject: [PATCH 02/17] rename fields of history to avoid ambiguity; test history after tuning --- examples/molecule_search/experiment.py | 4 +- .../experiment_setup.py | 2 +- experiments/experiment_analyzer.py | 2 +- experiments/experiment_launcher.py | 2 +- .../optimisers/adaptive/experience_buffer.py | 2 +- .../optimisers/meta/surrogate_optimizer.py | 2 +- .../opt_history_objects/opt_history.py | 41 ++++----- .../core/optimisers/populational_optimizer.py | 4 +- golem/core/optimisers/random/random_search.py | 4 +- .../coders/opt_history_serialization.py | 12 ++- .../opt_history/graphs_interactive.py | 2 +- golem/visualisation/opt_viz_extra.py | 2 +- test/integration/test_quality_improvement.py | 2 +- 
.../unit/optimizers/test_composing_history.py | 89 ++++++++++++++++--- .../serialization/test_external_serialize.py | 4 +- 15 files changed, 118 insertions(+), 56 deletions(-) diff --git a/examples/molecule_search/experiment.py b/examples/molecule_search/experiment.py index 79cea2bbe..d1ef5d433 100644 --- a/examples/molecule_search/experiment.py +++ b/examples/molecule_search/experiment.py @@ -109,7 +109,7 @@ def visualize_results(molecules: Iterable[MolGraph], # Plot pareto front (if multi-objective) if objective.is_multi_objective: - visualise_pareto(history.archive_history[-1], + visualise_pareto(history.evolution_best_archive[-1], objectives_names=objective.metric_names[:2], folder=str(save_path)) @@ -193,7 +193,7 @@ def run_experiment(optimizer_setup: Callable, result_dir = Path('results') / exp_name result_dir.mkdir(parents=True, exist_ok=True) history.save(result_dir / f'history_trial_{trial}.json') - trial_results.extend(history.final_choices) + trial_results.extend(history.evolution_results) trial_histories.append(history) # Compute mean & std for metrics of trials diff --git a/examples/synthetic_graph_evolution/experiment_setup.py b/examples/synthetic_graph_evolution/experiment_setup.py index 1b5b48aab..972a116f1 100644 --- a/examples/synthetic_graph_evolution/experiment_setup.py +++ b/examples/synthetic_graph_evolution/experiment_setup.py @@ -66,7 +66,7 @@ def run_experiments(optimizer_setup: Callable, found_graph = found_graphs[0] if isinstance(found_graphs, Sequence) else found_graphs history = optimizer.history - trial_results.extend(history.final_choices) + trial_results.extend(history.evolution_results) found_nx_graph = BaseNetworkxAdapter().restore(found_graph) duration = datetime.now() - start_time diff --git a/experiments/experiment_analyzer.py b/experiments/experiment_analyzer.py index 5d0a2310e..82c1a0c4f 100644 --- a/experiments/experiment_analyzer.py +++ b/experiments/experiment_analyzer.py @@ -104,7 +104,7 @@ def _analyze_convergence(history: OptHistory, metrics_num: int) -> List[float]: # find best final metric for each objective best_fitness_per_objective = [np.inf] * metrics_num for i in range(metrics_num): - for ind in history.final_choices.data: + for ind in history.evolution_results.data: if ind.fitness.values[i] < best_fitness_per_objective[i]: best_fitness_per_objective[i] = ind.fitness.values[i] diff --git a/experiments/experiment_launcher.py b/experiments/experiment_launcher.py index 5b8bcfc98..a42f6f756 100644 --- a/experiments/experiment_launcher.py +++ b/experiments/experiment_launcher.py @@ -133,7 +133,7 @@ def launch(self, optimizer_setup: Callable, **kwargs): found_graph = found_graphs[0] if isinstance(found_graphs, Sequence) else found_graphs history = optimizer.history - trial_results.extend(history.final_choices) + trial_results.extend(history.evolution_results) found_nx_graph = BaseNetworkxAdapter().restore(found_graph) duration = datetime.now() - start_time diff --git a/golem/core/optimisers/adaptive/experience_buffer.py b/golem/core/optimisers/adaptive/experience_buffer.py index 6b4852dd1..8e61dd4f0 100644 --- a/golem/core/optimisers/adaptive/experience_buffer.py +++ b/golem/core/optimisers/adaptive/experience_buffer.py @@ -57,7 +57,7 @@ def unroll_trajectories(history: OptHistory) -> List[GraphTrajectory]: """Iterates through history and find continuous sequences of applied operator actions.""" trajectories = [] seen_uids = set() - for terminal_individual in history.final_choices: + for terminal_individual in history.evolution_results: 
trajectory = [] next_ind = terminal_individual while True: diff --git a/golem/core/optimisers/meta/surrogate_optimizer.py b/golem/core/optimisers/meta/surrogate_optimizer.py index 0b1b40dc2..dfe9a7e33 100644 --- a/golem/core/optimisers/meta/surrogate_optimizer.py +++ b/golem/core/optimisers/meta/surrogate_optimizer.py @@ -54,5 +54,5 @@ def optimise(self, objective: ObjectiveFunction) -> Sequence[OptGraph]: break # Adding of new population to history self._update_population(new_population) - self._update_population(self.best_individuals, 'final_choices') + self._update_population(self.best_individuals, 'evolution_results') return [ind.graph for ind in self.best_individuals] diff --git a/golem/core/optimisers/opt_history_objects/opt_history.py b/golem/core/optimisers/opt_history_objects/opt_history.py index b14cd551e..99f6a59c2 100644 --- a/golem/core/optimisers/opt_history_objects/opt_history.py +++ b/golem/core/optimisers/opt_history_objects/opt_history.py @@ -6,18 +6,16 @@ import os import shutil from pathlib import Path -from typing import Any, Dict, List, Optional, Sequence, Union, TYPE_CHECKING +from typing import Any, Dict, List, Optional, Sequence, TYPE_CHECKING, Union from golem.core.log import default_log from golem.core.optimisers.objective.objective import ObjectiveInfo from golem.core.optimisers.opt_history_objects.generation import Generation - from golem.core.paths import default_data_dir from golem.serializers.serializer import default_load, default_save from golem.visualisation.opt_viz import OptHistoryVisualizer if TYPE_CHECKING: - from golem.core.dag.graph import Graph from golem.core.optimisers.opt_history_objects.individual import Individual @@ -36,8 +34,7 @@ def __init__(self, default_save_dir: Optional[os.PathLike] = None): self._objective = objective or ObjectiveInfo() self._generations: List[Generation] = [] - self.archive_history: List[List[Individual]] = [] - self._tuning_result: Optional[Graph] = None + self.evolution_best_archive: List[List[Individual]] = [] # init default save directory if default_save_dir: @@ -61,8 +58,8 @@ def add_to_history(self, individuals: Sequence[Individual], generation_label: Op generation = Generation(individuals, self.generations_count, generation_label, generation_metadata) self.generations.append(generation) - def add_to_archive_history(self, individuals: Sequence[Individual]): - self.archive_history.append(list(individuals)) + def add_to_evolution_best_archive(self, individuals: Sequence[Individual]): + self.evolution_best_archive.append(list(individuals)) def to_csv(self, save_dir: Optional[os.PathLike] = None, file: os.PathLike = 'history.csv'): save_dir = save_dir or self._default_save_dir @@ -219,27 +216,24 @@ def initial_assumptions(self) -> Optional[Generation]: return gen @property - def final_choices(self) -> Optional[Generation]: + def generations_count(self) -> int: + return len(self.generations) + + @property + def evolution_results(self) -> Optional[Generation]: if not self.generations: return None for gen in reversed(self.generations): - if gen.label == 'final_choices': + if gen.label == 'evolution_results': return gen @property - def generations_count(self) -> int: - return len(self.generations) - - @property - def tuning_result(self): - if hasattr(self, '_tuning_result'): - return self._tuning_result - else: + def tuning_result(self) -> Optional[Generation]: + if not self.generations: return None - - @tuning_result.setter - def tuning_result(self, val): - self._tuning_result = val + for gen in 
reversed(self.generations): + if gen.label == 'tuning_result': + return gen @property def generations(self): @@ -270,8 +264,7 @@ def lighten_history(history: OptHistory) -> OptHistory: without excessive memory usage. """ light_history = OptHistory() light_history._generations = \ - [Generation(iterable=gen, generation_num=i) for i, gen in enumerate(history.archive_history)] - light_history.archive_history = history.archive_history + [Generation(iterable=gen, generation_num=i) for i, gen in enumerate(history.evolution_best_archive)] + light_history.evolution_best_archive = history.evolution_best_archive light_history._objective = history.objective - light_history._tuning_result = history.tuning_result return light_history diff --git a/golem/core/optimisers/populational_optimizer.py b/golem/core/optimisers/populational_optimizer.py index 6c73f1ed7..c65c86ba5 100644 --- a/golem/core/optimisers/populational_optimizer.py +++ b/golem/core/optimisers/populational_optimizer.py @@ -105,7 +105,7 @@ def optimise(self, objective: ObjectiveFunction) -> Sequence[Graph]: # Adding of new population to history self._update_population(new_population) pbar.close() - self._update_population(self.best_individuals, 'final_choices') + self._update_population(self.best_individuals, 'evolution_results') return [ind.graph for ind in self.best_individuals] @property @@ -146,7 +146,7 @@ def _update_population(self, next_population: PopulationT, label: Optional[str] def _log_to_history(self, population: PopulationT, label: Optional[str] = None, metadata: Optional[Dict[str, Any]] = None): self.history.add_to_history(population, label, metadata) - self.history.add_to_archive_history(self.generations.best_individuals) + self.history.add_to_evolution_best_archive(self.generations.best_individuals) if self.requirements.history_dir: self.history.save_current_results(self.requirements.history_dir) diff --git a/golem/core/optimisers/random/random_search.py b/golem/core/optimisers/random/random_search.py index 0a7e7e501..da86172ae 100644 --- a/golem/core/optimisers/random/random_search.py +++ b/golem/core/optimisers/random/random_search.py @@ -53,7 +53,7 @@ def optimise(self, objective: ObjectiveFunction) -> Sequence[OptGraph]: self.current_iteration_num += 1 self._update_best_individual(new_individual) pbar.update() - self._update_best_individual(self.best_individual, 'final_choices') + self._update_best_individual(self.best_individual, 'evolution_results') pbar.close() return [self.best_individual.graph] @@ -68,7 +68,7 @@ def _update_best_individual(self, new_individual: Individual, label: Optional[st f'Best individuals fitness {str(self.generations)}') self.history.add_to_history([new_individual], label) - self.history.add_to_archive_history(self.generations.best_individuals) + self.history.add_to_evolution_best_archive(self.generations.best_individuals) def _eval_initial_individual(self, evaluator: EvaluationOperator) -> Individual: init_ind = Individual(choice(self.initial_graphs)) if self.initial_graphs else self._generate_new_individual() diff --git a/golem/serializers/coders/opt_history_serialization.py b/golem/serializers/coders/opt_history_serialization.py index 8e12db2cb..cca665643 100644 --- a/golem/serializers/coders/opt_history_serialization.py +++ b/golem/serializers/coders/opt_history_serialization.py @@ -46,7 +46,7 @@ def opt_history_to_json(obj: OptHistory) -> Dict[str, Any]: serialized = any_to_json(obj) serialized['individuals_pool'] = _flatten_generations_list(serialized['_generations']) 
serialized['_generations'] = _generations_list_to_uids(serialized['_generations']) - serialized['archive_history'] = _archive_to_uids(serialized['archive_history']) + serialized['evolution_best_archive'] = _archive_to_uids(serialized['evolution_best_archive']) return serialized @@ -103,14 +103,18 @@ def opt_history_from_json(cls: Type[OptHistory], json_obj: Dict[str, Any]) -> Op if 'individuals' in json_obj: json_obj['_generations'] = json_obj.pop('individuals') + # Renamed since #... + if 'archive_history' in json_obj: + json_obj['evolution_best_archive'] = json_obj.pop('archive_history') + history = any_from_json(cls, json_obj) # Read all individuals from history. individuals_pool = history.individuals_pool uid_to_individual_map = {ind.uid: ind for ind in individuals_pool} - # The attributes `individuals` and `archive_history` at the moment contain uid strings that must be converted - # to `Individual` instances. + # The attributes `individuals` and `evolution_best_archive` at the moment contain uid strings that must + # be converted to `Individual` instances. _deserialize_generations_list(history.generations, uid_to_individual_map) - _deserialize_generations_list(history.archive_history, uid_to_individual_map) + _deserialize_generations_list(history.evolution_best_archive, uid_to_individual_map) # Process histories with zero generations. if len(history.generations) > 0: # Process older histories to wrap generations into the new class. diff --git a/golem/visualisation/opt_history/graphs_interactive.py b/golem/visualisation/opt_history/graphs_interactive.py index 8e2b62ccb..7594f74f3 100644 --- a/golem/visualisation/opt_history/graphs_interactive.py +++ b/golem/visualisation/opt_history/graphs_interactive.py @@ -37,7 +37,7 @@ def visualize(self, save_path: Optional[Union[os.PathLike, str]] = None, dpi: Op x_template = 'best individual #{}' best_individuals = {i: ind - for i, ind in enumerate(self.history.archive_history[-1])} + for i, ind in enumerate(self.history.evolution_best_archive[-1])} class InteractivePlot: temp_path = Path(default_data_dir(), 'current_graph.png') diff --git a/golem/visualisation/opt_viz_extra.py b/golem/visualisation/opt_viz_extra.py index 2b71606ca..2195d5a20 100644 --- a/golem/visualisation/opt_viz_extra.py +++ b/golem/visualisation/opt_viz_extra.py @@ -46,7 +46,7 @@ def pareto_gif_create(self, objectives_numbers: Tuple[int, int] = (0, 1), objectives_names: Tuple[str] = ('ROC-AUC', 'Complexity')): files = [] - pareto_fronts = self.history.archive_history + pareto_fronts = self.history.evolution_best_archive individuals = self.history.generations array_for_analysis = individuals if individuals else pareto_fronts all_objectives = extract_objectives(array_for_analysis, objectives_numbers) diff --git a/test/integration/test_quality_improvement.py b/test/integration/test_quality_improvement.py index f34d64a3e..42cc8da9f 100644 --- a/test/integration/test_quality_improvement.py +++ b/test/integration/test_quality_improvement.py @@ -46,7 +46,7 @@ def test_multiobjective_improvement(optimizer_cls, run_fun): def check_improvement(history: OptHistory): first_pop = history.generations[1] - pareto_front = history.archive_history[-1] + pareto_front = history.evolution_best_archive[-1] first_pop_metrics = get_mean_metrics(first_pop) pareto_front_metrics = get_mean_metrics(pareto_front) diff --git a/test/unit/optimizers/test_composing_history.py b/test/unit/optimizers/test_composing_history.py index d90642ab5..18f56802c 100644 --- 
a/test/unit/optimizers/test_composing_history.py +++ b/test/unit/optimizers/test_composing_history.py @@ -5,6 +5,7 @@ import numpy as np import pytest +from hyperopt import hp from golem.core.optimisers.fitness.multi_objective_fitness import MultiObjFitness from golem.core.optimisers.genetic.evaluation import MultiprocessingDispatcher @@ -21,6 +22,8 @@ from golem.core.optimisers.optimization_parameters import GraphRequirements from golem.core.optimisers.optimizer import GraphGenerationParams from golem.core.paths import project_root +from golem.core.tuning.search_space import SearchSpace +from golem.core.tuning.simultaneous import SimultaneousTuner from golem.visualisation.opt_viz import PlotTypesEnum, OptHistoryVisualizer from golem.visualisation.opt_viz_extra import OptHistoryExtraVisualizer from test.unit.mocks.common_mocks import MockAdapter, MockDomainStructure, MockNode, MockObjectiveEvaluate @@ -61,11 +64,11 @@ def generate_history(request) -> OptHistory: new_pop.append(ind) history.add_to_history(new_pop) # since only n best individuals need to be added to archive history - history.add_to_archive_history([sorted(new_pop, key=lambda ind: ind.fitness.values[0], reverse=False)[0]]) + history.add_to_evolution_best_archive([sorted(new_pop, key=lambda ind: ind.fitness.values[0], reverse=False)[0]]) return history -def _test_individuals_in_history(history: OptHistory): +def test_individuals_in_history(history: OptHistory): uids = set() ids = set() for ind in itertools.chain(*history.generations): @@ -232,8 +235,57 @@ def test_all_historical_quality(generate_history): assert all_quality[0] == -0.9 and all_quality[4] == -1.4 and all_quality[5] == -1.3 and all_quality[10] == -1.2 +@pytest.fixture() +def search_space(): + params_per_operation = { + 'a': { + 'a1': { + 'hyperopt-dist': hp.uniformint, + 'sampling-scope': [2, 7], + 'type': 'discrete' + }, + 'a2': { + 'hyperopt-dist': hp.loguniform, + 'sampling-scope': [1e-3, 1], + 'type': 'continuous' + } + }, + 'b': { + 'b1': { + 'hyperopt-dist': hp.choice, + 'sampling-scope': [["first", "second", "third"]], + 'type': 'categorical' + }, + 'b2': { + 'hyperopt-dist': hp.uniform, + 'sampling-scope': [0.05, 1.0], + 'type': 'continuous' + }, + }, + 'e': { + 'e1': { + 'hyperopt-dist': hp.uniform, + 'sampling-scope': [0.05, 1.0], + 'type': 'continuous' + }, + 'e2': { + 'hyperopt-dist': hp.uniform, + 'sampling-scope': [0.05, 1.0], + 'type': 'continuous' + } + }, + 'f': { + 'f': { + 'hyperopt-dist': hp.uniform, + 'sampling-scope': [1e-2, 10.0], + 'type': 'continuous' + } + }} + return SearchSpace(params_per_operation) + + @pytest.mark.parametrize('n_jobs', [1, 2]) -def test_newly_generated_history(n_jobs: int): +def test_newly_generated_history(n_jobs: int, search_space): num_of_gens = 5 objective = Objective({'random_metric': RandomMetric.get_value}) init_graphs = [graph_first(), graph_second(), graph_third(), graph_fourth(), graph_fifth()] @@ -245,19 +297,32 @@ def test_newly_generated_history(n_jobs: int): opt.optimise(obj_eval) history = opt.history + tuning_iterations = 2 + tuner = SimultaneousTuner(obj_eval, search_space, MockAdapter(), iterations=tuning_iterations, n_jobs=n_jobs, + history=history) + tuner.tune(history.evolution_results[0].graph) + + # initial_assumptions=1 + num_of_gens=5 + evolution_results=1 + tuning_start=1 + + # tuning_iterations=2 + tuning_result=1 -> num_of_gens + tuning_iterations + 4 + expected_gen_num = num_of_gens + tuning_iterations + 4 + + # initial_assumptions=1 + num_of_gens=5 + evolution_results=1 -> num_of_gens + 
2 + expected_evolution_gen_num = num_of_gens + 2 + assert history is not None - assert len(history.generations) == num_of_gens + 2 # initial_assumptions + num_of_gens + final_choices - assert len(history.archive_history) == num_of_gens + 2 # initial_assumptions + num_of_gens + final_choices + assert len(history.generations) == expected_gen_num + assert len(history.evolution_best_archive) == expected_evolution_gen_num assert len(history.initial_assumptions) == 5 - assert len(history.final_choices) == 1 + assert len(history.evolution_results) == 1 + assert len(history.tuning_result) == 1 assert hasattr(history, 'objective') - _test_individuals_in_history(history) + test_individuals_in_history(history) # Test history dumps dumped_history_json = history.save() loaded_history = OptHistory.load(dumped_history_json) assert dumped_history_json is not None assert dumped_history_json == loaded_history.save(), 'The history is not equal to itself after reloading!' - _test_individuals_in_history(loaded_history) + test_individuals_in_history(loaded_history) @pytest.mark.parametrize('generate_history', [[3, 4, create_individual], @@ -300,7 +365,7 @@ def test_history_correct_serialization(): assert history.generations == reloaded_history.generations assert dumped_history_json == reloaded_history.save(), 'The history is not equal to itself after reloading!' - _test_individuals_in_history(reloaded_history) + test_individuals_in_history(reloaded_history) def test_collect_intermediate_metric(): @@ -326,7 +391,7 @@ def test_load_zero_generations_history(): path_to_history = os.path.join(project_root(), 'test', 'data', 'zero_gen_history.json') history = OptHistory.load(path_to_history) assert isinstance(history, OptHistory) - assert len(history.archive_history) == 0 + assert len(history.evolution_best_archive) == 0 assert history.objective is not None @@ -340,9 +405,9 @@ def test_save_load_light_history(generate_history): assert file_name in os.listdir(path_to_dir) loaded_history = OptHistory().load(path_to_history) assert isinstance(loaded_history, OptHistory) - assert len(loaded_history.archive_history) == len(loaded_history.generations) == 100 + assert len(loaded_history.evolution_best_archive) == len(loaded_history.generations) == 100 for i, _ in enumerate(loaded_history.generations): - assert len(loaded_history.generations[i]) == len(loaded_history.archive_history[i]) == 1 + assert len(loaded_history.generations[i]) == len(loaded_history.evolution_best_archive[i]) == 1 os.remove(path=os.path.join(path_to_dir, file_name)) diff --git a/test/unit/serialization/test_external_serialize.py b/test/unit/serialization/test_external_serialize.py index 9d611dc31..60d4554a3 100644 --- a/test/unit/serialization/test_external_serialize.py +++ b/test/unit/serialization/test_external_serialize.py @@ -31,7 +31,7 @@ def test_external_history_load(history_path): assert history is not None history_plausible(history) - assert len(history.individuals) > 0 + assert len(history.generations) > 0 def history_plausible(history: OptHistory): @@ -44,7 +44,7 @@ def individual_plausible(individual: Individual): parent_operator = individual.parent_operator operations_correct = True if parent_operator: - type_correct = parent_operator.type_ in ['mutation', 'crossover', 'selection'] + type_correct = parent_operator.type_ in ['mutation', 'crossover', 'selection', 'tuning'] parent_inds_correct = all(isinstance(ind, Individual) for ind in parent_operator.parent_individuals) operations_correct = type_correct and parent_inds_correct assert 
graph_correct From 7068a0da07a02bb7e4dfa774cb8081bcc74fd1f7 Mon Sep 17 00:00:00 2001 From: morrisnein Date: Thu, 26 Oct 2023 02:18:10 +0300 Subject: [PATCH 03/17] add test for tuner classes to support tuning history --- test/unit/tuning/test_tuning.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/test/unit/tuning/test_tuning.py b/test/unit/tuning/test_tuning.py index cec2a15a3..875eebed2 100644 --- a/test/unit/tuning/test_tuning.py +++ b/test/unit/tuning/test_tuning.py @@ -4,14 +4,16 @@ from hyperopt import hp from golem.core.optimisers.objective import Objective, ObjectiveEvaluate +from golem.core.optimisers.opt_history_objects.opt_history import OptHistory from golem.core.tuning.iopt_tuner import IOptTuner from golem.core.tuning.optuna_tuner import OptunaTuner from golem.core.tuning.search_space import SearchSpace from golem.core.tuning.sequential import SequentialTuner from golem.core.tuning.simultaneous import SimultaneousTuner -from test.unit.mocks.common_mocks import MockAdapter, MockObjectiveEvaluate, mock_graph_with_params, \ - opt_graph_with_params, MockNode, MockDomainStructure -from test.unit.utils import ParamsSumMetric, ParamsProductMetric +from test.unit.mocks.common_mocks import MockAdapter, MockDomainStructure, MockNode, MockObjectiveEvaluate, \ + mock_graph_with_params, opt_graph_with_params +from test.unit.optimizers.test_composing_history import test_individuals_in_history +from test.unit.utils import ParamsProductMetric, ParamsSumMetric def not_tunable_mock_graph(): @@ -128,3 +130,18 @@ def test_multi_objective_tuning(search_space, tuner_cls, init_graph, adapter, ob final_metric = obj_eval.evaluate(graph) assert final_metric is not None assert not init_metric.dominates(final_metric) + + +@pytest.mark.parametrize('tuner_cls', [OptunaTuner, SimultaneousTuner, SequentialTuner, IOptTuner]) +@pytest.mark.parametrize('graph, adapter, obj_eval', + [(mock_graph_with_params(), MockAdapter(), + MockObjectiveEvaluate(Objective({'sum_metric': ParamsSumMetric.get_value}))), + (opt_graph_with_params(), None, + ObjectiveEvaluate(Objective({'sum_metric': ParamsSumMetric.get_value})))]) +def test_tuning_supports_history(search_space, tuner_cls, graph, adapter, obj_eval): + history = OptHistory() + iterations = 10 + tuner = tuner_cls(obj_eval, search_space, adapter, iterations=iterations, history=history) + tuner.tune(deepcopy(graph)) + assert history.tuning_result is not None + test_individuals_in_history(history) From be1457e6f0c1bcb84e29b2439c6dc818698805a2 Mon Sep 17 00:00:00 2001 From: morrisnein Date: Thu, 26 Oct 2023 02:31:44 +0300 Subject: [PATCH 04/17] pep8 --- experiments/experiment_analyzer.py | 8 +++++--- test/unit/optimizers/test_composing_history.py | 3 ++- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/experiments/experiment_analyzer.py b/experiments/experiment_analyzer.py index 82c1a0c4f..9ba617692 100644 --- a/experiments/experiment_analyzer.py +++ b/experiments/experiment_analyzer.py @@ -231,7 +231,8 @@ def plot_convergence(self, path_to_save: str, with_confidence: bool = True, def analyze_statistical_significance(self, data_to_analyze: Dict[str, Dict[str, List[float]]], stat_tests: List[Callable], path_to_save: Optional[str] = None, - test_format: Optional[List[str]] = None) -> Dict[str, Dict[str, Dict[str, float]]]: + test_format: Optional[List[str]] = None + ) -> Dict[str, Dict[str, Dict[str,
float]]]: """ Method to perform statistical analysis of data. Metric data obtained with 'analyze_metrics' and convergence data obtained with 'analyze_convergence' can be simply analyzed, for example. :param data_to_analyze: data to analyze. @@ -291,7 +292,8 @@ def analyze_structural_complexity(self, path_to_save: str, dir_name: str, class_ os.makedirs(path_to_save, exist_ok=True) for setup, dataset, path_to_launch in self._get_path_to_launch(): - if not self._check_if_file_or_folder_present(path=os.path.join(path_to_launch, dir_name), is_raise=is_raise): + if not self._check_if_file_or_folder_present(path=os.path.join(path_to_launch, dir_name), + is_raise=is_raise): continue path_to_json = None @@ -324,7 +326,7 @@ def analyze_structural_complexity(self, path_to_save: str, dir_name: str, class_ saved_results = [int(cur_name.split("_")[0]) for cur_name in os.listdir(cur_path_to_save) if cur_name not in self._folders_to_ignore] max_saved_num = max(saved_results) if saved_results else 0 - cur_path_to_save = os.path.join(cur_path_to_save, f'{max_saved_num+1}_result.png') + cur_path_to_save = os.path.join(cur_path_to_save, f'{max_saved_num + 1}_result.png') result.show(cur_path_to_save, title=title) self._log.info(f"Resulting graph was saved to {cur_path_to_save}") diff --git a/test/unit/optimizers/test_composing_history.py b/test/unit/optimizers/test_composing_history.py index 18f56802c..286a1eb24 100644 --- a/test/unit/optimizers/test_composing_history.py +++ b/test/unit/optimizers/test_composing_history.py @@ -64,7 +64,8 @@ def generate_history(request) -> OptHistory: new_pop.append(ind) history.add_to_history(new_pop) # since only n best individuals need to be added to archive history - history.add_to_evolution_best_archive([sorted(new_pop, key=lambda ind: ind.fitness.values[0], reverse=False)[0]]) + history.add_to_evolution_best_archive( + [sorted(new_pop, key=lambda ind: ind.fitness.values[0], reverse=False)[0]]) return history From 2b21d96d9e6dc70c64259b2492dd977d0864d0b4 Mon Sep 17 00:00:00 2001 From: morrisnein Date: Thu, 26 Oct 2023 02:48:46 +0300 Subject: [PATCH 05/17] fix default fitness value support for multi objective tuning --- golem/core/tuning/tuner_interface.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/golem/core/tuning/tuner_interface.py b/golem/core/tuning/tuner_interface.py index 13a24a712..432b95301 100644 --- a/golem/core/tuning/tuner_interface.py +++ b/golem/core/tuning/tuner_interface.py @@ -194,8 +194,8 @@ def evaluate_graph(self, graph: OptGraph, label: Optional[str] = None) -> Union[ metric_value = self._default_metric_value elif isinstance(graph_fitness, MultiObjFitness): - metric_value = graph_fitness.values - for e, value in enumerate(metric_value): + metric_value = list(graph_fitness.values) # MultiObjFitness.values returns a tuple (immutable). + for e, value in enumerate(metric_value): # Consequently, the next line would cause an error by assignment. 
metric_value[e] = self._default_metric_value if value is None else value else: raise ValueError(f'Objective evaluation must be a Fitness instance, not {graph_fitness}.') From d9d6c3287850e6fc3272f5003ffae642d2ac84f4 Mon Sep 17 00:00:00 2001 From: morrisnein Date: Thu, 26 Oct 2023 02:57:33 +0300 Subject: [PATCH 06/17] fix test function name --- test/unit/optimizers/test_composing_history.py | 8 ++++---- test/unit/tuning/test_tuning.py | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/test/unit/optimizers/test_composing_history.py b/test/unit/optimizers/test_composing_history.py index 286a1eb24..ed0bf7878 100644 --- a/test/unit/optimizers/test_composing_history.py +++ b/test/unit/optimizers/test_composing_history.py @@ -69,7 +69,7 @@ def generate_history(request) -> OptHistory: return history -def test_individuals_in_history(history: OptHistory): +def check_individuals_in_history(history: OptHistory): uids = set() ids = set() for ind in itertools.chain(*history.generations): @@ -317,13 +317,13 @@ def test_newly_generated_history(n_jobs: int, search_space): assert len(history.evolution_results) == 1 assert len(history.tuning_result) == 1 assert hasattr(history, 'objective') - test_individuals_in_history(history) + check_individuals_in_history(history) # Test history dumps dumped_history_json = history.save() loaded_history = OptHistory.load(dumped_history_json) assert dumped_history_json is not None assert dumped_history_json == loaded_history.save(), 'The history is not equal to itself after reloading!' - test_individuals_in_history(loaded_history) + check_individuals_in_history(loaded_history) @pytest.mark.parametrize('generate_history', [[3, 4, create_individual], @@ -366,7 +366,7 @@ def test_history_correct_serialization(): assert history.generations == reloaded_history.generations assert dumped_history_json == reloaded_history.save(), 'The history is not equal to itself after reloading!' 
- test_individuals_in_history(reloaded_history) + check_individuals_in_history(reloaded_history) def test_collect_intermediate_metric(): diff --git a/test/unit/tuning/test_tuning.py b/test/unit/tuning/test_tuning.py index 875eebed2..801b69e03 100644 --- a/test/unit/tuning/test_tuning.py +++ b/test/unit/tuning/test_tuning.py @@ -12,7 +12,7 @@ from golem.core.tuning.simultaneous import SimultaneousTuner from test.unit.mocks.common_mocks import MockAdapter, MockDomainStructure, MockNode, MockObjectiveEvaluate, \ mock_graph_with_params, opt_graph_with_params -from test.unit.optimizers.test_composing_history import test_individuals_in_history +from test.unit.optimizers.test_composing_history import check_individuals_in_history from test.unit.utils import ParamsProductMetric, ParamsSumMetric @@ -144,4 +144,4 @@ def test_tuning_supports_history(search_space, tuner_cls, graph, adapter, obj_ev tuner = tuner_cls(obj_eval, search_space, adapter, iterations=iterations, history=history) tuner.tune(deepcopy(graph)) assert history.tuning_result is not None - test_individuals_in_history(history) + check_individuals_in_history(history) From 03915f1fab2c4d893983fa48c3df22ba5183c5ad Mon Sep 17 00:00:00 2001 From: morrisnein Date: Mon, 30 Oct 2023 01:18:04 +0300 Subject: [PATCH 07/17] use individuals & fitness in tuner interface --- .../opt_history_objects/opt_history.py | 8 + golem/core/tuning/optuna_tuner.py | 2 +- golem/core/tuning/tuner_interface.py | 141 ++++++++++-------- 3 files changed, 88 insertions(+), 63 deletions(-) diff --git a/golem/core/optimisers/opt_history_objects/opt_history.py b/golem/core/optimisers/opt_history_objects/opt_history.py index 99f6a59c2..4aa554033 100644 --- a/golem/core/optimisers/opt_history_objects/opt_history.py +++ b/golem/core/optimisers/opt_history_objects/opt_history.py @@ -227,6 +227,14 @@ def evolution_results(self) -> Optional[Generation]: if gen.label == 'evolution_results': return gen + @property + def tuning_start(self) -> Optional[Generation]: + if not self.generations: + return None + for gen in reversed(self.generations): + if gen.label == 'tuning_start': + return gen + @property def tuning_result(self) -> Optional[Generation]: if not self.generations: diff --git a/golem/core/tuning/optuna_tuner.py b/golem/core/tuning/optuna_tuner.py index 48b164464..c633127bd 100644 --- a/golem/core/tuning/optuna_tuner.py +++ b/golem/core/tuning/optuna_tuner.py @@ -51,7 +51,7 @@ def tune(self, graph: DomainGraphForTune, show_progress: bool = True) -> \ init_parameters, has_parameters_to_optimize = self._get_initial_point(graph) if not has_parameters_to_optimize: self._stop_tuning_with_message(f'Graph {graph.graph_description} has no parameters to optimize') - tuned_graphs = self.init_graph + tuned_graphs = self.init_individual.graph else: # Enqueue initial point to try if init_parameters: diff --git a/golem/core/tuning/tuner_interface.py b/golem/core/tuning/tuner_interface.py index 432b95301..732d9ea4e 100644 --- a/golem/core/tuning/tuner_interface.py +++ b/golem/core/tuning/tuner_interface.py @@ -50,7 +50,7 @@ def __init__(self, objective_evaluate: ObjectiveFunction, deviation: float = 0.05, history: Optional[OptHistory] = None): self.iterations = iterations - self.current_iteration = 0 + self.evaluations_count = 0 self.adapter = adapter or IdentityAdapter() self.search_space = search_space self.n_jobs = n_jobs @@ -64,9 +64,8 @@ def __init__(self, objective_evaluate: ObjectiveFunction, self._default_metric_value = MAX_TUNING_METRIC_VALUE self.was_tuned = False - 
self.init_graph = None - self.init_metric = None - self.obtained_metric = None + self.init_individual = None + self.obtained_individual = None self.history = history self.log = default_log(self) @@ -94,12 +93,15 @@ def init_check(self, graph: OptGraph) -> None: self.log.info('Hyperparameters optimization start: estimation of metric for initial graph') # Train graph - self.init_graph = deepcopy(graph) + graph = deepcopy(graph) + fitness = self.objective_evaluate(graph) + self.init_individual = self._create_individual(graph, fitness) + self._add_to_history([self.init_individual], label='tuning_start') - self.init_metric = self.evaluate_graph(graph=self.init_graph, label='tuning_start') - self.log.message(f'Initial graph: {graph_structure(self.init_graph)} \n' + init_metric = self._fitness_to_metric_value(fitness) + self.log.message(f'Initial graph: {graph_structure(graph)} \n' f'Initial metric: ' - f'{list(map(lambda x: round(abs(x), 3), ensure_wrapped_in_sequence(self.init_metric)))}') + f'{list(map(lambda x: round(abs(x), 3), ensure_wrapped_in_sequence(init_metric)))}') def final_check(self, tuned_graphs: Union[OptGraph, Sequence[OptGraph]], multi_obj: bool = False) \ -> Union[OptGraph, Sequence[OptGraph]]: @@ -118,88 +120,96 @@ def final_check(self, tuned_graphs: Union[OptGraph, Sequence[OptGraph]], multi_o return self._single_obj_final_check(tuned_graphs) def _single_obj_final_check(self, tuned_graph: OptGraph): - self.obtained_metric = self.evaluate_graph(graph=tuned_graph, label='tuning_result') + obtained_fitness = self.objective_evaluate(tuned_graph) + init_individual = self.init_individual prefix_tuned_phrase = 'Return tuned graph due to the fact that obtained metric' prefix_init_phrase = 'Return init graph due to the fact that obtained metric' - if np.isclose(self.obtained_metric, self._default_metric_value): - self.obtained_metric = None - # 0.05% deviation is acceptable - deviation_value = (self.init_metric / 100.0) * self.deviation - init_metric = self.init_metric + deviation_value * (-np.sign(self.init_metric)) - if self.obtained_metric is None: + init_metric = self._fitness_to_metric_value(init_individual.fitness) + deviation_value = (init_metric / 100.0) * self.deviation + init_fitness_with_deviation = SingleObjFitness(init_metric + deviation_value * (-np.sign(init_metric))) + + if not obtained_fitness.valid: self.log.info(f'{prefix_init_phrase} is None. 
Initial metric is {abs(init_metric):.3f}') - final_graph = self.init_graph - final_metric = self.init_metric - elif self.obtained_metric <= init_metric: - self.log.info(f'{prefix_tuned_phrase} {abs(self.obtained_metric):.3f} equal or ' + final_individual = init_individual + elif obtained_fitness >= init_fitness_with_deviation: + obtained_metric = self._fitness_to_metric_value(obtained_fitness) + self.log.info(f'{prefix_tuned_phrase} {abs(obtained_metric):.3f} equal or ' f'better than initial (+ {self.deviation}% deviation) {abs(init_metric):.3f}') - final_graph = tuned_graph - final_metric = self.obtained_metric + final_individual = self._create_individual(tuned_graph, obtained_fitness) else: - self.log.info(f'{prefix_init_phrase} {abs(self.obtained_metric):.3f} ' + obtained_metric = self._fitness_to_metric_value(obtained_fitness) + self.log.info(f'{prefix_init_phrase} {abs(obtained_metric):.3f} ' f'worse than initial (+ {self.deviation}% deviation) {abs(init_metric):.3f}') - final_graph = self.init_graph - final_metric = self.init_metric - self.obtained_metric = final_metric - self.log.message(f'Final graph: {graph_structure(final_graph)}') + final_individual = init_individual + self.log.message(f'Final graph: {graph_structure(final_individual.graph)}') + + final_metric = self._fitness_to_metric_value(final_individual.fitness) + if final_metric is not None: self.log.message(f'Final metric: {abs(final_metric):.3f}') else: self.log.message('Final metric is None') - return final_graph + + self.obtained_individual = final_individual + self._add_to_history([self.obtained_individual], 'tuning_result') + + return self.obtained_individual.graph def _multi_obj_final_check(self, tuned_graphs: Sequence[OptGraph]) -> Sequence[OptGraph]: - self.obtained_metric = [] + obtained_fitnesses = [self.objective_evaluate(graph) for graph in tuned_graphs] + final_graphs = [] - for tuned_graph in tuned_graphs: - obtained_metric = self.evaluate_graph(graph=tuned_graph, label='tuning_result') - for e, value in enumerate(obtained_metric): - if np.isclose(value, self._default_metric_value): - obtained_metric[e] = None - if not MultiObjFitness(self.init_metric).dominates(MultiObjFitness(obtained_metric)): - self.obtained_metric.append(obtained_metric) + self.obtained_individual = [] + for tuned_graph, obtained_fitness in zip(tuned_graphs, obtained_fitnesses): + if obtained_fitness.dominates(self.init_individual.fitness): + individual = self._create_individual(tuned_graph, obtained_fitness) + self.obtained_individual.append(individual) final_graphs.append(tuned_graph) if final_graphs: - metrics_formatted = [str([round(x, 3) for x in metrics]) for metrics in self.obtained_metric] + obtained_metrics = [self._fitness_to_metric_value(fitness) for fitness in obtained_fitnesses] + metrics_formatted = [str([round(x, 3) for x in metrics]) for metrics in obtained_metrics] metrics_formatted = '\n'.join(metrics_formatted) self.log.message('Return tuned graphs with obtained metrics \n' f'{metrics_formatted}') else: self.log.message('Initial metric dominates all found solutions. Return initial graph.') - final_graphs = self.init_graph + final_graphs = self.init_individual.graph + + self._add_to_history(self.obtained_individual, label='tuning_result') + return final_graphs - def evaluate_graph(self, graph: OptGraph, label: Optional[str] = None) -> Union[float, Sequence[float]]: + def evaluate_graph(self, graph: OptGraph) -> Union[float, Sequence[float]]: """ Method calculates metric for algorithm validation. 
Also, responsible for saving of tuning history. Args: - graph: Graph to evaluate - label: Label for tuning history. + graph: Graphs to evaluate Returns: - value of loss function + values of loss function for graphs """ graph_fitness = self.objective_evaluate(graph) - - self._add_to_history(graph, graph_fitness, label) - - if isinstance(graph_fitness, SingleObjFitness): - metric_value = graph_fitness.value - if not graph_fitness.valid: + individual = self._create_individual(graph, graph_fitness) + self._add_to_history([individual]) + self.evaluations_count += 1 + return self._fitness_to_metric_value(graph_fitness) + + def _fitness_to_metric_value(self, fitness: Fitness) -> Union[float, Sequence[float]]: + if isinstance(fitness, SingleObjFitness): + metric_value = fitness.value + if not fitness.valid: metric_value = self._default_metric_value - elif isinstance(graph_fitness, MultiObjFitness): - metric_value = list(graph_fitness.values) # MultiObjFitness.values returns a tuple (immutable). - for e, value in enumerate(metric_value): # Consequently, the next line would cause an error by assignment. - metric_value[e] = self._default_metric_value if value is None else value + elif isinstance(fitness, MultiObjFitness): + metric_value = fitness.values + metric_value = tuple(self._default_metric_value if value is None else value for value in metric_value) else: - raise ValueError(f'Objective evaluation must be a Fitness instance, not {graph_fitness}.') - + raise ValueError(f'Objective evaluation must be a Fitness instance, not {fitness}.') return metric_value @staticmethod @@ -246,23 +256,30 @@ def set_arg_node(graph: OptGraph, node_id: int, node_params: dict) -> OptGraph: def _stop_tuning_with_message(self, message: str): self.log.message(message) - self.obtained_metric = self.init_metric + self.obtained_fitness = self.init_individual.fitness - def _add_to_history(self, graph: OptGraph, fitness: Fitness, label: Optional[str]): - if not self.history: - return + def _create_individual(self, graph: OptGraph, fitness: Fitness) -> Individual: history = self.history - if history.generations: - parent_individuals = history.generations[-1] + if history and history.tuning_start: + parent_individuals = history.tuning_start else: parent_individuals = [] tuner_name = self.__class__.__name__ parent_operator = ParentOperator(type_='tuning', operators=[tuner_name], parent_individuals=parent_individuals) individual = Individual(graph, parent_operator=parent_operator, fitness=fitness) + + return individual + + def _add_to_history(self, individuals: Sequence[Individual], label: str = None): + history = self.history + tuner_name = self.__class__.__name__ + + if not history: + return + if label is None: - label = f'tuning_iteration_{self.current_iteration}' - history.add_to_history(individuals=[individual], + label = f'tuning_iteration_{self.evaluations_count}' + history.add_to_history(individuals=individuals, generation_label=label, generation_metadata=dict(tuner=tuner_name)) - self.current_iteration += 1 From 5beeb223f328508df4d17b9d5701aef77c954346 Mon Sep 17 00:00:00 2001 From: morrisnein Date: Mon, 30 Oct 2023 01:18:39 +0300 Subject: [PATCH 08/17] fix tests & add test for multi-objective tuning --- test/unit/tuning/test_tuning.py | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/test/unit/tuning/test_tuning.py b/test/unit/tuning/test_tuning.py index 801b69e03..2d8173ae0 100644 --- a/test/unit/tuning/test_tuning.py +++ b/test/unit/tuning/test_tuning.py @@ -83,8 
+83,8 @@ def test_tuner_improves_metric(search_space, tuner_cls, graph, adapter, obj_eval tuner = tuner_cls(obj_eval, search_space, adapter, iterations=20) tuned_graph = tuner.tune(deepcopy(graph)) assert tuned_graph is not None - assert tuner.obtained_metric is not None - assert tuner.init_metric > tuner.obtained_metric + assert tuner.obtained_individual is not None + assert tuner.obtained_individual.fitness > tuner.init_individual.fitness @pytest.mark.parametrize('tuner_cls', [OptunaTuner, SimultaneousTuner, SequentialTuner, IOptTuner]) @@ -95,8 +95,8 @@ def test_tuner_with_no_tunable_params(search_space, tuner_cls, graph, adapter, o tuner = tuner_cls(obj_eval, search_space, adapter, iterations=20) tuned_graph = tuner.tune(deepcopy(graph)) assert tuned_graph is not None - assert tuner.obtained_metric is not None - assert tuner.init_metric == tuner.obtained_metric + assert tuner.obtained_individual is not None + assert tuner.init_individual.fitness == tuner.obtained_individual.fitness @pytest.mark.parametrize('graph', [mock_graph_with_params(), opt_graph_with_params(), not_tunable_mock_graph()]) @@ -107,8 +107,8 @@ def test_node_tuning(search_space, graph): tuner = SequentialTuner(obj_eval, search_space, adapter, iterations=10) tuned_graph = tuner.tune_node(graph, node_idx) assert tuned_graph is not None - assert tuner.obtained_metric is not None - assert tuner.init_metric >= tuner.obtained_metric + assert tuner.obtained_individual is not None + assert tuner.obtained_individual.fitness >= tuner.init_individual.fitness @pytest.mark.parametrize('tuner_cls', [OptunaTuner]) @@ -144,4 +144,24 @@ def test_tuning_supports_history(search_space, tuner_cls, graph, adapter, obj_ev tuner = tuner_cls(obj_eval, search_space, adapter, iterations=iterations, history=history) tuner.tune(deepcopy(graph)) assert history.tuning_result is not None + assert len(history.generations) == tuner.evaluations_count + 2 + check_individuals_in_history(history) + + +@pytest.mark.parametrize('tuner_cls', [OptunaTuner]) +@pytest.mark.parametrize('init_graph, adapter, obj_eval', + [(mock_graph_with_params(), MockAdapter(), + MockObjectiveEvaluate(Objective({'sum_metric': ParamsSumMetric.get_value, + 'prod_metric': ParamsProductMetric.get_value}, + is_multi_objective=True))), + (opt_graph_with_params(), None, + ObjectiveEvaluate(Objective({'sum_metric': ParamsSumMetric.get_value, + 'prod_metric': ParamsProductMetric.get_value}, + is_multi_objective=True)))]) +def test_multi_objective_tuning_supports_history(search_space, tuner_cls, init_graph, adapter, obj_eval): + history = OptHistory() + tuner = tuner_cls(obj_eval, search_space, adapter, iterations=20, objectives_number=2, history=history) + tuner.tune(deepcopy(init_graph), show_progress=False) + assert history.tuning_result is not None + assert len(history.generations) == tuner.evaluations_count + 2 check_individuals_in_history(history) From ae5b242540aa9f5816a2301230f7839e0159abb1 Mon Sep 17 00:00:00 2001 From: morrisnein Date: Mon, 30 Oct 2023 01:27:53 +0300 Subject: [PATCH 09/17] fix historical individuals parenting --- golem/core/tuning/tuner_interface.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/golem/core/tuning/tuner_interface.py b/golem/core/tuning/tuner_interface.py index 732d9ea4e..1b567f5fe 100644 --- a/golem/core/tuning/tuner_interface.py +++ b/golem/core/tuning/tuner_interface.py @@ -260,7 +260,9 @@ def _stop_tuning_with_message(self, message: str): def _create_individual(self, graph: OptGraph, fitness: Fitness) -> Individual: 
history = self.history - if history and history.tuning_start: + if history and history.evolution_results: + parent_individuals = history.evolution_results + elif history and history.tuning_start: parent_individuals = history.tuning_start else: parent_individuals = [] @@ -271,7 +273,7 @@ def _create_individual(self, graph: OptGraph, fitness: Fitness) -> Individual: return individual - def _add_to_history(self, individuals: Sequence[Individual], label: str = None): + def _add_to_history(self, individuals: Sequence[Individual], label: Optional[str] = None): history = self.history tuner_name = self.__class__.__name__ From e98ec482a0463f354f5fd8ea5ab536bbeb05ac5f Mon Sep 17 00:00:00 2001 From: morrisnein Date: Mon, 30 Oct 2023 01:28:25 +0300 Subject: [PATCH 10/17] add n_jobs=-1 to test of generated history --- test/unit/optimizers/test_composing_history.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/unit/optimizers/test_composing_history.py b/test/unit/optimizers/test_composing_history.py index ed0bf7878..91c02a902 100644 --- a/test/unit/optimizers/test_composing_history.py +++ b/test/unit/optimizers/test_composing_history.py @@ -285,7 +285,7 @@ def search_space(): return SearchSpace(params_per_operation) -@pytest.mark.parametrize('n_jobs', [1, 2]) +@pytest.mark.parametrize('n_jobs', [1, 2, -1]) def test_newly_generated_history(n_jobs: int, search_space): num_of_gens = 5 objective = Objective({'random_metric': RandomMetric.get_value}) From c19ebbb3d7ff76186e714317b17655fc99c4a082 Mon Sep 17 00:00:00 2001 From: morrisnein Date: Mon, 30 Oct 2023 02:21:38 +0300 Subject: [PATCH 11/17] add test for multi-objective history, fix tuner interface --- golem/core/tuning/tuner_interface.py | 5 ++- .../unit/optimizers/test_composing_history.py | 39 +++++++++++++------ 2 files changed, 30 insertions(+), 14 deletions(-) diff --git a/golem/core/tuning/tuner_interface.py b/golem/core/tuning/tuner_interface.py index 1b567f5fe..ac9199382 100644 --- a/golem/core/tuning/tuner_interface.py +++ b/golem/core/tuning/tuner_interface.py @@ -154,7 +154,7 @@ def _single_obj_final_check(self, tuned_graph: OptGraph): self.log.message('Final metric is None') self.obtained_individual = final_individual - self._add_to_history([self.obtained_individual], 'tuning_result') + self._add_to_history([self.obtained_individual], label='tuning_result') return self.obtained_individual.graph @@ -176,9 +176,10 @@ def _multi_obj_final_check(self, tuned_graphs: Sequence[OptGraph]) -> Sequence[O f'{metrics_formatted}') else: self.log.message('Initial metric dominates all found solutions. 
Return initial graph.') + self.obtained_individual = self.init_individual final_graphs = self.init_individual.graph - self._add_to_history(self.obtained_individual, label='tuning_result') + self._add_to_history(ensure_wrapped_in_sequence(self.obtained_individual), label='tuning_result') return final_graphs diff --git a/test/unit/optimizers/test_composing_history.py b/test/unit/optimizers/test_composing_history.py index 91c02a902..822f98cdb 100644 --- a/test/unit/optimizers/test_composing_history.py +++ b/test/unit/optimizers/test_composing_history.py @@ -12,7 +12,7 @@ from golem.core.optimisers.genetic.gp_optimizer import EvoGraphOptimizer from golem.core.optimisers.genetic.gp_params import GPAlgorithmParameters from golem.core.optimisers.genetic.operators.base_mutations import MutationTypesEnum -from golem.core.optimisers.genetic.operators.crossover import CrossoverTypesEnum, Crossover +from golem.core.optimisers.genetic.operators.crossover import Crossover, CrossoverTypesEnum from golem.core.optimisers.genetic.operators.mutation import Mutation from golem.core.optimisers.graph import OptGraph, OptNode from golem.core.optimisers.objective import Objective, ObjectiveEvaluate @@ -22,12 +22,13 @@ from golem.core.optimisers.optimization_parameters import GraphRequirements from golem.core.optimisers.optimizer import GraphGenerationParams from golem.core.paths import project_root +from golem.core.tuning.optuna_tuner import OptunaTuner from golem.core.tuning.search_space import SearchSpace from golem.core.tuning.simultaneous import SimultaneousTuner -from golem.visualisation.opt_viz import PlotTypesEnum, OptHistoryVisualizer +from golem.visualisation.opt_viz import OptHistoryVisualizer, PlotTypesEnum from golem.visualisation.opt_viz_extra import OptHistoryExtraVisualizer from test.unit.mocks.common_mocks import MockAdapter, MockDomainStructure, MockNode, MockObjectiveEvaluate -from test.unit.utils import RandomMetric, graph_first, graph_second, graph_third, graph_fourth, graph_fifth +from test.unit.utils import RandomMetric, graph_fifth, graph_first, graph_fourth, graph_second, graph_third def create_mock_graph_individual(): @@ -286,9 +287,18 @@ def search_space(): @pytest.mark.parametrize('n_jobs', [1, 2, -1]) -def test_newly_generated_history(n_jobs: int, search_space): +@pytest.mark.parametrize('tuner_cls, objective', + [(SimultaneousTuner, + Objective({'random_metric': RandomMetric.get_value})), + (OptunaTuner, + Objective({ + 'random_metric_1': RandomMetric.get_value, + 'random_metric_2': RandomMetric.get_value, + }, + is_multi_objective=True)) + ]) +def test_newly_generated_history(n_jobs: int, search_space, tuner_cls, objective): num_of_gens = 5 - objective = Objective({'random_metric': RandomMetric.get_value}) init_graphs = [graph_first(), graph_second(), graph_third(), graph_fourth(), graph_fifth()] requirements = GraphRequirements(num_of_generations=num_of_gens) graph_generation_params = GraphGenerationParams(available_node_types=['a', 'b', 'c', 'd', 'e', 'f']) @@ -299,13 +309,14 @@ def test_newly_generated_history(n_jobs: int, search_space): history = opt.history tuning_iterations = 2 - tuner = SimultaneousTuner(obj_eval, search_space, MockAdapter(), iterations=tuning_iterations, n_jobs=n_jobs, - history=history) + objectives_number = len(objective.metric_names) + tuner = tuner_cls(obj_eval, search_space, MockAdapter(), iterations=tuning_iterations, n_jobs=n_jobs, + objectives_number=objectives_number, history=history) tuner.tune(history.evolution_results[0].graph) - # 
initial_assumptions=1 + num_of_gens=5 + evolution_results=1 + tuning_start=1 + - # tuning_iterations=2 + tuning_result=1 -> num_of_gens + tuning_iterations + 4 - expected_gen_num = num_of_gens + tuning_iterations + 4 + # initial_assumptions=1 + num_of_gens + evolution_results=1 + tuning_start=1 + + # evaluations_count + tuning_result=1 -> num_of_gens + evaluations_count + 4 + expected_gen_num = num_of_gens + tuner.evaluations_count + 4 # initial_assumptions=1 + num_of_gens=5 + evolution_results=1 -> num_of_gens + 2 expected_evolution_gen_num = num_of_gens + 2 @@ -314,8 +325,12 @@ assert len(history.generations) == expected_gen_num assert len(history.evolution_best_archive) == expected_evolution_gen_num assert len(history.initial_assumptions) == 5 - assert len(history.evolution_results) == 1 - assert len(history.tuning_result) == 1 + if objectives_number == 1: + assert len(history.evolution_results) == 1 + assert len(history.tuning_result) == 1 + else: + assert len(history.evolution_results) >= 1 + assert len(history.tuning_result) >= 1 assert hasattr(history, 'objective') check_individuals_in_history(history) # Test history dumps From 56a51f9ed4f1d4724ad811dfbfe423951bd459fd Mon Sep 17 00:00:00 2001 From: morrisnein Date: Mon, 30 Oct 2023 02:37:36 +0300 Subject: [PATCH 12/17] interface fixes --- golem/core/tuning/tuner_interface.py | 8 ++++---- test/unit/optimizers/test_composing_history.py | 3 ++- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/golem/core/tuning/tuner_interface.py b/golem/core/tuning/tuner_interface.py index ac9199382..01ad00e1d 100644 --- a/golem/core/tuning/tuner_interface.py +++ b/golem/core/tuning/tuner_interface.py @@ -176,10 +176,10 @@ def _multi_obj_final_check(self, tuned_graphs: Sequence[OptGraph]) -> Sequence[O f'{metrics_formatted}') else: self.log.message('Initial metric dominates all found solutions.
Return initial graph.') - self.obtained_individual = self.init_individual - final_graphs = self.init_individual.graph + self.obtained_individual = [self.init_individual] + final_graphs = [self.init_individual.graph] - self._add_to_history(ensure_wrapped_in_sequence(self.obtained_individual), label='tuning_result') + self._add_to_history(self.obtained_individual, label='tuning_result') return final_graphs @@ -192,7 +192,7 @@ def evaluate_graph(self, graph: OptGraph) -> Union[float, Sequence[float]]: graph: Graph to evaluate Returns: - values of loss function for graphs + values of loss function for graph """ graph_fitness = self.objective_evaluate(graph) individual = self._create_individual(graph, graph_fitness) diff --git a/test/unit/optimizers/test_composing_history.py b/test/unit/optimizers/test_composing_history.py index 822f98cdb..2b1f455ca 100644 --- a/test/unit/optimizers/test_composing_history.py +++ b/test/unit/optimizers/test_composing_history.py @@ -310,8 +310,9 @@ def test_newly_generated_history(n_jobs: int, search_space, tuner_cls, objective tuning_iterations = 2 objectives_number = len(objective.metric_names) + objectives_number_kwarg = dict(objectives_number=objectives_number) if objectives_number > 1 else {} tuner = tuner_cls(obj_eval, search_space, MockAdapter(), iterations=tuning_iterations, n_jobs=n_jobs, - objectives_number=objectives_number, history=history) + history=history, **objectives_number_kwarg) tuner.tune(history.evolution_results[0].graph) # initial_assumptions=1 + num_of_gens + evolution_results=1 + tuning_start=1 + From 1c7ef13470ecc23456c4faa189f669b004255d86 Mon Sep 17 00:00:00 2001 From: morrisnein Date: Mon, 30 Oct 2023 03:11:43 +0300 Subject: [PATCH 13/17] fix consistency of history --- golem/core/tuning/tuner_interface.py | 10 +++++++--- test/unit/optimizers/test_composing_history.py | 6 +++--- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/golem/core/tuning/tuner_interface.py b/golem/core/tuning/tuner_interface.py index 01ad00e1d..445882293 100644 --- a/golem/core/tuning/tuner_interface.py +++ b/golem/core/tuning/tuner_interface.py @@ -261,10 +261,11 @@ def _create_individual(self, graph: OptGraph, fitness: Fitness) -> Individual: history = self.history - if history and history.evolution_results: - parent_individuals = history.evolution_results - elif history and history.tuning_start: + + if history and history.tuning_start: parent_individuals = history.tuning_start + elif history and history.evolution_results: + parent_individuals = history.evolution_results else: parent_individuals = [] tuner_name = self.__class__.__name__ @@ -283,6 +284,9 @@ def _add_to_history(self, individuals: Sequence[Individual], label: Optional[str if label is None: label = f'tuning_iteration_{self.evaluations_count}' + if label not in ('tuning_start', 'tuning_result'): + individuals = list(individuals) + individuals.append(self.init_individual) # add initial individual to maintain consistency of inheritance history.add_to_history(individuals=individuals, generation_label=label, generation_metadata=dict(tuner=tuner_name)) diff --git a/test/unit/optimizers/test_composing_history.py b/test/unit/optimizers/test_composing_history.py index 2b1f455ca..dcd0216b0 100644 --- a/test/unit/optimizers/test_composing_history.py +++ b/test/unit/optimizers/test_composing_history.py @@ -74,8 +74,7 @@ def check_individuals_in_history(history: OptHistory): uids = set() ids = set() for ind in
itertools.chain(*history.generations): - # All individuals in `history.generations` must have a native generation. - assert ind.has_native_generation + assert ind.has_native_generation # All individuals in `history.generations` must have a native generation. assert ind.fitness if ind.native_generation == 0: continue @@ -86,6 +85,7 @@ def check_individuals_in_history(history: OptHistory): assert ind.parents_from_prev_generation == list(ind.operators_from_prev_generation[0].parent_individuals) # All parents are from previous generations assert all(p.native_generation < ind.native_generation for p in ind.parents_from_prev_generation) + assert all(p in history.generations[ind.native_generation - 1] for p in ind.parents_from_prev_generation) uids.add(ind.uid) ids.add(id(ind)) @@ -308,7 +308,7 @@ def test_newly_generated_history(n_jobs: int, search_space, tuner_cls, objective opt.optimise(obj_eval) history = opt.history - tuning_iterations = 2 + tuning_iterations = 10 objectives_number = len(objective.metric_names) objectives_number_kwarg = dict(objectives_number=objectives_number) if objectives_number > 1 else {} tuner = tuner_cls(obj_eval, search_space, MockAdapter(), iterations=tuning_iterations, n_jobs=n_jobs, From 1c42b3fa46be4c83e5929a7c649fb232abb562b3 Mon Sep 17 00:00:00 2001 From: morrisnein Date: Mon, 30 Oct 2023 03:19:04 +0300 Subject: [PATCH 14/17] create constants for frequently used labels --- golem/core/optimisers/genetic/gp_optimizer.py | 12 +++++++----- golem/core/optimisers/meta/surrogate_optimizer.py | 4 +++- .../optimisers/opt_history_objects/opt_history.py | 14 ++++++++++---- golem/core/optimisers/populational_optimizer.py | 9 +++++---- .../optimisers/random/random_mutation_optimizer.py | 8 +++++--- golem/core/optimisers/random/random_search.py | 9 +++++---- golem/core/tuning/tuner_interface.py | 10 +++++----- 7 files changed, 40 insertions(+), 26 deletions(-) diff --git a/golem/core/optimisers/genetic/gp_optimizer.py b/golem/core/optimisers/genetic/gp_optimizer.py index eec6e4ae1..d15ff55f8 100644 --- a/golem/core/optimisers/genetic/gp_optimizer.py +++ b/golem/core/optimisers/genetic/gp_optimizer.py @@ -1,6 +1,6 @@ from copy import deepcopy from random import choice -from typing import Sequence, Union, Any +from typing import Any, Sequence, Union from golem.core.constants import MAX_GRAPH_GEN_ATTEMPTS from golem.core.dag.graph import Graph @@ -9,15 +9,17 @@ from golem.core.optimisers.genetic.operators.elitism import Elitism from golem.core.optimisers.genetic.operators.inheritance import Inheritance from golem.core.optimisers.genetic.operators.mutation import Mutation -from golem.core.optimisers.genetic.operators.operator import PopulationT, EvaluationOperator +from golem.core.optimisers.genetic.operators.operator import EvaluationOperator, PopulationT from golem.core.optimisers.genetic.operators.regularization import Regularization from golem.core.optimisers.genetic.operators.reproduction import ReproductionController from golem.core.optimisers.genetic.operators.selection import Selection from golem.core.optimisers.genetic.parameters.graph_depth import AdaptiveGraphDepth from golem.core.optimisers.genetic.parameters.operators_prob import init_adaptive_operators_prob -from golem.core.optimisers.genetic.parameters.population_size import init_adaptive_pop_size, PopulationSize +from golem.core.optimisers.genetic.parameters.population_size import PopulationSize, init_adaptive_pop_size from golem.core.optimisers.objective.objective import Objective from 
golem.core.optimisers.opt_history_objects.individual import Individual +from golem.core.optimisers.opt_history_objects.opt_history import EXTENDED_INITIAL_ASSUMPTIONS_LABEL, \ + INITIAL_ASSUMPTIONS_LABEL from golem.core.optimisers.optimization_parameters import GraphRequirements from golem.core.optimisers.optimizer import GraphGenerationParams from golem.core.optimisers.populational_optimizer import PopulationalOptimizer @@ -64,13 +66,13 @@ def __init__(self, def _initial_population(self, evaluator: EvaluationOperator): """ Initializes the initial population """ # Adding of initial assumptions to history as zero generation - self._update_population(evaluator(self.initial_individuals), 'initial_assumptions') + self._update_population(evaluator(self.initial_individuals), INITIAL_ASSUMPTIONS_LABEL) pop_size = self.graph_optimizer_params.pop_size if len(self.initial_individuals) < pop_size: self.initial_individuals = self._extend_population(self.initial_individuals, pop_size) # Adding of extended population to history - self._update_population(evaluator(self.initial_individuals), 'extended_initial_assumptions') + self._update_population(evaluator(self.initial_individuals), EXTENDED_INITIAL_ASSUMPTIONS_LABEL) def _extend_population(self, pop: PopulationT, target_pop_size: int) -> PopulationT: verifier = self.graph_generation_params.verifier diff --git a/golem/core/optimisers/meta/surrogate_optimizer.py b/golem/core/optimisers/meta/surrogate_optimizer.py index dfe9a7e33..037c2f663 100644 --- a/golem/core/optimisers/meta/surrogate_optimizer.py +++ b/golem/core/optimisers/meta/surrogate_optimizer.py @@ -6,6 +6,7 @@ from golem.core.optimisers.meta.surrogate_evaluator import SurrogateDispatcher from golem.core.optimisers.meta.surrogate_model import RandomValuesSurrogateModel from golem.core.optimisers.objective import Objective, ObjectiveFunction +from golem.core.optimisers.opt_history_objects.opt_history import EVOLUTION_RESULTS_LABEL from golem.core.optimisers.optimization_parameters import GraphRequirements from golem.core.optimisers.optimizer import GraphGenerationParams from golem.core.optimisers.populational_optimizer import EvaluationAttemptsError, _try_unfit_graph @@ -17,6 +18,7 @@ class SurrogateEachNgenOptimizer(EvoGraphOptimizer): Additionally, we need to pass surrogate_model object """ + def __init__(self, objective: Objective, initial_graphs: Sequence[OptGraph], @@ -54,5 +56,5 @@ def optimise(self, objective: ObjectiveFunction) -> Sequence[OptGraph]: break # Adding of new population to history self._update_population(new_population) - self._update_population(self.best_individuals, 'evolution_results') + self._update_population(self.best_individuals, EVOLUTION_RESULTS_LABEL) return [ind.graph for ind in self.best_individuals] diff --git a/golem/core/optimisers/opt_history_objects/opt_history.py b/golem/core/optimisers/opt_history_objects/opt_history.py index 4aa554033..6cc0b4f8c 100644 --- a/golem/core/optimisers/opt_history_objects/opt_history.py +++ b/golem/core/optimisers/opt_history_objects/opt_history.py @@ -18,6 +18,12 @@ if TYPE_CHECKING: from golem.core.optimisers.opt_history_objects.individual import Individual +INITIAL_ASSUMPTIONS_LABEL = 'initial_assumptions' +EXTENDED_INITIAL_ASSUMPTIONS_LABEL = 'extended_initial_assumptions' +EVOLUTION_RESULTS_LABEL = 'evolution_results' +TUNING_START_LABEL = 'tuning_start' +TUNING_RESULT_LABEL = 'tuning_result' + class OptHistory: """ @@ -212,7 +218,7 @@ def initial_assumptions(self) -> Optional[Generation]: if not self.generations: return 
None for gen in self.generations: - if gen.label == 'initial_assumptions': + if gen.label == INITIAL_ASSUMPTIONS_LABEL: return gen @property @@ -224,7 +230,7 @@ def evolution_results(self) -> Optional[Generation]: if not self.generations: return None for gen in reversed(self.generations): - if gen.label == 'evolution_results': + if gen.label == EVOLUTION_RESULTS_LABEL: return gen @property @@ -232,7 +238,7 @@ def tuning_start(self) -> Optional[Generation]: if not self.generations: return None for gen in reversed(self.generations): - if gen.label == 'tuning_start': + if gen.label == TUNING_START_LABEL: return gen @property @@ -240,7 +246,7 @@ def tuning_result(self) -> Optional[Generation]: if not self.generations: return None for gen in reversed(self.generations): - if gen.label == 'tuning_result': + if gen.label == TUNING_RESULT_LABEL: return gen @property diff --git a/golem/core/optimisers/populational_optimizer.py b/golem/core/optimisers/populational_optimizer.py index c65c86ba5..6ef0f2295 100644 --- a/golem/core/optimisers/populational_optimizer.py +++ b/golem/core/optimisers/populational_optimizer.py @@ -1,17 +1,18 @@ from abc import abstractmethod from random import choice -from typing import Any, Optional, Sequence, Dict +from typing import Any, Dict, Optional, Sequence from golem.core.constants import MIN_POP_SIZE from golem.core.dag.graph import Graph from golem.core.optimisers.archive import GenerationKeeper from golem.core.optimisers.genetic.evaluation import MultiprocessingDispatcher, SequentialDispatcher -from golem.core.optimisers.genetic.operators.operator import PopulationT, EvaluationOperator +from golem.core.optimisers.genetic.operators.operator import EvaluationOperator, PopulationT from golem.core.optimisers.objective import GraphFunction, ObjectiveFunction from golem.core.optimisers.objective.objective import Objective from golem.core.optimisers.opt_history_objects.individual import Individual +from golem.core.optimisers.opt_history_objects.opt_history import EVOLUTION_RESULTS_LABEL from golem.core.optimisers.optimization_parameters import GraphRequirements -from golem.core.optimisers.optimizer import GraphGenerationParams, GraphOptimizer, AlgorithmParameters +from golem.core.optimisers.optimizer import AlgorithmParameters, GraphGenerationParams, GraphOptimizer from golem.core.optimisers.timer import OptimisationTimer from golem.core.utilities.grouped_condition import GroupedCondition @@ -105,7 +106,7 @@ def optimise(self, objective: ObjectiveFunction) -> Sequence[Graph]: # Adding of new population to history self._update_population(new_population) pbar.close() - self._update_population(self.best_individuals, 'evolution_results') + self._update_population(self.best_individuals, EVOLUTION_RESULTS_LABEL) return [ind.graph for ind in self.best_individuals] @property diff --git a/golem/core/optimisers/random/random_mutation_optimizer.py b/golem/core/optimisers/random/random_mutation_optimizer.py index 6110dfe51..49e76e4b7 100644 --- a/golem/core/optimisers/random/random_mutation_optimizer.py +++ b/golem/core/optimisers/random/random_mutation_optimizer.py @@ -1,4 +1,4 @@ -from typing import Union, Optional, Sequence +from typing import Optional, Sequence, Union from golem.core.dag.graph import Graph from golem.core.optimisers.genetic.gp_params import GPAlgorithmParameters @@ -6,6 +6,8 @@ from golem.core.optimisers.genetic.operators.operator import EvaluationOperator, PopulationT from golem.core.optimisers.objective import Objective from 
golem.core.optimisers.opt_history_objects.individual import Individual +from golem.core.optimisers.opt_history_objects.opt_history import EXTENDED_INITIAL_ASSUMPTIONS_LABEL, \ + INITIAL_ASSUMPTIONS_LABEL from golem.core.optimisers.optimization_parameters import GraphRequirements from golem.core.optimisers.optimizer import GraphGenerationParams from golem.core.optimisers.populational_optimizer import PopulationalOptimizer @@ -38,13 +40,13 @@ def _evolve_population(self, evaluator: EvaluationOperator) -> PopulationT: return new_population def _initial_population(self, evaluator: EvaluationOperator): - self._update_population(evaluator(self.initial_individuals), 'initial_assumptions') + self._update_population(evaluator(self.initial_individuals), INITIAL_ASSUMPTIONS_LABEL) pop_size = self.graph_optimizer_params.pop_size if len(self.initial_individuals) < pop_size: self.initial_individuals = self._extend_population(self.initial_individuals, pop_size) # Adding of extended population to history - self._update_population(evaluator(self.initial_individuals), 'extended_initial_assumptions') + self._update_population(evaluator(self.initial_individuals), EXTENDED_INITIAL_ASSUMPTIONS_LABEL) class RandomMutationOptimizer(RandomSearchOptimizer): diff --git a/golem/core/optimisers/random/random_search.py b/golem/core/optimisers/random/random_search.py index da86172ae..ebe40f560 100644 --- a/golem/core/optimisers/random/random_search.py +++ b/golem/core/optimisers/random/random_search.py @@ -9,8 +9,9 @@ from golem.core.optimisers.graph import OptGraph from golem.core.optimisers.objective import Objective, ObjectiveFunction from golem.core.optimisers.opt_history_objects.individual import Individual +from golem.core.optimisers.opt_history_objects.opt_history import EVOLUTION_RESULTS_LABEL, INITIAL_ASSUMPTIONS_LABEL from golem.core.optimisers.optimization_parameters import GraphRequirements -from golem.core.optimisers.optimizer import GraphOptimizer, GraphGenerationParams +from golem.core.optimisers.optimizer import GraphGenerationParams, GraphOptimizer from golem.core.optimisers.timer import OptimisationTimer from golem.core.utilities.grouped_condition import GroupedCondition @@ -34,7 +35,7 @@ def __init__(self, 'Optimisation stopped: Time limit is reached' ).add_condition( lambda: requirements.num_of_generations is not None and - self.current_iteration_num >= requirements.num_of_generations, + self.current_iteration_num >= requirements.num_of_generations, 'Optimisation stopped: Max number of iterations reached') def optimise(self, objective: ObjectiveFunction) -> Sequence[OptGraph]: @@ -46,14 +47,14 @@ def optimise(self, objective: ObjectiveFunction) -> Sequence[OptGraph]: with self.timer, self._progressbar as pbar: self.best_individual = self._eval_initial_individual(evaluator) - self._update_best_individual(self.best_individual, 'initial_assumptions') + self._update_best_individual(self.best_individual, INITIAL_ASSUMPTIONS_LABEL) while not self.stop_optimization(): new_individual = self._generate_new_individual() evaluator([new_individual]) self.current_iteration_num += 1 self._update_best_individual(new_individual) pbar.update() - self._update_best_individual(self.best_individual, 'evolution_results') + self._update_best_individual(self.best_individual, EVOLUTION_RESULTS_LABEL) pbar.close() return [self.best_individual.graph] diff --git a/golem/core/tuning/tuner_interface.py b/golem/core/tuning/tuner_interface.py index 445882293..604c4e34c 100644 --- a/golem/core/tuning/tuner_interface.py +++ 
b/golem/core/tuning/tuner_interface.py @@ -14,7 +14,7 @@ from golem.core.optimisers.graph import OptGraph from golem.core.optimisers.objective import ObjectiveEvaluate, ObjectiveFunction from golem.core.optimisers.opt_history_objects.individual import Individual -from golem.core.optimisers.opt_history_objects.opt_history import OptHistory +from golem.core.optimisers.opt_history_objects.opt_history import OptHistory, TUNING_RESULT_LABEL, TUNING_START_LABEL from golem.core.optimisers.opt_history_objects.parent_operator import ParentOperator from golem.core.tuning.search_space import SearchSpace, convert_parameters from golem.core.utilities.data_structures import ensure_wrapped_in_sequence @@ -96,7 +96,7 @@ def init_check(self, graph: OptGraph) -> None: graph = deepcopy(graph) fitness = self.objective_evaluate(graph) self.init_individual = self._create_individual(graph, fitness) - self._add_to_history([self.init_individual], label='tuning_start') + self._add_to_history([self.init_individual], label=TUNING_START_LABEL) init_metric = self._fitness_to_metric_value(fitness) self.log.message(f'Initial graph: {graph_structure(graph)} \n' @@ -154,7 +154,7 @@ def _single_obj_final_check(self, tuned_graph: OptGraph): self.log.message('Final metric is None') self.obtained_individual = final_individual - self._add_to_history([self.obtained_individual], label='tuning_result') + self._add_to_history([self.obtained_individual], label=TUNING_RESULT_LABEL) return self.obtained_individual.graph @@ -179,7 +179,7 @@ def _multi_obj_final_check(self, tuned_graphs: Sequence[OptGraph]) -> Sequence[O self.obtained_individual = [self.init_individual] final_graphs = [self.init_individual.graph] - self._add_to_history(self.obtained_individual, label='tuning_result') + self._add_to_history(self.obtained_individual, label=TUNING_RESULT_LABEL) return final_graphs @@ -284,7 +284,7 @@ def _add_to_history(self, individuals: Sequence[Individual], label: Optional[str if label is None: label = f'tuning_iteration_{self.evaluations_count}' - if label not in ('tuning_start', 'tuning_result'): + if label not in (TUNING_START_LABEL, TUNING_RESULT_LABEL): individuals = list(individuals) individuals.append(self.init_individual) # add initial individual to maintain consistency of inheritance history.add_to_history(individuals=individuals, From c286828ac3850fc455de05f20ad88e93e85a03d7 Mon Sep 17 00:00:00 2001 From: morrisnein Date: Mon, 30 Oct 2023 13:07:44 +0300 Subject: [PATCH 15/17] wrap labels into str enumeration --- golem/core/optimisers/genetic/gp_optimizer.py | 7 +++---- .../optimisers/meta/surrogate_optimizer.py | 4 ++-- .../opt_history_objects/opt_history.py | 21 +++++++++++-------- .../core/optimisers/populational_optimizer.py | 4 ++-- .../random/random_mutation_optimizer.py | 7 +++---- golem/core/optimisers/random/random_search.py | 6 +++--- golem/core/tuning/tuner_interface.py | 10 ++++----- 7 files changed, 30 insertions(+), 29 deletions(-) diff --git a/golem/core/optimisers/genetic/gp_optimizer.py b/golem/core/optimisers/genetic/gp_optimizer.py index d15ff55f8..1a876ebfc 100644 --- a/golem/core/optimisers/genetic/gp_optimizer.py +++ b/golem/core/optimisers/genetic/gp_optimizer.py @@ -18,8 +18,7 @@ from golem.core.optimisers.genetic.parameters.population_size import PopulationSize, init_adaptive_pop_size from golem.core.optimisers.objective.objective import Objective from golem.core.optimisers.opt_history_objects.individual import Individual -from golem.core.optimisers.opt_history_objects.opt_history import 
EXTENDED_INITIAL_ASSUMPTIONS_LABEL, \ - INITIAL_ASSUMPTIONS_LABEL +from golem.core.optimisers.opt_history_objects.opt_history import OptHistoryLabels from golem.core.optimisers.optimization_parameters import GraphRequirements from golem.core.optimisers.optimizer import GraphGenerationParams from golem.core.optimisers.populational_optimizer import PopulationalOptimizer @@ -66,13 +65,13 @@ def __init__(self, def _initial_population(self, evaluator: EvaluationOperator): """ Initializes the initial population """ # Adding of initial assumptions to history as zero generation - self._update_population(evaluator(self.initial_individuals), INITIAL_ASSUMPTIONS_LABEL) + self._update_population(evaluator(self.initial_individuals), OptHistoryLabels.initial_assumptions) pop_size = self.graph_optimizer_params.pop_size if len(self.initial_individuals) < pop_size: self.initial_individuals = self._extend_population(self.initial_individuals, pop_size) # Adding of extended population to history - self._update_population(evaluator(self.initial_individuals), EXTENDED_INITIAL_ASSUMPTIONS_LABEL) + self._update_population(evaluator(self.initial_individuals), OptHistoryLabels.extended_initial_assumptions) def _extend_population(self, pop: PopulationT, target_pop_size: int) -> PopulationT: verifier = self.graph_generation_params.verifier diff --git a/golem/core/optimisers/meta/surrogate_optimizer.py b/golem/core/optimisers/meta/surrogate_optimizer.py index 037c2f663..f6b5ca70c 100644 --- a/golem/core/optimisers/meta/surrogate_optimizer.py +++ b/golem/core/optimisers/meta/surrogate_optimizer.py @@ -6,7 +6,7 @@ from golem.core.optimisers.meta.surrogate_evaluator import SurrogateDispatcher from golem.core.optimisers.meta.surrogate_model import RandomValuesSurrogateModel from golem.core.optimisers.objective import Objective, ObjectiveFunction -from golem.core.optimisers.opt_history_objects.opt_history import EVOLUTION_RESULTS_LABEL +from golem.core.optimisers.opt_history_objects.opt_history import OptHistoryLabels from golem.core.optimisers.optimization_parameters import GraphRequirements from golem.core.optimisers.optimizer import GraphGenerationParams from golem.core.optimisers.populational_optimizer import EvaluationAttemptsError, _try_unfit_graph @@ -56,5 +56,5 @@ def optimise(self, objective: ObjectiveFunction) -> Sequence[OptGraph]: break # Adding of new population to history self._update_population(new_population) - self._update_population(self.best_individuals, EVOLUTION_RESULTS_LABEL) + self._update_population(self.best_individuals, OptHistoryLabels.evolution_results) return [ind.graph for ind in self.best_individuals] diff --git a/golem/core/optimisers/opt_history_objects/opt_history.py b/golem/core/optimisers/opt_history_objects/opt_history.py index 6cc0b4f8c..d07a582a6 100644 --- a/golem/core/optimisers/opt_history_objects/opt_history.py +++ b/golem/core/optimisers/opt_history_objects/opt_history.py @@ -5,6 +5,7 @@ import itertools import os import shutil +from enum import Enum from pathlib import Path from typing import Any, Dict, List, Optional, Sequence, TYPE_CHECKING, Union @@ -18,11 +19,13 @@ if TYPE_CHECKING: from golem.core.optimisers.opt_history_objects.individual import Individual -INITIAL_ASSUMPTIONS_LABEL = 'initial_assumptions' -EXTENDED_INITIAL_ASSUMPTIONS_LABEL = 'extended_initial_assumptions' -EVOLUTION_RESULTS_LABEL = 'evolution_results' -TUNING_START_LABEL = 'tuning_start' -TUNING_RESULT_LABEL = 'tuning_result' + +class OptHistoryLabels(str, Enum): + initial_assumptions = 
'initial_assumptions' + extended_initial_assumptions = 'extended_initial_assumptions' + evolution_results = 'evolution_results' + tuning_start = 'tuning_start' + tuning_result = 'tuning_result' class OptHistory: @@ -218,7 +221,7 @@ def initial_assumptions(self) -> Optional[Generation]: if not self.generations: return None for gen in self.generations: - if gen.label == INITIAL_ASSUMPTIONS_LABEL: + if gen.label == OptHistoryLabels.initial_assumptions: return gen @property @@ -224,7 +230,7 @@ def evolution_results(self) -> Optional[Generation]: if not self.generations: return None for gen in reversed(self.generations): - if gen.label == EVOLUTION_RESULTS_LABEL: + if gen.label == OptHistoryLabels.evolution_results: return gen @property @@ -232,7 +238,7 @@ def tuning_start(self) -> Optional[Generation]: if not self.generations: return None for gen in reversed(self.generations): - if gen.label == TUNING_START_LABEL: + if gen.label == OptHistoryLabels.tuning_start: return gen @property @@ -240,7 +246,7 @@ def tuning_result(self) -> Optional[Generation]: if not self.generations: return None for gen in reversed(self.generations): - if gen.label == TUNING_RESULT_LABEL: + if gen.label == OptHistoryLabels.tuning_result: return gen @property diff --git a/golem/core/optimisers/populational_optimizer.py b/golem/core/optimisers/populational_optimizer.py index 6ef0f2295..33870682e 100644 --- a/golem/core/optimisers/populational_optimizer.py +++ b/golem/core/optimisers/populational_optimizer.py @@ -10,7 +10,7 @@ from golem.core.optimisers.objective import GraphFunction, ObjectiveFunction from golem.core.optimisers.objective.objective import Objective from golem.core.optimisers.opt_history_objects.individual import Individual -from golem.core.optimisers.opt_history_objects.opt_history import EVOLUTION_RESULTS_LABEL +from golem.core.optimisers.opt_history_objects.opt_history import OptHistoryLabels from golem.core.optimisers.optimization_parameters import GraphRequirements from golem.core.optimisers.optimizer import AlgorithmParameters, GraphGenerationParams, GraphOptimizer from golem.core.optimisers.timer import OptimisationTimer @@ -106,7 +106,7 @@ def optimise(self, objective: ObjectiveFunction) -> Sequence[Graph]: # Adding of new population to history self._update_population(new_population) pbar.close() - self._update_population(self.best_individuals, EVOLUTION_RESULTS_LABEL) + self._update_population(self.best_individuals, OptHistoryLabels.evolution_results) return [ind.graph for ind in self.best_individuals] @property diff --git a/golem/core/optimisers/random/random_mutation_optimizer.py b/golem/core/optimisers/random/random_mutation_optimizer.py index 49e76e4b7..bdf2f80d9 100644 --- a/golem/core/optimisers/random/random_mutation_optimizer.py +++ b/golem/core/optimisers/random/random_mutation_optimizer.py @@ -6,8 +6,7 @@ from golem.core.optimisers.genetic.operators.operator import EvaluationOperator, PopulationT from golem.core.optimisers.objective import Objective from golem.core.optimisers.opt_history_objects.individual import Individual -from golem.core.optimisers.opt_history_objects.opt_history import EXTENDED_INITIAL_ASSUMPTIONS_LABEL, \ - INITIAL_ASSUMPTIONS_LABEL +from golem.core.optimisers.opt_history_objects.opt_history import OptHistoryLabels from golem.core.optimisers.optimization_parameters import GraphRequirements from golem.core.optimisers.optimizer import GraphGenerationParams from golem.core.optimisers.populational_optimizer import PopulationalOptimizer @@ -40,13 +39,13 @@ def
_evolve_population(self, evaluator: EvaluationOperator) -> PopulationT: return new_population def _initial_population(self, evaluator: EvaluationOperator): - self._update_population(evaluator(self.initial_individuals), INITIAL_ASSUMPTIONS_LABEL) + self._update_population(evaluator(self.initial_individuals), OptHistoryLabels.initial_assumptions) pop_size = self.graph_optimizer_params.pop_size if len(self.initial_individuals) < pop_size: self.initial_individuals = self._extend_population(self.initial_individuals, pop_size) # Adding of extended population to history - self._update_population(evaluator(self.initial_individuals), EXTENDED_INITIAL_ASSUMPTIONS_LABEL) + self._update_population(evaluator(self.initial_individuals), OptHistoryLabels.extended_initial_assumptions) class RandomMutationOptimizer(RandomSearchOptimizer): diff --git a/golem/core/optimisers/random/random_search.py b/golem/core/optimisers/random/random_search.py index ebe40f560..0927796fb 100644 --- a/golem/core/optimisers/random/random_search.py +++ b/golem/core/optimisers/random/random_search.py @@ -9,7 +9,7 @@ from golem.core.optimisers.graph import OptGraph from golem.core.optimisers.objective import Objective, ObjectiveFunction from golem.core.optimisers.opt_history_objects.individual import Individual -from golem.core.optimisers.opt_history_objects.opt_history import EVOLUTION_RESULTS_LABEL, INITIAL_ASSUMPTIONS_LABEL +from golem.core.optimisers.opt_history_objects.opt_history import OptHistoryLabels.evolution_results, OptHistoryLabels.initial_assumptions from golem.core.optimisers.optimization_parameters import GraphRequirements from golem.core.optimisers.optimizer import GraphGenerationParams, GraphOptimizer from golem.core.optimisers.timer import OptimisationTimer @@ -47,14 +47,14 @@ def optimise(self, objective: ObjectiveFunction) -> Sequence[OptGraph]: with self.timer, self._progressbar as pbar: self.best_individual = self._eval_initial_individual(evaluator) - self._update_best_individual(self.best_individual, INITIAL_ASSUMPTIONS_LABEL) + self._update_best_individual(self.best_individual, OptHistoryLabels.initial_assumptions) while not self.stop_optimization(): new_individual = self._generate_new_individual() evaluator([new_individual]) self.current_iteration_num += 1 self._update_best_individual(new_individual) pbar.update() - self._update_best_individual(self.best_individual, EVOLUTION_RESULTS_LABEL) + self._update_best_individual(self.best_individual, OptHistoryLabels.evolution_results) pbar.close() return [self.best_individual.graph] diff --git a/golem/core/tuning/tuner_interface.py b/golem/core/tuning/tuner_interface.py index 604c4e34c..c92d89f17 100644 --- a/golem/core/tuning/tuner_interface.py +++ b/golem/core/tuning/tuner_interface.py @@ -14,7 +14,7 @@ from golem.core.optimisers.graph import OptGraph from golem.core.optimisers.objective import ObjectiveEvaluate, ObjectiveFunction from golem.core.optimisers.opt_history_objects.individual import Individual -from golem.core.optimisers.opt_history_objects.opt_history import OptHistory, TUNING_RESULT_LABEL, TUNING_START_LABEL +from golem.core.optimisers.opt_history_objects.opt_history import OptHistory, OptHistoryLabels from golem.core.optimisers.opt_history_objects.parent_operator import ParentOperator from golem.core.tuning.search_space import SearchSpace, convert_parameters from golem.core.utilities.data_structures import ensure_wrapped_in_sequence @@ -96,7 +96,7 @@ def init_check(self, graph: OptGraph) -> None: graph = deepcopy(graph) fitness = 
self.objective_evaluate(graph) self.init_individual = self._create_individual(graph, fitness) - self._add_to_history([self.init_individual], label=TUNING_START_LABEL) + self._add_to_history([self.init_individual], label=OptHistoryLabels.tuning_start) init_metric = self._fitness_to_metric_value(fitness) self.log.message(f'Initial graph: {graph_structure(graph)} \n' @@ -154,7 +154,7 @@ def _single_obj_final_check(self, tuned_graph: OptGraph): self.log.message('Final metric is None') self.obtained_individual = final_individual - self._add_to_history([self.obtained_individual], label=TUNING_RESULT_LABEL) + self._add_to_history([self.obtained_individual], label=OptHistoryLabels.tuning_result) return self.obtained_individual.graph @@ -179,7 +179,7 @@ def _multi_obj_final_check(self, tuned_graphs: Sequence[OptGraph]) -> Sequence[O self.obtained_individual = [self.init_individual] final_graphs = [self.init_individual.graph] - self._add_to_history(self.obtained_individual, label=TUNING_RESULT_LABEL) + self._add_to_history(self.obtained_individual, label=OptHistoryLabels.tuning_result) return final_graphs @@ -284,7 +284,7 @@ def _add_to_history(self, individuals: Sequence[Individual], label: Optional[str if label is None: label = f'tuning_iteration_{self.evaluations_count}' - if label not in (TUNING_START_LABEL, TUNING_RESULT_LABEL): + if label not in (OptHistoryLabels.tuning_start, OptHistoryLabels.tuning_result): individuals = list(individuals) individuals.append(self.init_individual) # add initial individual to maintain consistency of inheritance history.add_to_history(individuals=individuals, From ff5b6e632aab0aed0ea2bb94c2ea6bebe63197b8 Mon Sep 17 00:00:00 2001 From: morrisnein Date: Sat, 4 Nov 2023 14:20:24 +0300 Subject: [PATCH 16/17] fix --- golem/core/tuning/tuner_interface.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/golem/core/tuning/tuner_interface.py b/golem/core/tuning/tuner_interface.py index c92d89f17..3e6174ce9 100644 --- a/golem/core/tuning/tuner_interface.py +++ b/golem/core/tuning/tuner_interface.py @@ -257,7 +257,7 @@ def set_arg_node(graph: OptGraph, node_id: int, node_params: dict) -> OptGraph: def _stop_tuning_with_message(self, message: str): self.log.message(message) - self.obtained_fitness = self.init_individual.fitness + self.obtained_individual = self.init_individual def _create_individual(self, graph: OptGraph, fitness: Fitness) -> Individual: history = self.history @@ -277,9 +277,8 @@ def _create_individual(self, graph: OptGraph, fitness: Fitness) -> Individual: def _add_to_history(self, individuals: Sequence[Individual], label: Optional[str] = None): history = self.history - tuner_name = self.__class__.__name__ - if not history: + if history is None: return @@ -287,6 +286,7 @@ def _add_to_history(self, individuals: Sequence[Individual], label: Optional[str if label is None: label = f'tuning_iteration_{self.evaluations_count}' if label not in (OptHistoryLabels.tuning_start, OptHistoryLabels.tuning_result): individuals = list(individuals) individuals.append(self.init_individual) # add initial individual to maintain consistency of inheritance + tuner_name = self.__class__.__name__ history.add_to_history(individuals=individuals, generation_label=label, generation_metadata=dict(tuner=tuner_name)) From 02e3ecd879a9b3f00c5eecf4de0f913d0ce898fb Mon Sep 17 00:00:00 2001 From: morrisnein Date: Sat, 4 Nov 2023 15:07:17 +0300 Subject: [PATCH 17/17] fix import --- golem/core/optimisers/random/random_search.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git
a/golem/core/optimisers/random/random_search.py b/golem/core/optimisers/random/random_search.py index 0927796fb..924deae53 100644 --- a/golem/core/optimisers/random/random_search.py +++ b/golem/core/optimisers/random/random_search.py @@ -9,7 +9,7 @@ from golem.core.optimisers.graph import OptGraph from golem.core.optimisers.objective import Objective, ObjectiveFunction from golem.core.optimisers.opt_history_objects.individual import Individual -from golem.core.optimisers.opt_history_objects.opt_history import OptHistoryLabels.evolution_results, OptHistoryLabels.initial_assumptions +from golem.core.optimisers.opt_history_objects.opt_history import OptHistoryLabels from golem.core.optimisers.optimization_parameters import GraphRequirements from golem.core.optimisers.optimizer import GraphGenerationParams, GraphOptimizer from golem.core.optimisers.timer import OptimisationTimer
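A minimal usage sketch of the feature this series adds (recording a tuning run into the same OptHistory that the evolutionary optimizer writes to), mirroring the assertions in test/unit/tuning/test_tuning.py; my_search_space, my_graph and my_metric below are hypothetical placeholders for a user-defined SearchSpace, OptGraph and metric callable, not library fixtures:

    from copy import deepcopy

    from golem.core.optimisers.objective import Objective, ObjectiveEvaluate
    from golem.core.optimisers.opt_history_objects.opt_history import OptHistory
    from golem.core.tuning.optuna_tuner import OptunaTuner

    # Hypothetical user-defined inputs: a SearchSpace over node parameters,
    # an OptGraph with tunable nodes, and a callable mapping a graph to a float.
    objective = Objective({'my_metric': my_metric})
    obj_eval = ObjectiveEvaluate(objective)

    history = OptHistory()
    # adapter=None suffices for plain OptGraph inputs, as in the unit tests.
    tuner = OptunaTuner(obj_eval, my_search_space, adapter=None,
                        iterations=20, history=history)
    tuned_graph = tuner.tune(deepcopy(my_graph))

    # Each objective evaluation is stored as a 'tuning_iteration_<n>' generation,
    # framed by the tuning-start and tuning-result generations, which gives the
    # evaluations_count + 2 bookkeeping checked by the tests:
    assert len(history.generations) == tuner.evaluations_count + 2
    assert history.tuning_result is not None  # generation with the tuner's final choice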