| 1 | +""" |
| 2 | +OpenEvolve <-> lm-evaluation-harness adapter |
| 3 | +
|
| 4 | +Implements generation only, no loglikelihood. Tasks such as GSM8K / BoolQ / MMLU-Math / |
| 5 | +AQUA-RAT and most code suites should work fine because they grade on the generated |
| 6 | +answer string. |
| 7 | +""" |

from __future__ import annotations
import subprocess, tempfile, json, os, argparse, math, pathlib
from pathlib import Path
from typing import List, Dict, Tuple, Any, Iterable

import lm_eval
from lm_eval.tasks import TaskManager
from lm_eval.evaluator import evaluate
from lm_eval.api.model import LM
from lm_eval.api.registry import register_model
from datetime import datetime

# cd two directories up from this file's folder (e.g. the repo root when this adapter
# lives in examples/lm_eval/), so the relative paths used below resolve correctly
os.chdir(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

PIPELINE_CMD = ["python3", "openevolve-run.py"]


@register_model("openevolve")
class OpenEvolve(LM):
    def __init__(
        self,
        init_file: str = "initial_content_stub.txt",
        evaluator_file: str = "evaluator_stub.py",
        config_file: str = "config.yml",
        iterations: int = 5,
        extra_param: List[str] | None = None,
        **kwargs,
    ):
        super().__init__()
        self.init_file = init_file
        self.evaluator_file = evaluator_file
        self.iterations = iterations
        # avoid a mutable default argument; fall back to an empty list
        self.extra_param = extra_param if extra_param is not None else []
        self.config_file = config_file

        # folder must match prompt:template_dir in config.yml!
        self.prompt_path = "examples/lm_eval/prompts/system_message.txt"
        self.evaluator_prompt_path = "examples/lm_eval/prompts/evaluator_system_message.txt"
        self.best_path = "examples/lm_eval/openevolve_output/best/best_program.txt"
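        # For reference, the relevant config.yml section presumably looks like this
        # (a sketch inferred from the comment above and the paths here; exact keys
        # and values are assumptions, check your config):
        #
        #   prompt:
        #     template_dir: examples/lm_eval/prompts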
        self.base_system_message = "You are an expert task solver, with a lot of commonsense, math, language and coding knowledge.\n\nConsider this task:\n```{prompt}```"

    def generate(self, prompts: List[str], max_gen_toks: int | None = None, stop=None, **kwargs):
        outs = []
        for prompt in prompts:
            # Task prompt becomes the system message. User prompt is the evolutionary logic.
            # We create temporary prompt files with the system message.
            with Path(self.prompt_path).open("w") as f:
                f.write(self.base_system_message.format(prompt=prompt))

            with Path(self.evaluator_prompt_path).open("w") as f:
                f.write(self.base_system_message.format(prompt=prompt))

            cmd = (
                PIPELINE_CMD
                + ["--config", self.config_file]
                + ["--iterations", str(self.iterations)]
                + self.extra_param
                + [self.init_file, self.evaluator_file]
            )
            print(f"Running command: {' '.join(cmd)}")
            try:
                res = subprocess.run(cmd, capture_output=True, text=True, check=True)
                text = res.stdout.strip()
                print(f"Process output: {text}")
            except subprocess.CalledProcessError as e:
                print(f"Command failed with return code {e.returncode}")
                print(f"stderr: {e.stderr}")
                text = ""

            print(f"# Prompt: {prompt}")
            with Path(self.best_path).open("r") as f:
                best = f.read().strip()
            print(f"# Answer: {best}")

            # honour stop tokens
            if stop:
                for s in stop:
                    idx = best.find(s)
                    if idx != -1:
                        best = best[:idx]
                        break
            outs.append(best)
        return outs

    # for tasks that ask for log likelihood, indicate that it is unsupported
    def loglikelihood(self, requests: Iterable[Tuple[str, str]], **kw):
        # return [(-math.inf, False) for _ in requests]
        raise NotImplementedError

    def loglikelihood_rolling(self, requests: Iterable[str], **kw):
        # return [(-math.inf, False) for _ in requests]
        raise NotImplementedError
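    # Note: returning (-math.inf, False) for every request (the commented-out lines
    # above) would let loglikelihood-scored tasks run instead of erroring out, but the
    # resulting scores would be meaningless; raising keeps the limitation explicit.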

    def generate_until(self, requests: Iterable[Any], **kw) -> List[str]:
        ctxs, stops = [], []

        # 1) normalise every request into a (context, stop sequences) pair
        for req in requests:
            # ---------------- old: plain tuple ----------------
            if isinstance(req, tuple):
                ctx, until = req

            # -------------- new: Instance object --------------
            else:
                ctx = req.args[0]  # first positional arg
                until = []
                # a second positional arg may be a list of stop strings (older style)
                # or a gen_kwargs dict carrying an "until" entry (newer harness versions)
                if len(req.args) > 1:
                    arg = req.args[1]
                    if isinstance(arg, (list, tuple)):
                        until = list(arg)
                    elif isinstance(arg, dict):
                        u = arg.get("until", [])
                        until = [u] if isinstance(u, str) else list(u or [])

            ctxs.append(ctx)
            stops.append(until)

        # 2) run the generator once per context
        gens = self.generate(ctxs, stop=None)

        # 3) post-trim at the first stop sequence
        cleaned = []
        for g, until in zip(gens, stops):
            for s in until:
                idx = g.find(s)
                if idx != -1:
                    g = g[:idx]
                    break
            cleaned.append(g)
        return cleaned
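    # Request shapes handled above, for reference (the `.args` layout is an assumption
    # about the harness version in use, with a safe fallback of no stop sequences):
    #   ("Q: 2+2?\nA:", ["\n"])                            # old-style tuple
    #   req.args == ("Q: 2+2?\nA:", {"until": ["\n"]})     # new-style Instance object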


if __name__ == "__main__":
    # CLI arguments: config, content/evaluator stubs, iterations, per-task example limit,
    # tasks and output path
    p = argparse.ArgumentParser(
        description="OpenEvolve <-> lm-evaluation-harness adapter.",
    )
    p.add_argument("--config", default="examples/lm_eval/config.yml", help="config file")
    p.add_argument(
        "--init_file",
        default="examples/lm_eval/initial_content_stub.txt",
        help="initial content file",
    )
    p.add_argument(
        "--evaluator_file", default="examples/lm_eval/evaluator_stub.py", help="evaluator file"
    )
    p.add_argument("--iterations", default=5, type=int, help="number of iterations")
    p.add_argument(
        "--limit",
        default=None,
        type=int,
        help="limit the number of examples per task that are executed",
    )
    # p.add_argument("--tasks", default="boolq,gsm8k,mmlu", help="comma-list of tasks to evaluate")
    p.add_argument("--tasks", default="gsm8k", help="comma-separated list of tasks to evaluate")
    p.add_argument("--output_path", default="results", help="output path for results")
    args = p.parse_args()

    lm_obj = OpenEvolve(
        init_file=args.init_file,
        evaluator_file=args.evaluator_file,
        iterations=args.iterations,
        config_file=args.config,
    )

    task_dict = lm_eval.tasks.get_task_dict(args.tasks.split(","))

    results = evaluate(
        lm=lm_obj,
        task_dict=task_dict,
        limit=args.limit,
    )

    # write out the results
    pathlib.Path(args.output_path).mkdir(exist_ok=True)

    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    results_path = pathlib.Path(args.output_path) / f"{timestamp}_iter{args.iterations}.json"

    with results_path.open("w") as f:
        json.dump(results, f, indent=2)

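    # The summary below assumes the usual harness result layout, roughly
    #   results["results"] == {"gsm8k": {"exact_match,strict-match": 0.42, ...}, ...}
    # (metric names and values are illustrative and vary across tasks and harness
    # versions; we simply take the first numeric value found per task)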
    # print result summary
    short = {}
    for task, metrics in results["results"].items():
        # pick the first value that is a real number
        for key, val in metrics.items():
            if isinstance(val, (int, float)):
                short[task] = (key, val)  # store *both* name & value
                break

    print(f"Full results written to {results_path}\n")
    print("Headline metrics:")
    for task, (name, value) in short.items():
        print(f" {task:<15} {name:<12} {value:.3%}")

    print("\nNote: Never cite the overall average when some components were skipped!")