
Commit 679e3a2

Merge pull request #277 from optimas-org/capture_logs
Allow `FunctionEvaluator` to redirect `stdout` and `stderr` to a log file
2 parents: 6096df0 + 732ae85

File tree: 3 files changed, +88 -2 lines changed

  optimas/evaluators/function_evaluator.py
  optimas/sim_functions.py
  tests/test_function_evaluator.py

optimas/evaluators/function_evaluator.py

Lines changed: 13 additions & 1 deletion
@@ -20,15 +20,26 @@ class FunctionEvaluator(Evaluator):
         using this option, the current working directory inside the ``function``
         will be changed to the corresponding evaluation directory.
         By default, ``False``.
+    redirect_logs_to_file : bool
+        Whether to redirect the logs (stdout and stderr) of the evaluation
+        function to a file (log.out and log.err). This can be useful to keep the
+        logs of the exploration clean, preventing many processes from writing to the
+        terminal at once. If enabled, `create_evaluation_dirs` will be set to `True`.
 
     """
 
     def __init__(
-        self, function: Callable, create_evaluation_dirs: bool = False
+        self,
+        function: Callable,
+        create_evaluation_dirs: bool = False,
+        redirect_logs_to_file: bool = False,
     ) -> None:
         super().__init__(sim_function=run_function)
         self.function = function
         self._create_evaluation_dirs = create_evaluation_dirs
+        self._redirect_logs_to_file = redirect_logs_to_file
+        if self._redirect_logs_to_file:
+            self._create_evaluation_dirs = True
 
     def get_sim_specs(
         self,
@@ -43,6 +54,7 @@ def get_sim_specs(
         )
         # Add evaluation function to sim_specs.
         sim_specs["user"]["evaluation_func"] = self.function
+        sim_specs["user"]["redirect_logs_to_file"] = self._redirect_logs_to_file
         return sim_specs
 
     def get_libe_specs(self) -> Dict:
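
For context, a minimal usage sketch of the new option (the `analyze` function below is hypothetical, and the import path assumes the package's public `optimas.evaluators` module; actual usage is mirrored in the test added further down):

from optimas.evaluators import FunctionEvaluator


def analyze(input_params, output_params):
    # Hypothetical evaluation function: anything printed here ends up in
    # log.out (and stderr output in log.err) inside the trial's directory.
    print("evaluating x0 =", input_params["x0"])
    output_params["f"] = input_params["x0"] ** 2


# Enabling redirect_logs_to_file also forces create_evaluation_dirs=True,
# so each evaluation runs in its own directory holding log.out and log.err.
ev = FunctionEvaluator(function=analyze, redirect_logs_to_file=True)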

optimas/sim_functions.py

Lines changed: 13 additions & 1 deletion
@@ -1,5 +1,7 @@
 """Contains the definition of the simulation functions given to libEnsemble."""
 
+from contextlib import redirect_stderr, redirect_stdout
+
 import jinja2
 import numpy as np
 
@@ -122,6 +124,7 @@ def run_function(H, persis_info, sim_specs, libE_info):
     else:
         user_specs = sim_specs["user"]
         evaluation_func = user_specs["evaluation_func"]
+        redirect_logs_to_file = user_specs["redirect_logs_to_file"]
 
     # Prepare the array that is returned to libE
     libE_output = np.zeros(1, dtype=sim_specs["out"])
@@ -130,7 +133,16 @@ def run_function(H, persis_info, sim_specs, libE_info):
         libE_output[name].fill(np.nan)
 
     # Run evaluation.
-    evaluation_func(input_values, libE_output[0])
+    if redirect_logs_to_file:
+        with (
+            open("log.out", "w") as stdout_file,
+            open("log.err", "w") as stderr_file,
+            redirect_stdout(stdout_file),
+            redirect_stderr(stderr_file),
+        ):
+            evaluation_func(input_values, libE_output[0])
+    else:
+        evaluation_func(input_values, libE_output[0])
     calc_status = WORKER_DONE
 
     # If required, fail when the objectives are NaN.
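
The redirection above uses the standard library's contextlib helpers. A self-contained sketch of the same mechanism is below; note that redirect_stdout/redirect_stderr only rebind the Python-level sys.stdout/sys.stderr (output written directly to the OS file descriptors, e.g. by subprocesses, is not captured), and the parenthesized multi-item `with` used in the diff is official syntax only from Python 3.10:

import sys
from contextlib import redirect_stderr, redirect_stdout

# Everything printed inside the block goes to the files instead of the
# terminal; sys.stdout and sys.stderr are restored automatically on exit.
with open("log.out", "w") as out_file, open("log.err", "w") as err_file, \
        redirect_stdout(out_file), redirect_stderr(err_file):
    print("This line is written to log.out.")
    print("This line is written to log.err.", file=sys.stderr)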

tests/test_function_evaluator.py

Lines changed: 62 additions & 0 deletions
@@ -1,4 +1,5 @@
 import os
+import sys
 
 import numpy as np
 import matplotlib.pyplot as plt
@@ -25,6 +26,17 @@ def eval_func(input_params, output_params):
     plt.savefig("fig.png")
 
 
+def eval_func_logs(input_params, output_params):
+    """Evaluation function used for testing"""
+    x0 = input_params["x0"]
+    x1 = input_params["x1"]
+    result = -(x0 + 10 * np.cos(x0)) * (x1 + 5 * np.cos(x1))
+    output_params["f"] = result
+    # write something to stdout and stderr
+    print("This is a message to stdout.")
+    print("This is a message to stderr.", file=sys.stderr)
+
+
 def test_function_evaluator():
     """Test that an exploration runs successfully with a function evaluator."""
 
@@ -93,5 +105,55 @@ def test_function_evaluator():
         diags.get_evaluation_dir_path(trial_index)
 
 
+def test_function_evaluator_with_logs():
+    """Test a function evaluator with redirected stdout and stderr."""
+
+    # Define variables and objectives.
+    var1 = VaryingParameter("x0", -50.0, 5.0)
+    var2 = VaryingParameter("x1", -5.0, 15.0)
+    obj = Objective("f", minimize=False)
+
+    # Create generator.
+    gen = RandomSamplingGenerator(
+        varying_parameters=[var1, var2],
+        objectives=[obj],
+    )
+
+    # Create function evaluator.
+    ev = FunctionEvaluator(
+        function=eval_func_logs,
+        redirect_logs_to_file=True,
+    )
+
+    # Create exploration.
+    exploration = Exploration(
+        generator=gen,
+        evaluator=ev,
+        max_evals=10,
+        sim_workers=2,
+        exploration_dir_path="./tests_output/test_function_evaluator_logs",
+    )
+
+    # Run exploration.
+    exploration.run()
+
+    # Get diagnostics.
+    diags = ExplorationDiagnostics(exploration)
+
+    # Check that the logs were redirected if specified.
+    for trial_index in diags.history.trial_index:
+        trial_dir = diags.get_evaluation_dir_path(trial_index)
+        assert os.path.exists(os.path.join(trial_dir, "log.out"))
+        assert os.path.exists(os.path.join(trial_dir, "log.err"))
+        # Check contents of log files are as expected
+        with open(os.path.join(trial_dir, "log.out"), "r") as f:
+            log_out_content = f.read()
+            assert "This is a message to stdout." in log_out_content
+        with open(os.path.join(trial_dir, "log.err"), "r") as f:
+            log_err_content = f.read()
+            assert "This is a message to stderr." in log_err_content
+
+
 if __name__ == "__main__":
     test_function_evaluator()
+    test_function_evaluator_with_logs()
