Skip to content

Commit 7068f95

Browse files
Merge pull request #17 from esa/debug-setup
Added timeout feature for optimize calls
2 parents 590ba70 + 34f1255 commit 7068f95

File tree

8 files changed

+172
-12
lines changed

8 files changed

+172
-12
lines changed

CMakeLists.txt

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,10 @@ message(STATUS "System name: ${CMAKE_SYSTEM_NAME}")
1212
# compiler setup
1313

1414
enable_language(Fortran)
15+
# Force debug flags for Fortran, including preprocessed .F files
16+
# set(CMAKE_Fortran_FLAGS_DEBUG "-g -O0" CACHE STRING "" FORCE)
17+
# set(CMAKE_Fortran_FLAGS "-g -O0" CACHE STRING "" FORCE)
18+
1519
set(CMAKE_CXX_STANDARD 11)
1620
set(CMAKE_CXX_STANDARD_REQUIRED ON)
1721
set(CMAKE_CXX_EXTENSIONS OFF)

pyoptgra/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,3 +23,4 @@
2323
triangular_wave_fourier_grad,
2424
)
2525
from .optgra import optgra # noqa
26+
from .timeout import get_optimize_with_timeout_function # noqa

pyoptgra/core/ogexec.F

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -131,7 +131,6 @@ SUBROUTINE OGEXEC (VALVAR, VALCON, FINOPT, FINITE, CALVAL, CALDER)
131131
PYGFLA = 3 ! pygmo flag in COMMON: no covergence
132132
CALL OGEVAL (VARVAL, CONVAL, VARDER, CONDER(1:NUMCON+1,:),
133133
& CALVAL, CALDER)
134-
135134
GOTO 9999
136135
ELSEIF (NUMITE .GE. MAXITE .OR.
137136
& (NUMITE-ITECOR .GE. OPTITE-1 .AND. ITECOR .NE. 0)) THEN
@@ -150,7 +149,6 @@ SUBROUTINE OGEXEC (VALVAR, VALCON, FINOPT, FINITE, CALVAL, CALDER)
150149
PYGFLA = 2 ! pygmo flag in COMMON: constraints matched
151150
CALL OGEVAL (VARVAL, CONVAL, VARDER, CONDER(1:NUMCON+1,:),
152151
& CALVAL, CALDER)
153-
154152
GOTO 9999
155153
ENDIF
156154
C ----------------------------------------------------------------------
@@ -293,7 +291,6 @@ SUBROUTINE OGEXEC (VALVAR, VALCON, FINOPT, FINITE, CALVAL, CALDER)
293291
PYGFLA = 4 ! pygmo flag in COMMON: infeasible
294292
CALL OGEVAL (VARVAL, CONVAL, VARDER, CONDER(1:NUMCON+1,:),
295293
& CALVAL, CALDER)
296-
297294
GOTO 9999
298295
ENDIF
299296
C ----------------------------------------------------------------------
@@ -326,7 +323,6 @@ SUBROUTINE OGEXEC (VALVAR, VALCON, FINOPT, FINITE, CALVAL, CALDER)
326323
PYGFLA = 2 ! pygmo flag in COMMON: matched
327324
CALL OGEVAL (VARVAL, CONVAL, VARDER, CONDER(1:NUMCON+1,:),
328325
& CALVAL, CALDER)
329-
330326
GOTO 9999
331327
ENDIF
332328
C ======================================================================
@@ -374,7 +370,6 @@ SUBROUTINE OGEXEC (VALVAR, VALCON, FINOPT, FINITE, CALVAL, CALDER)
374370
PYGFLA = 1 ! covergence
375371
CALL OGEVAL (VARVAL, CONVAL, VARDER, CONDER(1:NUMCON+1,:),
376372
& CALVAL, CALDER)
377-
378373
C WRITE (STR,*) "DIF=",NORM2(VARVAL-VARREF)
379374
C CALL OGWRIT (1,STR)
380375
C ======================================================================

pyoptgra/core/wrapper.hpp

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -203,18 +203,23 @@ namespace optgra
203203

204204
// Ensure that at most one optgra_raii object is active at the same time
205205
optgra_mutex.lock();
206-
206+
207+
// Set number of variables and constraints. Will allocate arrays accordingly
207208
oginit_(&num_variables, &num_constraints);
209+
// Set constraint types: 1: GTE, -1: LTE, 0: EQU, -2=DERIVED DATA
208210
ogctyp_(constraint_types.data());
211+
// Set derivatives computation mode. 1: user-defined, 2: double diff., 3: single diff.
209212
ogderi_(&derivatives_computation, autodiff_deltas.data());
213+
// Set maximum distance per iteration and eps for 2nd order derivatives
210214
ogdist_(&max_distance_per_iteration, &perturbation_for_snd_order_derivatives);
211-
215+
// Set variable types. 0: free variable, 1: parameter for sensitivity
212216
ogvtyp_(variable_types.data());
213217

214218
// Haven't figured out what the others do, but maxiter is an upper bound anyway
215219
int otheriters = max_iterations; // TODO: figure out what it does.
216220
ogiter_(&max_iterations, &max_correction_iterations, &otheriters, &otheriters, &otheriters);
217221

222+
// Set optimization method flag
218223
ogomet_(&optimization_method);
219224

220225
// original OPTGRA screen output configuration
@@ -278,6 +283,7 @@ namespace optgra
278283
static_callable_store::set_x_dim(num_variables);
279284
static_callable_store::set_c_dim(num_constraints + 1);
280285

286+
// Disable sensitivity mode
281287
int sensitivity_mode = 0;
282288
ogsopt_(&sensitivity_mode);
283289

pyoptgra/optgra.py

Lines changed: 20 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@
3232
khan_function_tanh,
3333
khan_function_triangle,
3434
)
35+
from .timeout import get_optimize_with_timeout_function
3536

3637

3738
def _get_constraint_violation(
@@ -249,6 +250,7 @@ def __init__(
249250
khan_bounds: Union[str, bool] = False,
250251
optimization_method: int = 2,
251252
log_level: int = 0,
253+
timeout_seconds: Optional[float] = None,
252254
) -> None:
253255
r"""
254256
Initialize a wrapper instance for the OPTGRA algorithm.
@@ -315,6 +317,9 @@ def __init__(
315317
log_level: Control the original screen output of OPTGRA. 0 has no output,
316318
4 and higher have maximum output`. Set this to 0 if you want to use the pygmo
317319
logging system based on `set_verbosity()`.
320+
timeout_seconds: Activate timeout of the optimization process. If given, the
321+
optimization will be launched in a separate process and killed if timeout is
322+
exceeded. By default None
318323
319324
Raises:
320325
@@ -343,6 +348,7 @@ def __init__(
343348

344349
self.log_level = log_level
345350
self.verbosity = 0 # by default no pygmo-style output
351+
self.timeout_seconds = timeout_seconds
346352
self._sens_state = None
347353
self._sens_constraint_types: Union[List[int], None] = None
348354

@@ -559,7 +565,16 @@ def extract_trailing_integer(s):
559565

560566
# get initial x
561567
x0 = population.get_x()[idx]
562-
result = optimize(
568+
569+
# use timeout function (using multiprocessing module) if required
570+
if self.timeout_seconds is not None:
571+
optimize_func = get_optimize_with_timeout_function(
572+
optimize, self.timeout_seconds, x0, fitness_func
573+
)
574+
else:
575+
optimize_func = optimize
576+
577+
result = optimize_func(
563578
initial_x=khanf.eval_inv(x0) if khanf else x0,
564579
constraint_types=constraint_types,
565580
fitness_callback=fitness_func,
@@ -880,6 +895,8 @@ def get_extra_info(self) -> str:
880895
result_str += "Not converged.\n"
881896
elif self.__last_result["finopt"] == 4:
882897
result_str += "Problem appears infeasible.\n"
898+
elif self.__last_result["finopt"] == 5:
899+
result_str += "Timeout reached.\n"
883900
else:
884901
grad_str = ""
885902
result_str = (
@@ -906,6 +923,7 @@ def get_extra_info(self) -> str:
906923
+ "\toptimization_method = {optimization_method},\n"
907924
+ "\tlog_level = {log_level}\n"
908925
+ "\tverbosity = {verbosity}\n"
926+
+ "\ttimeout_seconds = {timeout_seconds}\n"
909927
+ result_str
910928
).format(
911929
max_iterations=self.max_iterations,
@@ -923,4 +941,5 @@ def get_extra_info(self) -> str:
923941
optimization_method=self.optimization_method,
924942
log_level=self.log_level,
925943
verbosity=self.verbosity,
944+
timeout_seconds=self.timeout_seconds,
926945
)

pyoptgra/timeout.py

Lines changed: 96 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,96 @@
1+
# Copyright 2008, 2021 European Space Agency
2+
#
3+
# This file is part of pyoptgra, a pygmo affiliated library.
4+
#
5+
# This Source Code Form is available under two different licenses.
6+
# You may choose to license and use it under version 3 of the
7+
# GNU General Public License or under the
8+
# ESA Software Community Licence (ESCL) 2.4 Weak Copyleft.
9+
# We explicitly reserve the right to release future versions of
10+
# Pyoptgra and Optgra under different licenses.
11+
# If copies of GPL3 and ESCL 2.4 were not distributed with this
12+
# file, you can obtain them at https://www.gnu.org/licenses/gpl-3.0.txt
13+
# and https://essr.esa.int/license/european-space-agency-community-license-v2-4-weak-copyleft
14+
15+
import multiprocessing as mp
16+
from typing import Any, Callable, List, Tuple
17+
18+
__all__ = ["get_optimize_with_timeout_function"]
19+
20+
21+
def _run_optimize(
    func: Callable[..., Tuple[List[float], List[float], int]],
    args: tuple,
    kwargs: dict,
    return_dict: Any,
) -> None:
    """Worker executed in the child process.

    Runs ``func(*args, **kwargs)`` and stores either the result (under
    ``"result"``) or the error text (under ``"error"``) in *return_dict*
    so the parent process can retrieve it.

    The broad ``except Exception`` is deliberate: any failure inside the
    child must be reported back to the parent rather than crashing the
    worker process silently.
    """
    try:
        return_dict["result"] = func(*args, **kwargs)
    except Exception as e:
        return_dict["error"] = str(e)


def get_optimize_with_timeout_function(
    optimize_func: Callable[..., Tuple[List[float], List[float], int]],
    timeout_seconds: float,
    x_timeout: List[float],
    fitness_func: Callable,
) -> Callable[..., Tuple[List[float], List[float], int]]:
    """
    Wrap the Pybind11-based `optimize` function with a timeout safeguard.

    Parameters
    ----------
    optimize_func : callable
        The Pybind11-bound `optimize` function to execute.
        Must return a tuple `(x_opt, f_opt, status)`.
    timeout_seconds : float
        Maximum runtime in seconds before the optimizer process is terminated.
    x_timeout : List[float]
        Decision vector to return on timeout (or on child-process failure).
    fitness_func : callable
        Fitness function evaluated on `x_timeout` to build the fallback
        return value.

    Returns
    -------
    callable
        A wrapped version of `optimize_func` with the same signature.
        When called:

        * Returns `(x_opt, multipliers, status)` if the optimizer completes.
        * Returns `(x_timeout, fitness_func(x_timeout), 5)` if the optimizer
          exceeds the timeout or the child process reports a failure.

    Notes
    -----
    - The wrapped function runs the optimizer in a separate process using
      :mod:`multiprocessing` to allow safe termination if the Fortran backend hangs.
    - Status code `5` indicates a timeout (or child failure) occurred.
    - This approach ensures that the main Python process remains responsive
      and that no hanging Fortran thread blocks program exit.
    - The :class:`multiprocessing.Manager` is used as a context manager so
      its helper process is always shut down — previously it was left to
      garbage collection, leaking one process per call.
    """

    def wrapped_optimize(*args, **kwargs) -> Tuple[List[float], List[float], int]:
        # The Manager owns a helper process; the context manager guarantees
        # it is shut down even if the optimizer times out or raises.
        with mp.Manager() as manager:
            return_dict = manager.dict()

            process = mp.Process(
                target=_run_optimize, args=(optimize_func, args, kwargs, return_dict)
            )
            process.start()
            process.join(timeout_seconds)

            if process.is_alive():
                print(
                    f"⚠️ Optimization timed out after {timeout_seconds} seconds — terminating process."
                )
                process.terminate()
                process.join()
                # Return timeout status code instead of raising
                return (x_timeout, fitness_func(x_timeout), 5)

            # Copy out of the manager proxy BEFORE the manager shuts down;
            # the proxy becomes unusable once the `with` block exits.
            results = dict(return_dict)

        if "error" in results:
            print(f"⚠️ Optimizer process failed: {results['error']}")
            return (x_timeout, fitness_func(x_timeout), 5)

        # If the child died without writing anything (e.g. hard crash),
        # fall back to the timeout-style return value.
        return results.get("result", (x_timeout, fitness_func(x_timeout), 5))

    return wrapped_optimize

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ license = { text = "GPL-3.0 or ESCL-2.4" }
1010
name = "pyoptgra"
1111
readme = "README.rst"
1212
requires-python = ">=3.9"
13-
version = "1.2.2"
13+
version = "1.3.0"
1414

1515
[build-system]
1616
build-backend = "scikit_build_core.build"

tests/python/test.py

Lines changed: 42 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
# and https://essr.esa.int/license/european-space-agency-community-license-v2-4-weak-copyleft
1414

1515
import unittest
16-
16+
import time
1717
import numpy as np
1818

1919
import pygmo
@@ -24,6 +24,10 @@
2424
# problem class with numerical gradient, equality and inequality constraints from
2525
# https://esa.github.io/pygmo2/tutorials/coding_udp_constrained.html
2626
class luksan_vlcek:
27+
def __init__(self, sleep_per_call: "float | None" = None):
    # Optional delay (in seconds) added to every fitness() call; used by the
    # test suite to make evaluations slow enough to trigger the timeout path.
    # NOTE(review): upstream annotated this `int`, but fractional seconds
    # (e.g. 0.1) are passed in practice — hence float | None.
    self.sleep_per_call = sleep_per_call
30+
2731
def fitness(self, x):
2832
obj = 0
2933
for i in range(3):
@@ -78,6 +82,8 @@ def fitness(self, x):
7882
ci2 = -(
7983
8 * x[5] * (x[5] ** 2 - x[4]) - 2 * (1 - x[5]) + x[4] ** 2 - x[3] + x[3] ** 2 - x[4]
8084
)
85+
if self.sleep_per_call is not None:
86+
time.sleep(self.sleep_per_call)
8187
return [obj, ce1, ce2, ce3, ce4, ci1, ci2]
8288

8389
def get_bounds(self):
@@ -169,6 +175,7 @@ def runTest(self):
169175
self.get_extra_info_test()
170176
self.verbosity_test()
171177
self.triangle_test()
178+
self.timeout_test()
172179

173180
def constructor_test(self):
174181
# Check that invalid optimization method is rejected
@@ -334,7 +341,7 @@ def gradient_with_constraints_test(self):
334341
# objective function
335342
self.assertLess(pop.champion_f[0], 2.26)
336343
# checking exact value as regression test
337-
self.assertEqual(pop.champion_f[0], 0.82929210248477)
344+
self.assertAlmostEqual(pop.champion_f[0], 0.82929210248477)
338345

339346
# equality constraints
340347
for i in [1, 2, 3, 4]:
@@ -366,7 +373,7 @@ def gradient_with_constraints_test(self):
366373
# objective function
367374
self.assertLess(pop2.champion_f[0], 2.26)
368375
# checking exact value as regression test
369-
self.assertEqual(pop2.champion_f[0], 0.8292921025820391)
376+
self.assertAlmostEqual(pop2.champion_f[0], 0.8292921025820391)
370377

371378
# equality constraints
372379
for i in [1, 2, 3, 4]:
@@ -878,6 +885,38 @@ def triangle_test(self):
878885
tri_grad = pyoptgra.triangular_wave_fourier_grad(0, x)
879886
np.testing.assert_array_equal(tri_grad, np.zeros_like(x, dtype=np.float64))
880887

888+
def timeout_test(self):
    """Check that optgra's timeout_seconds option interrupts a slow evolve.

    Uses the Luksan-Vlcek problem with an artificial 0.1 s sleep per fitness
    call so the optimizer cannot finish within the 2 s timeout, then verifies
    both the reported status and the measured wall-clock time.
    """
    # 1. Build the problem with a 0.1 s sleep per fitness call (see
    # luksan_vlcek.__init__), so evaluations are slow enough to hit the timeout
    prob = pygmo.problem(luksan_vlcek(0.1))
    prob.c_tol = 1e-7
    og = pyoptgra.optgra(
        optimization_method=1,
        max_iterations=100,
        max_correction_iterations=100,
        max_distance_per_iteration=10,
        timeout_seconds=2,
    )
    og.set_verbosity(1)
    algo = pygmo.algorithm(og)
    pop = pygmo.population(prob, size=0, seed=1)  # empty population
    pop.push_back([0.5, 0.5, -0.5, 0.4, 0.3, 0.7])  # add initial guess

    # Calling optgra
    # 2. Measure execution time of evolve()
    start_time = time.time()
    pop = algo.evolve(pop)
    elapsed = time.time() - start_time

    # 3. Check that the timeout status (finopt == 5) is recorded in the
    # algorithm's extra-info string
    self.assertIn("Timeout reached", algo.get_extra_info())

    # 4. Check that runtime is within expected bounds around the 2 s timeout.
    # Allow small overhead (±0.5 s on the low side, +1 s for process teardown)
    self.assertTrue(
        1.5 <= elapsed <= 3.0, msg=f"Evolve took {elapsed:.2f} s, expected about 2 s"
    )
919+
881920

882921
if __name__ == "__main__":
883922
unittest.main()

0 commit comments

Comments
 (0)