Skip to content

Commit 4fe40ff

Browse files
Merge branch 'master' into 'fix/get_extra_info'
# Conflicts: # pyoptgra/optgra.py
2 parents b92105c + 21c357a commit 4fe40ff

File tree

2 files changed

+21
-2
lines changed

2 files changed

+21
-2
lines changed

pyoptgra/optgra.py

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -78,6 +78,13 @@ def _get_constraint_violation(
7878
return violation_norm, num_violations
7979

8080

81+
def _assert_finite(arr: np.ndarray, name: str):
82+
mask = ~np.isfinite(arr) # True for NaN, Inf, -Inf
83+
if np.any(mask):
84+
raise ValueError(f"Encountered non-finite values in {name} at indices: {np.where(mask)[0]}")
85+
86+
87+
8188
class optgra:
8289
"""
8390
This class is a user defined algorithm (UDA) providing a wrapper around OPTGRA, which is written
@@ -126,6 +133,7 @@ def wrapped_fitness(x):
126133

127134
# we are using vectorisation internally -> convert to ndarray
128135
x = np.asarray(x, dtype=np.float64)
136+
_assert_finite(x, "decision vector") # catch nan values
129137

130138
if khanf:
131139
# if Khan function is used, we first need to convert to pagmo parameters
@@ -149,6 +157,7 @@ def wrapped_fitness(x):
149157
# reorder constraint order, optgra expects the merit function last, pagmo has it first
150158
# equivalent to rotating in a dequeue
151159
result = np.concatenate([result[1:], result[0:1]])
160+
_assert_finite(result, "fitness") # catch nan values
152161

153162
return result.tolist() # return a list
154163

@@ -173,6 +182,7 @@ def wrapped_gradient(x):
173182

174183
# we are using vectorisation internally -> convert to ndarray
175184
x = np.asarray(x, dtype=np.float64)
185+
_assert_finite(x, "decision vector") # catch nan values
176186

177187
if khanf:
178188
# if Khan function is used, we first need to convert to pagmo parameters
@@ -221,6 +231,8 @@ def wrapped_gradient(x):
221231
khan_grad = khanf.eval_grad(x)
222232
result = result @ khan_grad
223233

234+
_assert_finite(result, "gradient") # catch nan values
235+
224236
return result.tolist() # return as a list, not ndarray
225237

226238
return wrapped_gradient
@@ -229,8 +241,8 @@ def __init__(
229241
self,
230242
max_iterations: int = 150,
231243
max_correction_iterations: int = 90,
232-
max_distance_per_iteration: int = 10,
233-
perturbation_for_snd_order_derivatives: int = 1,
244+
max_distance_per_iteration: float = 10,
245+
perturbation_for_snd_order_derivatives: float = 1,
234246
variable_scaling_factors: List[float] = [], # x_dim
235247
variable_types: List[int] = [], # x_dim
236248
constraint_priorities: List[int] = [], # f_dim

tests/python/test.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -272,6 +272,13 @@ def set_seed(self, seed):
272272
algo = pygmo.algorithm(pyoptgra.optgra(constraint_priorities=[1] * 61))
273273
algo.evolve(pop)
274274

275+
# check that a nan in the decision vector is caught
276+
x = pop.get_x()[0]
277+
x[3] = np.nan
278+
pop.set_x(0, x)
279+
with self.assertRaises(ValueError):
280+
algo.evolve(pop)
281+
275282
def basic_no_gradient_test(self):
276283
# Basic test that the call works and the result changes. No constraints, not gradients.
277284

0 commit comments

Comments
 (0)