@@ -78,6 +78,12 @@ def _get_constraint_violation(
7878 return violation_norm , num_violations
7979
8080
81+ def _assert_finite (arr : np .ndarray , name : str ):
82+ mask = ~ np .isfinite (arr ) # True for NaN, Inf, -Inf
83+ if np .any (mask ):
84+ raise ValueError (f"Encountered non-finite values in { name } at indices: { np .where (mask )[0 ]} " )
85+
86+
8187class optgra :
8288 """
8389 This class is a user defined algorithm (UDA) providing a wrapper around OPTGRA, which is written
@@ -126,6 +132,7 @@ def wrapped_fitness(x):
126132
127133 # we are using vectorisation internally -> convert to ndarray
128134 x = np .asarray (x , dtype = np .float64 )
135+ _assert_finite (x , "decision vector" ) # catch nan values
129136
130137 if khanf :
131138 # if Khan function is used, we first need to convert to pagmo parameters
@@ -149,6 +156,7 @@ def wrapped_fitness(x):
149156 # reorder constraint order, optgra expects the merit function last, pagmo has it first
150157 # equivalent to rotating in a dequeue
151158 result = np .concatenate ([result [1 :], result [0 :1 ]])
159+ _assert_finite (result , "fitness" ) # catch nan values
152160
153161 return result .tolist () # return a list
154162
@@ -173,6 +181,7 @@ def wrapped_gradient(x):
173181
174182 # we are using vectorisation internally -> convert to ndarray
175183 x = np .asarray (x , dtype = np .float64 )
184+ _assert_finite (x , "decision vector" ) # catch nan values
176185
177186 if khanf :
178187 # if Khan function is used, we first need to convert to pagmo parameters
@@ -221,6 +230,8 @@ def wrapped_gradient(x):
221230 khan_grad = khanf .eval_grad (x )
222231 result = result @ khan_grad
223232
233+ _assert_finite (result , "gradient" ) # catch nan values
234+
224235 return result .tolist () # return as a list, not ndarray
225236
226237 return wrapped_gradient
@@ -229,8 +240,8 @@ def __init__(
229240 self ,
230241 max_iterations : int = 150 ,
231242 max_correction_iterations : int = 90 ,
232- max_distance_per_iteration : int = 10 ,
233- perturbation_for_snd_order_derivatives : int = 1 ,
243+ max_distance_per_iteration : float = 10 ,
244+ perturbation_for_snd_order_derivatives : float = 1 ,
234245 variable_scaling_factors : List [float ] = [], # x_dim
235246 variable_types : List [int ] = [], # x_dim
236247 constraint_priorities : List [int ] = [], # f_dim