@@ -36,13 +36,15 @@ class Status(enum.Enum):
   NO_IMPROVEMENT = enum.auto()
   MAX_ITER = enum.auto()
   DX_TOL = enum.auto()
+  G_TOL = enum.auto()
 
 
 _STATUS_MESSAGE = {
     Status.FACTORIZATION_FAILED: 'factorization failed.',
     Status.NO_IMPROVEMENT: 'insufficient reduction.',
     Status.MAX_ITER: 'maximum iterations reached.',
     Status.DX_TOL: 'norm(dx) < tol.',
+    Status.G_TOL: 'norm(gradient) < tol.',
 }
 
 
@@ -57,6 +59,7 @@ class IterLog:
     regularizer: Value of the regularizer used for this iteration.
     residual: Optional value of the residual at the candidate.
     jacobian: Optional value of the Jacobian at the candidate.
+    grad: Optional value of the gradient at the candidate.
     step: Optional change in decision variable during this iteration.
   """
 
@@ -66,6 +69,7 @@ class IterLog:
   regularizer: np.float64
   residual: Optional[np.ndarray] = None
   jacobian: Optional[np.ndarray] = None
+  grad: Optional[np.ndarray] = None
   step: Optional[np.ndarray] = None
 
 
@@ -141,11 +145,12 @@ def least_squares(
     bounds: Optional[Sequence[np.ndarray]] = None,
     jacobian: Optional[Callable[[np.ndarray, np.ndarray], np.ndarray]] = None,
     norm: Norm = Quadratic(),
-    eps: float = 1e-6,
+    eps: float = np.finfo(np.float64).eps ** 0.5,
     mu_min: float = 1e-6,
     mu_max: float = 1e8,
-    mu_factor: float = 10.0 ** 0.1,
-    tol: float = 1e-6,
+    mu_factor: float = 10.0 ** 0.1,
+    xtol: float = 1e-8,
+    gtol: float = 1e-8,
     max_iter: int = 100,
     verbose: Union[Verbosity, int] = Verbosity.ITER,
     output: Optional[TextIO] = None,
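The new eps default is the textbook forward-difference step: machine epsilon to the power 0.5, about 1.5e-8 for float64. A quick standalone check (not part of the diff) of why that exponent is the sweet spot, truncation error shrinks with the step while rounding error grows as the step shrinks:

import numpy as np

# Forward-difference error for f = sin at x = 1.0 as the step h varies.
# Truncation error ~ h, rounding error ~ eps/h; the total is minimized
# near h = sqrt(eps) ~ 1.5e-8, which is the new default for `eps`.
x, exact = 1.0, np.cos(1.0)
for h in (1e-4, 1e-8, 1e-12):
  approx = (np.sin(x + h) - np.sin(x)) / h
  print(f'h={h:.0e}  error={abs(approx - exact):.1e}')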
@@ -166,7 +171,8 @@ def least_squares(
     mu_min: Minimum value of the regularizer.
     mu_max: Maximum value of the regularizer.
     mu_factor: Factor for increasing or decreasing the regularizer.
-    tol: Termination tolerance on the step size.
+    xtol: Termination tolerance on relative step size.
+    gtol: Termination tolerance on gradient norm.
     max_iter: Maximum number of iterations.
     verbose: Verbosity level.
     output: Optional file or StringIO to which to print messages.
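For orientation, a hypothetical call exercising the new arguments. The Rosenbrock residual and starting point are invented, and the (solution, trace) return shape is inferred from the trace handling below rather than guaranteed by this diff:

import numpy as np

# Hypothetical usage sketch; names and shapes inferred from this file,
# not fixed by the diff. The residual takes an (n, k) batch of column
# vectors because jacobian_fd perturbs all n coordinates in one call.
def rosenbrock_residual(x):
  return np.vstack((1.0 - x[0], 10.0 * (x[1] - x[0] ** 2)))

x0 = np.array([[-1.0], [1.0]])
# Assumed to return the solution and the list of IterLog entries.
x, trace = least_squares(x0, rosenbrock_residual, xtol=1e-10, gtol=1e-10)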
@@ -281,6 +287,23 @@ def increase_mu(mu):
     # Get gradient, Gauss-Newton Hessian.
     grad, hess = norm.grad_hess(r, jac)
 
+    # Get free (unclamped) gradient.
+    if bounds is None:
+      grad_free = grad
+    else:
+      clamped_lower = (x == bounds[0]) & (grad > 0)
+      clamped_upper = (x == bounds[1]) & (grad < 0)
+      clamped = clamped_lower | clamped_upper
+      grad_free = grad[~clamped]
+
+    # Check termination condition on gradient norm.
+    g_norm = np.linalg.norm(grad_free)
+    if g_norm <= gtol:
+      status = Status.G_TOL
+      if g_norm == 0:
+        print('Zero gradient norm: exact minimum found?', file=output)
+      break
+
     # Bounds relative to x
     dlower = None if bounds is None else bounds[0] - x
     dupper = None if bounds is None else bounds[1] - x
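The mask above is the first-order optimality condition for box constraints: a gradient component at an active bound that points further out of the feasible set cannot be reduced by any feasible step, so it is excluded from the convergence test. A standalone illustration with made-up numbers:

import numpy as np

# Made-up 3-D example of the clamping mask used above.
lower, upper = np.zeros((3, 1)), np.ones((3, 1))
x = np.array([[0.0], [0.5], [1.0]])       # x[0] at lower bound, x[2] at upper.
grad = np.array([[2.0], [-1.0], [-3.0]])  # Descent (-grad) would push x[0]
                                          # and x[2] out of bounds: clamped.
clamped = ((x == lower) & (grad > 0)) | ((x == upper) & (grad < 0))
print(grad[~clamped])                     # [-1.]: only the free component counts.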
@@ -353,13 +376,15 @@ def increase_mu(mu):
     # Append log to trace, call iter_callback.
     log = IterLog(candidate=x, objective=y, reduction=reduction, regularizer=mu)
     if verbose >= Verbosity.FULLITER.value:
-      log = dataclasses.replace(log, residual=r, jacobian=jac, step=dx)
+      log = dataclasses.replace(
+          log, residual=r, jacobian=jac, grad=grad, step=dx
+      )
     trace.append(log)
     if iter_callback is not None:
       iter_callback(trace)
 
-    # Check for success.
-    if dx_norm < tol:
+    # Check termination condition on step norm.
+    if dx_norm < xtol * (xtol + np.linalg.norm(x)):
       status = Status.DX_TOL
       break
 
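The new step test replaces the absolute tolerance with the relative form also used by scipy.optimize.least_squares: away from the origin the threshold scales as xtol * ||x||, while the xtol**2 term keeps it from collapsing to zero when x itself is near zero. For the default xtol = 1e-8:

# Threshold of the new step test, xtol * (xtol + ||x||), at the default xtol.
xtol = 1e-8
for x_norm in (0.0, 1.0, 1e6):
  print(f'||x|| = {x_norm:.0e}  ->  threshold = {xtol * (xtol + x_norm):.2e}')
# ||x|| = 0e+00  ->  threshold = 1.00e-16  (absolute floor of xtol**2)
# ||x|| = 1e+00  ->  threshold = 1.00e-08
# ||x|| = 1e+06  ->  threshold = 1.00e-02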
@@ -376,7 +401,7 @@ def increase_mu(mu):
   # Append final log to trace, call iter_callback.
   # Note: unlike other iter logs, values are computed at the end point.
   yfinal = norm.value(r)
-  red = np.float64(0.0)  # No reduction sice we didn't take a step.
+  red = np.float64(0.0)  # No reduction since we didn't take a step.
   log = IterLog(candidate=x, objective=yfinal, reduction=red, regularizer=mu)
   trace.append(log)
   if iter_callback is not None:
@@ -430,13 +455,15 @@ def jacobian_fd(
   """
   n = x.size
   if bounds is None:
-    eps_vec = eps * np.ones(n)
+    eps_vec = eps * np.ones((n, 1))
   else:
     mid = 0.5 * (bounds[1] - bounds[0])
-    eps_vec = np.where(x > mid, -eps, eps).flatten()
-  xh = x + np.diag(eps_vec)
+    eps_vec = np.where(x > mid, -eps, eps)
+  eps_vec *= np.maximum(1.0, np.abs(x))
+  eps_vec = (eps_vec + x) - x
+  xh = x + np.diag(eps_vec.flatten())
   rh = residual(xh)
-  jac = (rh - r) / eps_vec
+  jac = (rh - r) / eps_vec.T
   return jac, n_res + n
 
 
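Two refinements to the finite-difference step in jacobian_fd: scaling by max(1, |x|) makes the perturbation relative for large coordinates, and (eps_vec + x) - x rounds each step to one that is exactly representable at x, eliminating the error contributed by the step size itself in the divided difference. A minimal demonstration of the rounding trick:

import numpy as np

# (h + x) - x rounds the nominal step h to the nearest step that is
# exactly representable at x, so x + h_repr differs from x by exactly h_repr.
x = np.float64(0.1)
h = np.float64(1e-8)
h_repr = (h + x) - x
print(h == h_repr)                 # Typically False: h was rounded.
print((x + h_repr) - x == h_repr)  # True: the realized step is exact.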