#' @name mlr_optimizers_nloptr
#'
#' @description
#' `OptimizerBatchNLoptr` class that implements non-linear optimization.
#' Calls [nloptr::nloptr()] from package \CRANpkg{nloptr}.
#'
#' @section Parameters:
#' \describe{
#' \item{`ftol_rel`}{`numeric(1)`}
#' \item{`ftol_abs`}{`numeric(1)`}
#' \item{`start_values`}{`character(1)`\cr
#' Create `random` start values or based on `center` of search space?
#' In the latter case, it is the center of the parameters before a trafo is applied.}
#' \item{`approximate_eval_grad_f`}{`logical(1)`\cr
#' Should gradients be numerically approximated via finite differences ([nloptr::nl.grad])?
#' Only required for certain algorithms.
#' Note that function evaluations required for the numerical gradient approximation will be logged as usual
#' and are not treated differently than regular function evaluations by, e.g., [Terminator]s.}
#' }
#'
#' For the meaning of the control parameters, see [nloptr::nloptr()] and
@@ -82,24 +87,27 @@ OptimizerBatchNLoptr = R6Class("OptimizerBatchNLoptr", inherit = OptimizerBatch,
8287 # ' Creates a new instance of this [R6][R6::R6Class] class.
8388 initialize = function () {
8489 param_set = ps(
85- algorithm = p_fct(levels = c(
86- " NLOPT_GN_DIRECT_L" , " NLOPT_GN_DIRECT_L_RAND" , " NLOPT_GN_DIRECT_NOSCAL" , " NLOPT_GN_DIRECT_L_NOSCAL" ,
87- " NLOPT_GN_DIRECT_L_RAND_NOSCAL" , " NLOPT_GN_ORIG_DIRECT" , " NLOPT_GN_ORIG_DIRECT_L" , " NLOPT_GD_STOGO" ,
88- " NLOPT_GD_STOGO_RAND" , " NLOPT_LD_SLSQP" , " NLOPT_LD_LBFGS_NOCEDAL" , " NLOPT_LD_LBFGS" , " NLOPT_LN_PRAXIS" ,
89- " NLOPT_LD_VAR1" , " NLOPT_LD_VAR2" , " NLOPT_LD_TNEWTON" , " NLOPT_LD_TNEWTON_RESTART" , " NLOPT_LD_TNEWTON_PRECOND" ,
90- " NLOPT_LD_TNEWTON_PRECOND_RESTART" , " NLOPT_GN_CRS2_LM" , " NLOPT_GN_MLSL" , " NLOPT_GD_MLSL" , " NLOPT_GN_MLSL_LDS" ,
91- " NLOPT_GD_MLSL_LDS" , " NLOPT_LD_MMA" , " NLOPT_LD_CCSAQ" , " NLOPT_LN_COBYLA" , " NLOPT_LN_NEWUOA" ,
92- " NLOPT_LN_NEWUOA_BOUND" , " NLOPT_LN_NELDERMEAD" , " NLOPT_LN_SBPLX" , " NLOPT_LN_AUGLAG" , " NLOPT_LD_AUGLAG" ,
93- " NLOPT_LN_AUGLAG_EQ" , " NLOPT_LD_AUGLAG_EQ" , " NLOPT_LN_BOBYQA" , " NLOPT_GN_ISRES" ),
94- tags = " required" ),
90+ algorithm = p_fct(
91+ levels = c(
92+ " NLOPT_GN_DIRECT_L" , " NLOPT_GN_DIRECT_L_RAND" , " NLOPT_GN_DIRECT_NOSCAL" , " NLOPT_GN_DIRECT_L_NOSCAL" ,
93+ " NLOPT_GN_DIRECT_L_RAND_NOSCAL" , " NLOPT_GN_ORIG_DIRECT" , " NLOPT_GN_ORIG_DIRECT_L" , " NLOPT_GD_STOGO" ,
94+ " NLOPT_GD_STOGO_RAND" , " NLOPT_LD_SLSQP" , " NLOPT_LD_LBFGS_NOCEDAL" , " NLOPT_LD_LBFGS" , " NLOPT_LN_PRAXIS" ,
95+ " NLOPT_LD_VAR1" , " NLOPT_LD_VAR2" , " NLOPT_LD_TNEWTON" , " NLOPT_LD_TNEWTON_RESTART" , " NLOPT_LD_TNEWTON_PRECOND" ,
96+ " NLOPT_LD_TNEWTON_PRECOND_RESTART" , " NLOPT_GN_CRS2_LM" , " NLOPT_GN_MLSL" , " NLOPT_GD_MLSL" , " NLOPT_GN_MLSL_LDS" ,
97+ " NLOPT_GD_MLSL_LDS" , " NLOPT_LD_MMA" , " NLOPT_LD_CCSAQ" , " NLOPT_LN_COBYLA" , " NLOPT_LN_NEWUOA" ,
98+ " NLOPT_LN_NEWUOA_BOUND" , " NLOPT_LN_NELDERMEAD" , " NLOPT_LN_SBPLX" , " NLOPT_LN_AUGLAG" , " NLOPT_LD_AUGLAG" ,
99+ " NLOPT_LN_AUGLAG_EQ" , " NLOPT_LD_AUGLAG_EQ" , " NLOPT_LN_BOBYQA" , " NLOPT_GN_ISRES" ),
100+ tags = " required" ),
95101 eval_g_ineq = p_uty(default = NULL ),
96102 xtol_rel = p_dbl(default = 10 ^- 4 , lower = 0 , upper = Inf , special_vals = list (- 1 )),
97103 xtol_abs = p_dbl(default = 0 , lower = 0 , upper = Inf , special_vals = list (- 1 )),
98104 ftol_rel = p_dbl(default = 0 , lower = 0 , upper = Inf , special_vals = list (- 1 )),
99105 ftol_abs = p_dbl(default = 0 , lower = 0 , upper = Inf , special_vals = list (- 1 )),
100- start_values = p_fct(default = " random" , levels = c(" random" , " center" ))
106+ start_values = p_fct(default = " random" , levels = c(" random" , " center" )),
107+ approximate_eval_grad_f = p_lgl(default = FALSE )
101108 )
102109 param_set $ values $ start_values = " random"
110+ param_set $ values $ approximate_eval_grad_f = FALSE
103111
104112 super $ initialize(
105113 id = " nloptr" ,
@@ -116,18 +124,34 @@ OptimizerBatchNLoptr = R6Class("OptimizerBatchNLoptr", inherit = OptimizerBatch,
116124 private = list (
117125 .optimize = function (inst ) {
118126 pv = self $ param_set $ values
127+
119128 pv $ x0 = search_start(inst $ search_space , type = pv $ start_values )
120129 pv $ start_values = NULL
130+
131+ eval_grad_f = if (pv $ approximate_eval_grad_f ) {
132+ function (x ) {
133+ invoke(nloptr :: nl.grad , x0 = x , fn = inst $ objective_function )
134+ }
135+ } else {
136+ NULL
137+ }
138+ pv $ eval_grad_f = eval_grad_f
139+ pv $ approximate_eval_grad_f = NULL
140+
121141 opts = pv [which(names(pv ) %nin % formalArgs(nloptr :: nloptr ))]
122- # Deactivate termination criterions which are replaced by Terminators
142+ # deactivate termination criterions which are replaced by Terminators
123143 opts = insert_named(opts , list (
124144 maxeval = - 1 ,
125145 maxtime = - 1 ,
126146 stopval = - Inf
127147 ))
128148 pv = pv [which(names(pv ) %nin % names(opts ))]
129- invoke(nloptr :: nloptr , eval_f = inst $ objective_function ,
130- lb = inst $ search_space $ lower , ub = inst $ search_space $ upper , opts = opts ,
149+
150+ invoke(nloptr :: nloptr ,
151+ eval_f = inst $ objective_function ,
152+ lb = inst $ search_space $ lower ,
153+ ub = inst $ search_space $ upper ,
154+ opts = opts ,
131155 .args = pv )
132156 }
133157 )
0 commit comments