Skip to content

Commit 9519f95

Browse files
committed
feat: optionally allow numerical gradient approximations for OptimizerBatchNLoptr
1 parent 35897cb commit 9519f95

File tree

8 files changed

+126
-23
lines changed

8 files changed

+126
-23
lines changed

R/OptimizerBatchNLoptr.R

Lines changed: 42 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -4,8 +4,8 @@
44
#' @name mlr_optimizers_nloptr
55
#'
66
#' @description
7-
#' `OptimizerBatchNLoptr` class that implements non-linear optimization. Calls
8-
#' [nloptr::nloptr()] from package \CRANpkg{nloptr}.
7+
#' `OptimizerBatchNLoptr` class that implements non-linear optimization.
8+
#' Calls [nloptr::nloptr()] from package \CRANpkg{nloptr}.
99
#'
1010
#' @section Parameters:
1111
#' \describe{
@@ -16,8 +16,13 @@
1616
#' \item{`ftol_rel`}{`numeric(1)`}
1717
#' \item{`ftol_abs`}{`numeric(1)`}
1818
#' \item{`start_values`}{`character(1)`\cr
19-
#' Create `random` start values or based on `center` of search space? In the
20-
#' latter case, it is the center of the parameters before a trafo is applied.}
19+
#' Create `random` start values or based on `center` of search space?
20+
#' In the latter case, it is the center of the parameters before a trafo is applied.}
21+
#' \item{`approximate_eval_grad_f`}{`logical(1)`\cr
22+
#' Should gradients be numerically approximated via finite differences ([nloptr::nl.grad()]).
23+
#' Only required for certain algorithms.
24+
#' Note that function evaluations required for the numerical gradient approximation will be logged as usual
25+
#' and are not treated differently than regular function evaluations by, e.g., [Terminator]s.}
2126
#' }
2227
#'
2328
#' For the meaning of the control parameters, see [nloptr::nloptr()] and
@@ -82,24 +87,27 @@ OptimizerBatchNLoptr = R6Class("OptimizerBatchNLoptr", inherit = OptimizerBatch,
8287
#' Creates a new instance of this [R6][R6::R6Class] class.
8388
initialize = function() {
8489
param_set = ps(
85-
algorithm = p_fct(levels = c(
86-
"NLOPT_GN_DIRECT_L", "NLOPT_GN_DIRECT_L_RAND", "NLOPT_GN_DIRECT_NOSCAL", "NLOPT_GN_DIRECT_L_NOSCAL",
87-
"NLOPT_GN_DIRECT_L_RAND_NOSCAL", "NLOPT_GN_ORIG_DIRECT", "NLOPT_GN_ORIG_DIRECT_L", "NLOPT_GD_STOGO",
88-
"NLOPT_GD_STOGO_RAND", "NLOPT_LD_SLSQP", "NLOPT_LD_LBFGS_NOCEDAL", "NLOPT_LD_LBFGS", "NLOPT_LN_PRAXIS",
89-
"NLOPT_LD_VAR1", "NLOPT_LD_VAR2", "NLOPT_LD_TNEWTON", "NLOPT_LD_TNEWTON_RESTART", "NLOPT_LD_TNEWTON_PRECOND",
90-
"NLOPT_LD_TNEWTON_PRECOND_RESTART", "NLOPT_GN_CRS2_LM", "NLOPT_GN_MLSL", "NLOPT_GD_MLSL", "NLOPT_GN_MLSL_LDS",
91-
"NLOPT_GD_MLSL_LDS", "NLOPT_LD_MMA", "NLOPT_LD_CCSAQ", "NLOPT_LN_COBYLA", "NLOPT_LN_NEWUOA",
92-
"NLOPT_LN_NEWUOA_BOUND", "NLOPT_LN_NELDERMEAD", "NLOPT_LN_SBPLX", "NLOPT_LN_AUGLAG", "NLOPT_LD_AUGLAG",
93-
"NLOPT_LN_AUGLAG_EQ", "NLOPT_LD_AUGLAG_EQ", "NLOPT_LN_BOBYQA", "NLOPT_GN_ISRES"),
94-
tags = "required"),
90+
algorithm = p_fct(
91+
levels = c(
92+
"NLOPT_GN_DIRECT_L", "NLOPT_GN_DIRECT_L_RAND", "NLOPT_GN_DIRECT_NOSCAL", "NLOPT_GN_DIRECT_L_NOSCAL",
93+
"NLOPT_GN_DIRECT_L_RAND_NOSCAL", "NLOPT_GN_ORIG_DIRECT", "NLOPT_GN_ORIG_DIRECT_L", "NLOPT_GD_STOGO",
94+
"NLOPT_GD_STOGO_RAND", "NLOPT_LD_SLSQP", "NLOPT_LD_LBFGS_NOCEDAL", "NLOPT_LD_LBFGS", "NLOPT_LN_PRAXIS",
95+
"NLOPT_LD_VAR1", "NLOPT_LD_VAR2", "NLOPT_LD_TNEWTON", "NLOPT_LD_TNEWTON_RESTART", "NLOPT_LD_TNEWTON_PRECOND",
96+
"NLOPT_LD_TNEWTON_PRECOND_RESTART", "NLOPT_GN_CRS2_LM", "NLOPT_GN_MLSL", "NLOPT_GD_MLSL", "NLOPT_GN_MLSL_LDS",
97+
"NLOPT_GD_MLSL_LDS", "NLOPT_LD_MMA", "NLOPT_LD_CCSAQ", "NLOPT_LN_COBYLA", "NLOPT_LN_NEWUOA",
98+
"NLOPT_LN_NEWUOA_BOUND", "NLOPT_LN_NELDERMEAD", "NLOPT_LN_SBPLX", "NLOPT_LN_AUGLAG", "NLOPT_LD_AUGLAG",
99+
"NLOPT_LN_AUGLAG_EQ", "NLOPT_LD_AUGLAG_EQ", "NLOPT_LN_BOBYQA", "NLOPT_GN_ISRES"),
100+
tags = "required"),
95101
eval_g_ineq = p_uty(default = NULL),
96102
xtol_rel = p_dbl(default = 10^-4, lower = 0, upper = Inf, special_vals = list(-1)),
97103
xtol_abs = p_dbl(default = 0, lower = 0, upper = Inf, special_vals = list(-1)),
98104
ftol_rel = p_dbl(default = 0, lower = 0, upper = Inf, special_vals = list(-1)),
99105
ftol_abs = p_dbl(default = 0, lower = 0, upper = Inf, special_vals = list(-1)),
100-
start_values = p_fct(default = "random", levels = c("random", "center"))
106+
start_values = p_fct(default = "random", levels = c("random", "center")),
107+
approximate_eval_grad_f = p_lgl(default = FALSE)
101108
)
102109
param_set$values$start_values = "random"
110+
param_set$values$approximate_eval_grad_f = FALSE
103111

104112
super$initialize(
105113
id = "nloptr",
@@ -116,18 +124,34 @@ OptimizerBatchNLoptr = R6Class("OptimizerBatchNLoptr", inherit = OptimizerBatch,
116124
private = list(
117125
.optimize = function(inst) {
118126
pv = self$param_set$values
127+
119128
pv$x0 = search_start(inst$search_space, type = pv$start_values)
120129
pv$start_values = NULL
130+
131+
eval_grad_f = if (pv$approximate_eval_grad_f) {
132+
function(x) {
133+
invoke(nloptr::nl.grad, x0 = x, fn = inst$objective_function)
134+
}
135+
} else {
136+
NULL
137+
}
138+
pv$eval_grad_f = eval_grad_f
139+
pv$approximate_eval_grad_f = NULL
140+
121141
opts = pv[which(names(pv) %nin% formalArgs(nloptr::nloptr))]
122-
# Deactivate termination criterions which are replaced by Terminators
142+
# deactivate termination criteria which are replaced by Terminators
123143
opts = insert_named(opts, list(
124144
maxeval = -1,
125145
maxtime = -1,
126146
stopval = -Inf
127147
))
128148
pv = pv[which(names(pv) %nin% names(opts))]
129-
invoke(nloptr::nloptr, eval_f = inst$objective_function,
130-
lb = inst$search_space$lower, ub = inst$search_space$upper, opts = opts,
149+
150+
invoke(nloptr::nloptr,
151+
eval_f = inst$objective_function,
152+
lb = inst$search_space$lower,
153+
ub = inst$search_space$upper,
154+
opts = opts,
131155
.args = pv)
132156
}
133157
)

man/ArchiveAsync.Rd

Lines changed: 18 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

man/ArchiveAsyncFrozen.Rd

Lines changed: 18 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

man/OptimInstanceAsync.Rd

Lines changed: 18 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

man/mlr_optimizers_nloptr.Rd

Lines changed: 9 additions & 4 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

tests/testthat/_snaps/OptimizerNLoptr.md

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,21 @@
55
Output
66
<OptimizerBatchNLoptr>: Non-linear Optimization
77
* Parameters: algorithm=NLOPT_LN_BOBYQA, xtol_rel=-1, xtol_abs=-1,
8-
ftol_rel=-1, ftol_abs=-1, start_values=random
8+
ftol_rel=-1, ftol_abs=-1, start_values=random,
9+
approximate_eval_grad_f=FALSE
10+
* Parameter classes: ParamDbl
11+
* Properties: single-crit
12+
* Packages: bbotk, nloptr
13+
14+
---
15+
16+
Code
17+
z$optimizer
18+
Output
19+
<OptimizerBatchNLoptr>: Non-linear Optimization
20+
* Parameters: algorithm=NLOPT_LD_LBFGS, xtol_rel=-1, xtol_abs=-1,
21+
ftol_rel=-1, ftol_abs=-1, start_values=random,
22+
approximate_eval_grad_f=TRUE
923
* Parameter classes: ParamDbl
1024
* Properties: single-crit
1125
* Packages: bbotk, nloptr

tests/testthat/test_OptimizerChain.R

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,3 +59,4 @@ test_that("OptimizerBatchChain", {
5959
"OptimizerBatchGenSA_1.verbose", "OptimizerBatchGenSA_1.trace.mat")
6060
)
6161
})
62+

tests/testthat/test_OptimizerNLoptr.R

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,4 +7,9 @@ test_that("OptimizerBatchNLoptr", {
77
term_evals = 5L)
88
expect_class(z$optimizer, "OptimizerBatchNLoptr")
99
expect_snapshot(z$optimizer)
10+
11+
z = test_optimizer_1d("nloptr", algorithm = "NLOPT_LD_LBFGS", approximate_eval_grad_f = TRUE,
12+
xtol_rel = -1, xtol_abs = -1, ftol_rel = -1, ftol_abs = -1, term_evals = 5L)
13+
expect_class(z$optimizer, "OptimizerBatchNLoptr")
14+
expect_snapshot(z$optimizer)
1015
})

0 commit comments

Comments
 (0)