Skip to content

Commit fd125cf

Browse files
committed
refactor: Merge Options and move Optimize function
1 parent 90088c3 commit fd125cf

37 files changed

+372
-406
lines changed

benchmarks/dense.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -20,16 +20,16 @@
2020

2121
using namespace tinyopt;
2222
using namespace tinyopt::benchmark;
23-
using namespace tinyopt::nlls::lm;
23+
using namespace tinyopt::lm;
2424
using namespace tinyopt::losses;
2525

2626
static const bool enable_log = false;
2727

2828
TEST_CASE("Float", "[benchmark][fixed][scalar]") {
2929
auto loss = [](const auto &x) { return x * x - 2.0f; };
3030
Options options = CreateOptions(enable_log);
31-
options.solver.use_ldlt = false;
32-
options.solver.log.print_failure = true;
31+
options.hessian.use_ldlt = false;
32+
options.log.print_failure = true;
3333
BENCHMARK("√2") {
3434
float x = Vec1::Random()[0];
3535
if (enable_log) TINYOPT_LOG("x:{:.12e}", x);
@@ -40,7 +40,7 @@ TEST_CASE("Float", "[benchmark][fixed][scalar]") {
4040
TEST_CASE("Double", "[benchmark][fixed][scalar]") {
4141
auto loss = [](const auto &x) { return x * x - 2.0; };
4242
Options options = CreateOptions(enable_log);
43-
options.solver.use_ldlt = false;
43+
options.hessian.use_ldlt = false;
4444
static StatCounter<double> counter;
4545
BENCHMARK("√2") {
4646
double x = Vec1::Random()[0]; // 0.480009157900 fails to converge

benchmarks/densef.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818

1919
using namespace tinyopt;
2020
using namespace tinyopt::benchmark;
21-
using namespace tinyopt::nlls::lm;
21+
using namespace tinyopt::lm;
2222
using namespace tinyopt::losses;
2323

2424
TEMPLATE_TEST_CASE("Dense", "[benchmark][fixed][dense][float]", Vec3f, Vec6f, VecXf) {

benchmarks/options.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
namespace tinyopt::benchmark {
99

1010
inline auto CreateOptions(bool enable_log = false) {
11-
tinyopt::nlls::lm::Options options;
11+
Options options;
1212
options.max_iters = 10;
1313

1414
options.min_error = 0; // Ceres does not seem to be using this
@@ -21,8 +21,8 @@ inline auto CreateOptions(bool enable_log = false) {
2121

2222
// No log?
2323
options.log.enable = enable_log;
24-
options.solver.log.enable = enable_log;
25-
options.save.H = false;
24+
options.log.enable = enable_log;
25+
options.hessian.save_last = false;
2626
return options;
2727
}
2828

benchmarks/sparse.cpp

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -14,12 +14,13 @@
1414

1515
#include <tinyopt/tinyopt.h>
1616
#include "options.h"
17+
#include "utils.h"
1718

1819
using namespace tinyopt;
1920
using namespace tinyopt::benchmark;
20-
using namespace tinyopt::nlls::lm;
21+
using namespace tinyopt::lm;
2122

22-
auto simple_loss = [](const auto &x, auto &grad, SparseMatrix<double> &H) {
23+
auto simple_loss = [](const auto &x, auto &grad, SparseMat &H) {
2324
const VecX res = 10 * x.array() - 2;
2425
// Update the gradient and Hessian approx.
2526
if constexpr (!traits::is_nullptr_v<decltype(grad)>) {
@@ -39,7 +40,7 @@ auto simple_loss = [](const auto &x, auto &grad, SparseMatrix<double> &H) {
3940
} else if constexpr (0) { // yet another way, using a dense jacobian
4041
H = (J.transpose() * J).sparseView();
4142
} else { // yet another way, using a sparse jacobian
42-
SparseMatrix<double> Js(res.rows(), x.size());
43+
SparseMat Js(res.rows(), x.size());
4344
for (int i = 0; i < x.size(); ++i) Js.coeffRef(i, i) = 10;
4445
H = Js.transpose() * Js;
4546
}

docs/API.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -222,13 +222,13 @@ Optimize(x, loss); // ok, let's optimize
222222
### Sparse Systems
223223

224224
Ok so you have quite large and sparse systems? Just tell `Tinyopt` about it by simply
225-
defining you loss to have a `SparseMatrix<double> &H` type instead of a Dense Matrix or `auto`.
225+
defining your loss to have a `SparseMat &H` type instead of a Dense Matrix or `auto`.
226226

227227
*NOTE* that automatic differentiation is not supported for sparse Hessian matrices, but it is for first order solvers.
228228
In that case, simply use this loss signature `auto loss = []<typename T>(const auto &x, SparseMatrix<T> &gradient)`.
229229

230230
```cpp
231-
auto loss = [](const auto &x, auto &grad, SparseMatrix<double> &H) {
231+
auto loss = [](const auto &x, auto &grad, SparseMat &H) {
232232
// Define your residuals
233233
const VecX res = 10 * x.array() - 2; // the residuals
234234
// Update the full gradient matrix, using the Jacobian J of the residuals w.r.t 'x'
@@ -246,7 +246,7 @@ auto loss = [](const auto &x, auto &grad, SparseMatrix<double> &H) {
246246

247247
```
248248

249-
As an alternative, you can use the `Optimizer<SparseMatrix<MatX>>` class instead of the `Optimize` function.
249+
As an alternative, you can use the `gn::Optimizer<SparseMatX>` class instead of the `Optimize` function.
250250

251251
There are many ways to fill `H` in Eigen, have a look at `tests/sparse.cpp` for some examples.
252252

include/tinyopt/diff/num_diff.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -189,7 +189,7 @@ auto EstimateNumJac(const X_t &x, const Func &f,
189189
* double norm = acc_loss(x, g, H);
190190
*
191191
* The returned function can be passed to an optimizer, e.g.
192-
* auto optimizer = Optimizer<SolverGD<Vec2>>();
192+
* auto optimizer = Optimizer_<SolverGD<Vec2>>();
193193
* optimizer(x, acc_loss);
194194
*
195195
* @endcode
@@ -275,7 +275,7 @@ auto CreateNumDiffFunc1(X_t &, const ResidualsFunc &residuals,
275275
* double norm = acc_loss(x, g, H);
276276
*
277277
* The returned function can be passed to an optimizer, e.g.
278-
* auto optimizer = Optimizer<SolverLM<Mat2>>();
278+
* auto optimizer = Optimizer_<SolverLM<Mat2>>();
279279
* optimizer(x, acc_loss);
280280
*
281281
* @endcode

include/tinyopt/optimize.h

Lines changed: 65 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4,15 +4,76 @@
44
#pragma once
55

66
#include <tinyopt/optimizers/optimizer.h>
7+
#include <tinyopt/optimizers/options.h>
8+
9+
#include <tinyopt/optimizers/optimizers.h>
10+
#include "tinyopt/log.h"
711

812
namespace tinyopt {
913

1014
/// Simplest interface to optimize `x` and minimize residuals (loss function).
1115
/// Internally call the optimizer and run the optimization.
12-
template <typename Optimizer, typename X_t, typename Res_t>
13-
inline auto Optimize(X_t &x, const Res_t &func, const typename Optimizer::Options &options = {}) {
14-
Optimizer optimizer(options);
15-
return optimizer(x, func);
16+
template <typename T, typename Func>
17+
inline Output Optimize(T &x, const Func &func, const Options &options = {}) {
18+
// Detect Scalar, supporting at most one nesting level
19+
using Scalar = std::conditional_t<
20+
std::is_scalar_v<typename traits::params_trait<T>::Scalar>,
21+
typename traits::params_trait<T>::Scalar,
22+
typename traits::params_trait<typename traits::params_trait<T>::Scalar>::Scalar>;
23+
static_assert(std::is_scalar_v<Scalar>);
24+
constexpr Index Dims = traits::params_trait<T>::Dims;
25+
26+
// Detect Hessian Type, if it's dense or sparse
27+
constexpr bool isDense =
28+
std::is_invocable_v<Func, const T &> ||
29+
std::is_invocable_v<Func, const T &, Vector<Scalar, Dims> &> ||
30+
std::is_invocable_v<Func, const T &, Vector<Scalar, Dims> &, Matrix<Scalar, Dims, Dims> &>;
31+
32+
using Hessian_t = std::conditional_t<isDense, Matrix<Scalar, Dims, Dims>, SparseMatrix<Scalar>>;
33+
using Gradient_t = std::conditional_t<isDense, Vector<Scalar, Dims>, SparseMatrix<Scalar>>;
34+
35+
constexpr bool secondOrderValid = !std::is_invocable_v<Func, const T &, Vector<Scalar, Dims> &>;
36+
37+
// Check if this is an unconstrained first order problem
38+
constexpr bool firstOrderAllowed = !secondOrderValid;
39+
40+
switch (options.solver_type) {
41+
// Second order methods
42+
case Options::Solver::GaussNewton:
43+
if constexpr (secondOrderValid) {
44+
gn::Optimizer<Hessian_t> optimizer(options);
45+
return optimizer(x, func);
46+
} else {
47+
throw std::invalid_argument(
48+
"Error: GaussNewton can't be used on this gradient only function");
49+
}
50+
case Options::Solver::LevenbergMarquardt:
51+
if constexpr (secondOrderValid) {
52+
lm::Optimizer<Hessian_t> optimizer(options);
53+
return optimizer(x, func);
54+
} else {
55+
throw std::invalid_argument(
56+
"Error: LevenbergMarquardt can't be used on this gradient only function");
57+
}
58+
// First order methods
59+
case Options::Solver::GradientDescent:
60+
if constexpr (std::is_invocable_v<Func, const T &>) {
61+
using ReturnType = std::invoke_result_t<Func, T>;
62+
if constexpr (traits::is_scalar_v<ReturnType>) {
63+
gd::Optimizer<Gradient_t> optimizer(options);
64+
return optimizer(x, func);
65+
} else {
66+
throw std::invalid_argument(
67+
"Error: cost function must return a scalar for Gradient Descent");
68+
}
69+
} else if constexpr (firstOrderAllowed) {
70+
gd::Optimizer<Gradient_t> optimizer(options);
71+
return optimizer(x, func);
72+
}
73+
default:
74+
TINYOPT_LOG("❌ Error: Unknown solver type {}", (int)options.solver_type);
75+
throw std::invalid_argument("Error: Unknown solver type");
76+
}
1677
}
1778

1879
} // namespace tinyopt

include/tinyopt/optimizers/gd.h

Lines changed: 8 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -4,43 +4,24 @@
44
#pragma once
55

66
#include <tinyopt/math.h>
7-
#include <tinyopt/optimize.h>
87

8+
#include <tinyopt/optimizers/optimizer.h>
99
#include <tinyopt/solvers/gd.h>
1010

1111
/// Gradient Descent specific solver, optimizer and their options
1212
namespace tinyopt::gd {
1313

14-
/// Gradient Descent Optimization Options
15-
struct Options : Options1 {
16-
Options(const Options1 options = {}) : Options1{options} {}
17-
gd::SolverOptions solver;
18-
};
19-
2014
/// Gradient Descent Solver
2115
template <typename Gradient_t>
2216
using Solver = solvers::SolverGD<Gradient_t>;
2317

24-
/// Gradient Descent Optimizater type
18+
/// Gradient Descent Sparse Solver
19+
template <typename Hessian_t = SparseMat>
20+
using SparseSolver = solvers::SolverGD<Hessian_t>;
21+
22+
/// Gradient Descent Optimizer
2523
template <typename Gradient_t>
26-
using Optimizer = optimizers::Optimizer<Solver<Gradient_t>, Options>;
27-
28-
/// Gradient Descent Optimize function
29-
template <typename X_t, typename Res_t>
30-
inline auto Optimize(X_t &x, const Res_t &func, const Options &options = Options()) {
31-
using Scalar = std::conditional_t<
32-
std::is_scalar_v<typename traits::params_trait<X_t>::Scalar>,
33-
typename traits::params_trait<X_t>::Scalar,
34-
typename traits::params_trait<typename traits::params_trait<X_t>::Scalar>::Scalar>;
35-
static_assert(std::is_scalar_v<Scalar>);
36-
constexpr Index Dims = traits::params_trait<X_t>::Dims;
37-
// Detect Hessian Type, if it's dense or sparse
38-
constexpr bool isDense = std::is_invocable_v<Res_t, const X_t &> ||
39-
std::is_invocable_v<Res_t, const X_t &, Vector<Scalar, Dims> &>;
40-
using Gradient_t = std::conditional_t<isDense, Vector<Scalar, Dims>, SparseMatrix<Scalar>>;
41-
42-
static_assert(Solver<Gradient_t>::FirstOrder);
43-
return tinyopt::Optimize<Optimizer<Gradient_t>>(x, func, options);
44-
}
24+
using Optimizer = Optimizer_<solvers::SolverGD<Gradient_t>>;
25+
4526

4627
} // namespace tinyopt::gd

include/tinyopt/optimizers/gn.h

Lines changed: 7 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -4,47 +4,23 @@
44
#pragma once
55

66
#include <tinyopt/math.h>
7-
#include <tinyopt/optimize.h>
87

8+
#include <tinyopt/optimizers/optimizer.h>
99
#include <tinyopt/solvers/gn.h>
1010

1111
/// Gauss-Newton specific solver, optimizer and their options
12-
namespace tinyopt::nlls::gn {
13-
14-
/// Gauss-Newton Optimization Options
15-
struct Options : Options2 {
16-
Options(const Options2 options = {}) : Options2{options} {}
17-
gn::SolverOptions solver;
18-
};
12+
namespace tinyopt::gn {
1913

2014
/// Gauss-Newton Solver
2115
template <typename Hessian_t>
2216
using Solver = solvers::SolverGN<Hessian_t>;
2317

2418
/// Gauss-Newton Sparse Solver
25-
template <typename Hessian_t = SparseMatrix<double>>
19+
template <typename Hessian_t = SparseMat>
2620
using SparseSolver = solvers::SolverGN<Hessian_t>;
2721

28-
/// Gauss-Newton Optimizater type
22+
/// Gauss-Newton Optimizer
2923
template <typename Hessian_t>
30-
using Optimizer = optimizers::Optimizer<Solver<Hessian_t>, Options>;
31-
32-
/// Gauss-Newton Optimize function
33-
template <typename X_t, typename Res_t>
34-
inline auto Optimize(X_t &x, const Res_t &func, const Options &options = Options()) {
35-
// Detect Scalar, supporting at most one nesting level
36-
using Scalar = std::conditional_t<
37-
std::is_scalar_v<typename traits::params_trait<X_t>::Scalar>,
38-
typename traits::params_trait<X_t>::Scalar,
39-
typename traits::params_trait<typename traits::params_trait<X_t>::Scalar>::Scalar>;
40-
static_assert(std::is_scalar_v<Scalar>);
41-
constexpr Index Dims = traits::params_trait<X_t>::Dims;
42-
// Detect Hessian Type, if it's dense or sparse
43-
constexpr bool isDense =
44-
std::is_invocable_v<Res_t, const X_t &> ||
45-
std::is_invocable_v<Res_t, const X_t &, Vector<Scalar, Dims> &, Matrix<Scalar, Dims, Dims> &>;
46-
using Hessian_t = std::conditional_t<isDense, Matrix<Scalar, Dims, Dims>, SparseMatrix<Scalar>>;
47-
return tinyopt::Optimize<Optimizer<Hessian_t>>(x, func, options);
48-
}
49-
50-
} // namespace tinyopt::nlls::gn
24+
using Optimizer = Optimizer_<solvers::SolverGN<Hessian_t>>;
25+
26+
} // namespace tinyopt::gn

include/tinyopt/optimizers/lm.h

Lines changed: 7 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -4,49 +4,24 @@
44
#pragma once
55

66
#include <tinyopt/math.h>
7-
#include <tinyopt/optimize.h>
87

9-
#include <tinyopt/optimizers/options.h>
8+
#include <tinyopt/optimizers/optimizer.h>
109
#include <tinyopt/solvers/lm.h>
1110
#include <type_traits>
1211

1312
/// Levenberg-Marquardt specific solver, optimizer and their options
14-
namespace tinyopt::nlls::lm {
15-
16-
/// Levenberg-Marquardt Optimization Options
17-
struct Options : Options2 {
18-
Options(const Options2 options = {}) : Options2{options} {}
19-
lm::SolverOptions solver;
20-
};
13+
namespace tinyopt::lm {
2114

2215
/// Levenberg-Marquardt Solver
2316
template <typename Hessian_t>
2417
using Solver = solvers::SolverLM<Hessian_t>;
2518

2619
/// Levenberg-Marquardt Sparse Solver
27-
template <typename Hessian_t = SparseMatrix<double>>
20+
template <typename Hessian_t = SparseMat>
2821
using SparseSolver = solvers::SolverLM<Hessian_t>;
2922

30-
/// Levenberg-Marquardt Optimizater type
23+
/// Levenberg-Marquardt Optimizer
3124
template <typename Hessian_t>
32-
using Optimizer = optimizers::Optimizer<Solver<Hessian_t>, Options>;
33-
34-
/// Levenberg-Marquardt Optimize function
35-
template <typename X_t, typename Res_t>
36-
inline auto Optimize(X_t &x, const Res_t &func, const Options &options = Options()) {
37-
// Detect Scalar, supporting at most one nesting level
38-
using Scalar = std::conditional_t<
39-
std::is_scalar_v<typename traits::params_trait<X_t>::Scalar>,
40-
typename traits::params_trait<X_t>::Scalar,
41-
typename traits::params_trait<typename traits::params_trait<X_t>::Scalar>::Scalar>;
42-
static_assert(std::is_scalar_v<Scalar>);
43-
constexpr Index Dims = traits::params_trait<X_t>::Dims;
44-
// Detect Hessian Type, if it's dense or sparse
45-
constexpr bool isDense =
46-
std::is_invocable_v<Res_t, const X_t &> ||
47-
std::is_invocable_v<Res_t, const X_t &, Vector<Scalar, Dims> &, Matrix<Scalar, Dims, Dims> &>;
48-
using Hessian_t = std::conditional_t<isDense, Matrix<Scalar, Dims, Dims>, SparseMatrix<Scalar>>;
49-
return tinyopt::Optimize<Optimizer<Hessian_t>>(x, func, options);
50-
}
51-
52-
} // namespace tinyopt::nlls::lm
25+
using Optimizer = Optimizer_<solvers::SolverLM<Hessian_t>>;
26+
27+
} // namespace tinyopt::lm

0 commit comments

Comments
 (0)