
Commit ba3b45f

Merge pull request #105 from mkg33/master
Reorganized documentation
2 parents 0898719 + 8e49f42

File tree

6 files changed: +226 −35 lines

docs/make.jl

Lines changed: 16 additions & 9 deletions
```diff
@@ -12,22 +12,29 @@ makedocs(
 
     pages=[
         "GalacticOptim.jl: Unified Global Optimization Package" => "index.md",
+
        "Tutorials" => [
-            "Introduction to GalacticOptim.jl" => "tutorials/intro.md"
+            "Introduction to GalacticOptim.jl" => "tutorials/intro.md",
+            "Rosenbrock function" => "tutorials/rosenbrock.md",
+            "Minibatch" => "tutorials/minibatch.md"
+        ],
+
+        "API" => [
+            "OptimizationProblem" => "API/optimization_problem.md",
+            "OptimizationFunction" => "API/optimization_function.md",
+            "solve" => "API/solve.md"
         ],
-        "Basics" => [
-            "OptimizationProblem" => "basics/problem.md",
-            "Solver Options" => "basics/solve.md"
+
+        "Global Optimizers" => [
+            "global_optimizers/global.md",
+            "global_optimizers/global_constrained.md"
         ],
+
         "Local Optimizers" => [
             "local_optimizers/local_gradient.md",
             "local_optimizers/local_derivative_free.md",
             "local_optimizers/local_hessian.md",
-            "local_optimizers/local_hessian_free.md",
-        ],
-        "Global Optimizers" => [
-            "global_optimizers/global.md",
-            "global_optimizers/global_constrained.md"
+            "local_optimizers/local_hessian_free.md"
         ]
     ]
 )
```

docs/src/basics/problem.md renamed to docs/src/API/optimization_function.md

Lines changed: 1 addition & 26 deletions
````diff
@@ -1,29 +1,4 @@
-# Defining OptimizationProblems
-
-All optimizations start by defining an `OptimizationProblem` as follows:
-
-```julia
-OptimizationProblem(f, x, p = DiffEqBase.NullParameters();
-                    lb = nothing,
-                    ub = nothing,
-                    lcons = nothing,
-                    ucons = nothing,
-                    kwargs...)
-```
-
-Formally, the `OptimizationProblem` finds the minimum of `f(x,p)` with an
-initial condition `x`. The parameters `p` are optional. `lb` and `ub`
-are arrays matching the size of `x` which stand for the lower and upper
-bounds of `x` respectively.
-
-`f` is an `OptimizationFunction`, as defined below. If `f` is a
-standard Julia function, it is automatically converted into an
-`OptimizationFunction` with `NoAD()`, i.e. no automatic generation
-of the derivative functions.
-
-Any extra keyword arguments are captured to be sent to the optimizers.
-
-## OptimizationFunction
+# [OptimizationFunction](@id optfunction)
 
 The `OptimizationFunction` type is a function type that holds all of
 the extra differentiation data required to do fast and accurate
````

docs/src/API/optimization_problem.md

Lines changed: 24 additions & 0 deletions
New file:

# Defining OptimizationProblems

All optimizations start by defining an `OptimizationProblem` as follows:

```julia
OptimizationProblem(f, x, p = DiffEqBase.NullParameters();
                    lb = nothing,
                    ub = nothing,
                    lcons = nothing,
                    ucons = nothing,
                    kwargs...)
```

Formally, the `OptimizationProblem` finds the minimum of `f(x,p)` with an
initial condition `x`. The parameters `p` are optional. `lb` and `ub`
are arrays matching the size of `x`, which stand for the lower and upper
bounds of `x`, respectively.

`f` is an `OptimizationFunction`, as defined [here](@ref optfunction).
If `f` is a standard Julia function, it is automatically converted into an
`OptimizationFunction` with `NoAD()`, i.e., no automatic generation
of the derivative functions.

Any extra keyword arguments are captured to be sent to the optimizers.
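This constructor pairs naturally with `solve`; a minimal sketch using only calls that appear in the Rosenbrock tutorial below (`AutoForwardDiff`, `SAMIN` from Optim):

```julia
using GalacticOptim, Optim

rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
x0 = zeros(2)
p = [1.0, 100.0]

f = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff())

# Unconstrained problem: extra kwargs are forwarded to the optimizer
prob = OptimizationProblem(f, x0, p)

# Box-constrained variant: lb/ub match the size of x0
prob_box = OptimizationProblem(f, x0, p, lb = [-1.0, -1.0], ub = [0.8, 0.8])
sol = solve(prob_box, SAMIN())
```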
docs/src/basics/solve.md renamed to docs/src/API/solve.md

File renamed without changes.

docs/src/tutorials/minibatch.md

Lines changed: 65 additions & 0 deletions
New file:

# Minibatch examples

```julia
using DiffEqFlux, GalacticOptim, OrdinaryDiffEq, Flux, Test
using IterTools: ncycle

function newtons_cooling(du, u, p, t)
    temp = u[1]
    k, temp_m = p
    du[1] = -k * (temp - temp_m)
end

function true_sol(du, u, p, t)
    true_p = [log(2) / 8.0, 100.0]
    newtons_cooling(du, u, true_p, t)
end

function dudt_(u, p, t)
    ann(u, p) .* u
end

# Callback to observe training; plotting requires Plots.jl when doplot = true
cb = function (p, l, pred; doplot = false)
    display(l)
    # plot current prediction against data
    if doplot
        pl = scatter(t, ode_data[1, :], label = "data")
        scatter!(pl, t, pred[1, :], label = "prediction")
        display(plot(pl))
    end
    return false
end

u0 = Float32[200.0]
datasize = 30
tspan = (0.0f0, 1.5f0)

# Generate training data from the true model
t = range(tspan[1], tspan[2], length = datasize)
true_prob = ODEProblem(true_sol, u0, tspan)
ode_data = Array(solve(true_prob, Tsit5(), saveat = t))

# Neural ODE whose right-hand side is a small dense network
ann = FastChain(FastDense(1, 8, tanh), FastDense(8, 1, tanh))
pp = initial_params(ann)
prob = ODEProblem{false}(dudt_, u0, tspan, pp)

function predict_adjoint(fullp, time_batch)
    Array(solve(prob, Tsit5(), p = fullp, saveat = time_batch))
end

function loss_adjoint(fullp, batch, time_batch)
    pred = predict_adjoint(fullp, time_batch)
    sum(abs2, batch .- pred), pred
end

k = 10
train_loader = Flux.Data.DataLoader(ode_data, t, batchsize = k)

numEpochs = 300
l1 = loss_adjoint(pp, train_loader.data[1], train_loader.data[2])[1]

optfun = OptimizationFunction((θ, p, batch, time_batch) -> loss_adjoint(θ, batch, time_batch),
                              GalacticOptim.AutoZygote())
optprob = OptimizationProblem(optfun, pp)
# Cycle through the minibatches for numEpochs epochs
res1 = GalacticOptim.solve(optprob, ADAM(0.05), ncycle(train_loader, numEpochs),
                           cb = cb, maxiters = numEpochs)
@test 10 * res1.minimum < l1
```

docs/src/tutorials/rosenbrock.md

Lines changed: 120 additions & 0 deletions
New file:

# Rosenbrock function examples

```julia
using GalacticOptim, Optim, Test, Random
using Flux: ADAM   # ADAM optimiser (from Flux), used with GalacticOptim.solve below

rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
x0 = zeros(2)
_p = [1.0, 100.0]

f = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff())
l1 = rosenbrock(x0, _p)
prob = OptimizationProblem(f, x0, _p)
sol = solve(prob, SimulatedAnnealing())
@test 10 * sol.minimum < l1

Random.seed!(1234)
prob = OptimizationProblem(f, x0, _p, lb = [-1.0, -1.0], ub = [0.8, 0.8])
sol = solve(prob, SAMIN())
@test 10 * sol.minimum < l1

using CMAEvolutionStrategy
sol = solve(prob, CMAEvolutionStrategyOpt())
@test 10 * sol.minimum < l1

# Parameter-free version with p = (1, 100) hard-coded
rosenbrock(x, p = nothing) = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2

l1 = rosenbrock(x0)
prob = OptimizationProblem(rosenbrock, x0)
sol = solve(prob, NelderMead())
@test 10 * sol.minimum < l1

cons = (x, p) -> [x[1]^2 + x[2]^2]
optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff(); cons = cons)

prob = OptimizationProblem(optprob, x0)

sol = solve(prob, ADAM(0.1), maxiters = 1000)
@test 10 * sol.minimum < l1

sol = solve(prob, BFGS())
@test 10 * sol.minimum < l1

sol = solve(prob, Newton())
@test 10 * sol.minimum < l1

sol = solve(prob, Optim.KrylovTrustRegion())
@test 10 * sol.minimum < l1

# Inequality-constrained problems via IPNewton
prob = OptimizationProblem(optprob, x0, lcons = [-Inf], ucons = [Inf])
sol = solve(prob, IPNewton())
@test 10 * sol.minimum < l1

prob = OptimizationProblem(optprob, x0, lcons = [-5.0], ucons = [10.0])
sol = solve(prob, IPNewton())
@test 10 * sol.minimum < l1

prob = OptimizationProblem(optprob, x0, lcons = [-Inf], ucons = [Inf],
                           lb = [-500.0, -500.0], ub = [50.0, 50.0])
sol = solve(prob, IPNewton())
@test sol.minimum < l1

function con2_c(x, p)
    [x[1]^2 + x[2]^2, x[2] * sin(x[1]) - x[1]]
end

optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff(); cons = con2_c)
prob = OptimizationProblem(optprob, x0, lcons = [-Inf, -Inf], ucons = [Inf, Inf])
sol = solve(prob, IPNewton())
@test 10 * sol.minimum < l1

# Constrain the minimizer to a circle of radius 0.25
cons_circ = (x, p) -> [x[1]^2 + x[2]^2]
optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff(); cons = cons_circ)
prob = OptimizationProblem(optprob, x0, lcons = [-Inf], ucons = [0.25^2])
sol = solve(prob, IPNewton())
@test sqrt(cons_circ(sol.minimizer, nothing)[1]) ≈ 0.25 rtol = 1e-6

optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoZygote())
prob = OptimizationProblem(optprob, x0)
sol = solve(prob, ADAM(), maxiters = 1000, progress = false)
@test 10 * sol.minimum < l1

prob = OptimizationProblem(optprob, x0, lb = [-1.0, -1.0], ub = [0.8, 0.8])
sol = solve(prob, Fminbox())
@test 10 * sol.minimum < l1

prob = OptimizationProblem(optprob, x0, lb = [-1.0, -1.0], ub = [0.8, 0.8])
@test_broken @test_nowarn sol = solve(prob, SAMIN())
@test 10 * sol.minimum < l1

using NLopt
prob = OptimizationProblem(optprob, x0)
sol = solve(prob, Opt(:LN_BOBYQA, 2))
@test 10 * sol.minimum < l1

sol = solve(prob, Opt(:LD_LBFGS, 2))
@test 10 * sol.minimum < l1

prob = OptimizationProblem(optprob, x0, lb = [-1.0, -1.0], ub = [0.8, 0.8])
sol = solve(prob, Opt(:LD_LBFGS, 2))
@test 10 * sol.minimum < l1

sol = solve(prob, Opt(:G_MLSL_LDS, 2), nstart = 2,
            local_method = Opt(:LD_LBFGS, 2), maxiters = 10000)
@test 10 * sol.minimum < l1

# using MultistartOptimization
# sol = solve(prob, MultistartOptimization.TikTak(100), local_method = NLopt.LD_LBFGS)
# @test 10*sol.minimum < l1

# using QuadDIRECT
# sol = solve(prob, QuadDirect(); splits = ([-0.5, 0.0, 0.5], [-0.5, 0.0, 0.5]))
# @test 10*sol.minimum < l1

using Evolutionary
sol = solve(prob, CMAES(μ = 40, λ = 100), abstol = 1e-15)
@test 10 * sol.minimum < l1

using BlackBoxOptim
prob = GalacticOptim.OptimizationProblem(optprob, x0, lb = [-1.0, -1.0], ub = [0.8, 0.8])
sol = solve(prob, BBO())
@test 10 * sol.minimum < l1
```
