
Commit 1fcafd7

Update Neural ODE benchmark
1 parent 37a0a44 commit 1fcafd7

File tree

benchmarks/adam_opt.jl
src/ode_pso.jl

2 files changed: +57 / -30 lines

benchmarks/adam_opt.jl

Lines changed: 53 additions & 15 deletions
@@ -1,6 +1,8 @@
 using SimpleChains,
     StaticArrays, OrdinaryDiffEq, SciMLSensitivity, Optimization, OptimizationFlux, Plots

+#device!(2)
+# Get Tesla V100S
 u0 = @SArray Float32[2.0, 0.0]
 datasize = 30
 tspan = (0.0f0, 1.5f0)
@@ -23,10 +25,10 @@ p_nn = SimpleChains.init_params(sc)

 f(u, p, t) = sc(u, p)

-prob_nn = ODEProblem(f, u0, tspan)
+sprob_nn = ODEProblem(f, u0, tspan)

 function predict_neuralode(p)
-    Array(solve(prob_nn,
+    Array(solve(sprob_nn,
         Tsit5();
         p = p,
         saveat = tsteps,
@@ -56,6 +58,8 @@ optprob = Optimization.OptimizationProblem(optf, p_nn)
 @time res = Optimization.solve(optprob, ADAM(0.05), maxiters = 100)
 @show res.objective

+@benchmark Optimization.solve(optprob, ADAM(0.05), maxiters = 100)
+
 ## PSOGPU stuff

 function nn_fn(u::T, p, t)::T where {T}
@@ -78,12 +82,15 @@ function loss(u, p)
     sum(abs2, data .- pred)
 end

-lb = SVector{length(p_static), eltype(p_static)}(fill(eltype(p_static)(-10),
-    length(p_static))...)
-ub = SVector{length(p_static), eltype(p_static)}(fill(eltype(p_static)(10),
-    length(p_static))...)
+# lb = SVector{length(p_static), eltype(p_static)}(fill(eltype(p_static)(-10),
+#     length(p_static))...)
+# ub = SVector{length(p_static), eltype(p_static)}(fill(eltype(p_static)(10),
+#     length(p_static))...)
+
+lb = @SArray fill(Float32(-Inf), length(p_static))
+ub = @SArray fill(Float32(Inf), length(p_static))

-optprob = OptimizationProblem(loss, prob_nn.p[2], (prob_nn, tsteps); lb = lb, ub = ub)
+soptprob = OptimizationProblem(loss, prob_nn.p[2], (prob_nn, tsteps); lb = lb, ub = ub)

 using PSOGPU
 using CUDA
@@ -93,27 +100,58 @@ using Adapt
 backend = CUDABackend()

 ## Initialize Particles
-gbest, particles = PSOGPU.init_particles(optprob,
+gbest, particles = PSOGPU.init_particles(soptprob,
     ParallelSyncPSOKernel(n_particles; backend = CUDABackend()),
     typeof(prob.u0))

 gpu_data = adapt(backend,
     [SVector{length(prob_nn.u0), eltype(prob_nn.u0)}(@view data[:, i])
      for i in 1:length(tsteps)])

-gpu_particles = adapt(backend, particles)
-
 CUDA.allowscalar(false)

 function prob_func(prob, gpu_particle)
     return remake(prob, p = (prob.p[1], gpu_particle.position))
 end

+gpu_particles = adapt(backend, particles)
+
+losses = adapt(backend, ones(eltype(prob.u0), (1, n_particles)))
+
+solver_cache = (; losses, gpu_particles, gpu_data, gbest)
+
 @time gsol = PSOGPU.parameter_estim_ode!(prob_nn,
-    gpu_particles,
-    gbest,
-    gpu_data,
+    solver_cache,
     lb,
-    ub; saveat = tsteps, dt = 0.1f0, prob_func = prob_func, maxiters = 100)
+    ub;
+    saveat = tsteps,
+    dt = 0.1f0,
+    prob_func = prob_func,
+    maxiters = 100)
+
+@benchmark PSOGPU.parameter_estim_ode!($prob_nn,
+    $(deepcopy(solver_cache)),
+    $lb,
+    $ub;
+    saveat = tsteps,
+    dt = 0.1f0,
+    prob_func = prob_func,
+    maxiters = 100)
+
+@show gsol.best
+
+using Plots
+
+function predict_neuralode(p)
+    Array(solve(prob_nn, Tsit5(); p = p, saveat = tsteps))
+end
+
+using Plots
+
+plt = scatter(tsteps, data[1, :], label = "data")
+
+pred_pso = predict_neuralode((sc, gsol.position))
+scatter!(plt, tsteps, pred_pso[1, :], label = "PSO prediction")

-@show gsol.cost
+pred_adam = predict_neuralode((sc, res.u))
+scatter!(plt, tsteps, pred_adam[1, :], label = "Adam prediction")
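Note on the benchmark structure: the updated script allocates the GPU buffers (losses, gpu_particles, gpu_data) and the initial gbest once, bundles them into solver_cache, and hands the whole tuple to PSOGPU.parameter_estim_ode!, so the @benchmark run (the macro comes from BenchmarkTools.jl, which the script presumably loads elsewhere) times only the PSO iterations and not buffer setup. Below is a minimal, CPU-only sketch of that pattern; estimate!, particles, and losses are hypothetical stand-ins, not PSOGPU API.

using BenchmarkTools, StaticArrays

# Stand-in for the mutating solver: it reuses the buffers it is handed instead of allocating.
function estimate!(cache, lb, ub; maxiters = 100)
    (losses, particles) = cache              # positional destructuring, as parameter_estim_ode! now does
    for _ in 1:maxiters
        @. losses = abs2(particles - 0.5f0)  # placeholder for the per-particle ODE loss
    end
    return minimum(losses)
end

particles = rand(Float32, 1024)
losses = similar(particles)
cache = (; losses, particles)                # allocate once, outside the timed region

lb = @SArray fill(Float32(-Inf), 4)          # effectively unbounded box, mirroring the new lb/ub
ub = @SArray fill(Float32(Inf), 4)

# $-interpolation keeps global-variable lookup out of the timing; $(deepcopy(cache)) is evaluated
# once, so the benchmark mutates a private copy and leaves `cache` untouched.
@benchmark estimate!($(deepcopy(cache)), $lb, $ub)

The same reasoning presumably motivates the $(deepcopy(solver_cache)) in the diff: the buffers are mutated during the repeated benchmark runs, and benchmarking on a copy keeps the earlier @time solution intact.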

src/ode_pso.jl

Lines changed: 4 additions & 15 deletions
@@ -45,23 +45,17 @@ function default_prob_func(prob, gpu_particle)
     return remake(prob, p = gpu_particle.position)
 end

-function parameter_estim_ode!(prob::ODEProblem,
-    gpu_particles,
-    gbest,
-    data,
+function parameter_estim_ode!(prob::ODEProblem, cache,
     lb,
     ub;
     ode_alg = GPUTsit5(),
     prob_func = default_prob_func,
     w = 0.72980f0,
     wdamp = 1.0f0,
     maxiters = 100, kwargs...)
+    (losses, gpu_particles, gpu_data, gbest) = cache
     backend = get_backend(gpu_particles)
     update_states! = PSOGPU._update_particle_states!(backend)
-
-    losses = KernelAbstractions.allocate(backend,
-        eltype(prob.u0),
-        (1, length(gpu_particles)))
     update_costs! = PSOGPU._update_particle_costs!(backend)

     improb = make_prob_compatible(prob)
@@ -80,16 +74,11 @@ function parameter_estim_ode!(prob::ODEProblem,
         prob,
         ode_alg; kwargs...)

-    sum!(losses, (map(x -> sum(x .^ 2), data .- us)))
+    sum!(losses, (map(x -> sum(x .^ 2), gpu_data .- us)))

     update_costs!(losses, gpu_particles; ndrange = length(losses))

-    best_particle = minimum(gpu_particles,
-        init = PSOGPU.SPSOParticle(gbest.position,
-            gbest.position,
-            gbest.cost,
-            gbest.position,
-            gbest.cost))
+    best_particle = minimum(gpu_particles)

     gbest = PSOGPU.SPSOGBest(best_particle.best_position, best_particle.best_cost)
     w = w * wdamp
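A small caveat about the new cache argument: (losses, gpu_particles, gpu_data, gbest) = cache destructures the NamedTuple positionally, so the field order chosen on the benchmark side has to match this exact order; the names themselves are never consulted. A standalone illustration with placeholder values (plain arrays, not PSOGPU types):

# Positional destructuring of a NamedTuple iterates its fields in declaration order.
cache = (; losses = zeros(Float32, 1, 4),
    gpu_particles = rand(Float32, 4),
    gpu_data = rand(Float32, 4),
    gbest = 0.0f0)

(losses, gpu_particles, gpu_data, gbest) = cache   # order-based, names ignored
@assert losses === cache.losses
@assert gbest === cache.gbest

# Reordering the fields would silently bind the wrong objects:
bad = (; gpu_particles = cache.gpu_particles, losses = cache.losses,
    gpu_data = cache.gpu_data, gbest = cache.gbest)
(losses, gpu_particles, _, _) = bad
@assert losses === cache.gpu_particles             # `losses` now holds the particles array

The simplified best_particle = minimum(gpu_particles) also assumes the particle collection is non-empty, which it is here since init_particles creates n_particles of them.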
