diff --git a/examples/ode_estimation/Lotka_Volterra/lotka_volterra.jl b/examples/ode_estimation/Lotka_Volterra/lotka_volterra.jl
index 99f05a4..f82ac8d 100644
--- a/examples/ode_estimation/Lotka_Volterra/lotka_volterra.jl
+++ b/examples/ode_estimation/Lotka_Volterra/lotka_volterra.jl
@@ -31,7 +31,7 @@
 n_particles = 10_000
 
 gbest, particles = PSOGPU.init_particles(optprob, n_particles)
 
-gpu_data = cu([@SArray ones(length(prob.u0))])
+gpu_data = [@SArray ones(length(prob.u0))]
 
 gpu_particles = cu(particles)
diff --git a/src/ode_pso.jl b/src/ode_pso.jl
index 2b381b2..18691a6 100644
--- a/src/ode_pso.jl
+++ b/src/ode_pso.jl
@@ -48,30 +48,47 @@
 function default_prob_func(prob, gpu_particle)
     return remake(prob, p = gpu_particle.position)
 end
 
-function parameter_estim_ode!(prob::ODEProblem,
+function build_ode_objective(prob::ODEProblem,
+    alg = GPUTsit5(),
+    data = nothing,
+    population = 100,
+    kwargs...)
+
+    if isnothing(data)
+        throw(ArgumentError("data must be provided for parameter estimation"))
+    end
+
+    improb = make_prob_compatible(prob)
+
+    function obj(losses, gpu_particles, prob = prob, alg = alg, data = data, kwargs...)
+        probs = default_prob_func.(Ref(improb), gpu_particles)
+
+        ts, us = vectorized_asolve(probs,
+            prob,
+            alg; kwargs...)
+
+        sum!(losses, (map(x -> sum(x .^ 2), data .- us)))
+    end
+
+    return OptimizationProblem(OptimizationFunction(obj), ones(Float32, population))
+end
+
+
+function parameter_estim_ode!(
+    prob::OptimizationProblem,
     gpu_particles,
     gbest,
-    data,
     lb,
     ub;
-    ode_alg = GPUTsit5(),
-    prob_func = default_prob_func,
     w = 0.72980f0,
     wdamp = 1.0f0,
    maxiters = 100,
     kwargs...)
-    update_states! = @cuda launch=false PSOGPU._update_particle_states!(gpu_particles, lb,
-        ub,
-        gbest,
-        w)
+
+    update_states! = @cuda launch=false PSOGPU._update_particle_states!(prob, gpu_particles, gbest, w)
 
     losses = CUDA.ones(1, length(gpu_particles))
+    update_costs! = @cuda launch=false PSOGPU._update_particle_costs!(losses, gpu_particles)
 
     config_states = launch_configuration(update_states!.fun)
     config_costs = launch_configuration(update_costs!.fun)
 
-    improb = make_prob_compatible(prob)
-
     for i in 1:maxiters
         update_states!(gpu_particles, lb,
@@ -81,14 +98,7 @@ function parameter_estim_ode!(prob::ODEProblem,
             config_states.threads,
             config_states...)
-        probs = prob_func.(Ref(improb), gpu_particles)
-
-        ts, us = vectorized_asolve(probs,
-            prob,
-            ode_alg; kwargs...)
-
-        sum!(losses, (map(x -> sum(x .^ 2), data .- us)))
-
+        update_costs!(losses, gpu_particles; config_costs.threads, config_costs...)
 
         best_particle = minimum(gpu_particles,
diff --git a/test/neuralode.jl b/test/neuralode.jl
new file mode 100644
index 0000000..bcae247
--- /dev/null
+++ b/test/neuralode.jl
@@ -0,0 +1,88 @@
+using SimpleChains, StaticArrays, OrdinaryDiffEq, SciMLSensitivity, DiffEqGPU, CUDA
+u0 = @SArray Float32[2.0, 0.0]
+datasize = 30
+tspan = (0.0f0, 1.5f0)
+tsteps = range(tspan[1], tspan[2], length = datasize)
+
+function trueODE(u, p, t)
+    true_A = @SMatrix Float32[-0.1 2.0; -2.0 -0.1]
+    ((u .^ 3)'true_A)'
+end
+
+prob = ODEProblem(trueODE, u0, tspan)
+data = Array(solve(prob, Tsit5(), saveat = tsteps))
+
+sc = SimpleChain(static(2),
+    Activation(x -> x .* 3),
+    TurboDense{true}(tanh, static(2)),
+    TurboDense{true}(identity, static(2)))
+
+p_nn = SimpleChains.init_params(sc)
+
+function nn_fn(u::T, p, t)::T where {T}
+    nn, ps = p
+    return nn(u, ps)
+end
+
+nn = (u, p, t) -> sc(u, p)
+
+p_static = SArray{Tuple{size(p_nn)...}}(p_nn...)
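+
+# Below, the chain `sc` and its static parameter vector `p_static` are stored in
+# the ODEProblem's parameters as the tuple `(sc, p_static)`, which `nn_fn` unpacks.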
+
+prob_nn = ODEProblem(nn_fn, u0, tspan, (sc, p_static))
+
+
+function predict_neuralode(p)
+    _prob = remake(prob_nn; p = (sc, p))
+    _monteprob = EnsembleProblem(_prob, safetycopy = false)  # assumed: ensemble wrapper needed by EnsembleGPUKernel
+    Array(solve(_monteprob, GPUTsit5(), EnsembleGPUKernel(CUDABackend(), 0.0), dt = 0.1f0,
+        trajectories = 2, adaptive = true, saveat = tsteps)[1])
+end
+
+function loss_neuralode(p)
+    pred = predict_neuralode(p)
+    loss = sum(abs2, data .- pred)
+    return loss
+end
+
+sol = predict_neuralode(p_static)
+t = collect(range(0f0, stop = 10.0, length = 200))
+using RecursiveArrayTools # for VectorOfArray
+randomized = VectorOfArray([(sol(t[i]) + 0.01randn(2)) for i in 1:length(t)])
+
+data = convert(Array, randomized)
+
+n_particles = 1000
+
+maxiters = 1000
+
+n = 1
+
+lb = @SArray ones(n)
+
+lb = -10*lb
+
+ub = @SArray ones(n)
+
+ub = 10*ub
+
+
+optprob = OptimizationProblem(loss_neuralode, [1.0], (prob, t); lb, ub)
+
+using PSOGPU
+using CUDA
+
+
+gbest, particles = PSOGPU.init_particles(optprob, n_particles)
+
+gpu_data = cu([SVector{length(prob.u0), eltype(prob.u0)}(@view data[:, i]) for i in 1:length(t)])
+
+
+gpu_particles = cu(particles)
+
+
+@time gsol = PSOGPU.parameter_estim_ode!(prob,
+    gpu_particles,
+    gbest,
+    gpu_data,
+    lb,
+    ub; saveat = t, dt = 0.1)
+
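+# NOTE: a sketch (commented out, not executed) of how this estimation might be
+# driven through the objective-based API introduced in src/ode_pso.jl; the exact
+# argument handling of `build_ode_objective` is assumed from the diff above.
+#   optprob_ode = PSOGPU.build_ode_objective(prob, GPUTsit5(), gpu_data, n_particles)
+#   gsol = PSOGPU.parameter_estim_ode!(optprob_ode, gpu_particles, gbest, lb, ub; maxiters = maxiters)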