Skip to content

Commit 8c7a67c

Browse files
Merge pull request #86 from SciML/smc/cleanup
Cleanup for docs and tests
2 parents 6bdf82a + 44537a7 commit 8c7a67c

File tree

5 files changed

+60
-33
lines changed

5 files changed

+60
-33
lines changed

Project.toml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ Lux = "1.14"
2323
LuxCore = "1.2"
2424
ModelingToolkit = "10"
2525
ModelingToolkitStandardLibrary = "2.24"
26-
Optimization = "4"
26+
OptimizationBase = "4.0.2"
2727
OptimizationOptimisers = "0.3"
2828
OrdinaryDiffEqVerner = "1.2"
2929
Random = "1.10"
@@ -43,7 +43,7 @@ Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
4343
DifferentiationInterface = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63"
4444
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
4545
JET = "c3a54625-cd67-489e-a8e7-0a5a0ff4e31b"
46-
Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba"
46+
OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
4747
OptimizationOptimisers = "42dfb2eb-d2b4-4451-abcd-913932933ac1"
4848
OrdinaryDiffEqVerner = "79d7bb75-1356-48c1-b8c0-6832512096c2"
4949
ModelingToolkitStandardLibrary = "16a59e39-deab-5bd0-87e4-056b12336739"
@@ -59,5 +59,5 @@ Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
5959
[targets]
6060
test = ["Aqua", "JET", "Test", "OrdinaryDiffEqVerner", "DifferentiationInterface",
6161
"SciMLSensitivity", "Zygote", "ForwardDiff", "ModelingToolkitStandardLibrary",
62-
"Optimization", "OptimizationOptimisers", "SafeTestsets", "SciMLStructures",
62+
"OptimizationBase", "OptimizationOptimisers", "SafeTestsets", "SciMLStructures",
6363
"StableRNGs", "Statistics", "SymbolicIndexingInterface"]

docs/src/symbolic_ude_tutorial.md

Lines changed: 17 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ Next, we can use [ModelingToolkitNeuralNets.jl](https://github.com/SciML/Modelin
6060
using ModelingToolkitNeuralNets
6161
sym_nn,
6262
θ = SymbolicNeuralNetwork(; nn_p_name = :θ, chain = nn_arch, n_input = 1, n_output = 1)
63-
sym_nn_func(x) = sym_nn([x], θ)[1]
63+
sym_nn_func(x) = sym_nn(x, θ)[1]
6464
```
6565

6666
Now we can create our UDE. We replace the (from now on unknown) function `v * (Y^n) / (K^n + Y^n)` with our symbolic neural network (which we let be a function of the variable `Y` only).
@@ -77,7 +77,7 @@ We can now fit our UDE model (including the neural network and the parameter d)
7777
function loss(ps, (oprob_base, set_ps, sample_t, sample_X, sample_Y))
7878
p = set_ps(oprob_base, ps)
7979
new_oprob = remake(oprob_base; p)
80-
new_osol = solve(new_oprob, Tsit5(); saveat = sample_t, verbose = false, maxiters = 10000)
80+
new_osol = solve(new_oprob, Tsit5(); saveat = sample_t, verbose = false)
8181
SciMLBase.successful_retcode(new_osol) || return Inf # Simulation failed -> Inf loss.
8282
x_error = sum((x_sim - x_data)^2 for (x_sim, x_data) in zip(new_osol[X], sample_X))
8383
y_error = sum((y_sim - y_data)^2 for (y_sim, y_data) in zip(new_osol[Y], sample_Y))
@@ -89,11 +89,12 @@ Next, we use [Optimization.jl](https://github.com/SciML/Optimization.jl) to crea
8989

9090
```@example symbolic_ude
9191
using Optimization
92+
using SymbolicIndexingInterface: setp_oop
9293
oprob_base = ODEProblem(xy_model_ude, u0, (0.0, tend))
93-
set_ps = ModelingToolkit.setp_oop(oprob_base, [d, θ...])
94+
set_ps = setp_oop(oprob_base, [d; θ])
9495
loss_params = (oprob_base, set_ps, sample_t, sample_X, sample_Y)
95-
ps_init = oprob_base.ps[[d, θ...]]
96-
of = OptimizationFunction{true}(loss, AutoForwardDiff())
96+
ps_init = oprob_base.ps[[d; θ]]
97+
of = OptimizationFunction(loss, AutoForwardDiff())
9798
opt_prob = OptimizationProblem(of, ps_init, loss_params)
9899
```
99100

@@ -112,3 +113,14 @@ sol_fitted = solve(oprob_fitted, Tsit5())
112113
plot!(sol_true; lw = 4, la = 0.7, linestyle = :dash, idxs = [X, Y], color = [:blue :red],
113114
label = ["X (UDE)" "Y (UDE)"])
114115
```
116+
117+
We can also inspect what the function described by the neural network looks like and how it compares
118+
to the known correct function
119+
```@example symbolic_ude
120+
true_func(y) = 1.1 * (y^3) / (2^3 + y^3)
121+
fitted_func(y) = oprob_fitted.ps[sym_nn](y, oprob_fitted.ps[θ])[1]
122+
123+
# Plots the true and fitted functions (we mostly got the correct one, but less accurate in some regions).
124+
plot(true_func, 0.0, 5.0; lw=8, label="True function", color=:lightblue)
125+
plot!(fitted_func, 0.0, 5.0; lw=6, label="Fitted function", color=:blue, la=0.7, linestyle=:dash)
126+
```

test/lotka_volterra.jl

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2,10 +2,9 @@ using Test
22
using JET
33
using ModelingToolkitNeuralNets
44
using ModelingToolkit
5-
using ModelingToolkitStandardLibrary.Blocks
65
using OrdinaryDiffEqVerner
76
using SymbolicIndexingInterface
8-
using Optimization
7+
using OptimizationBase
98
using OptimizationOptimisers: Adam
109
using SciMLStructures
1110
using SciMLStructures: Tunable, canonicalize
@@ -19,7 +18,7 @@ using Lux
1918

2019
function lotka_ude(chain)
2120
@variables t x(t)=3.1 y(t)=1.5
22-
@parameters α=1.3 [tunable=false] δ=1.8 [tunable=false]
21+
@parameters α=1.3 [tunable = false] δ=1.8 [tunable = false]
2322
Dt = ModelingToolkit.D_nounits
2423

2524
@named nn = NeuralNetworkBlock(2, 2; chain, rng = StableRNG(42))
@@ -36,7 +35,7 @@ end
3635

3736
function lotka_true()
3837
@variables t x(t)=3.1 y(t)=1.5
39-
@parameters α=1.3 [tunable=false] β=0.9 γ=0.8 δ=1.8 [tunable=false]
38+
@parameters α=1.3 [tunable = false] β=0.9 γ=0.8 δ=1.8 [tunable = false]
4039
Dt = ModelingToolkit.D_nounits
4140

4241
eqs = [
@@ -133,7 +132,7 @@ res_sol = solve(res_prob, Vern9(), abstol = 1e-8, reltol = 1e-8, saveat = ts)
133132

134133
function lotka_ude2()
135134
@variables t x(t)=3.1 y(t)=1.5 pred(t)[1:2]
136-
@parameters α=1.3 [tunable=false] δ=1.8 [tunable=false]
135+
@parameters α=1.3 [tunable = false] δ=1.8 [tunable = false]
137136
chain = multi_layer_feed_forward(2, 2; width = 5, initial_scaling_factor = 1)
138137
NN, p = SymbolicNeuralNetwork(; chain, n_input = 2, n_output = 2, rng = StableRNG(42))
139138
Dt = ModelingToolkit.D_nounits
Lines changed: 35 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,7 @@
1-
using Test
2-
using ModelingToolkitNeuralNets
3-
using ModelingToolkit
4-
using Lux
5-
using StableRNGs
1+
using Test, Lux, ModelingToolkitNeuralNets, StableRNGs, ModelingToolkit
62
using OrdinaryDiffEqVerner
73

8-
# Test scalar dispatch for SymbolicNeuralNetwork
9-
# This tests the fix for issue #83
10-
@testset "Scalar dispatch" begin
4+
@testset "Scalar dispatch (issue #83)" begin
115
# Create a simple UDE with scalar inputs
126
@variables t X(t) Y(t)
137
@parameters d
@@ -27,8 +21,8 @@ using OrdinaryDiffEqVerner
2721
# Now can use: sym_nn(Y, θ)[1]
2822
Dt = ModelingToolkit.D_nounits
2923
eqs_ude = [
30-
Dt(X) ~ sym_nn(Y, θ)[1] - d*X,
31-
Dt(Y) ~ X - d*Y
24+
Dt(X) ~ sym_nn(Y, θ)[1] - d * X,
25+
Dt(Y) ~ X - d * Y
3226
]
3327

3428
@named sys = System(eqs_ude, ModelingToolkit.t_nounits)
@@ -37,9 +31,8 @@ using OrdinaryDiffEqVerner
3731
# Test that the system can be created and solved
3832
prob = ODEProblem{true, SciMLBase.FullSpecialize}(
3933
sys_compiled,
40-
[X => 1.0, Y => 1.0],
41-
(0.0, 1.0),
42-
[d => 0.1]
34+
[X => 1.0, Y => 1.0, d => 0.1],
35+
(0.0, 1.0)
4336
)
4437

4538
sol = solve(prob, Vern9(), abstol = 1e-8, reltol = 1e-8)
@@ -48,24 +41,47 @@ using OrdinaryDiffEqVerner
4841

4942
# Also test that the old array syntax still works
5043
eqs_ude_old = [
51-
Dt(X) ~ sym_nn([Y], θ)[1] - d*X,
52-
Dt(Y) ~ X - d*Y
44+
Dt(X) ~ sym_nn([Y], θ)[1] - d * X,
45+
Dt(Y) ~ X - d * Y
5346
]
5447

5548
@named sys_old = System(eqs_ude_old, ModelingToolkit.t_nounits)
5649
sys_old_compiled = mtkcompile(sys_old)
5750

5851
prob_old = ODEProblem{true, SciMLBase.FullSpecialize}(
5952
sys_old_compiled,
60-
[X => 1.0, Y => 1.0],
61-
(0.0, 1.0),
62-
[d => 0.1]
53+
[X => 1.0, Y => 1.0, d => 0.1],
54+
(0.0, 1.0)
6355
)
6456

6557
sol_old = solve(prob_old, Vern9(), abstol = 1e-8, reltol = 1e-8)
6658

6759
@test SciMLBase.successful_retcode(sol_old)
6860

6961
# Both solutions should be the same
70-
@test sol.u ≈ sol_old.u
62+
@test sol.u == sol_old.u
63+
end
64+
65+
@testset "Issue #58" begin
66+
# Preparation
67+
rng = StableRNG(123)
68+
chain = Lux.Chain(
69+
Lux.Dense(1 => 3, Lux.softplus, use_bias = false),
70+
Lux.Dense(3 => 3, Lux.softplus, use_bias = false),
71+
Lux.Dense(3 => 1, Lux.sigmoid_fast, use_bias = false)
72+
)
73+
74+
# Default names.
75+
NN, NN_p = SymbolicNeuralNetwork(; chain, n_input = 1, n_output = 1, rng)
76+
@test ModelingToolkit.getname(NN) == :nn_name
77+
@test ModelingToolkit.getname(NN_p) == :p
78+
79+
# Trying to set specific names.
80+
nn_name = :custom_nn_name
81+
nn_p_name = :custom_nn_p_name
82+
NN, NN_p = SymbolicNeuralNetwork(;
83+
chain, n_input = 1, n_output = 1, rng, nn_name, nn_p_name)
84+
85+
@test ModelingToolkit.getname(NN)==nn_name broken=true # :nn_name # Should be :custom_nn_name
86+
@test ModelingToolkit.getname(NN_p) == nn_p_name
7187
end

test/runtests.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,5 +6,5 @@ using SafeTestsets
66
@safetestset "QA" include("qa.jl")
77
@safetestset "Basic" include("lotka_volterra.jl")
88
@safetestset "MTK model macro compatibility" include("macro.jl")
9-
@safetestset "Scalar dispatch" include("scalar_dispatch.jl")
9+
@safetestset "Reported issues" include("reported_issues.jl")
1010
end

0 commit comments

Comments
 (0)