From 27614b109206032ac23ad13e88cdc44e0adddb56 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebastian=20Miclu=C8=9Ba-C=C3=A2mpeanu?=
Date: Thu, 30 Oct 2025 01:22:53 +0200
Subject: [PATCH 1/3] Clean up SymbolicNeuralNetwork docs

---
 docs/src/symbolic_ude_tutorial.md | 24 +++++++++++++++++++-----
 1 file changed, 19 insertions(+), 5 deletions(-)

diff --git a/docs/src/symbolic_ude_tutorial.md b/docs/src/symbolic_ude_tutorial.md
index fd16016..09eddfb 100644
--- a/docs/src/symbolic_ude_tutorial.md
+++ b/docs/src/symbolic_ude_tutorial.md
@@ -60,7 +60,7 @@ Next, we can use [ModelingToolkitNeuralNets.jl](https://github.com/SciML/Modelin
 using ModelingToolkitNeuralNets
 
 sym_nn, θ = SymbolicNeuralNetwork(; nn_p_name = :θ, chain = nn_arch, n_input = 1, n_output = 1)
-sym_nn_func(x) = sym_nn([x], θ)[1]
+sym_nn_func(x) = sym_nn(x, θ)[1]
 ```
 
 Now we can create our UDE. We replace the (from now on unknown) function `v * (Y^n) / (K^n + Y^n)` with our symbolic neural network (which we let be a function of the variable `Y` only).
@@ -77,7 +77,7 @@ We can now fit our UDE model (including the neural network and the parameter d)
 function loss(ps, (oprob_base, set_ps, sample_t, sample_X, sample_Y))
     p = set_ps(oprob_base, ps)
     new_oprob = remake(oprob_base; p)
-    new_osol = solve(new_oprob, Tsit5(); saveat = sample_t, verbose = false, maxiters = 10000)
+    new_osol = solve(new_oprob, Tsit5(); saveat = sample_t, verbose = false)
     SciMLBase.successful_retcode(new_osol) || return Inf # Simulation failed -> Inf loss.
     x_error = sum((x_sim - x_data)^2 for (x_sim, x_data) in zip(new_osol[X], sample_X))
     y_error = sum((y_sim - y_data)^2 for (y_sim, y_data) in zip(new_osol[Y], sample_Y))
@@ -89,11 +89,12 @@ Next, we use [Optimization.jl](https://github.com/SciML/Optimization.jl) to crea
 
 ```@example symbolic_ude
 using Optimization
+using SymbolicIndexingInterface: setp_oop
 
 oprob_base = ODEProblem(xy_model_ude, u0, (0.0, tend))
-set_ps = ModelingToolkit.setp_oop(oprob_base, [d, θ...])
+set_ps = setp_oop(oprob_base, [d; θ])
 loss_params = (oprob_base, set_ps, sample_t, sample_X, sample_Y)
-ps_init = oprob_base.ps[[d, θ...]]
-of = OptimizationFunction{true}(loss, AutoForwardDiff())
+ps_init = oprob_base.ps[[d; θ]]
+of = OptimizationFunction(loss, AutoForwardDiff())
 opt_prob = OptimizationProblem(of, ps_init, loss_params)
 ```
 
@@ -112,3 +113,16 @@
 sol_fitted = solve(oprob_fitted, Tsit5())
 plot!(sol_true; lw = 4, la = 0.7, linestyle = :dash, idxs = [X, Y], color = [:blue :red], label = ["X (UDE)" "Y (UDE)"])
 ```
+
+We can also inspect what the function learned by the neural network looks like and how it compares
+to the known correct function:
+```@example symbolic_ude
+true_func(y) = 1.1 * (y^3) / (2^3 + y^3)
+fitted_func(y) = oprob_fitted.ps[sym_nn](y, oprob_fitted.ps[θ])[1]
+
+# Plot the true and fitted functions (we mostly recovered the correct function, though it is less accurate in some regions).
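+# (`true_func` is the replaced term `v * (Y^n) / (K^n + Y^n)` with v = 1.1, K = 2
+# and n = 3; `fitted_func` evaluates the trained network at the fitted parameters.)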
+plot(true_func, 0.0, 5.0; lw = 8, label = "True function", color = :lightblue)
+plot!(fitted_func, 0.0, 5.0; lw = 6, label = "Fitted function", color = :blue, la = 0.7, linestyle = :dash)
+```

From 69ffd1425632d40a836f3c6f9ede11a1cbee0c7d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebastian=20Miclu=C8=9Ba-C=C3=A2mpeanu?=
Date: Thu, 30 Oct 2025 01:24:38 +0200
Subject: [PATCH 2/3] Simplify deps in tests

---
 test/lotka_volterra.jl | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/test/lotka_volterra.jl b/test/lotka_volterra.jl
index 5cfac18..faecc0d 100644
--- a/test/lotka_volterra.jl
+++ b/test/lotka_volterra.jl
@@ -2,10 +2,9 @@ using Test
 using JET
 using ModelingToolkitNeuralNets
 using ModelingToolkit
-using ModelingToolkitStandardLibrary.Blocks
 using OrdinaryDiffEqVerner
 using SymbolicIndexingInterface
-using Optimization
+using OptimizationBase
 using OptimizationOptimisers: Adam
 using SciMLStructures
 using SciMLStructures: Tunable, canonicalize
@@ -19,7 +18,7 @@ using Lux
 
 function lotka_ude(chain)
     @variables t x(t)=3.1 y(t)=1.5
-    @parameters α=1.3 [tunable=false] δ=1.8 [tunable=false]
+    @parameters α=1.3 [tunable = false] δ=1.8 [tunable = false]
     Dt = ModelingToolkit.D_nounits
 
     @named nn = NeuralNetworkBlock(2, 2; chain, rng = StableRNG(42))
@@ -36,7 +35,7 @@ end
 
 function lotka_true()
     @variables t x(t)=3.1 y(t)=1.5
-    @parameters α=1.3 [tunable=false] β=0.9 γ=0.8 δ=1.8 [tunable=false]
+    @parameters α=1.3 [tunable = false] β=0.9 γ=0.8 δ=1.8 [tunable = false]
     Dt = ModelingToolkit.D_nounits
 
     eqs = [
@@ -133,7 +132,7 @@ res_sol = solve(res_prob, Vern9(), abstol = 1e-8, reltol = 1e-8, saveat = ts)
 
 function lotka_ude2()
     @variables t x(t)=3.1 y(t)=1.5 pred(t)[1:2]
-    @parameters α=1.3 [tunable=false] δ=1.8 [tunable=false]
+    @parameters α=1.3 [tunable = false] δ=1.8 [tunable = false]
     chain = multi_layer_feed_forward(2, 2; width = 5, initial_scaling_factor = 1)
     NN, p = SymbolicNeuralNetwork(; chain, n_input = 2, n_output = 2, rng = StableRNG(42))
     Dt = ModelingToolkit.D_nounits

From 84c173b38f85671dd8db52b97b9c58f8db4f95bd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebastian=20Miclu=C8=9Ba-C=C3=A2mpeanu?=
Date: Thu, 30 Oct 2025 01:24:54 +0200
Subject: [PATCH 3/3] Consolidate reported issues

---
 ...{scalar_dispatch.jl => reported_issues.jl} | 54 ++++++++++++-------
 test/runtests.jl                              |  2 +-
 2 files changed, 36 insertions(+), 20 deletions(-)
 rename test/{scalar_dispatch.jl => reported_issues.jl} (53%)

diff --git a/test/scalar_dispatch.jl b/test/reported_issues.jl
similarity index 53%
rename from test/scalar_dispatch.jl
rename to test/reported_issues.jl
index 06f1a52..7e68cf0 100644
--- a/test/scalar_dispatch.jl
+++ b/test/reported_issues.jl
@@ -1,13 +1,7 @@
-using Test
-using ModelingToolkitNeuralNets
-using ModelingToolkit
-using Lux
-using StableRNGs
+using Test, Lux, ModelingToolkitNeuralNets, StableRNGs, ModelingToolkit
 using OrdinaryDiffEqVerner
 
-# Test scalar dispatch for SymbolicNeuralNetwork
-# This tests the fix for issue #83
-@testset "Scalar dispatch" begin
+@testset "Scalar dispatch (issue #83)" begin
     # Create a simple UDE with scalar inputs
     @variables t X(t) Y(t)
     @parameters d
@@ -27,8 +21,8 @@ using OrdinaryDiffEqVerner
     # Now can use: sym_nn(Y, θ)[1]
     Dt = ModelingToolkit.D_nounits
     eqs_ude = [
-        Dt(X) ~ sym_nn(Y, θ)[1] - d*X,
-        Dt(Y) ~ X - d*Y
+        Dt(X) ~ sym_nn(Y, θ)[1] - d * X,
+        Dt(Y) ~ X - d * Y
     ]
 
     @named sys = System(eqs_ude, ModelingToolkit.t_nounits)
@@ -37,9 +31,8 @@ using OrdinaryDiffEqVerner
 
     # Test that the system can be created and solved
     prob = ODEProblem{true,
        SciMLBase.FullSpecialize}(
         sys_compiled,
-        [X => 1.0, Y => 1.0],
-        (0.0, 1.0),
-        [d => 0.1]
+        [X => 1.0, Y => 1.0, d => 0.1],
+        (0.0, 1.0)
     )
 
     sol = solve(prob, Vern9(), abstol = 1e-8, reltol = 1e-8)
@@ -48,8 +41,8 @@ using OrdinaryDiffEqVerner
 
     # Also test that the old array syntax still works
     eqs_ude_old = [
-        Dt(X) ~ sym_nn([Y], θ)[1] - d*X,
-        Dt(Y) ~ X - d*Y
+        Dt(X) ~ sym_nn([Y], θ)[1] - d * X,
+        Dt(Y) ~ X - d * Y
     ]
 
     @named sys_old = System(eqs_ude_old, ModelingToolkit.t_nounits)
@@ -57,9 +50,8 @@ using OrdinaryDiffEqVerner
 
     prob_old = ODEProblem{true, SciMLBase.FullSpecialize}(
         sys_old_compiled,
-        [X => 1.0, Y => 1.0],
-        (0.0, 1.0),
-        [d => 0.1]
+        [X => 1.0, Y => 1.0, d => 0.1],
+        (0.0, 1.0)
     )
 
     sol_old = solve(prob_old, Vern9(), abstol = 1e-8, reltol = 1e-8)
@@ -67,5 +59,29 @@ using OrdinaryDiffEqVerner
     @test SciMLBase.successful_retcode(sol_old)
 
     # Both solutions should be the same
-    @test sol.u ≈ sol_old.u
+    @test sol.u == sol_old.u
+end
+
+@testset "Issue #58" begin
+    # Preparation
+    rng = StableRNG(123)
+    chain = Lux.Chain(
+        Lux.Dense(1 => 3, Lux.softplus, use_bias = false),
+        Lux.Dense(3 => 3, Lux.softplus, use_bias = false),
+        Lux.Dense(3 => 1, Lux.sigmoid_fast, use_bias = false)
+    )
+
+    # Default names.
+    NN, NN_p = SymbolicNeuralNetwork(; chain, n_input = 1, n_output = 1, rng)
+    @test ModelingToolkit.getname(NN) == :nn_name
+    @test ModelingToolkit.getname(NN_p) == :p
+
+    # Trying to set specific names.
+    nn_name = :custom_nn_name
+    nn_p_name = :custom_nn_p_name
+    NN, NN_p = SymbolicNeuralNetwork(;
+        chain, n_input = 1, n_output = 1, rng, nn_name, nn_p_name)
+
+    @test ModelingToolkit.getname(NN)==nn_name broken=true # currently returns :nn_name, should be :custom_nn_name
+    @test ModelingToolkit.getname(NN_p) == nn_p_name
 end

diff --git a/test/runtests.jl b/test/runtests.jl
index 5186285..79497a8 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -6,5 +6,5 @@ using SafeTestsets
     @safetestset "QA" include("qa.jl")
     @safetestset "Basic" include("lotka_volterra.jl")
    @safetestset "MTK model macro compatibility" include("macro.jl")
-    @safetestset "Scalar dispatch" include("scalar_dispatch.jl")
+    @safetestset "Reported issues" include("reported_issues.jl")
 end
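
A minimal usage sketch of the call-style change documented and tested above
(the chain architecture and RNG seed here are illustrative, not taken from
the patches):

    using ModelingToolkitNeuralNets, ModelingToolkit, Lux, StableRNGs

    # A 1-input/1-output network, mirroring the tutorial example.
    chain = Lux.Chain(Lux.Dense(1 => 3, Lux.softplus), Lux.Dense(3 => 1))
    sym_nn, θ = SymbolicNeuralNetwork(; chain, n_input = 1, n_output = 1,
        rng = StableRNG(42))

    @variables t Y(t)
    scalar_call = sym_nn(Y, θ)[1]   # scalar input, per the issue #83 fix
    vector_call = sym_nn([Y], θ)[1] # pre-existing array syntax
    # The "Scalar dispatch" testset verifies both forms produce ODE systems
    # with identical solutions.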