diff --git a/docs/Project.toml b/docs/Project.toml index a98646942..8c2f09069 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -12,11 +12,13 @@ DocumenterInterLinks = "d12716ef-a0f6-4df4-a9f1-a5a34e75c656" IncompleteLU = "40713840-3770-5561-ab4c-a76e7d0d7895" InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240" LinearSolve = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae" +LineSearch = "87fe0de2-c867-4266-b59a-2f0a94fc965b" NonlinearSolve = "8913a72c-1f9b-4ce2-8d82-65094dcecaec" NonlinearSolveBase = "be0214bd-f91f-a760-ac4e-3421ce2b2da0" NonlinearSolveFirstOrder = "5959db7a-ea39-4486-b5fe-2dd0bf03d60d" NonlinearSolveHomotopyContinuation = "2ac3b008-d579-4536-8c91-a1a5998c2f8b" NonlinearSolveQuasiNewton = "9a2c21bd-3a47-402d-9113-8faf9a0ee114" +NonlinearSolveSciPy = "4827a3aa-8a82-4c61-8bd0-3c7d3e464ee5" NonlinearSolveSpectralMethods = "26075421-4e9a-44e1-8bd1-420ed7ad02b2" OrdinaryDiffEqTsit5 = "b1df2697-797e-41e3-8120-5422d3b24e4a" PETSc = "ace2c81b-2b5f-4b1e-a30d-d662738edfe0" @@ -44,11 +46,13 @@ DocumenterInterLinks = "1.0.0" IncompleteLU = "0.2" InteractiveUtils = "<0.0.1, 1" LinearSolve = "2, 3" +LineSearch = "0.1" NonlinearSolve = "4" NonlinearSolveBase = "1" NonlinearSolveFirstOrder = "1" NonlinearSolveHomotopyContinuation = "0.1" NonlinearSolveQuasiNewton = "1" +NonlinearSolveSciPy = "1" NonlinearSolveSpectralMethods = "1" OrdinaryDiffEqTsit5 = "1.1.0" PETSc = "0.3" diff --git a/docs/make.jl b/docs/make.jl index d2915275a..ce2876953 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -5,8 +5,8 @@ using Sundials using NonlinearSolveBase, SciMLBase, DiffEqBase using SimpleNonlinearSolve, BracketingNonlinearSolve using NonlinearSolveFirstOrder, NonlinearSolveQuasiNewton, NonlinearSolveSpectralMethods -using NonlinearSolveHomotopyContinuation -using SciMLJacobianOperators +using NonlinearSolveHomotopyContinuation, NonlinearSolveSciPy +using SciMLJacobianOperators, LineSearch using NonlinearSolve, SteadyStateDiffEq cp( @@ -37,8 +37,8 @@ makedocs(; SimpleNonlinearSolve, BracketingNonlinearSolve, NonlinearSolveFirstOrder, NonlinearSolveQuasiNewton, NonlinearSolveSpectralMethods, NonlinearSolveHomotopyContinuation, - Sundials, - SciMLJacobianOperators, + Sundials, LineSearch, + SciMLJacobianOperators, NonlinearSolveSciPy, NonlinearSolve, SteadyStateDiffEq ], clean = true, diff --git a/docs/pages.jl b/docs/pages.jl index 5a95aed37..23e449bba 100644 --- a/docs/pages.jl +++ b/docs/pages.jl @@ -48,6 +48,7 @@ pages = [ "api/nlsolve.md", "api/nlsolvers.md", "api/petsc.md", + "api/scipy.md", "api/siamfanlequations.md", "api/speedmapping.md", "api/sundials.md", diff --git a/docs/src/api/SciMLJacobianOperators.md b/docs/src/api/SciMLJacobianOperators.md index 91b7f61df..79b576ab8 100644 --- a/docs/src/api/SciMLJacobianOperators.md +++ b/docs/src/api/SciMLJacobianOperators.md @@ -10,7 +10,7 @@ operator built on top of DifferentiationInterface.jl. ```julia import Pkg Pkg.add("SciMLJacobianOperators") -using SciMLJacobianOperators +import SciMLJacobianOperators ``` ## Jacobian API diff --git a/docs/src/api/fastlevenbergmarquardt.md b/docs/src/api/fastlevenbergmarquardt.md index 423c70bbc..c8f37f291 100644 --- a/docs/src/api/fastlevenbergmarquardt.md +++ b/docs/src/api/fastlevenbergmarquardt.md @@ -5,9 +5,10 @@ interface.
Note that these solvers do not come by default, and thus one needs to install the package before using these solvers: ```julia -using Pkg +import Pkg Pkg.add("FastLevenbergMarquardt") -using FastLevenbergMarquardt, NonlinearSolve +import FastLevenbergMarquardt +import NonlinearSolve as NLS ``` ## Solver API diff --git a/docs/src/api/fixedpointacceleration.md b/docs/src/api/fixedpointacceleration.md index 38e6ae0b5..178f23c4f 100644 --- a/docs/src/api/fixedpointacceleration.md +++ b/docs/src/api/fixedpointacceleration.md @@ -5,9 +5,10 @@ interface. Note that these solvers do not come by default, and thus one needs to install the package before using these solvers: ```julia -using Pkg +import Pkg Pkg.add("FixedPointAcceleration") -using FixedPointAcceleration, NonlinearSolve +import FixedPointAcceleration +import NonlinearSolve as NLS ``` ## Solver API diff --git a/docs/src/api/homotopycontinuation.md b/docs/src/api/homotopycontinuation.md index 783eb6883..eabacaabd 100644 --- a/docs/src/api/homotopycontinuation.md +++ b/docs/src/api/homotopycontinuation.md @@ -5,9 +5,10 @@ HomotopyContinuation.jl. This solver is not included by default and needs to be installed separately: ```julia -using Pkg +import Pkg Pkg.add("NonlinearSolveHomotopyContinuation") -using NonlinearSolveHomotopyContinuation, NonlinearSolve +import NonlinearSolveHomotopyContinuation +import NonlinearSolve as NLS ``` ## Solver API diff --git a/docs/src/api/leastsquaresoptim.md b/docs/src/api/leastsquaresoptim.md index 0581ee7c2..d4f127628 100644 --- a/docs/src/api/leastsquaresoptim.md +++ b/docs/src/api/leastsquaresoptim.md @@ -5,9 +5,10 @@ interface. Note that these solvers do not come by default, and thus one needs to install the package before using these solvers: ```julia -using Pkg +import Pkg Pkg.add("LeastSquaresOptim") -using LeastSquaresOptim, NonlinearSolve +import LeastSquaresOptim +import NonlinearSolve as NLS ``` ## Solver API diff --git a/docs/src/api/minpack.md b/docs/src/api/minpack.md index d55491c0e..c4f200ace 100644 --- a/docs/src/api/minpack.md +++ b/docs/src/api/minpack.md @@ -5,9 +5,10 @@ these solvers do not come by default, and thus one needs to install the package before using these solvers: ```julia -using Pkg +import Pkg Pkg.add("MINPACK") -using MINPACK, NonlinearSolve +import MINPACK +import NonlinearSolve as NLS ``` ## Solver API diff --git a/docs/src/api/nlsolve.md b/docs/src/api/nlsolve.md index bea379953..a4cec0ef5 100644 --- a/docs/src/api/nlsolve.md +++ b/docs/src/api/nlsolve.md @@ -5,9 +5,10 @@ that these solvers do not come by default, and thus one needs to install the package before using these solvers: ```julia -using Pkg +import Pkg Pkg.add("NLsolve") -using NLsolve, NonlinearSolve +import NLsolve +import NonlinearSolve as NLS ``` ## Solver API diff --git a/docs/src/api/nlsolvers.md b/docs/src/api/nlsolvers.md index a1cc72656..deb5afeb2 100644 --- a/docs/src/api/nlsolvers.md +++ b/docs/src/api/nlsolvers.md @@ -5,9 +5,10 @@ that these solvers do not come by default, and thus one needs to install the package before using these solvers: ```julia -using Pkg +import Pkg Pkg.add("NLSolvers") -using NLSolvers, NonlinearSolve +import NLSolvers +import NonlinearSolve as NLS ``` ## Solver API diff --git a/docs/src/api/petsc.md b/docs/src/api/petsc.md index 7593ac5dd..05ac23827 100644 --- a/docs/src/api/petsc.md +++ b/docs/src/api/petsc.md @@ -5,9 +5,10 @@ that these solvers do not come by default, and thus one needs to install the package before using these solvers: ```julia -using Pkg +import Pkg Pkg.add("PETSc") -using PETSc, NonlinearSolve +import PETSc +import NonlinearSolve as NLS
``` ## Solver API diff --git a/docs/src/api/scipy.md b/docs/src/api/scipy.md new file mode 100644 index 000000000..271b5591c --- /dev/null +++ b/docs/src/api/scipy.md @@ -0,0 +1,22 @@ +# SciPy + +This is an extension for importing solvers from +[SciPy](https://scipy.org/) into the SciML +interface. Note that these solvers do not come by default, and thus one needs to install +the package before using these solvers: + +```julia +import Pkg +Pkg.add("NonlinearSolveSciPy") +import NonlinearSolveSciPy as NLSP +``` + +Note that using this package requires Python and SciPy to be available via PythonCall.jl. + +## Solver API + +```@docs +NonlinearSolveSciPy.SciPyLeastSquares +NonlinearSolveSciPy.SciPyRoot +NonlinearSolveSciPy.SciPyRootScalar +``` \ No newline at end of file diff --git a/docs/src/api/siamfanlequations.md b/docs/src/api/siamfanlequations.md index 5ee36ba12..d1b4db535 100644 --- a/docs/src/api/siamfanlequations.md +++ b/docs/src/api/siamfanlequations.md @@ -6,9 +6,10 @@ interface. Note that these solvers do not come by default, and thus one needs to install the package before using these solvers: ```julia -using Pkg +import Pkg Pkg.add("SIAMFANLEquations") -using SIAMFANLEquations, NonlinearSolve +import SIAMFANLEquations +import NonlinearSolve as NLS ``` ## Solver API diff --git a/docs/src/api/speedmapping.md b/docs/src/api/speedmapping.md index 76b13ba18..3a99801fc 100644 --- a/docs/src/api/speedmapping.md +++ b/docs/src/api/speedmapping.md @@ -5,9 +5,10 @@ interface. Note that these solvers do not come by default, and thus one needs to install the package before using these solvers: ```julia -using Pkg +import Pkg Pkg.add("SpeedMapping") -using SpeedMapping, NonlinearSolve +import SpeedMapping +import NonlinearSolve as NLS ``` ## Solver API diff --git a/docs/src/api/sundials.md b/docs/src/api/sundials.md index 80a6b367d..82f6f0302 100644 --- a/docs/src/api/sundials.md +++ b/docs/src/api/sundials.md @@ -5,9 +5,9 @@ Note that these solvers do not come by default, and thus one needs to install the package before using these solvers: ```julia -using Pkg +import Pkg Pkg.add("Sundials") -using Sundials +import Sundials ``` These methods can be used independently of the rest of NonlinearSolve.jl. diff --git a/docs/src/basics/diagnostics_api.md b/docs/src/basics/diagnostics_api.md index b3f8dea4c..716c7faeb 100644 --- a/docs/src/basics/diagnostics_api.md +++ b/docs/src/basics/diagnostics_api.md @@ -27,7 +27,7 @@ Note that you will have to restart Julia to disable the timer outputs once enabled. ## Example Usage ```@example diagnostics_example -using NonlinearSolve +import NonlinearSolve as NLS function nlfunc(resid, u0, p) resid[1] = u0[1] * u0[1] - p @@ -36,23 +36,23 @@ nothing end -prob = NonlinearProblem(nlfunc, [1.0, 3.0, 5.0], 2.0) +prob = NLS.NonlinearProblem(nlfunc, [1.0, 3.0, 5.0], 2.0) -solve(prob) +NLS.solve(prob) ``` This produced the output, but it is hard to diagnose what is going on. We can turn on the trace to see what is happening: ```@example diagnostics_example -solve(prob; show_trace = Val(true), trace_level = TraceAll(10)) +NLS.solve(prob; show_trace = Val(true), trace_level = NLS.TraceAll(10)) nothing; # hide ``` You can also store the trace in the solution object: ```@example diagnostics_example -sol = solve(prob; trace_level = TraceAll(), store_trace = Val(true)); +sol = NLS.solve(prob; trace_level = NLS.TraceAll(), store_trace = Val(true)); sol.trace ``` @@ -62,16 +62,16 @@ to use the `init` and `solve!` API for this.
The `TimerOutput` will be present in `cache.timer`. However, note that for poly-algorithms this is currently not implemented. ```@example diagnostics_example -cache = init(prob, NewtonRaphson(); show_trace = Val(true)); -solve!(cache) +cache = NLS.init(prob, NLS.NewtonRaphson(); show_trace = Val(true)); +NLS.solve!(cache) cache.timer ``` Let's try for some other solver: ```@example diagnostics_example -cache = init(prob, DFSane(); show_trace = Val(true), trace_level = TraceMinimal(50)); -solve!(cache) +cache = NLS.init(prob, NLS.DFSane(); show_trace = Val(true), trace_level = NLS.TraceMinimal(50)); +NLS.solve!(cache) cache.timer ``` diff --git a/docs/src/basics/faq.md b/docs/src/basics/faq.md index 8411477fe..da2cbf8ba 100644 --- a/docs/src/basics/faq.md +++ b/docs/src/basics/faq.md @@ -6,7 +6,8 @@ This is addressed in a [Twitter thread with the author of the improved fzero](ht On the test example: ```@example -using NonlinearSolve, BenchmarkTools +import NonlinearSolve as NLS +import BenchmarkTools const N = 100_000; levels = 1.5 .* rand(N); @@ -15,23 +16,23 @@ myfun(x, lv) = x * sin(x) - lv function f(out, levels, u0) for i in 1:N - out[i] = solve( - IntervalNonlinearProblem{false}( - IntervalNonlinearFunction{false}(myfun), u0, levels[i]), - Falsi()).u + out[i] = NLS.solve( + NLS.IntervalNonlinearProblem{false}( + NLS.IntervalNonlinearFunction{false}(myfun), u0, levels[i]), + NLS.Falsi()).u end end function f2(out, levels, u0) for i in 1:N - out[i] = solve( - NonlinearProblem{false}(NonlinearFunction{false}(myfun), u0, levels[i]), - SimpleNewtonRaphson()).u + out[i] = NLS.solve( + NLS.NonlinearProblem{false}(NLS.NonlinearFunction{false}(myfun), u0, levels[i]), + NLS.SimpleNewtonRaphson()).u end end -@btime f(out, levels, (0.0, 2.0)) -@btime f2(out, levels, 1.0) +BenchmarkTools.@btime f(out, levels, (0.0, 2.0)) +BenchmarkTools.@btime f2(out, levels, 1.0) ``` MATLAB 2022a achieves 1.66s. Try this code yourself: we receive 0.009 seconds, or a 184x @@ -46,7 +47,8 @@ input types. For example, consider this example taken from [this issue](https://github.com/SciML/NonlinearSolve.jl/issues/298) ```@example dual_error_faq -using NonlinearSolve, Random +import NonlinearSolve as NLS +import Random function fff_incorrect(var, p) v_true = [1.0, 0.1, 2.0, 0.5] @@ -56,11 +58,11 @@ end v_true = [1.0, 0.1, 2.0, 0.5] -v_init = v_true .+ randn!(similar(v_true)) * 0.1 +v_init = v_true .+ Random.randn!(similar(v_true)) * 0.1 -prob_oop = NonlinearLeastSquaresProblem{false}(fff_incorrect, v_init) +prob_oop = NLS.NonlinearLeastSquaresProblem{false}(fff_incorrect, v_init) try - sol = solve(prob_oop, LevenbergMarquardt(); maxiters = 10000, abstol = 1e-8) + sol = NLS.solve(prob_oop, NLS.LevenbergMarquardt(); maxiters = 10000, abstol = 1e-8) catch e @error e end ``` @@ -74,7 +76,8 @@ be a Dual number. This causes the error. To fix it: 1. Specify the `autodiff` to be `AutoFiniteDiff` ```@example dual_error_faq - sol = solve(prob_oop, LevenbergMarquardt(; autodiff = AutoFiniteDiff()); + import ADTypes + sol = NLS.solve(prob_oop, NLS.LevenbergMarquardt(; autodiff = ADTypes.AutoFiniteDiff()); maxiters = 10000, abstol = 1e-8) ``` @@ -91,8 +94,8 @@ be a Dual number. This causes the error.
To fix it: return xx - v_true end - prob_oop = NonlinearLeastSquaresProblem{false}(fff_correct, v_init) - sol = solve(prob_oop, LevenbergMarquardt(); maxiters = 10000, abstol = 1e-8) + prob_oop = NLS.NonlinearLeastSquaresProblem{false}(fff_correct, v_init) + sol = NLS.solve(prob_oop, NLS.LevenbergMarquardt(); maxiters = 10000, abstol = 1e-8) ``` ## I thought NonlinearSolve.jl was type-stable and fast. But it isn't, why? @@ -106,13 +109,15 @@ static arrays, ForwardDiff will create type unstable code and lead to dynamic dispatch internally. See this simple example: ```@example type_unstable -using NonlinearSolve, InteractiveUtils +import NonlinearSolve as NLS +import InteractiveUtils +import ADTypes f(u, p) = @. u^2 - p -prob = NonlinearProblem{false}(f, 1.0, 2.0) +prob = NLS.NonlinearProblem{false}(f, 1.0, 2.0) -@code_warntype solve(prob, NewtonRaphson()) +InteractiveUtils.@code_warntype NLS.solve(prob, NLS.NewtonRaphson()) nothing # hide ``` Notice that this was type-stable, since it is a scalar problem. Now what happens with static arrays: ```@example type_unstable -using StaticArrays +import StaticArrays -prob = NonlinearProblem{false}(f, @SVector([1.0, 2.0]), 2.0) +prob = NLS.NonlinearProblem{false}(f, StaticArrays.@SVector([1.0, 2.0]), 2.0) -@code_warntype solve(prob, NewtonRaphson()) +InteractiveUtils.@code_warntype NLS.solve(prob, NLS.NewtonRaphson()) nothing # hide ``` Again type-stable! Now let's try using a regular array: ```@example type_unstable -prob = NonlinearProblem(f, [1.0, 2.0], 2.0) +prob = NLS.NonlinearProblem(f, [1.0, 2.0], 2.0) -@code_warntype solve(prob, NewtonRaphson()) +InteractiveUtils.@code_warntype NLS.solve(prob, NLS.NewtonRaphson()) nothing # hide ``` @@ -142,10 +147,10 @@ it will be dynamic and lead to dynamic dispatch. To fix this, we directly specify the chunksize: ```@example type_unstable -@code_warntype solve( +InteractiveUtils.@code_warntype NLS.solve( prob, - NewtonRaphson(; - autodiff = AutoForwardDiff(; chunksize = NonlinearSolve.pickchunksize(prob.u0)) + NLS.NewtonRaphson(; + autodiff = ADTypes.AutoForwardDiff(; chunksize = NLS.pickchunksize(prob.u0)) ) ) nothing # hide ``` diff --git a/docs/src/native/steadystatediffeq.md b/docs/src/native/steadystatediffeq.md index 471fc3f01..cb0cb4741 100644 --- a/docs/src/native/steadystatediffeq.md +++ b/docs/src/native/steadystatediffeq.md @@ -6,9 +6,9 @@ interface. Note that these solvers do not come by default, and thus one needs to install the package before using these solvers: ```julia -using Pkg +import Pkg Pkg.add("SteadyStateDiffEq") -using SteadyStateDiffEq +import SteadyStateDiffEq as SSDE ``` These methods can be used independently of the rest of NonlinearSolve.jl. diff --git a/docs/src/solvers/steady_state_solvers.md b/docs/src/solvers/steady_state_solvers.md index 91530c448..3dddddd33 100644 --- a/docs/src/solvers/steady_state_solvers.md +++ b/docs/src/solvers/steady_state_solvers.md @@ -56,11 +56,13 @@ though often computationally more expensive than direct methods. Example usage: ```julia -using NonlinearSolve, SteadyStateDiffEq, OrdinaryDiffEq -sol = solve(prob, DynamicSS(Tsit5())) +import NonlinearSolve as NLS +import SteadyStateDiffEq as SSDE +import OrdinaryDiffEq as ODE +sol = NLS.solve(prob, SSDE.DynamicSS(ODE.Tsit5())) -using Sundials -sol = solve(prob, DynamicSS(CVODE_BDF()), dt = 1.0) +import Sundials +sol = NLS.solve(prob, SSDE.DynamicSS(Sundials.CVODE_BDF()), dt = 1.0) ```
!!! note diff --git a/docs/src/tutorials/code_optimization.md b/docs/src/tutorials/code_optimization.md index b7eb9c174..b26875ad4 100644 --- a/docs/src/tutorials/code_optimization.md +++ b/docs/src/tutorials/code_optimization.md @@ -31,21 +31,21 @@ systems. Take for example a prototypical small nonlinear solver code in its out-of-place form: ```@example small_opt -using NonlinearSolve +import NonlinearSolve as NLS f(u, p) = u .* u .- p u0 = [1.0, 1.0] p = 2.0 -prob = NonlinearProblem(f, u0, p) -sol = solve(prob, NewtonRaphson()) +prob = NLS.NonlinearProblem(f, u0, p) +sol = NLS.solve(prob, NLS.NewtonRaphson()) ``` We can use BenchmarkTools.jl to get more precise timings: ```@example small_opt -using BenchmarkTools +import BenchmarkTools -@benchmark solve(prob, NewtonRaphson()) +BenchmarkTools.@benchmark NLS.solve(prob, NLS.NewtonRaphson()) ``` Note that this way of writing the function is a shorthand for: @@ -70,8 +70,8 @@ function f(du, u, p) return nothing end -prob = NonlinearProblem(f, u0, p) -@benchmark sol = solve(prob, NewtonRaphson()) +prob = NLS.NonlinearProblem(f, u0, p) +BenchmarkTools.@benchmark sol = NLS.solve(prob, NLS.NewtonRaphson()) ``` Notice how much faster this already runs! We can make this code even simpler by using @@ -83,7 +83,7 @@ function f(du, u, p) return nothing end -@benchmark sol = solve(prob, NewtonRaphson()) +BenchmarkTools.@benchmark sol = NLS.solve(prob, NLS.NewtonRaphson()) ``` ## Further Optimizations for Small Nonlinear Solves with Static Arrays and SimpleNonlinearSolve @@ -110,9 +110,9 @@ arrays have their length determined at compile-time. They are created using macros attached to normal array expressions, for example: ```@example small_opt -using StaticArrays +import StaticArrays -A = SA[2.0, 3.0, 5.0] +A = StaticArrays.SA[2.0, 3.0, 5.0] typeof(A) ``` @@ -135,20 +135,20 @@ array. Doing it with broadcasting looks like: ```@example small_opt f_SA(u, p) = u .* u .- p -u0 = SA[1.0, 1.0] +u0 = StaticArrays.SA[1.0, 1.0] p = 2.0 -prob = NonlinearProblem(f_SA, u0, p) +prob = NLS.NonlinearProblem(f_SA, u0, p) -@benchmark solve(prob, NewtonRaphson()) +BenchmarkTools.@benchmark NLS.solve(prob, NLS.NewtonRaphson()) ``` Note that the only change here is that `u0` is made into a StaticArray! If we needed to write `f` out for a more complex nonlinear case, then we'd simply do the following: ```@example small_opt -f_SA(u, p) = SA[u[1] * u[1] - p, u[2] * u[2] - p] +f_SA(u, p) = StaticArrays.SA[u[1] * u[1] - p, u[2] * u[2] - p] -@benchmark solve(prob, NewtonRaphson()) +BenchmarkTools.@benchmark NLS.solve(prob, NLS.NewtonRaphson()) ``` However, notice that this did not give us a speedup but rather a slowdown. This is because @@ -159,7 +159,7 @@ of the methods from SimpleNonlinearSolve.jl which are designed for these small-scale examples. Let's now use `SimpleNewtonRaphson`: ```@example small_opt -@benchmark solve(prob, SimpleNewtonRaphson()) +BenchmarkTools.@benchmark NLS.solve(prob, NLS.SimpleNewtonRaphson()) ``` And there we go, around `40ns` from our starting point of almost `4μs`! diff --git a/docs/src/tutorials/getting_started.md b/docs/src/tutorials/getting_started.md index d0729eeaf..67d406bdf 100644 --- a/docs/src/tutorials/getting_started.md +++ b/docs/src/tutorials/getting_started.md @@ -31,13 +31,13 @@ the parameters of the system.
For example, the following solves the vector equation $$f(u) = u^2 - p$$ for a vector of equations: ```@example 1 -using NonlinearSolve +import NonlinearSolve as NLS f(u, p) = u .* u .- p u0 = [1.0, 1.0] p = 2.0 -prob = NonlinearProblem(f, u0, p) -sol = solve(prob) +prob = NLS.NonlinearProblem(f, u0, p) +sol = NLS.solve(prob) ``` where `u0` is the initial condition for the rootfinder. Native NonlinearSolve.jl solvers use @@ -77,6 +77,7 @@ the general command `SciMLBase.successful_retcode` to check whether the solution exited as intended: ```@example 1 +import SciMLBase SciMLBase.successful_retcode(sol) ``` @@ -92,13 +93,13 @@ For more information on `NonlinearSolution`s, see the ### Interacting with the Solver Options -While `sol = solve(prob)` worked for our case here, in many situations you may need to +While `sol = NLS.solve(prob)` worked for our case here, in many situations you may need to interact more deeply with how the solving process is done. First things first, you can specify the solver using the positional arguments. For example, let's set the solver to `TrustRegion()`: ```@example 1 -solve(prob, TrustRegion()) +NLS.solve(prob, NLS.TrustRegion()) ``` For a complete list of solver choices, see @@ -108,7 +109,7 @@ Next we can modify the tolerances. Here let's set some really low tolerances to force a tight solution: ```@example 1 -solve(prob, TrustRegion(), reltol = 1e-12, abstol = 1e-12) +NLS.solve(prob, NLS.TrustRegion(), reltol = 1e-12, abstol = 1e-12) ``` There are many more options for doing this configuring. Specifically for handling @@ -124,11 +125,11 @@ condition, you pass a `uspan (a,b)` bracket in which the zero is expected to live. For example: ```@example 1 -using NonlinearSolve +import NonlinearSolve as NLS f(u, p) = u * u - 2.0 uspan = (1.0, 2.0) # brackets -prob_int = IntervalNonlinearProblem(f, uspan) -sol = solve(prob_int) +prob_int = NLS.IntervalNonlinearProblem(f, uspan) +sol = NLS.solve(prob_int) ``` All of the same option handling from before works just as before, now just with different solver choices (see the [bracketing solvers](@ref bracketing) page for more details). For example, let's set the solver to `ITP()` and set a high absolute tolerance: ```@example 1 -sol = solve(prob_int, ITP(), abstol = 0.01) +sol = NLS.solve(prob_int, NLS.ITP(), abstol = 0.01) ``` ## Problem Type 3: Solving Steady State Problems @@ -146,26 +147,27 @@ For Steady State Problems, we have a wrapper package, SteadyStateDiffEq.jl, which automates handling SteadyStateProblems with NonlinearSolve and OrdinaryDiffEq. ```@example 1 -using NonlinearSolve, SteadyStateDiffEq +import NonlinearSolve as NLS +import SteadyStateDiffEq as SSDE f(u, p, t) = [2 - 2u[1]; u[1] - 4u[2]] u0 = [0.0, 0.0] -prob = SteadyStateProblem(f, u0) +prob = SSDE.SteadyStateProblem(f, u0) -solve(prob, SSRootfind()) +NLS.solve(prob, SSDE.SSRootfind()) ``` If you don't provide a nonlinear solver to `SSRootfind`, it uses a polyalgorithm from NonlinearSolve.
We can also provide the actual nonlinear solver to use: ```@example 1 -solve(prob, SSRootfind(Broyden())) +NLS.solve(prob, SSDE.SSRootfind(NLS.Broyden())) ``` ## Problem Type 4: Solving Nonlinear Least Squares Problems ```@example 1 -using NonlinearSolve +import NonlinearSolve as NLS function nlls!(du, u, p) du[1] = 2u[1] - 2 @@ -180,16 +182,16 @@ be skipped for out of place problems): ```@example 1 u0 = [0.0, 0.0] -prob = NonlinearLeastSquaresProblem( - NonlinearFunction(nlls!, resid_prototype = zeros(3)), u0) +prob = NLS.NonlinearLeastSquaresProblem( + NLS.NonlinearFunction(nlls!, resid_prototype = zeros(3)), u0) -solve(prob) +NLS.solve(prob) ``` Same as before, we can change the solver and tolerances: ```@example 1 -solve(prob, GaussNewton(), reltol = 1e-12, abstol = 1e-12) +NLS.solve(prob, NLS.GaussNewton(), reltol = 1e-12, abstol = 1e-12) ``` ## Going Beyond the Basics: How to use the Documentation diff --git a/docs/src/tutorials/iterator_interface.md b/docs/src/tutorials/iterator_interface.md index 26873fe35..44fb9a234 100644 --- a/docs/src/tutorials/iterator_interface.md +++ b/docs/src/tutorials/iterator_interface.md @@ -4,13 +4,13 @@ There is an iterator form of the nonlinear solver which somewhat mirrors the DiffEq integrator interface: ```@example iterator_interface -using NonlinearSolve +import NonlinearSolve as NLS f(u, p) = u .* u .- 2.0 u0 = 1.5 -probB = NonlinearProblem(f, u0) +probB = NLS.NonlinearProblem(f, u0) -nlcache = init(probB, NewtonRaphson()) +nlcache = NLS.init(probB, NLS.NewtonRaphson()) ``` `init` takes the same keyword arguments as [`solve`](@ref solver_options), but it returns a @@ -27,7 +27,7 @@ We can perform 10 steps of the Newton-Raphson solver with the following: ```@example iterator_interface for i in 1:10 - step!(nlcache) + NLS.step!(nlcache) end ``` diff --git a/docs/src/tutorials/large_systems.md b/docs/src/tutorials/large_systems.md index 434acb185..294cc32b0 100644 --- a/docs/src/tutorials/large_systems.md +++ b/docs/src/tutorials/large_systems.md @@ -60,7 +60,11 @@ broadcast). Use `dx=dy=1/32`. The resulting `NonlinearProblem` definition is: ```@example ill_conditioned_nlprob -using NonlinearSolve, LinearAlgebra, SparseArrays, LinearSolve +import NonlinearSolve as NLS +import LinearAlgebra +import SparseArrays +import LinearSolve as LS +import ADTypes const N = 32 const xyd_brusselator = range(0, stop = 1, length = N) @@ -99,7 +103,7 @@ function init_brusselator_2d(xyd) end u0 = init_brusselator_2d(xyd_brusselator) -prob_brusselator_2d = NonlinearProblem( +prob_brusselator_2d = NLS.NonlinearProblem( brusselator_2d_loop, u0, p; abstol = 1e-10, reltol = 1e-10 ) ``` In the next section, we will show how to specify `sparsity` to trigger automatic detection.
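Before timing anything, a quick sanity check can save confusion. The following is a small sketch (an addition beyond the original tutorial text) that simply evaluates the residual once at the initial guess; it assumes `brusselator_2d_loop`, `u0`, and `p` from above are in scope:

```julia
# Evaluate the in-place residual at the initial guess. A finite result confirms
# the stencil and the periodic boundary wrapping are wired up correctly; a large
# norm is expected since u0 is far from the root.
du = similar(u0)
brusselator_2d_loop(du, u0, p)
LinearAlgebra.norm(du)
```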
```@example ill_conditioned_nlprob -using BenchmarkTools # for @btime +import BenchmarkTools # for @btime -@btime solve(prob_brusselator_2d, NewtonRaphson()); +BenchmarkTools.@btime NLS.solve(prob_brusselator_2d, NLS.NewtonRaphson()); nothing # hide ``` ```@example ill_conditioned_nlprob -using SparseConnectivityTracer +import SparseConnectivityTracer -prob_brusselator_2d_autosparse = NonlinearProblem( - NonlinearFunction(brusselator_2d_loop; sparsity = TracerSparsityDetector()), +prob_brusselator_2d_autosparse = NLS.NonlinearProblem( + NLS.NonlinearFunction(brusselator_2d_loop; sparsity = SparseConnectivityTracer.TracerSparsityDetector()), u0, p; abstol = 1e-10, reltol = 1e-10 ) -@btime solve(prob_brusselator_2d_autosparse, - NewtonRaphson(; autodiff = AutoForwardDiff(; chunksize = 12))); -@btime solve(prob_brusselator_2d_autosparse, - NewtonRaphson(; autodiff = AutoForwardDiff(; chunksize = 12), - linsolve = KLUFactorization())); -@btime solve(prob_brusselator_2d_autosparse, - NewtonRaphson(; autodiff = AutoForwardDiff(; chunksize = 12), - linsolve = KrylovJL_GMRES())); +BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_autosparse, + NLS.NewtonRaphson(; autodiff = ADTypes.AutoForwardDiff(; chunksize = 12))); +BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_autosparse, + NLS.NewtonRaphson(; autodiff = ADTypes.AutoForwardDiff(; chunksize = 12), + linsolve = LS.KLUFactorization())); +BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_autosparse, + NLS.NewtonRaphson(; autodiff = ADTypes.AutoForwardDiff(; chunksize = 12), + linsolve = LS.KrylovJL_GMRES())); nothing # hide ``` @@ -176,38 +180,39 @@ arguments, and it will kick out a sparse matrix with our pattern, that we can turn into our `jac_prototype` given an actual implementation of sparsity detection. ```@example ill_conditioned_nlprob -using SparseConnectivityTracer, ADTypes +import SparseConnectivityTracer +import ADTypes f! = (du, u) -> brusselator_2d_loop(du, u, p) du0 = similar(u0) -jac_sparsity = ADTypes.jacobian_sparsity(f!, du0, u0, TracerSparsityDetector()) +jac_sparsity = ADTypes.jacobian_sparsity(f!, du0, u0, SparseConnectivityTracer.TracerSparsityDetector()) ``` Notice that Julia gives a nice print out of the sparsity pattern. That's neat, and would be tedious to build by hand!
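As an aside (a sketch that goes beyond the original text, assuming `jac_sparsity` from above is in scope), you can quantify how sparse the detected pattern actually is with the already-imported SparseArrays:

```julia
# Fraction of structurally nonzero entries in the detected Jacobian pattern.
# For this Brusselator discretization it is a tiny fraction of the full matrix.
SparseArrays.nnz(jac_sparsity) / length(jac_sparsity)
```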
Now we just pass it to the `NonlinearFunction` as before: ```@example ill_conditioned_nlprob -ff = NonlinearFunction(brusselator_2d_loop; jac_prototype = jac_sparsity) +ff = NLS.NonlinearFunction(brusselator_2d_loop; jac_prototype = jac_sparsity) ``` Build the `NonlinearProblem`: ```@example ill_conditioned_nlprob -prob_brusselator_2d_sparse = NonlinearProblem(ff, u0, p; abstol = 1e-10, reltol = 1e-10) +prob_brusselator_2d_sparse = NLS.NonlinearProblem(ff, u0, p; abstol = 1e-10, reltol = 1e-10) ``` Now let's see how the version with sparsity compares to the version without: ```@example ill_conditioned_nlprob -@btime solve(prob_brusselator_2d, NewtonRaphson()); -@btime solve(prob_brusselator_2d_sparse, NewtonRaphson()); -@btime solve(prob_brusselator_2d_sparse, NewtonRaphson(linsolve = KLUFactorization())); +BenchmarkTools.@btime NLS.solve(prob_brusselator_2d, NLS.NewtonRaphson()); +BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_sparse, NLS.NewtonRaphson()); +BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_sparse, NLS.NewtonRaphson(linsolve = LS.KLUFactorization())); nothing # hide ``` Note that depending on the properties of the sparsity pattern, one may want to try -alternative linear solvers such as `NewtonRaphson(linsolve = KLUFactorization())` -or `NewtonRaphson(linsolve = UMFPACKFactorization())` +alternative linear solvers such as `NLS.NewtonRaphson(linsolve = LS.KLUFactorization())` +or `NLS.NewtonRaphson(linsolve = LS.UMFPACKFactorization())` ## Using Jacobian-Free Newton-Krylov @@ -217,7 +222,7 @@ Krylov method. To swap the linear solver out, we use the `linsolve` command and GMRES linear solver. ```@example ill_conditioned_nlprob -@btime solve(prob_brusselator_2d, NewtonRaphson(linsolve = KrylovJL_GMRES())); +BenchmarkTools.@btime NLS.solve(prob_brusselator_2d, NLS.NewtonRaphson(linsolve = LS.KrylovJL_GMRES())); nothing # hide ``` @@ -244,12 +249,12 @@ used in the solution of the ODE. An example of this using IncompleteLU.jl: ```julia # FIXME: On 1.10+ this is broken. Skipping this for now. -using IncompleteLU +import IncompleteLU -incompletelu(W, p = nothing) = ilu(W, τ = 50.0), LinearAlgebra.I +incompletelu(W, p = nothing) = IncompleteLU.ilu(W, τ = 50.0), LinearAlgebra.I -@btime solve(prob_brusselator_2d_sparse, - NewtonRaphson(linsolve = KrylovJL_GMRES(precs = incompletelu), concrete_jac = true +BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_sparse, + NLS.NewtonRaphson(linsolve = LS.KrylovJL_GMRES(precs = incompletelu), concrete_jac = true ); nothing # hide ``` @@ -271,15 +276,15 @@ parameter. Another option is to use AlgebraicMultigrid.jl, which is more automatic.
The setup is very similar to before: ```@example ill_conditioned_nlprob -using AlgebraicMultigrid +import AlgebraicMultigrid function algebraicmultigrid(W, p = nothing) - return aspreconditioner(ruge_stuben(convert(AbstractMatrix, W))), LinearAlgebra.I + return AlgebraicMultigrid.aspreconditioner(AlgebraicMultigrid.ruge_stuben(convert(AbstractMatrix, W))), LinearAlgebra.I end -@btime solve(prob_brusselator_2d_sparse, - NewtonRaphson( - linsolve = KrylovJL_GMRES(; precs = algebraicmultigrid), concrete_jac = true +BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_sparse, + NLS.NewtonRaphson( + linsolve = LS.KrylovJL_GMRES(; precs = algebraicmultigrid), concrete_jac = true ) ); nothing # hide ``` @@ -297,10 +302,10 @@ function algebraicmultigrid2(W, p = nothing) return Pl, LinearAlgebra.I end -@btime solve( +BenchmarkTools.@btime NLS.solve( prob_brusselator_2d_sparse, - NewtonRaphson( - linsolve = KrylovJL_GMRES(precs = algebraicmultigrid2), concrete_jac = true + NLS.NewtonRaphson( + linsolve = LS.KrylovJL_GMRES(precs = algebraicmultigrid2), concrete_jac = true ) ); nothing # hide ``` @@ -313,18 +318,19 @@ for the exact sparsity detection case, we left out the time it takes to perform sparsity detection. Let's compare the two by setting the sparsity detection algorithms. ```@example ill_conditioned_nlprob -using DifferentiationInterface, SparseConnectivityTracer +import DifferentiationInterface +import SparseConnectivityTracer -prob_brusselator_2d_exact_tracer = NonlinearProblem( - NonlinearFunction(brusselator_2d_loop; sparsity = TracerSparsityDetector()), +prob_brusselator_2d_exact_tracer = NLS.NonlinearProblem( + NLS.NonlinearFunction(brusselator_2d_loop; sparsity = SparseConnectivityTracer.TracerSparsityDetector()), u0, p; abstol = 1e-10, reltol = 1e-10) -prob_brusselator_2d_approx_di = NonlinearProblem( - NonlinearFunction(brusselator_2d_loop; - sparsity = DenseSparsityDetector(AutoForwardDiff(); atol = 1e-4)), +prob_brusselator_2d_approx_di = NLS.NonlinearProblem( + NLS.NonlinearFunction(brusselator_2d_loop; + sparsity = DifferentiationInterface.DenseSparsityDetector(ADTypes.AutoForwardDiff(); atol = 1e-4)), u0, p; abstol = 1e-10, reltol = 1e-10) -@btime solve(prob_brusselator_2d_exact_tracer, NewtonRaphson()); -@btime solve(prob_brusselator_2d_approx_di, NewtonRaphson()); +BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_exact_tracer, NLS.NewtonRaphson()); +BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_approx_di, NLS.NewtonRaphson()); nothing # hide ``` diff --git a/docs/src/tutorials/modelingtoolkit.md b/docs/src/tutorials/modelingtoolkit.md index e2016dde0..fde4f1033 100644 --- a/docs/src/tutorials/modelingtoolkit.md +++ b/docs/src/tutorials/modelingtoolkit.md @@ -6,14 +6,15 @@ for the numerical solvers which can make it easy to symbolically modify and generate equations to be solved.
The basic form of using ModelingToolkit looks as follows: ```julia -using ModelingToolkit, NonlinearSolve +import ModelingToolkit as MTK +import NonlinearSolve as NLS -@variables x y z -@parameters σ ρ β +MTK.@variables x y z +MTK.@parameters σ ρ β # Define a nonlinear system eqs = [0 ~ σ * (y - x), 0 ~ x * (ρ - z) - y, 0 ~ x * y - β * z] -@mtkbuild ns = NonlinearSystem(eqs, [x, y, z], [σ, ρ, β]) +MTK.@mtkbuild ns = MTK.NonlinearSystem(eqs, [x, y, z], [σ, ρ, β]) u0 = [x => 1.0, y => 0.0, z => 0.0] @@ -21,8 +22,8 @@ ps = [σ => 10.0 ρ => 26.0 β => 8 / 3] -prob = NonlinearProblem(ns, u0, ps) -sol = solve(prob, NewtonRaphson()) +prob = NLS.NonlinearProblem(ns, u0, ps) +sol = NLS.solve(prob, NLS.NewtonRaphson()) ``` ## Symbolic Derivations of Extra Functions @@ -31,21 +32,21 @@ As a symbolic system, ModelingToolkit can be used to represent the equations and forms. For example, let's look at the equations: ```julia -equations(ns) +MTK.equations(ns) ``` We can ask it what the Jacobian of our system is via `calculate_jacobian`: ```julia -calculate_jacobian(ns) +MTK.calculate_jacobian(ns) ``` We can tell MTK to generate a computable form of this analytical Jacobian via `jac = true` to help the solver use efficient forms: ```julia -prob = NonlinearProblem(ns, u0, ps, jac = true) -sol = solve(prob, NewtonRaphson()) +prob = NLS.NonlinearProblem(ns, u0, ps, jac = true) +sol = NLS.solve(prob, NLS.NewtonRaphson()) ``` ## Symbolic Simplification of Nonlinear Systems via Tearing @@ -55,34 +56,34 @@ the systems. It's very easy to write down a mathematical model that, in theory, could be solved more simply. Let's take a look at a quick system: ```julia -@variables u1 u2 u3 u4 u5 +MTK.@variables u1 u2 u3 u4 u5 eqs = [0 ~ u1 - sin(u5), 0 ~ u2 - cos(u1), 0 ~ u3 - hypot(u1, u2), 0 ~ u4 - hypot(u2, u3), 0 ~ u5 - hypot(u4, u1)] -@named sys = NonlinearSystem(eqs, [u1, u2, u3, u4, u5], []) +MTK.@named sys = MTK.NonlinearSystem(eqs, [u1, u2, u3, u4, u5], []) ``` If we run structural simplification, we receive the following form: ```julia -sys = structural_simplify(sys) +sys = MTK.structural_simplify(sys) ``` ```julia -equations(sys) +MTK.equations(sys) ``` How did it do this? Let's look at the `observed` to see the relationships that it found: ```julia -observed(sys) +MTK.observed(sys) ``` Using ModelingToolkit, we can build and solve the simplified system: ```julia u0 = [u5 .=> 1.0] -prob = NonlinearProblem(sys, u0) -sol = solve(prob, NewtonRaphson()) +prob = NLS.NonlinearProblem(sys, u0) +sol = NLS.solve(prob, NLS.NewtonRaphson()) ``` We can then use symbolic indexing to retrieve any variable: diff --git a/docs/src/tutorials/nonlinear_solve_gpus.md b/docs/src/tutorials/nonlinear_solve_gpus.md index a2f0871ae..f48566d60 100644 --- a/docs/src/tutorials/nonlinear_solve_gpus.md +++ b/docs/src/tutorials/nonlinear_solve_gpus.md @@ -52,13 +52,14 @@ for example. In practice when this comes together, it looks like: ```julia -using NonlinearSolve, CUDA +import NonlinearSolve as NLS +import CUDA f(u, p) = u .* u .- p -u0 = cu(ones(1000)) -p = cu(collect(1:1000)) -prob = NonlinearProblem(f, u0, p) -sol = solve(prob, NewtonRaphson(), abstol=1f-4) +u0 = CUDA.cu(ones(1000)) +p = CUDA.cu(collect(1:1000)) +prob = NLS.NonlinearProblem(f, u0, p) +sol = NLS.solve(prob, NLS.NewtonRaphson(), abstol=1f-4) ``` Notice a few things here. One, nothing is different except the input array types. But @@ -85,17 +86,19 @@ it. This function must be defined using the `KernelAbstractions.@kernel` macro. It looks like:
```julia -using NonlinearSolve, StaticArrays -using KernelAbstractions # For writing vendor-agnostic kernels -using CUDA # For if you have an NVIDIA GPU -using AMDGPU # For if you have an AMD GPU -using Metal # For if you have a Mac M-series device and want to use the built-in GPU -using OneAPI # For if you have an Intel GPU -@kernel function parallel_nonlinearsolve_kernel!(result, @Const(prob), @Const(alg)) +import NonlinearSolve as NLS +import StaticArrays +import SciMLBase +import KernelAbstractions # For writing vendor-agnostic kernels +import CUDA # For if you have an NVIDIA GPU +import AMDGPU # For if you have an AMD GPU +import Metal # For if you have a Mac M-series device and want to use the built-in GPU +import oneAPI # For if you have an Intel GPU +@KernelAbstractions.kernel function parallel_nonlinearsolve_kernel!(result, @Const(prob), @Const(alg)) i = @index(Global) - prob_i = remake(prob; p = prob.p[i]) - sol = solve(prob_i, alg) + prob_i = SciMLBase.remake(prob; p = prob.p[i]) + sol = NLS.solve(prob_i, alg) @inbounds result[i] = sol.u end ``` @@ -137,11 +140,11 @@ Now let's build a nonlinear system to test it on. out2 = sqrt(p[2]) * (x[3] - x[4]) out3 = (x[2] - p[3] * x[3])^2 out4 = sqrt(p[4]) * (x[1] - x[4]) * (x[1] - x[4]) - SA[out1,out2,out3,out4] + StaticArrays.SA[out1,out2,out3,out4] end -p = @SVector [@SVector(rand(Float32, 4)) for _ in 1:1024] -u0 = SA[1f0, 2f0, 3f0, 4f0] +p = StaticArrays.@SVector [StaticArrays.@SVector(rand(Float32, 4)) for _ in 1:1024] +u0 = StaticArrays.SA[1f0, 2f0, 3f0, 4f0] prob = SciMLBase.ImmutableNonlinearProblem{false}(p2_f, u0, p) ``` @@ -161,15 +164,15 @@ and we then simply call our vectorized kernel to parallelize it: ```julia # Threaded CPU -vectorized_solve(prob, SimpleNewtonRaphson(); backend = CPU()) +vectorized_solve(prob, NLS.SimpleNewtonRaphson(); backend = KernelAbstractions.CPU()) # AMD ROCM GPU -vectorized_solve(prob, SimpleNewtonRaphson(); backend = ROCBackend()) +vectorized_solve(prob, NLS.SimpleNewtonRaphson(); backend = AMDGPU.ROCBackend()) # NVIDIA CUDA GPU -vectorized_solve(prob, SimpleNewtonRaphson(); backend = CUDABackend()) +vectorized_solve(prob, NLS.SimpleNewtonRaphson(); backend = CUDA.CUDABackend()) # Intel GPU -vectorized_solve(prob, SimpleNewtonRaphson(); backend = oneAPI.oneAPIBackend()) +vectorized_solve(prob, NLS.SimpleNewtonRaphson(); backend = oneAPI.oneAPIBackend()) # Mac M-Series, such as M3Max -vectorized_solve(prob, SimpleNewtonRaphson(); backend = Metal.MetalBackend()) +vectorized_solve(prob, NLS.SimpleNewtonRaphson(); backend = Metal.MetalBackend()) ``` !!! warn diff --git a/docs/src/tutorials/optimizing_parameterized_ode.md b/docs/src/tutorials/optimizing_parameterized_ode.md index 69cca060d..49d6c5739 100644 --- a/docs/src/tutorials/optimizing_parameterized_ode.md +++ b/docs/src/tutorials/optimizing_parameterized_ode.md @@ -4,7 +4,9 @@ Let us fit a parameterized ODE to some data. We will use the Lotka-Volterra model example. We will use Single Shooting to fit the parameters. ```@example parameterized_ode -using OrdinaryDiffEqTsit5, NonlinearSolve, Plots +import OrdinaryDiffEqTsit5 as ODE +import NonlinearSolve as NLS +import Plots ``` Let us simulate some real data from the Lotka-Volterra model.
@@ -28,32 +30,31 @@ tsteps = 0.0:0.1:10.0 p = [1.5, 1.0, 3.0, 1.0] # Setup the ODE problem, then solve -prob = ODEProblem(lotka_volterra!, u0, tspan, p) -sol = solve(prob, Tsit5(); saveat = tsteps) +prob = ODE.ODEProblem(lotka_volterra!, u0, tspan, p) +sol = ODE.solve(prob, ODE.Tsit5(); saveat = tsteps) # Plot the solution -using Plots -plot(sol; linewidth = 3) +Plots.plot(sol; linewidth = 3) ``` Let us now formulate the parameter estimation as a Nonlinear Least Squares Problem. ```@example parameterized_ode function loss_function(ode_param, data) - sol = solve(prob, Tsit5(); p = ode_param, saveat = tsteps) + sol = ODE.solve(prob, ODE.Tsit5(); p = ode_param, saveat = tsteps) return vec(reduce(hcat, sol.u)) .- data end p_init = zeros(4) -nlls_prob = NonlinearLeastSquaresProblem(loss_function, p_init, vec(reduce(hcat, sol.u))) +nlls_prob = NLS.NonlinearLeastSquaresProblem(loss_function, p_init, vec(reduce(hcat, sol.u))) ``` Now, we can use any NLLS solver to solve this problem. ```@example parameterized_ode -res = solve(nlls_prob, LevenbergMarquardt(); maxiters = 1000, show_trace = Val(true), - trace_level = TraceWithJacobianConditionNumber(25)) +res = NLS.solve(nlls_prob, NLS.LevenbergMarquardt(); maxiters = 1000, show_trace = Val(true), + trace_level = NLS.TraceWithJacobianConditionNumber(25)) nothing # hide ``` @@ -64,8 +65,8 @@ res We can also use Trust Region methods. ```@example parameterized_ode -res = solve(nlls_prob, TrustRegion(); maxiters = 1000, show_trace = Val(true), - trace_level = TraceWithJacobianConditionNumber(25)) +res = NLS.solve(nlls_prob, NLS.TrustRegion(); maxiters = 1000, show_trace = Val(true), + trace_level = NLS.TraceWithJacobianConditionNumber(25)) nothing # hide ``` @@ -76,9 +77,9 @@ res Let's plot the solution. ```@example parameterized_ode -prob2 = remake(prob; tspan = (0.0, 10.0)) -sol_fit = solve(prob2, Tsit5(); p = res.u) -sol_true = solve(prob2, Tsit5(); p = p) -plot(sol_true; linewidth = 3) -plot!(sol_fit; linewidth = 3, linestyle = :dash) +prob2 = ODE.remake(prob; tspan = (0.0, 10.0)) +sol_fit = ODE.solve(prob2, ODE.Tsit5(); p = res.u) +sol_true = ODE.solve(prob2, ODE.Tsit5(); p = p) +Plots.plot(sol_true; linewidth = 3) +Plots.plot!(sol_fit; linewidth = 3, linestyle = :dash) ``` diff --git a/docs/src/tutorials/small_compile.md b/docs/src/tutorials/small_compile.md index ec28361f4..70999caab 100644 --- a/docs/src/tutorials/small_compile.md +++ b/docs/src/tutorials/small_compile.md @@ -8,13 +8,13 @@ NonlinearSolve.jl can be done with SimpleNonlinearSolve.jl. Thus for example, we can solve the core tutorial problem with just SimpleNonlinearSolve.jl as follows: ```@example simple -using SimpleNonlinearSolve +import SimpleNonlinearSolve as SNLS f(u, p) = u .* u .- p u0 = [1.0, 1.0] p = 2.0 -prob = NonlinearProblem(f, u0, p) -sol = solve(prob, SimpleNewtonRaphson()) +prob = SNLS.NonlinearProblem(f, u0, p) +sol = SNLS.solve(prob, SNLS.SimpleNewtonRaphson()) ``` However, there are a few downsides to SimpleNonlinearSolve's `SimpleX` style algorithms to @@ -37,12 +37,12 @@ As such, you can use the code as shown above to have very low startup with good for more scaling and debuggability we recommend the full NonlinearSolve.jl.
But that said, ```@example simple -using StaticArrays +import StaticArrays -u0 = SA[1.0, 1.0] +u0 = StaticArrays.SA[1.0, 1.0] p = 2.0 -prob = NonlinearProblem(f, u0, p) -sol = solve(prob, SimpleNewtonRaphson()) +prob = SNLS.NonlinearProblem(f, u0, p) +sol = SNLS.solve(prob, SNLS.SimpleNewtonRaphson()) ``` using StaticArrays.jl is also the fastest form for small equations, so if you know your diff --git a/docs/src/tutorials/snes_ex2.md b/docs/src/tutorials/snes_ex2.md index 050a39611..62ac81e9d 100644 --- a/docs/src/tutorials/snes_ex2.md +++ b/docs/src/tutorials/snes_ex2.md @@ -8,7 +8,11 @@ This solves the equations sequentially. Newton method to solve `u'' + u^{2} = f`, sequentially. ```@example snes_ex2 -using NonlinearSolve, PETSc, LinearAlgebra, SparseConnectivityTracer, BenchmarkTools +import NonlinearSolve as NLS +import PETSc +import LinearAlgebra +import SparseConnectivityTracer +import BenchmarkTools u0 = fill(0.5, 128) @@ -33,25 +37,25 @@ To use automatic sparsity detection, we need to specify the `sparsity` keyword argument. See the automatic sparsity detection documentation for more details. ```@example snes_ex2 -nlfunc_dense = NonlinearFunction(form_residual!) -nlfunc_sparse = NonlinearFunction(form_residual!; sparsity = TracerSparsityDetector()) +nlfunc_dense = NLS.NonlinearFunction(form_residual!) +nlfunc_sparse = NLS.NonlinearFunction(form_residual!; sparsity = SparseConnectivityTracer.TracerSparsityDetector()) -nlprob_dense = NonlinearProblem(nlfunc_dense, u0) -nlprob_sparse = NonlinearProblem(nlfunc_sparse, u0) +nlprob_dense = NLS.NonlinearProblem(nlfunc_dense, u0) +nlprob_sparse = NLS.NonlinearProblem(nlfunc_sparse, u0) ``` Now we can solve the problem using `PETScSNES` or with one of the native `NonlinearSolve.jl` solvers. ```@example snes_ex2 -sol_dense_nr = solve(nlprob_dense, NewtonRaphson(); abstol = 1e-8) -sol_dense_snes = solve(nlprob_dense, PETScSNES(); abstol = 1e-8) +sol_dense_nr = NLS.solve(nlprob_dense, NLS.NewtonRaphson(); abstol = 1e-8) +sol_dense_snes = NLS.solve(nlprob_dense, NLS.PETScSNES(); abstol = 1e-8) sol_dense_nr .- sol_dense_snes ``` ```@example snes_ex2 -sol_sparse_nr = solve(nlprob_sparse, NewtonRaphson(); abstol = 1e-8) -sol_sparse_snes = solve(nlprob_sparse, PETScSNES(); abstol = 1e-8) +sol_sparse_nr = NLS.solve(nlprob_sparse, NLS.NewtonRaphson(); abstol = 1e-8) +sol_sparse_snes = NLS.solve(nlprob_sparse, NLS.PETScSNES(); abstol = 1e-8) sol_sparse_nr .- sol_sparse_snes ``` @@ -63,19 +67,19 @@ runtimes. ### Dense Jacobian ```@example snes_ex2 -@benchmark solve($(nlprob_dense), $(NewtonRaphson()); abstol = 1e-8) +BenchmarkTools.@benchmark NLS.solve($(nlprob_dense), $(NLS.NewtonRaphson()); abstol = 1e-8) ``` ```@example snes_ex2 -@benchmark solve($(nlprob_dense), $(PETScSNES()); abstol = 1e-8) +BenchmarkTools.@benchmark NLS.solve($(nlprob_dense), $(NLS.PETScSNES()); abstol = 1e-8) ``` ### Sparse Jacobian ```@example snes_ex2 -@benchmark solve($(nlprob_sparse), $(NewtonRaphson()); abstol = 1e-8) +BenchmarkTools.@benchmark NLS.solve($(nlprob_sparse), $(NLS.NewtonRaphson()); abstol = 1e-8) ``` ```@example snes_ex2 -@benchmark solve($(nlprob_sparse), $(PETScSNES()); abstol = 1e-8) +BenchmarkTools.@benchmark NLS.solve($(nlprob_sparse), $(NLS.PETScSNES()); abstol = 1e-8) ```
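As a closing recap of the convention this diff adopts across the docs, here is a minimal self-contained sketch (not tied to any one page; all names are ones already used throughout the changed pages) of the fully qualified `import ... as` style:

```julia
# Packages are `import`ed under an alias and every name is qualified, so it is
# always clear which package provides each function or solver type.
import NonlinearSolve as NLS

f(u, p) = u .* u .- p
prob = NLS.NonlinearProblem(f, [1.0, 1.0], 2.0)
sol = NLS.solve(prob, NLS.NewtonRaphson())
sol.u
```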