diff --git a/docs/make.jl b/docs/make.jl index ce2876953..cce131634 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -51,7 +51,7 @@ makedocs(; "https://www.sciencedirect.com/science/article/abs/pii/S0045782523007156", "https://github.com/devernay/cminpack/blob/d1f5f5a273862ca1bbcf58394e4ac060d9e22c76/hybrd.c", "https://github.com/devernay/cminpack/blob/d1f5f5a273862ca1bbcf58394e4ac060d9e22c76/hybrj.c", - "https://github.com/devernay/cminpack/blob/d1f5f5a273862ca1bbcf58394e4ac060d9e22c76/lmder.c", + "https://github.com/devernay/cminpack/blob/d1f5f5a273862ca1bbcf58394e4ac060d9e22c76/lmder.c" ], checkdocs = :exports, warnonly = [:missing_docs], diff --git a/docs/src/api/scipy.md b/docs/src/api/scipy.md index 271b5591c..b2e8ee611 100644 --- a/docs/src/api/scipy.md +++ b/docs/src/api/scipy.md @@ -19,4 +19,4 @@ Note that using this package requires Python and SciPy to be available via Pytho NonlinearSolveSciPy.SciPyLeastSquares NonlinearSolveSciPy.SciPyRoot NonlinearSolveSciPy.SciPyRootScalar -``` \ No newline at end of file +``` diff --git a/docs/src/tutorials/large_systems.md b/docs/src/tutorials/large_systems.md index 294cc32b0..af9f3e2b6 100644 --- a/docs/src/tutorials/large_systems.md +++ b/docs/src/tutorials/large_systems.md @@ -78,7 +78,8 @@ function brusselator_2d_loop(du, u, p) @inbounds for I in CartesianIndices((N, N)) i, j = Tuple(I) x, y = xyd_brusselator[I[1]], xyd_brusselator[I[2]] - ip1, im1, jp1, jm1 = limit(i + 1, N), limit(i - 1, N), limit(j + 1, N), + ip1, im1, jp1, + jm1 = limit(i + 1, N), limit(i - 1, N), limit(j + 1, N), limit(j - 1, N) du[i, j, 1] = alpha * (u[im1, j, 1] + u[ip1, j, 1] + u[i, jp1, 1] + u[i, jm1, 1] - 4u[i, j, 1]) + @@ -279,7 +280,8 @@ which is more automatic. The setup is very similar to before: import AlgebraicMultigrid function algebraicmultigrid(W, p = nothing) - return AlgebraicMultigrid.aspreconditioner(AlgebraicMultigrid.ruge_stuben(convert(AbstractMatrix, W))), LinearAlgebra.I + return AlgebraicMultigrid.aspreconditioner(AlgebraicMultigrid.ruge_stuben(convert(AbstractMatrix, W))), + LinearAlgebra.I end BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_sparse, diff --git a/docs/src/tutorials/nonlinear_solve_gpus.md b/docs/src/tutorials/nonlinear_solve_gpus.md index f48566d60..4bc9c1bd5 100644 --- a/docs/src/tutorials/nonlinear_solve_gpus.md +++ b/docs/src/tutorials/nonlinear_solve_gpus.md @@ -31,7 +31,7 @@ using the first form. In this tutorial we will highlight both use cases in separate parts. !!! note - + If you're looking for GPU-accelerated neural networks inside of nonlinear solvers, check out [DeepEquilibriumNetworks.jl](https://docs.sciml.ai/DeepEquilibriumNetworks/stable/). @@ -59,7 +59,7 @@ f(u, p) = u .* u .- p u0 = CUDA.cu(ones(1000)) p = CUDA.cu(collect(1:1000)) prob = NLS.NonlinearProblem(f, u0, p) -sol = NLS.solve(prob, NLS.NewtonRaphson(), abstol=1f-4) +sol = NLS.solve(prob, NLS.NewtonRaphson(), abstol = 1.0f-4) ``` Notice a few things here. One, nothing is different except the input array types. 
But @@ -95,7 +95,7 @@ import AMDGPU # For if you have an AMD GPU import Metal # For if you have a Mac M-series device and want to use the built-in GPU import OneAPI # For if you have an Intel GPU -@KernelAbstractions.kernel function parallel_nonlinearsolve_kernel!(result, @Const(prob), @Const(alg)) +KernelAbstractions.@kernel function parallel_nonlinearsolve_kernel!(result, @Const(prob), @Const(alg)) i = @index(Global) prob_i = SciMLBase.remake(prob; p = prob.p[i]) sol = NLS.solve(prob_i, alg) @@ -109,7 +109,7 @@ is saying, "for the ith call, get the i'th parameter set and solve with these pa The ith result is then this solution". !!! note - + Because kernel code needs to be able to be compiled to a GPU kernel, it has very strict specifications of what's allowed because GPU cores are not as flexible as CPU cores. In general, this means that you need to avoid any runtime operations in kernel code, @@ -140,16 +140,16 @@ Now let's build a nonlinear system to test it on. out2 = sqrt(p[2]) * (x[3] - x[4]) out3 = (x[2] - p[3] * x[3])^2 out4 = sqrt(p[4]) * (x[1] - x[4]) * (x[1] - x[4]) - StaticArrays.SA[out1,out2,out3,out4] + StaticArrays.SA[out1, out2, out3, out4] end p = StaticArrays.@SVector [StaticArrays.@SVector(rand(Float32, 4)) for _ in 1:1024] -u0 = StaticArrays.SA[1f0, 2f0, 3f0, 4f0] +u0 = StaticArrays.SA[1.0f0, 2.0f0, 3.0f0, 4.0f0] prob = SciMLBase.ImmutableNonlinearProblem{false}(p2_f, u0, p) ``` !!! note - + Because the custom kernel is going to need to embed the the code for our nonlinear problem into the kernel, it also must be written to be GPU compatible. In general, this means that you need to avoid any runtime operations in kernel code, @@ -176,4 +176,5 @@ vectorized_solve(prob, NLS.SimpleNewtonRaphson(); backend = Metal.MetalBackend() ``` !!! warn + The GPU-based calls will only work on your machine if you have a compatible GPU! diff --git a/docs/src/tutorials/optimizing_parameterized_ode.md b/docs/src/tutorials/optimizing_parameterized_ode.md index 49d6c5739..3c34a7df2 100644 --- a/docs/src/tutorials/optimizing_parameterized_ode.md +++ b/docs/src/tutorials/optimizing_parameterized_ode.md @@ -53,7 +53,8 @@ nlls_prob = NLS.NonlinearLeastSquaresProblem(loss_function, p_init, vec(reduce(h Now, we can use any NLLS solver to solve this problem. ```@example parameterized_ode -res = NLS.solve(nlls_prob, NLS.LevenbergMarquardt(); maxiters = 1000, show_trace = Val(true), +res = NLS.solve( + nlls_prob, NLS.LevenbergMarquardt(); maxiters = 1000, show_trace = Val(true), trace_level = NLS.TraceWithJacobianConditionNumber(25)) nothing # hide ``` diff --git a/docs/src/tutorials/snes_ex2.md b/docs/src/tutorials/snes_ex2.md index 62ac81e9d..8720daa23 100644 --- a/docs/src/tutorials/snes_ex2.md +++ b/docs/src/tutorials/snes_ex2.md @@ -38,7 +38,8 @@ details. ```@example snes_ex2 nlfunc_dense = NLS.NonlinearFunction(form_residual!) 
-nlfunc_sparse = NLS.NonlinearFunction(form_residual!; sparsity = SparseConnectivityTracer.TracerSparsityDetector()) +nlfunc_sparse = NLS.NonlinearFunction( + form_residual!; sparsity = SparseConnectivityTracer.TracerSparsityDetector()) nlprob_dense = NLS.NonlinearProblem(nlfunc_dense, u0) nlprob_sparse = NLS.NonlinearProblem(nlfunc_sparse, u0) diff --git a/ext/NonlinearSolveFastLevenbergMarquardtExt.jl b/ext/NonlinearSolveFastLevenbergMarquardtExt.jl index a851c3a92..f1de0500b 100644 --- a/ext/NonlinearSolveFastLevenbergMarquardtExt.jl +++ b/ext/NonlinearSolveFastLevenbergMarquardtExt.jl @@ -21,7 +21,8 @@ function SciMLBase.__solve( termination_condition, alg ) - f_wrapped, u, resid = NonlinearSolveBase.construct_extension_function_wrapper( + f_wrapped, u, + resid = NonlinearSolveBase.construct_extension_function_wrapper( prob; alias_u0, can_handle_oop = Val(prob.u0 isa SArray) ) f = if prob.u0 isa SArray @@ -49,7 +50,8 @@ function SciMLBase.__solve( ) if prob.u0 isa SArray - res, fx, info, iter, nfev, njev = FastLM.lmsolve( + res, fx, info, iter, nfev, + njev = FastLM.lmsolve( f, jac_fn, prob.u0; solver_kwargs... ) LM, solver = nothing, nothing @@ -68,7 +70,8 @@ function SciMLBase.__solve( LM = FastLM.LMWorkspace(u, resid, J) - res, fx, info, iter, nfev, njev, LM, solver = FastLM.lmsolve!( + res, fx, info, iter, nfev, njev, + LM, solver = FastLM.lmsolve!( f, jac_fn, LM; solver, solver_kwargs... ) end diff --git a/ext/NonlinearSolveFixedPointAccelerationExt.jl b/ext/NonlinearSolveFixedPointAccelerationExt.jl index 6ff9f240c..bee8e8ee7 100644 --- a/ext/NonlinearSolveFixedPointAccelerationExt.jl +++ b/ext/NonlinearSolveFixedPointAccelerationExt.jl @@ -15,7 +15,8 @@ function SciMLBase.__solve( termination_condition, alg ) - f, u0, resid = NonlinearSolveBase.construct_extension_function_wrapper( + f, u0, + resid = NonlinearSolveBase.construct_extension_function_wrapper( prob; alias_u0, make_fixed_point = Val(true), force_oop = Val(true) ) diff --git a/ext/NonlinearSolveMINPACKExt.jl b/ext/NonlinearSolveMINPACKExt.jl index fc0f919a1..30748ce0c 100644 --- a/ext/NonlinearSolveMINPACKExt.jl +++ b/ext/NonlinearSolveMINPACKExt.jl @@ -17,7 +17,8 @@ function SciMLBase.__solve( termination_condition, alg ) - f_wrapped!, u0, resid = NonlinearSolveBase.construct_extension_function_wrapper( + f_wrapped!, u0, + resid = NonlinearSolveBase.construct_extension_function_wrapper( prob; alias_u0 ) resid_size = size(resid) diff --git a/ext/NonlinearSolveNLSolversExt.jl b/ext/NonlinearSolveNLSolversExt.jl index e4e2206af..b72ffbec3 100644 --- a/ext/NonlinearSolveNLSolversExt.jl +++ b/ext/NonlinearSolveNLSolversExt.jl @@ -33,7 +33,8 @@ function SciMLBase.__solve( prob.f, autodiff, prob.u0, Constant(prob.p) ) - fj_scalar = @closure (Jx, x) -> begin + fj_scalar = @closure (Jx, + x) -> begin return DifferentiationInterface.value_and_derivative( prob.f, prep, autodiff, x, Constant(prob.p) ) diff --git a/ext/NonlinearSolvePETScExt.jl b/ext/NonlinearSolvePETScExt.jl index 7b36cecce..0f8aa2bea 100644 --- a/ext/NonlinearSolvePETScExt.jl +++ b/ext/NonlinearSolvePETScExt.jl @@ -22,7 +22,8 @@ function SciMLBase.__solve( termination_condition, alg; abs_norm_supported = false ) - f_wrapped!, u0, resid = NonlinearSolveBase.construct_extension_function_wrapper( + f_wrapped!, u0, + resid = NonlinearSolveBase.construct_extension_function_wrapper( prob; alias_u0 ) T = eltype(u0) @@ -48,7 +49,9 @@ function SciMLBase.__solve( nf = Ref{Int}(0) - f! = @closure (cfx, cx, user_ctx) -> begin + f! 
= @closure (cfx, + cx, + user_ctx) -> begin nf[] += 1 fx = cfx isa Ptr{Nothing} ? PETSc.unsafe_localarray(T, cfx; read = false) : cfx x = cx isa Ptr{Nothing} ? PETSc.unsafe_localarray(T, cx; write = false) : cx @@ -76,7 +79,8 @@ function SciMLBase.__solve( ) J_init = zeros(T, 1, 1) else - jac!, J_init = NonlinearSolveBase.construct_extension_jac( + jac!, + J_init = NonlinearSolveBase.construct_extension_jac( prob, alg, u0, resid; autodiff, initial_jacobian = Val(true) ) end @@ -85,7 +89,10 @@ function SciMLBase.__solve( if J_init isa AbstractSparseMatrix PJ = PETSc.MatSeqAIJ(J_init) - jac_fn! = @closure (cx, J, _, user_ctx) -> begin + jac_fn! = @closure (cx, + J, + _, + user_ctx) -> begin njac[] += 1 x = cx isa Ptr{Nothing} ? PETSc.unsafe_localarray(T, cx; write = false) : cx if J isa PETSc.AbstractMat @@ -102,7 +109,10 @@ function SciMLBase.__solve( snes.user_ctx = (; jacobian = J_init) else PJ = PETSc.MatSeqDense(J_init) - jac_fn! = @closure (cx, J, _, user_ctx) -> begin + jac_fn! = @closure (cx, + J, + _, + user_ctx) -> begin njac[] += 1 x = cx isa Ptr{Nothing} ? PETSc.unsafe_localarray(T, cx; write = false) : cx jac!(J, x) diff --git a/ext/NonlinearSolveSIAMFANLEquationsExt.jl b/ext/NonlinearSolveSIAMFANLEquationsExt.jl index bfd2bd72d..eafe079f4 100644 --- a/ext/NonlinearSolveSIAMFANLEquationsExt.jl +++ b/ext/NonlinearSolveSIAMFANLEquationsExt.jl @@ -64,7 +64,8 @@ function SciMLBase.__solve( elseif method == :secant sol = secant(f, prob.u0; maxit = maxiters, atol, rtol, printerr) elseif method == :anderson - f_aa, u, _ = NonlinearSolveBase.construct_extension_function_wrapper( + f_aa, u, + _ = NonlinearSolveBase.construct_extension_function_wrapper( prob; alias_u0, make_fixed_point = Val(true) ) sol = aasol( @@ -73,7 +74,8 @@ function SciMLBase.__solve( ) end else - f, u, resid = NonlinearSolveBase.construct_extension_function_wrapper( + f, u, + resid = NonlinearSolveBase.construct_extension_function_wrapper( prob; alias_u0, make_fixed_point = Val(method == :anderson) ) N = length(u) diff --git a/ext/NonlinearSolveSpeedMappingExt.jl b/ext/NonlinearSolveSpeedMappingExt.jl index c0f39607b..e32dbb559 100644 --- a/ext/NonlinearSolveSpeedMappingExt.jl +++ b/ext/NonlinearSolveSpeedMappingExt.jl @@ -16,7 +16,8 @@ function SciMLBase.__solve( termination_condition, alg ) - m!, u, resid = NonlinearSolveBase.construct_extension_function_wrapper( + m!, u, + resid = NonlinearSolveBase.construct_extension_function_wrapper( prob; alias_u0, make_fixed_point = Val(true) ) tol = NonlinearSolveBase.get_tolerance(abstol, eltype(u)) diff --git a/lib/BracketingNonlinearSolve/ext/BracketingNonlinearSolveForwardDiffExt.jl b/lib/BracketingNonlinearSolve/ext/BracketingNonlinearSolveForwardDiffExt.jl index f531df777..1d3dd326c 100644 --- a/lib/BracketingNonlinearSolve/ext/BracketingNonlinearSolveForwardDiffExt.jl +++ b/lib/BracketingNonlinearSolve/ext/BracketingNonlinearSolveForwardDiffExt.jl @@ -7,7 +7,9 @@ using SciMLBase: SciMLBase, IntervalNonlinearProblem using BracketingNonlinearSolve: Bisection, Brent, Alefeld, Falsi, ITP, Ridder -const DualIntervalNonlinearProblem{T, V, P} = IntervalNonlinearProblem{ +const DualIntervalNonlinearProblem{T, + V, + P} = IntervalNonlinearProblem{ uType, iip, <:Union{<:Dual{T, V, P}, <:AbstractArray{<:Dual{T, V, P}}} } where {uType, iip} diff --git a/lib/BracketingNonlinearSolve/src/BracketingNonlinearSolve.jl b/lib/BracketingNonlinearSolve/src/BracketingNonlinearSolve.jl index 9337ac6fe..b7c16fac3 100644 --- a/lib/BracketingNonlinearSolve/src/BracketingNonlinearSolve.jl +++ 
b/lib/BracketingNonlinearSolve/src/BracketingNonlinearSolve.jl @@ -29,17 +29,17 @@ function CommonSolve.solve(prob::IntervalNonlinearProblem, nothing, args...; kwa return CommonSolve.solve(prob, ITP(), args...; kwargs...) end -function CommonSolve.solve(prob::IntervalNonlinearProblem, +function CommonSolve.solve(prob::IntervalNonlinearProblem, alg::AbstractBracketingAlgorithm, args...; sensealg = nothing, kwargs...) - return bracketingnonlinear_solve_up(prob::IntervalNonlinearProblem, sensealg, prob.p, alg, args...; kwargs...) + return bracketingnonlinear_solve_up( + prob::IntervalNonlinearProblem, sensealg, prob.p, alg, args...; kwargs...) end - -function bracketingnonlinear_solve_up(prob::IntervalNonlinearProblem, sensealg, p, alg, args...; kwargs...) +function bracketingnonlinear_solve_up( + prob::IntervalNonlinearProblem, sensealg, p, alg, args...; kwargs...) return SciMLBase.__solve(prob, alg, args...; kwargs...) end - @setup_workload begin for T in (Float32, Float64) prob_brack = IntervalNonlinearProblem{false}( diff --git a/lib/BracketingNonlinearSolve/src/bisection.jl b/lib/BracketingNonlinearSolve/src/bisection.jl index 5f056abbc..5af8f275b 100644 --- a/lib/BracketingNonlinearSolve/src/bisection.jl +++ b/lib/BracketingNonlinearSolve/src/bisection.jl @@ -86,7 +86,8 @@ function SciMLBase.__solve( i += 1 end - sol, i, left, right, fl, fr = Impl.bisection( + sol, i, left, right, + fl, fr = Impl.bisection( left, right, fl, fr, f, abstol, maxiters - i, prob, alg ) diff --git a/lib/BracketingNonlinearSolve/src/brent.jl b/lib/BracketingNonlinearSolve/src/brent.jl index 6199bf29a..a1dbb9641 100644 --- a/lib/BracketingNonlinearSolve/src/brent.jl +++ b/lib/BracketingNonlinearSolve/src/brent.jl @@ -118,7 +118,8 @@ function SciMLBase.__solve( i += 1 end - sol, i, left, right, fl, fr = Impl.bisection( + sol, i, left, right, + fl, fr = Impl.bisection( left, right, fl, fr, f, abstol, maxiters - i, prob, alg ) diff --git a/lib/BracketingNonlinearSolve/src/falsi.jl b/lib/BracketingNonlinearSolve/src/falsi.jl index a2bdbde1f..557fc18fe 100644 --- a/lib/BracketingNonlinearSolve/src/falsi.jl +++ b/lib/BracketingNonlinearSolve/src/falsi.jl @@ -76,7 +76,8 @@ function SciMLBase.__solve( i += 1 end - sol, i, left, right, fl, fr = Impl.bisection( + sol, i, left, right, + fl, fr = Impl.bisection( left, right, fl, fr, f, abstol, maxiters - i, prob, alg ) diff --git a/lib/BracketingNonlinearSolve/src/ridder.jl b/lib/BracketingNonlinearSolve/src/ridder.jl index 9e38d25b6..a647098ad 100644 --- a/lib/BracketingNonlinearSolve/src/ridder.jl +++ b/lib/BracketingNonlinearSolve/src/ridder.jl @@ -90,7 +90,8 @@ function SciMLBase.__solve( i += 1 end - sol, i, left, right, fl, fr = Impl.bisection( + sol, i, left, right, + fl, fr = Impl.bisection( left, right, fl, fr, f, abstol, maxiters - i, prob, alg ) diff --git a/lib/BracketingNonlinearSolve/test/rootfind_tests.jl b/lib/BracketingNonlinearSolve/test/rootfind_tests.jl index e32c37df0..8ccd3516e 100644 --- a/lib/BracketingNonlinearSolve/test/rootfind_tests.jl +++ b/lib/BracketingNonlinearSolve/test/rootfind_tests.jl @@ -18,7 +18,7 @@ end @testset for p in 1.1:0.1:100.0 @test g(p)≈sqrt(p) atol=1e-3 rtol=1e-3 - @test ForwardDiff.derivative(g, p)≈1 / (2 * sqrt(p)) atol=1e-3 rtol=1e-3 + @test ForwardDiff.derivative(g, p)≈1/(2*sqrt(p)) atol=1e-3 rtol=1e-3 end t = (p) -> [sqrt(p[2] / p[1])] @@ -30,7 +30,7 @@ end return [sol.u] end - @test g2(p)≈[sqrt(p[2] / p[1])] atol=1e-3 rtol=1e-3 + @test g2(p)≈[sqrt(p[2]/p[1])] atol=1e-3 rtol=1e-3 @test ForwardDiff.jacobian(g2, 
p)≈ForwardDiff.jacobian(t, p) atol=1e-3 rtol=1e-3 probB = IntervalNonlinearProblem{false}(quadratic_f, (1.0, 2.0), 2.0) @@ -50,8 +50,8 @@ end end @testitem "Tolerance Tests Interval Methods" setup=[RootfindingTestSnippet] tags=[:core] begin - prob = IntervalNonlinearProblem(quadratic_f, (1.0, 20.0), 2.0) - ϵ = eps(Float64) # least possible tol for all methods + prob=IntervalNonlinearProblem(quadratic_f, (1.0, 20.0), 2.0) + ϵ=eps(Float64) # least possible tol for all methods @testset for alg in (Bisection(), Falsi(), ITP(), Muller(), nothing) @testset for abstol in [0.1, 0.01, 0.001, 0.0001, 1e-5, 1e-6] diff --git a/lib/NonlinearSolveBase/ext/NonlinearSolveBaseForwardDiffExt.jl b/lib/NonlinearSolveBase/ext/NonlinearSolveBaseForwardDiffExt.jl index 717daa8e4..7cfc792e8 100644 --- a/lib/NonlinearSolveBase/ext/NonlinearSolveBaseForwardDiffExt.jl +++ b/lib/NonlinearSolveBase/ext/NonlinearSolveBaseForwardDiffExt.jl @@ -124,7 +124,8 @@ for algType in GENERAL_SOLVER_TYPES @eval function SciMLBase.__solve( prob::DualAbstractNonlinearProblem, alg::$(algType), args...; kwargs... ) - sol, partials = NonlinearSolveBase.nonlinearsolve_forwarddiff_solve( + sol, + partials = NonlinearSolveBase.nonlinearsolve_forwarddiff_solve( prob, alg, args...; kwargs... ) dual_soln = NonlinearSolveBase.nonlinearsolve_dual_solution(sol.u, partials, prob.p) diff --git a/lib/NonlinearSolveBase/src/NonlinearSolveBase.jl b/lib/NonlinearSolveBase/src/NonlinearSolveBase.jl index 7d1260260..e3efc9ef5 100644 --- a/lib/NonlinearSolveBase/src/NonlinearSolveBase.jl +++ b/lib/NonlinearSolveBase/src/NonlinearSolveBase.jl @@ -17,7 +17,7 @@ using EnzymeCore: EnzymeCore using MaybeInplace: @bb using RecursiveArrayTools: AbstractVectorOfArray, ArrayPartition using SciMLBase: SciMLBase, ReturnCode, AbstractODEIntegrator, AbstractNonlinearProblem, - AbstractNonlinearAlgorithm, + AbstractNonlinearAlgorithm, NonlinearProblem, NonlinearLeastSquaresProblem, NonlinearFunction, NLStats, LinearProblem, LinearAliasSpecifier, ImmutableNonlinearProblem @@ -71,7 +71,7 @@ include("forward_diff.jl") @compat(public, (construct_jacobian_cache,)) @compat(public, (assert_extension_supported_termination_condition, - construct_extension_function_wrapper, construct_extension_jac)) + construct_extension_function_wrapper, construct_extension_jac)) export TraceMinimal, TraceWithJacobianConditionNumber, TraceAll diff --git a/lib/NonlinearSolveBase/src/autodiff.jl b/lib/NonlinearSolveBase/src/autodiff.jl index 509512a05..b09992c1b 100644 --- a/lib/NonlinearSolveBase/src/autodiff.jl +++ b/lib/NonlinearSolveBase/src/autodiff.jl @@ -118,7 +118,8 @@ function nlls_generate_vjp_function(prob::NonlinearLeastSquaresProblem, sol, uu) # nested autodiff as the last resort if SciMLBase.has_vjp(prob.f) if SciMLBase.isinplace(prob) - return @closure (du, u, p) -> begin + return @closure ( + du, u, p) -> begin resid = Utils.safe_similar(du, length(sol.resid)) prob.f(resid, u, p) prob.f.vjp(du, resid, u, p) @@ -126,14 +127,16 @@ function nlls_generate_vjp_function(prob::NonlinearLeastSquaresProblem, sol, uu) return nothing end else - return @closure (u, p) -> begin + return @closure ( + u, p) -> begin resid = prob.f(u, p) return reshape(2 .* prob.f.vjp(resid, u, p), size(u)) end end elseif SciMLBase.has_jac(prob.f) if SciMLBase.isinplace(prob) - return @closure (du, u, p) -> begin + return @closure ( + du, u, p) -> begin J = Utils.safe_similar(du, length(sol.resid), length(u)) prob.f.jac(J, u, p) resid = Utils.safe_similar(du, length(sol.resid)) @@ -142,7 +145,8 @@ function 
nlls_generate_vjp_function(prob::NonlinearLeastSquaresProblem, sol, uu) return nothing end else - return @closure (u, p) -> begin + return @closure (u, + p) -> begin return reshape(2 .* vec(prob.f(u, p))' * prob.f.jac(u, p), size(u)) end end @@ -152,7 +156,8 @@ function nlls_generate_vjp_function(prob::NonlinearLeastSquaresProblem, sol, uu) select_reverse_mode_autodiff(prob, nothing) : AutoForwardDiff() if SciMLBase.isinplace(prob) - return @closure (du, u, p) -> begin + return @closure ( + du, u, p) -> begin resid = Utils.safe_similar(du, length(sol.resid)) prob.f(resid, u, p) # Using `Constant` lead to dual ordering issues @@ -163,7 +168,8 @@ function nlls_generate_vjp_function(prob::NonlinearLeastSquaresProblem, sol, uu) return nothing end else - return @closure (u, p) -> begin + return @closure (u, + p) -> begin v = prob.f(u, p) # Using `Constant` lead to dual ordering issues res = only(DI.pullback(Base.Fix2(prob.f, p), autodiff, u, (v,))) diff --git a/lib/NonlinearSolveBase/src/initialization.jl b/lib/NonlinearSolveBase/src/initialization.jl index e3612e2cb..a476eae9b 100644 --- a/lib/NonlinearSolveBase/src/initialization.jl +++ b/lib/NonlinearSolveBase/src/initialization.jl @@ -24,7 +24,8 @@ function _run_initialization!(cache, initalg::SciMLBase.OverrideInit, prob, if alg === nothing && cache isa AbstractNonlinearSolveCache alg = cache.alg end - u0, p, success = SciMLBase.get_initial_values( + u0, p, + success = SciMLBase.get_initial_values( prob, cache, prob.f, initalg, isinplace; nlsolve_alg = alg, abstol = get_abstol(cache), reltol = get_reltol(cache)) cache = update_initial_values!(cache, u0, p) diff --git a/lib/NonlinearSolveBase/src/polyalg.jl b/lib/NonlinearSolveBase/src/polyalg.jl index 3867906b8..1a6e1cfae 100644 --- a/lib/NonlinearSolveBase/src/polyalg.jl +++ b/lib/NonlinearSolveBase/src/polyalg.jl @@ -162,8 +162,8 @@ end if !NonlinearSolveBase.not_terminated($(cache_syms[i])) # If a NonlinearLeastSquaresProblem StalledSuccess, try the next # solver to see if you get a lower residual - if SciMLBase.successful_retcode($(cache_syms[i]).retcode) && - $(cache_syms[i]).retcode != ReturnCode.StalledSuccess + if SciMLBase.successful_retcode($(cache_syms[i]).retcode) && + $(cache_syms[i]).retcode != ReturnCode.StalledSuccess cache.best = $(i) cache.force_stop = true cache.retcode = $(cache_syms[i]).retcode diff --git a/lib/NonlinearSolveBase/src/public.jl b/lib/NonlinearSolveBase/src/public.jl index a9bae2a5e..506f37527 100644 --- a/lib/NonlinearSolveBase/src/public.jl +++ b/lib/NonlinearSolveBase/src/public.jl @@ -87,6 +87,7 @@ for name in (:Norm, :RelNorm, :AbsNorm) end for norm_type in (:RelNorm, :AbsNorm), safety in (:Safe, :SafeBest) + struct_name = Symbol(norm_type, safety, :TerminationMode) supertype_name = Symbol(:Abstract, safety, :NonlinearTerminationMode) diff --git a/lib/NonlinearSolveBase/src/solve.jl b/lib/NonlinearSolveBase/src/solve.jl index cd23afb4a..91b7a6aa6 100644 --- a/lib/NonlinearSolveBase/src/solve.jl +++ b/lib/NonlinearSolveBase/src/solve.jl @@ -174,7 +174,8 @@ end $(prob_syms[i]), alg.algs[$(i)], args...; stats, alias_u0, verbose, kwargs... 
) - if SciMLBase.successful_retcode($(cur_sol)) && $(cur_sol).retcode !== ReturnCode.StalledSuccess + if SciMLBase.successful_retcode($(cur_sol)) && + $(cur_sol).retcode !== ReturnCode.StalledSuccess if alias_u0 copyto!(u0, $(cur_sol).u) $(u_result_syms[i]) = u0 diff --git a/lib/NonlinearSolveBase/src/termination_conditions.jl b/lib/NonlinearSolveBase/src/termination_conditions.jl index 2929808a0..de9ac1392 100644 --- a/lib/NonlinearSolveBase/src/termination_conditions.jl +++ b/lib/NonlinearSolveBase/src/termination_conditions.jl @@ -98,7 +98,7 @@ function SciMLBase.reinit!( length(saved_value_prototype) != 0 && (cache.saved_values = saved_value_prototype) mode = cache.mode - if u isa Number || !ArrayInterface.can_setindex(u) + if u isa Number || !ArrayInterface.can_setindex(u) cache.u = u else cache.u .= u @@ -151,7 +151,6 @@ end function (cache::NonlinearTerminationModeCache)( mode::AbstractSafeNonlinearTerminationMode, du, u, uprev, abstol, reltol, args... ) - if mode isa AbsNormSafeTerminationMode || mode isa AbsNormSafeBestTerminationMode objective = Utils.apply_norm(mode.internalnorm, du) criteria = abstol diff --git a/lib/NonlinearSolveFirstOrder/src/NonlinearSolveFirstOrder.jl b/lib/NonlinearSolveFirstOrder/src/NonlinearSolveFirstOrder.jl index eedc54d0d..57a1f0105 100644 --- a/lib/NonlinearSolveFirstOrder/src/NonlinearSolveFirstOrder.jl +++ b/lib/NonlinearSolveFirstOrder/src/NonlinearSolveFirstOrder.jl @@ -55,7 +55,8 @@ include("forward_diff.jl") nonlinear_functions = ( (NonlinearFunction{false, NoSpecialize}((u, p) -> (u .^ 2 .- p)[1:1]), [0.1, 0.0]), ( - NonlinearFunction{false, NoSpecialize}((u, p) -> vcat(u .* u .- p, u .* u .- p)), + NonlinearFunction{false, NoSpecialize}(( + u, p) -> vcat(u .* u .- p, u .* u .- p)), [0.1, 0.1] ), ( @@ -83,10 +84,12 @@ include("forward_diff.jl") @compile_workload begin @sync begin for prob in nonlinear_problems, alg in nlp_algs + Threads.@spawn CommonSolve.solve(prob, alg; abstol = 1e-2, verbose = false) end for prob in nlls_problems, alg in nlls_algs + Threads.@spawn CommonSolve.solve(prob, alg; abstol = 1e-2, verbose = false) end end diff --git a/lib/NonlinearSolveFirstOrder/src/forward_diff.jl b/lib/NonlinearSolveFirstOrder/src/forward_diff.jl index 86f4b072a..204d9009d 100644 --- a/lib/NonlinearSolveFirstOrder/src/forward_diff.jl +++ b/lib/NonlinearSolveFirstOrder/src/forward_diff.jl @@ -24,7 +24,8 @@ end function SciMLBase.__solve( prob::DualAbstractNonlinearProblem, alg::GeneralizedFirstOrderAlgorithm, args...; kwargs... ) - sol, partials = NonlinearSolveBase.nonlinearsolve_forwarddiff_solve( + sol, + partials = NonlinearSolveBase.nonlinearsolve_forwarddiff_solve( prob, alg, args...; kwargs... 
) dual_soln = NonlinearSolveBase.nonlinearsolve_dual_solution(sol.u, partials, prob.p) diff --git a/lib/NonlinearSolveFirstOrder/src/solve.jl b/lib/NonlinearSolveFirstOrder/src/solve.jl index ec0d54da6..e9fc30e2e 100644 --- a/lib/NonlinearSolveFirstOrder/src/solve.jl +++ b/lib/NonlinearSolveFirstOrder/src/solve.jl @@ -153,7 +153,8 @@ function SciMLBase.__init( linsolve = NonlinearSolveBase.get_linear_solver(alg.descent) - abstol, reltol, termination_cache = NonlinearSolveBase.init_termination_cache( + abstol, reltol, + termination_cache = NonlinearSolveBase.init_termination_cache( prob, abstol, reltol, fu, u, termination_condition, Val(:regular) ) linsolve_kwargs = merge((; abstol, reltol), linsolve_kwargs) @@ -288,7 +289,8 @@ function InternalAPI.step!( end elseif cache.globalization isa Val{:TrustRegion} @static_timeit cache.timer "trustregion" begin - tr_accepted, u_new, fu_new = InternalAPI.solve!( + tr_accepted, u_new, + fu_new = InternalAPI.solve!( cache.trustregion_cache, J, cache.fu, cache.u, δu, descent_intermediates ) if tr_accepted diff --git a/lib/NonlinearSolveFirstOrder/test/least_squares_tests.jl b/lib/NonlinearSolveFirstOrder/test/least_squares_tests.jl index bf6ed48c8..7d41da88b 100644 --- a/lib/NonlinearSolveFirstOrder/test/least_squares_tests.jl +++ b/lib/NonlinearSolveFirstOrder/test/least_squares_tests.jl @@ -20,6 +20,7 @@ for linsolve in [nothing, LUFactorization(), KrylovJL_GMRES(), KrylovJL_LSMR()] vjp_autodiffs = linsolve isa KrylovJL ? [nothing, AutoZygote(), AutoFiniteDiff()] : [nothing] for linesearch in linesearches, vjp_autodiff in vjp_autodiffs + push!(solvers, GaussNewton(; linsolve, linesearch, vjp_autodiff)) end end @@ -46,10 +47,11 @@ end @testitem "General NLLS Solvers" setup=[CoreNLLSTesting] tags=[:core] begin using LinearAlgebra - nlls_problems = [prob_oop, prob_iip, prob_oop_vjp, prob_iip_vjp] + nlls_problems=[prob_oop, prob_iip, prob_oop_vjp, prob_iip_vjp] for prob in nlls_problems, solver in solvers - sol = solve(prob, solver; maxiters = 10000, abstol = 1e-6) + + sol=solve(prob, solver; maxiters = 10000, abstol = 1e-6) @test SciMLBase.successful_retcode(sol) @test norm(sol.resid, 2) < 1e-6 end diff --git a/lib/NonlinearSolveFirstOrder/test/rootfind_tests.jl b/lib/NonlinearSolveFirstOrder/test/rootfind_tests.jl index 393399589..a1c1968fb 100644 --- a/lib/NonlinearSolveFirstOrder/test/rootfind_tests.jl +++ b/lib/NonlinearSolveFirstOrder/test/rootfind_tests.jl @@ -10,20 +10,20 @@ end using BenchmarkTools: @ballocated using StaticArrays: @SVector using Zygote, ForwardDiff, FiniteDiff - + # Conditionally import Enzyme only if not on Julia prerelease - if isempty(VERSION.prerelease) + if isempty(VERSION.prerelease) using Enzyme end - u0s = ([1.0, 1.0], @SVector[1.0, 1.0], 1.0) + u0s=([1.0, 1.0], @SVector[1.0, 1.0], 1.0) # Filter autodiff backends based on Julia version - autodiff_backends = [AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()] + autodiff_backends=[AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()] if isempty(VERSION.prerelease) push!(autodiff_backends, AutoEnzyme()) end - + @testset for ad in autodiff_backends @testset "$(nameof(typeof(linesearch)))" for linesearch in ( LineSearchesJL(; method = LineSearches.Static(), autodiff = ad), @@ -55,9 +55,10 @@ end ( Val(true), KrylovJL_GMRES(; - precs = (A, p = nothing) -> ( - Diagonal(randn!(similar(A, size(A, 1)))), LinearAlgebra.I - ) + precs = (A, + p = nothing) -> ( + Diagonal(randn!(similar(A, size(A, 1)))), LinearAlgebra.I + ) ) ), (Val(false), \) @@ -82,7 +83,7 @@ end end @testitem 
"NewtonRaphson: Iterator Interface" setup=[CoreRootfindTesting] tags=[:core] begin - p = range(0.01, 2, length = 200) + p=range(0.01, 2, length = 200) @test nlprob_iterator_interface(quadratic_f, p, false, NewtonRaphson()) ≈ sqrt.(p) @test nlprob_iterator_interface(quadratic_f!, p, true, NewtonRaphson()) ≈ sqrt.(p) end @@ -90,7 +91,9 @@ end @testitem "NewtonRaphson Termination Conditions" setup=[CoreRootfindTesting] tags=[:core] begin using StaticArrays: @SVector - @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in TERMINATION_CONDITIONS + @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in + TERMINATION_CONDITIONS + @testset "u0: $(typeof(u0))" for u0 in ([1.0, 1.0], 1.0, @SVector([1.0, 1.0])) probN = NonlinearProblem(quadratic_f, u0, 2.0) sol = solve(probN, NewtonRaphson(); termination_condition) @@ -105,23 +108,23 @@ end using BenchmarkTools: @ballocated using StaticArrays: @SVector using Zygote, ForwardDiff, FiniteDiff - + # Conditionally import Enzyme only if not on Julia prerelease - if isempty(VERSION.prerelease) + if isempty(VERSION.prerelease) using Enzyme end - preconditioners = [ - (u0) -> nothing, - u0 -> ((args...) -> (Diagonal(rand!(similar(u0))), nothing)) + preconditioners=[ + (u0)->nothing, + u0->((args...)->(Diagonal(rand!(similar(u0))), nothing)) ] # Filter autodiff backends based on Julia version - autodiff_backends = [AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()] + autodiff_backends=[AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()] if isempty(VERSION.prerelease) push!(autodiff_backends, AutoEnzyme()) end - + @testset for ad in autodiff_backends u0s = ([1.0, 1.0], @SVector[1.0, 1.0], 1.0) @@ -147,9 +150,10 @@ end ( Val(true), KrylovJL_GMRES(; - precs = (A, p = nothing) -> ( - Diagonal(randn!(similar(A, size(A, 1)))), LinearAlgebra.I - ) + precs = (A, + p = nothing) -> ( + Diagonal(randn!(similar(A, size(A, 1)))), LinearAlgebra.I + ) ) ), (Val(false), \) @@ -172,7 +176,7 @@ end end @testitem "PseudoTransient: Iterator Interface" setup=[CoreRootfindTesting] tags=[:core] begin - p = range(0.01, 2, length = 200) + p=range(0.01, 2, length = 200) @test nlprob_iterator_interface( quadratic_f, p, false, PseudoTransient(; alpha_initial = 10.0) ) ≈ sqrt.(p) @@ -184,7 +188,9 @@ end @testitem "PseudoTransient Termination Conditions" setup=[CoreRootfindTesting] tags=[:core] begin using StaticArrays: @SVector - @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in TERMINATION_CONDITIONS + @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in + TERMINATION_CONDITIONS + @testset "u0: $(typeof(u0))" for u0 in ([1.0, 1.0], 1.0, @SVector([1.0, 1.0])) probN = NonlinearProblem(quadratic_f, u0, 2.0) sol = solve(probN, PseudoTransient(); termination_condition) @@ -199,24 +205,24 @@ end using BenchmarkTools: @ballocated using StaticArrays: @SVector using Zygote, ForwardDiff, FiniteDiff - + # Conditionally import Enzyme only if not on Julia prerelease - if isempty(VERSION.prerelease) + if isempty(VERSION.prerelease) using Enzyme end - radius_update_schemes = [ + radius_update_schemes=[ RadiusUpdateSchemes.Simple, RadiusUpdateSchemes.NocedalWright, RadiusUpdateSchemes.NLsolve, RadiusUpdateSchemes.Hei, RadiusUpdateSchemes.Yuan, RadiusUpdateSchemes.Fan, RadiusUpdateSchemes.Bastin ] # Filter autodiff backends based on Julia version - autodiff_backends = [AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()] + autodiff_backends=[AutoForwardDiff(), AutoZygote(), 
AutoFiniteDiff()] if isempty(VERSION.prerelease) push!(autodiff_backends, AutoEnzyme()) end - + @testset for ad in autodiff_backends @testset for radius_update_scheme in radius_update_schemes, linsolve in (nothing, LUFactorization(), KrylovJL_GMRES(), \) @@ -252,52 +258,52 @@ end end @testitem "TrustRegion: Iterator Interface" setup=[CoreRootfindTesting] tags=[:core] begin - p = range(0.01, 2, length = 200) + p=range(0.01, 2, length = 200) @test nlprob_iterator_interface(quadratic_f, p, false, TrustRegion()) ≈ sqrt.(p) @test nlprob_iterator_interface(quadratic_f!, p, true, TrustRegion()) ≈ sqrt.(p) end @testitem "TrustRegion NewtonRaphson Fails" setup=[CoreRootfindTesting] tags=[:core] begin - u0 = [-10.0, -1.0, 1.0, 2.0, 3.0, 4.0, 10.0] - p = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] - sol = solve_oop(newton_fails, u0, p; solver = TrustRegion()) + u0=[-10.0, -1.0, 1.0, 2.0, 3.0, 4.0, 10.0] + p=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + sol=solve_oop(newton_fails, u0, p; solver = TrustRegion()) @test SciMLBase.successful_retcode(sol) @test all(abs.(newton_fails(sol.u, p)) .< 1e-9) end @testitem "TrustRegion: Kwargs" setup=[CoreRootfindTesting] tags=[:core] begin - max_trust_radius = [10.0, 100.0, 1000.0] - initial_trust_radius = [10.0, 1.0, 0.1] - step_threshold = [0.0, 0.01, 0.25] - shrink_threshold = [0.25, 0.3, 0.5] - expand_threshold = [0.5, 0.8, 0.9] - shrink_factor = [0.1, 0.3, 0.5] - expand_factor = [1.5, 2.0, 3.0] - max_shrink_times = [10, 20, 30] - - list_of_options = zip( + max_trust_radius=[10.0, 100.0, 1000.0] + initial_trust_radius=[10.0, 1.0, 0.1] + step_threshold=[0.0, 0.01, 0.25] + shrink_threshold=[0.25, 0.3, 0.5] + expand_threshold=[0.5, 0.8, 0.9] + shrink_factor=[0.1, 0.3, 0.5] + expand_factor=[1.5, 2.0, 3.0] + max_shrink_times=[10, 20, 30] + + list_of_options=zip( max_trust_radius, initial_trust_radius, step_threshold, shrink_threshold, expand_threshold, shrink_factor, expand_factor, max_shrink_times ) for options in list_of_options - alg = TrustRegion(; + alg=TrustRegion(; max_trust_radius = options[1], initial_trust_radius = options[2], step_threshold = options[3], shrink_threshold = options[4], expand_threshold = options[5], shrink_factor = options[6], expand_factor = options[7], max_shrink_times = options[8] ) - sol = solve_oop(quadratic_f, [1.0, 1.0], 2.0; solver = alg) + sol=solve_oop(quadratic_f, [1.0, 1.0], 2.0; solver = alg) @test SciMLBase.successful_retcode(sol) - err = maximum(abs, quadratic_f(sol.u, 2.0)) + err=maximum(abs, quadratic_f(sol.u, 2.0)) @test err < 1e-9 end end @testitem "TrustRegion OOP / IIP Consistency" setup=[CoreRootfindTesting] tags=[:core] begin - maxiterations = [2, 3, 4, 5] - u0 = [1.0, 1.0] + maxiterations=[2, 3, 4, 5] + u0=[1.0, 1.0] @testset for radius_update_scheme in [ RadiusUpdateSchemes.Simple, RadiusUpdateSchemes.NocedalWright, RadiusUpdateSchemes.NLsolve, RadiusUpdateSchemes.Hei, @@ -315,7 +321,9 @@ end @testitem "TrustRegion Termination Conditions" setup=[CoreRootfindTesting] tags=[:core] begin using StaticArrays: @SVector - @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in TERMINATION_CONDITIONS + @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in + TERMINATION_CONDITIONS + @testset "u0: $(typeof(u0))" for u0 in ([1.0, 1.0], 1.0, @SVector([1.0, 1.0])) probN = NonlinearProblem(quadratic_f, u0, 2.0) sol = solve(probN, TrustRegion(); termination_condition) @@ -330,18 +338,18 @@ end using BenchmarkTools: @ballocated using StaticArrays: SVector, @SVector using Zygote, 
ForwardDiff, FiniteDiff - + # Conditionally import Enzyme only if not on Julia prerelease - if isempty(VERSION.prerelease) + if isempty(VERSION.prerelease) using Enzyme end # Filter autodiff backends based on Julia version - autodiff_backends = [AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()] + autodiff_backends=[AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()] if isempty(VERSION.prerelease) push!(autodiff_backends, AutoEnzyme()) end - + @testset for ad in autodiff_backends solver = LevenbergMarquardt(; autodiff = ad) @@ -382,15 +390,15 @@ end end @testitem "LevenbergMarquardt NewtonRaphson Fails" setup=[CoreRootfindTesting] tags=[:core] begin - u0 = [-10.0, -1.0, 1.0, 2.0, 3.0, 4.0, 10.0] - p = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] - sol = solve_oop(newton_fails, u0, p; solver = LevenbergMarquardt()) + u0=[-10.0, -1.0, 1.0, 2.0, 3.0, 4.0, 10.0] + p=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + sol=solve_oop(newton_fails, u0, p; solver = LevenbergMarquardt()) @test SciMLBase.successful_retcode(sol) @test all(abs.(newton_fails(sol.u, p)) .< 1e-9) end @testitem "LevenbergMarquardt: Iterator Interface" setup=[CoreRootfindTesting] tags=[:core] begin - p = range(0.01, 2, length = 200) + p=range(0.01, 2, length = 200) @test nlprob_iterator_interface(quadratic_f, p, false, LevenbergMarquardt()) ≈ sqrt.(p) @test nlprob_iterator_interface(quadratic_f!, p, true, LevenbergMarquardt()) ≈ sqrt.(p) end @@ -398,7 +406,9 @@ end @testitem "LevenbergMarquardt Termination Conditions" setup=[CoreRootfindTesting] tags=[:core] begin using StaticArrays: @SVector - @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in TERMINATION_CONDITIONS + @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in + TERMINATION_CONDITIONS + @testset "u0: $(typeof(u0))" for u0 in ([1.0, 1.0], 1.0, @SVector([1.0, 1.0])) probN = NonlinearProblem(quadratic_f, u0, 2.0) sol = solve(probN, LevenbergMarquardt(); termination_condition) @@ -409,42 +419,42 @@ end end @testitem "LevenbergMarquardt: Kwargs" setup=[CoreRootfindTesting] tags=[:core] begin - damping_initial = [0.5, 2.0, 5.0] - damping_increase_factor = [1.5, 3.0, 10.0] - damping_decrease_factor = Float64[2, 5, 10.0] - finite_diff_step_geodesic = [0.02, 0.2, 0.3] - α_geodesic = [0.6, 0.8, 0.9] - b_uphill = Float64[0, 1, 2] - min_damping_D = [1e-12, 1e-9, 1e-4] - - list_of_options = zip( + damping_initial=[0.5, 2.0, 5.0] + damping_increase_factor=[1.5, 3.0, 10.0] + damping_decrease_factor=Float64[2, 5, 10.0] + finite_diff_step_geodesic=[0.02, 0.2, 0.3] + α_geodesic=[0.6, 0.8, 0.9] + b_uphill=Float64[0, 1, 2] + min_damping_D=[1e-12, 1e-9, 1e-4] + + list_of_options=zip( damping_initial, damping_increase_factor, damping_decrease_factor, finite_diff_step_geodesic, α_geodesic, b_uphill, min_damping_D ) for options in list_of_options - alg = LevenbergMarquardt(; + alg=LevenbergMarquardt(; damping_initial = options[1], damping_increase_factor = options[2], damping_decrease_factor = options[3], finite_diff_step_geodesic = options[4], α_geodesic = options[5], b_uphill = options[6], min_damping_D = options[7] ) - sol = solve_oop(quadratic_f, [1.0, 1.0], 2.0; solver = alg, maxiters = 10000) + sol=solve_oop(quadratic_f, [1.0, 1.0], 2.0; solver = alg, maxiters = 10000) @test SciMLBase.successful_retcode(sol) - err = maximum(abs, quadratic_f(sol.u, 2.0)) + err=maximum(abs, quadratic_f(sol.u, 2.0)) @test err < 1e-9 end end @testitem "Simple Sparse AutoDiff" setup=[CoreRootfindTesting] tags=[:core] begin using ADTypes, 
SparseConnectivityTracer, SparseMatrixColorings - + # Filter autodiff backends based on Julia version - autodiff_backends = [AutoForwardDiff(), AutoFiniteDiff(), AutoZygote()] + autodiff_backends=[AutoForwardDiff(), AutoFiniteDiff(), AutoZygote()] if isempty(VERSION.prerelease) push!(autodiff_backends, AutoEnzyme()) end - + @testset for ad in autodiff_backends @testset for u0 in ([1.0, 1.0], 1.0) prob = NonlinearProblem( @@ -470,51 +480,51 @@ end using LinearAlgebra, LinearSolve, ADTypes function F(u::Vector{Float64}, p::Vector{Float64}) - Δ = Tridiagonal(-ones(99), 2 * ones(100), -ones(99)) - return u + 0.1 * u .* Δ * u - p + Δ=Tridiagonal(-ones(99), 2*ones(100), -ones(99)) + return u+0.1*u .* Δ*u-p end function F!(du::Vector{Float64}, u::Vector{Float64}, p::Vector{Float64}) - Δ = Tridiagonal(-ones(99), 2 * ones(100), -ones(99)) - du .= u + 0.1 * u .* Δ * u - p + Δ=Tridiagonal(-ones(99), 2*ones(100), -ones(99)) + du.=u+0.1*u .* Δ*u-p return nothing end function JVP(v::Vector{Float64}, u::Vector{Float64}, p::Vector{Float64}) - Δ = Tridiagonal(-ones(99), 2 * ones(100), -ones(99)) - return v + 0.1 * (u .* Δ * v + v .* Δ * u) + Δ=Tridiagonal(-ones(99), 2*ones(100), -ones(99)) + return v+0.1*(u .* Δ*v+v .* Δ*u) end function JVP!( du::Vector{Float64}, v::Vector{Float64}, u::Vector{Float64}, p::Vector{Float64}) - Δ = Tridiagonal(-ones(99), 2 * ones(100), -ones(99)) - du .= v + 0.1 * (u .* Δ * v + v .* Δ * u) + Δ=Tridiagonal(-ones(99), 2*ones(100), -ones(99)) + du.=v+0.1*(u .* Δ*v+v .* Δ*u) return nothing end - u0 = rand(100) + u0=rand(100) - prob = NonlinearProblem(NonlinearFunction{false}(F; jvp = JVP), u0, u0) - sol = solve(prob, NewtonRaphson(; linsolve = KrylovJL_GMRES()); abstol = 1e-13) - err = maximum(abs, sol.resid) + prob=NonlinearProblem(NonlinearFunction{false}(F; jvp = JVP), u0, u0) + sol=solve(prob, NewtonRaphson(; linsolve = KrylovJL_GMRES()); abstol = 1e-13) + err=maximum(abs, sol.resid) @test err < 1e-6 - sol = solve( + sol=solve( prob, TrustRegion(; linsolve = KrylovJL_GMRES(), vjp_autodiff = AutoFiniteDiff()); abstol = 1e-13 ) - err = maximum(abs, sol.resid) + err=maximum(abs, sol.resid) @test err < 1e-6 - prob = NonlinearProblem(NonlinearFunction{true}(F!; jvp = JVP!), u0, u0) - sol = solve(prob, NewtonRaphson(; linsolve = KrylovJL_GMRES()); abstol = 1e-13) - err = maximum(abs, sol.resid) + prob=NonlinearProblem(NonlinearFunction{true}(F!; jvp = JVP!), u0, u0) + sol=solve(prob, NewtonRaphson(; linsolve = KrylovJL_GMRES()); abstol = 1e-13) + err=maximum(abs, sol.resid) @test err < 1e-6 - sol = solve( + sol=solve( prob, TrustRegion(; linsolve = KrylovJL_GMRES(), vjp_autodiff = AutoFiniteDiff()); abstol = 1e-13 ) - err = maximum(abs, sol.resid) + err=maximum(abs, sol.resid) @test err < 1e-6 end diff --git a/lib/NonlinearSolveFirstOrder/test/sparsity_tests.jl b/lib/NonlinearSolveFirstOrder/test/sparsity_tests.jl index 3e4df284f..fda9eeb47 100644 --- a/lib/NonlinearSolveFirstOrder/test/sparsity_tests.jl +++ b/lib/NonlinearSolveFirstOrder/test/sparsity_tests.jl @@ -14,7 +14,8 @@ @inbounds for I in CartesianIndices((N, N)) i, j = Tuple(I) x, y = xyd_brusselator[I[1]], xyd_brusselator[I[2]] - ip1, im1, jp1, jm1 = limit(i + 1, N), limit(i - 1, N), limit(j + 1, N), + ip1, im1, + jp1, jm1 = limit(i + 1, N), limit(i - 1, N), limit(j + 1, N), limit(j - 1, N) du[i, j, 1] = alpha * (u[im1, j, 1] + u[ip1, j, 1] + u[i, jp1, 1] + u[i, jm1, 1] - diff --git a/lib/NonlinearSolveHomotopyContinuation/src/jacobian_handling.jl b/lib/NonlinearSolveHomotopyContinuation/src/jacobian_handling.jl index 
1a757d629..2c26b6be0 100644 --- a/lib/NonlinearSolveHomotopyContinuation/src/jacobian_handling.jl +++ b/lib/NonlinearSolveHomotopyContinuation/src/jacobian_handling.jl @@ -81,7 +81,8 @@ end function (f::ComplexDIJacobian{OutOfPlace})(u, U, x, p) U_tmp = f.buffers - u_tmp, _ = DI.value_and_jacobian!( + u_tmp, + _ = DI.value_and_jacobian!( f.f, U_tmp, f.prep, f.autodiff, reinterpret(Float64, x), DI.Constant(p)) copyto!(u, reinterpret(ComplexF64, u_tmp)) U = reinterpret(Float64, U) @@ -120,7 +121,8 @@ function construct_jacobian(f, autodiff, variant, u0, p) if variant == OutOfPlace prep = DI.prepare_jacobian(f, autodiff, tmp, DI.Constant(p), strict = Val(false)) else - prep = DI.prepare_jacobian(f, tmp, autodiff, copy(tmp), DI.Constant(p), strict = Val(false)) + prep = DI.prepare_jacobian( + f, tmp, autodiff, copy(tmp), DI.Constant(p), strict = Val(false)) end if variant == Scalar @@ -186,9 +188,11 @@ function construct_jacobian(f, autodiff::AutoEnzyme, variant, u0, p) else tmp = Vector{ComplexF64}(undef, length(u0)) if variant == Inplace - prep = DI.prepare_jacobian(f, tmp, autodiff, copy(tmp), DI.Constant(p), strict = Val(false)) + prep = DI.prepare_jacobian( + f, tmp, autodiff, copy(tmp), DI.Constant(p), strict = Val(false)) else - prep = DI.prepare_jacobian(f, autodiff, tmp, DI.Constant(p), strict = Val(false)) + prep = DI.prepare_jacobian( + f, autodiff, tmp, DI.Constant(p), strict = Val(false)) end end return EnzymeJacobian{variant}(f, prep, autodiff) diff --git a/lib/NonlinearSolveHomotopyContinuation/test/allroots.jl b/lib/NonlinearSolveHomotopyContinuation/test/allroots.jl index 9031c5c01..aa813e0c1 100644 --- a/lib/NonlinearSolveHomotopyContinuation/test/allroots.jl +++ b/lib/NonlinearSolveHomotopyContinuation/test/allroots.jl @@ -23,7 +23,7 @@ alg = HomotopyContinuationJL{true}(; threading = false) if isempty(VERSION.prerelease) push!(autodiff_backends, (AutoEnzyme(), "no jac - enzyme")) end - + @testset "`NonlinearProblem` - $name" for (jac_or_autodiff, name) in autodiff_backends if jac_or_autodiff isa Function jac = jac_or_autodiff diff --git a/lib/NonlinearSolveHomotopyContinuation/test/single_root.jl b/lib/NonlinearSolveHomotopyContinuation/test/single_root.jl index a9057e731..289116f33 100644 --- a/lib/NonlinearSolveHomotopyContinuation/test/single_root.jl +++ b/lib/NonlinearSolveHomotopyContinuation/test/single_root.jl @@ -17,7 +17,7 @@ alg = HomotopyContinuationJL{false}(; threading = false) if isempty(VERSION.prerelease) push!(autodiff_backends, (AutoEnzyme(), "no jac - enzyme")) end - + @testset "`NonlinearProblem` - $name" for (jac_or_autodiff, name) in autodiff_backends if jac_or_autodiff isa Function jac = jac_or_autodiff diff --git a/lib/NonlinearSolveQuasiNewton/ext/NonlinearSolveQuasiNewtonForwardDiffExt.jl b/lib/NonlinearSolveQuasiNewton/ext/NonlinearSolveQuasiNewtonForwardDiffExt.jl index 74ec64031..1101b8e03 100644 --- a/lib/NonlinearSolveQuasiNewton/ext/NonlinearSolveQuasiNewtonForwardDiffExt.jl +++ b/lib/NonlinearSolveQuasiNewton/ext/NonlinearSolveQuasiNewtonForwardDiffExt.jl @@ -23,7 +23,8 @@ const DualAbstractNonlinearProblem = Union{ function SciMLBase.__solve( prob::DualAbstractNonlinearProblem, alg::QuasiNewtonAlgorithm, args...; kwargs... ) - sol, partials = NonlinearSolveBase.nonlinearsolve_forwarddiff_solve( + sol, + partials = NonlinearSolveBase.nonlinearsolve_forwarddiff_solve( prob, alg, args...; kwargs... 
) dual_soln = NonlinearSolveBase.nonlinearsolve_dual_solution(sol.u, partials, prob.p) diff --git a/lib/NonlinearSolveQuasiNewton/src/NonlinearSolveQuasiNewton.jl b/lib/NonlinearSolveQuasiNewton/src/NonlinearSolveQuasiNewton.jl index 02bbc865f..167f1fa85 100644 --- a/lib/NonlinearSolveQuasiNewton/src/NonlinearSolveQuasiNewton.jl +++ b/lib/NonlinearSolveQuasiNewton/src/NonlinearSolveQuasiNewton.jl @@ -50,6 +50,7 @@ include("solve.jl") @compile_workload begin @sync for prob in nonlinear_problems, alg in algs + Threads.@spawn CommonSolve.solve(prob, alg; abstol = 1e-2, verbose = false) end end diff --git a/lib/NonlinearSolveQuasiNewton/src/solve.jl b/lib/NonlinearSolveQuasiNewton/src/solve.jl index 53289c117..24e740d3f 100644 --- a/lib/NonlinearSolveQuasiNewton/src/solve.jl +++ b/lib/NonlinearSolveQuasiNewton/src/solve.jl @@ -158,15 +158,17 @@ function SciMLBase.__init( stats, linsolve, maxiters, internalnorm ) - abstol, reltol, termination_cache = NonlinearSolveBase.init_termination_cache( + abstol, reltol, + termination_cache = NonlinearSolveBase.init_termination_cache( prob, abstol, reltol, fu, u, termination_condition, Val(:regular) ) linsolve_kwargs = merge((; abstol, reltol), linsolve_kwargs) J = initialization_cache(nothing) - inv_workspace, J = Utils.unwrap_val(inverted_jac) ? - Utils.maybe_pinv!!_workspace(J) : (nothing, J) + inv_workspace, + J = Utils.unwrap_val(inverted_jac) ? + Utils.maybe_pinv!!_workspace(J) : (nothing, J) descent_cache = InternalAPI.init( prob, alg.descent, J, fu, u; @@ -352,7 +354,8 @@ function InternalAPI.step!( end elseif cache.globalization isa Val{:TrustRegion} @static_timeit cache.timer "trustregion" begin - tr_accepted, u_new, fu_new = InternalAPI.solve!( + tr_accepted, u_new, + fu_new = InternalAPI.solve!( cache.trustregion_cache, J, cache.fu, cache.u, δu, descent_intermediates ) if tr_accepted diff --git a/lib/NonlinearSolveQuasiNewton/test/core_tests.jl b/lib/NonlinearSolveQuasiNewton/test/core_tests.jl index d8b4c49cd..367e23246 100644 --- a/lib/NonlinearSolveQuasiNewton/test/core_tests.jl +++ b/lib/NonlinearSolveQuasiNewton/test/core_tests.jl @@ -10,20 +10,20 @@ end using BenchmarkTools: @ballocated using StaticArrays: @SVector using Zygote, ForwardDiff, FiniteDiff - + # Conditionally import Enzyme only if not on Julia prerelease if isempty(VERSION.prerelease) using Enzyme end - u0s = ([1.0, 1.0], @SVector[1.0, 1.0], 1.0) + u0s=([1.0, 1.0], @SVector[1.0, 1.0], 1.0) # Filter autodiff backends based on Julia version - autodiff_backends = [AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()] + autodiff_backends=[AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()] if isempty(VERSION.prerelease) push!(autodiff_backends, AutoEnzyme()) end - + @testset for ad in autodiff_backends @testset "$(nameof(typeof(linesearch)))" for linesearch in ( # LineSearchesJL(; method = LineSearches.Static(), autodiff = ad), @@ -72,7 +72,7 @@ end end @testitem "Broyden: Iterator Interface" setup=[CoreRootfindTesting] tags=[:core] begin - p = range(0.01, 2, length = 200) + p=range(0.01, 2, length = 200) @test nlprob_iterator_interface(quadratic_f, p, false, Broyden()) ≈ sqrt.(p) @test nlprob_iterator_interface(quadratic_f!, p, true, Broyden()) ≈ sqrt.(p) end @@ -80,7 +80,9 @@ end @testitem "Broyden Termination Conditions" setup=[CoreRootfindTesting] tags=[:core] begin using StaticArrays: @SVector - @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in TERMINATION_CONDITIONS + @testset "TC: $(nameof(typeof(termination_condition)))" for 
termination_condition in + TERMINATION_CONDITIONS + @testset "u0: $(typeof(u0))" for u0 in ([1.0, 1.0], 1.0, @SVector([1.0, 1.0])) probN = NonlinearProblem(quadratic_f, u0, 2.0) sol = solve(probN, Broyden(); termination_condition) @@ -96,18 +98,18 @@ end using BenchmarkTools: @ballocated using StaticArrays: @SVector using Zygote, ForwardDiff, FiniteDiff - + # Conditionally import Enzyme only if not on Julia prerelease if isempty(VERSION.prerelease) using Enzyme end # Filter autodiff backends based on Julia version - autodiff_backends = [AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()] + autodiff_backends=[AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()] if isempty(VERSION.prerelease) push!(autodiff_backends, AutoEnzyme()) end - + @testset for ad in autodiff_backends @testset "$(nameof(typeof(linesearch)))" for linesearch in ( # LineSearchesJL(; method = LineSearches.Static(), autodiff = ad), @@ -156,7 +158,7 @@ end end @testitem "Klement: Iterator Interface" setup=[CoreRootfindTesting] tags=[:core] begin - p = range(0.01, 2, length = 200) + p=range(0.01, 2, length = 200) @test nlprob_iterator_interface(quadratic_f, p, false, Klement()) ≈ sqrt.(p) @test nlprob_iterator_interface(quadratic_f!, p, true, Klement()) ≈ sqrt.(p) end @@ -164,7 +166,9 @@ end @testitem "Klement Termination Conditions" setup=[CoreRootfindTesting] tags=[:core] begin using StaticArrays: @SVector - @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in TERMINATION_CONDITIONS + @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in + TERMINATION_CONDITIONS + @testset "u0: $(typeof(u0))" for u0 in ([1.0, 1.0], 1.0, @SVector([1.0, 1.0])) probN = NonlinearProblem(quadratic_f, u0, 2.0) sol = solve(probN, Klement(); termination_condition) @@ -180,18 +184,18 @@ end using BenchmarkTools: @ballocated using StaticArrays: @SVector using Zygote, ForwardDiff, FiniteDiff - + # Conditionally import Enzyme only if not on Julia prerelease if isempty(VERSION.prerelease) using Enzyme end # Filter autodiff backends based on Julia version - autodiff_backends = [AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()] + autodiff_backends=[AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()] if isempty(VERSION.prerelease) push!(autodiff_backends, AutoEnzyme()) end - + @testset for ad in autodiff_backends @testset "$(nameof(typeof(linesearch)))" for linesearch in ( # LineSearchesJL(; method = LineSearches.Static(), autodiff = ad), @@ -240,7 +244,7 @@ end end @testitem "LimitedMemoryBroyden: Iterator Interface" setup=[CoreRootfindTesting] tags=[:core] begin - p = range(0.01, 2, length = 200) + p=range(0.01, 2, length = 200) @test nlprob_iterator_interface(quadratic_f, p, false, LimitedMemoryBroyden()) ≈ sqrt.(p) @test nlprob_iterator_interface(quadratic_f!, p, true, LimitedMemoryBroyden()) ≈ @@ -250,7 +254,9 @@ end @testitem "LimitedMemoryBroyden Termination Conditions" setup=[CoreRootfindTesting] tags=[:core] begin using StaticArrays: @SVector - @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in TERMINATION_CONDITIONS + @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in + TERMINATION_CONDITIONS + @testset "u0: $(typeof(u0))" for u0 in ([1.0, 1.0], 1.0, @SVector([1.0, 1.0])) probN = NonlinearProblem(quadratic_f, u0, 2.0) sol = solve(probN, LimitedMemoryBroyden(); termination_condition) diff --git a/lib/NonlinearSolveSciPy/src/NonlinearSolveSciPy.jl b/lib/NonlinearSolveSciPy/src/NonlinearSolveSciPy.jl index 
4f25a3b12..c328e508a 100644 --- a/lib/NonlinearSolveSciPy/src/NonlinearSolveSciPy.jl +++ b/lib/NonlinearSolveSciPy/src/NonlinearSolveSciPy.jl @@ -15,16 +15,17 @@ function __init__() PY_NONE[] = pyimport("builtins").None _SCIPY_AVAILABLE[] = true catch - _SCIPY_AVAILABLE[] = false end end using SciMLBase -using NonlinearSolveBase: AbstractNonlinearSolveAlgorithm, construct_extension_function_wrapper +using NonlinearSolveBase: AbstractNonlinearSolveAlgorithm, + construct_extension_function_wrapper """ SciPyLeastSquares(; method="trf", loss="linear") + Wrapper over `scipy.optimize.least_squares` (via PythonCall) for solving `NonlinearLeastSquaresProblem`s. The keyword arguments correspond to the `method` ("trf", "dogbox", "lm") and the robust loss function ("linear", @@ -37,9 +38,10 @@ Wrapper over `scipy.optimize.least_squares` (via PythonCall) for solving end function SciPyLeastSquares(; method::String = "trf", loss::String = "linear") - _SCIPY_AVAILABLE[] || error("`SciPyLeastSquares` requires the Python package `scipy` to be available to PythonCall.") + _SCIPY_AVAILABLE[] || + error("`SciPyLeastSquares` requires the Python package `scipy` to be available to PythonCall.") valid_methods = ("trf", "dogbox", "lm") - valid_losses = ("linear", "soft_l1", "huber", "cauchy", "arctan") + valid_losses = ("linear", "soft_l1", "huber", "cauchy", "arctan") method in valid_methods || throw(ArgumentError("Invalid method: $method. Valid methods are: $(join(valid_methods, ", "))")) loss in valid_losses || @@ -47,12 +49,13 @@ function SciPyLeastSquares(; method::String = "trf", loss::String = "linear") return SciPyLeastSquares(method, loss, :SciPyLeastSquares) end -SciPyLeastSquaresTRF() = SciPyLeastSquares(method = "trf") +SciPyLeastSquaresTRF() = SciPyLeastSquares(method = "trf") SciPyLeastSquaresDogbox() = SciPyLeastSquares(method = "dogbox") -SciPyLeastSquaresLM() = SciPyLeastSquares(method = "lm") +SciPyLeastSquaresLM() = SciPyLeastSquares(method = "lm") """ SciPyRoot(; method="hybr") + Wrapper over `scipy.optimize.root` for solving `NonlinearProblem`s. Available methods include "hybr" (default), "lm", "broyden1", "broyden2", "anderson", "diagbroyden", "linearmixing", "excitingmixing", "krylov", "df-sane" – any @@ -64,12 +67,14 @@ method accepted by SciPy. end function SciPyRoot(; method::String = "hybr") - _SCIPY_AVAILABLE[] || error("`SciPyRoot` requires the Python package `scipy` to be available to PythonCall.") + _SCIPY_AVAILABLE[] || + error("`SciPyRoot` requires the Python package `scipy` to be available to PythonCall.") return SciPyRoot(method, :SciPyRoot) end """ SciPyRootScalar(; method="brentq") + Wrapper over `scipy.optimize.root_scalar` for scalar `IntervalNonlinearProblem`s (bracketing problems). The default method uses Brent's algorithm ("brentq"). 
Other valid options: "bisect", "brentq", "brenth", "ridder", "toms748", @@ -81,20 +86,25 @@ Other valid options: "bisect", "brentq", "brenth", "ridder", "toms748", end function SciPyRootScalar(; method::String = "brentq") - _SCIPY_AVAILABLE[] || error("`SciPyRootScalar` requires the Python package `scipy` to be available to PythonCall.") + _SCIPY_AVAILABLE[] || + error("`SciPyRootScalar` requires the Python package `scipy` to be available to PythonCall.") return SciPyRootScalar(method, :SciPyRootScalar) end -""" Internal: wrap a Julia residual function into a Python callable """ +""" +Internal: wrap a Julia residual function into a Python callable +""" function _make_py_residual(f, p) return pyfunc(x_py -> begin x = Vector{Float64}(x_py) - r = f(x, p) + r = f(x, p) return r end) end -""" Internal: wrap a Julia scalar function into a Python callable """ +""" +Internal: wrap a Julia scalar function into a Python callable +""" function _make_py_scalar(f, p) return pyfunc(x_py -> begin x = Float64(x_py) @@ -102,9 +112,10 @@ function _make_py_scalar(f, p) end) end -function SciMLBase.__solve(prob::SciMLBase.NonlinearLeastSquaresProblem, alg::SciPyLeastSquares; - abstol = nothing, maxiters = 10_000, alias_u0::Bool = false, - kwargs...) +function SciMLBase.__solve( + prob::SciMLBase.NonlinearLeastSquaresProblem, alg::SciPyLeastSquares; + abstol = nothing, maxiters = 10_000, alias_u0::Bool = false, + kwargs...) # Construct Python residual py_f = _make_py_residual(prob.f, prob.p) @@ -113,18 +124,18 @@ function SciMLBase.__solve(prob::SciMLBase.NonlinearLeastSquaresProblem, alg::Sc has_ub = hasproperty(prob, :ub) if has_lb || has_ub lb = has_lb ? getproperty(prob, :lb) : fill(-Inf, length(prob.u0)) - ub = has_ub ? getproperty(prob, :ub) : fill( Inf, length(prob.u0)) + ub = has_ub ? getproperty(prob, :ub) : fill(Inf, length(prob.u0)) bounds = (lb, ub) else bounds = nothing end res = scipy_optimize[].least_squares(py_f, collect(prob.u0); - method = alg.method, - loss = alg.loss, - max_nfev = maxiters, - bounds = bounds === nothing ? PY_NONE[] : bounds, - kwargs...) + method = alg.method, + loss = alg.loss, + max_nfev = maxiters, + bounds = bounds === nothing ? PY_NONE[] : bounds, + kwargs...) u_vec = Vector{Float64}(res.x) resid = Vector{Float64}(res.fun) @@ -140,13 +151,12 @@ function SciMLBase.__solve(prob::SciMLBase.NonlinearLeastSquaresProblem, alg::Sc stats = SciMLBase.NLStats(res.nfev, njev, 0, 0, res.nfev) return SciMLBase.build_solution(prob, alg, u, resid; retcode = ret, - original = res, stats = stats) + original = res, stats = stats) end function SciMLBase.__solve(prob::SciMLBase.NonlinearProblem, alg::SciPyRoot; - abstol = nothing, maxiters = 10_000, alias_u0::Bool = false, - kwargs...) - + abstol = nothing, maxiters = 10_000, alias_u0::Bool = false, + kwargs...) f!, u0, resid = construct_extension_function_wrapper(prob; alias_u0) py_f = pyfunc(x_py -> begin @@ -158,27 +168,37 @@ function SciMLBase.__solve(prob::SciMLBase.NonlinearProblem, alg::SciPyRoot; tol = abstol === nothing ? nothing : abstol res = scipy_optimize[].root(py_f, collect(u0); - method = alg.method, - tol = tol, - options = Dict("maxiter" => maxiters), - kwargs...) + method = alg.method, + tol = tol, + options = Dict("maxiter" => maxiters), + kwargs...) u_vec = Vector{Float64}(res.x) - f!(resid, u_vec) + f!(resid, u_vec) u_out = prob.u0 isa Number ? u_vec[1] : reshape(u_vec, size(prob.u0)) ret = res.success ? 
SciMLBase.ReturnCode.Success : SciMLBase.ReturnCode.Failure - nfev = try Int(res.nfev) catch; 0 end - niter = try Int(res.nit) catch; 0 end + nfev = try + Int(res.nfev) + catch + ; + 0 + end + niter = try + Int(res.nit) + catch + ; + 0 + end stats = SciMLBase.NLStats(nfev, 0, 0, 0, niter) return SciMLBase.build_solution(prob, alg, u_out, resid; retcode = ret, - original = res, stats = stats) + original = res, stats = stats) end function SciMLBase.__solve(prob::SciMLBase.IntervalNonlinearProblem, alg::SciPyRootScalar; - abstol = nothing, maxiters = 10_000, kwargs...) + abstol = nothing, maxiters = 10_000, kwargs...) f = prob.f p = prob.p py_f = _make_py_scalar(f, p) @@ -186,28 +206,38 @@ function SciMLBase.__solve(prob::SciMLBase.IntervalNonlinearProblem, alg::SciPyR a, b = prob.tspan res = scipy_optimize[].root_scalar(py_f; - method = alg.method, - bracket = (a, b), - maxiter = maxiters, - xtol = abstol, - kwargs...) + method = alg.method, + bracket = (a, b), + maxiter = maxiters, + xtol = abstol, + kwargs...) u_root = res.root - resid = f(u_root, p) + resid = f(u_root, p) ret = res.converged ? SciMLBase.ReturnCode.Success : SciMLBase.ReturnCode.Failure - nfev = try Int(res.function_calls) catch; 0 end - niter = try Int(res.iterations) catch; 0 end + nfev = try + Int(res.function_calls) + catch + ; + 0 + end + niter = try + Int(res.iterations) + catch + ; + 0 + end stats = SciMLBase.NLStats(nfev, 0, 0, 0, niter) return SciMLBase.build_solution(prob, alg, u_root, resid; retcode = ret, - original = res, stats = stats) + original = res, stats = stats) end @reexport using SciMLBase, NonlinearSolveBase -export SciPyLeastSquares, SciPyLeastSquaresTRF, SciPyLeastSquaresDogbox, SciPyLeastSquaresLM, +export SciPyLeastSquares, SciPyLeastSquaresTRF, SciPyLeastSquaresDogbox, + SciPyLeastSquaresLM, SciPyRoot, SciPyRootScalar end - diff --git a/lib/NonlinearSolveSciPy/test/basic_tests.jl b/lib/NonlinearSolveSciPy/test/basic_tests.jl index 40f33b51a..523c946c6 100644 --- a/lib/NonlinearSolveSciPy/test/basic_tests.jl +++ b/lib/NonlinearSolveSciPy/test/basic_tests.jl @@ -2,4 +2,4 @@ using Test, NonlinearSolveSciPy @test isdefined(NonlinearSolveSciPy, :SciPyLeastSquares) @test isdefined(NonlinearSolveSciPy, :SciPyRoot) -end \ No newline at end of file +end diff --git a/lib/NonlinearSolveSciPy/test/runtests.jl b/lib/NonlinearSolveSciPy/test/runtests.jl index b4c0bec5b..578d6f99d 100644 --- a/lib/NonlinearSolveSciPy/test/runtests.jl +++ b/lib/NonlinearSolveSciPy/test/runtests.jl @@ -12,5 +12,5 @@ const NTHREADS = 1 ReTestItems.runtests( NonlinearSolveSciPy; tags = (GROUP == "all" ? 
nothing : [Symbol(GROUP)]), nworkers = NWORKERS, nworker_threads = NTHREADS, - testitem_timeout = 3600, -) \ No newline at end of file + testitem_timeout = 3600 +) diff --git a/lib/NonlinearSolveSciPy/test/wrappers_tests.jl b/lib/NonlinearSolveSciPy/test/wrappers_tests.jl index ddf86dcdb..8ae9e7e82 100644 --- a/lib/NonlinearSolveSciPy/test/wrappers_tests.jl +++ b/lib/NonlinearSolveSciPy/test/wrappers_tests.jl @@ -9,8 +9,8 @@ end if success xdata = collect(0:0.1:1) - ydata = 2.0 .* xdata .+ 1.0 - function residuals(params, p=nothing) + ydata = 2.0 .* xdata .+ 1.0 + function residuals(params, p = nothing) a, b = params return ydata .- (a .* xdata .+ b) end @@ -18,11 +18,12 @@ prob = NonlinearLeastSquaresProblem(residuals, x0_ls) sol = solve(prob, SciPyLeastSquaresTRF()) @test SciMLBase.successful_retcode(sol) - prob_bounded = NonlinearLeastSquaresProblem(residuals, x0_ls; lb = [0.0, -2.0], ub = [5.0, 3.0]) - sol2 = solve(prob_bounded, SciPyLeastSquares(method="trf")) + prob_bounded = NonlinearLeastSquaresProblem(residuals, x0_ls; lb = [0.0, -2.0], ub = [ + 5.0, 3.0]) + sol2 = solve(prob_bounded, SciPyLeastSquares(method = "trf")) @test SciMLBase.successful_retcode(sol2) else - @test true + @test true end end @@ -36,7 +37,6 @@ end catch end if success - function fvec(u, p) return [2 - 2u[1]; u[1] - 4u[2]] end @@ -45,7 +45,6 @@ end @test SciMLBase.successful_retcode(sol_vec) @test maximum(abs, sol_vec.resid) < 1e-6 - fscalar(x, p) = x^2 - 2 prob_interval = IntervalNonlinearProblem(fscalar, (1.0, 2.0)) sol_scalar = solve(prob_interval, SciPyRootScalar()) @@ -54,4 +53,4 @@ end else @test true end -end \ No newline at end of file +end diff --git a/lib/NonlinearSolveSpectralMethods/ext/NonlinearSolveSpectralMethodsForwardDiffExt.jl b/lib/NonlinearSolveSpectralMethods/ext/NonlinearSolveSpectralMethodsForwardDiffExt.jl index 930c4861c..0aa288922 100644 --- a/lib/NonlinearSolveSpectralMethods/ext/NonlinearSolveSpectralMethodsForwardDiffExt.jl +++ b/lib/NonlinearSolveSpectralMethods/ext/NonlinearSolveSpectralMethodsForwardDiffExt.jl @@ -23,7 +23,8 @@ const DualAbstractNonlinearProblem = Union{ function SciMLBase.__solve( prob::DualAbstractNonlinearProblem, alg::GeneralizedDFSane, args...; kwargs... ) - sol, partials = NonlinearSolveBase.nonlinearsolve_forwarddiff_solve( + sol, + partials = NonlinearSolveBase.nonlinearsolve_forwarddiff_solve( prob, alg, args...; kwargs... ) dual_soln = NonlinearSolveBase.nonlinearsolve_dual_solution(sol.u, partials, prob.p) diff --git a/lib/NonlinearSolveSpectralMethods/src/NonlinearSolveSpectralMethods.jl b/lib/NonlinearSolveSpectralMethods/src/NonlinearSolveSpectralMethods.jl index 49949e438..93a620761 100644 --- a/lib/NonlinearSolveSpectralMethods/src/NonlinearSolveSpectralMethods.jl +++ b/lib/NonlinearSolveSpectralMethods/src/NonlinearSolveSpectralMethods.jl @@ -34,6 +34,7 @@ include("solve.jl") @compile_workload begin @sync for prob in nonlinear_problems, alg in algs + Threads.@spawn CommonSolve.solve(prob, alg; abstol = 1e-2, verbose = false) end end diff --git a/lib/NonlinearSolveSpectralMethods/src/dfsane.jl b/lib/NonlinearSolveSpectralMethods/src/dfsane.jl index 082c23176..cfe8bb2f6 100644 --- a/lib/NonlinearSolveSpectralMethods/src/dfsane.jl +++ b/lib/NonlinearSolveSpectralMethods/src/dfsane.jl @@ -21,7 +21,8 @@ For other keyword arguments, see RobustNonMonotoneLineSearch in LineSearch.jl. 
function DFSane(; sigma_min = 1 // 10^10, sigma_max = 1e10, sigma_1 = 1, M::Int = 10, gamma = 1 // 10^4, tau_min = 1 // 10, tau_max = 1 // 2, n_exp::Int = 2, - max_inner_iterations::Int = 100, eta_strategy::F = (fn_1, n, x_n, f_n) -> fn_1 / n^2 + max_inner_iterations::Int = 100, eta_strategy::F = ( + fn_1, n, x_n, f_n) -> fn_1 / n^2 ) where {F} linesearch = RobustNonMonotoneLineSearch(; gamma = gamma, sigma_1 = sigma_1, M, tau_min = tau_min, tau_max = tau_max, diff --git a/lib/NonlinearSolveSpectralMethods/src/solve.jl b/lib/NonlinearSolveSpectralMethods/src/solve.jl index fd71527c0..ebb8d13a9 100644 --- a/lib/NonlinearSolveSpectralMethods/src/solve.jl +++ b/lib/NonlinearSolveSpectralMethods/src/solve.jl @@ -130,7 +130,8 @@ function SciMLBase.__init( linesearch_cache = CommonSolve.init(prob, alg.linesearch, fu, u; stats, kwargs...) - abstol, reltol, tc_cache = NonlinearSolveBase.init_termination_cache( + abstol, reltol, + tc_cache = NonlinearSolveBase.init_termination_cache( prob, abstol, reltol, fu, u_cache, termination_condition, Val(:regular) ) trace = NonlinearSolveBase.init_nonlinearsolve_trace( diff --git a/lib/NonlinearSolveSpectralMethods/test/core_tests.jl b/lib/NonlinearSolveSpectralMethods/test/core_tests.jl index e127d1e16..9144e4daf 100644 --- a/lib/NonlinearSolveSpectralMethods/test/core_tests.jl +++ b/lib/NonlinearSolveSpectralMethods/test/core_tests.jl @@ -8,7 +8,7 @@ end using BenchmarkTools: @ballocated using StaticArrays: @SVector - u0s = ([1.0, 1.0], @SVector[1.0, 1.0], 1.0) + u0s=([1.0, 1.0], @SVector[1.0, 1.0], 1.0) @testset "[OOP] u0: $(typeof(u0))" for u0 in u0s sol = solve_oop(quadratic_f, u0; solver = DFSane()) @@ -32,44 +32,44 @@ end end @testitem "DFSane Iterator Interface" setup=[CoreRootfindTesting] tags=[:core] begin - p = range(0.01, 2, length = 200) + p=range(0.01, 2, length = 200) @test nlprob_iterator_interface(quadratic_f, p, false, DFSane()) ≈ sqrt.(p) @test nlprob_iterator_interface(quadratic_f!, p, true, DFSane()) ≈ sqrt.(p) end @testitem "DFSane NewtonRaphson Fails" setup=[CoreRootfindTesting] tags=[:core] begin - u0 = [-10.0, -1.0, 1.0, 2.0, 3.0, 4.0, 10.0] - p = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] - sol = solve_oop(newton_fails, u0, p; solver = DFSane()) + u0=[-10.0, -1.0, 1.0, 2.0, 3.0, 4.0, 10.0] + p=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + sol=solve_oop(newton_fails, u0, p; solver = DFSane()) @test SciMLBase.successful_retcode(sol) @test all(abs.(newton_fails(sol.u, p)) .< 1e-9) end @testitem "DFSane: Kwargs" setup=[CoreRootfindTesting] tags=[:core] begin - σ_min = [1e-10, 1e-5, 1e-4] - σ_max = [1e10, 1e5, 1e4] - σ_1 = [1.0, 0.5, 2.0] - M = [10, 1, 100] - γ = [1e-4, 1e-3, 1e-5] - τ_min = [0.1, 0.2, 0.3] - τ_max = [0.5, 0.8, 0.9] - nexp = [2, 1, 2] - η_strategy = [ - (f_1, k, x, F) -> f_1 / k^2, (f_1, k, x, F) -> f_1 / k^3, - (f_1, k, x, F) -> f_1 / k^4 + σ_min=[1e-10, 1e-5, 1e-4] + σ_max=[1e10, 1e5, 1e4] + σ_1=[1.0, 0.5, 2.0] + M=[10, 1, 100] + γ=[1e-4, 1e-3, 1e-5] + τ_min=[0.1, 0.2, 0.3] + τ_max=[0.5, 0.8, 0.9] + nexp=[2, 1, 2] + η_strategy=[ + (f_1, k, x, F)->f_1/k^2, (f_1, k, x, F)->f_1/k^3, + (f_1, k, x, F)->f_1/k^4 ] - list_of_options = zip(σ_min, σ_max, σ_1, M, γ, τ_min, τ_max, nexp, η_strategy) + list_of_options=zip(σ_min, σ_max, σ_1, M, γ, τ_min, τ_max, nexp, η_strategy) for options in list_of_options local probN, sol, alg - alg = DFSane(; + alg=DFSane(; sigma_min = options[1], sigma_max = options[2], sigma_1 = options[3], M = options[4], gamma = options[5], tau_min = options[6], tau_max = options[7], n_exp = options[8], eta_strategy = 
options[9] ) - probN = NonlinearProblem{false}(quadratic_f, [1.0, 1.0], 2.0) - sol = solve(probN, alg, abstol = 1e-11) + probN=NonlinearProblem{false}(quadratic_f, [1.0, 1.0], 2.0) + sol=solve(probN, alg, abstol = 1e-11) @test all(abs.(quadratic_f(sol.u, 2.0)) .< 1e-6) end end @@ -77,7 +77,9 @@ end @testitem "DFSane Termination Conditions" setup=[CoreRootfindTesting] tags=[:core] begin using StaticArrays: @SVector - @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in TERMINATION_CONDITIONS + @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in + TERMINATION_CONDITIONS + @testset "u0: $(typeof(u0))" for u0 in ([1.0, 1.0], 1.0, @SVector([1.0, 1.0])) probN = NonlinearProblem(quadratic_f, u0, 2.0) sol = solve(probN, DFSane(); termination_condition) diff --git a/lib/SCCNonlinearSolve/src/SCCNonlinearSolve.jl b/lib/SCCNonlinearSolve/src/SCCNonlinearSolve.jl index 815f5382e..104bc384c 100644 --- a/lib/SCCNonlinearSolve/src/SCCNonlinearSolve.jl +++ b/lib/SCCNonlinearSolve/src/SCCNonlinearSolve.jl @@ -27,7 +27,8 @@ function CommonSolve.solve(prob::SciMLBase.SCCNonlinearProblem; kwargs...) CommonSolve.solve(prob, SCCAlg(nothing, nothing); kwargs...) end -function CommonSolve.solve(prob::SciMLBase.SCCNonlinearProblem, alg::SciMLBase.AbstractNonlinearAlgorithm; kwargs...) +function CommonSolve.solve(prob::SciMLBase.SCCNonlinearProblem, + alg::SciMLBase.AbstractNonlinearAlgorithm; kwargs...) CommonSolve.solve(prob, SCCAlg(alg, nothing); kwargs...) end diff --git a/lib/SciMLJacobianOperators/src/SciMLJacobianOperators.jl b/lib/SciMLJacobianOperators/src/SciMLJacobianOperators.jl index 3f85706cb..8a697372c 100644 --- a/lib/SciMLJacobianOperators/src/SciMLJacobianOperators.jl +++ b/lib/SciMLJacobianOperators/src/SciMLJacobianOperators.jl @@ -290,7 +290,8 @@ function prepare_vjp(::Val{false}, prob::AbstractNonlinearProblem, if autodiff === nothing && SciMLBase.has_jac(f) if SciMLBase.isinplace(f) jac_cache = similar(u, eltype(fu), length(fu), length(u)) - return @closure (vJ, v, u, p) -> begin + return @closure ( + vJ, v, u, p) -> begin f.jac(jac_cache, u, p) LinearAlgebra.mul!(vec(vJ), jac_cache', vec(v)) return @@ -307,15 +308,22 @@ function prepare_vjp(::Val{false}, prob::AbstractNonlinearProblem, @assert DI.check_inplace(autodiff) "Backend: $(autodiff) doesn't support in-place \ problems." 
fu_cache = copy(fu) - di_extras = DI.prepare_pullback(f, fu_cache, autodiff, u, (fu,), Constant(prob.p), strict=Val(false)) - return @closure (vJ, v, u, p) -> begin + di_extras = DI.prepare_pullback( + f, fu_cache, autodiff, u, (fu,), Constant(prob.p), strict = Val(false)) + return @closure (vJ, + v, + u, + p) -> begin DI.pullback!(f, fu_cache, (reshape(vJ, size(u)),), di_extras, autodiff, u, (reshape(v, size(fu_cache)),), Constant(p)) return end else - di_extras = DI.prepare_pullback(f, autodiff, u, (fu,), Constant(prob.p), strict=Val(false)) - return @closure (v, u, p) -> begin + di_extras = DI.prepare_pullback( + f, autodiff, u, (fu,), Constant(prob.p), strict = Val(false)) + return @closure (v, + u, + p) -> begin return only(DI.pullback( f, di_extras, autodiff, u, (reshape(v, size(fu)),), Constant(p))) end @@ -336,7 +344,8 @@ function prepare_jvp(::Val{false}, prob::AbstractNonlinearProblem, if autodiff === nothing && SciMLBase.has_jac(f) if SciMLBase.isinplace(f) jac_cache = similar(u, eltype(fu), length(fu), length(u)) - return @closure (Jv, v, u, p) -> begin + return @closure ( + Jv, v, u, p) -> begin f.jac(jac_cache, u, p) LinearAlgebra.mul!(vec(Jv), jac_cache, vec(v)) return @@ -352,15 +361,22 @@ function prepare_jvp(::Val{false}, prob::AbstractNonlinearProblem, @assert DI.check_inplace(autodiff) "Backend: $(autodiff) doesn't support in-place \ problems." fu_cache = copy(fu) - di_extras = DI.prepare_pushforward(f, fu_cache, autodiff, u, (u,), Constant(prob.p), strict=Val(false)) - return @closure (Jv, v, u, p) -> begin + di_extras = DI.prepare_pushforward( + f, fu_cache, autodiff, u, (u,), Constant(prob.p), strict = Val(false)) + return @closure (Jv, + v, + u, + p) -> begin DI.pushforward!(f, fu_cache, (reshape(Jv, size(fu_cache)),), di_extras, autodiff, u, (reshape(v, size(u)),), Constant(p)) return end else - di_extras = DI.prepare_pushforward(f, autodiff, u, (u,), Constant(prob.p), strict=Val(false)) - return @closure (v, u, p) -> begin + di_extras = DI.prepare_pushforward( + f, autodiff, u, (u,), Constant(prob.p), strict = Val(false)) + return @closure (v, + u, + p) -> begin return only(DI.pushforward( f, di_extras, autodiff, u, (reshape(v, size(u)),), Constant(p))) end @@ -375,7 +391,7 @@ function prepare_scalar_op(::Val{false}, prob::AbstractNonlinearProblem, @assert autodiff!==nothing "`autodiff` must be provided if `f` doesn't have \ analytic `vjp` or `jvp` or `jac`." 
- di_extras = DI.prepare_derivative(f, autodiff, u, Constant(prob.p), strict=Val(false)) + di_extras = DI.prepare_derivative(f, autodiff, u, Constant(prob.p), strict = Val(false)) return @closure (v, u, p) -> DI.derivative(f, di_extras, autodiff, u, Constant(p)) * v end diff --git a/lib/SciMLJacobianOperators/test/core_tests.jl b/lib/SciMLJacobianOperators/test/core_tests.jl index fbbd51844..7160ae0ec 100644 --- a/lib/SciMLJacobianOperators/test/core_tests.jl +++ b/lib/SciMLJacobianOperators/test/core_tests.jl @@ -2,9 +2,9 @@ using ADTypes, SciMLBase using Zygote, ForwardDiff, FiniteDiff, ReverseDiff, Tracker using SciMLJacobianOperators - + # Conditionally import Enzyme only if not on Julia prerelease - if isempty(VERSION.prerelease) + if isempty(VERSION.prerelease) using Enzyme end @@ -36,9 +36,11 @@ @testset "AutoDiff" begin @testset for jvp_autodiff in forward_ADs, vjp_autodiff in reverse_ADs + jac_op = JacobianOperator(prob, -1.0, 1.0; jvp_autodiff, vjp_autodiff) @testset for u in rand(4), v in rand(4) + sop = StatefulJacobianOperator(jac_op, u, prob.p) @test (sop * v)≈analytic_jvp(v, u, prob.p) atol=1e-5 @test (sop' * v)≈analytic_vjp(v, u, prob.p) atol=1e-5 @@ -60,6 +62,7 @@ jac_op = JacobianOperator(prob, -1.0, 1.0) @testset for u in rand(4), v in rand(4) + sop = StatefulJacobianOperator(jac_op, u, prob.p) @test (sop * v)≈analytic_jvp(v, u, prob.p) atol=1e-5 @test (sop' * v)≈analytic_vjp(v, u, prob.p) atol=1e-5 @@ -79,6 +82,7 @@ jac_op = JacobianOperator(prob, -1.0, 1.0) @testset for u in rand(4), v in rand(4) + sop = StatefulJacobianOperator(jac_op, u, prob.p) @test (sop * v)≈2 * u * v atol=1e-5 @test (sop' * v)≈2 * u * v atol=1e-5 @@ -96,9 +100,9 @@ end using ADTypes, SciMLBase using ForwardDiff, FiniteDiff, ReverseDiff using SciMLJacobianOperators - + # Conditionally import Enzyme only if not on Julia prerelease - if isempty(VERSION.prerelease) + if isempty(VERSION.prerelease) using Enzyme end @@ -133,9 +137,11 @@ end @testset "AutoDiff" begin @testset for jvp_autodiff in forward_ADs, vjp_autodiff in reverse_ADs + jac_op = JacobianOperator(prob, [2.0, 3.0], prob.u0; jvp_autodiff, vjp_autodiff) @testset for u in [rand(2) for _ in 1:4], v in [rand(2) for _ in 1:4] + sop = StatefulJacobianOperator(jac_op, u, prob.p) @test (sop * v)≈analytic_jvp(v, u, prob.p) atol=1e-5 @test (sop' * v)≈analytic_vjp(v, u, prob.p) atol=1e-5 @@ -157,6 +163,7 @@ end jac_op = JacobianOperator(prob, [2.0, 3.0], prob.u0) @testset for u in [rand(2) for _ in 1:4], v in [rand(2) for _ in 1:4] + sop = StatefulJacobianOperator(jac_op, u, prob.p) @test (sop * v)≈analytic_jvp(v, u, prob.p) atol=1e-5 @test (sop' * v)≈analytic_vjp(v, u, prob.p) atol=1e-5 @@ -177,6 +184,7 @@ end jac_op = JacobianOperator(prob, [2.0, 3.0], prob.u0) @testset for u in [rand(2) for _ in 1:4], v in [rand(2) for _ in 1:4] + sop = StatefulJacobianOperator(jac_op, u, prob.p) @test (sop * v)≈analytic_jvp(v, u, prob.p) atol=1e-5 @test (sop' * v)≈analytic_vjp(v, u, prob.p) atol=1e-5 @@ -194,9 +202,9 @@ end using ADTypes, SciMLBase using ForwardDiff, FiniteDiff, ReverseDiff, Zygote, Tracker using SciMLJacobianOperators - + # Conditionally import Enzyme only if not on Julia prerelease - if isempty(VERSION.prerelease) + if isempty(VERSION.prerelease) using Enzyme end @@ -229,9 +237,11 @@ end @testset "AutoDiff" begin @testset for jvp_autodiff in forward_ADs, vjp_autodiff in reverse_ADs + jac_op = JacobianOperator(prob, [2.0, 3.0], prob.u0; jvp_autodiff, vjp_autodiff) @testset for u in [rand(2) for _ in 1:4], v in [rand(2) for _ in 1:4] + sop = 
StatefulJacobianOperator(jac_op, u, prob.p) @test (sop * v)≈analytic_jvp(v, u, prob.p) atol=1e-5 @test (sop' * v)≈analytic_vjp(v, u, prob.p) atol=1e-5 @@ -253,6 +263,7 @@ end jac_op = JacobianOperator(prob, [2.0, 3.0], prob.u0) @testset for u in [rand(2) for _ in 1:4], v in [rand(2) for _ in 1:4] + sop = StatefulJacobianOperator(jac_op, u, prob.p) @test (sop * v)≈analytic_jvp(v, u, prob.p) atol=1e-5 @test (sop' * v)≈analytic_vjp(v, u, prob.p) atol=1e-5 @@ -273,6 +284,7 @@ end jac_op = JacobianOperator(prob, [2.0, 3.0], prob.u0) @testset for u in [rand(2) for _ in 1:4], v in [rand(2) for _ in 1:4] + sop = StatefulJacobianOperator(jac_op, u, prob.p) @test (sop * v)≈analytic_jvp(v, u, prob.p) atol=1e-5 @test (sop' * v)≈analytic_vjp(v, u, prob.p) atol=1e-5 diff --git a/lib/SimpleNonlinearSolve/ext/SimpleNonlinearSolveChainRulesCoreExt.jl b/lib/SimpleNonlinearSolve/ext/SimpleNonlinearSolveChainRulesCoreExt.jl index 50905279a..a9d86ea84 100644 --- a/lib/SimpleNonlinearSolve/ext/SimpleNonlinearSolveChainRulesCoreExt.jl +++ b/lib/SimpleNonlinearSolve/ext/SimpleNonlinearSolveChainRulesCoreExt.jl @@ -13,7 +13,8 @@ function ChainRulesCore.rrule( prob::Union{ImmutableNonlinearProblem, NonlinearLeastSquaresProblem}, sensealg, u0, u0_changed, p, p_changed, alg, args...; kwargs... ) - out, ∇internal = solve_adjoint( + out, + ∇internal = solve_adjoint( prob, sensealg, u0, p, ChainRulesOriginator(), alg, args...; kwargs... ) function ∇simplenonlinearsolve_solve_up(Δ) diff --git a/lib/SimpleNonlinearSolve/ext/SimpleNonlinearSolveReverseDiffExt.jl b/lib/SimpleNonlinearSolve/ext/SimpleNonlinearSolveReverseDiffExt.jl index d34d8bac7..dca55621a 100644 --- a/lib/SimpleNonlinearSolve/ext/SimpleNonlinearSolveReverseDiffExt.jl +++ b/lib/SimpleNonlinearSolve/ext/SimpleNonlinearSolveReverseDiffExt.jl @@ -26,7 +26,8 @@ for pType in (ImmutableNonlinearProblem, NonlinearLeastSquaresProblem) tp, p_changed, alg, args...; kwargs...) u0, p = ReverseDiff.value(tu0), ReverseDiff.value(tp) prob = remake(tprob; u0, p) - out, ∇internal = solve_adjoint( + out, + ∇internal = solve_adjoint( prob, sensealg, u0, p, ReverseDiffOriginator(), alg, args...; kwargs...) function ∇simplenonlinearsolve_solve_up(Δ...) diff --git a/lib/SimpleNonlinearSolve/ext/SimpleNonlinearSolveTrackerExt.jl b/lib/SimpleNonlinearSolve/ext/SimpleNonlinearSolveTrackerExt.jl index d56854316..551d7080a 100644 --- a/lib/SimpleNonlinearSolve/ext/SimpleNonlinearSolveTrackerExt.jl +++ b/lib/SimpleNonlinearSolve/ext/SimpleNonlinearSolveTrackerExt.jl @@ -25,7 +25,8 @@ for pType in (ImmutableNonlinearProblem, NonlinearLeastSquaresProblem) tp, p_changed, alg, args...; kwargs...) u0, p = Tracker.data(tu0), Tracker.data(tp) prob = remake(tprob; u0, p) - out, ∇internal = solve_adjoint( + out, + ∇internal = solve_adjoint( prob, sensealg, u0, p, TrackerOriginator(), alg, args...; kwargs...) 
function ∇simplenonlinearsolve_solve_up(Δ) diff --git a/lib/SimpleNonlinearSolve/src/SimpleNonlinearSolve.jl b/lib/SimpleNonlinearSolve/src/SimpleNonlinearSolve.jl index 495cadef2..1908a1131 100644 --- a/lib/SimpleNonlinearSolve/src/SimpleNonlinearSolve.jl +++ b/lib/SimpleNonlinearSolve/src/SimpleNonlinearSolve.jl @@ -153,6 +153,7 @@ function solve_adjoint_internal end @compile_workload begin @sync for prob in (prob_scalar, prob_iip, prob_oop), alg in algs + Threads.@spawn CommonSolve.solve(prob, alg; abstol = 1e-2, verbose = false) end end diff --git a/lib/SimpleNonlinearSolve/src/broyden.jl b/lib/SimpleNonlinearSolve/src/broyden.jl index 48a056b7d..c7394c433 100644 --- a/lib/SimpleNonlinearSolve/src/broyden.jl +++ b/lib/SimpleNonlinearSolve/src/broyden.jl @@ -55,7 +55,8 @@ function SciMLBase.__solve( @bb δJ⁻¹n = copy(x) @bb δJ⁻¹ = copy(J⁻¹) - abstol, reltol, tc_cache = NonlinearSolveBase.init_termination_cache( + abstol, reltol, + tc_cache = NonlinearSolveBase.init_termination_cache( prob, abstol, reltol, fx, x, termination_condition, Val(:simple) ) diff --git a/lib/SimpleNonlinearSolve/src/dfsane.jl b/lib/SimpleNonlinearSolve/src/dfsane.jl index fb371e3c3..0ba330440 100644 --- a/lib/SimpleNonlinearSolve/src/dfsane.jl +++ b/lib/SimpleNonlinearSolve/src/dfsane.jl @@ -81,7 +81,8 @@ function SciMLBase.__solve( τ_min = T(alg.τ_min) τ_max = T(alg.τ_max) - abstol, reltol, tc_cache = NonlinearSolveBase.init_termination_cache( + abstol, reltol, + tc_cache = NonlinearSolveBase.init_termination_cache( prob, abstol, reltol, fx, x, termination_condition, Val(:simple) ) diff --git a/lib/SimpleNonlinearSolve/src/halley.jl b/lib/SimpleNonlinearSolve/src/halley.jl index 8894821f3..7935e1ede 100644 --- a/lib/SimpleNonlinearSolve/src/halley.jl +++ b/lib/SimpleNonlinearSolve/src/halley.jl @@ -41,7 +41,8 @@ function SciMLBase.__solve( iszero(fx) && return SciMLBase.build_solution(prob, alg, x, fx; retcode = ReturnCode.Success) - abstol, reltol, tc_cache = NonlinearSolveBase.init_termination_cache( + abstol, reltol, + tc_cache = NonlinearSolveBase.init_termination_cache( prob, abstol, reltol, fx, x, termination_condition, Val(:simple) ) diff --git a/lib/SimpleNonlinearSolve/src/klement.jl b/lib/SimpleNonlinearSolve/src/klement.jl index a8fb7705c..041e1962f 100644 --- a/lib/SimpleNonlinearSolve/src/klement.jl +++ b/lib/SimpleNonlinearSolve/src/klement.jl @@ -15,7 +15,8 @@ function SciMLBase.__solve( T = eltype(x) fx = NLBUtils.evaluate_f(prob, x) - abstol, reltol, tc_cache = NonlinearSolveBase.init_termination_cache( + abstol, reltol, + tc_cache = NonlinearSolveBase.init_termination_cache( prob, abstol, reltol, fx, x, termination_condition, Val(:simple) ) diff --git a/lib/SimpleNonlinearSolve/src/lbroyden.jl b/lib/SimpleNonlinearSolve/src/lbroyden.jl index 5f0273c80..979b244f8 100644 --- a/lib/SimpleNonlinearSolve/src/lbroyden.jl +++ b/lib/SimpleNonlinearSolve/src/lbroyden.jl @@ -73,7 +73,8 @@ end U, Vᵀ = init_low_rank_jacobian(x, fx, x isa StaticArray ? 
alg.threshold : Val(η)) - abstol, reltol, tc_cache = NonlinearSolveBase.init_termination_cache( + abstol, reltol, + tc_cache = NonlinearSolveBase.init_termination_cache( prob, abstol, reltol, fx, x, termination_condition, Val(:simple) ) @@ -165,7 +166,8 @@ function internal_static_solve( init_α = inv(alg.alpha) end - converged, res = internal_unrolled_lbroyden_initial_iterations( + converged, + res = internal_unrolled_lbroyden_initial_iterations( prob, xo, fo, δx, abstol, U, Vᵀ, alg.threshold, ls_cache, init_α) converged && return SciMLBase.build_solution( diff --git a/lib/SimpleNonlinearSolve/src/raphson.jl b/lib/SimpleNonlinearSolve/src/raphson.jl index 510de2901..812c53d02 100644 --- a/lib/SimpleNonlinearSolve/src/raphson.jl +++ b/lib/SimpleNonlinearSolve/src/raphson.jl @@ -44,7 +44,8 @@ function SciMLBase.__solve( iszero(fx) && return SciMLBase.build_solution(prob, alg, x, fx; retcode = ReturnCode.Success) - abstol, reltol, tc_cache = NonlinearSolveBase.init_termination_cache( + abstol, reltol, + tc_cache = NonlinearSolveBase.init_termination_cache( prob, abstol, reltol, fx, x, termination_condition, Val(:simple) ) diff --git a/lib/SimpleNonlinearSolve/src/trust_region.jl b/lib/SimpleNonlinearSolve/src/trust_region.jl index 04d170d85..25d34bc17 100644 --- a/lib/SimpleNonlinearSolve/src/trust_region.jl +++ b/lib/SimpleNonlinearSolve/src/trust_region.jl @@ -102,7 +102,8 @@ function SciMLBase.__solve( jac_cache = Utils.prepare_jacobian(prob, autodiff, fx_cache, x) J = Utils.compute_jacobian!!(nothing, prob, autodiff, fx_cache, x, jac_cache) - abstol, reltol, tc_cache = NonlinearSolveBase.init_termination_cache( + abstol, reltol, + tc_cache = NonlinearSolveBase.init_termination_cache( prob, abstol, reltol, fx, x, termination_condition, Val(:simple) ) @@ -158,7 +159,8 @@ function SciMLBase.__solve( if r ≥ η₁ # Termination Checks - solved, retcode, fx_sol, x_sol = Utils.check_termination( + solved, retcode, fx_sol, + x_sol = Utils.check_termination( tc_cache, fx, x, xo, prob ) solved && return SciMLBase.build_solution(prob, alg, x_sol, fx_sol; retcode) diff --git a/lib/SimpleNonlinearSolve/src/utils.jl b/lib/SimpleNonlinearSolve/src/utils.jl index 88bac15db..2f8b49715 100644 --- a/lib/SimpleNonlinearSolve/src/utils.jl +++ b/lib/SimpleNonlinearSolve/src/utils.jl @@ -87,10 +87,12 @@ end function prepare_jacobian(prob, autodiff, fx, x) SciMLBase.has_jac(prob.f) && return AnalyticJacobian() if SciMLBase.isinplace(prob.f) - return DIExtras(DI.prepare_jacobian(prob.f, fx, autodiff, x, Constant(prob.p), strict = Val(false))) + return DIExtras(DI.prepare_jacobian( + prob.f, fx, autodiff, x, Constant(prob.p), strict = Val(false))) else x isa SArray && return DINoPreparation() - return DIExtras(DI.prepare_jacobian(prob.f, autodiff, x, Constant(prob.p), strict = Val(false))) + return DIExtras(DI.prepare_jacobian( + prob.f, autodiff, x, Constant(prob.p), strict = Val(false))) end end @@ -164,7 +166,8 @@ function compute_hvvp(prob, autodiff, _, x::Number, dir::Number) end function compute_hvvp(prob, autodiff, fx, x, dir) jvp_fn = if SciMLBase.isinplace(prob) - @closure (u, p) -> begin + @closure (u, + p) -> begin du = NLBUtils.safe_similar(fx, promote_type(eltype(fx), eltype(u))) return only(DI.pushforward(prob.f, du, autodiff, u, (dir,), Constant(p))) end diff --git a/lib/SimpleNonlinearSolve/test/core/forward_diff_tests.jl b/lib/SimpleNonlinearSolve/test/core/forward_diff_tests.jl index 68a5d6ec0..be0adbf8a 100644 --- a/lib/SimpleNonlinearSolve/test/core/forward_diff_tests.jl +++ 
b/lib/SimpleNonlinearSolve/test/core/forward_diff_tests.jl @@ -33,6 +33,7 @@ @testset "Scalar AD" begin for p in 1.0:0.1:100.0, u0 in us + sol = solve(NonlinearProblem{false}(test_f, u0, p), alg) if SciMLBase.successful_retcode(sol) gs = abs.(ForwardDiff.derivative(p) do pᵢ diff --git a/lib/SimpleNonlinearSolve/test/core/rootfind_tests.jl b/lib/SimpleNonlinearSolve/test/core/rootfind_tests.jl index a0f29c819..def3b9ab1 100644 --- a/lib/SimpleNonlinearSolve/test/core/rootfind_tests.jl +++ b/lib/SimpleNonlinearSolve/test/core/rootfind_tests.jl @@ -1,7 +1,7 @@ @testsnippet RootfindTestSnippet begin using StaticArrays, Random, LinearAlgebra, ForwardDiff, NonlinearSolveBase, SciMLBase using ADTypes, PolyesterForwardDiff, ReverseDiff - + # Conditionally import Enzyme only if not on Julia prerelease if isempty(VERSION.prerelease) using Enzyme @@ -62,7 +62,7 @@ end if isempty(VERSION.prerelease) push!(autodiff_backends, AutoEnzyme()) end - + @testset for autodiff in autodiff_backends @testset "[OOP] u0: $(typeof(u0))" for u0 in ( [1.0, 1.0], @SVector[1.0, 1.0], 1.0) @@ -80,7 +80,8 @@ end @test maximum(abs, quadratic_f(sol.u, 2.0)) < 1e-9 end - @testset "Termination Condition: $(nameof(typeof(termination_condition))) u0: $(nameof(typeof(u0)))" for termination_condition in TERMINATION_CONDITIONS, + @testset "Termination Condition: $(nameof(typeof(termination_condition))) u0: $(nameof(typeof(u0)))" for termination_condition in + TERMINATION_CONDITIONS, u0 in (1.0, [1.0, 1.0], @SVector[1.0, 1.0]) probN = NonlinearProblem(quadratic_f, u0, 2.0) @@ -113,7 +114,8 @@ end end end - @testset "Termination Condition: $(nameof(typeof(termination_condition))) u0: $(nameof(typeof(u0)))" for termination_condition in TERMINATION_CONDITIONS, + @testset "Termination Condition: $(nameof(typeof(termination_condition))) u0: $(nameof(typeof(u0)))" for termination_condition in + TERMINATION_CONDITIONS, u0 in (1.0, [1.0, 1.0], @SVector[1.0, 1.0]) probN = NonlinearProblem(quadratic_f, u0, 2.0) @@ -145,7 +147,8 @@ end @test maximum(abs, quadratic_f(sol.u, 2.0)) < 1e-9 end - @testset "Termination Condition: $(nameof(typeof(termination_condition))) u0: $(nameof(typeof(u0)))" for termination_condition in TERMINATION_CONDITIONS, + @testset "Termination Condition: $(nameof(typeof(termination_condition))) u0: $(nameof(typeof(u0)))" for termination_condition in + TERMINATION_CONDITIONS, u0 in (1.0, [1.0, 1.0], @SVector[1.0, 1.0]) probN = NonlinearProblem(quadratic_f, u0, 2.0) @@ -155,8 +158,8 @@ end end @testitem "Newton Fails" setup=[RootfindTestSnippet] tags=[:core] begin - u0 = [-10.0, -1.0, 1.0, 2.0, 3.0, 4.0, 10.0] - p = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + u0=[-10.0, -1.0, 1.0, 2.0, 3.0, 4.0, 10.0] + p=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] @testset "$(nameof(typeof(alg)))" for alg in ( SimpleDFSane(), @@ -171,7 +174,7 @@ end end @testitem "Kwargs Propagation" setup=[RootfindTestSnippet] tags=[:core] begin - prob = NonlinearProblem(quadratic_f, ones(4), 2.0; maxiters = 2) - sol = solve(prob, SimpleNewtonRaphson()) + prob=NonlinearProblem(quadratic_f, ones(4), 2.0; maxiters = 2) + sol=solve(prob, SimpleNewtonRaphson()) @test sol.retcode === ReturnCode.MaxIters end diff --git a/src/NonlinearSolve.jl b/src/NonlinearSolve.jl index c5c031b7d..b765d9573 100644 --- a/src/NonlinearSolve.jl +++ b/src/NonlinearSolve.jl @@ -67,7 +67,8 @@ include("forward_diff.jl") nonlinear_functions = ( (NonlinearFunction{false, NoSpecialize}((u, p) -> (u .^ 2 .- p)[1:1]), [0.1, 0.0]), ( - NonlinearFunction{false, NoSpecialize}((u, p) -> vcat(u .* u .- 
p, u .* u .- p)), + NonlinearFunction{false, NoSpecialize}(( + u, p) -> vcat(u .* u .- p, u .* u .- p)), [0.1, 0.1] ), ( diff --git a/src/forward_diff.jl b/src/forward_diff.jl index 76fdf6f52..8030d90b4 100644 --- a/src/forward_diff.jl +++ b/src/forward_diff.jl @@ -33,7 +33,8 @@ for algType in EXTENSION_SOLVER_TYPES @eval function SciMLBase.__solve( prob::DualAbstractNonlinearProblem, alg::$(algType), args...; kwargs... ) - sol, partials = NonlinearSolveBase.nonlinearsolve_forwarddiff_solve( + sol, + partials = NonlinearSolveBase.nonlinearsolve_forwarddiff_solve( prob, alg, args...; kwargs... ) dual_soln = NonlinearSolveBase.nonlinearsolve_dual_solution(sol.u, partials, prob.p) diff --git a/test/23_test_problems_tests.jl b/test/23_test_problems_tests.jl index 83a7bef38..684dc8967 100644 --- a/test/23_test_problems_tests.jl +++ b/test/23_test_problems_tests.jl @@ -10,7 +10,8 @@ function test_on_library( x = dict["start"] res = similar(x) nlprob = NonlinearProblem(problem, copy(x)) - @testset "$idx: $(dict["title"]) | alg #$(alg_id)" for (alg_id, alg) in enumerate(alg_ops) + @testset "$idx: $(dict["title"]) | alg #$(alg_id)" for (alg_id, alg) in + enumerate(alg_ops) try sol = solve(nlprob, alg; maxiters = 10000) problem(res, sol.u, nothing) @@ -39,38 +40,38 @@ export test_on_library, problems, dicts end @testitem "23 Test Problems: PolyAlgorithms" setup=[RobustnessTesting] tags=[:core] begin - alg_ops = (RobustMultiNewton(), FastShortcutNonlinearPolyalg()) + alg_ops=(RobustMultiNewton(), FastShortcutNonlinearPolyalg()) - broken_tests = Dict(alg => Int[] for alg in alg_ops) - broken_tests[alg_ops[1]] = [] - broken_tests[alg_ops[2]] = [] + broken_tests=Dict(alg=>Int[] for alg in alg_ops) + broken_tests[alg_ops[1]]=[] + broken_tests[alg_ops[2]]=[] test_on_library(problems, dicts, alg_ops, broken_tests) end @testitem "23 Test Problems: NewtonRaphson" setup=[RobustnessTesting] tags=[:core] begin - alg_ops = ( + alg_ops=( NewtonRaphson(), SimpleNewtonRaphson() ) - broken_tests = Dict(alg => Int[] for alg in alg_ops) - broken_tests[alg_ops[1]] = [1] + broken_tests=Dict(alg=>Int[] for alg in alg_ops) + broken_tests[alg_ops[1]]=[1] test_on_library(problems, dicts, alg_ops, broken_tests) end @testitem "23 Test Problems: Halley" setup=[RobustnessTesting] tags=[:core] begin - alg_ops = (SimpleHalley(; autodiff = AutoForwardDiff()),) + alg_ops=(SimpleHalley(; autodiff = AutoForwardDiff()),) - broken_tests = Dict(alg => Int[] for alg in alg_ops) - broken_tests[alg_ops[1]] = [1, 5, 15, 16, 18] + broken_tests=Dict(alg=>Int[] for alg in alg_ops) + broken_tests[alg_ops[1]]=[1, 5, 15, 16, 18] test_on_library(problems, dicts, alg_ops, broken_tests) end @testitem "23 Test Problems: TrustRegion" setup=[RobustnessTesting] tags=[:core] begin - alg_ops = ( + alg_ops=( TrustRegion(; radius_update_scheme = RadiusUpdateSchemes.Simple), TrustRegion(; radius_update_scheme = RadiusUpdateSchemes.Fan), TrustRegion(; radius_update_scheme = RadiusUpdateSchemes.Hei), @@ -81,15 +82,15 @@ end SimpleTrustRegion(; nlsolve_update_rule = Val(true)) ) - broken_tests = Dict(alg => Int[] for alg in alg_ops) - broken_tests[alg_ops[1]] = [11, 21] - broken_tests[alg_ops[2]] = [11, 21] - broken_tests[alg_ops[3]] = [11, 21] - broken_tests[alg_ops[4]] = [8, 11, 21] - broken_tests[alg_ops[5]] = [21] - broken_tests[alg_ops[6]] = [11, 21] - broken_tests[alg_ops[7]] = [3, 15, 16, 21] - broken_tests[alg_ops[8]] = [15, 16] + broken_tests=Dict(alg=>Int[] for alg in alg_ops) + broken_tests[alg_ops[1]]=[11, 21] + broken_tests[alg_ops[2]]=[11, 21] + 
broken_tests[alg_ops[3]]=[11, 21] + broken_tests[alg_ops[4]]=[8, 11, 21] + broken_tests[alg_ops[5]]=[21] + broken_tests[alg_ops[6]]=[11, 21] + broken_tests[alg_ops[7]]=[3, 15, 16, 21] + broken_tests[alg_ops[8]]=[15, 16] test_on_library(problems, dicts, alg_ops, broken_tests) end @@ -97,43 +98,43 @@ end @testitem "23 Test Problems: LevenbergMarquardt" setup=[RobustnessTesting] tags=[:core] begin using LinearSolve - alg_ops = ( + alg_ops=( LevenbergMarquardt(), LevenbergMarquardt(; α_geodesic = 0.1), LevenbergMarquardt(; linsolve = CholeskyFactorization()) ) - broken_tests = Dict(alg => Int[] for alg in alg_ops) - broken_tests[alg_ops[1]] = [11, 21] - broken_tests[alg_ops[2]] = [11, 21] - broken_tests[alg_ops[3]] = [11, 21] + broken_tests=Dict(alg=>Int[] for alg in alg_ops) + broken_tests[alg_ops[1]]=[11, 21] + broken_tests[alg_ops[2]]=[11, 21] + broken_tests[alg_ops[3]]=[11, 21] test_on_library(problems, dicts, alg_ops, broken_tests) end @testitem "23 Test Problems: DFSane" setup=[RobustnessTesting] tags=[:core] begin - alg_ops = ( + alg_ops=( DFSane(), SimpleDFSane() ) - broken_tests = Dict(alg => Int[] for alg in alg_ops) - broken_tests[alg_ops[1]] = [1, 2, 3, 5, 21] + broken_tests=Dict(alg=>Int[] for alg in alg_ops) + broken_tests[alg_ops[1]]=[1, 2, 3, 5, 21] if Sys.isapple() - if VERSION ≥ v"1.11-" - broken_tests[alg_ops[2]] = [1, 2, 3, 5, 6, 11, 21] + if VERSION≥v"1.11-" + broken_tests[alg_ops[2]]=[1, 2, 3, 5, 6, 11, 21] else - broken_tests[alg_ops[2]] = [1, 2, 3, 5, 6, 21] + broken_tests[alg_ops[2]]=[1, 2, 3, 5, 6, 21] end else - broken_tests[alg_ops[2]] = [1, 2, 3, 5, 6, 11, 21] + broken_tests[alg_ops[2]]=[1, 2, 3, 5, 6, 11, 21] end test_on_library(problems, dicts, alg_ops, broken_tests) end @testitem "23 Test Problems: Broyden" setup=[RobustnessTesting] tags=[:core] retries=3 begin - alg_ops = ( + alg_ops=( Broyden(), Broyden(; init_jacobian = Val(:true_jacobian)), Broyden(; update_rule = Val(:bad_broyden)), @@ -141,37 +142,37 @@ end SimpleBroyden() ) - broken_tests = Dict(alg => Int[] for alg in alg_ops) - broken_tests[alg_ops[2]] = [1, 5, 8, 11, 18] - broken_tests[alg_ops[4]] = [5, 6, 8, 11] + broken_tests=Dict(alg=>Int[] for alg in alg_ops) + broken_tests[alg_ops[2]]=[1, 5, 8, 11, 18] + broken_tests[alg_ops[4]]=[5, 6, 8, 11] if Sys.isapple() - broken_tests[alg_ops[1]] = [1, 5, 11] - broken_tests[alg_ops[3]] = [1, 5, 6, 9, 11] - if VERSION ≥ v"1.11-" - broken_tests[alg_ops[5]] = [1, 4, 5, 11] + broken_tests[alg_ops[1]]=[1, 5, 11] + broken_tests[alg_ops[3]]=[1, 5, 6, 9, 11] + if VERSION≥v"1.11-" + broken_tests[alg_ops[5]]=[1, 4, 5, 11] else - broken_tests[alg_ops[5]] = [1, 5, 11] + broken_tests[alg_ops[5]]=[1, 5, 11] end else - broken_tests[alg_ops[1]] = [1, 5, 11, 15] - broken_tests[alg_ops[3]] = [1, 5, 6, 9, 11, 16] - broken_tests[alg_ops[5]] = [1, 5, 11] + broken_tests[alg_ops[1]]=[1, 5, 11, 15] + broken_tests[alg_ops[3]]=[1, 5, 6, 9, 11, 16] + broken_tests[alg_ops[5]]=[1, 5, 11] end test_on_library(problems, dicts, alg_ops, broken_tests, Sys.isapple() ? 
1e-3 : 1e-4) end @testitem "23 Test Problems: Klement" setup=[RobustnessTesting] tags=[:core] begin - alg_ops = ( + alg_ops=( Klement(), Klement(; init_jacobian = Val(:true_jacobian_diagonal)), SimpleKlement() ) - broken_tests = Dict(alg => Int[] for alg in alg_ops) - broken_tests[alg_ops[1]] = [1, 2, 4, 5, 11, 18, 22] - broken_tests[alg_ops[2]] = [2, 4, 5, 7, 18, 22] - broken_tests[alg_ops[3]] = [1, 2, 4, 5, 11, 22] + broken_tests=Dict(alg=>Int[] for alg in alg_ops) + broken_tests[alg_ops[1]]=[1, 2, 4, 5, 11, 18, 22] + broken_tests[alg_ops[2]]=[2, 4, 5, 7, 18, 22] + broken_tests[alg_ops[3]]=[1, 2, 4, 5, 11, 22] test_on_library(problems, dicts, alg_ops, broken_tests) end @@ -179,10 +180,10 @@ end @testitem "23 Test Problems: PseudoTransient" setup=[RobustnessTesting] tags=[:core] begin # PT relies on the root being a stable equilibrium for convergence, so it won't work on # most problems - alg_ops = (PseudoTransient(),) + alg_ops=(PseudoTransient(),) - broken_tests = Dict(alg => Int[] for alg in alg_ops) - broken_tests[alg_ops[1]] = [1, 2, 3, 11, 15, 16] + broken_tests=Dict(alg=>Int[] for alg in alg_ops) + broken_tests[alg_ops[1]]=[1, 2, 3, 11, 15, 16] test_on_library(problems, dicts, alg_ops, broken_tests) end diff --git a/test/core_tests.jl b/test/core_tests.jl index a62fe87a6..fc0bb2026 100644 --- a/test/core_tests.jl +++ b/test/core_tests.jl @@ -429,8 +429,8 @@ end end @testitem "NonlinearLeastSquares ReturnCode" tags=[:core] begin - f(u,p) = [1.0] - nlf = NonlinearFunction(f; resid_prototype=zeros(1)) + f(u, p) = [1.0] + nlf = NonlinearFunction(f; resid_prototype = zeros(1)) prob = NonlinearLeastSquaresProblem(nlf, [1.0]) sol = solve(prob) @test SciMLBase.successful_retcode(sol) @@ -442,4 +442,4 @@ end prob = NonlinearProblem(f, [1.0, 1.0]) sol = solve(prob) @test SciMLBase.successful_retcode(sol) -end \ No newline at end of file +end diff --git a/test/cuda_tests.jl b/test/cuda_tests.jl index 26e27fda7..af1a21590 100644 --- a/test/cuda_tests.jl +++ b/test/cuda_tests.jl @@ -67,7 +67,8 @@ end end @testset "Mode: $(tcond)" for tcond in NORM_TERMINATION_CONDITIONS - for nfn in (Base.Fix1(maximum, abs), Base.Fix2(norm, 2), Base.Fix2(norm, Inf)) + for nfn in + (Base.Fix1(maximum, abs), Base.Fix2(norm, 2), Base.Fix2(norm, Inf)) @test_nowarn NonlinearSolveBase.check_convergence( tcond(nfn), du, u, uprev, 1e-3, 1e-3) end diff --git a/test/forward_ad_tests.jl b/test/forward_ad_tests.jl index 163349267..f3dd8746e 100644 --- a/test/forward_ad_tests.jl +++ b/test/forward_ad_tests.jl @@ -104,7 +104,6 @@ end for u0 in us, p in ([2.0, 1.0], [2.0 1.0; 3.0 4.0]), mode in (:iip, :oop, :iip_cache, :oop_cache) - compatible(u0, p) || continue compatible(u0, alg) || continue compatible(u0, Val(mode)) || continue diff --git a/test/wrappers/least_squares_tests.jl b/test/wrappers/least_squares_tests.jl index effef126e..99adc847f 100644 --- a/test/wrappers/least_squares_tests.jl +++ b/test/wrappers/least_squares_tests.jl @@ -7,9 +7,9 @@ end @testitem "LeastSquaresOptim.jl" setup=[WrapperNLLSSetup] tags=[:wrappers] begin import LeastSquaresOptim - nlls_problems = [prob_oop, prob_iip] + nlls_problems=[prob_oop, prob_iip] - solvers = [] + solvers=[] for alg in (:lm, :dogleg), autodiff in (nothing, AutoForwardDiff(), AutoFiniteDiff(), :central, :forward) @@ -17,7 +17,8 @@ end end for prob in nlls_problems, solver in solvers - sol = solve(prob, solver; maxiters = 10000, abstol = 1e-8) + + sol=solve(prob, solver; maxiters = 10000, abstol = 1e-8) @test SciMLBase.successful_retcode(sol) @test maximum(abs, sol.resid) < 
1e-6 end @@ -28,14 +29,14 @@ end using ForwardDiff function jac!(J, θ, p) - resid = zeros(length(p)) - ForwardDiff.jacobian!(J, (resid, θ) -> loss_function(resid, θ, p), resid, θ) + resid=zeros(length(p)) + ForwardDiff.jacobian!(J, (resid, θ)->loss_function(resid, θ, p), resid, θ) return J end - jac(θ, p) = ForwardDiff.jacobian(θ -> loss_function(θ, p), θ) + jac(θ, p)=ForwardDiff.jacobian(θ->loss_function(θ, p), θ) - probs = [ + probs=[ NonlinearLeastSquaresProblem( NonlinearFunction{true}( loss_function; resid_prototype = zero(y_target), jac = jac! @@ -53,11 +54,12 @@ end ) ] - solvers = Any[FastLevenbergMarquardtJL(linsolve) for linsolve in (:cholesky, :qr)] - Sys.isapple() || push!(solvers, CMINPACK()) + solvers=Any[FastLevenbergMarquardtJL(linsolve) for linsolve in (:cholesky, :qr)] + Sys.isapple()||push!(solvers, CMINPACK()) for solver in solvers, prob in probs - sol = solve(prob, solver; maxiters = 10000, abstol = 1e-8) + + sol=solve(prob, solver; maxiters = 10000, abstol = 1e-8) @test maximum(abs, sol.resid) < 1e-6 end end @@ -65,7 +67,7 @@ end @testitem "FastLevenbergMarquardt.jl + CMINPACK: Jacobian Not Provided" setup=[WrapperNLLSSetup] tags=[:wrappers] begin import FastLevenbergMarquardt, MINPACK - probs = [ + probs=[ NonlinearLeastSquaresProblem( NonlinearFunction{true}(loss_function; resid_prototype = zero(y_target)), θ_init, x @@ -77,7 +79,7 @@ end NonlinearLeastSquaresProblem(NonlinearFunction{false}(loss_function), θ_init, x) ] - solvers = [] + solvers=[] for linsolve in (:cholesky, :qr), autodiff in (nothing, AutoForwardDiff(), AutoFiniteDiff()) @@ -90,7 +92,8 @@ end end for solver in solvers, prob in probs - sol = solve(prob, solver; maxiters = 10000, abstol = 1e-8) + + sol=solve(prob, solver; maxiters = 10000, abstol = 1e-8) @test maximum(abs, sol.resid) < 1e-6 end end @@ -98,18 +101,18 @@ end @testitem "FastLevenbergMarquardt.jl + StaticArrays" setup=[WrapperNLLSSetup] tags=[:wrappers] begin using StaticArrays, FastLevenbergMarquardt - x_sa = SA[-1.0, -0.5, 0.0, 0.5, 1.0] + x_sa=SA[-1.0, -0.5, 0.0, 0.5, 1.0] - const y_target_sa = true_function(x_sa, θ_true) + const y_target_sa=true_function(x_sa, θ_true) function loss_function_sa(θ, p) - ŷ = true_function(p, θ) + ŷ=true_function(p, θ) return ŷ .- y_target_sa end - θ_init_sa = SVector{4}(θ_init) - prob_sa = NonlinearLeastSquaresProblem{false}(loss_function_sa, θ_init_sa, x) + θ_init_sa=SVector{4}(θ_init) + prob_sa=NonlinearLeastSquaresProblem{false}(loss_function_sa, θ_init_sa, x) - sol = solve(prob_sa, FastLevenbergMarquardtJL()) + sol=solve(prob_sa, FastLevenbergMarquardtJL()) @test maximum(abs, sol.resid) < 1e-6 end diff --git a/test/wrappers/rootfind_tests.jl b/test/wrappers/rootfind_tests.jl index 654194e26..3ecedc0f0 100644 --- a/test/wrappers/rootfind_tests.jl +++ b/test/wrappers/rootfind_tests.jl @@ -2,11 +2,11 @@ import NLSolvers, NLsolve, SIAMFANLEquations, MINPACK, PETSc function f_iip(du, u, p, t) - du[1] = 2 - 2u[1] - du[2] = u[1] - 4u[2] + du[1]=2-2u[1] + du[2]=u[1]-4u[2] end - u0 = zeros(2) - prob_iip = SteadyStateProblem(f_iip, u0) + u0=zeros(2) + prob_iip=SteadyStateProblem(f_iip, u0) @testset "$(nameof(typeof(alg)))" for alg in [ NLSolversJL(NLSolvers.LineSearch(NLSolvers.Newton(), NLSolvers.Backtracking())), @@ -23,9 +23,9 @@ @test maximum(abs, sol.resid) < 1e-6 end - f_oop(u, p, t) = [2 - 2u[1], u[1] - 4u[2]] - u0 = zeros(2) - prob_oop = SteadyStateProblem(f_oop, u0) + f_oop(u, p, t)=[2-2u[1], u[1]-4u[2]] + u0=zeros(2) + prob_oop=SteadyStateProblem(f_oop, u0) @testset "$(nameof(typeof(alg)))" for alg 
in [ NLSolversJL(NLSolvers.LineSearch(NLSolvers.Newton(), NLSolvers.Backtracking())), @@ -48,11 +48,11 @@ end import NLSolvers, NLsolve, SIAMFANLEquations, MINPACK, PETSc function f_iip(du, u, p) - du[1] = 2 - 2u[1] - du[2] = u[1] - 4u[2] + du[1]=2-2u[1] + du[2]=u[1]-4u[2] end - u0 = zeros(2) - prob_iip = NonlinearProblem{true}(f_iip, u0) + u0=zeros(2) + prob_iip=NonlinearProblem{true}(f_iip, u0) @testset "$(nameof(typeof(alg)))" for alg in [ NLSolversJL(NLSolvers.LineSearch(NLSolvers.Newton(), NLSolvers.Backtracking())), @@ -70,9 +70,9 @@ end @test maximum(abs, sol.resid) < 1e-6 end - f_oop(u, p) = [2 - 2u[1], u[1] - 4u[2]] - u0 = zeros(2) - prob_oop = NonlinearProblem{false}(f_oop, u0) + f_oop(u, p)=[2-2u[1], u[1]-4u[2]] + u0=zeros(2) + prob_oop=NonlinearProblem{false}(f_oop, u0) @testset "$(nameof(typeof(alg)))" for alg in [ NLSolversJL(NLSolvers.LineSearch(NLSolvers.Newton(), NLSolvers.Backtracking())), NLsolveJL(), @@ -89,8 +89,8 @@ end @test maximum(abs, sol.resid) < 1e-6 end - f_tol(u, p) = u^2 - 2 - prob_tol = NonlinearProblem(f_tol, 1.0) + f_tol(u, p)=u^2-2 + prob_tol=NonlinearProblem(f_tol, 1.0) for tol in [1e-1, 1e-3, 1e-6, 1e-10, 1e-15], alg in [ NLSolversJL(NLSolvers.LineSearch(NLSolvers.Newton(), NLSolvers.Backtracking())), @@ -103,62 +103,62 @@ end SIAMFANLEquationsJL(; method = :secant) ] - alg isa CMINPACK && Sys.isapple() && continue - alg isa PETScSNES && Sys.iswindows() && continue - sol = solve(prob_tol, alg, abstol = tol) + alg isa CMINPACK&&Sys.isapple()&&continue + alg isa PETScSNES&&Sys.iswindows()&&continue + sol=solve(prob_tol, alg, abstol = tol) @test abs(sol.u[1] - sqrt(2)) < tol end - f_jfnk(u, p) = u^2 - 2 - prob_jfnk = NonlinearProblem(f_jfnk, 1.0) + f_jfnk(u, p)=u^2-2 + prob_jfnk=NonlinearProblem(f_jfnk, 1.0) for tol in [1e-1, 1e-3, 1e-6, 1e-10, 1e-11] - sol = solve(prob_jfnk, SIAMFANLEquationsJL(linsolve = :gmres), abstol = tol) + sol=solve(prob_jfnk, SIAMFANLEquationsJL(linsolve = :gmres), abstol = tol) @test abs(sol.u[1] - sqrt(2)) < tol end # Test the finite differencing technique function f!(fvec, x, p) - fvec[1] = (x[1] + 3) * (x[2]^3 - 7) + 18 - fvec[2] = sin(x[2] * exp(x[1]) - 1) + fvec[1]=(x[1]+3)*(x[2]^3-7)+18 + fvec[2]=sin(x[2]*exp(x[1])-1) end - prob = NonlinearProblem{true}(f!, [0.1; 1.2]) - sol = solve(prob, NLsolveJL(autodiff = :central)) + prob=NonlinearProblem{true}(f!, [0.1; 1.2]) + sol=solve(prob, NLsolveJL(autodiff = :central)) @test maximum(abs, sol.resid) < 1e-6 - sol = solve(prob, SIAMFANLEquationsJL()) + sol=solve(prob, SIAMFANLEquationsJL()) @test maximum(abs, sol.resid) < 1e-6 # Test the autodiff technique - sol = solve(prob, NLsolveJL(autodiff = :forward)) + sol=solve(prob, NLsolveJL(autodiff = :forward)) @test maximum(abs, sol.resid) < 1e-6 # Custom Jacobian - f_custom_jac!(F, u, p) = (F[1:152] = u .^ 2 .- p) - j_custom_jac!(J, u, p) = (J[1:152, 1:152] = diagm(2 .* u)) + f_custom_jac!(F, u, p)=(F[1:152]=u .^ 2 .- p) + j_custom_jac!(J, u, p)=(J[1:152, 1:152]=diagm(2 .* u)) - init = ones(152) - A = ones(152) - A[6] = 0.8 + init=ones(152) + A=ones(152) + A[6]=0.8 - f = NonlinearFunction(f_custom_jac!; jac = j_custom_jac!) - p = A + f=NonlinearFunction(f_custom_jac!; jac = j_custom_jac!) 
+ p=A - ProbN = NonlinearProblem(f, init, p) + ProbN=NonlinearProblem(f, init, p) - sol = solve(ProbN, NLsolveJL(); abstol = 1e-8) + sol=solve(ProbN, NLsolveJL(); abstol = 1e-8) @test maximum(abs, sol.resid) < 1e-6 - sol = solve( + sol=solve( ProbN, NLSolversJL(NLSolvers.LineSearch(NLSolvers.Newton(), NLSolvers.Backtracking())); abstol = 1e-8 ) @test maximum(abs, sol.resid) < 1e-6 - sol = solve(ProbN, SIAMFANLEquationsJL(; method = :newton); abstol = 1e-8) + sol=solve(ProbN, SIAMFANLEquationsJL(; method = :newton); abstol = 1e-8) @test maximum(abs, sol.resid) < 1e-6 - sol = solve(ProbN, SIAMFANLEquationsJL(; method = :pseudotransient); abstol = 1e-8) + sol=solve(ProbN, SIAMFANLEquationsJL(; method = :pseudotransient); abstol = 1e-8) @test maximum(abs, sol.resid) < 1e-6 if !Sys.iswindows() - sol = solve(ProbN, PETScSNES(); abstol = 1e-8) + sol=solve(ProbN, PETScSNES(); abstol = 1e-8) @test maximum(abs, sol.resid) < 1e-6 end end @@ -166,22 +166,22 @@ end @testitem "PETSc SNES Floating Points" tags=[:wrappers] retries=3 skip=:(Sys.iswindows()) begin import PETSc - f(u, p) = u .* u .- 2 + f(u, p)=u .* u .- 2 - u0 = [1.0, 1.0] - probN = NonlinearProblem{false}(f, u0) + u0=[1.0, 1.0] + probN=NonlinearProblem{false}(f, u0) - sol = solve(probN, PETScSNES(); abstol = 1e-8) + sol=solve(probN, PETScSNES(); abstol = 1e-8) @test maximum(abs, sol.resid) < 1e-6 - u0 = [1.0f0, 1.0f0] - probN = NonlinearProblem{false}(f, u0) + u0=[1.0f0, 1.0f0] + probN=NonlinearProblem{false}(f, u0) - sol = solve(probN, PETScSNES(); abstol = 1e-5) + sol=solve(probN, PETScSNES(); abstol = 1e-5) @test maximum(abs, sol.resid) < 1e-4 - u0 = Float16[1.0, 1.0] - probN = NonlinearProblem{false}(f, u0) + u0=Float16[1.0, 1.0] + probN=NonlinearProblem{false}(f, u0) @test_throws AssertionError solve(probN, PETScSNES(); abstol = 1e-8) end
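
As a quick sanity check of the SciPy wrapper algorithms touched in this diff, here is a minimal usage sketch. It is not part of the patch: it assumes Python and SciPy are importable through PythonCall (otherwise the constructors error, as the `_SCIPY_AVAILABLE` guard above shows), and the initial guesses and data are illustrative, mirroring the cases in `wrappers_tests.jl`.

```julia
# Minimal sketch of the SciPy-backed solvers; requires scipy to be available via PythonCall.
using NonlinearSolveSciPy, SciMLBase   # SciMLBase problem types are re-exported

# scipy.optimize.root (default method = "hybr") on a small vector system.
fvec(u, p) = [2 - 2u[1]; u[1] - 4u[2]]
prob_vec = NonlinearProblem(fvec, zeros(2))
sol_vec = solve(prob_vec, SciPyRoot())
SciMLBase.successful_retcode(sol_vec)   # expected to be true when scipy is present

# scipy.optimize.root_scalar with bracketing ("brentq") on an interval problem.
fscalar(x, p) = x^2 - 2
prob_interval = IntervalNonlinearProblem(fscalar, (1.0, 2.0))
sol_scalar = solve(prob_interval, SciPyRootScalar())

# scipy.optimize.least_squares (TRF) on a linear fit, with box bounds as in the tests.
xdata = collect(0:0.1:1)
ydata = 2.0 .* xdata .+ 1.0
residuals(params, p = nothing) = ydata .- (params[1] .* xdata .+ params[2])
prob_ls = NonlinearLeastSquaresProblem(residuals, [1.0, 0.0];
    lb = [0.0, -2.0], ub = [5.0, 3.0])
sol_ls = solve(prob_ls, SciPyLeastSquares(method = "trf"))
```

The sketch only exercises entry points defined in this patch (`SciPyRoot`, `SciPyRootScalar`, `SciPyLeastSquares`); solver options such as `method` and `loss` are validated by the constructors before any Python call is made.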