Apply JuliaFormatter to fix code formatting #651

Status: Closed (wants to merge 1 commit)
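This PR is a mechanical JuliaFormatter pass over the repository: long calls are split at the line-length margin, keyword-argument spacing is normalized (`abstol=1f-4` becomes `abstol = 1.0f-4`), float literals are expanded (`1f0` becomes `1.0f0`), and multi-target assignments are re-wrapped. A minimal sketch of the kind of invocation that produces such a diff follows; the actual configuration lives in the repository's `.JuliaFormatter.toml`, and the `style = SciMLStyle()` and `margin = 92` settings shown here are assumptions, not values confirmed by this PR.

```julia
# Hypothetical reproduction of this formatting pass; the repository's
# .JuliaFormatter.toml is the authoritative source for the real options.
import JuliaFormatter

# `format` rewrites files in place and returns `true` only when every file
# was already formatted, so the same call doubles as a CI format check.
already_formatted = JuliaFormatter.format(
    "."; style = JuliaFormatter.SciMLStyle(), margin = 92)
already_formatted || @warn "Files were reformatted; review and commit the diff."
```

In the per-file diffs below, lines prefixed with `-` show the text before formatting and lines prefixed with `+` show the formatter's output.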
2 changes: 1 addition & 1 deletion docs/make.jl
@@ -51,7 +51,7 @@ makedocs(;
"https://www.sciencedirect.com/science/article/abs/pii/S0045782523007156",
"https://github.com/devernay/cminpack/blob/d1f5f5a273862ca1bbcf58394e4ac060d9e22c76/hybrd.c",
"https://github.com/devernay/cminpack/blob/d1f5f5a273862ca1bbcf58394e4ac060d9e22c76/hybrj.c",
"https://github.com/devernay/cminpack/blob/d1f5f5a273862ca1bbcf58394e4ac060d9e22c76/lmder.c",
"https://github.com/devernay/cminpack/blob/d1f5f5a273862ca1bbcf58394e4ac060d9e22c76/lmder.c"
],
checkdocs = :exports,
warnonly = [:missing_docs],
2 changes: 1 addition & 1 deletion docs/src/api/scipy.md
@@ -19,4 +19,4 @@ Note that using this package requires Python and SciPy to be available via Pytho
NonlinearSolveSciPy.SciPyLeastSquares
NonlinearSolveSciPy.SciPyRoot
NonlinearSolveSciPy.SciPyRootScalar
-```
+```
3 changes: 2 additions & 1 deletion docs/src/basics/diagnostics_api.md
@@ -70,7 +70,8 @@ cache.timer
Let's try for some other solver:

```@example diagnostics_example
-cache = NLS.init(prob, NLS.DFSane(); show_trace = Val(true), trace_level = NLS.TraceMinimal(50));
+cache = NLS.init(
+prob, NLS.DFSane(); show_trace = Val(true), trace_level = NLS.TraceMinimal(50));
NLS.solve!(cache)
cache.timer
```
44 changes: 29 additions & 15 deletions docs/src/tutorials/large_systems.md
@@ -78,14 +78,19 @@ function brusselator_2d_loop(du, u, p
@inbounds for I in CartesianIndices((N, N))
i, j = Tuple(I)
x, y = xyd_brusselator[I[1]], xyd_brusselator[I[2]]
-ip1, im1, jp1, jm1 = limit(i + 1, N), limit(i - 1, N), limit(j + 1, N),
+ip1, im1, jp1,
+jm1 = limit(i + 1, N), limit(i - 1, N), limit(j + 1, N),
limit(j - 1, N)
-du[i, j, 1] = alpha * (u[im1, j, 1] + u[ip1, j, 1] + u[i, jp1, 1] + u[i, jm1, 1] -
-4u[i, j, 1]) +
-B +
-u[i, j, 1]^2 * u[i, j, 2] - (A + 1) * u[i, j, 1] + brusselator_f(x, y)
-du[i, j, 2] = alpha * (u[im1, j, 2] + u[ip1, j, 2] + u[i, jp1, 2] + u[i, jm1, 2] -
-4u[i, j, 2]) + A * u[i, j, 1] - u[i, j, 1]^2 * u[i, j, 2]
+du[i,
+j,
+1] = alpha * (u[im1, j, 1] + u[ip1, j, 1] + u[i, jp1, 1] + u[i, jm1, 1] -
+4u[i, j, 1]) +
+B +
+u[i, j, 1]^2 * u[i, j, 2] - (A + 1) * u[i, j, 1] + brusselator_f(x, y)
+du[i,
+j,
+2] = alpha * (u[im1, j, 2] + u[ip1, j, 2] + u[i, jp1, 2] + u[i, jm1, 2] -
+4u[i, j, 2]) + A * u[i, j, 1] - u[i, j, 1]^2 * u[i, j, 2]
end
end
p = (3.4, 1.0, 10.0, step(xyd_brusselator))
@@ -144,7 +149,8 @@ nothing # hide
import SparseConnectivityTracer

prob_brusselator_2d_autosparse = NLS.NonlinearProblem(
-NLS.NonlinearFunction(brusselator_2d_loop; sparsity = SparseConnectivityTracer.TracerSparsityDetector()),
+NLS.NonlinearFunction(
+brusselator_2d_loop; sparsity = SparseConnectivityTracer.TracerSparsityDetector()),
u0, p; abstol = 1e-10, reltol = 1e-10
)

@@ -185,7 +191,8 @@ import ADTypes

f! = (du, u) -> brusselator_2d_loop(du, u, p)
du0 = similar(u0)
-jac_sparsity = ADTypes.jacobian_sparsity(f!, du0, u0, SparseConnectivityTracer.TracerSparsityDetector())
+jac_sparsity = ADTypes.jacobian_sparsity(
+f!, du0, u0, SparseConnectivityTracer.TracerSparsityDetector())
```

Notice that Julia gives a nice print out of the sparsity pattern. That's neat, and would be
@@ -206,7 +213,8 @@ Now let's see how the version with sparsity compares to the version without:
```@example ill_conditioned_nlprob
BenchmarkTools.@btime NLS.solve(prob_brusselator_2d, NLS.NewtonRaphson());
BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_sparse, NLS.NewtonRaphson());
-BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_sparse, NLS.NewtonRaphson(linsolve = LS.KLUFactorization()));
+BenchmarkTools.@btime NLS.solve(
+prob_brusselator_2d_sparse, NLS.NewtonRaphson(linsolve = LS.KLUFactorization()));
nothing # hide
```

@@ -222,7 +230,8 @@ Krylov method. To swap the linear solver out, we use the `linsolve` command and
GMRES linear solver.

```@example ill_conditioned_nlprob
-BenchmarkTools.@btime NLS.solve(prob_brusselator_2d, NLS.NewtonRaphson(linsolve = LS.KrylovJL_GMRES()));
+BenchmarkTools.@btime NLS.solve(
+prob_brusselator_2d, NLS.NewtonRaphson(linsolve = LS.KrylovJL_GMRES()));
nothing # hide
```

@@ -254,7 +263,8 @@ import IncompleteLU
incompletelu(W, p = nothing) = IncompleteLU.ilu(W, τ = 50.0), LinearAlgebra.I

BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_sparse,
-NLS.NewtonRaphson(linsolve = LS.KrylovJL_GMRES(precs = incompletelu), concrete_jac = true)
+NLS.NewtonRaphson(
+linsolve = LS.KrylovJL_GMRES(precs = incompletelu), concrete_jac = true)
);
nothing # hide
```
@@ -279,7 +289,9 @@ which is more automatic. The setup is very similar to before:
import AlgebraicMultigrid

function algebraicmultigrid(W, p = nothing)
-return AlgebraicMultigrid.aspreconditioner(AlgebraicMultigrid.ruge_stuben(convert(AbstractMatrix, W))), LinearAlgebra.I
+return AlgebraicMultigrid.aspreconditioner(AlgebraicMultigrid.ruge_stuben(convert(
+AbstractMatrix, W))),
+LinearAlgebra.I
end

BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_sparse,
@@ -322,11 +334,13 @@ import DifferentiationInterface
import SparseConnectivityTracer

prob_brusselator_2d_exact_tracer = NLS.NonlinearProblem(
-NLS.NonlinearFunction(brusselator_2d_loop; sparsity = SparseConnectivityTracer.TracerSparsityDetector()),
+NLS.NonlinearFunction(
+brusselator_2d_loop; sparsity = SparseConnectivityTracer.TracerSparsityDetector()),
u0, p; abstol = 1e-10, reltol = 1e-10)
prob_brusselator_2d_approx_di = NLS.NonlinearProblem(
NLS.NonlinearFunction(brusselator_2d_loop;
-sparsity = DifferentiationInterface.DenseSparsityDetector(ADTypes.AutoForwardDiff(); atol = 1e-4)),
+sparsity = DifferentiationInterface.DenseSparsityDetector(
+ADTypes.AutoForwardDiff(); atol = 1e-4)),
u0, p; abstol = 1e-10, reltol = 1e-10)

BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_exact_tracer, NLS.NewtonRaphson());
16 changes: 9 additions & 7 deletions docs/src/tutorials/nonlinear_solve_gpus.md
@@ -31,7 +31,7 @@ using the first form.
In this tutorial we will highlight both use cases in separate parts.

!!! note

If you're looking for GPU-accelerated neural networks inside of nonlinear solvers,
check out [DeepEquilibriumNetworks.jl](https://docs.sciml.ai/DeepEquilibriumNetworks/stable/).

@@ -59,7 +59,7 @@ f(u, p) = u .* u .- p
u0 = CUDA.cu(ones(1000))
p = CUDA.cu(collect(1:1000))
prob = NLS.NonlinearProblem(f, u0, p)
-sol = NLS.solve(prob, NLS.NewtonRaphson(), abstol=1f-4)
+sol = NLS.solve(prob, NLS.NewtonRaphson(), abstol = 1.0f-4)
```

Notice a few things here. One, nothing is different except the input array types. But
@@ -95,7 +95,8 @@ import AMDGPU # For if you have an AMD GPU
import Metal # For if you have a Mac M-series device and want to use the built-in GPU
import OneAPI # For if you have an Intel GPU

-@KernelAbstractions.kernel function parallel_nonlinearsolve_kernel!(result, @Const(prob), @Const(alg))
+KernelAbstractions.@kernel function parallel_nonlinearsolve_kernel!(
+result, @Const(prob), @Const(alg))
i = @index(Global)
prob_i = SciMLBase.remake(prob; p = prob.p[i])
sol = NLS.solve(prob_i, alg)
@@ -109,7 +110,7 @@ is saying, "for the ith call, get the i'th parameter set and solve with these pa
The ith result is then this solution".

!!! note

Because kernel code needs to be able to be compiled to a GPU kernel, it has very strict
specifications of what's allowed because GPU cores are not as flexible as CPU cores.
In general, this means that you need to avoid any runtime operations in kernel code,
@@ -140,16 +141,16 @@ Now let's build a nonlinear system to test it on.
out2 = sqrt(p[2]) * (x[3] - x[4])
out3 = (x[2] - p[3] * x[3])^2
out4 = sqrt(p[4]) * (x[1] - x[4]) * (x[1] - x[4])
-StaticArrays.SA[out1,out2,out3,out4]
+StaticArrays.SA[out1, out2, out3, out4]
end

p = StaticArrays.@SVector [StaticArrays.@SVector(rand(Float32, 4)) for _ in 1:1024]
-u0 = StaticArrays.SA[1f0, 2f0, 3f0, 4f0]
+u0 = StaticArrays.SA[1.0f0, 2.0f0, 3.0f0, 4.0f0]
prob = SciMLBase.ImmutableNonlinearProblem{false}(p2_f, u0, p)
```

!!! note

Because the custom kernel is going to need to embed the the code for our nonlinear
problem into the kernel, it also must be written to be GPU compatible.
In general, this means that you need to avoid any runtime operations in kernel code,
@@ -176,4 +177,5 @@ vectorized_solve(prob, NLS.SimpleNewtonRaphson(); backend = Metal.MetalBackend()
```

!!! warn

The GPU-based calls will only work on your machine if you have a compatible GPU!
6 changes: 4 additions & 2 deletions docs/src/tutorials/optimizing_parameterized_ode.md
@@ -47,13 +47,15 @@ end

p_init = zeros(4)

-nlls_prob = NLS.NonlinearLeastSquaresProblem(loss_function, p_init, vec(reduce(hcat, sol.u)))
+nlls_prob = NLS.NonlinearLeastSquaresProblem(
+loss_function, p_init, vec(reduce(hcat, sol.u)))
```

Now, we can use any NLLS solver to solve this problem.

```@example parameterized_ode
-res = NLS.solve(nlls_prob, NLS.LevenbergMarquardt(); maxiters = 1000, show_trace = Val(true),
+res = NLS.solve(
+nlls_prob, NLS.LevenbergMarquardt(); maxiters = 1000, show_trace = Val(true),
trace_level = NLS.TraceWithJacobianConditionNumber(25))
nothing # hide
```
3 changes: 2 additions & 1 deletion docs/src/tutorials/snes_ex2.md
@@ -38,7 +38,8 @@ details.

```@example snes_ex2
nlfunc_dense = NLS.NonlinearFunction(form_residual!)
-nlfunc_sparse = NLS.NonlinearFunction(form_residual!; sparsity = SparseConnectivityTracer.TracerSparsityDetector())
+nlfunc_sparse = NLS.NonlinearFunction(
+form_residual!; sparsity = SparseConnectivityTracer.TracerSparsityDetector())

nlprob_dense = NLS.NonlinearProblem(nlfunc_dense, u0)
nlprob_sparse = NLS.NonlinearProblem(nlfunc_sparse, u0)
9 changes: 6 additions & 3 deletions ext/NonlinearSolveFastLevenbergMarquardtExt.jl
@@ -21,7 +21,8 @@ function SciMLBase.__solve(
termination_condition, alg
)

-f_wrapped, u, resid = NonlinearSolveBase.construct_extension_function_wrapper(
+f_wrapped, u,
+resid = NonlinearSolveBase.construct_extension_function_wrapper(
prob; alias_u0, can_handle_oop = Val(prob.u0 isa SArray)
)
f = if prob.u0 isa SArray
@@ -49,7 +50,8 @@
)

if prob.u0 isa SArray
-res, fx, info, iter, nfev, njev = FastLM.lmsolve(
+res, fx, info, iter, nfev,
+njev = FastLM.lmsolve(
f, jac_fn, prob.u0; solver_kwargs...
)
LM, solver = nothing, nothing
@@ -68,7 +70,8 @@

LM = FastLM.LMWorkspace(u, resid, J)

-res, fx, info, iter, nfev, njev, LM, solver = FastLM.lmsolve!(
+res, fx, info, iter, nfev, njev,
+LM, solver = FastLM.lmsolve!(
f, jac_fn, LM; solver, solver_kwargs...
)
end
3 changes: 2 additions & 1 deletion ext/NonlinearSolveFixedPointAccelerationExt.jl
@@ -15,7 +15,8 @@ function SciMLBase.__solve(
termination_condition, alg
)

-f, u0, resid = NonlinearSolveBase.construct_extension_function_wrapper(
+f, u0,
+resid = NonlinearSolveBase.construct_extension_function_wrapper(
prob; alias_u0, make_fixed_point = Val(true), force_oop = Val(true)
)

3 changes: 2 additions & 1 deletion ext/NonlinearSolveMINPACKExt.jl
@@ -17,7 +17,8 @@ function SciMLBase.__solve(
termination_condition, alg
)

-f_wrapped!, u0, resid = NonlinearSolveBase.construct_extension_function_wrapper(
+f_wrapped!, u0,
+resid = NonlinearSolveBase.construct_extension_function_wrapper(
prob; alias_u0
)
resid_size = size(resid)
3 changes: 2 additions & 1 deletion ext/NonlinearSolveNLSolversExt.jl
@@ -33,7 +33,8 @@ function SciMLBase.__solve(
prob.f, autodiff, prob.u0, Constant(prob.p)
)

-fj_scalar = @closure (Jx, x) -> begin
+fj_scalar = @closure (Jx,
+x) -> begin
return DifferentiationInterface.value_and_derivative(
prob.f, prep, autodiff, x, Constant(prob.p)
)
20 changes: 15 additions & 5 deletions ext/NonlinearSolvePETScExt.jl
@@ -22,7 +22,8 @@ function SciMLBase.__solve(
termination_condition, alg; abs_norm_supported = false
)

-f_wrapped!, u0, resid = NonlinearSolveBase.construct_extension_function_wrapper(
+f_wrapped!, u0,
+resid = NonlinearSolveBase.construct_extension_function_wrapper(
prob; alias_u0
)
T = eltype(u0)
@@ -48,7 +49,9 @@

nf = Ref{Int}(0)

-f! = @closure (cfx, cx, user_ctx) -> begin
+f! = @closure (cfx,
+cx,
+user_ctx) -> begin
nf[] += 1
fx = cfx isa Ptr{Nothing} ? PETSc.unsafe_localarray(T, cfx; read = false) : cfx
x = cx isa Ptr{Nothing} ? PETSc.unsafe_localarray(T, cx; write = false) : cx
@@ -76,7 +79,8 @@
)
J_init = zeros(T, 1, 1)
else
-jac!, J_init = NonlinearSolveBase.construct_extension_jac(
+jac!,
+J_init = NonlinearSolveBase.construct_extension_jac(
prob, alg, u0, resid; autodiff, initial_jacobian = Val(true)
)
end
@@ -85,7 +89,10 @@

if J_init isa AbstractSparseMatrix
PJ = PETSc.MatSeqAIJ(J_init)
-jac_fn! = @closure (cx, J, _, user_ctx) -> begin
+jac_fn! = @closure (cx,
+J,
+_,
+user_ctx) -> begin
njac[] += 1
x = cx isa Ptr{Nothing} ? PETSc.unsafe_localarray(T, cx; write = false) : cx
if J isa PETSc.AbstractMat
@@ -102,7 +109,10 @@
snes.user_ctx = (; jacobian = J_init)
else
PJ = PETSc.MatSeqDense(J_init)
-jac_fn! = @closure (cx, J, _, user_ctx) -> begin
+jac_fn! = @closure (cx,
+J,
+_,
+user_ctx) -> begin
njac[] += 1
x = cx isa Ptr{Nothing} ? PETSc.unsafe_localarray(T, cx; write = false) : cx
jac!(J, x)
6 changes: 4 additions & 2 deletions ext/NonlinearSolveSIAMFANLEquationsExt.jl
@@ -64,7 +64,8 @@ function SciMLBase.__solve(
elseif method == :secant
sol = secant(f, prob.u0; maxit = maxiters, atol, rtol, printerr)
elseif method == :anderson
-f_aa, u, _ = NonlinearSolveBase.construct_extension_function_wrapper(
+f_aa, u,
+_ = NonlinearSolveBase.construct_extension_function_wrapper(
prob; alias_u0, make_fixed_point = Val(true)
)
sol = aasol(
@@ -73,7 +74,8 @@
)
end
else
-f, u, resid = NonlinearSolveBase.construct_extension_function_wrapper(
+f, u,
+resid = NonlinearSolveBase.construct_extension_function_wrapper(
prob; alias_u0, make_fixed_point = Val(method == :anderson)
)
N = length(u)
3 changes: 2 additions & 1 deletion ext/NonlinearSolveSpeedMappingExt.jl
@@ -16,7 +16,8 @@ function SciMLBase.__solve(
termination_condition, alg
)

-m!, u, resid = NonlinearSolveBase.construct_extension_function_wrapper(
+m!, u,
+resid = NonlinearSolveBase.construct_extension_function_wrapper(
prob; alias_u0, make_fixed_point = Val(true)
)
tol = NonlinearSolveBase.get_tolerance(abstol, eltype(u))
@@ -7,7 +7,9 @@ using SciMLBase: SciMLBase, IntervalNonlinearProblem

using BracketingNonlinearSolve: Bisection, Brent, Alefeld, Falsi, ITP, Ridder

-const DualIntervalNonlinearProblem{T, V, P} = IntervalNonlinearProblem{
+const DualIntervalNonlinearProblem{T,
+V,
+P} = IntervalNonlinearProblem{
uType, iip, <:Union{<:Dual{T, V, P}, <:AbstractArray{<:Dual{T, V, P}}}
} where {uType, iip}

10 changes: 5 additions & 5 deletions lib/BracketingNonlinearSolve/src/BracketingNonlinearSolve.jl
@@ -29,17 +29,17 @@ function CommonSolve.solve(prob::IntervalNonlinearProblem, nothing, args...; kwa
return CommonSolve.solve(prob, ITP(), args...; kwargs...)
end

-function CommonSolve.solve(prob::IntervalNonlinearProblem,
+function CommonSolve.solve(prob::IntervalNonlinearProblem,
alg::AbstractBracketingAlgorithm, args...; sensealg = nothing, kwargs...)
-return bracketingnonlinear_solve_up(prob::IntervalNonlinearProblem, sensealg, prob.p, alg, args...; kwargs...)
+return bracketingnonlinear_solve_up(
+prob::IntervalNonlinearProblem, sensealg, prob.p, alg, args...; kwargs...)
end


-function bracketingnonlinear_solve_up(prob::IntervalNonlinearProblem, sensealg, p, alg, args...; kwargs...)
+function bracketingnonlinear_solve_up(
+prob::IntervalNonlinearProblem, sensealg, p, alg, args...; kwargs...)
return SciMLBase.__solve(prob, alg, args...; kwargs...)
end


@setup_workload begin
for T in (Float32, Float64)
prob_brack = IntervalNonlinearProblem{false}(