Commit 4934ba8

Author: SciML Bot (committed)

Apply JuliaFormatter to fix code formatting

- Applied JuliaFormatter with SciML style guide
- Formatted 76 files

🤖 Generated by OrgMaintenanceScripts.jl

1 parent e9e4166 · commit 4934ba8

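For context, a formatting pass like this one is normally driven by JuliaFormatter with the SciML style. The snippet below is a minimal sketch only; the exact invocation used by OrgMaintenanceScripts.jl is an assumption, not taken from this commit.

```julia
# Illustrative sketch (not the exact bot command): reformat every Julia file
# under the current directory using JuliaFormatter's SciML style guide.
using JuliaFormatter

format(".", SciMLStyle(); verbose = true)
```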
File tree: 76 files changed (+636 additions, −454 deletions)

docs/make.jl (1 addition, 1 deletion)

@@ -51,7 +51,7 @@ makedocs(;
         "https://www.sciencedirect.com/science/article/abs/pii/S0045782523007156",
         "https://github.com/devernay/cminpack/blob/d1f5f5a273862ca1bbcf58394e4ac060d9e22c76/hybrd.c",
         "https://github.com/devernay/cminpack/blob/d1f5f5a273862ca1bbcf58394e4ac060d9e22c76/hybrj.c",
-        "https://github.com/devernay/cminpack/blob/d1f5f5a273862ca1bbcf58394e4ac060d9e22c76/lmder.c",
+        "https://github.com/devernay/cminpack/blob/d1f5f5a273862ca1bbcf58394e4ac060d9e22c76/lmder.c"
     ],
     checkdocs = :exports,
     warnonly = [:missing_docs],

docs/src/api/scipy.md (1 addition, 1 deletion)

@@ -19,4 +19,4 @@ Note that using this package requires Python and SciPy to be available via Pytho
 NonlinearSolveSciPy.SciPyLeastSquares
 NonlinearSolveSciPy.SciPyRoot
 NonlinearSolveSciPy.SciPyRootScalar
-```
+```

docs/src/basics/diagnostics_api.md (2 additions, 1 deletion)

@@ -70,7 +70,8 @@ cache.timer
 Let's try for some other solver:
 
 ```@example diagnostics_example
-cache = NLS.init(prob, NLS.DFSane(); show_trace = Val(true), trace_level = NLS.TraceMinimal(50));
+cache = NLS.init(
+    prob, NLS.DFSane(); show_trace = Val(true), trace_level = NLS.TraceMinimal(50));
 NLS.solve!(cache)
 cache.timer
 ```

docs/src/tutorials/large_systems.md (29 additions, 15 deletions)

@@ -78,14 +78,19 @@ function brusselator_2d_loop(du, u, p)
     @inbounds for I in CartesianIndices((N, N))
         i, j = Tuple(I)
         x, y = xyd_brusselator[I[1]], xyd_brusselator[I[2]]
-        ip1, im1, jp1, jm1 = limit(i + 1, N), limit(i - 1, N), limit(j + 1, N),
+        ip1, im1, jp1,
+        jm1 = limit(i + 1, N), limit(i - 1, N), limit(j + 1, N),
         limit(j - 1, N)
-        du[i, j, 1] = alpha * (u[im1, j, 1] + u[ip1, j, 1] + u[i, jp1, 1] + u[i, jm1, 1] -
-                               4u[i, j, 1]) +
-                      B +
-                      u[i, j, 1]^2 * u[i, j, 2] - (A + 1) * u[i, j, 1] + brusselator_f(x, y)
-        du[i, j, 2] = alpha * (u[im1, j, 2] + u[ip1, j, 2] + u[i, jp1, 2] + u[i, jm1, 2] -
-                               4u[i, j, 2]) + A * u[i, j, 1] - u[i, j, 1]^2 * u[i, j, 2]
+        du[i,
+            j,
+            1] = alpha * (u[im1, j, 1] + u[ip1, j, 1] + u[i, jp1, 1] + u[i, jm1, 1] -
+                          4u[i, j, 1]) +
+                 B +
+                 u[i, j, 1]^2 * u[i, j, 2] - (A + 1) * u[i, j, 1] + brusselator_f(x, y)
+        du[i,
+            j,
+            2] = alpha * (u[im1, j, 2] + u[ip1, j, 2] + u[i, jp1, 2] + u[i, jm1, 2] -
+                          4u[i, j, 2]) + A * u[i, j, 1] - u[i, j, 1]^2 * u[i, j, 2]
     end
 end
 p = (3.4, 1.0, 10.0, step(xyd_brusselator))

@@ -144,7 +149,8 @@ nothing # hide
 import SparseConnectivityTracer
 
 prob_brusselator_2d_autosparse = NLS.NonlinearProblem(
-    NLS.NonlinearFunction(brusselator_2d_loop; sparsity = SparseConnectivityTracer.TracerSparsityDetector()),
+    NLS.NonlinearFunction(
+        brusselator_2d_loop; sparsity = SparseConnectivityTracer.TracerSparsityDetector()),
     u0, p; abstol = 1e-10, reltol = 1e-10
 )
 
@@ -185,7 +191,8 @@ import ADTypes
 
 f! = (du, u) -> brusselator_2d_loop(du, u, p)
 du0 = similar(u0)
-jac_sparsity = ADTypes.jacobian_sparsity(f!, du0, u0, SparseConnectivityTracer.TracerSparsityDetector())
+jac_sparsity = ADTypes.jacobian_sparsity(
+    f!, du0, u0, SparseConnectivityTracer.TracerSparsityDetector())
 ```
 
 Notice that Julia gives a nice print out of the sparsity pattern. That's neat, and would be

@@ -206,7 +213,8 @@ Now let's see how the version with sparsity compares to the version without:
 ```@example ill_conditioned_nlprob
 BenchmarkTools.@btime NLS.solve(prob_brusselator_2d, NLS.NewtonRaphson());
 BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_sparse, NLS.NewtonRaphson());
-BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_sparse, NLS.NewtonRaphson(linsolve = LS.KLUFactorization()));
+BenchmarkTools.@btime NLS.solve(
+    prob_brusselator_2d_sparse, NLS.NewtonRaphson(linsolve = LS.KLUFactorization()));
 nothing # hide
 ```

@@ -222,7 +230,8 @@ Krylov method. To swap the linear solver out, we use the `linsolve` command and
 GMRES linear solver.
 
 ```@example ill_conditioned_nlprob
-BenchmarkTools.@btime NLS.solve(prob_brusselator_2d, NLS.NewtonRaphson(linsolve = LS.KrylovJL_GMRES()));
+BenchmarkTools.@btime NLS.solve(
+    prob_brusselator_2d, NLS.NewtonRaphson(linsolve = LS.KrylovJL_GMRES()));
 nothing # hide
 ```

@@ -254,7 +263,8 @@ import IncompleteLU
 incompletelu(W, p = nothing) = IncompleteLU.ilu(W, τ = 50.0), LinearAlgebra.I
 
 BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_sparse,
-    NLS.NewtonRaphson(linsolve = LS.KrylovJL_GMRES(precs = incompletelu), concrete_jac = true)
+    NLS.NewtonRaphson(
+        linsolve = LS.KrylovJL_GMRES(precs = incompletelu), concrete_jac = true)
 );
 nothing # hide
 ```

@@ -279,7 +289,9 @@ which is more automatic. The setup is very similar to before:
 import AlgebraicMultigrid
 
 function algebraicmultigrid(W, p = nothing)
-    return AlgebraicMultigrid.aspreconditioner(AlgebraicMultigrid.ruge_stuben(convert(AbstractMatrix, W))), LinearAlgebra.I
+    return AlgebraicMultigrid.aspreconditioner(AlgebraicMultigrid.ruge_stuben(convert(
+            AbstractMatrix, W))),
+    LinearAlgebra.I
 end
 
 BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_sparse,

@@ -322,11 +334,13 @@ import DifferentiationInterface
 import SparseConnectivityTracer
 
 prob_brusselator_2d_exact_tracer = NLS.NonlinearProblem(
-    NLS.NonlinearFunction(brusselator_2d_loop; sparsity = SparseConnectivityTracer.TracerSparsityDetector()),
+    NLS.NonlinearFunction(
+        brusselator_2d_loop; sparsity = SparseConnectivityTracer.TracerSparsityDetector()),
     u0, p; abstol = 1e-10, reltol = 1e-10)
 prob_brusselator_2d_approx_di = NLS.NonlinearProblem(
     NLS.NonlinearFunction(brusselator_2d_loop;
-        sparsity = DifferentiationInterface.DenseSparsityDetector(ADTypes.AutoForwardDiff(); atol = 1e-4)),
+        sparsity = DifferentiationInterface.DenseSparsityDetector(
+            ADTypes.AutoForwardDiff(); atol = 1e-4)),
     u0, p; abstol = 1e-10, reltol = 1e-10)
 
 BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_exact_tracer, NLS.NewtonRaphson());

docs/src/tutorials/nonlinear_solve_gpus.md (9 additions, 7 deletions)

@@ -31,7 +31,7 @@ using the first form.
 In this tutorial we will highlight both use cases in separate parts.
 
 !!! note
-
+
     If you're looking for GPU-accelerated neural networks inside of nonlinear solvers,
     check out [DeepEquilibriumNetworks.jl](https://docs.sciml.ai/DeepEquilibriumNetworks/stable/).
 
@@ -59,7 +59,7 @@ f(u, p) = u .* u .- p
 u0 = CUDA.cu(ones(1000))
 p = CUDA.cu(collect(1:1000))
 prob = NLS.NonlinearProblem(f, u0, p)
-sol = NLS.solve(prob, NLS.NewtonRaphson(), abstol=1f-4)
+sol = NLS.solve(prob, NLS.NewtonRaphson(), abstol = 1.0f-4)
 ```
 
 Notice a few things here. One, nothing is different except the input array types. But

@@ -95,7 +95,8 @@ import AMDGPU # For if you have an AMD GPU
 import Metal # For if you have a Mac M-series device and want to use the built-in GPU
 import OneAPI # For if you have an Intel GPU
 
-@KernelAbstractions.kernel function parallel_nonlinearsolve_kernel!(result, @Const(prob), @Const(alg))
+KernelAbstractions.@kernel function parallel_nonlinearsolve_kernel!(
+        result, @Const(prob), @Const(alg))
     i = @index(Global)
     prob_i = SciMLBase.remake(prob; p = prob.p[i])
     sol = NLS.solve(prob_i, alg)

@@ -109,7 +110,7 @@ is saying, "for the ith call, get the i'th parameter set and solve with these pa
 The ith result is then this solution".
 
 !!! note
-
+
     Because kernel code needs to be able to be compiled to a GPU kernel, it has very strict
     specifications of what's allowed because GPU cores are not as flexible as CPU cores.
     In general, this means that you need to avoid any runtime operations in kernel code,

@@ -140,16 +141,16 @@ Now let's build a nonlinear system to test it on.
     out2 = sqrt(p[2]) * (x[3] - x[4])
     out3 = (x[2] - p[3] * x[3])^2
     out4 = sqrt(p[4]) * (x[1] - x[4]) * (x[1] - x[4])
-    StaticArrays.SA[out1,out2,out3,out4]
+    StaticArrays.SA[out1, out2, out3, out4]
 end
 
 p = StaticArrays.@SVector [StaticArrays.@SVector(rand(Float32, 4)) for _ in 1:1024]
-u0 = StaticArrays.SA[1f0, 2f0, 3f0, 4f0]
+u0 = StaticArrays.SA[1.0f0, 2.0f0, 3.0f0, 4.0f0]
 prob = SciMLBase.ImmutableNonlinearProblem{false}(p2_f, u0, p)
 ```
 
 !!! note
-
+
     Because the custom kernel is going to need to embed the the code for our nonlinear
     problem into the kernel, it also must be written to be GPU compatible.
     In general, this means that you need to avoid any runtime operations in kernel code,

@@ -176,4 +177,5 @@ vectorized_solve(prob, NLS.SimpleNewtonRaphson(); backend = Metal.MetalBackend()
 ```
 
 !!! warn
+
     The GPU-based calls will only work on your machine if you have a compatible GPU!

docs/src/tutorials/optimizing_parameterized_ode.md (4 additions, 2 deletions)

@@ -47,13 +47,15 @@ end
 
 p_init = zeros(4)
 
-nlls_prob = NLS.NonlinearLeastSquaresProblem(loss_function, p_init, vec(reduce(hcat, sol.u)))
+nlls_prob = NLS.NonlinearLeastSquaresProblem(
+    loss_function, p_init, vec(reduce(hcat, sol.u)))
 ```
 
 Now, we can use any NLLS solver to solve this problem.
 
 ```@example parameterized_ode
-res = NLS.solve(nlls_prob, NLS.LevenbergMarquardt(); maxiters = 1000, show_trace = Val(true),
+res = NLS.solve(
+    nlls_prob, NLS.LevenbergMarquardt(); maxiters = 1000, show_trace = Val(true),
     trace_level = NLS.TraceWithJacobianConditionNumber(25))
 nothing # hide
 ```

docs/src/tutorials/snes_ex2.md (2 additions, 1 deletion)

@@ -38,7 +38,8 @@ details.
 
 ```@example snes_ex2
 nlfunc_dense = NLS.NonlinearFunction(form_residual!)
-nlfunc_sparse = NLS.NonlinearFunction(form_residual!; sparsity = SparseConnectivityTracer.TracerSparsityDetector())
+nlfunc_sparse = NLS.NonlinearFunction(
+    form_residual!; sparsity = SparseConnectivityTracer.TracerSparsityDetector())
 
 nlprob_dense = NLS.NonlinearProblem(nlfunc_dense, u0)
 nlprob_sparse = NLS.NonlinearProblem(nlfunc_sparse, u0)

ext/NonlinearSolveFastLevenbergMarquardtExt.jl (6 additions, 3 deletions)

@@ -21,7 +21,8 @@ function SciMLBase.__solve(
         termination_condition, alg
     )
 
-    f_wrapped, u, resid = NonlinearSolveBase.construct_extension_function_wrapper(
+    f_wrapped, u,
+    resid = NonlinearSolveBase.construct_extension_function_wrapper(
         prob; alias_u0, can_handle_oop = Val(prob.u0 isa SArray)
     )
     f = if prob.u0 isa SArray

@@ -49,7 +50,8 @@ function SciMLBase.__solve(
     )
 
     if prob.u0 isa SArray
-        res, fx, info, iter, nfev, njev = FastLM.lmsolve(
+        res, fx, info, iter, nfev,
+        njev = FastLM.lmsolve(
             f, jac_fn, prob.u0; solver_kwargs...
         )
         LM, solver = nothing, nothing

@@ -68,7 +70,8 @@ function SciMLBase.__solve(
 
         LM = FastLM.LMWorkspace(u, resid, J)
 
-        res, fx, info, iter, nfev, njev, LM, solver = FastLM.lmsolve!(
+        res, fx, info, iter, nfev, njev,
+        LM, solver = FastLM.lmsolve!(
             f, jac_fn, LM; solver, solver_kwargs...
         )
     end

ext/NonlinearSolveFixedPointAccelerationExt.jl (2 additions, 1 deletion)

@@ -15,7 +15,8 @@ function SciMLBase.__solve(
         termination_condition, alg
     )
 
-    f, u0, resid = NonlinearSolveBase.construct_extension_function_wrapper(
+    f, u0,
+    resid = NonlinearSolveBase.construct_extension_function_wrapper(
         prob; alias_u0, make_fixed_point = Val(true), force_oop = Val(true)
    )

ext/NonlinearSolveMINPACKExt.jl (2 additions, 1 deletion)

@@ -17,7 +17,8 @@ function SciMLBase.__solve(
         termination_condition, alg
     )
 
-    f_wrapped!, u0, resid = NonlinearSolveBase.construct_extension_function_wrapper(
+    f_wrapped!, u0,
+    resid = NonlinearSolveBase.construct_extension_function_wrapper(
         prob; alias_u0
    )
    resid_size = size(resid)
