diff --git a/src/factorization.jl b/src/factorization.jl
index 0aab1c06c..4b3e946a9 100644
--- a/src/factorization.jl
+++ b/src/factorization.jl
@@ -861,8 +861,8 @@ patterns with “more structure”.
 
 !!! note
     By default, the SparseArrays.jl are implemented for efficiency by caching the
-    symbolic factorization. If the sparsity pattern of `A` may change between solves, set `reuse_symbolic=false`. 
-    If the pattern is assumed or known to be constant, set `reuse_symbolic=true` to avoid 
+    symbolic factorization. If the sparsity pattern of `A` may change between solves, set `reuse_symbolic=false`.
+    If the pattern is assumed or known to be constant, set `reuse_symbolic=true` to avoid
     unnecessary recomputation. To further reduce computational overhead, you can disable
     pattern checks entirely by setting `check_pattern = false`. Note that this may error
     if the sparsity pattern does change unexpectedly.
@@ -887,8 +887,8 @@ A fast sparse LU-factorization which specializes on sparsity patterns with “le
 
 !!! note
     By default, the SparseArrays.jl are implemented for efficiency by caching the
-    symbolic factorization. If the sparsity pattern of `A` may change between solves, set `reuse_symbolic=false`. 
-    If the pattern is assumed or known to be constant, set `reuse_symbolic=true` to avoid 
+    symbolic factorization. If the sparsity pattern of `A` may change between solves, set `reuse_symbolic=false`.
+    If the pattern is assumed or known to be constant, set `reuse_symbolic=true` to avoid
     unnecessary recomputation. To further reduce computational overhead, you can disable
     pattern checks entirely by setting `check_pattern = false`. Note that this may error
     if the sparsity pattern does change unexpectedly.
diff --git a/src/simplegmres.jl b/src/simplegmres.jl
index 1cceb7d76..a21826c9f 100644
--- a/src/simplegmres.jl
+++ b/src/simplegmres.jl
@@ -308,7 +308,7 @@ function SciMLBase.solve!(cache::SimpleGMRESCache{false}, lincache::LinearCache)
         # [cₖ  sₖ] [ r̄ₖ.ₖ ] = [rₖ.ₖ]
         # [s̄ₖ -cₖ] [hₖ₊₁.ₖ]   [ 0  ]
         (c[inner_iter], s[inner_iter],
-        R[nr + inner_iter]) = _sym_givens(
+            R[nr + inner_iter]) = _sym_givens(
             R[nr + inner_iter], Hbis)
diff --git a/test/adjoint.jl b/test/adjoint.jl
index 9323a605e..d599162df 100644
--- a/test/adjoint.jl
+++ b/test/adjoint.jl
@@ -31,7 +31,7 @@ A = rand(n, n);
 b1 = rand(n);
 
 _ff = (x,
-y) -> f(x,
+    y) -> f(x,
     y;
     alg = LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.LUFactorization))
 _ff(copy(A), copy(b1))
diff --git a/test/hypretests.jl b/test/hypretests.jl
index da7e7e3a5..0d04ebd94 100644
--- a/test/hypretests.jl
+++ b/test/hypretests.jl
@@ -87,7 +87,7 @@ function test_interface(alg; kw...)
 
     # Solve prob directly (without cache)
     y = solve(prob, alg; cache_kwargs..., Pl = HYPRE.BoomerAMG)
-    @test A * to_array(y.u)≈b atol=atol rtol=rtol
+    @test A*to_array(y.u)≈b atol=atol rtol=rtol
     @test y.iters > 0
     @test y.resid < rtol
@@ -99,7 +99,7 @@ function test_interface(alg; kw...)
     cache = y.cache
     @test cache.isfresh == cache.cacheval.isfresh_A ==
           cache.cacheval.isfresh_b == cache.cacheval.isfresh_u == false
-    @test A * to_array(y.u)≈b atol=atol rtol=rtol
+    @test A*to_array(y.u)≈b atol=atol rtol=rtol
 
     # Update A
     cache.A = A
@@ -109,7 +109,7 @@ function test_interface(alg; kw...)
     cache = y.cache
     @test cache.isfresh == cache.cacheval.isfresh_A ==
           cache.cacheval.isfresh_b == cache.cacheval.isfresh_u == false
-    @test A * to_array(y.u)≈b atol=atol rtol=rtol
+    @test A*to_array(y.u)≈b atol=atol rtol=rtol
 
     # Update b
     b2 = 2 * to_array(b)
@@ -123,7 +123,7 @@ function test_interface(alg; kw...)
     cache = y.cache
     @test cache.isfresh == cache.cacheval.isfresh_A ==
           cache.cacheval.isfresh_b == cache.cacheval.isfresh_u == false
-    @test A * to_array(y.u)≈to_array(b2) atol=atol rtol=rtol
+    @test A*to_array(y.u)≈to_array(b2) atol=atol rtol=rtol
     end
     return
 end
diff --git a/test/nopre/enzyme.jl b/test/nopre/enzyme.jl
index 6004ccbcf..0222826f2 100644
--- a/test/nopre/enzyme.jl
+++ b/test/nopre/enzyme.jl
@@ -33,14 +33,14 @@ b1 = rand(n);
 db1 = zeros(n);
 
 _ff = (x,
-y) -> f(x,
+    y) -> f(x,
     y;
     alg = LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.LUFactorization))
 _ff(copy(A), copy(b1))
 
 Enzyme.autodiff(Reverse,
     (x,
-    y) -> f(x,
+        y) -> f(x,
         y;
         alg = LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.LUFactorization)),
     Duplicated(copy(A), dA),
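# --- Illustrative usage sketch (not part of the patch above) ---------------------
# The factorization.jl docstrings touched in this diff describe the
# `reuse_symbolic` and `check_pattern` keywords of LinearSolve.jl's sparse LU
# algorithms. The snippet below is a hedged sketch of how those keywords might be
# used; it assumes `KLUFactorization` and `UMFPACKFactorization` accept them as
# keyword arguments, as the docstrings in the hunks above indicate.
using LinearAlgebra, SparseArrays, LinearSolve

n = 100
# Sparse, diagonally dominant test matrix with a fixed sparsity pattern.
A = sparse(Tridiagonal(rand(n - 1), 2 .+ rand(n), rand(n - 1)))
b = rand(n)
prob = LinearProblem(A, b)

# Pattern assumed constant across solves: keep the cached symbolic factorization
# and skip the pattern check to reduce per-solve overhead.
sol_klu = solve(prob, KLUFactorization(reuse_symbolic = true, check_pattern = false))

# Pattern may change between solves: recompute the symbolic factorization
# instead of reusing the cached one.
sol_umf = solve(prob, UMFPACKFactorization(reuse_symbolic = false))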