diff --git a/docs/src/tutorials/gpu.md b/docs/src/tutorials/gpu.md index d76456368..9e11a3f3e 100644 --- a/docs/src/tutorials/gpu.md +++ b/docs/src/tutorials/gpu.md @@ -2,20 +2,20 @@ LinearSolve.jl provides two ways to GPU accelerate linear solves: -* Offloading: offloading takes a CPU-based problem and automatically transforms it into a - GPU-based problem in the background, and returns the solution on CPU. Thus using - offloading requires no change on the part of the user other than to choose an offloading - solver. -* Array type interface: the array type interface requires that the user defines the - `LinearProblem` using an `AbstractGPUArray` type and chooses an appropriate solver - (or uses the default solver). The solution will then be returned as a GPU array type. + - Offloading: offloading takes a CPU-based problem and automatically transforms it into a + GPU-based problem in the background, and returns the solution on CPU. Thus using + offloading requires no change on the part of the user other than to choose an offloading + solver. + - Array type interface: the array type interface requires that the user defines the + `LinearProblem` using an `AbstractGPUArray` type and chooses an appropriate solver + (or uses the default solver). The solution will then be returned as a GPU array type. The offloading approach has the advantage of being simpler and requiring no change to existing CPU code, while having the disadvantage of having more overhead. In the following sections we will demonstrate how to use each of the approaches. !!! warn - + GPUs are not always faster! Your matrices need to be sufficiently large in order for GPU accelerations to actually be faster. For offloading it's around 1,000 x 1,000 matrices and for Array type interface it's around 100 x 100. For sparse matrices, it is highly @@ -74,7 +74,7 @@ to return it to the CPU. This setup does no automated memory transfers and will move things to CPU on command. !!! warn - + Many GPU functionalities, such as `CUDA.cu`, have a built-in preference for `Float32`. Generally it is much faster to use 32-bit floating point operations on GPU than 64-bit operations, and thus this is generally the right choice if going to such platforms. @@ -118,7 +118,7 @@ sol = LS.solve(prob, LS.LUFactorization()) ``` !!! note - + For now, CUDSS only supports CuSparseMatrixCSR type matrices. Note that `KrylovJL` methods also work with sparse GPU arrays: @@ -130,7 +130,8 @@ sol = LS.solve(prob, LS.KrylovJL_GMRES()) Note that CUSPARSE also has some GPU-based preconditioners, such as a built-in `ilu`. However: ```julia -sol = LS.solve(prob, LS.KrylovJL_GMRES(precs = (A, p) -> (CUDA.CUSPARSE.ilu02!(A, 'O'), LA.I))) +sol = LS.solve( + prob, LS.KrylovJL_GMRES(precs = (A, p) -> (CUDA.CUSPARSE.ilu02!(A, 'O'), LA.I))) ``` However, right now CUSPARSE is missing the right `ldiv!` implementation for this to work diff --git a/docs/src/tutorials/linear.md b/docs/src/tutorials/linear.md index e600db987..8bcc73d04 100644 --- a/docs/src/tutorials/linear.md +++ b/docs/src/tutorials/linear.md @@ -103,11 +103,11 @@ The Matrix-Free operators are provided by the [SciMLOperators.jl interface](http For example, for the matrix `A` defined via: ```@example linsys1 -A = [-2.0 1 0 0 0 - 1 -2 1 0 0 - 0 1 -2 1 0 - 0 0 1 -2 1 - 0 0 0 1 -2] +A = [-2.0 1 0 0 0 + 1 -2 1 0 0 + 0 1 -2 1 0 + 0 0 1 -2 1 + 0 0 0 1 -2] ``` We can define the `FunctionOperator` that does the `A*v` operations, without using the matrix `A`. This is done by defining @@ -116,18 +116,18 @@ operator. 
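Concretely, one supplies an in-place five-argument function `Afunc!(w, v, u, p, t)` that writes `w = A*v`,
together with an out-of-place four-argument variant; both are shown in the example below.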
See the [SciMLOperators.jl documentation](https://docs.sciml.ai/SciMLOperators/stable/) for more details
(e.g. non-constant operators, operator algebras, and many more features). This is done by:

 ```@example linsys1
-function Afunc!(w,v,u,p,t)
+function Afunc!(w, v, u, p, t)
     w[1] = -2v[1] + v[2]
     for i in 2:4
-        w[i] = v[i-1] - 2v[i] + v[i+1]
+        w[i] = v[i - 1] - 2v[i] + v[i + 1]
     end
     w[5] = v[4] - 2v[5]
     nothing
 end

-function Afunc!(v,u,p,t)
+function Afunc!(v, u, p, t)
     w = zeros(5)
-    Afunc!(w,v,u,p,t)
+    Afunc!(w, v, u, p, t)
     w
 end

@@ -159,6 +159,7 @@ mfopA * sol.u - b
 ```

 !!! note
-    Note that not all methods can use a matrix-free operator. For example, `LS.LUFactorization()` requires a matrix. If you use an
-    invalid method, you will get an error. The methods particularly from KrylovJL are the ones preferred for these cases
-    (and are defaulted to).
+
+    Note that not all methods can use a matrix-free operator. For example, `LS.LUFactorization()` requires a matrix. If you use an
+    invalid method, you will get an error. The KrylovJL methods in particular are preferred for these cases
+    (and are defaulted to).
diff --git a/ext/LinearSolveCUDAExt.jl b/ext/LinearSolveCUDAExt.jl
index 9ec8224b3..aaedcf21a 100644
--- a/ext/LinearSolveCUDAExt.jl
+++ b/ext/LinearSolveCUDAExt.jl
@@ -1,14 +1,17 @@
 module LinearSolveCUDAExt

 using CUDA
-using LinearSolve: LinearSolve, is_cusparse, defaultalg, cudss_loaded, DefaultLinearSolver,
-    DefaultAlgorithmChoice, ALREADY_WARNED_CUDSS, LinearCache, needs_concrete_A,
-    error_no_cudss_lu, init_cacheval, OperatorAssumptions, CudaOffloadFactorization,
+using LinearSolve: LinearSolve, is_cusparse, defaultalg, cudss_loaded, DefaultLinearSolver,
+                   DefaultAlgorithmChoice, ALREADY_WARNED_CUDSS, LinearCache,
+                   needs_concrete_A,
+                   error_no_cudss_lu, init_cacheval, OperatorAssumptions,
+                   CudaOffloadFactorization,
     SparspakFactorization, KLUFactorization, UMFPACKFactorization
 using LinearSolve.LinearAlgebra, LinearSolve.SciMLBase, LinearSolve.ArrayInterface
 using SciMLBase: AbstractSciMLOperator

-function LinearSolve.is_cusparse(A::Union{CUDA.CUSPARSE.CuSparseMatrixCSR, CUDA.CUSPARSE.CuSparseMatrixCSC})
+function LinearSolve.is_cusparse(A::Union{
+        CUDA.CUSPARSE.CuSparseMatrixCSR, CUDA.CUSPARSE.CuSparseMatrixCSC})
     true
 end
diff --git a/ext/LinearSolveEnzymeExt.jl b/ext/LinearSolveEnzymeExt.jl
index 5f7bb2add..467a6b711 100644
--- a/ext/LinearSolveEnzymeExt.jl
+++ b/ext/LinearSolveEnzymeExt.jl
@@ -1,7 +1,7 @@
 module LinearSolveEnzymeExt

-using LinearSolve: LinearSolve, SciMLLinearSolveAlgorithm, init, solve!, LinearProblem,
-    LinearCache, AbstractKrylovSubspaceMethod, DefaultLinearSolver,
+using LinearSolve: LinearSolve, SciMLLinearSolveAlgorithm, init, solve!, LinearProblem,
+    LinearCache, AbstractKrylovSubspaceMethod, DefaultLinearSolver,
     defaultalg_adjoint_eval, solve
 using LinearSolve.LinearAlgebra
 using EnzymeCore
@@ -205,9 +205,9 @@ function EnzymeRules.augmented_primal(

     cache = (copy(res.u), resvals, cachesolve, dAs, dbs)

-    _res = EnzymeRules.needs_primal(config) ? res : nothing
+    _res = EnzymeRules.needs_primal(config) ? res : nothing
     _dres = EnzymeRules.needs_shadow(config) ? dres : nothing
-
+
     return EnzymeRules.AugmentedReturn(_res, _dres, cache)
 end
diff --git a/ext/LinearSolveForwardDiffExt.jl b/ext/LinearSolveForwardDiffExt.jl
index d133d75c0..79a88f18d 100644
--- a/ext/LinearSolveForwardDiffExt.jl
+++ b/ext/LinearSolveForwardDiffExt.jl
@@ -150,26 +150,29 @@ function SciMLBase.init(
         dual_type = get_dual_type(prob.b)
     end

-    alg isa LinearSolve.DefaultLinearSolver ? 
real_alg = LinearSolve.defaultalg(primal_prob.A, primal_prob.b) : real_alg = alg + alg isa LinearSolve.DefaultLinearSolver ? + real_alg = LinearSolve.defaultalg(primal_prob.A, primal_prob.b) : real_alg = alg non_partial_cache = init( primal_prob, real_alg, assumptions, args...; alias = alias, abstol = abstol, reltol = reltol, maxiters = maxiters, verbose = verbose, Pl = Pl, Pr = Pr, assumptions = assumptions, sensealg = sensealg, u0 = new_u0, kwargs...) - return DualLinearCache(non_partial_cache, dual_type, ∂_A, ∂_b, !isnothing(∂_b) ? zero.(∂_b) : ∂_b, A, b, zeros(dual_type, length(b))) + return DualLinearCache(non_partial_cache, dual_type, ∂_A, ∂_b, + !isnothing(∂_b) ? zero.(∂_b) : ∂_b, A, b, zeros(dual_type, length(b))) end function SciMLBase.solve!(cache::DualLinearCache, args...; kwargs...) - solve!(cache, cache.alg, args...; kwargs...) + solve!(cache, cache.alg, args...; kwargs...) end -function SciMLBase.solve!(cache::DualLinearCache, alg::SciMLLinearSolveAlgorithm, args...; kwargs...) +function SciMLBase.solve!( + cache::DualLinearCache, alg::SciMLLinearSolveAlgorithm, args...; kwargs...) sol, partials = linearsolve_forwarddiff_solve( cache::DualLinearCache, cache.alg, args...; kwargs...) dual_sol = linearsolve_dual_solution(sol.u, partials, cache.dual_type) - + if cache.dual_u isa AbstractArray cache.dual_u[:] = dual_sol else @@ -192,7 +195,6 @@ function Base.setproperty!(dc::DualLinearCache, sym::Symbol, val) setproperty!(dc.linear_cache, sym, val) end - # Update the partials if setting A or b if sym === :A setfield!(dc, :dual_A, val) @@ -221,8 +223,6 @@ function Base.getproperty(dc::DualLinearCache, sym::Symbol) end end - - # Enhanced helper functions for Dual numbers to handle recursion get_dual_type(x::Dual{T, V, P}) where {T, V <: AbstractFloat, P} = typeof(x) get_dual_type(x::Dual{T, V, P}) where {T, V <: Dual, P} = typeof(x) @@ -241,7 +241,6 @@ nodual_value(x::Dual{T, V, P}) where {T, V <: AbstractFloat, P} = ForwardDiff.va nodual_value(x::Dual{T, V, P}) where {T, V <: Dual, P} = x.value # Keep the inner dual intact nodual_value(x::AbstractArray{<:Dual}) = map(nodual_value, x) - function partials_to_list(partial_matrix::AbstractVector{T}) where {T} p = eachindex(first(partial_matrix)) [[partial[i] for partial in partial_matrix] for i in p] @@ -263,6 +262,4 @@ function partials_to_list(partial_matrix) return res_list end - end - diff --git a/ext/LinearSolveRecursiveFactorizationExt.jl b/ext/LinearSolveRecursiveFactorizationExt.jl index a7892e6df..9394f5e08 100644 --- a/ext/LinearSolveRecursiveFactorizationExt.jl +++ b/ext/LinearSolveRecursiveFactorizationExt.jl @@ -1,6 +1,7 @@ module LinearSolveRecursiveFactorizationExt -using LinearSolve: LinearSolve, userecursivefactorization, LinearCache, @get_cacheval, RFLUFactorization +using LinearSolve: LinearSolve, userecursivefactorization, LinearCache, @get_cacheval, + RFLUFactorization using LinearSolve.LinearAlgebra, LinearSolve.ArrayInterface, RecursiveFactorization using SciMLBase: SciMLBase, ReturnCode diff --git a/ext/LinearSolveSparseArraysExt.jl b/ext/LinearSolveSparseArraysExt.jl index ae36933ae..57e135164 100644 --- a/ext/LinearSolveSparseArraysExt.jl +++ b/ext/LinearSolveSparseArraysExt.jl @@ -1,12 +1,15 @@ module LinearSolveSparseArraysExt using LinearSolve: LinearSolve, BLASELTYPES, pattern_changed, ArrayInterface, - @get_cacheval, CHOLMODFactorization, GenericFactorization, GenericLUFactorization, - KLUFactorization, LUFactorization, NormalCholeskyFactorization, OperatorAssumptions, - QRFactorization, 
RFLUFactorization, UMFPACKFactorization, solve + @get_cacheval, CHOLMODFactorization, GenericFactorization, + GenericLUFactorization, + KLUFactorization, LUFactorization, NormalCholeskyFactorization, + OperatorAssumptions, + QRFactorization, RFLUFactorization, UMFPACKFactorization, solve using ArrayInterface: ArrayInterface using LinearAlgebra: LinearAlgebra, I, Hermitian, Symmetric, cholesky, ldiv!, lu, lu!, QR -using SparseArrays: SparseArrays, AbstractSparseArray, AbstractSparseMatrixCSC, SparseMatrixCSC, +using SparseArrays: SparseArrays, AbstractSparseArray, AbstractSparseMatrixCSC, + SparseMatrixCSC, nonzeros, rowvals, getcolptr, sparse, sprand using SparseArrays.UMFPACK: UMFPACK_OK using Base: /, \, convert @@ -107,11 +110,12 @@ function LinearSolve.init_cacheval( alg::LUFactorization, A::AbstractSparseArray{T, Int64}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T<:BLASELTYPES} + verbose::Bool, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} if LinearSolve.is_cusparse(A) ArrayInterface.lu_instance(A) else - SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC{T, Int64}(zero(Int64), zero(Int64), [Int64(1)], Int64[], T[])) + SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC{T, Int64}( + zero(Int64), zero(Int64), [Int64(1)], Int64[], T[])) end end @@ -119,11 +123,12 @@ function LinearSolve.init_cacheval( alg::LUFactorization, A::AbstractSparseArray{T, Int32}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T<:BLASELTYPES} + verbose::Bool, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} if LinearSolve.is_cusparse(A) ArrayInterface.lu_instance(A) else - SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC{T, Int32}(zero(Int32), zero(Int32), [Int32(1)], Int32[], T[])) + SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC{T, Int32}( + zero(Int32), zero(Int32), [Int32(1)], Int32[], T[])) end end @@ -140,7 +145,6 @@ function LinearSolve.init_cacheval( maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) - PREALLOCATED_UMFPACK end @@ -156,16 +160,18 @@ function LinearSolve.init_cacheval( alg::UMFPACKFactorization, A::AbstractSparseArray{T, Int64}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T<:BLASELTYPES} - SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC{T, Int64}(zero(Int64), zero(Int64), [Int64(1)], Int64[], T[])) + verbose::Bool, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} + SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC{T, Int64}( + zero(Int64), zero(Int64), [Int64(1)], Int64[], T[])) end function LinearSolve.init_cacheval( alg::UMFPACKFactorization, A::AbstractSparseArray{T, Int32}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T<:BLASELTYPES} - SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC{T, Int32}(zero(Int32), zero(Int32), [Int32(1)], Int32[], T[])) + verbose::Bool, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} + SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC{T, Int32}( + zero(Int32), zero(Int32), [Int32(1)], Int32[], T[])) end function SciMLBase.solve!( @@ -197,7 +203,8 @@ function SciMLBase.solve!( F = LinearSolve.@get_cacheval(cache, :UMFPACKFactorization) if F.status == UMFPACK_OK y = ldiv!(cache.u, F, cache.b) - SciMLBase.build_linear_solution(alg, y, nothing, cache; retcode = ReturnCode.Success) + SciMLBase.build_linear_solution( + alg, y, nothing, cache; retcode = ReturnCode.Success) else 
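        # UMFPACK did not return UMFPACK_OK, so report failure via the
        # `Infeasible` retcode while leaving `cache.u` untouched.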
SciMLBase.build_linear_solution( alg, cache.u, nothing, cache; retcode = ReturnCode.Infeasible) @@ -236,7 +243,8 @@ function LinearSolve.init_cacheval( maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) - KLU.KLUFactorization(SparseMatrixCSC{Float64, Int32}(0, 0, [Int32(1)], Int32[], Float64[])) + KLU.KLUFactorization(SparseMatrixCSC{Float64, Int32}( + 0, 0, [Int32(1)], Int32[], Float64[])) end # TODO: guard this against errors @@ -266,14 +274,15 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::KLUFactorization; F = LinearSolve.@get_cacheval(cache, :KLUFactorization) if F.common.status == KLU.KLU_OK y = ldiv!(cache.u, F, cache.b) - SciMLBase.build_linear_solution(alg, y, nothing, cache; retcode = ReturnCode.Success) + SciMLBase.build_linear_solution( + alg, y, nothing, cache; retcode = ReturnCode.Success) else SciMLBase.build_linear_solution( alg, cache.u, nothing, cache; retcode = ReturnCode.Infeasible) end end -const PREALLOCATED_CHOLMOD = cholesky(sparse(reshape([1.0],1,1))) +const PREALLOCATED_CHOLMOD = cholesky(sparse(reshape([1.0], 1, 1))) function LinearSolve.init_cacheval(alg::CHOLMODFactorization, A::Union{SparseMatrixCSC{T, Int}, Symmetric{T, SparseMatrixCSC{T, Int}}}, b, u, @@ -290,7 +299,7 @@ function LinearSolve.init_cacheval(alg::CHOLMODFactorization, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} - cholesky(sparse(reshape([one(T)],1,1))) + cholesky(sparse(reshape([one(T)], 1, 1))) end function LinearSolve.init_cacheval(alg::CHOLMODFactorization, @@ -368,7 +377,8 @@ function LinearSolve.init_cacheval( nothing end -function LinearSolve.init_cacheval(alg::QRFactorization, A::SparseMatrixCSC{Float64, <:Integer}, b, u, Pl, Pr, +function LinearSolve.init_cacheval( + alg::QRFactorization, A::SparseMatrixCSC{Float64, <:Integer}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(convert(AbstractMatrix, A), alg.pivot) diff --git a/ext/LinearSolveSparspakExt.jl b/ext/LinearSolveSparspakExt.jl index 617e3a54d..256ff617c 100644 --- a/ext/LinearSolveSparspakExt.jl +++ b/ext/LinearSolveSparspakExt.jl @@ -20,8 +20,7 @@ function LinearSolve.init_cacheval( ::SparspakFactorization, A::AbstractSparseMatrixCSC{Tv, Ti}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) where {Tv, Ti} - - if size(A,1) == size(A,2) + if size(A, 1) == size(A, 2) A = convert(AbstractMatrix, A) if A isa SparseArrays.AbstractSparseArray return sparspaklu( @@ -29,7 +28,8 @@ function LinearSolve.init_cacheval( nonzeros(A)), factorize = false) else - return sparspaklu(SparseMatrixCSC{Tv, Ti}(zero(Ti), zero(Ti), [one(Ti)], Ti[], eltype(A)[]), + return sparspaklu( + SparseMatrixCSC{Tv, Ti}(zero(Ti), zero(Ti), [one(Ti)], Ti[], eltype(A)[]), factorize = false) end else diff --git a/src/LinearSolve.jl b/src/LinearSolve.jl index 42bc13a7b..d7c20f0e1 100644 --- a/src/LinearSolve.jl +++ b/src/LinearSolve.jl @@ -7,17 +7,20 @@ end import PrecompileTools using ArrayInterface: ArrayInterface using Base: Bool, convert, copyto!, adjoint, transpose, /, \, require_one_based_indexing -using LinearAlgebra: LinearAlgebra, BlasInt, LU, Adjoint, BLAS, Bidiagonal, BunchKaufman, - ColumnNorm, Diagonal, Factorization, Hermitian, I, LAPACK, NoPivot, - RowMaximum, RowNonZero, SymTridiagonal, Symmetric, Transpose, - Tridiagonal, UniformScaling, axpby!, axpy!, bunchkaufman, bunchkaufman!, - cholesky, cholesky!, diagind, dot, inv, 
ldiv!, ldlt!, lu, lu!, mul!, norm, - qr, qr!, svd, svd! +using LinearAlgebra: LinearAlgebra, BlasInt, LU, Adjoint, BLAS, Bidiagonal, BunchKaufman, + ColumnNorm, Diagonal, Factorization, Hermitian, I, LAPACK, NoPivot, + RowMaximum, RowNonZero, SymTridiagonal, Symmetric, Transpose, + Tridiagonal, UniformScaling, axpby!, axpy!, bunchkaufman, + bunchkaufman!, + cholesky, cholesky!, diagind, dot, inv, ldiv!, ldlt!, lu, lu!, mul!, + norm, + qr, qr!, svd, svd! using LazyArrays: @~, BroadcastArray using SciMLBase: SciMLBase, LinearAliasSpecifier, AbstractSciMLOperator, init, solve!, reinit!, solve, ReturnCode, LinearProblem -using SciMLOperators: SciMLOperators, AbstractSciMLOperator, IdentityOperator, MatrixOperator, - has_ldiv!, issquare +using SciMLOperators: SciMLOperators, AbstractSciMLOperator, IdentityOperator, + MatrixOperator, + has_ldiv!, issquare using Setfield: @set, @set! using UnPack: @unpack using DocStringExtensions: DocStringExtensions @@ -135,14 +138,14 @@ is called. It's a polyalgorithm that detects the optimal method for a given ## Keyword Arguments -* `safetyfallback`: determines whether to fallback to a column-pivoted QR factorization - when an LU factorization fails. This can be required if `A` is rank-deficient. Defaults - to true. + - `safetyfallback`: determines whether to fallback to a column-pivoted QR factorization + when an LU factorization fails. This can be required if `A` is rank-deficient. Defaults + to true. """ struct DefaultLinearSolver <: SciMLLinearSolveAlgorithm alg::DefaultAlgorithmChoice.T safetyfallback::Bool - DefaultLinearSolver(alg; safetyfallback=true) = new(alg,safetyfallback) + DefaultLinearSolver(alg; safetyfallback = true) = new(alg, safetyfallback) end const BLASELTYPES = Union{Float32, Float64, ComplexF32, ComplexF64} diff --git a/src/adjoint.jl b/src/adjoint.jl index 9b522ec20..281a1ee69 100644 --- a/src/adjoint.jl +++ b/src/adjoint.jl @@ -28,7 +28,8 @@ specific structure distinct from ``A`` then passing in a `linsolve` will be more linsolve::L = missing end -function CRC.rrule(T::typeof(SciMLBase.solve), prob::LinearProblem, alg::Nothing, args...; kwargs...) +function CRC.rrule( + T::typeof(SciMLBase.solve), prob::LinearProblem, alg::Nothing, args...; kwargs...) assump = OperatorAssumptions(issquare(prob.A)) alg = defaultalg(prob.A, prob.b, assump) CRC.rrule(T, prob, alg, args...; kwargs...) diff --git a/src/appleaccelerate.jl b/src/appleaccelerate.jl index a867cc53f..a8772f336 100644 --- a/src/appleaccelerate.jl +++ b/src/appleaccelerate.jl @@ -263,5 +263,6 @@ function SciMLBase.solve!(cache::LinearCache, alg::AppleAccelerateLUFactorizatio aa_getrs!('N', A.factors, A.ipiv, cache.u; info) end - SciMLBase.build_linear_solution(alg, cache.u, nothing, cache; retcode = ReturnCode.Success) + SciMLBase.build_linear_solution( + alg, cache.u, nothing, cache; retcode = ReturnCode.Success) end diff --git a/src/common.jl b/src/common.jl index 90d00757f..f447df8e7 100644 --- a/src/common.jl +++ b/src/common.jl @@ -311,7 +311,8 @@ end function SciMLBase.solve(prob::StaticLinearProblem, alg::Nothing, args...; kwargs...) u = prob.A \ prob.b - return SciMLBase.build_linear_solution(alg, u, nothing, prob; retcode = ReturnCode.Success) + return SciMLBase.build_linear_solution( + alg, u, nothing, prob; retcode = ReturnCode.Success) end function SciMLBase.solve(prob::StaticLinearProblem, @@ -333,5 +334,6 @@ function SciMLBase.solve(prob::StaticLinearProblem, cache = init(prob, alg, args...; kwargs...) 
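        # `init` allocates the solver cache for the chosen algorithm; `solve!`
        # then factorizes and solves in place, so the cache can be reused.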
return solve!(cache) end - return SciMLBase.build_linear_solution(alg, u, nothing, prob; retcode = ReturnCode.Success) + return SciMLBase.build_linear_solution( + alg, u, nothing, prob; retcode = ReturnCode.Success) end diff --git a/src/default.jl b/src/default.jl index 07a12b09c..b3c494df0 100644 --- a/src/default.jl +++ b/src/default.jl @@ -42,7 +42,8 @@ end end # Handle special case of Column-pivoted QR fallback for LU -function __setfield!(cache::DefaultLinearSolverInit, alg::DefaultLinearSolver, v::LinearAlgebra.QRPivoted) +function __setfield!(cache::DefaultLinearSolverInit, + alg::DefaultLinearSolver, v::LinearAlgebra.QRPivoted) setfield!(cache, :QRFactorizationPivoted, v) end @@ -365,7 +366,8 @@ end sol = SciMLBase.solve!(cache, $(algchoice_to_alg(alg)), args...; kwargs...) if sol.retcode === ReturnCode.Failure && alg.safetyfallback ## TODO: Add verbosity logging here about using the fallback - sol = SciMLBase.solve!(cache, QRFactorization(ColumnNorm()), args...; kwargs...) + sol = SciMLBase.solve!( + cache, QRFactorization(ColumnNorm()), args...; kwargs...) SciMLBase.build_linear_solution(alg, sol.u, sol.resid, sol.cache; retcode = sol.retcode, iters = sol.iters, stats = sol.stats) @@ -384,7 +386,8 @@ end sol = SciMLBase.solve!(cache, $(algchoice_to_alg(alg)), args...; kwargs...) if sol.retcode === ReturnCode.Failure && alg.safetyfallback ## TODO: Add verbosity logging here about using the fallback - sol = SciMLBase.solve!(cache, QRFactorization(ColumnNorm()), args...; kwargs...) + sol = SciMLBase.solve!( + cache, QRFactorization(ColumnNorm()), args...; kwargs...) SciMLBase.build_linear_solution(alg, sol.u, sol.resid, sol.cache; retcode = sol.retcode, iters = sol.iters, stats = sol.stats) diff --git a/src/factorization.jl b/src/factorization.jl index d1a496be3..6016c1ba1 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -16,7 +16,8 @@ y = _ldiv!(cache.u, @get_cacheval(cache, $(Meta.quot(defaultalg_symbol(alg)))), cache.b) - return SciMLBase.build_linear_solution(alg, y, nothing, cache; retcode = ReturnCode.Success) + return SciMLBase.build_linear_solution( + alg, y, nothing, cache; retcode = ReturnCode.Success) end end @@ -140,7 +141,8 @@ function SciMLBase.solve!(cache::LinearCache, alg::LUFactorization; kwargs...) end cache.cacheval = fact - if hasmethod(LinearAlgebra.issuccess, Tuple{typeof(fact)}) && !LinearAlgebra.issuccess(fact) + if hasmethod(LinearAlgebra.issuccess, Tuple{typeof(fact)}) && + !LinearAlgebra.issuccess(fact) return SciMLBase.build_linear_solution( alg, cache.u, nothing, cache; retcode = ReturnCode.Failure) end @@ -202,7 +204,8 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::GenericLUFactoriz cache.isfresh = false end - y = ldiv!(cache.u, LinearSolve.@get_cacheval(cache, :GenericLUFactorization)[1], cache.b) + y = ldiv!( + cache.u, LinearSolve.@get_cacheval(cache, :GenericLUFactorization)[1], cache.b) SciMLBase.build_linear_solution(alg, y, nothing, cache; retcode = ReturnCode.Success) end @@ -954,6 +957,7 @@ faster than LU factorization, but is not as numerically stable and thus should o be applied to well-conditioned matrices. !!! warn + `NormalCholeskyFactorization` should only be applied to well-conditioned matrices. As a method it is not able to easily identify possible numerical issues. 
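    (Forming the normal equations `A' * A` roughly squares the condition number of `A`,
    which is why ill-conditioning is amplified by this method.)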
As a check it is recommended that the user checks `A*u-b` is approximately zero, as this may be untrue @@ -1017,7 +1021,8 @@ function SciMLBase.solve!(cache::LinearCache, alg::NormalCholeskyFactorization; end cache.cacheval = fact - if hasmethod(LinearAlgebra.issuccess, Tuple{typeof(fact)}) && !LinearAlgebra.issuccess(fact) + if hasmethod(LinearAlgebra.issuccess, Tuple{typeof(fact)}) && + !LinearAlgebra.issuccess(fact) return SciMLBase.build_linear_solution( alg, cache.u, nothing, cache; retcode = ReturnCode.Failure) end diff --git a/src/generic_lufact.jl b/src/generic_lufact.jl index 36478376e..40148ceff 100644 --- a/src/generic_lufact.jl +++ b/src/generic_lufact.jl @@ -1,122 +1,125 @@ # From LinearAlgebra.lu.jl # Modified to be non-allocating @static if VERSION < v"1.11" - function generic_lufact!(A::AbstractMatrix{T}, pivot::Union{RowMaximum,NoPivot,RowNonZero} = LinearAlgebra.lupivottype(T), - ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)); - check::Bool = true, allowsingular::Bool = false) where {T} + function generic_lufact!(A::AbstractMatrix{T}, + pivot::Union{RowMaximum, NoPivot, RowNonZero} = LinearAlgebra.lupivottype(T), + ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)); + check::Bool = true, allowsingular::Bool = false) where {T} check && LinearAlgebra.LAPACK.chkfinite(A) # Extract values m, n = size(A) - minmn = min(m,n) + minmn = min(m, n) # Initialize variables info = 0 - + @inbounds begin - for k = 1:minmn + for k in 1:minmn # find index max kp = k if pivot === LinearAlgebra.RowMaximum() && k < m amax = abs(A[k, k]) - for i = k+1:m - absi = abs(A[i,k]) + for i in (k + 1):m + absi = abs(A[i, k]) if absi > amax kp = i amax = absi end end elseif pivot === LinearAlgebra.RowNonZero() - for i = k:m - if !iszero(A[i,k]) + for i in k:m + if !iszero(A[i, k]) kp = i break end end end ipiv[k] = kp - if !iszero(A[kp,k]) + if !iszero(A[kp, k]) if k != kp # Interchange - for i = 1:n - tmp = A[k,i] - A[k,i] = A[kp,i] - A[kp,i] = tmp + for i in 1:n + tmp = A[k, i] + A[k, i] = A[kp, i] + A[kp, i] = tmp end end # Scale first column - Akkinv = inv(A[k,k]) - for i = k+1:m - A[i,k] *= Akkinv + Akkinv = inv(A[k, k]) + for i in (k + 1):m + A[i, k] *= Akkinv end elseif info == 0 info = k end # Update the rest - for j = k+1:n - for i = k+1:m - A[i,j] -= A[i,k]*A[k,j] + for j in (k + 1):n + for i in (k + 1):m + A[i, j] -= A[i, k]*A[k, j] end end end end check && LinearAlgebra.checknonsingular(info, pivot) - return LinearAlgebra.LU{T,typeof(A),typeof(ipiv)}(A, ipiv, convert(LinearAlgebra.BlasInt, info)) + return LinearAlgebra.LU{T, typeof(A), typeof(ipiv)}( + A, ipiv, convert(LinearAlgebra.BlasInt, info)) end elseif VERSION < v"1.13" - function generic_lufact!(A::AbstractMatrix{T}, pivot::Union{RowMaximum,NoPivot,RowNonZero} = LinearAlgebra.lupivottype(T), - ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)); - check::Bool = true, allowsingular::Bool = false) where {T} + function generic_lufact!(A::AbstractMatrix{T}, + pivot::Union{RowMaximum, NoPivot, RowNonZero} = LinearAlgebra.lupivottype(T), + ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)); + check::Bool = true, allowsingular::Bool = false) where {T} check && LAPACK.chkfinite(A) # Extract values m, n = size(A) - minmn = min(m,n) + minmn = min(m, n) # Initialize variables info = 0 - + @inbounds begin - for k = 1:minmn + for k in 1:minmn # find index max kp = k if pivot === LinearAlgebra.RowMaximum() && k < m amax = abs(A[k, k]) - for i = k+1:m - absi = abs(A[i,k]) + for i in (k + 1):m + absi = 
abs(A[i, k]) if absi > amax kp = i amax = absi end end elseif pivot === LinearAlgebra.RowNonZero() - for i = k:m - if !iszero(A[i,k]) + for i in k:m + if !iszero(A[i, k]) kp = i break end end end ipiv[k] = kp - if !iszero(A[kp,k]) + if !iszero(A[kp, k]) if k != kp # Interchange - for i = 1:n - tmp = A[k,i] - A[k,i] = A[kp,i] - A[kp,i] = tmp + for i in 1:n + tmp = A[k, i] + A[k, i] = A[kp, i] + A[kp, i] = tmp end end # Scale first column - Akkinv = inv(A[k,k]) - for i = k+1:m - A[i,k] *= Akkinv + Akkinv = inv(A[k, k]) + for i in (k + 1):m + A[i, k] *= Akkinv end elseif info == 0 info = k end # Update the rest - for j = k+1:n - for i = k+1:m - A[i,j] -= A[i,k]*A[k,j] + for j in (k + 1):n + for i in (k + 1):m + A[i, j] -= A[i, k]*A[k, j] end end end @@ -127,8 +130,9 @@ elseif VERSION < v"1.13" info = -info end check && LinearAlgebra._check_lu_success(info, allowsingular) - return LinearAlgebra.LU{T,typeof(A),typeof(ipiv)}(A, ipiv, convert(LinearAlgebra.BlasInt, info)) + return LinearAlgebra.LU{T, typeof(A), typeof(ipiv)}( + A, ipiv, convert(LinearAlgebra.BlasInt, info)) end -else - generic_lufact!(args...; kwargs...) = LinearAlgebra.generic_lufact!(args...; kwargs...) -end \ No newline at end of file +else + generic_lufact!(args...; kwargs...) = LinearAlgebra.generic_lufact!(args...; kwargs...) +end diff --git a/src/iterative_wrappers.jl b/src/iterative_wrappers.jl index 6eb45d55f..16d28d908 100644 --- a/src/iterative_wrappers.jl +++ b/src/iterative_wrappers.jl @@ -327,7 +327,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::KrylovJL; kwargs...) else cache.u = convert(typeof(cache.u), cacheval.x) end - + return SciMLBase.build_linear_solution(alg, cache.u, Ref(resid), cache; iters = stats.niter, retcode, stats) end diff --git a/src/simplegmres.jl b/src/simplegmres.jl index c2596efc2..a21826c9f 100644 --- a/src/simplegmres.jl +++ b/src/simplegmres.jl @@ -307,7 +307,8 @@ function SciMLBase.solve!(cache::SimpleGMRESCache{false}, lincache::LinearCache) # Compute and apply current Givens reflection Ωₖ. 
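            # The reflection zeroes the subdiagonal entry hₖ₊₁.ₖ, keeping R upper
            # triangular so the least-squares problem is solved by back-substitution: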
# [cₖ sₖ] [ r̄ₖ.ₖ ] = [rₖ.ₖ] # [s̄ₖ -cₖ] [hₖ₊₁.ₖ] [ 0 ] - (c[inner_iter], s[inner_iter], R[nr + inner_iter]) = _sym_givens( + (c[inner_iter], s[inner_iter], + R[nr + inner_iter]) = _sym_givens( R[nr + inner_iter], Hbis) diff --git a/test/adjoint.jl b/test/adjoint.jl index 8fd9e163c..d599162df 100644 --- a/test/adjoint.jl +++ b/test/adjoint.jl @@ -30,7 +30,8 @@ db12 = ForwardDiff.gradient(x -> f(eltype(x).(A), x), copy(b1)) A = rand(n, n); b1 = rand(n); -_ff = (x, y) -> f(x, +_ff = (x, + y) -> f(x, y; alg = LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.LUFactorization)) _ff(copy(A), copy(b1)) diff --git a/test/default_algs.jl b/test/default_algs.jl index cf0becfb8..7d912ae13 100644 --- a/test/default_algs.jl +++ b/test/default_algs.jl @@ -136,24 +136,20 @@ sol = solve!(cache) ## Non-square Sparse Defaults # https://github.com/SciML/NonlinearSolve.jl/issues/599 -A = SparseMatrixCSC{Float64, Int64}([ - 1.0 0.0 - 1.0 1.0 -]) +A = SparseMatrixCSC{Float64, Int64}([1.0 0.0 + 1.0 1.0]) b = ones(2) -A2 = hcat(A,A) +A2 = hcat(A, A) prob = LinearProblem(A, b) @test SciMLBase.successful_retcode(solve(prob)) prob2 = LinearProblem(A2, b) @test SciMLBase.successful_retcode(solve(prob2)) -A = SparseMatrixCSC{Float64, Int32}([ - 1.0 0.0 - 1.0 1.0 -]) +A = SparseMatrixCSC{Float64, Int32}([1.0 0.0 + 1.0 1.0]) b = ones(2) -A2 = hcat(A,A) +A2 = hcat(A, A) prob = LinearProblem(A, b) @test_broken SciMLBase.successful_retcode(solve(prob)) @@ -162,12 +158,14 @@ prob2 = LinearProblem(A2, b) # Column-Pivoted QR fallback on failed LU A = [1.0 0 0 0 - 0 1 0 0 - 0 0 1 0 - 0 0 0 0] + 0 1 0 0 + 0 0 1 0 + 0 0 0 0] b = rand(4) prob = LinearProblem(A, b) -sol = solve(prob, LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.LUFactorization; safetyfallback=false)) +sol = solve(prob, + LinearSolve.DefaultLinearSolver( + LinearSolve.DefaultAlgorithmChoice.LUFactorization; safetyfallback = false)) @test sol.retcode === ReturnCode.Failure @test sol.u == zeros(4) diff --git a/test/forwarddiff_overloads.jl b/test/forwarddiff_overloads.jl index a53a4cf0e..3d4a035f3 100644 --- a/test/forwarddiff_overloads.jl +++ b/test/forwarddiff_overloads.jl @@ -94,7 +94,8 @@ function h(p) b = [p[1] + 1, p[2] * 2, p[1]^2]) end -A, b = h([ForwardDiff.Dual(ForwardDiff.Dual(5.0, 1.0, 0.0), 1.0, 0.0), +A, +b = h([ForwardDiff.Dual(ForwardDiff.Dual(5.0, 1.0, 0.0), 1.0, 0.0), ForwardDiff.Dual(ForwardDiff.Dual(5.0, 1.0, 0.0), 0.0, 1.0)]) prob = LinearProblem(A, b) @@ -107,7 +108,8 @@ original_x_p = A \ b prob = LinearProblem(A, b) cache = init(prob) -new_A, new_b = h([ForwardDiff.Dual(ForwardDiff.Dual(10.0, 1.0, 0.0), 1.0, 0.0), +new_A, +new_b = h([ForwardDiff.Dual(ForwardDiff.Dual(10.0, 1.0, 0.0), 1.0, 0.0), ForwardDiff.Dual(ForwardDiff.Dual(10.0, 1.0, 0.0), 0.0, 1.0)]) cache.A = new_A @@ -190,4 +192,4 @@ prob = LinearProblem(sparse(A), sparse(b)) overload_x_p = solve(prob, UMFPACKFactorization()) backslash_x_p = A \ b -@test ≈(overload_x_p, backslash_x_p, rtol = 1e-9) \ No newline at end of file +@test ≈(overload_x_p, backslash_x_p, rtol = 1e-9) diff --git a/test/gpu/cuda.jl b/test/gpu/cuda.jl index 2b2dde30a..8e0b5a3ab 100644 --- a/test/gpu/cuda.jl +++ b/test/gpu/cuda.jl @@ -105,4 +105,4 @@ end prob = LinearProblem(A_gpu_csr, b_gpu) sol = solve(prob) -end \ No newline at end of file +end diff --git a/test/hypretests.jl b/test/hypretests.jl index da7e7e3a5..0d04ebd94 100644 --- a/test/hypretests.jl +++ b/test/hypretests.jl @@ -87,7 +87,7 @@ function test_interface(alg; kw...) 
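    # `to_array` maps HYPRE-side vectors back to plain Julia arrays for the
    # residual checks below.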
# Solve prob directly (without cache) y = solve(prob, alg; cache_kwargs..., Pl = HYPRE.BoomerAMG) - @test A * to_array(y.u)≈b atol=atol rtol=rtol + @test A*to_array(y.u)≈b atol=atol rtol=rtol @test y.iters > 0 @test y.resid < rtol @@ -99,7 +99,7 @@ function test_interface(alg; kw...) cache = y.cache @test cache.isfresh == cache.cacheval.isfresh_A == cache.cacheval.isfresh_b == cache.cacheval.isfresh_u == false - @test A * to_array(y.u)≈b atol=atol rtol=rtol + @test A*to_array(y.u)≈b atol=atol rtol=rtol # Update A cache.A = A @@ -109,7 +109,7 @@ function test_interface(alg; kw...) cache = y.cache @test cache.isfresh == cache.cacheval.isfresh_A == cache.cacheval.isfresh_b == cache.cacheval.isfresh_u == false - @test A * to_array(y.u)≈b atol=atol rtol=rtol + @test A*to_array(y.u)≈b atol=atol rtol=rtol # Update b b2 = 2 * to_array(b) @@ -123,7 +123,7 @@ function test_interface(alg; kw...) cache = y.cache @test cache.isfresh == cache.cacheval.isfresh_A == cache.cacheval.isfresh_b == cache.cacheval.isfresh_u == false - @test A * to_array(y.u)≈to_array(b2) atol=atol rtol=rtol + @test A*to_array(y.u)≈to_array(b2) atol=atol rtol=rtol end return end diff --git a/test/nopre/enzyme.jl b/test/nopre/enzyme.jl index b64b39f0f..06703a117 100644 --- a/test/nopre/enzyme.jl +++ b/test/nopre/enzyme.jl @@ -32,13 +32,15 @@ dA = zeros(n, n); b1 = rand(n); db1 = zeros(n); -_ff = (x, y) -> f(x, +_ff = (x, + y) -> f(x, y; alg = LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.LUFactorization)) _ff(copy(A), copy(b1)) Enzyme.autodiff(Reverse, - (x, y) -> f(x, + (x, + y) -> f(x, y; alg = LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.LUFactorization)), Duplicated(copy(A), dA), @@ -169,7 +171,8 @@ end dA = zeros(n, n); db1 = zeros(n); db2 = zeros(n); -Enzyme.autodiff(set_runtime_activity(Reverse), f3, Duplicated(copy(A), dA), Duplicated(copy(b1), db1), Duplicated(copy(b2), db2)) +Enzyme.autodiff(set_runtime_activity(Reverse), f3, Duplicated(copy(A), dA), + Duplicated(copy(b1), db1), Duplicated(copy(b2), db2)) @test dA ≈ dA2 atol=5e-5 @test db1 ≈ db12 @@ -194,7 +197,8 @@ b2 = rand(n); db2 = zeros(n); f4(A, b1, b2) -@test_throws "Adjoint case currently not handled" Enzyme.autodiff(Reverse, f4, Duplicated(copy(A), dA), +@test_throws "Adjoint case currently not handled" Enzyme.autodiff( + Reverse, f4, Duplicated(copy(A), dA), Duplicated(copy(b1), db1), Duplicated(copy(b2), db2)) #= @@ -250,15 +254,16 @@ end # https://github.com/SciML/LinearSolve.jl/issues/479 function testls(A, b, u) - oa = OperatorAssumptions(true, condition = LinearSolve.OperatorCondition.WellConditioned) + oa = OperatorAssumptions( + true, condition = LinearSolve.OperatorCondition.WellConditioned) prob = LinearProblem(A, b) linsolve = init(prob, LUFactorization(), assumptions = oa) - cache =solve!(linsolve) + cache = solve!(linsolve) sum(cache.u) end -A = [1. 2.; 3. 4.] -b = [1., 2.] +A = [1.0 2.0; 3.0 4.0] +b = [1.0, 2.0] u = zero(b) dA = deepcopy(A) db = deepcopy(b) @@ -266,14 +271,15 @@ du = deepcopy(u) Enzyme.autodiff(Reverse, testls, Duplicated(A, dA), Duplicated(b, db), Duplicated(u, du)) function testls(A, b, u) - oa = OperatorAssumptions(true, condition = LinearSolve.OperatorCondition.WellConditioned) + oa = OperatorAssumptions( + true, condition = LinearSolve.OperatorCondition.WellConditioned) prob = LinearProblem(A, b) linsolve = init(prob, LUFactorization(), assumptions = oa) solve!(linsolve) sum(linsolve.u) end -A = [1. 2.; 3. 4.] -b = [1., 2.] 
+A = [1.0 2.0; 3.0 4.0] +b = [1.0, 2.0] u = zero(b) dA2 = deepcopy(A) db2 = deepcopy(b) @@ -282,4 +288,4 @@ Enzyme.autodiff(Reverse, testls, Duplicated(A, dA2), Duplicated(b, db2), Duplica @test dA == dA2 @test db == db2 -@test du == du2 \ No newline at end of file +@test du == du2 diff --git a/test/nopre/jet.jl b/test/nopre/jet.jl index 5ed384c7c..bc9aab11f 100644 --- a/test/nopre/jet.jl +++ b/test/nopre/jet.jl @@ -16,4 +16,4 @@ prob = LinearProblem(sparse(A), b) #JET.@test_opt solve(prob, UMFPACKFactorization()) #JET.@test_opt solve(prob, KLUFactorization()) #JET.@test_opt solve(prob, SparspakFactorization()) -#JET.@test_opt solve(prob) \ No newline at end of file +#JET.@test_opt solve(prob) diff --git a/test/qa.jl b/test/qa.jl index f69b08f96..92c676669 100644 --- a/test/qa.jl +++ b/test/qa.jl @@ -24,9 +24,10 @@ end if klu_mod !== nothing unanalyzable_mods = (unanalyzable_mods..., klu_mod) end - - @test check_no_implicit_imports(LinearSolve; skip = (Base, Core), + + @test check_no_implicit_imports(LinearSolve; skip = (Base, Core), allow_unanalyzable = unanalyzable_mods) === nothing - @test check_no_stale_explicit_imports(LinearSolve; allow_unanalyzable = unanalyzable_mods) === nothing + @test check_no_stale_explicit_imports( + LinearSolve; allow_unanalyzable = unanalyzable_mods) === nothing @test check_all_qualified_accesses_via_owners(LinearSolve) === nothing end diff --git a/test/retcodes.jl b/test/retcodes.jl index 4095e7d16..21bfcd923 100644 --- a/test/retcodes.jl +++ b/test/retcodes.jl @@ -27,9 +27,10 @@ lualgs = ( LUFactorization(), QRFactorization(), GenericLUFactorization(), - LinearSolve.DefaultLinearSolver(LinearSolve.DefaultAlgorithmChoice.LUFactorization; safetyfallback=false), + LinearSolve.DefaultLinearSolver( + LinearSolve.DefaultAlgorithmChoice.LUFactorization; safetyfallback = false), RFLUFactorization(), - NormalCholeskyFactorization(), + NormalCholeskyFactorization() ) @testset "Failure" begin for alg in lualgs @@ -40,8 +41,8 @@ lualgs = ( linsolve = init(prob, alg) sol = solve!(linsolve) if alg isa NormalCholeskyFactorization - # This is a known and documented incorrectness in NormalCholeskyFactorization - # due to numerical instability in its method that is fundamental. + # This is a known and documented incorrectness in NormalCholeskyFactorization + # due to numerical instability in its method that is fundamental. @test SciMLBase.successful_retcode(sol.retcode) else @test !SciMLBase.successful_retcode(sol.retcode) diff --git a/test/sparse_vector.jl b/test/sparse_vector.jl index a206da4aa..7c860f0e7 100644 --- a/test/sparse_vector.jl +++ b/test/sparse_vector.jl @@ -52,4 +52,4 @@ A = sprand(ComplexF64, 10, 10, 0.5) b = rand(ComplexF64, 10) cache = init(LinearProblem(A, b, UMFPACKFactorization())) -sol = solve!(cache) \ No newline at end of file +sol = solve!(cache)
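For reference, the last hunk exercises LinearSolve.jl's caching interface with `UMFPACKFactorization`.
Here is a minimal self-contained sketch of that interface; note the algorithm is passed to `init` rather
than to `LinearProblem`, and the `+ I` diagonal shift is our own addition (an assumption, not part of the
test) to keep the random sparse matrix comfortably nonsingular:

```julia
using LinearSolve, LinearAlgebra, SparseArrays

A = sprand(ComplexF64, 10, 10, 0.5) + I   # random sparse matrix, shifted to be nonsingular
b = rand(ComplexF64, 10)

prob = LinearProblem(A, b)
cache = init(prob, UMFPACKFactorization())  # builds the cache; factorization happens in solve!
sol = solve!(cache)

cache.b = rand(ComplexF64, 10)   # a new right-hand side reuses the existing LU factorization
sol2 = solve!(cache)
```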