diff --git a/docs/src/index.md b/docs/src/index.md index 2d4d813..f9b0402 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -46,4 +46,8 @@ Please cite both [SuiteSparse](https://github.com/DrTimothyAldenDavis/SuiteSpars ```@docs klu +klu! +klu_refactor! +nonzeros +solve! ``` diff --git a/src/KLU.jl b/src/KLU.jl index 9aad615..455d61b 100644 --- a/src/KLU.jl +++ b/src/KLU.jl @@ -2,12 +2,15 @@ module KLU using SparseArrays using SparseArrays: SparseMatrixCSC -import SparseArrays: nnz +import SparseArrays: nnz, nonzeros export klu, klu! +export klu_factor!, klu_refactor!, solve! +export nonzeros const libklu = :libklu include("wrappers.jl") +include("type_resolution.jl") import Base: (\), size, getproperty, setproperty!, propertynames, show @@ -65,10 +68,10 @@ Data structure for parameters of and statistics generated by KLU functions. # Fields - `tol::Float64`: Partial pivoting tolerance for diagonal preference - `btf::Int64`: If `btf != 0` use BTF pre-ordering -- `ordering::Int64`: If `ordering == 0` use AMD to permute, if `ordering == 1` use COLAMD, +- `ordering::Int64`: If `ordering == 0` use AMD to permute, if `ordering == 1` use COLAMD, \ if `ordering == 3` use the user provided ordering function. -- `scale::Int64`: If `scale == 1` then `A[:,i] ./= sum(abs.(A[:,i]))`, if `scale == 2` then -`A[:,i] ./= maximum(abs.(A[:,i]))`. If `scale == 0` no scaling is done, and the input is +- `scale::Int64`: If `scale == 1` then `A[:,i] ./= sum(abs.(A[:,i]))`, if `scale == 2` then \ +`A[:,i] ./= maximum(abs.(A[:,i]))`. If `scale == 0` no scaling is done, and the input is \ checked for errors if `scale >= 0`. See the [KLU User Guide](https://github.com/DrTimothyAldenDavis/SuiteSparse/raw/master/KLU/Doc/KLU_UserGuide.pdf) @@ -87,20 +90,25 @@ macro isok(A) :(kluerror($(esc(A)))) end +# this ought to be handled via multiple dispatch, so it can be done at compile time. function _klu_name(name, Tv, Ti) outname = "klu_" * (Tv === :Float64 ? "" : "z") * (Ti === :Int64 ? 
"l_" : "_") * name return Symbol(replace(outname, "__"=>"_")) end -function _common(T) - if T == Int64 - common = klu_l_common() - ok = klu_l_defaults(Ref(common)) - elseif T == Int32 - common = klu_common() - ok = klu_defaults(Ref(common)) + +function _common(::Type{Int64}) + common = klu_l_common() + ok = klu_l_defaults(Ref(common)) + if ok == 1 + return common else - throw(ArgumentError("Index type must be Int64 or Int32")) + throw(ErrorException("Could not initialize common struct.")) end +end + +function _common(::Type{Int32}) + common = klu_common() + ok = klu_defaults(Ref(common)) if ok == 1 return common else @@ -144,24 +152,14 @@ end function _free_symbolic(K::AbstractKLUFactorization{Tv, Ti}) where {Ti<:KLUITypes, Tv} K._symbolic == C_NULL && return C_NULL - if Ti == Int64 - klu_l_free_symbolic(Ref(Ptr{klu_l_symbolic}(K._symbolic)), Ref(K.common)) - elseif Ti == Int32 - klu_free_symbolic(Ref(Ptr{klu_symbolic}(K._symbolic)), Ref(K.common)) - end + __free_sym(Ti, Ref(Ptr{__symType(Ti)}(K._symbolic)), Ref(K.common)) K._symbolic = C_NULL end -for Ti ∈ KLUIndexTypes, Tv ∈ KLUValueTypes - klufree = _klu_name("free_numeric", Tv, Ti) - ptr = _klu_name("numeric", :Float64, Ti) - @eval begin - function _free_numeric(K::AbstractKLUFactorization{$Tv, $Ti}) - K._numeric == C_NULL && return C_NULL - $klufree(Ref(Ptr{$ptr}(K._numeric)), Ref(K.common)) - K._numeric = C_NULL - end - end +function _free_numeric(K::AbstractKLUFactorization{Tv, Ti}) where {Tv<:KLUTypes, Ti<:KLUITypes} + K._numeric == C_NULL && return C_NULL + __free_num(Tv, Ti, Ref(Ptr{__numType(Ti)}(K._numeric)), Ref(K.common)) + K._numeric = C_NULL end function KLUFactorization(A::SparseMatrixCSC{Tv, Ti}) where {Tv<:KLUTypes, Ti<:KLUITypes} @@ -185,6 +183,15 @@ function size(K::AbstractKLUFactorization, dim::Integer) end nnz(K::AbstractKLUFactorization) = K.lnz + K.unz + K.nzoff +"""The nonzeros of the factorization `K`. +!!! warning "nonzeros(K) may or may not point to nonzeros(A)" + After `K = klu(A)`, it may or may not be the case that `nonzeros(K) === nonzeros(A)`. \ + If `A` is of type `SparseMatrixCSC{Float32, Int64}`, then `nonzeros(K)` will be \ + a `Vector{Float64}`, while `nonzeros(A)` is a `Vector{Float32}`, so they're definitely distinct. \ + On the other hand, if `A` has value type `Float64`, then changing the values in `nonzeros(A)` \ + will change `nonzeros(K)` as well. +""" +nonzeros(K::AbstractKLUFactorization) = K.nzval if !isdefined(LinearAlgebra, :AdjointFactorization) # VERSION < v"1.10-" Base.adjoint(K::AbstractKLUFactorization) = Adjoint(K) @@ -275,6 +282,7 @@ function getproperty(klu::AbstractKLUFactorization{Tv, Ti}, s::Symbol) where {Tv Lp = Vector{Ti}(undef, klu.n + 1) Li = Vector{Ti}(undef, lnz) Lx = Vector{Float64}(undef, lnz) + # could also be replaced with multiple dispatch. Lz = Tv == Float64 ? 
C_NULL : Vector{Float64}(undef, lnz) _extract!(klu; Lp, Li, Lx, Lz) return Lp, Li, Lx, Lz @@ -375,13 +383,14 @@ function show(io::IO, mime::MIME{Symbol("text/plain")}, K::AbstractKLUFactorizat end end +""" + klu_analyze!(K::KLUFactorization; check = true) + +Compute and store (as `K._symbolic`) a symbolic factorization of the sparse matrix \ +represented by the fields of `K`.""" function klu_analyze!(K::KLUFactorization{Tv, Ti}; check=true) where {Tv, Ti<:KLUITypes} if K._symbolic != C_NULL return K end - if Ti == Int64 - sym = klu_l_analyze(K.n, K.colptr, K.rowval, Ref(K.common)) - else - sym = klu_analyze(K.n, K.colptr, K.rowval, Ref(K.common)) - end + sym = __analyze(Ti, K.n, K.colptr, K.rowval, Ref(K.common)) if sym == C_NULL && check kluerror(K.common) else @@ -390,14 +399,13 @@ function klu_analyze!(K::KLUFactorization{Tv, Ti}; check=true) where {Tv, Ti<:KL return K end -# User provided permutation vectors: +""" + klu_analyze!(K::KLUFactorization{Tv, Ti}, P::Vector{Ti}, Q::Vector{Ti}; check = true) + +Variant of `klu_analyze!` that allows for user-provided permutation permutation vectors `P` and `Q`.""" function klu_analyze!(K::KLUFactorization{Tv, Ti}, P::Vector{Ti}, Q::Vector{Ti}; check=true) where {Tv, Ti<:KLUITypes} if K._symbolic != C_NULL return K end - if Ti == Int64 - sym = klu_l_analyze_given(K.n, K.colptr, K.rowval, P, Q, Ref(K.common)) - else - sym = klu_analyze_given(K.n, K.colptr, K.rowval, P, Q, Ref(K.common)) - end + sym = __analyze!(K.n, K.colptr, K.rowval, P, Q, Ref(K.common)) if sym == C_NULL && check kluerror(K.common) else @@ -406,85 +414,74 @@ function klu_analyze!(K::KLUFactorization{Tv, Ti}, P::Vector{Ti}, Q::Vector{Ti}; return K end -for Tv ∈ KLUValueTypes, Ti ∈ KLUIndexTypes - factor = _klu_name("factor", Tv, Ti) - @eval begin - function klu_factor!(K::KLUFactorization{$Tv, $Ti}; check=true, allowsingular=false) - K._symbolic == C_NULL && K.common.status >= KLU_OK && klu_analyze!(K) - if K._symbolic != C_NULL && K.common.status >= KLU_OK - K.common.halt_if_singular = !allowsingular && check - num = $factor(K.colptr, K.rowval, K.nzval, K._symbolic, Ref(K.common)) - K.common.halt_if_singular = true - else - num = C_NULL - end - if num == C_NULL && check - kluerror(K.common) - else - if allowsingular - K.common.status < KLU_OK && check && kluerror(K.common) - else - (K.common.status == KLU_OK) || (check && kluerror(K.common)) - end - end - K._numeric = num - return K + +function klu_factor!(K::KLUFactorization{Tv, Ti}; check=true, allowsingular=false) where {Tv<:KLUTypes, Ti<:KLUITypes} + K._symbolic == C_NULL && K.common.status >= KLU_OK && klu_analyze!(K) + if K._symbolic != C_NULL && K.common.status >= KLU_OK + K.common.halt_if_singular = !allowsingular && check + num = __factor(Tv, Ti, K.colptr, K.rowval, K.nzval, K._symbolic, Ref(K.common)) + K.common.halt_if_singular = true + else + num = C_NULL + end + if num == C_NULL && check + kluerror(K.common) + else + if allowsingular + K.common.status < KLU_OK && check && kluerror(K.common) + else + (K.common.status == KLU_OK) || (check && kluerror(K.common)) end end + K._numeric = num + return K end -for Tv ∈ KLUValueTypes, Ti ∈ KLUIndexTypes - rgrowth = _klu_name("rgrowth", Tv, Ti) - rcond = _klu_name("rcond", Tv, Ti) - condest = _klu_name("condest", Tv, Ti) - @eval begin - """ - rgrowth(K::KLUFactorization) - - Calculate the reciprocal pivot growth. 
- """ - function rgrowth(K::KLUFactorization{$Tv, $Ti}) - K._numeric == C_NULL && klu_factor!(K) - ok = $rgrowth(K.colptr, K.rowval, K.nzval, K._symbolic, K._numeric, Ref(K.common)) - if ok == 0 - kluerror(K.common) - else - return K.common.rgrowth - end - end - """ - rcond(K::KLUFactorization) +""" + rgrowth(K::KLUFactorization) - Cheaply estimate the reciprocal condition number. - """ - function rcond(K::AbstractKLUFactorization{$Tv, $Ti}) - K._numeric == C_NULL && klu_factor!(K) - ok = $rcond(K._symbolic, K._numeric, Ref(K.common)) - if ok == 0 - kluerror(K.common) - else - return K.common.rcond - end - end +Calculate the reciprocal pivot growth. +""" +function rgrowth(K::KLUFactorization{Tv, Ti}) where {Tv<:KLUTypes, Ti<:KLUITypes} + K._numeric == C_NULL && klu_factor!(K) + ok = __rgrowth(Tv, Ti, K.colptr, K.rowval, K.nzval, K._symbolic, K._numeric, Ref(K.common)) + if ok == 0 + kluerror(K.common) + else + return K.common.rgrowth + end +end - """ - condest(K::KLUFactorization) +""" + rcond(K::KLUFactorization) - Accurately estimate the 1-norm condition number of the factorization. - """ - function condest(K::KLUFactorization{$Tv, $Ti}) - K._numeric == C_NULL && klu_factor!(K) - ok = $condest(K.colptr, K.nzval, K._symbolic, K._numeric, Ref(K.common)) - if ok == 0 - kluerror(K.common) - else - return K.common.condest - end - end +Cheaply estimate the reciprocal condition number. +""" +function rcond(K::AbstractKLUFactorization{Tv, Ti}) where {Tv<:KLUTypes, Ti<:KLUITypes} + K._numeric == C_NULL && klu_factor!(K) + ok = __rcond(Tv, Ti, K._symbolic, K._numeric, Ref(K.common)) + if ok == 0 + kluerror(K.common) + else + return K.common.rcond end end +""" + condest(K::KLUFactorization) + +Accurately estimate the 1-norm condition number of the factorization. +""" +function condest(K::KLUFactorization{Tv, Ti}) where {Tv<:KLUTypes, Ti<:KLUITypes} + K._numeric == C_NULL && klu_factor!(K) + ok = __condest(Tv, Ti, K.colptr, K.nzval, K._symbolic, K._numeric, Ref(K.common)) + if ok == 0 + kluerror(K.common) + else + return K.common.condest + end +end """ klu_factor!(K::KLUFactorization; check=true, allowsingular=false) -> K::KLUFactorization @@ -496,11 +493,11 @@ existing `KLUFactorization` instance. # Arguments - `K::KLUFactorization`: The matrix factorization object to be factored. - `check::Bool`: If `true` (default) check for errors after the factorization. If `false` errors must be checked by the user with `klu.common.status`. - - `allowsingular::Bool`: If `true` (default `false`) allow the factorization to proceed even if the matrix is singular. Note that this will allow for - silent divide by zero errors in subsequent `solve!` or `ldiv!` calls if singularity is not checked by the user with `klu.common.status == KLU.KLU_SINGULAR` + - `allowsingular::Bool`: If `true` (default `false`) allow the factorization to proceed even if the matrix is singular. Note that this will allow for \ +silent divide by zero errors in subsequent `solve!` or `ldiv!` calls if singularity is not checked by the user with `klu.common.status == KLU.KLU_SINGULAR` -The `K.common` struct can be used to modify certain options and parameters, see the -[KLU documentation](https://github.com/DrTimothyAldenDavis/SuiteSparse/raw/master/KLU/Doc/KLU_UserGuide.pdf) +The `K.common` struct can be used to modify certain options and parameters, see the \ +[KLU documentation](https://github.com/DrTimothyAldenDavis/SuiteSparse/raw/master/KLU/Doc/KLU_UserGuide.pdf) \ or [`klu_common`](@ref) for more information. """ klu_factor! 
@@ -538,8 +535,10 @@ The relation between `K` and `A` is # Arguments - `A::SparseMatrixCSC` or `n::Integer`, `colptr::Vector{Ti}`, `rowval::Vector{Ti}`, `nzval::Vector{Tv}`: The sparse matrix or the zero-based sparse matrix components to be factored. - `check::Bool`: If `true` (default) check for errors after the factorization. If `false` errors must be checked by the user with `klu.common.status`. - - `allowsingular::Bool`: If `true` (default `false`) allow the factorization to proceed even if the matrix is singular. Note that this will allow for - silent divide by zero errors in subsequent `solve!` or `ldiv!` calls if singularity is not checked by the user with `klu.common.status == KLU.KLU_SINGULAR` + - `allowsingular::Bool`: If `true` (default `false`) allow the factorization to proceed even if the matrix is singular.\ + Note that this will allow for silent divide by zero errors in subsequent `solve!` or `ldiv!` calls if singularity is not checked by the user with `klu.common.status == KLU.KLU_SINGULAR` + - `full_factor::Bool`: if `true` (default), perform both numeric and symbolic factorization. If `false`, only perform symbolic factorization. \ + Useful for cases where only the sparse structure of `A` is known at time of construction. !!! note `klu(A::SparseMatrixCSC)` uses the KLU[^ACM907] library that is part of @@ -549,31 +548,56 @@ The relation between `K` and `A` is [^ACM907]: Davis, Timothy A., & Palamadai Natarajan, E. (2010). Algorithm 907: KLU, A Direct Sparse Solver for Circuit Simulation Problems. ACM Trans. Math. Softw., 37(3). doi:10.1145/1824801.1824814 """ -function klu(n, colptr::Vector{Ti}, rowval::Vector{Ti}, nzval::Vector{Tv}; check=true, allowsingular=false) where {Ti<:KLUITypes, Tv<:AbstractFloat} +function klu(n, + colptr::Vector{Ti}, + rowval::Vector{Ti}, + nzval::Vector{Tv}; + check=true, + allowsingular=false, + full_factor=true, +) where {Ti<:KLUITypes, Tv<:AbstractFloat} if Tv != Float64 nzval = convert(Vector{Float64}, nzval) end K = KLUFactorization(n, colptr, rowval, nzval) - return klu_factor!(K; check, allowsingular) + return full_factor ? klu_factor!(K; check, allowsingular) : klu_analyze!(K; check) end -function klu(n, colptr::Vector{Ti}, rowval::Vector{Ti}, nzval::Vector{Tv}; check=true, allowsingular=false) where {Ti<:KLUITypes, Tv<:Complex} +function klu(n, + colptr::Vector{Ti}, + rowval::Vector{Ti}, + nzval::Vector{Tv}; + check=true, + allowsingular=false, + full_factor=true, +) where {Ti<:KLUITypes, Tv<:Complex} if Tv != ComplexF64 nzval = convert(Vector{ComplexF64}, nzval) end K = KLUFactorization(n, colptr, rowval, nzval) - return klu_factor!(K; check, allowsingular) + return full_factor ? 
klu_factor!(K; check, allowsingular) : klu_analyze!(K; check) end -function klu(A::SparseMatrixCSC{Tv, Ti}; check=true, allowsingular=false) where {Tv<:Union{AbstractFloat, Complex}, Ti<:KLUITypes} +function klu(A::SparseMatrixCSC{Tv, Ti}; + check=true, + allowsingular=false, + full_factor=true, +) where {Tv<:Union{AbstractFloat, Complex}, Ti<:KLUITypes} n = size(A, 1) n == size(A, 2) || throw(DimensionMismatch()) - return klu(n, decrement(A.colptr), decrement(A.rowval), A.nzval; check, allowsingular) + return klu(n, + decrement(A.colptr), + decrement(A.rowval), + A.nzval; + check, + allowsingular, + full_factor + ) end """ klu!(K::KLUFactorization, A::SparseMatrixCSC; check=true, allowsingular=false) -> K::KLUFactorization - klu(K::KLUFactorization, nzval::Vector{Tv}; check=true, allowsingular=false) -> K::KLUFactorization + klu!(K::KLUFactorization, nzval::Vector{Tv}; check=true, allowsingular=false) -> K::KLUFactorization Recompute the KLU factorization `K` using the values of `A` or `nzval`. The pattern of the original matrix used to create `K` must match the pattern of `A`. @@ -589,7 +613,7 @@ See also: [`klu`](@ref) - `A::SparseMatrixCSC` or `n::Integer`, `colptr::Vector{Ti}`, `rowval::Vector{Ti}`, `nzval::Vector{Tv}`: The sparse matrix or the zero-based sparse matrix components to be factored. - `check::Bool`: If `true` (default) check for errors after the factorization. If `false` errors must be checked by the user with `klu.common.status`. - `allowsingular::Bool`: If `true` (default `false`) allow the factorization to proceed even if the matrix is singular. Note that this will allow for - silent divide by zero errors in subsequent `solve!` or `ldiv!` calls if singularity is not checked by the user with `klu.common.status == KLU.KLU_SINGULAR` + silent divide by zero errors in subsequent `solve!` or `ldiv!` calls if singularity is not checked by the user with `klu.common.status == KLU.KLU_SINGULAR` !!! note `klu(A::SparseMatrixCSC)` uses the KLU[^ACM907] library that is part of @@ -599,23 +623,20 @@ See also: [`klu`](@ref) [^ACM907]: Davis, Timothy A., & Palamadai Natarajan, E. (2010). Algorithm 907: KLU, A Direct Sparse Solver for Circuit Simulation Problems. ACM Trans. Math. Softw., 37(3). doi:10.1145/1824801.1824814 """ -klu! 
- -for Tv ∈ KLUValueTypes, Ti ∈ KLUIndexTypes - refactor = _klu_name("refactor", Tv, Ti) - @eval begin - function klu!(K::KLUFactorization{$Tv, $Ti}, nzval::Vector{$Tv}; check=true, allowsingular=false) - length(nzval) != length(K.nzval) && throw(DimensionMismatch()) - K.nzval = nzval - K.common.halt_if_singular = !allowsingular && check - ok = $refactor(K.colptr, K.rowval, K.nzval, K._symbolic, K._numeric, Ref(K.common)) - K.common.halt_if_singular = true - if (ok == 1 || !check || (allowsingular && K.common.status >= KLU_OK)) - return K - else - kluerror(K.common) - end - end +function klu!(K::KLUFactorization{Tv, Ti}, + nzval::Vector{Tv}; + check=true, + allowsingular=false, +) where {Tv<:KLUTypes, Ti<:KLUITypes} + length(nzval) != length(K.nzval) && throw(DimensionMismatch()) + K.nzval = nzval + K.common.halt_if_singular = !allowsingular && check + ok = __refactor(Tv, Ti, K.colptr, K.rowval, K.nzval, K._symbolic, K._numeric, Ref(K.common)) + K.common.halt_if_singular = true + if (ok == 1 || !check || (allowsingular && K.common.status >= KLU_OK)) + return K + else + kluerror(K.common) end end @@ -641,6 +662,12 @@ function klu!(K::KLUFactorization{U}, S::SparseMatrixCSC{U}; check=true, allowsi return klu!(K, S.nzval; check, allowsingular) end +""" + klu_refactor!(...) -> K::KLUFactorization + +Same as [`klu!`](@ref): alias for naming consistency reasons.""" +klu_refactor!(args...) = klu!(args...) + #B is the modified argument here. To match with the math it should be (klu, B). But convention is # modified first. Thoughts? @@ -663,19 +690,13 @@ This function overwrites `B` with the solution `X`, for a new solution vector `X This status should be checked by the user before solve calls if singularity checks were disabled on factorization using `check=false` or `allowsingular=true`. """ -solve! -for Tv ∈ KLUValueTypes, Ti ∈ KLUIndexTypes - solve = _klu_name("solve", Tv, Ti) - @eval begin - function solve!(klu::AbstractKLUFactorization{$Tv, $Ti}, B::StridedVecOrMat{$Tv}; check=true) - stride(B, 1) == 1 || throw(ArgumentError("B must have unit strides")) - klu._numeric == C_NULL && klu_factor!(klu) - size(B, 1) == size(klu, 1) || throw(DimensionMismatch()) - isok = $solve(klu._symbolic, klu._numeric, size(B, 1), size(B, 2), B, Ref(klu.common)) - isok == 0 && check && kluerror(klu.common) - return B - end - end +function solve!(klu::AbstractKLUFactorization{Tv, Ti}, B::StridedVecOrMat{Tv}; check=true) where {Tv<:KLUTypes, Ti<:KLUITypes} + stride(B, 1) == 1 || throw(ArgumentError("B must have unit strides")) + klu._numeric == C_NULL && klu_factor!(klu) + size(B, 1) == size(klu, 1) || throw(DimensionMismatch()) + isok = __solve(Tv, Ti, klu._symbolic, klu._numeric, size(B, 1), size(B, 2), B, Ref(klu.common)) + isok == 0 && check && kluerror(klu.common) + return B end for Tv ∈ KLUValueTypes, Ti ∈ KLUIndexTypes diff --git a/src/type_resolution.jl b/src/type_resolution.jl new file mode 100644 index 0000000..981144a --- /dev/null +++ b/src/type_resolution.jl @@ -0,0 +1,53 @@ +__free_sym(::Type{Int32}, args...) = KLU.klu_free_symbolic(args...) +__free_sym(::Type{Int64}, args...) = KLU.klu_l_free_symbolic(args...) + +__free_num(::Type{Float64}, ::Type{Int32}, args...) = KLU.klu_free_numeric(args...) +__free_num(::Type{Float64}, ::Type{Int64}, args...) = KLU.klu_l_free_numeric(args...) +__free_num(::Type{ComplexF64}, ::Type{Int32}, args...) = KLU.klu_z_free_numeric(args...) +__free_num(::Type{ComplexF64}, ::Type{Int64}, args...) = KLU.klu_zl_free_numeric(args...) + +__analyze(::Type{Int32}, args...) 
= KLU.klu_analyze(args...) +__analyze(::Type{Int64}, args...) = KLU.klu_l_analyze(args...) + +__analyze!(::Type{Int32}, args...) = KLU.klu_analyze_given(args...) +__analyze!(::Type{Int64}, args...) = KLU.klu_l_analyze_given(args...) + +__factor(::Type{Float64}, ::Type{Int32}, args...) = KLU.klu_factor(args...) +__factor(::Type{Float64}, ::Type{Int64}, args...) = KLU.klu_l_factor(args...) +__factor(::Type{ComplexF64}, ::Type{Int32}, args...) = KLU.klu_z_factor(args...) +__factor(::Type{ComplexF64}, ::Type{Int64}, args...) = KLU.klu_zl_factor(args...) + +__rcond(::Type{Float64}, ::Type{Int32}, args...) = KLU.klu_rcond(args...) +__rcond(::Type{Float64}, ::Type{Int64}, args...) = KLU.klu_l_rcond(args...) +__rcond(::Type{ComplexF64}, ::Type{Int32}, args...) = KLU.klu_z_rcond(args...) +__rcond(::Type{ComplexF64}, ::Type{Int64}, args...) = KLU.klu_zl_rcond(args...) + +__rgrowth(::Type{Float64}, ::Type{Int32}, args...) = KLU.klu_rgrowth(args...) +__rgrowth(::Type{Float64}, ::Type{Int64}, args...) = KLU.klu_l_rgrowth(args...) +__rgrowth(::Type{ComplexF64}, ::Type{Int32}, args...) = KLU.klu_z_rgrowth(args...) +__rgrowth(::Type{ComplexF64}, ::Type{Int64}, args...) = KLU.klu_zl_rgrowth(args...) + +__condest(::Type{Float64}, ::Type{Int32}, args...) = KLU.klu_condest(args...) +__condest(::Type{Float64}, ::Type{Int64}, args...) = KLU.klu_l_condest(args...) +__condest(::Type{ComplexF64}, ::Type{Int32}, args...) = KLU.klu_z_condest(args...) +__condest(::Type{ComplexF64}, ::Type{Int64}, args...) = KLU.klu_zl_condest(args...) + +__refactor(::Type{Float64}, ::Type{Int32}, args...) = KLU.klu_refactor(args...) +__refactor(::Type{Float64}, ::Type{Int64}, args...) = KLU.klu_l_refactor(args...) +__refactor(::Type{ComplexF64}, ::Type{Int32}, args...) = KLU.klu_z_refactor(args...) +__refactor(::Type{ComplexF64}, ::Type{Int64}, args...) = KLU.klu_zl_refactor(args...) + +__solve(::Type{Float64}, ::Type{Int32}, args...) = KLU.klu_solve(args...) +__solve(::Type{Float64}, ::Type{Int64}, args...) = KLU.klu_l_solve(args...) +__solve(::Type{ComplexF64}, ::Type{Int32}, args...) = KLU.klu_z_solve(args...) +__solve(::Type{ComplexF64}, ::Type{Int64}, args...) = KLU.klu_zl_solve(args...) + +__symType(::Type{Int32}) = KLU.klu_symbolic +__symType(::Type{Int64}) = KLU.klu_l_symbolic + +__numType(::Type{Int32}) = KLU.klu_numeric +__numType(::Type{Int64}) = KLU.klu_l_numeric + +# Could rewrite with multiple dispatch, but more awkward because of the +# varying number of arguments between real vs complex cases: tsolve, extract. +# sort is only used in extract, so also not implemented here. \ No newline at end of file diff --git a/test/runtests.jl b/test/runtests.jl index 9e1311e..b68679b 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -2,7 +2,7 @@ using KLU using KLU: increment!, KLUITypes, decrement, klu!, KLUFactorization, klu_analyze!, klu_factor! using Test -using SparseArrays: SparseMatrixCSC, sparse, nnz +using SparseArrays: SparseMatrixCSC, sparse, nnz, nonzeros using LinearAlgebra @testset "KLU Wrappers" begin @@ -123,3 +123,17 @@ end @test issuccess(klu(A; allowsingular=true); allowsingular=true) end end + +@testset "full_factor = false" begin + for T in (Float64, ComplexF64, Float32, ComplexF32) + A = sparse(Matrix{T}([1 0; 1 1])) + nonzeros(A) .= zero(T) + F = klu(A; full_factor = false) + # we do need both here--for non 64 bit types, the nzval of F is not the same as A's. + nonzeros(A) .= one(T) + nonzeros(F) .= one(T) + klu_factor!(F) + b = ones(T, 2) + @test F\b ≈ A\b + end +end \ No newline at end of file
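
For reviewers, a sketch of the end-to-end workflow these changes enable (symbolic-only construction, deferred numeric factorization, in-place refactorization, and an in-place solve); the matrix, values, and right-hand sides below are illustrative only:

```julia
using SparseArrays
using KLU   # klu, klu!, klu_factor!, klu_refactor!, solve!, and nonzeros are exported

# Construct the factorization from the sparsity pattern only.
A = sparse([1.0 0.0; 1.0 1.0])
F = klu(A; full_factor = false)   # symbolic analysis only, no numeric factorization yet

# Once the actual values are in place, factor numerically, reusing the symbolic analysis.
klu_factor!(F)

b = [1.0, 3.0]
solve!(F, b)                      # overwrites b with A \ b, here [1.0, 2.0]

# If A's values change (same pattern), refactor in place instead of rebuilding F.
nonzeros(A) .= [2.0, 2.0, 2.0]
klu_refactor!(F, A)               # alias for klu!(F, A)
x = F \ [2.0, 4.0]                # solves the updated system; x ≈ [1.0, 1.0]
```

Since `A` here has `Float64` values, `nonzeros(F)` aliases `nonzeros(A)` (as the new `nonzeros` docstring notes), so the value update is already visible to `F` before the refactorization; for `Float32`/`ComplexF32` inputs the two vectors are distinct, which is why the added test writes to both.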