@@ -29,7 +29,7 @@
 `LUFactorization(pivot=LinearAlgebra.RowMaximum())`
 
 Julia's built-in `lu`. Equivalent to calling `lu!(A)`.
-
+
 * On dense matrices, this uses the current BLAS implementation of the user's computer,
   which by default is OpenBLAS but will use MKL if the user does `using MKL` in their
   system.
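For context (not part of this diff), a minimal usage sketch assuming LinearSolve.jl's standard `LinearProblem`/`solve` interface:

```julia
# Minimal sketch: solve a dense system with the LU algorithm documented above.
using LinearSolve, LinearAlgebra

A = rand(4, 4) + 4I          # shifted to be safely invertible
b = rand(4)
sol = solve(LinearProblem(A, b), LUFactorization())
norm(A * sol.u - b)          # residual on the order of machine epsilon
```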
@@ -135,7 +135,7 @@
 `QRFactorization(pivot=LinearAlgebra.NoPivot(), blocksize=16)`
 
 Julia's built-in `qr`. Equivalent to calling `qr!(A)`.
-
+
 * On dense matrices, this uses the current BLAS implementation of the user's computer,
   which by default is OpenBLAS but will use MKL if the user does `using MKL` in their
   system.
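The QR docstring mirrors the LU one; a hedged sketch of the same interface, trading some speed for robustness:

```julia
# Sketch: QR costs more than LU but is more robust on ill-conditioned systems.
using LinearSolve

A = [1.0 2.0; 3.0 4.0]
b = [5.0, 6.0]
sol = solve(LinearProblem(A, b), QRFactorization())
sol.u ≈ A \ b                # true
```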
@@ -242,7 +242,9 @@
 function do_factorization(alg::CholeskyFactorization, A, b, u)
     A = convert(AbstractMatrix, A)
     if A isa SparseMatrixCSC
-        fact = cholesky!(A; shift = alg.shift, check = false, perm = alg.perm)
+        # fact = cholesky!(A; shift = alg.shift, check = false, perm = alg.perm)
+        # fact = @time cholesky!(A; check = false)
+        fact = cholesky(A; shift = alg.shift, check = false, perm = alg.perm)
     elseif alg.pivot === Val(false) || alg.pivot === NoPivot()
         fact = cholesky!(A, alg.pivot; check = false)
     else
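If this change reads as intended, the sparse branch now calls the out-of-place `cholesky`: SparseArrays' in-place `cholesky!` is meant for updating an existing CHOLMOD factor, not for factoring a raw `SparseMatrixCSC`. A sketch of the call the new branch makes:

```julia
# Sketch of the sparse path after this change (SparseArrays' CHOLMOD wrapper):
# shift/check/perm are forwarded exactly as in the line added above.
using SparseArrays, LinearAlgebra

A = sparse([4.0 1.0; 1.0 3.0])   # symmetric positive definite
fact = cholesky(A; shift = 0.0, check = false, perm = nothing)
issuccess(fact) || error("Cholesky factorization failed")
x = fact \ [1.0, 2.0]
```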
@@ -268,6 +270,7 @@ function init_cacheval(alg::CholeskyFactorization, A, b, u, Pl, Pr,
                        maxiters::Int, abstol, reltol, verbose::Bool,
                        assumptions::OperatorAssumptions)
     ArrayInterface.cholesky_instance(convert(AbstractMatrix, A), alg.pivot)
+    # cholesky!(similar(A, 1, 1); check=false)
 end
 
 @static if VERSION < v"1.8beta"
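The new comment hints at an alternative way to seed the cache; a hypothetical sketch of that idea (the shipped code keeps `ArrayInterface.cholesky_instance`): factor a throwaway 1x1 matrix purely to obtain a value of the right factorization type.

```julia
# Hypothetical sketch of the commented-out idea: a dummy 1x1 factorization
# pins down the cache's concrete type before any real factorization runs.
using LinearAlgebra

A = rand(8, 8)                                 # stand-in for the user's matrix
dummy = cholesky(ones(eltype(A), 1, 1); check = false)
typeof(dummy)                                  # Cholesky{Float64, Matrix{Float64}}
```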
@@ -346,7 +349,7 @@
 `SVDFactorization(full=false, alg=LinearAlgebra.DivideAndConquer())`
 
 Julia's built-in `svd`. Equivalent to `svd!(A)`.
-
+
 * On dense matrices, this uses the current BLAS implementation of the user's computer
   which by default is OpenBLAS but will use MKL if the user does `using MKL` in their
   system.
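As with LU and QR above, a hedged sketch; SVD is the most expensive dense option but tolerates near-singular matrices:

```julia
# Sketch: SVD-based solve on a nearly singular matrix.
using LinearSolve

A = [1.0 1.0; 1.0 1.0 + 1e-10]
b = [1.0, 2.0]
sol = solve(LinearProblem(A, b), SVDFactorization())
```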
@@ -444,7 +447,7 @@
 `GenericFactorization(;fact_alg=LinearAlgebra.factorize)`: Constructs a linear solver from a generic
 factorization algorithm `fact_alg` which complies with the Base.LinearAlgebra
 factorization API. Quoting from Base:
-
+
   * If `A` is upper or lower triangular (or diagonal), no factorization of `A` is
     required. The system is then solved with either forward or backward substitution.
     For non-triangular square matrices, an LU factorization is used.
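A hedged sketch of the generic wrapper; `factorize` picks the factorization from the matrix structure, per the quoted Base behavior:

```julia
# Sketch: GenericFactorization defers to LinearAlgebra.factorize, which
# detects structure (diagonal here) and solves by substitution, per Base.
using LinearSolve, LinearAlgebra

A = Matrix(Diagonal([2.0, 4.0]))
b = [2.0, 8.0]
sol = solve(LinearProblem(A, b), GenericFactorization(; fact_alg = factorize))
sol.u                        # [1.0, 2.0]
```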
@@ -666,7 +669,7 @@
 """
 `UMFPACKFactorization(;reuse_symbolic=true, check_pattern=true)`
 
-A fast sparse multithreaded LU-factorization which specializes on sparsity
+A fast sparse multithreaded LU-factorization which specializes on sparsity
 patterns with “more structure”.
 
 !!! note
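A hedged usage sketch for the UMFPACK path (sparse input required):

```julia
# Sketch: UMFPACK LU on a sparse, diagonally dominated system; reuse_symbolic
# lets repeated solves with the same sparsity pattern skip symbolic analysis.
using LinearSolve, SparseArrays, LinearAlgebra

A = sprand(100, 100, 0.05) + sparse(10.0I, 100, 100)
b = rand(100)
sol = solve(LinearProblem(A, b), UMFPACKFactorization(; reuse_symbolic = true))
```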
@@ -850,7 +853,7 @@ Only supports sparse matrices.
 
 ## Keyword Arguments
 
-* shift: the shift argument in CHOLMOD.
+* shift: the shift argument in CHOLMOD.
 * perm: the perm argument in CHOLMOD.
 """
 Base.@kwdef struct CHOLMODFactorization{T} <: AbstractFactorization
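A hedged sketch for the CHOLMOD path, which targets sparse symmetric positive definite systems:

```julia
# Sketch: build a sparse SPD matrix and solve via CHOLMOD; shift/perm would be
# forwarded as described in the keyword list above.
using LinearSolve, SparseArrays, LinearAlgebra

n = 50
B = sprand(n, n, 0.05)
A = B'B + sparse(10.0I, n, n)    # sparse symmetric positive definite
b = rand(n)
sol = solve(LinearProblem(A, b), CHOLMODFactorization())
```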
@@ -916,12 +919,12 @@ end
 ## RFLUFactorization
 
 """
-`RFLUFactorization()`
+`RFLUFactorization()`
 
 A fast pure Julia LU-factorization implementation
 using RecursiveFactorization.jl. This is by far the fastest LU-factorization
 implementation, usually outperforming OpenBLAS and MKL for smaller matrices
-(<500x500), but currently optimized only for Base `Array` with `Float32` or `Float64`.
+(<500x500), but currently optimized only for Base `Array` with `Float32` or `Float64`.
 Additional optimization for complex matrices is in the works.
 """
 struct RFLUFactorization{P, T} <: AbstractFactorization
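A hedged sketch matching the docstring's guidance (small dense `Float64` matrices):

```julia
# Sketch: RecursiveFactorization's LU on a small dense matrix, the regime
# (<500x500) where the docstring says it outperforms OpenBLAS/MKL.
using LinearSolve, LinearAlgebra

A = rand(50, 50) + 50I
b = rand(50)
sol = solve(LinearProblem(A, b), RFLUFactorization())
```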
@@ -1179,7 +1182,7 @@ end
 # But I'm not sure it makes sense as a GenericFactorization
 # since it just uses `LAPACK.getrf!`.
 """
-`FastLUFactorization()`
+`FastLUFactorization()`
 
 The FastLapackInterface.jl version of the LU factorization. Notably,
 this version does not allow for choice of pivoting method.
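A hedged sketch; the interface is identical to `LUFactorization`, only the backend (preallocated LAPACK workspaces via FastLapackInterface.jl) differs, and no pivot choice is exposed:

```julia
# Sketch: FastLapackInterface-backed LU; note there is no pivot argument.
using LinearSolve, LinearAlgebra

A = rand(20, 20) + 20I
b = rand(20)
sol = solve(LinearProblem(A, b), FastLUFactorization())
```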
@@ -1210,7 +1213,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::FastLUFactorization; kwargs..
 end
 
 """
-`FastQRFactorization()`
+`FastQRFactorization()`
 
 The FastLapackInterface.jl version of the QR factorization.
 """