diff --git a/Project.toml b/Project.toml index 1a13fd494..f4f6c0295 100644 --- a/Project.toml +++ b/Project.toml @@ -23,6 +23,7 @@ Preferences = "21216c6a-2e73-6563-6e65-726566657250" RecursiveArrayTools = "731186ca-8d62-57ce-b412-fbd966d074cd" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" +SciMLLogging = "a6db7da4-7206-11f0-1eab-35f2a5dbe1d1" SciMLOperators = "c0aeaf25-5076-4817-a8d5-81caf7dfa961" Setfield = "efcf1570-3423-57d1-acb7-fd33fddbac46" StaticArraysCore = "1e83bf80-4336-4d27-bf5d-d5a4f845583c" @@ -124,6 +125,7 @@ Reexport = "1.2.2" SafeTestsets = "0.1" SciMLBase = "2.70" SciMLOperators = "1" +SciMLLogging = "1" Setfield = "1.1.1" SparseArrays = "1.10" Sparspak = "0.3.9" diff --git a/docs/src/advanced/developing.md b/docs/src/advanced/developing.md index 31b7a5d1f..ea70d32e1 100644 --- a/docs/src/advanced/developing.md +++ b/docs/src/advanced/developing.md @@ -19,7 +19,7 @@ struct MyLUFactorization{P} <: LinearSolve.SciMLLinearSolveAlgorithm end function LinearSolve.init_cacheval( alg::MyLUFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assump::LinearSolve.OperatorAssumptions) + verbose::LinearVerbosity, assump::LinearSolve.OperatorAssumptions) lu!(convert(AbstractMatrix, A)) end diff --git a/docs/src/basics/common_solver_opts.md b/docs/src/basics/common_solver_opts.md index 80c994621..b86139935 100644 --- a/docs/src/basics/common_solver_opts.md +++ b/docs/src/basics/common_solver_opts.md @@ -26,3 +26,92 @@ solve completely. Error controls only apply to iterative solvers. - `maxiters`: The number of iterations allowed. Defaults to `length(prob.b)` - `Pl,Pr`: The left and right preconditioners, respectively. For more information, see [the Preconditioners page](@ref prec). + +## Verbosity Controls + +The verbosity system in LinearSolve.jl provides fine-grained control over the diagnostic messages, warnings, and errors that are displayed during the solution of linear systems. + +The verbosity system is organized hierarchically into three main categories: + +1. Error Control - Messages related to fallbacks and error handling +2. Performance - Messages related to performance considerations +3. Numerical - Messages related to numerical solvers and iterations + +Each category can be configured independently, and individual settings can be adjusted to suit your needs. + +### Verbosity Levels +The following verbosity levels are available: + +#### Individual Settings +These settings are meant for individual settings within a category. These can also be used to set all of the individual settings in a group to the same value. +- Verbosity.None() - Suppress all messages +- Verbosity.Info() - Show message as log message at info level +- Verbosity.Warn() - Show warnings (default for most settings) +- Verbosity.Error() - Throw errors instead of warnings +- Verbosity.Level(n) - Show messages with a log level setting of n + +#### Group Settings +These settings are meant for controlling a group of settings. 
+- Verbosity.Default() - Use the default settings +- Verbosity.All() - Show all possible messages + +### Basic Usage + +#### Global Verbosity Control + +```julia +using LinearSolve + +# Suppress all messages +verbose = LinearVerbosity(Verbosity.None()) +prob = LinearProblem(A, b) +sol = solve(prob; verbose=verbose) + +# Show all messages +verbose = LinearVerbosity(Verbosity.All()) +sol = solve(prob; verbose=verbose) + +# Use default settings +verbose = LinearVerbosity(Verbosity.Default()) +sol = solve(prob; verbose=verbose) +``` + +#### Group Level Control + +```julia +# Customize by category +verbose = LinearVerbosity( + error_control = Verbosity.Warn(), # Show warnings for error control related issues + performance = Verbosity.None(), # Suppress performance messages + numerical = Verbosity.Info() # Show all numerical related log messages at info level +) + +sol = solve(prob; verbose=verbose) +``` + +#### Fine-grained Control +The constructor for `LinearVerbosity` allows you to set verbosity for each specific message toggle, giving you fine-grained control. +The verbosity settings for the toggles are automatically passed to the group objects. +```julia +# Set specific message types +verbose = LinearVerbosity( + default_lu_fallback = Verbosity.Info(), # Show info when LU fallback is used + KrylovJL_verbosity = Verbosity.Warn(), # Show warnings from KrylovJL + no_right_preconditioning = Verbosity.None(), # Suppress right preconditioning messages + KrylovKit_verbosity = Verbosity.Level(KrylovKit.WARN_LEVEL) # Set KrylovKit verbosity level using KrylovKit's own verbosity levels +) + +sol = solve(prob; verbose=verbose) + +``` + +#### Verbosity Levels +##### Error Control Settings +- default_lu_fallback: Controls messages when falling back to LU factorization (default: Warn) +##### Performance Settings +- no_right_preconditioning: Controls messages when right preconditioning is not used (default: Warn) +##### Numerical Settings +- using_IterativeSolvers: Controls messages when using the IterativeSolvers.jl package (default: Warn) +- IterativeSolvers_iterations: Controls messages about iteration counts from IterativeSolvers.jl (default: Warn) +- KrylovKit_verbosity: Controls messages from the KrylovKit.jl package (default: Warn) +- KrylovJL_verbosity: Controls verbosity of the KrylovJL.jl package (default: None) \ No newline at end of file diff --git a/ext/LinearSolveAMDGPUExt.jl b/ext/LinearSolveAMDGPUExt.jl index 4fad3d9f3..2b9dae94d 100644 --- a/ext/LinearSolveAMDGPUExt.jl +++ b/ext/LinearSolveAMDGPUExt.jl @@ -2,7 +2,7 @@ module LinearSolveAMDGPUExt using AMDGPU using LinearSolve: LinearSolve, LinearCache, AMDGPUOffloadLUFactorization, - AMDGPUOffloadQRFactorization, init_cacheval, OperatorAssumptions + AMDGPUOffloadQRFactorization, init_cacheval, OperatorAssumptions, LinearVerbosity using LinearSolve.LinearAlgebra, LinearSolve.SciMLBase # LU Factorization @@ -25,7 +25,7 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::AMDGPUOffloadLUFa end function LinearSolve.init_cacheval(alg::AMDGPUOffloadLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) AMDGPU.rocSOLVER.getrf!(AMDGPU.ROCArray(A)) end @@ -57,7 +57,7 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::AMDGPUOffloadQRFa end function LinearSolve.init_cacheval(alg::AMDGPUOffloadQRFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, 
reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A_gpu = AMDGPU.ROCArray(A) tau = AMDGPU.ROCVector{eltype(A_gpu)}(undef, min(size(A_gpu)...)) diff --git a/ext/LinearSolveBLISExt.jl b/ext/LinearSolveBLISExt.jl index 44177db90..b18f14f88 100644 --- a/ext/LinearSolveBLISExt.jl +++ b/ext/LinearSolveBLISExt.jl @@ -9,7 +9,7 @@ using LinearSolve using LinearAlgebra: BlasInt, LU using LinearAlgebra.LAPACK: require_one_based_indexing, chkfinite, chkstride1, @blasfunc, chkargsok -using LinearSolve: ArrayInterface, BLISLUFactorization, @get_cacheval, LinearCache, SciMLBase +using LinearSolve: ArrayInterface, BLISLUFactorization, @get_cacheval, LinearCache, SciMLBase, LinearVerbosity using SciMLBase: ReturnCode const global libblis = blis_jll.blis @@ -204,13 +204,13 @@ const PREALLOCATED_BLIS_LU = begin end function LinearSolve.init_cacheval(alg::BLISLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_BLIS_LU end function LinearSolve.init_cacheval(alg::BLISLUFactorization, A::AbstractMatrix{<:Union{Float32,ComplexF32,ComplexF64}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A = rand(eltype(A), 0, 0) ArrayInterface.lu_instance(A), Ref{BlasInt}() diff --git a/ext/LinearSolveBandedMatricesExt.jl b/ext/LinearSolveBandedMatricesExt.jl index deb85e25a..376556202 100644 --- a/ext/LinearSolveBandedMatricesExt.jl +++ b/ext/LinearSolveBandedMatricesExt.jl @@ -3,7 +3,7 @@ module LinearSolveBandedMatricesExt using BandedMatrices, LinearAlgebra, LinearSolve import LinearSolve: defaultalg, do_factorization, init_cacheval, DefaultLinearSolver, - DefaultAlgorithmChoice + DefaultAlgorithmChoice, LinearVerbosity # Defaults for BandedMatrices function defaultalg(A::BandedMatrix, b, oa::OperatorAssumptions{Bool}) @@ -41,14 +41,14 @@ for alg in (:SVDFactorization, :MKLLUFactorization, :DiagonalFactorization, :AppleAccelerateLUFactorization, :CholeskyFactorization) @eval begin function init_cacheval(::$(alg), ::BandedMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return nothing end end end function init_cacheval(::LUFactorization, A::BandedMatrix{T}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) where {T} + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T} (T <: BigFloat) && return qr(similar(A, 0, 0)) return lu(similar(A, 0, 0)) end @@ -61,7 +61,7 @@ for alg in (:SVDFactorization, :MKLLUFactorization, :DiagonalFactorization, :AppleAccelerateLUFactorization, :QRFactorization, :LUFactorization) @eval begin function init_cacheval(::$(alg), ::Symmetric{<:Number, <:BandedMatrix}, b, u, Pl, - Pr, maxiters::Int, abstol, reltol, verbose::Bool, + Pr, maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return nothing end diff --git a/ext/LinearSolveCUDAExt.jl b/ext/LinearSolveCUDAExt.jl index 80d559cb3..6ea50a074 100644 --- a/ext/LinearSolveCUDAExt.jl +++ b/ext/LinearSolveCUDAExt.jl @@ -7,7 +7,7 @@ using LinearSolve: LinearSolve, is_cusparse, defaultalg, cudss_loaded, DefaultLi error_no_cudss_lu, init_cacheval, OperatorAssumptions, CudaOffloadFactorization, CudaOffloadLUFactorization, CudaOffloadQRFactorization, 
CUDAOffload32MixedLUFactorization, - SparspakFactorization, KLUFactorization, UMFPACKFactorization + SparspakFactorization, KLUFactorization, UMFPACKFactorization, LinearVerbosity using LinearSolve.LinearAlgebra, LinearSolve.SciMLBase, LinearSolve.ArrayInterface using SciMLBase: AbstractSciMLOperator @@ -51,7 +51,7 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::CudaOffloadLUFact end function LinearSolve.init_cacheval(alg::CudaOffloadLUFactorization, A::AbstractArray, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) T = eltype(A) noUnitT = typeof(zero(T)) @@ -74,7 +74,7 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::CudaOffloadQRFact end function LinearSolve.init_cacheval(alg::CudaOffloadQRFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) qr(CUDA.CuArray(A)) end @@ -93,26 +93,26 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::CudaOffloadFactor end function LinearSolve.init_cacheval(alg::CudaOffloadFactorization, A::AbstractArray, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) qr(CUDA.CuArray(A)) end function LinearSolve.init_cacheval( ::SparspakFactorization, A::CUDA.CUSPARSE.CuSparseMatrixCSR, b, u, - Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + Pl, Pr, maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function LinearSolve.init_cacheval( ::KLUFactorization, A::CUDA.CUSPARSE.CuSparseMatrixCSR, b, u, - Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + Pl, Pr, maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function LinearSolve.init_cacheval( ::UMFPACKFactorization, A::CUDA.CUSPARSE.CuSparseMatrixCSR, b, u, - Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + Pl, Pr, maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -140,7 +140,7 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::CUDAOffload32Mixe end function LinearSolve.init_cacheval(alg::CUDAOffload32MixedLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Pre-allocate with Float32 arrays A_f32 = Float32.(A) diff --git a/ext/LinearSolveCUSOLVERRFExt.jl b/ext/LinearSolveCUSOLVERRFExt.jl index 68b72c604..0522d5e1a 100644 --- a/ext/LinearSolveCUSOLVERRFExt.jl +++ b/ext/LinearSolveCUSOLVERRFExt.jl @@ -10,7 +10,7 @@ using SciMLBase: SciMLBase, LinearProblem, ReturnCode function LinearSolve.init_cacheval(alg::LinearSolve.CUSOLVERRFFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -18,7 +18,7 @@ function LinearSolve.init_cacheval(alg::LinearSolve.CUSOLVERRFFactorization, A::Union{CuSparseMatrixCSR{Float64, Int32}, SparseMatrixCSC{Float64, <:Integer}}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, 
assumptions::OperatorAssumptions) # Create initial factorization with appropriate options nrhs = b isa AbstractMatrix ? size(b, 2) : 1 symbolic = alg.symbolic diff --git a/ext/LinearSolveCliqueTreesExt.jl b/ext/LinearSolveCliqueTreesExt.jl index 4c4530baf..d06a4e3fa 100644 --- a/ext/LinearSolveCliqueTreesExt.jl +++ b/ext/LinearSolveCliqueTreesExt.jl @@ -22,7 +22,7 @@ end function LinearSolve.init_cacheval( alg::CliqueTreesFactorization, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, abstol, - reltol, verbose::Bool, assumptions::OperatorAssumptions) + reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) symbfact = _symbolic(A, alg) cholfact, cholwork = cholinit(A, symbfact) linwork = lininit(1, cholfact) diff --git a/ext/LinearSolveFastAlmostBandedMatricesExt.jl b/ext/LinearSolveFastAlmostBandedMatricesExt.jl index 1ceff10c5..572693b2b 100644 --- a/ext/LinearSolveFastAlmostBandedMatricesExt.jl +++ b/ext/LinearSolveFastAlmostBandedMatricesExt.jl @@ -3,7 +3,7 @@ module LinearSolveFastAlmostBandedMatricesExt using FastAlmostBandedMatrices, LinearAlgebra, LinearSolve import LinearSolve: defaultalg, do_factorization, init_cacheval, DefaultLinearSolver, - DefaultAlgorithmChoice + DefaultAlgorithmChoice, LinearVerbosity function defaultalg(A::AlmostBandedMatrix, b, oa::OperatorAssumptions{Bool}) if oa.issq @@ -21,7 +21,7 @@ for alg in (:SVDFactorization, :MKLLUFactorization, :DiagonalFactorization, :AppleAccelerateLUFactorization, :CholeskyFactorization, :LUFactorization) @eval begin function init_cacheval(::$(alg), ::AlmostBandedMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return nothing end end diff --git a/ext/LinearSolveFastLapackInterfaceExt.jl b/ext/LinearSolveFastLapackInterfaceExt.jl index 45b690037..f924cc8cd 100644 --- a/ext/LinearSolveFastLapackInterfaceExt.jl +++ b/ext/LinearSolveFastLapackInterfaceExt.jl @@ -1,6 +1,7 @@ module LinearSolveFastLapackInterfaceExt using LinearSolve, LinearAlgebra +using LinearSolve: LinearVerbosity using FastLapackInterface struct WorkspaceAndFactors{W, F} @@ -9,7 +10,7 @@ struct WorkspaceAndFactors{W, F} end function LinearSolve.init_cacheval(::FastLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ws = LUWs(A) return WorkspaceAndFactors( @@ -36,7 +37,7 @@ end function LinearSolve.init_cacheval( alg::FastQRFactorization{NoPivot}, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ws = QRWYWs(A; blocksize = alg.blocksize) return WorkspaceAndFactors(ws, @@ -44,7 +45,7 @@ function LinearSolve.init_cacheval( end function LinearSolve.init_cacheval( ::FastQRFactorization{ColumnNorm}, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ws = QRpWs(A) return WorkspaceAndFactors(ws, @@ -52,10 +53,10 @@ function LinearSolve.init_cacheval( end function LinearSolve.init_cacheval(alg::FastQRFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return init_cacheval(alg, convert(AbstractMatrix, A), b, u, Pl, Pr, - maxiters::Int, abstol, 
reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) end diff --git a/ext/LinearSolveForwardDiffExt.jl b/ext/LinearSolveForwardDiffExt.jl index fb0ec730f..28b878a34 100644 --- a/ext/LinearSolveForwardDiffExt.jl +++ b/ext/LinearSolveForwardDiffExt.jl @@ -1,12 +1,13 @@ module LinearSolveForwardDiffExt using LinearSolve -using LinearSolve: SciMLLinearSolveAlgorithm, __init +using LinearSolve: SciMLLinearSolveAlgorithm, __init, LinearVerbosity using LinearAlgebra using ForwardDiff using ForwardDiff: Dual, Partials using SciMLBase using RecursiveArrayTools +using SciMLLogging: Verbosity const DualLinearProblem = LinearProblem{ <:Union{Number, <:AbstractArray, Nothing}, iip, @@ -136,7 +137,7 @@ function __dual_init( abstol = LinearSolve.default_tol(real(eltype(prob.b))), reltol = LinearSolve.default_tol(real(eltype(prob.b))), maxiters::Int = length(prob.b), - verbose::Bool = false, + verbose = LinearVerbosity(Verbosity.None()), Pl = nothing, Pr = nothing, assumptions = OperatorAssumptions(issquare(prob.A)), diff --git a/ext/LinearSolveHYPREExt.jl b/ext/LinearSolveHYPREExt.jl index ad7d98333..2970aa544 100644 --- a/ext/LinearSolveHYPREExt.jl +++ b/ext/LinearSolveHYPREExt.jl @@ -5,7 +5,8 @@ using HYPRE.LibHYPRE: HYPRE_Complex using HYPRE: HYPRE, HYPREMatrix, HYPRESolver, HYPREVector using LinearSolve: HYPREAlgorithm, LinearCache, LinearProblem, LinearSolve, OperatorAssumptions, default_tol, init_cacheval, __issquare, - __conditioning, LinearSolveAdjoint + __conditioning, LinearSolveAdjoint, LinearVerbosity +using SciMLLogging: Verbosity, verbosity_to_int using SciMLBase: LinearProblem, LinearAliasSpecifier, SciMLBase using UnPack: @unpack using Setfield: @set! @@ -22,7 +23,7 @@ end function LinearSolve.init_cacheval(alg::HYPREAlgorithm, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) return HYPRECache(nothing, nothing, nothing, nothing, true, true, true) end @@ -64,7 +65,7 @@ function SciMLBase.init(prob::LinearProblem, alg::HYPREAlgorithm, eltype(prob.A)), # TODO: Implement length() for HYPREVector in HYPRE.jl? maxiters::Int = prob.b isa HYPREVector ? 1000 : length(prob.b), - verbose::Bool = false, + verbose = LinearVerbosity(Verbosity.None()), Pl = LinearAlgebra.I, Pr = LinearAlgebra.I, assumptions = OperatorAssumptions(), @@ -111,6 +112,18 @@ function SciMLBase.init(prob::LinearProblem, alg::HYPREAlgorithm, alias_b = aliases.alias_b end + if verbose isa Bool + #@warn "Using `true` or `false` for `verbose` is being deprecated. Please use a `LinearVerbosity` type to specify verbosity settings. + # For details see the verbosity section of the common solver options documentation page." + if verbose + verbose = LinearVerbosity() + else + verbose = LinearVerbosity(Verbosity.None()) + end + elseif verbose isa Verbosity.Type + verbose = LinearVerbosity(verbose) + end + A = A isa HYPREMatrix ? A : HYPREMatrix(A) b = b isa HYPREVector ? b : HYPREVector(b) u0 = u0 isa HYPREVector ? u0 : (u0 === nothing ? nothing : HYPREVector(u0)) @@ -159,10 +172,11 @@ function create_solver(alg::HYPREAlgorithm, cache::LinearCache) solver = create_solver(alg.solver, comm) # Construct solver options + verbose = isnothing(cache.verbose.numerical) ? 
0 : verbosity_to_int(cache.verbose.numerical.HYPRE_verbosity) solver_options = (; AbsoluteTol = cache.abstol, MaxIter = cache.maxiters, - PrintLevel = Int(cache.verbose), + PrintLevel = verbose, Tol = cache.reltol) # Preconditioner (uses Pl even though it might not be a *left* preconditioner just *a* diff --git a/ext/LinearSolveIterativeSolversExt.jl b/ext/LinearSolveIterativeSolversExt.jl index 901b6bf74..253d45e00 100644 --- a/ext/LinearSolveIterativeSolversExt.jl +++ b/ext/LinearSolveIterativeSolversExt.jl @@ -1,8 +1,9 @@ module LinearSolveIterativeSolversExt using LinearSolve, LinearAlgebra -using LinearSolve: LinearCache, DEFAULT_PRECS +using LinearSolve: LinearCache, DEFAULT_PRECS, LinearVerbosity import LinearSolve: IterativeSolversJL +using SciMLLogging: @SciMLMessage, Verbosity using IterativeSolvers @@ -47,7 +48,7 @@ LinearSolve.default_alias_b(::IterativeSolversJL, ::Any, ::Any) = true function LinearSolve.init_cacheval(alg::IterativeSolversJL, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) restart = (alg.gmres_restart == 0) ? min(20, size(A, 1)) : alg.gmres_restart s = :idrs_s in keys(alg.kwargs) ? alg.kwargs.idrs_s : 4 # shadow space @@ -56,7 +57,8 @@ function LinearSolve.init_cacheval(alg::IterativeSolversJL, A, b, u, Pl, Pr, max iterable = if alg.generate_iterator === IterativeSolvers.cg_iterator! !LinearSolve._isidentity_struct(Pr) && - @warn "$(alg.generate_iterator) doesn't support right preconditioning" + @SciMLMessage("$(alg.generate_iterator) doesn't support right preconditioning", + verbose, :no_right_preconditioning, :performance) alg.generate_iterator(u, A, b, Pl; kwargs...) elseif alg.generate_iterator === IterativeSolvers.gmres_iterable! @@ -64,7 +66,8 @@ function LinearSolve.init_cacheval(alg::IterativeSolversJL, A, b, u, Pl, Pr, max kwargs...) elseif alg.generate_iterator === IterativeSolvers.idrs_iterable! !!LinearSolve._isidentity_struct(Pr) && - @warn "$(alg.generate_iterator) doesn't support right preconditioning" + @SciMLMessage("$(alg.generate_iterator) doesn't support right preconditioning", + verbose, :no_right_preconditioning, :performance) history = IterativeSolvers.ConvergenceHistory(partial = true) history[:abstol] = abstol history[:reltol] = reltol @@ -72,7 +75,8 @@ function LinearSolve.init_cacheval(alg::IterativeSolversJL, A, b, u, Pl, Pr, max alg.kwargs...) elseif alg.generate_iterator === IterativeSolvers.bicgstabl_iterator! !!LinearSolve._isidentity_struct(Pr) && - @warn "$(alg.generate_iterator) doesn't support right preconditioning" + @SciMLMessage("$(alg.generate_iterator) doesn't support right preconditioning", + verbose, :no_right_preconditioning, :performance) alg.generate_iterator(u, A, b, alg.args...; Pl = Pl, abstol = abstol, reltol = reltol, max_mv_products = maxiters * 2, @@ -103,14 +107,15 @@ function SciMLBase.solve!(cache::LinearCache, alg::IterativeSolversJL; kwargs... end purge_history!(cache.cacheval, cache.u, cache.b) - cache.verbose && println("Using IterativeSolvers.$(alg.generate_iterator)") + @SciMLMessage("Using IterativeSolvers.$(alg.generate_iterator)", + cache.verbose, :using_IterativeSolvers, :numerical) i = 0 for iter in enumerate(cache.cacheval) i += 1 - cache.verbose && println("Iter: $(iter[1]), residual: $(iter[2])") + @SciMLMessage("Iter: $(iter[1]), residual: $(iter[2])", + cache.verbose, :IterativeSolvers_iterations, :numerical) # TODO inject callbacks KSP into solve! 
cb!(cache.cacheval) end - cache.verbose && println() resid = cache.cacheval isa IterativeSolvers.IDRSIterable ? cache.cacheval.R : cache.cacheval.residual diff --git a/ext/LinearSolveKrylovKitExt.jl b/ext/LinearSolveKrylovKitExt.jl index 1aa1e5d52..0271b6796 100644 --- a/ext/LinearSolveKrylovKitExt.jl +++ b/ext/LinearSolveKrylovKitExt.jl @@ -2,6 +2,7 @@ module LinearSolveKrylovKitExt using LinearSolve, KrylovKit, LinearAlgebra using LinearSolve: LinearCache, DEFAULT_PRECS +using SciMLLogging: Verbosity, verbosity_to_int function LinearSolve.KrylovKitJL(args...; KrylovAlg = KrylovKit.GMRES, gmres_restart = 0, @@ -25,7 +26,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::KrylovKitJL; kwargs...) atol = float(cache.abstol) rtol = float(cache.reltol) maxiter = cache.maxiters - verbosity = cache.verbose ? 1 : 0 + verbosity = isnothing(cache.verbose.numerical) ? 0 : verbosity_to_int(cache.verbose.numerical.KrylovKit_verbosity) krylovdim = (alg.gmres_restart == 0) ? min(20, size(cache.A, 1)) : alg.gmres_restart kwargs = (atol = atol, rtol = rtol, maxiter = maxiter, verbosity = verbosity, diff --git a/ext/LinearSolveMetalExt.jl b/ext/LinearSolveMetalExt.jl index de0175b86..0207b1425 100644 --- a/ext/LinearSolveMetalExt.jl +++ b/ext/LinearSolveMetalExt.jl @@ -4,13 +4,13 @@ using Metal, LinearSolve using LinearAlgebra, SciMLBase using SciMLBase: AbstractSciMLOperator using LinearSolve: ArrayInterface, MKLLUFactorization, MetalOffload32MixedLUFactorization, - @get_cacheval, LinearCache, SciMLBase, OperatorAssumptions + @get_cacheval, LinearCache, SciMLBase, OperatorAssumptions, LinearVerbosity default_alias_A(::MetalLUFactorization, ::Any, ::Any) = false default_alias_b(::MetalLUFactorization, ::Any, ::Any) = false function LinearSolve.init_cacheval(alg::MetalLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(convert(AbstractMatrix, A)) end @@ -34,7 +34,7 @@ default_alias_A(::MetalOffload32MixedLUFactorization, ::Any, ::Any) = false default_alias_b(::MetalOffload32MixedLUFactorization, ::Any, ::Any) = false function LinearSolve.init_cacheval(alg::MetalOffload32MixedLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Pre-allocate with Float32 arrays A_f32 = Float32.(convert(AbstractMatrix, A)) diff --git a/ext/LinearSolvePardisoExt.jl b/ext/LinearSolvePardisoExt.jl index 5b459d8cc..82fe64055 100644 --- a/ext/LinearSolvePardisoExt.jl +++ b/ext/LinearSolvePardisoExt.jl @@ -3,8 +3,8 @@ module LinearSolvePardisoExt using Pardiso, LinearSolve using SparseArrays using SparseArrays: nonzeros, rowvals, getcolptr -using LinearSolve: PardisoJL, @unpack - +using LinearSolve: PardisoJL, @unpack, LinearVerbosity +using SciMLLogging: @SciMLMessage, verbosity_to_bool using LinearSolve.SciMLBase LinearSolve.needs_concrete_A(alg::PardisoJL) = true @@ -20,7 +20,7 @@ function LinearSolve.init_cacheval(alg::PardisoJL, maxiters::Int, abstol, reltol, - verbose::Bool, + verbose::LinearVerbosity, assumptions::LinearSolve.OperatorAssumptions) @unpack nprocs, solver_type, matrix_type, cache_analysis, iparm, dparm, vendor = alg A = convert(AbstractMatrix, A) @@ -73,8 +73,12 @@ function LinearSolve.init_cacheval(alg::PardisoJL, error("Number type not supported by Pardiso") end end - verbose && Pardiso.set_msglvl!(solver, Pardiso.MESSAGE_LEVEL_ON) - + + 
if !isnothing(verbose.numerical) + if verbosity_to_bool(verbose.numerical.pardiso_verbosity) + Pardiso.set_msglvl!(solver, Pardiso.MESSAGE_LEVEL_ON) + end + end #= Note: It is recommended to use IPARM(11)=1 (scaling) and IPARM(13)=1 (matchings) for highly indefinite symmetric matrices e.g. from interior point optimizations or saddle point problems. diff --git a/ext/LinearSolveRecursiveFactorizationExt.jl b/ext/LinearSolveRecursiveFactorizationExt.jl index 3e70807f7..11f95b596 100644 --- a/ext/LinearSolveRecursiveFactorizationExt.jl +++ b/ext/LinearSolveRecursiveFactorizationExt.jl @@ -2,7 +2,7 @@ module LinearSolveRecursiveFactorizationExt using LinearSolve: LinearSolve, userecursivefactorization, LinearCache, @get_cacheval, RFLUFactorization, RF32MixedLUFactorization, default_alias_A, - default_alias_b + default_alias_b, LinearVerbosity using LinearSolve.LinearAlgebra, LinearSolve.ArrayInterface, RecursiveFactorization using SciMLBase: SciMLBase, ReturnCode @@ -42,7 +42,7 @@ const PREALLOCATED_RF32_LU = begin end function LinearSolve.init_cacheval(alg::RF32MixedLUFactorization{P, T}, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::LinearSolve.OperatorAssumptions) where {P, T} # Pre-allocate appropriate 32-bit arrays based on input type if eltype(A) <: Complex diff --git a/ext/LinearSolveSparseArraysExt.jl b/ext/LinearSolveSparseArraysExt.jl index 57e135164..273b135ae 100644 --- a/ext/LinearSolveSparseArraysExt.jl +++ b/ext/LinearSolveSparseArraysExt.jl @@ -4,7 +4,7 @@ using LinearSolve: LinearSolve, BLASELTYPES, pattern_changed, ArrayInterface, @get_cacheval, CHOLMODFactorization, GenericFactorization, GenericLUFactorization, KLUFactorization, LUFactorization, NormalCholeskyFactorization, - OperatorAssumptions, + OperatorAssumptions, LinearVerbosity, QRFactorization, RFLUFactorization, UMFPACKFactorization, solve using ArrayInterface: ArrayInterface using LinearAlgebra: LinearAlgebra, I, Hermitian, Symmetric, cholesky, ldiv!, lu, lu!, QR @@ -34,7 +34,7 @@ end function LinearSolve.init_cacheval(alg::RFLUFactorization, A::Union{AbstractSparseArray, LinearSolve.SciMLOperators.AbstractSciMLOperator}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing, nothing end @@ -65,7 +65,7 @@ end function LinearSolve.init_cacheval(alg::GenericFactorization, A::Union{Hermitian{T, <:SparseMatrixCSC}, Symmetric{T, <:SparseMatrixCSC}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T} newA = copy(convert(AbstractMatrix, A)) LinearSolve.do_factorization(alg, newA, b, u) @@ -78,7 +78,7 @@ function LinearSolve.init_cacheval( alg::LUFactorization, A::AbstractSparseArray{<:Number, <:Integer}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -86,7 +86,7 @@ function LinearSolve.init_cacheval( alg::GenericLUFactorization, A::AbstractSparseArray{<:Number, <:Integer}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -94,7 +94,7 @@ function LinearSolve.init_cacheval( alg::UMFPACKFactorization, A::AbstractArray, b, u, Pl, Pr, maxiters::Int, abstol, 
reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -102,7 +102,7 @@ function LinearSolve.init_cacheval( alg::LUFactorization, A::AbstractSparseArray{Float64, Int64}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_UMFPACK end @@ -110,7 +110,7 @@ function LinearSolve.init_cacheval( alg::LUFactorization, A::AbstractSparseArray{T, Int64}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} if LinearSolve.is_cusparse(A) ArrayInterface.lu_instance(A) else @@ -123,7 +123,7 @@ function LinearSolve.init_cacheval( alg::LUFactorization, A::AbstractSparseArray{T, Int32}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} if LinearSolve.is_cusparse(A) ArrayInterface.lu_instance(A) else @@ -136,7 +136,7 @@ function LinearSolve.init_cacheval( alg::LUFactorization, A::LinearSolve.GPUArraysCore.AnyGPUArray, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end @@ -144,7 +144,7 @@ function LinearSolve.init_cacheval( alg::UMFPACKFactorization, A::AbstractSparseArray{Float64, Int}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_UMFPACK end @@ -152,7 +152,7 @@ function LinearSolve.init_cacheval( alg::UMFPACKFactorization, A::LinearSolve.GPUArraysCore.AnyGPUArray, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -160,7 +160,7 @@ function LinearSolve.init_cacheval( alg::UMFPACKFactorization, A::AbstractSparseArray{T, Int64}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC{T, Int64}( zero(Int64), zero(Int64), [Int64(1)], Int64[], T[])) end @@ -169,7 +169,7 @@ function LinearSolve.init_cacheval( alg::UMFPACKFactorization, A::AbstractSparseArray{T, Int32}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC{T, Int32}( zero(Int32), zero(Int32), [Int32(1)], Int32[], T[])) end @@ -218,7 +218,7 @@ function LinearSolve.init_cacheval( alg::KLUFactorization, A::AbstractArray, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -226,7 +226,7 @@ function LinearSolve.init_cacheval( alg::KLUFactorization, A::AbstractSparseArray{Float64, Int64}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) 
PREALLOCATED_KLU end @@ -234,7 +234,7 @@ function LinearSolve.init_cacheval( alg::KLUFactorization, A::LinearSolve.GPUArraysCore.AnyGPUArray, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -242,7 +242,7 @@ function LinearSolve.init_cacheval( alg::KLUFactorization, A::AbstractSparseArray{Float64, Int32}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) KLU.KLUFactorization(SparseMatrixCSC{Float64, Int32}( 0, 0, [Int32(1)], Int32[], Float64[])) end @@ -288,7 +288,7 @@ function LinearSolve.init_cacheval(alg::CHOLMODFactorization, A::Union{SparseMatrixCSC{T, Int}, Symmetric{T, SparseMatrixCSC{T, Int}}}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T <: + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: Float64} PREALLOCATED_CHOLMOD end @@ -297,7 +297,7 @@ function LinearSolve.init_cacheval(alg::CHOLMODFactorization, A::Union{SparseMatrixCSC{T, Int}, Symmetric{T, SparseMatrixCSC{T, Int}}}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T <: + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} cholesky(sparse(reshape([one(T)], 1, 1))) end @@ -306,14 +306,14 @@ function LinearSolve.init_cacheval(alg::CHOLMODFactorization, A::AbstractArray, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function LinearSolve.init_cacheval(alg::NormalCholeskyFactorization, A::Union{AbstractSparseArray{T}, LinearSolve.GPUArraysCore.AnyGPUArray, Symmetric{T, <:AbstractSparseArray{T}}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} ArrayInterface.cholesky_instance(convert(AbstractMatrix, A)) end @@ -373,20 +373,20 @@ function LinearSolve.init_cacheval( alg::QRFactorization, A::AbstractSparseArray{<:Number, <:Integer}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function LinearSolve.init_cacheval( alg::QRFactorization, A::SparseMatrixCSC{Float64, <:Integer}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(convert(AbstractMatrix, A), alg.pivot) end function LinearSolve.init_cacheval( alg::QRFactorization, A::Symmetric{<:Number, <:SparseMatrixCSC}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return nothing end diff --git a/ext/LinearSolveSparspakExt.jl b/ext/LinearSolveSparspakExt.jl index 256ff617c..a42566fbd 100644 --- a/ext/LinearSolveSparspakExt.jl +++ b/ext/LinearSolveSparspakExt.jl @@ -1,6 +1,7 @@ module LinearSolveSparspakExt using LinearSolve, LinearAlgebra +using LinearSolve: LinearVerbosity using Sparspak using Sparspak.SparseCSCInterface.SparseArrays using SparseArrays: AbstractSparseMatrixCSC, nonzeros, rowvals, getcolptr @@ -12,14 +13,14 @@ function LinearSolve.init_cacheval( 
::SparspakFactorization, A::SparseMatrixCSC{Float64, Int}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_SPARSEPAK end function LinearSolve.init_cacheval( ::SparspakFactorization, A::AbstractSparseMatrixCSC{Tv, Ti}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {Tv, Ti} + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {Tv, Ti} if size(A, 1) == size(A, 2) A = convert(AbstractMatrix, A) if A isa SparseArrays.AbstractSparseArray diff --git a/src/LinearSolve.jl b/src/LinearSolve.jl index f01d80b60..b513c6deb 100644 --- a/src/LinearSolve.jl +++ b/src/LinearSolve.jl @@ -21,6 +21,7 @@ using SciMLBase: SciMLBase, LinearAliasSpecifier, AbstractSciMLOperator, using SciMLOperators: SciMLOperators, AbstractSciMLOperator, IdentityOperator, MatrixOperator, has_ldiv!, issquare +using SciMLLogging: Verbosity, @SciMLMessage, verbosity_to_int, @match, AbstractVerbositySpecifier using Setfield: @set, @set! using UnPack: @unpack using DocStringExtensions: DocStringExtensions @@ -361,6 +362,7 @@ const BLASELTYPES = Union{Float32, Float64, ComplexF32, ComplexF64} function defaultalg_symbol end +include("verbosity.jl") include("generic_lufact.jl") include("common.jl") include("extension_algs.jl") @@ -498,4 +500,7 @@ export OperatorAssumptions, OperatorCondition export LinearSolveAdjoint +export LinearVerbosity, LinearErrorControlVerbosity, LinearPerformanceVerbosity, + LinearNumericalVerbosity + end diff --git a/src/appleaccelerate.jl b/src/appleaccelerate.jl index 2af9e63a6..5cc3d1e0d 100644 --- a/src/appleaccelerate.jl +++ b/src/appleaccelerate.jl @@ -235,14 +235,14 @@ const PREALLOCATED_APPLE_LU = begin end function LinearSolve.init_cacheval(alg::AppleAccelerateLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_APPLE_LU end function LinearSolve.init_cacheval(alg::AppleAccelerateLUFactorization, A::AbstractMatrix{<:Union{Float32, ComplexF32, ComplexF64}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A = rand(eltype(A), 0, 0) luinst = ArrayInterface.lu_instance(A) @@ -295,7 +295,7 @@ const PREALLOCATED_APPLE32_LU = begin end function LinearSolve.init_cacheval(alg::AppleAccelerate32MixedLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Pre-allocate appropriate 32-bit arrays based on input type if eltype(A) <: Complex diff --git a/src/common.jl b/src/common.jl index a3d8d8971..eacb5e608 100644 --- a/src/common.jl +++ b/src/common.jl @@ -89,7 +89,7 @@ solving and caching of factorizations and intermediate results. - `abstol::Ttol`: Absolute tolerance for iterative solvers. - `reltol::Ttol`: Relative tolerance for iterative solvers. - `maxiters::Int`: Maximum number of iterations for iterative solvers. -- `verbose::Bool`: Whether to print verbose output during solving. +- `verbose::LinearVerbosity`: Whether to print verbose output during solving. - `assumptions::OperatorAssumptions{issq}`: Assumptions about the operator properties. - `sensealg::S`: Sensitivity analysis algorithm for automatic differentiation. 
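The hunk below adds a compatibility branch in `__init` so the legacy `verbose::Bool` keyword keeps working alongside the new `LinearVerbosity` type. A minimal illustrative sketch of the resulting caller-facing behavior, assuming `Verbosity` is imported from SciMLLogging as the new documentation does:

```julia
using LinearSolve
using SciMLLogging: Verbosity  # assumed import; the added docs use `Verbosity` directly

A = rand(4, 4); b = rand(4)
prob = LinearProblem(A, b)

# Legacy Bool values are converted by the compatibility branch below:
sol1 = solve(prob; verbose = true)   # mapped to the default LinearVerbosity() settings
sol2 = solve(prob; verbose = false)  # all messages suppressed

# A bare Verbosity preset is wrapped, equivalent to LinearVerbosity(Verbosity.All())
sol3 = solve(prob; verbose = Verbosity.All())
```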
@@ -119,7 +119,7 @@ mutable struct LinearCache{TA, Tb, Tu, Tp, Talg, Tc, Tl, Tr, Ttol, issq, S} abstol::Ttol reltol::Ttol maxiters::Int - verbose::Bool + verbose::LinearVerbosity assumptions::OperatorAssumptions{issq} sensealg::S end @@ -267,7 +267,7 @@ function __init(prob::LinearProblem, alg::SciMLLinearSolveAlgorithm, abstol = default_tol(real(eltype(prob.b))), reltol = default_tol(real(eltype(prob.b))), maxiters::Int = length(prob.b), - verbose::Bool = false, + verbose = LinearVerbosity(), Pl = nothing, Pr = nothing, assumptions = OperatorAssumptions(issquare(prob.A)), @@ -324,6 +324,18 @@ function __init(prob::LinearProblem, alg::SciMLLinearSolveAlgorithm, deepcopy(A) end + if verbose isa Bool + #@warn "Using `true` or `false` for `verbose` is being deprecated. Please use a `LinearVerbosity` type to specify verbosity settings. + # For details see the verbosity section of the common solver options documentation page." + if verbose + verbose = LinearVerbosity() + else + verbose = LinearVerbosity{false}(nothing, nothing, nothing) + end + elseif verbose isa Verbosity.Type + verbose = LinearVerbosity(verbose) + end + b = if issparsematrix(b) && !(A isa Diagonal) Array(b) # the solution to a linear solve will always be dense! elseif alias_b || b isa SVector diff --git a/src/default.jl b/src/default.jl index 39e87a1cc..9c6934591 100644 --- a/src/default.jl +++ b/src/default.jl @@ -454,7 +454,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::Nothing, end function init_cacheval(alg::Nothing, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assump::OperatorAssumptions) + verbose::LinearVerbosity, assump::OperatorAssumptions) init_cacheval(defaultalg(A, b, assump), A, b, u, Pl, Pr, maxiters, abstol, reltol, verbose, assump) @@ -465,7 +465,7 @@ cache.cacheval = NamedTuple(LUFactorization = cache of LUFactorization, ...) """ @generated function init_cacheval(alg::DefaultLinearSolver, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assump::OperatorAssumptions) + verbose::LinearVerbosity, assump::OperatorAssumptions) caches = map(first.(EnumX.symbol_map(DefaultAlgorithmChoice.T))) do alg if alg === :KrylovJL_GMRES || alg === :KrylovJL_CRAIGMR || alg === :KrylovJL_LSMR quote @@ -517,7 +517,8 @@ end newex = quote sol = SciMLBase.solve!(cache, $(algchoice_to_alg(alg)), args...; kwargs...) if sol.retcode === ReturnCode.Failure && alg.safetyfallback - ## TODO: Add verbosity logging here about using the fallback + @SciMLMessage("LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.", + cache.verbose, :default_lu_fallback, :error_control) sol = SciMLBase.solve!( cache, QRFactorization(ColumnNorm()), args...; kwargs...) SciMLBase.build_linear_solution(alg, sol.u, sol.resid, sol.cache; @@ -537,7 +538,8 @@ end sol = SciMLBase.solve!(cache, $(algchoice_to_alg(alg)), args...; kwargs...) if sol.retcode === ReturnCode.Failure && alg.safetyfallback - ## TODO: Add verbosity logging here about using the fallback + @SciMLMessage("LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.", + cache.verbose, :default_lu_fallback, :error_control) sol = SciMLBase.solve!( cache, QRFactorization(ColumnNorm()), args...; kwargs...) SciMLBase.build_linear_solution(alg, sol.u, sol.resid, sol.cache; @@ -557,7 +559,8 @@ end sol = SciMLBase.solve!(cache, $(algchoice_to_alg(alg)), args...; kwargs...) 
if sol.retcode === ReturnCode.Failure && alg.safetyfallback - ## TODO: Add verbosity logging here about using the fallback + @SciMLMessage("LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.", + cache.verbose, :default_lu_fallback, :error_control) sol = SciMLBase.solve!( cache, QRFactorization(ColumnNorm()), args...; kwargs...) SciMLBase.build_linear_solution(alg, sol.u, sol.resid, sol.cache; @@ -577,7 +580,8 @@ end sol = SciMLBase.solve!(cache, $(algchoice_to_alg(alg)), args...; kwargs...) if sol.retcode === ReturnCode.Failure && alg.safetyfallback - ## TODO: Add verbosity logging here about using the fallback + @SciMLMessage("LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.", + cache.verbose, :default_lu_fallback, :error_control) sol = SciMLBase.solve!( cache, QRFactorization(ColumnNorm()), args...; kwargs...) SciMLBase.build_linear_solution(alg, sol.u, sol.resid, sol.cache; @@ -597,7 +601,8 @@ end sol = SciMLBase.solve!(cache, $(algchoice_to_alg(alg)), args...; kwargs...) if sol.retcode === ReturnCode.Failure && alg.safetyfallback - ## TODO: Add verbosity logging here about using the fallback + @SciMLMessage("LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.", + cache.verbose, :default_lu_fallback, :error_control) sol = SciMLBase.solve!( cache, QRFactorization(ColumnNorm()), args...; kwargs...) SciMLBase.build_linear_solution(alg, sol.u, sol.resid, sol.cache; diff --git a/src/factorization.jl b/src/factorization.jl index 9f9065c4a..ca335d0ed 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -50,14 +50,14 @@ end # RF Bad fallback: will fail if `A` is just a stand-in # This should instead just create the factorization type. 
function init_cacheval(alg::AbstractFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, - reltol, verbose::Bool, assumptions::OperatorAssumptions) + reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) do_factorization(alg, convert(AbstractMatrix, A), b, u) end ## RFLU Factorization function LinearSolve.init_cacheval(alg::RFLUFactorization, A, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)) ArrayInterface.lu_instance(convert(AbstractMatrix, A)), ipiv end @@ -65,14 +65,14 @@ end function LinearSolve.init_cacheval( alg::RFLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_LU, PREALLOCATED_IPIV end function LinearSolve.init_cacheval(alg::RFLUFactorization, A::Union{Diagonal, SymTridiagonal, Tridiagonal}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing, nothing end @@ -171,7 +171,7 @@ end function init_cacheval( alg::GenericLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)) ArrayInterface.lu_instance(convert(AbstractMatrix, A)), ipiv @@ -179,7 +179,7 @@ end function init_cacheval( alg::GenericLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_LU, PREALLOCATED_IPIV end @@ -211,21 +211,21 @@ end function init_cacheval( alg::LUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(convert(AbstractMatrix, A)) end function init_cacheval(alg::LUFactorization, A::Union{<:Adjoint, <:Transpose}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) error_no_cudss_lu(A) return lu(A; check = false) end function init_cacheval(alg::GenericLUFactorization, A::Union{<:Adjoint, <:Transpose}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) error_no_cudss_lu(A) A isa GPUArraysCore.AnyGPUArray && return nothing ipiv = Vector{LinearAlgebra.BlasInt}(undef, 0) @@ -236,21 +236,21 @@ const PREALLOCATED_LU = ArrayInterface.lu_instance(rand(1, 1)) function init_cacheval(alg::LUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_LU end function init_cacheval(alg::LUFactorization, A::AbstractSciMLOperator, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function init_cacheval(alg::GenericLUFactorization, A::AbstractSciMLOperator, b, u, Pl, Pr, - maxiters::Int, abstol, 
reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -300,13 +300,13 @@ function do_factorization(alg::QRFactorization, A, b, u) end function init_cacheval(alg::QRFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(convert(AbstractMatrix, A), alg.pivot) end function init_cacheval(alg::QRFactorization, A::Symmetric{<:Number, <:Array}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return qr(convert(AbstractMatrix, A), alg.pivot) end @@ -314,13 +314,13 @@ end const PREALLOCATED_QR_ColumnNorm = ArrayInterface.qr_instance(rand(1, 1), ColumnNorm()) function init_cacheval(alg::QRFactorization{ColumnNorm}, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return PREALLOCATED_QR_ColumnNorm end function init_cacheval( alg::QRFactorization, A::Union{<:Adjoint, <:Transpose}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A isa GPUArraysCore.AnyGPUArray && return qr(A) return qr(A, alg.pivot) end @@ -328,12 +328,12 @@ end const PREALLOCATED_QR_NoPivot = ArrayInterface.qr_instance(rand(1, 1)) function init_cacheval(alg::QRFactorization{NoPivot}, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return PREALLOCATED_QR_NoPivot end function init_cacheval(alg::QRFactorization, A::AbstractSciMLOperator, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -379,33 +379,33 @@ function do_factorization(alg::CholeskyFactorization, A, b, u) end function init_cacheval(alg::CholeskyFactorization, A::SMatrix{S1, S2}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {S1, S2} cholesky(A) end function init_cacheval(alg::CholeskyFactorization, A::GPUArraysCore.AnyGPUArray, b, u, Pl, - Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + Pr, maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) cholesky(A; check = false) end function init_cacheval( alg::CholeskyFactorization, A::AbstractArray{<:BLASELTYPES}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.cholesky_instance(convert(AbstractMatrix, A), alg.pivot) end const PREALLOCATED_CHOLESKY = ArrayInterface.cholesky_instance(rand(1, 1), NoPivot()) function init_cacheval(alg::CholeskyFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_CHOLESKY end function 
init_cacheval(alg::CholeskyFactorization, A::Union{Diagonal, AbstractSciMLOperator, AbstractArray}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -433,12 +433,12 @@ end function init_cacheval(alg::LDLtFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function init_cacheval(alg::LDLtFactorization, A::SymTridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.ldlt_instance(convert(AbstractMatrix, A)) end @@ -472,7 +472,7 @@ function do_factorization(alg::SVDFactorization, A, b, u) end function init_cacheval(alg::SVDFactorization, A::Union{Matrix, SMatrix}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(convert(AbstractMatrix, A)) end @@ -480,13 +480,13 @@ end const PREALLOCATED_SVD = ArrayInterface.svd_instance(rand(1, 1)) function init_cacheval(alg::SVDFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_SVD end function init_cacheval(alg::SVDFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -515,7 +515,7 @@ end function init_cacheval(alg::BunchKaufmanFactorization, A::Symmetric{<:Number, <:Matrix}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.bunchkaufman_instance(convert(AbstractMatrix, A)) end @@ -525,13 +525,13 @@ const PREALLOCATED_BUNCHKAUFMAN = ArrayInterface.bunchkaufman_instance(Symmetric function init_cacheval(alg::BunchKaufmanFactorization, A::Symmetric{Float64, Matrix{Float64}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_BUNCHKAUFMAN end function init_cacheval(alg::BunchKaufmanFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -572,58 +572,58 @@ end function init_cacheval( alg::GenericFactorization{typeof(lu)}, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(lu!)}, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(lu)}, A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) 
ArrayInterface.lu_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(lu!)}, A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(lu)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval(alg::GenericFactorization{typeof(lu)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(lu!)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval( alg::GenericFactorization{typeof(lu!)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(lu!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval( alg::GenericFactorization{typeof(lu)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end @@ -631,24 +631,24 @@ end function init_cacheval( alg::GenericFactorization{typeof(qr)}, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(qr!)}, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(qr)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval( alg::GenericFactorization{typeof(qr!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end @@ -656,33 +656,33 @@ end function init_cacheval(alg::GenericFactorization{typeof(qr)}, A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) 
ArrayInterface.qr_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(qr!)}, A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(qr)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval(alg::GenericFactorization{typeof(qr)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(qr!)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval( alg::GenericFactorization{typeof(qr!)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end @@ -690,87 +690,87 @@ end function init_cacheval( alg::GenericFactorization{typeof(svd)}, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(svd!)}, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(svd)}, A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(svd!)}, A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(svd)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval( alg::GenericFactorization{typeof(svd)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(svd!)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval(alg::GenericFactorization{typeof(svd!)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, 
reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(svd!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval( alg::GenericFactorization{typeof(svd)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval(alg::GenericFactorization, A::Diagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval(alg::GenericFactorization, A::Tridiagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval(alg::GenericFactorization, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval(alg::GenericFactorization, A, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) init_cacheval(alg, convert(AbstractMatrix, A), b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) end function init_cacheval(alg::GenericFactorization, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) do_factorization(alg, A, b, u) end @@ -778,7 +778,7 @@ function init_cacheval( alg::Union{GenericFactorization{typeof(bunchkaufman!)}, GenericFactorization{typeof(bunchkaufman)}}, A::Union{Hermitian, Symmetric}, b, u, Pl, Pr, maxiters::Int, abstol, - reltol, verbose::Bool, assumptions::OperatorAssumptions) + reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) BunchKaufman(A.data, Array(1:size(A, 1)), A.uplo, true, false, 0) end @@ -787,7 +787,7 @@ function init_cacheval( GenericFactorization{typeof(bunchkaufman)}}, A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) if eltype(A) <: Complex return bunchkaufman!(Hermitian(A)) else @@ -801,49 +801,49 @@ end # Cholesky needs the posdef matrix, for GenericFactorization assume structure is needed function init_cacheval( alg::GenericFactorization{typeof(cholesky)}, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) newA = copy(convert(AbstractMatrix, A)) do_factorization(alg, newA, b, u) end function init_cacheval( 
alg::GenericFactorization{typeof(cholesky!)}, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) newA = copy(convert(AbstractMatrix, A)) do_factorization(alg, newA, b, u) end function init_cacheval(alg::GenericFactorization{typeof(cholesky!)}, A::Diagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval( alg::GenericFactorization{typeof(cholesky!)}, A::Tridiagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(cholesky!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval(alg::GenericFactorization{typeof(cholesky)}, A::Diagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval( alg::GenericFactorization{typeof(cholesky)}, A::Tridiagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(cholesky)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end @@ -875,7 +875,7 @@ end function init_cacheval(alg::UMFPACKFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -901,7 +901,7 @@ end function init_cacheval(alg::KLUFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -929,7 +929,7 @@ end function init_cacheval(alg::CHOLMODFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -986,13 +986,13 @@ default_alias_b(::NormalCholeskyFactorization, ::Any, ::Any) = true const PREALLOCATED_NORMALCHOLESKY = ArrayInterface.cholesky_instance(rand(1, 1), NoPivot()) function init_cacheval(alg::NormalCholeskyFactorization, A::SMatrix, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return cholesky(Symmetric((A)' * A)) end function init_cacheval(alg::NormalCholeskyFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A_ = convert(AbstractMatrix, A) return ArrayInterface.cholesky_instance( @@ 
-1003,13 +1003,13 @@ const PREALLOCATED_NORMALCHOLESKY_SYMMETRIC = ArrayInterface.cholesky_instance( Symmetric(rand(1, 1)), NoPivot()) function init_cacheval(alg::NormalCholeskyFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return PREALLOCATED_NORMALCHOLESKY_SYMMETRIC end function init_cacheval(alg::NormalCholeskyFactorization, A::Union{Diagonal, AbstractSciMLOperator}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -1070,7 +1070,7 @@ default_alias_A(::NormalBunchKaufmanFactorization, ::Any, ::Any) = true default_alias_b(::NormalBunchKaufmanFactorization, ::Any, ::Any) = true function init_cacheval(alg::NormalBunchKaufmanFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.bunchkaufman_instance(convert(AbstractMatrix, A)) end @@ -1098,7 +1098,7 @@ A special implementation only for solving `Diagonal` matrices fast. struct DiagonalFactorization <: AbstractDenseFactorization end function init_cacheval(alg::DiagonalFactorization, A, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -1149,12 +1149,12 @@ end function init_cacheval(alg::SparspakFactorization, A::Union{AbstractMatrix, Nothing, AbstractSciMLOperator}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function init_cacheval(::SparspakFactorization, ::StaticArray, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -1194,39 +1194,39 @@ struct CliqueTreesFactorization{A, S} <: AbstractSparseFactorization end function init_cacheval(::CliqueTreesFactorization, ::Union{AbstractMatrix, Nothing, AbstractSciMLOperator}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function init_cacheval(::CliqueTreesFactorization, ::StaticArray, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end # Fallback init_cacheval for extension-based algorithms when extensions aren't loaded # These return nothing since the actual implementations are in the extensions function init_cacheval(::BLISLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function init_cacheval(::CudaOffloadLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function init_cacheval(::MetalLUFactorization, A, b, u, Pl, Pr, - 
maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end for alg in vcat(InteractiveUtils.subtypes(AbstractDenseFactorization), InteractiveUtils.subtypes(AbstractSparseFactorization)) @eval function init_cacheval(alg::$alg, A::MatrixOperator, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) init_cacheval(alg, A.A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) end end diff --git a/src/iterative_wrappers.jl b/src/iterative_wrappers.jl index 16d28d908..9050893c5 100644 --- a/src/iterative_wrappers.jl +++ b/src/iterative_wrappers.jl @@ -185,7 +185,7 @@ end # zeroinit allows for init_cacheval to start by initing with A (0,0) function init_cacheval(alg::KrylovJL, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions; zeroinit = true) + verbose::LinearVerbosity, assumptions::OperatorAssumptions; zeroinit = true) KS = get_KrylovJL_solver(alg.KrylovAlg) if zeroinit @@ -240,7 +240,7 @@ end # Krylov.jl tries to init with `ArrayPartition(undef, ...)`. Avoid hitting that! function init_cacheval( alg::LinearSolve.KrylovJL, A, b::RecursiveArrayTools.ArrayPartition, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, ::LinearSolve.OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, ::LinearSolve.OperatorAssumptions) return nothing end @@ -268,7 +268,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::KrylovJL; kwargs...) atol = float(cache.abstol) rtol = float(cache.reltol) itmax = cache.maxiters - verbose = cache.verbose ? 1 : 0 + verbose = cache.verbose cacheval = if cache.alg isa DefaultLinearSolver if alg.KrylovAlg === Krylov.gmres! @@ -284,13 +284,16 @@ function SciMLBase.solve!(cache::LinearCache, alg::KrylovJL; kwargs...) cache.cacheval end + krylovJL_verbose = isnothing(verbose.numerical) ? 0 : verbosity_to_int(verbose.numerical.KrylovJL_verbosity) + args = (cacheval, cache.A, cache.b) - kwargs = (atol = atol, rtol, itmax, verbose, + kwargs = (atol = atol, rtol, itmax, verbose = krylovJL_verbose, ldiv = true, history = true, alg.kwargs...) if cache.cacheval isa Krylov.CgWorkspace N !== I && - @warn "$(alg.KrylovAlg) doesn't support right preconditioning." + @SciMLMessage("$(alg.KrylovAlg) doesn't support right preconditioning.", + verbose, :no_right_preconditioning, :performance) Krylov.krylov_solve!(args...; M, kwargs...) elseif cache.cacheval isa Krylov.GmresWorkspace Krylov.krylov_solve!(args...; M, N, restart = alg.gmres_restart > 0, kwargs...) @@ -298,7 +301,8 @@ function SciMLBase.solve!(cache::LinearCache, alg::KrylovJL; kwargs...) Krylov.krylov_solve!(args...; M, N, kwargs...) elseif cache.cacheval isa Krylov.MinresWorkspace N !== I && - @warn "$(alg.KrylovAlg) doesn't support right preconditioning." + @SciMLMessage("$(alg.KrylovAlg) doesn't support right preconditioning.", + verbose, :no_right_preconditioning, :performance) Krylov.krylov_solve!(args...; M, kwargs...) else Krylov.krylov_solve!(args...; kwargs...) 
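Note on the pattern above: the former `@warn` calls are routed through `@SciMLMessage`, which looks up the named toggle (here `:no_right_preconditioning`) in the named group (`:performance`) of the supplied `LinearVerbosity` and decides whether to stay silent, log, warn, or error. The sketch below is illustrative only and is not part of the patch; it assumes `@SciMLMessage` and `Verbosity` can be imported from SciMLLogging as they are used unqualified in the hunks above, and `report_no_right_precond` is a hypothetical helper name.

```julia
using LinearSolve: LinearVerbosity
using SciMLLogging: Verbosity, @SciMLMessage  # assumed import location

# Hypothetical helper mirroring the Krylov wrapper message above: the severity of
# the emitted message is whatever the user configured for :no_right_preconditioning.
function report_no_right_precond(verbose::LinearVerbosity, algname)
    @SciMLMessage("$algname doesn't support right preconditioning.",
        verbose, :no_right_preconditioning, :performance)
end

report_no_right_precond(LinearVerbosity(Verbosity.Default()), "cg!")  # warns by default
report_no_right_precond(LinearVerbosity(Verbosity.None()), "cg!")     # fully silent
```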
diff --git a/src/mkl.jl b/src/mkl.jl index 11fa20f09..385921365 100644 --- a/src/mkl.jl +++ b/src/mkl.jl @@ -220,14 +220,14 @@ const PREALLOCATED_MKL_LU = begin end function LinearSolve.init_cacheval(alg::MKLLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_MKL_LU end function LinearSolve.init_cacheval(alg::MKLLUFactorization, A::AbstractMatrix{<:Union{Float32, ComplexF32, ComplexF64}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A = rand(eltype(A), 0, 0) ArrayInterface.lu_instance(A), Ref{BlasInt}() @@ -278,7 +278,7 @@ const PREALLOCATED_MKL32_LU = begin end function LinearSolve.init_cacheval(alg::MKL32MixedLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Pre-allocate appropriate 32-bit arrays based on input type if eltype(A) <: Complex diff --git a/src/openblas.jl b/src/openblas.jl index d5b6d353c..048c13b9c 100644 --- a/src/openblas.jl +++ b/src/openblas.jl @@ -245,14 +245,14 @@ const PREALLOCATED_OPENBLAS_LU = begin end function LinearSolve.init_cacheval(alg::OpenBLASLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_OPENBLAS_LU end function LinearSolve.init_cacheval(alg::OpenBLASLUFactorization, A::AbstractMatrix{<:Union{Float32, ComplexF32, ComplexF64}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A = rand(eltype(A), 0, 0) ArrayInterface.lu_instance(A), Ref{BlasInt}() @@ -303,7 +303,7 @@ const PREALLOCATED_OPENBLAS32_LU = begin end function LinearSolve.init_cacheval(alg::OpenBLAS32MixedLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Pre-allocate appropriate 32-bit arrays based on input type if eltype(A) <: Complex diff --git a/src/simplegmres.jl b/src/simplegmres.jl index a21826c9f..644a62b61 100644 --- a/src/simplegmres.jl +++ b/src/simplegmres.jl @@ -161,7 +161,7 @@ function init_cacheval(alg::SimpleGMRES{UDB}, args...; kwargs...) where {UDB} end function _init_cacheval(::Val{false}, alg::SimpleGMRES, A, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, ::Bool, ::OperatorAssumptions; zeroinit = true, kwargs...) + abstol, reltol, ::LinearVerbosity, ::OperatorAssumptions; zeroinit = true, kwargs...) 
@unpack memory, restart, blocksize, warm_start = alg if zeroinit @@ -392,7 +392,7 @@ function SciMLBase.solve!(cache::SimpleGMRESCache{false}, lincache::LinearCache) end function _init_cacheval(::Val{true}, alg::SimpleGMRES, A, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, ::Bool, ::OperatorAssumptions; zeroinit = true, + abstol, reltol, ::LinearVerbosity, ::OperatorAssumptions; zeroinit = true, blocksize = alg.blocksize) @unpack memory, restart, warm_start = alg diff --git a/src/simplelu.jl b/src/simplelu.jl index 9917f5869..78d6775ab 100644 --- a/src/simplelu.jl +++ b/src/simplelu.jl @@ -218,6 +218,6 @@ function SciMLBase.solve!(cache::LinearCache, alg::SimpleLUFactorization; kwargs end function init_cacheval(alg::SimpleLUFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, - reltol, verbose::Bool, assumptions::OperatorAssumptions) + reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) LUSolver(convert(AbstractMatrix, A)) end diff --git a/src/verbosity.jl b/src/verbosity.jl new file mode 100644 index 000000000..34c557986 --- /dev/null +++ b/src/verbosity.jl @@ -0,0 +1,180 @@ +# Linear Verbosity + +const linear_defaults = Dict{Symbol, Verbosity.Type}( + :default_lu_fallback => Verbosity.Warn(), + :no_right_preconditioning => Verbosity.Warn(), + :using_IterativeSolvers => Verbosity.Warn(), + :IterativeSolvers_iterations => Verbosity.Warn(), + :KrylovKit_verbosity => Verbosity.Warn(), + :KrylovJL_verbosity => Verbosity.None(), + :HYPRE_verbosity => Verbosity.Level(1), + :pardiso_verbosity => Verbosity.None() +) +mutable struct LinearErrorControlVerbosity + default_lu_fallback::Verbosity.Type + + function LinearErrorControlVerbosity(; + default_lu_fallback = linear_defaults[:default_lu_fallback]) + new(default_lu_fallback) + end + + function LinearErrorControlVerbosity(verbose::Verbosity.Type) + @match verbose begin + Verbosity.None() => new(fill( + Verbosity.None(), length(fieldnames(LinearErrorControlVerbosity)))...) + + Verbosity.Info() => new(fill( + Verbosity.Info(), length(fieldnames(LinearErrorControlVerbosity)))...) + + Verbosity.Warn() => new(fill( + Verbosity.Warn(), length(fieldnames(LinearErrorControlVerbosity)))...) + + Verbosity.Error() => new(fill( + Verbosity.Error(), length(fieldnames(LinearErrorControlVerbosity)))...) + + Verbosity.Default() => LinearErrorControlVerbosity() + + Verbosity.Edge() => LinearErrorControlVerbosity() + + _ => @error "Not a valid choice for verbosity." + end + end +end + +mutable struct LinearPerformanceVerbosity + no_right_preconditioning::Verbosity.Type + + function LinearPerformanceVerbosity(; + no_right_preconditioning = linear_defaults[:no_right_preconditioning]) + new(no_right_preconditioning) + end + + function LinearPerformanceVerbosity(verbose::Verbosity.Type) + @match verbose begin + Verbosity.None() => new(fill( + Verbosity.None(), length(fieldnames(LinearPerformanceVerbosity)))...) + + Verbosity.Info() => new(fill( + Verbosity.Info(), length(fieldnames(LinearPerformanceVerbosity)))...) + + Verbosity.Warn() => new(fill( + Verbosity.Warn(), length(fieldnames(LinearPerformanceVerbosity)))...) + + Verbosity.Error() => new(fill( + Verbosity.Error(), length(fieldnames(LinearPerformanceVerbosity)))...) + + Verbosity.Default() => LinearPerformanceVerbosity() + + Verbosity.Edge() => LinearPerformanceVerbosity() + + _ => @error "Not a valid choice for verbosity."
+ end + end +end + +mutable struct LinearNumericalVerbosity + using_IterativeSolvers::Verbosity.Type + IterativeSolvers_iterations::Verbosity.Type + KrylovKit_verbosity::Verbosity.Type + KrylovJL_verbosity::Verbosity.Type + HYPRE_verbosity::Verbosity.Type + pardiso_verbosity::Verbosity.Type + + function LinearNumericalVerbosity(; + using_IterativeSolvers = linear_defaults[:using_IterativeSolvers], + IterativeSolvers_iterations = linear_defaults[:IterativeSolvers_iterations], + KrylovKit_verbosity = linear_defaults[:KrylovKit_verbosity], + KrylovJL_verbosity = linear_defaults[:KrylovJL_verbosity], + HYPRE_verbosity = linear_defaults[:HYPRE_verbosity], + pardiso_verbosity = linear_defaults[:pardiso_verbosity]) + new(using_IterativeSolvers, IterativeSolvers_iterations, + KrylovKit_verbosity, KrylovJL_verbosity, HYPRE_verbosity, pardiso_verbosity) + end + + function LinearNumericalVerbosity(verbose::Verbosity.Type) + @match verbose begin + Verbosity.None() => new(fill( + Verbosity.None(), length(fieldnames(LinearNumericalVerbosity)))...) + + Verbosity.Info() => new(fill( + Verbosity.Info(), length(fieldnames(LinearNumericalVerbosity)))...) + + Verbosity.Warn() => new(fill( + Verbosity.Warn(), length(fieldnames(LinearNumericalVerbosity)))...) + + Verbosity.Error() => new(fill( + Verbosity.Error(), length(fieldnames(LinearNumericalVerbosity)))...) + + Verbosity.Default() => LinearNumericalVerbosity() + + Verbosity.Edge() => LinearNumericalVerbosity() + + _ => @error "Not a valid choice for verbosity." + end + end +end + +struct LinearVerbosity{T} <: AbstractVerbositySpecifier{T} + error_control::Union{LinearErrorControlVerbosity, Nothing} + performance::Union{LinearPerformanceVerbosity, Nothing} + numerical::Union{LinearNumericalVerbosity, Nothing} +end + +function LinearVerbosity(verbose::Verbosity.Type) + @match verbose begin + Verbosity.Default() => LinearVerbosity{true}( + LinearErrorControlVerbosity(Verbosity.Default()), + LinearPerformanceVerbosity(Verbosity.Default()), + LinearNumericalVerbosity(Verbosity.Default()) + ) + + Verbosity.None() => LinearVerbosity{false}(nothing, nothing, nothing) + + Verbosity.All() => LinearVerbosity{true}( + LinearErrorControlVerbosity(Verbosity.Info()), + LinearPerformanceVerbosity(Verbosity.Info()), + LinearNumericalVerbosity(Verbosity.Info()) + ) + + _ => @error "Not a valid choice for LinearVerbosity. Available choices are `Default`, `None`, and `All`." + end +end + +function LinearVerbosity(; + error_control = Verbosity.Default(), performance = Verbosity.Default(), + numerical = Verbosity.Default(), kwargs...) 
+ if error_control isa Verbosity.Type + error_control_verbosity = LinearErrorControlVerbosity(error_control) + else + error_control_verbosity = error_control + end + + if performance isa Verbosity.Type + performance_verbosity = LinearPerformanceVerbosity(performance) + else + performance_verbosity = performance + end + + if numerical isa Verbosity.Type + numerical_verbosity = LinearNumericalVerbosity(numerical) + else + numerical_verbosity = numerical + end + + if !isempty(kwargs) + for (key, value) in pairs(kwargs) + if hasfield(LinearErrorControlVerbosity, key) + setproperty!(error_control_verbosity, key, value) + elseif hasfield(LinearPerformanceVerbosity, key) + setproperty!(performance_verbosity, key, value) + elseif hasfield(LinearNumericalVerbosity, key) + setproperty!(numerical_verbosity, key, value) + else + error("$key is not a recognized verbosity toggle.") + end + end + end + + LinearVerbosity{true}(error_control_verbosity, + performance_verbosity, numerical_verbosity) +end \ No newline at end of file diff --git a/test/runtests.jl b/test/runtests.jl index 0b9b4cd93..558f17c7b 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -17,6 +17,7 @@ if GROUP == "All" || GROUP == "Core" @time @safetestset "Adjoint Sensitivity" include("adjoint.jl") @time @safetestset "ForwardDiff Overloads" include("forwarddiff_overloads.jl") @time @safetestset "Traits" include("traits.jl") + @time @safetestset "Verbosity" include("verbosity.jl") @time @safetestset "BandedMatrices" include("banded.jl") @time @safetestset "Mixed Precision" include("test_mixed_precision.jl") end diff --git a/test/verbosity.jl b/test/verbosity.jl new file mode 100644 index 000000000..2e0b48fd1 --- /dev/null +++ b/test/verbosity.jl @@ -0,0 +1,27 @@ +using LinearSolve +using LinearSolve: LinearVerbosity +using SciMLLogging: SciMLLogging, Verbosity +using Test + +A = [1.0 0 0 0 + 0 1 0 0 + 0 0 1 0 + 0 0 0 0] +b = rand(4) +prob = LinearProblem(A, b) + +@test_logs (:warn, + "LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.") solve( + prob, + verbose = LinearVerbosity(default_lu_fallback = Verbosity.Warn())) + +@test_logs (:warn, + "LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.") solve( + prob, verbose = true) + +@test_logs min_level=SciMLLogging.Logging.Warn solve(prob, verbose = false) + +@test_logs (:info, + "LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.") solve( + prob, + verbose = LinearVerbosity(default_lu_fallback = Verbosity.Info())) \ No newline at end of file
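A couple of further checks one might add alongside test/verbosity.jl, shown here only as an illustrative sketch (not part of the patch) and grounded in the constructor behavior defined in src/verbosity.jl above: `Verbosity.None()` builds the compile-out `LinearVerbosity{false}` specifier with empty groups, `Verbosity.All()` builds the fully enabled `LinearVerbosity{true}` specifier, and an unrecognized keyword toggle raises an error.

```julia
using LinearSolve: LinearVerbosity
using SciMLLogging: Verbosity
using Test

# Verbosity.None() produces the {false} specifier with all groups disabled
v_off = LinearVerbosity(Verbosity.None())
@test v_off isa LinearVerbosity{false}
@test isnothing(v_off.numerical)

# Verbosity.All() produces the fully enabled {true} specifier
@test LinearVerbosity(Verbosity.All()) isa LinearVerbosity{true}

# Unknown toggle names are rejected by the keyword constructor
@test_throws ErrorException LinearVerbosity(not_a_real_toggle = Verbosity.Warn())
```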