
Commit 5927e3e

Apply formatting and fix additional test compatibility

- Format code with JuliaFormatter SciMLStyle
- Update resolve.jl tests to properly handle mixed precision algorithms
- Add appropriate tolerance checks for Float32 precision solvers

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <[email protected]>
1 parent 555c337 commit 5927e3e
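
The commit message refers to tolerance checks for Float32 / mixed-precision solvers, but the `test/resolve.jl` changes themselves are not rendered in this diff view. The snippet below is only a rough sketch of that kind of precision-aware check; the matrix, tolerances, and choice of algorithms are illustrative assumptions, not the contents of the test file, and the Accelerate-backed solver shown only runs on Apple hardware.

```julia
# Hedged sketch of a precision-aware tolerance check; values are illustrative.
using LinearSolve, LinearAlgebra, Test

A = rand(50, 50) + 10I      # well-conditioned test matrix
b = rand(50)
prob = LinearProblem(A, b)

# Full Float64 factorization: residual should be near machine epsilon.
sol64 = solve(prob, LUFactorization())
@test norm(A * sol64.u - b) < 1e-10

# A mixed-precision solver factorizes in Float32, so the check is loosened
# to roughly single-precision accuracy (Apple hardware only).
sol32 = solve(prob, AppleAccelerate32MixedLUFactorization())
@test norm(A * sol32.u - b) < 1e-4
```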

File tree

3 files changed: +108 −89 lines

src/LinearSolve.jl

Lines changed: 74 additions & 57 deletions
```diff
@@ -21,7 +21,8 @@ using SciMLBase: SciMLBase, LinearAliasSpecifier, AbstractSciMLOperator,
 using SciMLOperators: SciMLOperators, AbstractSciMLOperator, IdentityOperator,
     MatrixOperator,
     has_ldiv!, issquare
-using SciMLLogging: Verbosity, @SciMLMessage, verbosity_to_int, @match, AbstractVerbositySpecifier
+using SciMLLogging: Verbosity, @SciMLMessage, verbosity_to_int, @match,
+    AbstractVerbositySpecifier
 using Setfield: @set, @set!
 using UnPack: @unpack
 using DocStringExtensions: DocStringExtensions
```
```diff
@@ -67,14 +68,13 @@ else
 const useopenblas = false
 end
 
-
 @reexport using SciMLBase
 
 """
     SciMLLinearSolveAlgorithm <: SciMLBase.AbstractLinearAlgorithm
 
 The root abstract type for all linear solver algorithms in LinearSolve.jl.
-All concrete linear solver implementations should inherit from one of the 
+All concrete linear solver implementations should inherit from one of the
 specialized subtypes rather than directly from this type.
 
 This type integrates with the SciMLBase ecosystem, providing a consistent
```
```diff
@@ -92,39 +92,44 @@ matrices (e.g., `A = LU`, `A = QR`, `A = LDL'`) and then solve the system
 using forward/backward substitution.
 
 ## Characteristics
-- Requires concrete matrix representation (`needs_concrete_A() = true`)
-- Typically efficient for multiple solves with the same matrix
-- Generally provides high accuracy for well-conditioned problems
-- Memory requirements depend on the specific factorization type
+
+  - Requires concrete matrix representation (`needs_concrete_A() = true`)
+  - Typically efficient for multiple solves with the same matrix
+  - Generally provides high accuracy for well-conditioned problems
+  - Memory requirements depend on the specific factorization type
 
 ## Subtypes
-- `AbstractDenseFactorization`: For dense matrix factorizations
-- `AbstractSparseFactorization`: For sparse matrix factorizations
+
+  - `AbstractDenseFactorization`: For dense matrix factorizations
+  - `AbstractSparseFactorization`: For sparse matrix factorizations
 
 ## Examples of concrete subtypes
-- `LUFactorization`, `QRFactorization`, `CholeskyFactorization`
-- `UMFPACKFactorization`, `KLUFactorization`
+
+  - `LUFactorization`, `QRFactorization`, `CholeskyFactorization`
+  - `UMFPACKFactorization`, `KLUFactorization`
 """
 abstract type AbstractFactorization <: SciMLLinearSolveAlgorithm end
 
 """
     AbstractSparseFactorization <: AbstractFactorization
 
 Abstract type for factorization-based linear solvers optimized for sparse matrices.
-These algorithms take advantage of sparsity patterns to reduce memory usage and 
+These algorithms take advantage of sparsity patterns to reduce memory usage and
 computational cost compared to dense factorizations.
 
-## Characteristics
-- Optimized for matrices with many zero entries
-- Often use specialized pivoting strategies to preserve sparsity
-- May reorder rows/columns to minimize fill-in during factorization
-- Typically more memory-efficient than dense methods for sparse problems
+## Characteristics
+
+  - Optimized for matrices with many zero entries
+  - Often use specialized pivoting strategies to preserve sparsity
+  - May reorder rows/columns to minimize fill-in during factorization
+  - Typically more memory-efficient than dense methods for sparse problems
 
 ## Examples of concrete subtypes
-- `UMFPACKFactorization`: General sparse LU with partial pivoting
-- `KLUFactorization`: Sparse LU optimized for circuit simulation
-- `CHOLMODFactorization`: Sparse Cholesky for positive definite systems
-- `SparspakFactorization`: Envelope/profile method for sparse systems
+
+  - `UMFPACKFactorization`: General sparse LU with partial pivoting
+  - `KLUFactorization`: Sparse LU optimized for circuit simulation
+  - `CHOLMODFactorization`: Sparse Cholesky for positive definite systems
+  - `SparspakFactorization`: Envelope/profile method for sparse systems
 """
 abstract type AbstractSparseFactorization <: AbstractFactorization end
 
```
```diff
@@ -136,16 +141,18 @@ These algorithms assume the matrix has no particular sparsity structure and use
 dense linear algebra routines (typically from BLAS/LAPACK) for optimal performance.
 
 ## Characteristics
-- Optimized for matrices with few zeros or no sparsity structure
-- Leverage highly optimized BLAS/LAPACK routines when available
-- Generally provide excellent performance for moderately-sized dense problems
-- Memory usage scales as O(n²) with matrix size
-
-## Examples of concrete subtypes
-- `LUFactorization`: Dense LU with partial pivoting (via LAPACK)
-- `QRFactorization`: Dense QR factorization for overdetermined systems
-- `CholeskyFactorization`: Dense Cholesky for symmetric positive definite matrices
-- `BunchKaufmanFactorization`: For symmetric indefinite matrices
+
+  - Optimized for matrices with few zeros or no sparsity structure
+  - Leverage highly optimized BLAS/LAPACK routines when available
+  - Generally provide excellent performance for moderately-sized dense problems
+  - Memory usage scales as O(n²) with matrix size
+
+## Examples of concrete subtypes
+
+  - `LUFactorization`: Dense LU with partial pivoting (via LAPACK)
+  - `QRFactorization`: Dense QR factorization for overdetermined systems
+  - `CholeskyFactorization`: Dense Cholesky for symmetric positive definite matrices
+  - `BunchKaufmanFactorization`: For symmetric indefinite matrices
 """
 abstract type AbstractDenseFactorization <: AbstractFactorization end
 
```
```diff
@@ -157,23 +164,26 @@ These algorithms solve linear systems by iteratively building an approximation
 from a sequence of Krylov subspaces, without requiring explicit matrix factorization.
 
 ## Characteristics
-- Does not require concrete matrix representation (`needs_concrete_A() = false`)
-- Only needs matrix-vector products `A*v` (can work with operators/functions)
-- Memory usage typically O(n) or O(kn) where k << n
-- Convergence depends on matrix properties (condition number, eigenvalue distribution)
-- Often benefits significantly from preconditioning
+
+  - Does not require concrete matrix representation (`needs_concrete_A() = false`)
+  - Only needs matrix-vector products `A*v` (can work with operators/functions)
+  - Memory usage typically O(n) or O(kn) where k << n
+  - Convergence depends on matrix properties (condition number, eigenvalue distribution)
+  - Often benefits significantly from preconditioning
 
 ## Advantages
-- Low memory requirements for large sparse systems
-- Can handle matrix-free operators (functions that compute `A*v`)
-- Often the only feasible approach for very large systems
-- Can exploit matrix structure through specialized operators
+
+  - Low memory requirements for large sparse systems
+  - Can handle matrix-free operators (functions that compute `A*v`)
+  - Often the only feasible approach for very large systems
+  - Can exploit matrix structure through specialized operators
 
 ## Examples of concrete subtypes
-- `GMRESIteration`: Generalized Minimal Residual method
-- `CGIteration`: Conjugate Gradient (for symmetric positive definite systems)
-- `BiCGStabLIteration`: Bi-Conjugate Gradient Stabilized
-- Wrapped external iterative solvers (KrylovKit.jl, IterativeSolvers.jl)
+
+  - `GMRESIteration`: Generalized Minimal Residual method
+  - `CGIteration`: Conjugate Gradient (for symmetric positive definite systems)
+  - `BiCGStabLIteration`: Bi-Conjugate Gradient Stabilized
+  - Wrapped external iterative solvers (KrylovKit.jl, IterativeSolvers.jl)
 """
 abstract type AbstractKrylovSubspaceMethod <: SciMLLinearSolveAlgorithm end
 
```
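The Krylov docstring above emphasizes that these methods only need `A*v` products (`needs_concrete_A() = false`). Below is a small sketch using the Krylov.jl-backed GMRES wrapper that LinearSolve.jl ships; the matrix is an arbitrary well-conditioned example, and the same `solve` call also accepts matrix-free SciML operators.

```julia
# Illustrative GMRES solve; eigenvalues clustered near 1, so it converges quickly.
using LinearSolve, LinearAlgebra

n = 500
A = Matrix(1.0I, n, n) .+ 0.01 .* rand(n, n)
b = rand(n)

sol = solve(LinearProblem(A, b), KrylovJL_GMRES(); reltol = 1e-8)
norm(A * sol.u - b) / norm(b)    # small relative residual
```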

```diff
@@ -184,15 +194,17 @@ Abstract type for linear solvers that wrap custom solving functions or
 provide direct interfaces to specific solve methods. These provide flexibility
 for integrating custom algorithms or simple solve strategies.
 
-## Characteristics
-- Does not require concrete matrix representation (`needs_concrete_A() = false`)
-- Provides maximum flexibility for custom solving strategies
-- Can wrap external solver libraries or implement specialized algorithms
-- Performance and stability depend entirely on the wrapped implementation
+## Characteristics
+
+  - Does not require concrete matrix representation (`needs_concrete_A() = false`)
+  - Provides maximum flexibility for custom solving strategies
+  - Can wrap external solver libraries or implement specialized algorithms
+  - Performance and stability depend entirely on the wrapped implementation
 
 ## Examples of concrete subtypes
-- `LinearSolveFunction`: Wraps arbitrary user-defined solve functions
-- `DirectLdiv!`: Direct application of the `\\` operator
+
+  - `LinearSolveFunction`: Wraps arbitrary user-defined solve functions
+  - `DirectLdiv!`: Direct application of the `\\` operator
 """
 abstract type AbstractSolveFunction <: SciMLLinearSolveAlgorithm end
 
```
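`LinearSolveFunction`, listed above, wraps an arbitrary user solve routine. The sketch below is hedged: the argument list mirrors the custom-function interface as documented for LinearSolve.jl, but check the documentation for the version in use before relying on it.

```julia
# Hedged sketch of wrapping a custom solver with LinearSolveFunction;
# verify the expected function signature against the LinearSolve.jl docs.
using LinearSolve, LinearAlgebra

function my_ldiv!(A, b, u, p, newA, Pl, Pr, solverdata; kwargs...)
    ldiv!(u, lu(A), b)    # any user-chosen strategy goes here
    return u
end

A = rand(20, 20) + 5I
b = rand(20)
sol = solve(LinearProblem(A, b), LinearSolveFunction(my_ldiv!))
```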

```diff
@@ -205,22 +217,27 @@ Trait function that determines whether a linear solver algorithm requires
 a concrete matrix representation or can work with abstract operators.
 
 ## Arguments
-- `alg`: A linear solver algorithm instance
+
+  - `alg`: A linear solver algorithm instance
 
 ## Returns
-- `true`: Algorithm requires a concrete matrix (e.g., for factorization)
-- `false`: Algorithm can work with abstract operators (e.g., matrix-free methods)
+
+  - `true`: Algorithm requires a concrete matrix (e.g., for factorization)
+  - `false`: Algorithm can work with abstract operators (e.g., matrix-free methods)
 
 ## Usage
+
 This trait is used internally by LinearSolve.jl to optimize algorithm dispatch
 and determine when matrix operators need to be converted to concrete arrays.
 
 ## Algorithm-Specific Behavior
-- `AbstractFactorization`: `true` (needs explicit matrix entries for factorization)
-- `AbstractKrylovSubspaceMethod`: `false` (only needs matrix-vector products)
-- `AbstractSolveFunction`: `false` (depends on the wrapped function's requirements)
+
+  - `AbstractFactorization`: `true` (needs explicit matrix entries for factorization)
+  - `AbstractKrylovSubspaceMethod`: `false` (only needs matrix-vector products)
+  - `AbstractSolveFunction`: `false` (depends on the wrapped function's requirements)
 
 ## Example
+
 ```julia
 needs_concrete_A(LUFactorization()) # true
 needs_concrete_A(GMRESIteration()) # false
```
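A self-contained restatement of the trait behavior this docstring documents. The abstract types and per-family defaults below mirror the documentation; `MyLU` and `MyGMRES` are hypothetical illustration types, not part of the package.

```julia
# Standalone illustration of the documented needs_concrete_A defaults.
abstract type SciMLLinearSolveAlgorithm end
abstract type AbstractFactorization <: SciMLLinearSolveAlgorithm end
abstract type AbstractKrylovSubspaceMethod <: SciMLLinearSolveAlgorithm end
abstract type AbstractSolveFunction <: SciMLLinearSolveAlgorithm end

needs_concrete_A(::AbstractFactorization) = true          # must materialize A
needs_concrete_A(::AbstractKrylovSubspaceMethod) = false  # A*v products suffice
needs_concrete_A(::AbstractSolveFunction) = false         # up to the wrapped function

struct MyLU <: AbstractFactorization end            # hypothetical
struct MyGMRES <: AbstractKrylovSubspaceMethod end  # hypothetical

@assert needs_concrete_A(MyLU())
@assert !needs_concrete_A(MyGMRES())
```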

src/appleaccelerate.jl

Lines changed: 16 additions & 16 deletions
```diff
@@ -14,7 +14,6 @@ to avoid allocations and does not require libblastrampoline.
 """
 struct AppleAccelerateLUFactorization <: AbstractFactorization end
 
-
 @static if !Sys.isapple()
     __appleaccelerate_isavailable() = false
 else
```
```diff
@@ -35,7 +34,7 @@ function aa_getrf!(A::AbstractMatrix{<:ComplexF64};
         ipiv = similar(A, Cint, min(size(A, 1), size(A, 2))),
         info = Ref{Cint}(),
         check = false)
-    __appleaccelerate_isavailable() || 
+    __appleaccelerate_isavailable() ||
         error("Error, AppleAccelerate binary is missing but solve is being called. Report this issue")
     require_one_based_indexing(A)
     check && chkfinite(A)
@@ -57,7 +56,7 @@ function aa_getrf!(A::AbstractMatrix{<:ComplexF32};
         ipiv = similar(A, Cint, min(size(A, 1), size(A, 2))),
         info = Ref{Cint}(),
         check = false)
-    __appleaccelerate_isavailable() || 
+    __appleaccelerate_isavailable() ||
         error("Error, AppleAccelerate binary is missing but solve is being called. Report this issue")
     require_one_based_indexing(A)
     check && chkfinite(A)
@@ -79,7 +78,7 @@ function aa_getrf!(A::AbstractMatrix{<:Float64};
         ipiv = similar(A, Cint, min(size(A, 1), size(A, 2))),
         info = Ref{Cint}(),
         check = false)
-    __appleaccelerate_isavailable() || 
+    __appleaccelerate_isavailable() ||
         error("Error, AppleAccelerate binary is missing but solve is being called. Report this issue")
     require_one_based_indexing(A)
     check && chkfinite(A)
@@ -101,7 +100,7 @@ function aa_getrf!(A::AbstractMatrix{<:Float32};
         ipiv = similar(A, Cint, min(size(A, 1), size(A, 2))),
         info = Ref{Cint}(),
         check = false)
-    __appleaccelerate_isavailable() || 
+    __appleaccelerate_isavailable() ||
         error("Error, AppleAccelerate binary is missing but solve is being called. Report this issue")
     require_one_based_indexing(A)
     check && chkfinite(A)
@@ -125,7 +124,7 @@ function aa_getrs!(trans::AbstractChar,
         ipiv::AbstractVector{Cint},
         B::AbstractVecOrMat{<:ComplexF64};
         info = Ref{Cint}())
-    __appleaccelerate_isavailable() || 
+    __appleaccelerate_isavailable() ||
         error("Error, AppleAccelerate binary is missing but solve is being called. Report this issue")
     require_one_based_indexing(A, ipiv, B)
     LinearAlgebra.LAPACK.chktrans(trans)
@@ -151,7 +150,7 @@ function aa_getrs!(trans::AbstractChar,
         ipiv::AbstractVector{Cint},
         B::AbstractVecOrMat{<:ComplexF32};
         info = Ref{Cint}())
-    __appleaccelerate_isavailable() || 
+    __appleaccelerate_isavailable() ||
         error("Error, AppleAccelerate binary is missing but solve is being called. Report this issue")
     require_one_based_indexing(A, ipiv, B)
     LinearAlgebra.LAPACK.chktrans(trans)
@@ -178,7 +177,7 @@ function aa_getrs!(trans::AbstractChar,
         ipiv::AbstractVector{Cint},
         B::AbstractVecOrMat{<:Float64};
         info = Ref{Cint}())
-    __appleaccelerate_isavailable() || 
+    __appleaccelerate_isavailable() ||
         error("Error, AppleAccelerate binary is missing but solve is being called. Report this issue")
     require_one_based_indexing(A, ipiv, B)
     LinearAlgebra.LAPACK.chktrans(trans)
@@ -205,7 +204,7 @@ function aa_getrs!(trans::AbstractChar,
         ipiv::AbstractVector{Cint},
         B::AbstractVecOrMat{<:Float32};
         info = Ref{Cint}())
-    __appleaccelerate_isavailable() || 
+    __appleaccelerate_isavailable() ||
         error("Error, AppleAccelerate binary is missing but solve is being called. Report this issue")
     require_one_based_indexing(A, ipiv, B)
     LinearAlgebra.LAPACK.chktrans(trans)
```
```diff
@@ -253,7 +252,7 @@ end
 
 function SciMLBase.solve!(cache::LinearCache, alg::AppleAccelerateLUFactorization;
         kwargs...)
-    __appleaccelerate_isavailable() || 
+    __appleaccelerate_isavailable() ||
         error("Error, AppleAccelerate binary is missing but solve is being called. Report this issue")
     A = cache.A
     A = convert(AbstractMatrix, A)
```
```diff
@@ -296,7 +295,8 @@ const PREALLOCATED_APPLE32_LU = begin
     LU(luinst.factors, similar(A, Cint, 0), luinst.info), Ref{Cint}()
 end
 
-function LinearSolve.init_cacheval(alg::AppleAccelerate32MixedLUFactorization, A, b, u, Pl, Pr,
+function LinearSolve.init_cacheval(
+        alg::AppleAccelerate32MixedLUFactorization, A, b, u, Pl, Pr,
         maxiters::Int, abstol, reltol, verbose::LinearVerbosity,
         assumptions::OperatorAssumptions)
     # Pre-allocate appropriate 32-bit arrays based on input type
```
```diff
@@ -311,14 +311,14 @@ end
 
 function SciMLBase.solve!(cache::LinearCache, alg::AppleAccelerate32MixedLUFactorization;
         kwargs...)
-    __appleaccelerate_isavailable() || 
+    __appleaccelerate_isavailable() ||
         error("Error, AppleAccelerate binary is missing but solve is being called. Report this issue")
     A = cache.A
     A = convert(AbstractMatrix, A)
-    
+
     # Check if we have complex numbers
     iscomplex = eltype(A) <: Complex
-    
+
     if cache.isfresh
         cacheval = @get_cacheval(cache, :AppleAccelerate32MixedLUFactorization)
         # Convert to appropriate 32-bit type for factorization
```
```diff
@@ -341,14 +341,14 @@ function SciMLBase.solve!(cache::LinearCache, alg::AppleAccelerate32MixedLUFacto
     A_lu, info = @get_cacheval(cache, :AppleAccelerate32MixedLUFactorization)
     require_one_based_indexing(cache.u, cache.b)
     m, n = size(A_lu, 1), size(A_lu, 2)
-    
+
     # Convert b to appropriate 32-bit type for solving
     if iscomplex
         b_f32 = ComplexF32.(cache.b)
     else
         b_f32 = Float32.(cache.b)
     end
-    
+
     if m > n
         Bc = copy(b_f32)
         aa_getrs!('N', A_lu.factors, A_lu.ipiv, Bc; info)
```
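The two `solve!` hunks above implement the mixed-precision path: downcast the operator and right-hand side to `Float32`/`ComplexF32`, factor and substitute at that precision via `aa_getrf!`/`aa_getrs!`, then return the result in the caller's precision. Below is a platform-independent sketch of the same idea using plain LinearAlgebra; `mixed_precision_solve` is a hypothetical helper, not the package's API.

```julia
# Minimal sketch of the mixed-precision pattern from the diff above.
using LinearAlgebra

function mixed_precision_solve(A::AbstractMatrix, b::AbstractVector)
    T32 = eltype(A) <: Complex ? ComplexF32 : Float32
    F = lu!(T32.(A))                     # factor once in reduced precision
    x32 = F \ T32.(b)                    # substitution also in Float32
    return convert.(promote_type(eltype(A), eltype(b)), x32)
end

A = rand(100, 100) + 10I
b = rand(100)
x = mixed_precision_solve(A, b)
norm(A * x - b) / norm(b)                # ~1e-6: Float32-level accuracy
```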
