Commit e435f02

Add FastLapackInterface.jl's LU to LinearSolveAutotune (#714)
* Add optional FastLapackInterface.jl LU to LinearSolveAutotune
  - Added an `include_fastlapack` parameter to `autotune_setup` (default: false)
  - FastLUFactorization is only included when explicitly requested
  - Keeps FastLapackInterface as a weak dependency in LinearSolve.jl
  - Users must load FastLapackInterface.jl before using `include_fastlapack = true`
  - Updated documentation with usage examples

* Update Project.toml

* Add FastLapackInterface as a dependency of LinearSolveAutotune
  - FastLapackInterface is now a direct dependency of LinearSolveAutotune
  - Simplified the algorithm detection logic since it is always available
  - Updated documentation to reflect that users no longer need to load it separately
  - The `include_fastlapack` parameter still defaults to false for backward compatibility

---------

Co-authored-by: ChrisRackauckas <[email protected]>
1 parent 1adf243 commit e435f02
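
At a glance, this is the opt-in flow the commit enables. A minimal sketch, assuming only the `autotune_setup` keyword interface shown in the diffs below; the default run is unchanged.

```julia
using LinearSolveAutotune  # FastLapackInterface is now a direct dependency, so no extra `using` is needed

# Default behavior is unchanged: FastLUFactorization stays out of the benchmark set
results = autotune_setup()

# Opt in to also benchmarking FastLapackInterface.jl's LU
results = autotune_setup(include_fastlapack = true)
```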

File tree

3 files changed: +21 -5 lines

- lib/LinearSolveAutotune/Project.toml
- lib/LinearSolveAutotune/src/LinearSolveAutotune.jl
- lib/LinearSolveAutotune/src/algorithms.jl

lib/LinearSolveAutotune/Project.toml

Lines changed: 2 additions & 0 deletions

```diff
@@ -10,6 +10,7 @@ CPUSummary = "2a0fbf3d-bb9c-48f3-b0a9-814d99fd7ab9"
 CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
 DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
 Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
+FastLapackInterface = "29a986be-02c6-4525-aec4-84b980013641"
 GitHub = "bc5e4493-9b4d-5f90-b8aa-2b2bcaad7a26"
 LAPACK_jll = "51474c39-65e3-53ba-86ba-03b1b862ec14"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
@@ -36,6 +37,7 @@ CPUSummary = "0.2"
 CUDA = "5"
 DataFrames = "1"
 Dates = "1"
+FastLapackInterface = "2"
 GitHub = "5"
 LAPACK_jll = "3"
 LinearAlgebra = "1"
```

lib/LinearSolveAutotune/src/LinearSolveAutotune.jl

Lines changed: 10 additions & 3 deletions

````diff
@@ -36,6 +36,7 @@ using blis_jll
 using LAPACK_jll
 using CUDA
 using Metal
+using FastLapackInterface
 
 
 # Optional dependencies for telemetry and plotting
@@ -156,7 +157,8 @@ end
         samples::Int = 5,
         seconds::Float64 = 0.5,
         eltypes = (Float32, Float64, ComplexF32, ComplexF64),
-        skip_missing_algs::Bool = false)
+        skip_missing_algs::Bool = false,
+        include_fastlapack::Bool = false)
 
 Run a comprehensive benchmark of all available LU factorization methods and optionally:
 
@@ -179,6 +181,7 @@ Run a comprehensive benchmark of all available LU factorization methods and opti
 - `seconds::Float64 = 0.5`: Maximum time per benchmark
 - `eltypes = (Float32, Float64, ComplexF32, ComplexF64)`: Element types to benchmark
 - `skip_missing_algs::Bool = false`: If false, error when expected algorithms are missing; if true, warn instead
+- `include_fastlapack::Bool = false`: If true, includes FastLUFactorization in benchmarks
 
 # Returns
 
@@ -199,6 +202,9 @@ results = autotune_setup(sizes = [:small, :medium, :large, :big])
 # Large matrices only
 results = autotune_setup(sizes = [:large, :big], samples = 10, seconds = 1.0)
 
+# Include FastLapackInterface.jl algorithms
+results = autotune_setup(include_fastlapack = true)
+
 # After running autotune, share results (requires gh CLI or GitHub token)
 share_results(results)
 ```
@@ -209,7 +215,8 @@ function autotune_setup(;
         samples::Int = 5,
         seconds::Float64 = 0.5,
         eltypes = (Float64,),
-        skip_missing_algs::Bool = false)
+        skip_missing_algs::Bool = false,
+        include_fastlapack::Bool = false)
     @info "Starting LinearSolve.jl autotune setup..."
     @info "Configuration: sizes=$sizes, set_preferences=$set_preferences"
     @info "Element types to benchmark: $(join(eltypes, ", "))"
@@ -219,7 +226,7 @@ function autotune_setup(;
     @info "System detected: $(system_info["os"]) $(system_info["arch"]) with $(system_info["num_cores"]) cores"
 
     # Get available algorithms
-    cpu_algs, cpu_names = get_available_algorithms(; skip_missing_algs = skip_missing_algs)
+    cpu_algs, cpu_names = get_available_algorithms(; skip_missing_algs = skip_missing_algs, include_fastlapack = include_fastlapack)
     @info "Found $(length(cpu_algs)) CPU algorithms: $(join(cpu_names, ", "))"
 
     # Add GPU algorithms if available
````
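
For context on what the new flag actually exercises: `FastLUFactorization` is the LinearSolve.jl algorithm backed by FastLapackInterface's LU. Below is a hedged sketch of using it directly through LinearSolve.jl's standard `LinearProblem`/`solve` API; per the commit message, FastLapackInterface remains a weak dependency of LinearSolve.jl itself, so it is loaded explicitly here.

```julia
using LinearSolve
using FastLapackInterface  # still a weak dependency of LinearSolve.jl, so load it explicitly

A = rand(200, 200)
b = rand(200)
prob = LinearProblem(A, b)

# Solve with the algorithm that include_fastlapack = true adds to the benchmark set
sol = solve(prob, FastLUFactorization())
```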

lib/LinearSolveAutotune/src/algorithms.jl

Lines changed: 9 additions & 2 deletions

```diff
@@ -1,12 +1,13 @@
 # Algorithm detection and creation functions
 
 """
-    get_available_algorithms(; skip_missing_algs::Bool = false)
+    get_available_algorithms(; skip_missing_algs::Bool = false, include_fastlapack::Bool = false)
 
 Returns a list of available LU factorization algorithms based on the system and loaded packages.
 If skip_missing_algs=false, errors when expected algorithms are missing; if true, warns instead.
+If include_fastlapack=true, includes FastLUFactorization in benchmarks.
 """
-function get_available_algorithms(; skip_missing_algs::Bool = false)
+function get_available_algorithms(; skip_missing_algs::Bool = false, include_fastlapack::Bool = false)
     algs = []
     alg_names = String[]
@@ -68,6 +69,12 @@ function get_available_algorithms(; skip_missing_algs::Bool = false)
     push!(algs, SimpleLUFactorization())
     push!(alg_names, "SimpleLUFactorization")
 
+    # FastLapackInterface LU if requested (always available as dependency)
+    if include_fastlapack
+        push!(algs, FastLUFactorization())
+        push!(alg_names, "FastLUFactorization")
+    end
+
     return algs, alg_names
 end
```
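
To show how the returned `(algs, alg_names)` pair flows into the rest of the autotune run, here is a hypothetical consumption loop. The real benchmarking code in LinearSolveAutotune is more elaborate (multiple sizes, samples, and element types), so treat this purely as a sketch of the pattern.

```julia
using LinearSolve  # for LinearProblem / solve

# Hypothetical sketch; get_available_algorithms is an internal helper
algs, alg_names = get_available_algorithms(; include_fastlapack = true)
for (alg, name) in zip(algs, alg_names)
    prob = LinearProblem(rand(200, 200), rand(200))
    t = @elapsed solve(prob, alg)  # time one solve with this algorithm
    @info "Benchmarked $name" time = t
end
```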
