Skip to content

Commit 1d25e0a

Browse files
Add autotune preference integration to default solver selection
- Add get_tuned_algorithm() helper function to load algorithm preferences
- Modify defaultalg() to check for tuned preferences before fallback heuristics
- Support size-based categorization (small/medium/large/big) matching autotune
- Handle Float32, Float64, ComplexF32, ComplexF64 element types
- Graceful fallback to existing heuristics when no preferences exist
- Maintain backward compatibility

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <[email protected]>
1 parent 2481164 commit 1d25e0a

File tree

1 file changed

+68
-1
lines changed

1 file changed

+68
-1
lines changed

src/default.jl

Lines changed: 68 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -173,6 +173,66 @@ end
173173

174174
userecursivefactorization(A) = false
175175

176+
"""
    get_tuned_algorithm(eltype_A, eltype_b, matrix_size)

Check if autotune preferences exist and return the appropriate algorithm
based on element type and matrix size. Returns `nothing` if no preference exists.
"""
function get_tuned_algorithm(eltype_A, eltype_b, matrix_size)
    # Preferences are keyed by element type; when the operator supplies no
    # eltype (A === nothing), fall back to the right-hand side's eltype.
    target_eltype = eltype_A === nothing ? string(eltype_b) : string(eltype_A)

    # Bucket the problem size into the same categories the autotuner uses.
    size_category = if matrix_size <= 128
        "small"
    elseif matrix_size <= 256
        "medium"
    elseif matrix_size <= 512
        "large"
    else
        "big"
    end

    # Look up the stored preference; `nothing` means no tuning was recorded.
    pref_key = "best_algorithm_$(target_eltype)_$(size_category)"
    algorithm_name = Preferences.@load_preference(pref_key, nothing)
    algorithm_name === nothing && return nothing

    # Map the stored algorithm name onto the DefaultAlgorithmChoice enum.
    # "RecursiveFactorization" is accepted as an alias for RFLUFactorization.
    known_choices = Dict(
        "LUFactorization" => DefaultAlgorithmChoice.LUFactorization,
        "RFLUFactorization" => DefaultAlgorithmChoice.RFLUFactorization,
        "RecursiveFactorization" => DefaultAlgorithmChoice.RFLUFactorization,
        "MKLLUFactorization" => DefaultAlgorithmChoice.MKLLUFactorization,
        "AppleAccelerateLUFactorization" => DefaultAlgorithmChoice.AppleAccelerateLUFactorization,
        "GenericLUFactorization" => DefaultAlgorithmChoice.GenericLUFactorization,
        "QRFactorization" => DefaultAlgorithmChoice.QRFactorization,
        "CholeskyFactorization" => DefaultAlgorithmChoice.CholeskyFactorization,
        "SVDFactorization" => DefaultAlgorithmChoice.SVDFactorization,
        "BunchKaufmanFactorization" => DefaultAlgorithmChoice.BunchKaufmanFactorization,
        "LDLtFactorization" => DefaultAlgorithmChoice.LDLtFactorization)

    choice = get(known_choices, algorithm_name, nothing)
    if choice === nothing
        # Unrecognized preference string: warn and let callers use heuristics.
        @warn "Unknown algorithm preference: $algorithm_name, falling back to heuristics"
    end
    return choice
end
235+
176236
# Allows A === nothing as a stand-in for dense matrix
177237
function defaultalg(A, b, assump::OperatorAssumptions{Bool})
178238
alg = if assump.issq
@@ -185,7 +245,14 @@ function defaultalg(A, b, assump::OperatorAssumptions{Bool})
185245
ArrayInterface.can_setindex(b) &&
186246
(__conditioning(assump) === OperatorCondition.IllConditioned ||
187247
__conditioning(assump) === OperatorCondition.WellConditioned)
188-
if length(b) <= 10
248+
249+
# First check if autotune preferences exist
250+
matrix_size = length(b)
251+
tuned_alg = get_tuned_algorithm(A === nothing ? nothing : eltype(A), eltype(b), matrix_size)
252+
253+
if tuned_alg !== nothing
254+
tuned_alg
255+
elseif length(b) <= 10
189256
DefaultAlgorithmChoice.GenericLUFactorization
190257
elseif appleaccelerate_isavailable() && b isa Array &&
191258
eltype(b) <: Union{Float32, Float64, ComplexF32, ComplexF64}

0 commit comments

Comments
 (0)