
Commit 5eda050

Add separate Preferences test group with FastLapack algorithm verification
This commit implements a comprehensive testing approach for the dual preference system by creating a separate CI test group that verifies algorithm selection before and after extension loading, specifically exercising the FastLapack preferences.

## New Test Architecture

### Separate Preferences Test Group
- Created `test/preferences.jl` with isolated preference testing
- Added "Preferences" to the CI matrix in `.github/workflows/Tests.yml`
- Added Preferences group logic to `test/runtests.jl`
- Removed preference tests from `default_algs.jl` to avoid package conflicts

### FastLapack Algorithm Selection Testing
- Tests the preference system with FastLUFactorization as the always_loaded algorithm
- Verifies behavior when RecursiveFactorization is not loaded (should use always_loaded)
- Tests extension-loading scenarios to validate the best_algorithm vs. always_loaded logic
- Uses FastLapack because it is slow and normally never chosen, which makes it a perfect test case

### Extension Loading Verification
- Tests algorithm selection before extension loading (baseline behavior)
- Tests conditional FastLapackInterface loading (always_loaded preference)
- Tests conditional RecursiveFactorization loading (best_algorithm preference)
- Verifies robust fallback when extensions are unavailable

## Key Test Scenarios

### Preference Behavior Testing
```julia
# Set preferences: RF as best, FastLU as always_loaded
best_algorithm_Float64_medium = "RFLUFactorization"
best_always_loaded_Float64_medium = "FastLUFactorization"
```

Test progression:
1. No extensions → use heuristics
2. FastLapack loaded → should use FastLU (always_loaded)
3. RecursiveFactorization loaded → should use RF (best_algorithm)

### Algorithm Choice Verification
- ✅ Tests explicit algorithm selection with `defaultalg()`
- ✅ Verifies the tiny-matrix override (≤10 elements → GenericLU)
- ✅ Tests size-boundary logic across multiple matrix sizes
- ✅ Confirms the preference storage and retrieval infrastructure

## CI Integration

### New Test Group Structure
- **Core**: Basic algorithm tests without preference complexity
- **Preferences**: Isolated preference-system testing with extension loading
- **All**: Excludes Preferences to avoid package-loading conflicts

### Clean Test Isolation
- The Preferences test group runs independently with minimal package dependencies
- Proper preference cleanup ensures no state leaks between tests
- Conditional extension loading handles missing packages gracefully

## Expected Benefits

1. **Robust Preference Testing**: An isolated environment tests actual preference behavior
2. **Extension Loading Verification**: Tests before/after extension-loading scenarios
3. **Clean CI Separation**: Avoids package conflicts in the main test suite
4. **FastLapack Validation**: Uses a naturally slow algorithm to verify that preferences take effect

This architecture provides comprehensive testing of the dual preference system while maintaining clean separation and avoiding CI complexity issues.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <[email protected]>
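The new `test/preferences.jl` is one of the four changed files but is not reproduced in this excerpt. As a rough illustration of the flow described above (set the dual preferences, check the default before any extension is loaded, then conditionally load FastLapackInterface), a minimal sketch could look like the following; the preference keys, the 150×150 test size, and the package-availability check are assumptions for illustration, not the committed code:

```julia
using Test, LinearAlgebra, Preferences, LinearSolve

# Dual preferences: RF as the tuned best, FastLU as the best always-loaded fallback.
Preferences.set_preferences!(LinearSolve,
    "best_algorithm_Float64_medium" => "RFLUFactorization"; force = true)
Preferences.set_preferences!(LinearSolve,
    "best_always_loaded_Float64_medium" => "FastLUFactorization"; force = true)

A = rand(150, 150) + I(150)
b = rand(150)
prob = LinearProblem(A, b)

@testset "Baseline: no extensions loaded" begin
    # Without RecursiveFactorization/FastLapackInterface the default falls back to heuristics.
    alg = LinearSolve.defaultalg(A, b, LinearSolve.OperatorAssumptions(true))
    @test alg isa LinearSolve.DefaultLinearSolver
    @test solve(prob).retcode == ReturnCode.Success
end

# Conditionally load FastLapackInterface so FastLUFactorization becomes usable.
if Base.find_package("FastLapackInterface") !== nothing
    using FastLapackInterface
    @testset "always_loaded preference (FastLU)" begin
        @test solve(prob, FastLUFactorization()).retcode == ReturnCode.Success
    end
end

# Clean up so no preference state leaks into other test groups.
for key in ("best_algorithm_Float64_medium", "best_always_loaded_Float64_medium")
    Preferences.delete_preferences!(LinearSolve, key; force = true)
end
```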
1 parent 9484b72 commit 5eda050

4 files changed: +274 / -270 lines changed

.github/workflows/Tests.yml

Lines changed: 1 addition & 0 deletions

@@ -37,6 +37,7 @@ jobs:
         - "LinearSolvePardiso"
         - "NoPre"
         - "LinearSolveAutotune"
+        - "Preferences"
       os:
         - ubuntu-latest
         - macos-latest
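The corresponding group logic in `test/runtests.jl` is also among the four changed files but is not shown here. Assuming the usual SciML-style dispatch on a `GROUP` environment variable (and that SafeTestsets is already a test dependency), the wiring might look roughly like this sketch; the group names other than "Preferences" are placeholders:

```julia
using Test, SafeTestsets

# Hypothetical sketch; the real runtests.jl change is not part of this excerpt.
const GROUP = get(ENV, "GROUP", "All")

if GROUP == "Preferences"
    # Isolated preference tests run on their own to avoid package-loading conflicts.
    @time @safetestset "Preferences" begin
        include("preferences.jl")
    end
elseif GROUP == "All" || GROUP == "Core"
    # The existing groups no longer include the preference tests removed below.
    @time @safetestset "Default Algorithms" begin
        include("default_algs.jl")
    end
end
```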

test/default_algs.jl

Lines changed: 0 additions & 270 deletions
@@ -171,273 +171,3 @@ sol = solve(prob,
 sol = solve(prob)
 @test sol.u ≈ svd(A)\b
 
-# Test that dual preference system integration works correctly
-@testset "Autotune Dual Preference System Integration" begin
-    using Preferences
-
-    # Clear any existing preferences
-    target_eltypes = ["Float32", "Float64", "ComplexF32", "ComplexF64"]
-    size_categories = ["tiny", "small", "medium", "large", "big"]
-
-    for eltype in target_eltypes
-        for size_cat in size_categories
-            for pref_type in ["best_algorithm", "best_always_loaded"]
-                pref_key = "$(pref_type)_$(eltype)_$(size_cat)"
-                if Preferences.has_preference(LinearSolve, pref_key)
-                    Preferences.delete_preferences!(LinearSolve, pref_key; force = true)
-                end
-            end
-        end
-    end
-
-    @testset "Dual Preference Storage and Retrieval" begin
-        # Test that we can store and retrieve both types of preferences
-        Preferences.set_preferences!(LinearSolve, "best_algorithm_Float64_medium" => "RFLUFactorization"; force = true)
-        Preferences.set_preferences!(LinearSolve, "best_always_loaded_Float64_medium" => "MKLLUFactorization"; force = true)
-
-        # Verify preference storage is correct
-        @test Preferences.load_preference(LinearSolve, "best_algorithm_Float64_medium", nothing) == "RFLUFactorization"
-        @test Preferences.load_preference(LinearSolve, "best_always_loaded_Float64_medium", nothing) == "MKLLUFactorization"
-
-        # Test with different element types and sizes
-        Preferences.set_preferences!(LinearSolve, "best_algorithm_Float32_small" => "LUFactorization"; force = true)
-        Preferences.set_preferences!(LinearSolve, "best_always_loaded_Float32_small" => "LUFactorization"; force = true)
-
-        @test Preferences.load_preference(LinearSolve, "best_algorithm_Float32_small", nothing) == "LUFactorization"
-        @test Preferences.load_preference(LinearSolve, "best_always_loaded_Float32_small", nothing) == "LUFactorization"
-    end
-
-    @testset "Default Algorithm Selection with Dual Preferences" begin
-        # Test that default solver works correctly when preferences are set
-        # This verifies the infrastructure is ready for the preference integration
-
-        test_scenarios = [
-            (Float64, 150, "RFLUFactorization", "LUFactorization"),   # medium size
-            (Float32, 80, "LUFactorization", "LUFactorization"),      # small size
-            (ComplexF64, 100, "LUFactorization", "LUFactorization")   # small size, conservative
-        ]
-
-        for (eltype, matrix_size, best_alg, fallback_alg) in test_scenarios
-            # Determine size category for preferences
-            size_category = if matrix_size <= 128
-                "small"
-            elseif matrix_size <= 256
-                "medium"
-            elseif matrix_size <= 512
-                "large"
-            else
-                "big"
-            end
-
-            # Set preferences for this scenario
-            eltype_str = string(eltype)
-            Preferences.set_preferences!(LinearSolve, "best_algorithm_$(eltype_str)_$(size_category)" => best_alg; force = true)
-            Preferences.set_preferences!(LinearSolve, "best_always_loaded_$(eltype_str)_$(size_category)" => fallback_alg; force = true)
-
-            # Verify preferences are stored correctly
-            @test Preferences.has_preference(LinearSolve, "best_algorithm_$(eltype_str)_$(size_category)")
-            @test Preferences.has_preference(LinearSolve, "best_always_loaded_$(eltype_str)_$(size_category)")
-
-            stored_best = Preferences.load_preference(LinearSolve, "best_algorithm_$(eltype_str)_$(size_category)", nothing)
-            stored_fallback = Preferences.load_preference(LinearSolve, "best_always_loaded_$(eltype_str)_$(size_category)", nothing)
-
-            @test stored_best == best_alg
-            @test stored_fallback == fallback_alg
-
-            # Create test problem and verify it can be solved
-            A = rand(eltype, matrix_size, matrix_size) + I(matrix_size)
-            b = rand(eltype, matrix_size)
-            prob = LinearProblem(A, b)
-
-            # Test that default solver works (infrastructure ready for preference integration)
-            sol = solve(prob)
-            @test sol.retcode == ReturnCode.Success
-            @test norm(A * sol.u - b) < (eltype <: AbstractFloat ? 1e-6 : 1e-8)
-
-            # Test that preferred algorithms work individually
-            if best_alg == "LUFactorization"
-                sol_best = solve(prob, LUFactorization())
-                @test sol_best.retcode == ReturnCode.Success
-                @test norm(A * sol_best.u - b) < (eltype <: AbstractFloat ? 1e-6 : 1e-8)
-            elseif best_alg == "RFLUFactorization" && LinearSolve.userecursivefactorization(A)
-                sol_best = solve(prob, RFLUFactorization())
-                @test sol_best.retcode == ReturnCode.Success
-                @test norm(A * sol_best.u - b) < (eltype <: AbstractFloat ? 1e-6 : 1e-8)
-            end
-
-            if fallback_alg == "LUFactorization"
-                sol_fallback = solve(prob, LUFactorization())
-                @test sol_fallback.retcode == ReturnCode.Success
-                @test norm(A * sol_fallback.u - b) < (eltype <: AbstractFloat ? 1e-6 : 1e-8)
-            end
-        end
-    end
-
-    @testset "Actual Algorithm Choice Verification" begin
-        # Test that the right solver is actually chosen based on the implemented logic
-        # This verifies the algorithm selection behavior that will use preferences
-
-        # Test scenario 1: Tiny matrix override (should always choose GenericLU regardless of preferences)
-        A_tiny = rand(Float64, 8, 8) + I(8)  # length(b) <= 10 triggers override
-        b_tiny = rand(Float64, 8)
-
-        chosen_alg_tiny = LinearSolve.defaultalg(A_tiny, b_tiny, LinearSolve.OperatorAssumptions(true))
-        @test chosen_alg_tiny.alg === LinearSolve.DefaultAlgorithmChoice.GenericLUFactorization
-
-        # Test that tiny problems work correctly
-        prob_tiny = LinearProblem(A_tiny, b_tiny)
-        sol_tiny = solve(prob_tiny)
-        @test sol_tiny.retcode == ReturnCode.Success
-        @test norm(A_tiny * sol_tiny.u - b_tiny) < 1e-10
-
-        # Test scenario 2: Medium-sized matrix (should use tuned algorithm logic or fallback to heuristics)
-        A_medium = rand(Float64, 150, 150) + I(150)
-        b_medium = rand(Float64, 150)
-
-        chosen_alg_medium = LinearSolve.defaultalg(A_medium, b_medium, LinearSolve.OperatorAssumptions(true))
-        @test isa(chosen_alg_medium, LinearSolve.DefaultLinearSolver)
-        @test chosen_alg_medium.alg isa LinearSolve.DefaultAlgorithmChoice.T
-
-        # The chosen algorithm should be one of the expected defaults when no preferences set
-        expected_choices = [
-            LinearSolve.DefaultAlgorithmChoice.RFLUFactorization,
-            LinearSolve.DefaultAlgorithmChoice.MKLLUFactorization,
-            LinearSolve.DefaultAlgorithmChoice.AppleAccelerateLUFactorization,
-            LinearSolve.DefaultAlgorithmChoice.LUFactorization
-        ]
-        @test chosen_alg_medium.alg in expected_choices
-
-        # Test that the chosen algorithm can solve the problem
-        prob_medium = LinearProblem(A_medium, b_medium)
-        sol_medium = solve(prob_medium)
-        @test sol_medium.retcode == ReturnCode.Success
-        @test norm(A_medium * sol_medium.u - b_medium) < 1e-8
-
-        # Test scenario 3: Large matrix behavior
-        A_large = rand(Float64, 600, 600) + I(600)
-        b_large = rand(Float64, 600)
-
-        chosen_alg_large = LinearSolve.defaultalg(A_large, b_large, LinearSolve.OperatorAssumptions(true))
-        @test isa(chosen_alg_large, LinearSolve.DefaultLinearSolver)
-
-        # For large matrices, should typically choose MKL, AppleAccelerate, or standard LU
-        large_expected_choices = [
-            LinearSolve.DefaultAlgorithmChoice.MKLLUFactorization,
-            LinearSolve.DefaultAlgorithmChoice.AppleAccelerateLUFactorization,
-            LinearSolve.DefaultAlgorithmChoice.LUFactorization
-        ]
-        @test chosen_alg_large.alg in large_expected_choices
-
-        # Verify the large problem can be solved
-        prob_large = LinearProblem(A_large, b_large)
-        sol_large = solve(prob_large)
-        @test sol_large.retcode == ReturnCode.Success
-        @test norm(A_large * sol_large.u - b_large) < 1e-8
-
-        # Test scenario 4: Different element types
-        # Test Float32 medium
-        A_f32 = rand(Float32, 150, 150) + I(150)
-        b_f32 = rand(Float32, 150)
-
-        chosen_alg_f32 = LinearSolve.defaultalg(A_f32, b_f32, LinearSolve.OperatorAssumptions(true))
-        @test isa(chosen_alg_f32, LinearSolve.DefaultLinearSolver)
-        @test chosen_alg_f32.alg in expected_choices
-
-        prob_f32 = LinearProblem(A_f32, b_f32)
-        sol_f32 = solve(prob_f32)
-        @test sol_f32.retcode == ReturnCode.Success
-        @test norm(A_f32 * sol_f32.u - b_f32) < 1e-6
-
-        # Test ComplexF64 medium
-        A_c64 = rand(ComplexF64, 100, 100) + I(100)
-        b_c64 = rand(ComplexF64, 100)
-
-        chosen_alg_c64 = LinearSolve.defaultalg(A_c64, b_c64, LinearSolve.OperatorAssumptions(true))
-        @test isa(chosen_alg_c64, LinearSolve.DefaultLinearSolver)
-
-        prob_c64 = LinearProblem(A_c64, b_c64)
-        sol_c64 = solve(prob_c64)
-        @test sol_c64.retcode == ReturnCode.Success
-        @test norm(A_c64 * sol_c64.u - b_c64) < 1e-8
-    end
-
-    @testset "Size Category Logic Verification" begin
-        # Test that the size categorization logic matches expectations
-
-        # Test the size boundaries that determine algorithm choice
-        size_test_cases = [
-            (5, LinearSolve.DefaultAlgorithmChoice.GenericLUFactorization),    # Tiny override
-            (10, LinearSolve.DefaultAlgorithmChoice.GenericLUFactorization),   # Tiny override
-            (50, nothing),    # Medium - depends on system/preferences
-            (150, nothing),   # Medium - depends on system/preferences
-            (300, nothing),   # Large - depends on system/preferences
-            (600, nothing)    # Large - depends on system/preferences
-        ]
-
-        for (size, expected_alg) in size_test_cases
-            A = rand(Float64, size, size) + I(size)
-            b = rand(Float64, size)
-
-            chosen_alg = LinearSolve.defaultalg(A, b, LinearSolve.OperatorAssumptions(true))
-
-            if expected_alg !== nothing
-                # For tiny matrices, should always get GenericLUFactorization
-                @test chosen_alg.alg === expected_alg
-            else
-                # For larger matrices, should get a reasonable choice
-                @test isa(chosen_alg, LinearSolve.DefaultLinearSolver)
-                @test chosen_alg.alg isa LinearSolve.DefaultAlgorithmChoice.T
-            end
-
-            # Test that all choices can solve problems
-            prob = LinearProblem(A, b)
-            sol = solve(prob)
-            @test sol.retcode == ReturnCode.Success
-            @test norm(A * sol.u - b) < (size <= 10 ? 1e-12 : 1e-8)
-        end
-    end
-
-    @testset "Preference System Robustness" begin
-        # Test that default solver remains robust with invalid preferences
-
-        # Set invalid preferences
-        Preferences.set_preferences!(LinearSolve, "best_algorithm_Float64_medium" => "NonExistentAlgorithm"; force = true)
-        Preferences.set_preferences!(LinearSolve, "best_always_loaded_Float64_medium" => "AnotherNonExistentAlgorithm"; force = true)
-
-        # Create test problem
-        A = rand(Float64, 150, 150) + I(150)
-        b = rand(Float64, 150)
-        prob = LinearProblem(A, b)
-
-        # Should still solve successfully using existing heuristics
-        sol = solve(prob)
-        @test sol.retcode == ReturnCode.Success
-        @test norm(A * sol.u - b) < 1e-8
-
-        # Test that preference infrastructure doesn't break default behavior
-        @test Preferences.has_preference(LinearSolve, "best_algorithm_Float64_medium")
-        @test Preferences.has_preference(LinearSolve, "best_always_loaded_Float64_medium")
-    end
-
-    # Clean up all test preferences and reset to original state
-    for eltype in target_eltypes
-        for size_cat in size_categories
-            for pref_type in ["best_algorithm", "best_always_loaded"]
-                pref_key = "$(pref_type)_$(eltype)_$(size_cat)"
-                if Preferences.has_preference(LinearSolve, pref_key)
-                    Preferences.delete_preferences!(LinearSolve, pref_key; force = true)
-                end
-            end
-        end
-    end
-
-    # Reset MKL preference to original state if it was modified
-    if Preferences.has_preference(LinearSolve, "LoadMKL_JLL")
-        Preferences.delete_preferences!(LinearSolve, "LoadMKL_JLL"; force = true)
-    end
-
-    # Reset autotune timestamp if it was set
-    if Preferences.has_preference(LinearSolve, "autotune_timestamp")
-        Preferences.delete_preferences!(LinearSolve, "autotune_timestamp"; force = true)
-    end
-end
