Skip to content

Commit f0fcaaa

Browse files
Add trim tests for LUFactorization, MKLLUFactorization, and RFLUFactorization
This PR adds trimming tests to LinearSolve.jl, similar to PR SciML#734 in NonlinearSolve.jl. ## Summary - Add `test/trim/` directory with infrastructure for testing trimming - Add tests for LUFactorization, MKLLUFactorization, and RFLUFactorization - Add "Trim" test group to CI configuration - Tests only run on Julia 1.12+ (when trimming was introduced) ## Test Structure The tests verify: 1. LUFactorization works correctly with pre-initialized cache 2. MKLLUFactorization works correctly with pre-initialized cache 3. RFLUFactorization works correctly with pre-initialized cache 4. Each implementation can be successfully compiled with \`juliac --trim\` ## Notes - Trim tests only run on Julia 1.12+ - CI excludes trim tests on LTS and pre-release versions - Tests use a custom environment in \`test/trim/\` to avoid conflicts 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <[email protected]>
1 parent 5df1b29 commit f0fcaaa

File tree

12 files changed

+215
-4
lines changed

12 files changed

+215
-4
lines changed

.github/workflows/Tests.yml

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,10 +38,17 @@ jobs:
3838
- "NoPre"
3939
- "LinearSolveAutotune"
4040
- "Preferences"
41+
- "Trim"
4142
os:
4243
- ubuntu-latest
4344
- macos-latest
4445
- windows-latest
46+
exclude:
47+
# Don't run trim tests on Julia versions below 1.12
48+
- group: Trim
49+
version: "lts"
50+
- group: Trim
51+
version: "pre"
4552
uses: "SciML/.github/.github/workflows/tests.yml@v1"
4653
with:
4754
group: "${{ matrix.group }}"

Project.toml

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
1111
EnumX = "4e289a0a-7415-4d19-859d-a7e5c4648b56"
1212
GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527"
1313
InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
14+
JuliaFormatter = "98e50ef6-434e-11e9-1051-2b60c6c9e899"
1415
Krylov = "ba0b0d4f-ebba-5204-a429-3ac8c609bfb7"
1516
LazyArrays = "5078a376-72f3-5289-bfd5-ec5146d43c02"
1617
Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
@@ -41,13 +42,13 @@ EnzymeCore = "f151be2c-9106-41f4-ab19-57ee4f262869"
4142
FastAlmostBandedMatrices = "9d29842c-ecb8-4973-b1e9-a27b1157504e"
4243
FastLapackInterface = "29a986be-02c6-4525-aec4-84b980013641"
4344
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
44-
Mooncake = "da2b9cff-9c12-43a0-ae48-6db2b0edb7d6"
4545
HYPRE = "b5ffcf37-a2bd-41ab-a3da-4bd9bc8ad771"
4646
IterativeSolvers = "42fd0dbc-a981-5370-80f2-aaf504508153"
4747
KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
4848
KrylovKit = "0b1a1467-8014-51b9-945f-bf0ae24f4b77"
4949
LAPACK_jll = "51474c39-65e3-53ba-86ba-03b1b862ec14"
5050
Metal = "dde4c033-4e86-420c-a63e-0dd931031962"
51+
Mooncake = "da2b9cff-9c12-43a0-ae48-6db2b0edb7d6"
5152
Pardiso = "46dd5b70-b6fb-5a00-ae2d-e8fea33afaf2"
5253
RecursiveFactorization = "f2c3362d-daeb-58d1-803e-2bc74f2840b4"
5354
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
@@ -90,8 +91,8 @@ CUDSS = "0.4, 0.6.1"
9091
CUSOLVERRF = "0.2.6"
9192
ChainRulesCore = "1.25"
9293
CliqueTrees = "1.11.0"
93-
ConcreteStructs = "0.2.3"
9494
ComponentArrays = "0.15.29"
95+
ConcreteStructs = "0.2.3"
9596
DocStringExtensions = "0.9.3"
9697
EnumX = "1.0.4"
9798
EnzymeCore = "0.8.5"
@@ -104,6 +105,7 @@ GPUArraysCore = "0.2"
104105
HYPRE = "1.7"
105106
InteractiveUtils = "1.10"
106107
IterativeSolvers = "0.9.4"
108+
JuliaFormatter = "2.2.0"
107109
KernelAbstractions = "0.9.30"
108110
Krylov = "0.10"
109111
KrylovKit = "0.10"
@@ -129,8 +131,8 @@ RecursiveFactorization = "0.2.26"
129131
Reexport = "1.2.2"
130132
SafeTestsets = "0.1"
131133
SciMLBase = "2.70"
132-
SciMLOperators = "1.7.1"
133134
SciMLLogging = "1.3.1"
135+
SciMLOperators = "1.7.1"
134136
Setfield = "1.1.1"
135137
SparseArrays = "1.10"
136138
Sparspak = "0.3.9"
@@ -163,13 +165,13 @@ KrylovKit = "0b1a1467-8014-51b9-945f-bf0ae24f4b77"
163165
KrylovPreconditioners = "45d422c2-293f-44ce-8315-2cb988662dec"
164166
MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195"
165167
Metal = "dde4c033-4e86-420c-a63e-0dd931031962"
168+
Mooncake = "da2b9cff-9c12-43a0-ae48-6db2b0edb7d6"
166169
MultiFloats = "bdf0d083-296b-4888-a5b6-7498122e68a5"
167170
Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
168171
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
169172
RecursiveArrayTools = "731186ca-8d62-57ce-b412-fbd966d074cd"
170173
RecursiveFactorization = "f2c3362d-daeb-58d1-803e-2bc74f2840b4"
171174
SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
172-
Mooncake = "da2b9cff-9c12-43a0-ae48-6db2b0edb7d6"
173175
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
174176
Sparspak = "e56a9233-b9d6-4f03-8d0f-1825330902ac"
175177
StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3"

test/runtests.jl

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -73,3 +73,10 @@ end
7373
if Base.Sys.islinux() && (GROUP == "All" || GROUP == "LinearSolveHYPRE") && HAS_EXTENSIONS
7474
@time @safetestset "LinearSolveHYPRE" include("hypretests.jl")
7575
end
76+
77+
if GROUP == "Trim" && VERSION >= v"1.12.0"
78+
Pkg.activate("trim")
79+
Pkg.develop(PackageSpec(path = dirname(@__DIR__)))
80+
Pkg.instantiate()
81+
@time @safetestset "Trim Tests" include("trim/runtests.jl")
82+
end

test/trim/Project.toml

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
name = "TrimTest"
2+
uuid = "e59a8f5e-4b8c-4d8e-9c8e-8e8e8e8e8e8e"
3+
version = "0.1.0"
4+
5+
[deps]
6+
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
7+
LinearSolve = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae"
8+
RecursiveFactorization = "f2c3362d-daeb-58d1-803e-2bc74f2840b4"
9+
SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462"
10+
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
11+
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
12+
13+
[extras]
14+
JET = "c3a54625-cd67-489e-a8e7-0a5a0ff4e31b"
15+
SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
16+
17+
[targets]
18+
test = ["JET", "SafeTestsets"]
19+
20+
[compat]
21+
LinearSolve = "3"
22+
RecursiveFactorization = "0.2"
23+
SciMLBase = "2"
24+
StaticArrays = "1"
25+
julia = "1.10"

test/trim/linear_lu.jl

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
module TestLUFactorization
2+
using LinearSolve
3+
using LinearAlgebra
4+
using StaticArrays
5+
import SciMLBase
6+
7+
# Define a simple linear problem with a dense matrix
8+
const A_matrix = [4.0 1.0; 1.0 3.0]
9+
const b_vector = [1.0, 2.0]
10+
11+
const alg = LUFactorization()
12+
const prob = LinearProblem(A_matrix, b_vector)
13+
const cache = init(prob, alg)
14+
15+
function solve_linear(x)
16+
# Create a new problem with a modified b vector
17+
b_new = [x, 2.0 * x]
18+
reinit!(cache, b_new)
19+
solve!(cache)
20+
return cache
21+
end
22+
end

test/trim/linear_mkl.jl

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
module TestMKLLUFactorization
2+
using LinearSolve
3+
using LinearAlgebra
4+
using StaticArrays
5+
import SciMLBase
6+
7+
# Define a simple linear problem with a dense matrix
8+
const A_matrix = [4.0 1.0; 1.0 3.0]
9+
const b_vector = [1.0, 2.0]
10+
11+
const alg = MKLLUFactorization()
12+
const prob = LinearProblem(A_matrix, b_vector)
13+
const cache = init(prob, alg)
14+
15+
function solve_linear(x)
16+
# Create a new problem with a modified b vector
17+
b_new = [x, 2.0 * x]
18+
reinit!(cache, b_new)
19+
solve!(cache)
20+
return cache
21+
end
22+
end

test/trim/linear_rf.jl

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
module TestRFLUFactorization
2+
using LinearSolve
3+
using LinearAlgebra
4+
using StaticArrays
5+
using RecursiveFactorization
6+
import SciMLBase
7+
8+
# Define a simple linear problem with a dense matrix
9+
const A_matrix = [4.0 1.0; 1.0 3.0]
10+
const b_vector = [1.0, 2.0]
11+
12+
const alg = RFLUFactorization()
13+
const prob = LinearProblem(A_matrix, b_vector)
14+
const cache = init(prob, alg)
15+
16+
function solve_linear(x)
17+
# Create a new problem with a modified b vector
18+
b_new = [x, 2.0 * x]
19+
reinit!(cache, b_new)
20+
solve!(cache)
21+
return cache
22+
end
23+
end

test/trim/main_lu.jl

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
using TrimTest
2+
3+
function (@main)(argv::Vector{String})::Cint
4+
x = parse(Float64, argv[1])
5+
sol = TrimTest.TestLUFactorization.solve_linear(x)
6+
println(Core.stdout, sum(sol.u))
7+
return 0
8+
end

test/trim/main_mkl.jl

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
using TrimTest
2+
3+
function (@main)(argv::Vector{String})::Cint
4+
x = parse(Float64, argv[1])
5+
sol = TrimTest.TestMKLLUFactorization.solve_linear(x)
6+
println(Core.stdout, sum(sol.u))
7+
return 0
8+
end

test/trim/main_rf.jl

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
using TrimTest
2+
3+
function (@main)(argv::Vector{String})::Cint
4+
x = parse(Float64, argv[1])
5+
sol = TrimTest.TestRFLUFactorization.solve_linear(x)
6+
println(Core.stdout, sum(sol.u))
7+
return 0
8+
end

0 commit comments

Comments
 (0)