Commit 569050e

Allow LinearSolve v3 (#153)
* allow LinearSolve v3
* use KLUFactorization() instead of LUFactorization()
* remove bandaid
* remove debugging statements
* fix warnings related to alias
* bump compat of RecursiveFactorization to v0.2.14
* use KLUFactorization only in test
* bump compat of RecursiveFactorization.jl to v0.2.23
* remove SciMLOperators again
* fix change of sparsity pattern also in other tests
* fix
* finally fixing it
* use same pattern as elsewhere
* fix keywords
* bump compat bounds to fix downgrade
* use algs
* bump compat of OrdinaryDiffEqRosenbrock.jl to v1.5
* remove accidentally pushed file again
* try Downgrade with backported fix of SimpleNonlinearSolve.jl
* bump compats again; Downgrade still broken
* allow LinearSolve v2.39.1
* allow SciMLBase.jl v2.68
* require SciMLBase.jl v2.78 again
* require LinearSolve.jl v3.7.1
* fix
* drop support for LinearSolve.jl v2
1 parent d8a3c61 commit 569050e
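
For context before the per-file diffs: LinearSolve.jl v3 replaces the `alias_A`/`alias_b` keywords of `init` with a single `alias` keyword taking a `LinearSolve.LinearAliasSpecifier`. A minimal sketch of the migration, assuming a toy sparse system for illustration (the package passes its production matrix `P` and right-hand side instead):

```julia
using LinearSolve, SparseArrays

# Illustrative system; KLUFactorization expects a sparse matrix.
A = sparse([2.0 1.0; 1.0 3.0])
b = [1.0, 2.0]
linprob = LinearProblem(A, b)

# LinearSolve v2 style (dropped by this commit):
#   init(linprob, KLUFactorization(), alias_A = true, alias_b = true)
# LinearSolve v3 style, as used throughout this diff:
linsolve = init(linprob, KLUFactorization(),
                alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
                                                         alias_b = true),
                assumptions = LinearSolve.OperatorAssumptions(true))
sol = solve!(linsolve)
```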

7 files changed (+61 additions, −49 deletions)


.github/workflows/Downgrade.yml

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@ jobs:
       - uses: julia-actions/cache@v2
       - uses: julia-actions/julia-downgrade-compat@v1
         with:
-          skip: LinearAlgebra,SparseArrays,Statistics,Test,SciMLBase,SciMLOperators
+          skip: LinearAlgebra,SparseArrays,Statistics,Test
           projects: ., test
       - uses: julia-actions/julia-buildpkg@v1
         env:

Project.toml

Lines changed: 2 additions & 4 deletions
@@ -12,7 +12,6 @@ OrdinaryDiffEqCore = "bbf590c4-e513-4bbe-9b18-05decba2e5d8"
 RecipesBase = "3cdcf5f2-1ef4-517c-9805-6587b60abb01"
 Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
 SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462"
-SciMLOperators = "c0aeaf25-5076-4817-a8d5-81caf7dfa961"
 SimpleUnPack = "ce78b400-467f-4804-87d8-8f486da07d0a"
 SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
 StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
@@ -22,13 +21,12 @@ SymbolicIndexingInterface = "2efcf032-c050-4f8e-a9bb-153293bab1f5"
 [compat]
 FastBroadcast = "0.3.5"
 LinearAlgebra = "1"
-LinearSolve = "2.32"
+LinearSolve = "3.7.1"
 MuladdMacro = "0.2.4"
 OrdinaryDiffEqCore = "1.16"
 RecipesBase = "1.3.4"
 Reexport = "1.2.2"
-SciMLBase = "2.68 - 2.77"
-SciMLOperators = "0.3 - 0.3.12"
+SciMLBase = "2.78"
 SimpleUnPack = "1"
 SparseArrays = "1"
 StaticArrays = "1.9.7"

docs/Project.toml

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ DiffEqDevTools = "2.45.1"
 Documenter = "1"
 InteractiveUtils = "1"
 LinearAlgebra = "1"
-LinearSolve = "2.32"
+LinearSolve = "3.7.1"
 OrdinaryDiffEqFIRK = "1.7"
 OrdinaryDiffEqLowOrderRK = "1.2"
 OrdinaryDiffEqRosenbrock = "1.4"

src/mprk.jl

Lines changed: 19 additions & 7 deletions
@@ -357,7 +357,9 @@ function alg_cache(alg::MPE, u, rate_prototype, ::Type{uEltypeNoUnits},
         # as well as to store the system matrix of the linear system
         # Right hand side of linear system is always uprev
         linprob = LinearProblem(P, _vec(uprev))
-        linsolve = init(linprob, alg.linsolve, alias_A = true, alias_b = true,
+        linsolve = init(linprob, alg.linsolve,
+                        alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
+                                                                 alias_b = true),
                         assumptions = LinearSolve.OperatorAssumptions(true))

         MPEConservativeCache(P, σ, tab, linsolve)
@@ -366,7 +368,9 @@ function alg_cache(alg::MPE, u, rate_prototype, ::Type{uEltypeNoUnits},
         # We use P to store the evaluation of the PDS
         # as well as to store the system matrix of the linear system
         linprob = LinearProblem(P, _vec(linsolve_rhs))
-        linsolve = init(linprob, alg.linsolve, alias_A = true, alias_b = true,
+        linsolve = init(linprob, alg.linsolve,
+                        alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
+                                                                 alias_b = true),
                         assumptions = LinearSolve.OperatorAssumptions(true))

         MPECache(P, similar(u), σ, tab, linsolve_rhs, linsolve)
@@ -664,15 +668,19 @@ function alg_cache(alg::MPRK22, u, rate_prototype, ::Type{uEltypeNoUnits},
         # not be altered, since it is needed to compute the adaptive time step
         # size.
         linprob = LinearProblem(P2, _vec(tmp))
-        linsolve = init(linprob, alg.linsolve, alias_A = true, alias_b = true,
+        linsolve = init(linprob, alg.linsolve,
+                        alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
+                                                                 alias_b = true),
                         assumptions = LinearSolve.OperatorAssumptions(true))

         MPRK22ConservativeCache(tmp, P, P2, σ,
                                 tab, #MPRK22ConstantCache
                                 linsolve)
     elseif f isa PDSFunction
         linprob = LinearProblem(P2, _vec(tmp))
-        linsolve = init(linprob, alg.linsolve, alias_A = true, alias_b = true,
+        linsolve = init(linprob, alg.linsolve,
+                        alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
+                                                                 alias_b = true),
                         assumptions = LinearSolve.OperatorAssumptions(true))

         MPRK22Cache(tmp, P, P2,
@@ -987,7 +995,7 @@ You can optionally choose the linear solver to be used by passing an
 algorithm from [LinearSolve.jl](https://github.com/SciML/LinearSolve.jl)
 as keyword argument `linsolve`.
 You can also choose the parameter `small_constant` which is added to all Patankar-weight denominators
-to avoid divisions by zero. To display the default value for data type `type` evaluate
+to avoid divisions by zero. To display the default value for data type `type` evaluate
 `MPRK43II(gamma).small_constant_function(type)`, where `type` can be, e.g.,
 `Float64`.
@@ -1262,7 +1270,9 @@ function alg_cache(alg::Union{MPRK43I, MPRK43II}, u, rate_prototype, ::Type{uElt
         # not be altered, since it is needed to compute the adaptive time step
         # size.
         linprob = LinearProblem(P3, _vec(tmp))
-        linsolve = init(linprob, alg.linsolve, alias_A = true, alias_b = true,
+        linsolve = init(linprob, alg.linsolve,
+                        alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
+                                                                 alias_b = true),
                         assumptions = LinearSolve.OperatorAssumptions(true))
         MPRK43ConservativeCache(tmp, tmp2, P, P2, P3, σ, tab, linsolve)
     elseif f isa PDSFunction
@@ -1271,7 +1281,9 @@ function alg_cache(alg::Union{MPRK43I, MPRK43II}, u, rate_prototype, ::Type{uElt
         D3 = similar(u)

         linprob = LinearProblem(P3, _vec(tmp))
-        linsolve = init(linprob, alg.linsolve, alias_A = true, alias_b = true,
+        linsolve = init(linprob, alg.linsolve,
+                        alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
+                                                                 alias_b = true),
                         assumptions = LinearSolve.OperatorAssumptions(true))

         MPRK43Cache(tmp, tmp2, P, P2, P3, D, D2, D3, σ, tab, linsolve)
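
The aliasing matters in these caches because the production matrix (`P`, `P2`, `P3`) doubles as the system matrix and `tmp`/`uprev` as the right-hand side. A hedged sketch of the per-step reuse this `init` call enables, following LinearSolve.jl's documented caching interface (variable names taken from the diff; `_vec` is the package-internal reshape helper, and whether the package reassigns `A` explicitly at each step is an assumption here):

```julia
# Sketch, assuming `linsolve` was created by `init` as in the hunks above.
linsolve.A = P2         # P2 has been overwritten with the new system matrix
linsolve.b = _vec(tmp)  # right-hand side for the current stage
sol = solve!(linsolve)  # refactorizes and solves, reusing the cached workspace
```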

src/sspmprk.jl

Lines changed: 13 additions & 5 deletions
@@ -240,15 +240,19 @@ function alg_cache(alg::SSPMPRK22, u, rate_prototype, ::Type{uEltypeNoUnits},

     if f isa ConservativePDSFunction
         linprob = LinearProblem(P2, _vec(tmp))
-        linsolve = init(linprob, alg.linsolve, alias_A = true, alias_b = true,
+        linsolve = init(linprob, alg.linsolve,
+                        alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
+                                                                 alias_b = true),
                         assumptions = LinearSolve.OperatorAssumptions(true))

         SSPMPRK22ConservativeCache(tmp, P, P2, σ,
                                    tab, #MPRK22ConstantCache
                                    linsolve)
     elseif f isa PDSFunction
         linprob = LinearProblem(P2, _vec(tmp))
-        linsolve = init(linprob, alg.linsolve, alias_A = true, alias_b = true,
+        linsolve = init(linprob, alg.linsolve,
+                        alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
+                                                                 alias_b = true),
                         assumptions = LinearSolve.OperatorAssumptions(true))

         SSPMPRK22Cache(tmp, P, P2,
@@ -466,7 +470,7 @@ You can optionally choose the linear solver to be used by passing an
 algorithm from [LinearSolve.jl](https://github.com/SciML/LinearSolve.jl)
 as keyword argument `linsolve`.
 You can also choose the parameter `small_constant` which is added to all Patankar-weight denominators
-to avoid divisions by zero. To display the default value for data type `type` evaluate
+to avoid divisions by zero. To display the default value for data type `type` evaluate
 `SSPMPRK43.small_constant_function(type)`, where `type` can be, e.g.,
 `Float64`.
@@ -775,7 +779,9 @@ function alg_cache(alg::SSPMPRK43, u, rate_prototype, ::Type{uEltypeNoUnits},

     if f isa ConservativePDSFunction
         linprob = LinearProblem(P3, _vec(tmp))
-        linsolve = init(linprob, alg.linsolve, alias_A = true, alias_b = true,
+        linsolve = init(linprob, alg.linsolve,
+                        alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
+                                                                 alias_b = true),
                         assumptions = LinearSolve.OperatorAssumptions(true))
         SSPMPRK43ConservativeCache(tmp, tmp2, P, P2, P3, σ, ρ, tab, linsolve)
     elseif f isa PDSFunction
@@ -784,7 +790,9 @@ function alg_cache(alg::SSPMPRK43, u, rate_prototype, ::Type{uEltypeNoUnits},
         D3 = similar(u)

         linprob = LinearProblem(P3, _vec(tmp))
-        linsolve = init(linprob, alg.linsolve, alias_A = true, alias_b = true,
+        linsolve = init(linprob, alg.linsolve,
+                        alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
+                                                                 alias_b = true),
                         assumptions = LinearSolve.OperatorAssumptions(true))

         SSPMPRK43Cache(tmp, tmp2, P, P2, P3, D, D2, D3, σ, ρ, tab, linsolve)
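
As the docstrings touched above note, each of these methods accepts a LinearSolve.jl algorithm via the `linsolve` keyword. A hypothetical construction sketch (the positional argument follows the test calls below; the exact keyword defaults are assumed from the docstrings, not verified here):

```julia
using PositiveIntegrators
using LinearSolve: KLUFactorization

# Pick a sparse-aware factorization, e.g. for problems built with a
# sparse `p_prototype`; LUFactorization() is assumed to be the default.
alg = MPRK22(1.0; linsolve = KLUFactorization())
```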

test/Project.toml

Lines changed: 4 additions & 2 deletions
@@ -11,6 +11,7 @@ OrdinaryDiffEqTsit5 = "b1df2697-797e-41e3-8120-5422d3b24e4a"
 OrdinaryDiffEqVerner = "79d7bb75-1356-48c1-b8c0-6832512096c2"
 Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
 RecipesBase = "3cdcf5f2-1ef4-517c-9805-6587b60abb01"
+RecursiveFactorization = "f2c3362d-daeb-58d1-803e-2bc74f2840b4"
 SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
 StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
 Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
@@ -22,14 +23,15 @@ ADTypes = "1.12"
 Aqua = "0.8"
 ExplicitImports = "1.0.1"
 LinearAlgebra = "1"
-LinearSolve = "2.32"
+LinearSolve = "3.7.1"
 OrdinaryDiffEqLowOrderRK = "1.2"
-OrdinaryDiffEqRosenbrock = "1.4"
+OrdinaryDiffEqRosenbrock = "1.5"
 OrdinaryDiffEqSDIRK = "1.2"
 OrdinaryDiffEqTsit5 = "1.1"
 OrdinaryDiffEqVerner = "1.1"
 Plots = "1.25.11"
 RecipesBase = "1.3.4"
+RecursiveFactorization = "0.2.23"
 SparseArrays = "1"
 StaticArrays = "1.9.7"
 Statistics = "1"

test/runtests.jl

Lines changed: 21 additions & 29 deletions
@@ -15,7 +15,9 @@ using OrdinaryDiffEqTsit5: Tsit5
 using OrdinaryDiffEqVerner: Vern7, Vern9
 using PositiveIntegrators

-using LinearSolve: RFLUFactorization, LUFactorization, KrylovJL_GMRES
+# load RecursiveFactorization to get RFLUFactorization
+using RecursiveFactorization: RecursiveFactorization
+using LinearSolve: RFLUFactorization, LUFactorization, KLUFactorization, KrylovJL_GMRES

 using Aqua: Aqua
 using RecipesBase: RecipesBase # only for Aqua tests
@@ -1667,18 +1669,12 @@ end
         prob_sparse_op = PDSProblem(prod, dest, u0, tspan;
                                     p_prototype = P_sparse)

-        sol_tridiagonal_ip = solve(prob_tridiagonal_ip, alg;
-                                   dt)
-        sol_tridiagonal_op = solve(prob_tridiagonal_op, alg;
-                                   dt)
-        sol_dense_ip = solve(prob_dense_ip, alg;
-                             dt)
-        sol_dense_op = solve(prob_dense_op, alg;
-                             dt)
-        sol_sparse_ip = solve(prob_sparse_ip, alg;
-                              dt)
-        sol_sparse_op = solve(prob_sparse_op, alg;
-                              dt)
+        sol_tridiagonal_ip = solve(prob_tridiagonal_ip, alg; dt)
+        sol_tridiagonal_op = solve(prob_tridiagonal_op, alg; dt)
+        sol_dense_ip = solve(prob_dense_ip, alg; dt)
+        sol_dense_op = solve(prob_dense_op, alg; dt)
+        sol_sparse_ip = solve(prob_sparse_ip, alg; dt)
+        sol_sparse_op = solve(prob_sparse_op, alg; dt)

         @test isapprox(sol_tridiagonal_ip.t, sol_tridiagonal_op.t; rtol)
         @test isapprox(sol_dense_ip.t, sol_dense_op.t; rtol)
@@ -1751,7 +1747,7 @@ end
                                  p_prototype = P_dense)
        prob_sparse2 = PDSProblem(prod_sparse!, dest!, u0, tspan;
                                  p_prototype = P_sparse)
-       #solve and test
+       # solve and test
        for alg in (MPE(), MPRK22(0.5), MPRK22(1.0), MPRK43I(1.0, 0.5),
                    MPRK43I(0.5, 0.75),
                    MPRK43II(2.0 / 3.0), MPRK43II(0.5), SSPMPRK22(0.5, 1.0),
@@ -2026,7 +2022,7 @@ end

     # Check that approximations, and thus the Patankar weights,
     # remain positive to avoid division by zero.
-    @testset "Positvity check" begin
+    @testset "Positivity check" begin
         # For this problem u[1] decreases montonically to 0 very fast.
         # We perform 10^5 steps and check that u[end] does not contain any NaNs
         u0 = [0.9, 0.1]
@@ -2293,9 +2289,9 @@ end
        labelsB = ["B_1"; "B_2"; "B_3"; "B_4"; "B_5"]
        dts = (last(prob.tspan) - first(prob.tspan)) / 10.0 * 0.5 .^ (4:13)
        alg_ref = Vern7()
-       wp = work_precision_fixed(prob, [alg; alg; alg; alg; alg], labelsA, dts,
+       wp = work_precision_fixed(prob, algs, labelsA, dts,
                                  alg_ref)
-       work_precision_fixed!(wp, prob, [alg; alg; alg; alg; alg], labelsB, dts,
+       work_precision_fixed!(wp, prob, algs, labelsB, dts,
                              alg_ref)

        # check that errors agree
@@ -2328,12 +2324,10 @@ end
        abstols = 1 ./ 10 .^ (4:8)
        reltols = 1 ./ 10 .^ (3:7)
        alg_ref = Vern7()
-       wp = work_precision_adaptive(prob, [alg; alg; alg; alg; alg], labelsA,
-                                    abstols,
-                                    reltols, alg_ref)
-       work_precision_adaptive!(wp, prob, [alg; alg; alg; alg; alg], labelsB,
-                                abstols,
-                                reltols, alg_ref)
+       wp = work_precision_adaptive(prob, algs, labelsA,
+                                    abstols, reltols, alg_ref)
+       work_precision_adaptive!(wp, prob, algs, labelsB,
+                                abstols, reltols, alg_ref)

        # check that errors agree
        for (i, _) in enumerate(abstols)
@@ -2361,12 +2355,10 @@ end
        abstols = 1 ./ 10 .^ (4:8)
        reltols = 1 ./ 10 .^ (3:7)
        alg_ref = Rodas4P()
-       wp = work_precision_adaptive(prob, [alg; alg; alg; alg; alg], labelsA,
-                                    abstols,
-                                    reltols, alg_ref; adaptive_ref = true)
-       work_precision_adaptive!(wp, prob, [alg; alg; alg; alg; alg], labelsB,
-                                abstols,
-                                reltols, alg_ref; adaptive_ref = true)
+       wp = work_precision_adaptive(prob, algs, labelsA,
+                                    abstols, reltols, alg_ref; adaptive_ref = true)
+       work_precision_adaptive!(wp, prob, algs, labelsB,
+                                abstols, reltols, alg_ref; adaptive_ref = true)

        # check that errors agree
        for (i, _) in enumerate(abstols)
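
One note behind the import hunk at the top of this file: in LinearSolve.jl v3, `RFLUFactorization` sits behind a package extension, so RecursiveFactorization.jl must be loaded explicitly before the solver can be used; hence the new test dependency. A minimal sketch:

```julia
# Without the RecursiveFactorization import, constructing/using
# RFLUFactorization() fails in LinearSolve v3 because the extension
# providing it is never loaded.
using RecursiveFactorization: RecursiveFactorization
using LinearSolve: RFLUFactorization

alg = RFLUFactorization()
```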
