Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
30 commits
Select commit Hold shift + click to select a range
a984768
allow LinearSolve v3
JoshuaLampert Mar 23, 2025
0cbd20b
Merge branch 'main' into linearsolve-3
JoshuaLampert Mar 23, 2025
c2489dd
Merge branch 'main' into linearsolve-3
JoshuaLampert Mar 24, 2025
ce17d63
use KLUFactorization() instead of LUFactorization()
JoshuaLampert Mar 24, 2025
04aaeb0
remove bandaid
JoshuaLampert Mar 24, 2025
3df55a0
remove debugging statements
JoshuaLampert Mar 24, 2025
de15d15
fix warnings related to alias
JoshuaLampert Mar 24, 2025
d650419
bump compat of RecursiveFactorization to v0.2.14
JoshuaLampert Mar 24, 2025
d0f10d3
use KLUFactorization only in test
JoshuaLampert Mar 24, 2025
583306b
bump compat of RecursiveFactorization.jl to v0.2.23
JoshuaLampert Mar 24, 2025
50a0da4
remove SciMLOperators again
JoshuaLampert Mar 24, 2025
58f873d
fix change of sparsity pattern also in other tests
JoshuaLampert Mar 24, 2025
e233189
fix
JoshuaLampert Mar 24, 2025
de6100e
finally fixing it
JoshuaLampert Mar 24, 2025
4226dd6
use same pattern as elsewhere
JoshuaLampert Mar 24, 2025
642565f
fix keywords
JoshuaLampert Mar 24, 2025
6247399
Merge branch 'main' into linearsolve-3
JoshuaLampert Mar 25, 2025
949aaba
bump compat bounds to fix downgrade
JoshuaLampert Mar 25, 2025
e503cfc
Merge pull request #8 from JoshuaLampert/fix-downgrade
JoshuaLampert Mar 25, 2025
b38354f
use algs
JoshuaLampert Mar 25, 2025
f12c93b
bump compat of OrdinaryDiffEqRosenbrock.jl to v1.5
JoshuaLampert Mar 25, 2025
ac45d5b
remove accidentally pushed file again
JoshuaLampert Mar 25, 2025
5c50df6
try Downgrade with backported fix of SimpleNonlinearSolve.jl
JoshuaLampert Mar 27, 2025
77159bb
bump compats again
JoshuaLampert Mar 27, 2025
048f96c
allow LinearSolve v2.39.1
JoshuaLampert Mar 28, 2025
0ca900c
allow SciMLBase.jl v2.68
JoshuaLampert Mar 28, 2025
0c7234e
require SciMLBase.jl v2.78 again
JoshuaLampert Mar 28, 2025
b8bcba6
require LinearSolve.jl v3.7.1
JoshuaLampert Mar 28, 2025
e3d1ca9
fix
JoshuaLampert Mar 28, 2025
fa4b682
drop support for LinearSolve.jl v2
JoshuaLampert Mar 28, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/Downgrade.yml
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ jobs:
- uses: julia-actions/cache@v2
- uses: julia-actions/julia-downgrade-compat@v1
with:
skip: LinearAlgebra,SparseArrays,Statistics,Test,SciMLBase,SciMLOperators
skip: LinearAlgebra,SparseArrays,Statistics,Test
projects: ., test
- uses: julia-actions/julia-buildpkg@v1
env:
Expand Down
6 changes: 2 additions & 4 deletions Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@ OrdinaryDiffEqCore = "bbf590c4-e513-4bbe-9b18-05decba2e5d8"
RecipesBase = "3cdcf5f2-1ef4-517c-9805-6587b60abb01"
Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462"
SciMLOperators = "c0aeaf25-5076-4817-a8d5-81caf7dfa961"
SimpleUnPack = "ce78b400-467f-4804-87d8-8f486da07d0a"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
Expand All @@ -22,13 +21,12 @@ SymbolicIndexingInterface = "2efcf032-c050-4f8e-a9bb-153293bab1f5"
[compat]
FastBroadcast = "0.3.5"
LinearAlgebra = "1"
LinearSolve = "2.32"
LinearSolve = "3.7.1"
MuladdMacro = "0.2.4"
OrdinaryDiffEqCore = "1.16"
RecipesBase = "1.3.4"
Reexport = "1.2.2"
SciMLBase = "2.68 - 2.77"
SciMLOperators = "0.3 - 0.3.12"
SciMLBase = "2.78"
SimpleUnPack = "1"
SparseArrays = "1"
StaticArrays = "1.9.7"
Expand Down
2 changes: 1 addition & 1 deletion docs/Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ DiffEqDevTools = "2.45.1"
Documenter = "1"
InteractiveUtils = "1"
LinearAlgebra = "1"
LinearSolve = "2.32"
LinearSolve = "3.7.1"
OrdinaryDiffEqFIRK = "1.7"
OrdinaryDiffEqLowOrderRK = "1.2"
OrdinaryDiffEqRosenbrock = "1.4"
Expand Down
26 changes: 19 additions & 7 deletions src/mprk.jl
Original file line number Diff line number Diff line change
Expand Up @@ -357,7 +357,9 @@ function alg_cache(alg::MPE, u, rate_prototype, ::Type{uEltypeNoUnits},
# as well as to store the system matrix of the linear system
# Right hand side of linear system is always uprev
linprob = LinearProblem(P, _vec(uprev))
linsolve = init(linprob, alg.linsolve, alias_A = true, alias_b = true,
linsolve = init(linprob, alg.linsolve,
alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
alias_b = true),
assumptions = LinearSolve.OperatorAssumptions(true))

MPEConservativeCache(P, σ, tab, linsolve)
Expand All @@ -366,7 +368,9 @@ function alg_cache(alg::MPE, u, rate_prototype, ::Type{uEltypeNoUnits},
# We use P to store the evaluation of the PDS
# as well as to store the system matrix of the linear system
linprob = LinearProblem(P, _vec(linsolve_rhs))
linsolve = init(linprob, alg.linsolve, alias_A = true, alias_b = true,
linsolve = init(linprob, alg.linsolve,
alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
alias_b = true),
assumptions = LinearSolve.OperatorAssumptions(true))

MPECache(P, similar(u), σ, tab, linsolve_rhs, linsolve)
Expand Down Expand Up @@ -664,15 +668,19 @@ function alg_cache(alg::MPRK22, u, rate_prototype, ::Type{uEltypeNoUnits},
# not be altered, since it is needed to compute the adaptive time step
# size.
linprob = LinearProblem(P2, _vec(tmp))
linsolve = init(linprob, alg.linsolve, alias_A = true, alias_b = true,
linsolve = init(linprob, alg.linsolve,
alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
alias_b = true),
assumptions = LinearSolve.OperatorAssumptions(true))

MPRK22ConservativeCache(tmp, P, P2, σ,
tab, #MPRK22ConstantCache
linsolve)
elseif f isa PDSFunction
linprob = LinearProblem(P2, _vec(tmp))
linsolve = init(linprob, alg.linsolve, alias_A = true, alias_b = true,
linsolve = init(linprob, alg.linsolve,
alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
alias_b = true),
assumptions = LinearSolve.OperatorAssumptions(true))

MPRK22Cache(tmp, P, P2,
Expand Down Expand Up @@ -987,7 +995,7 @@ You can optionally choose the linear solver to be used by passing an
algorithm from [LinearSolve.jl](https://github.com/SciML/LinearSolve.jl)
as keyword argument `linsolve`.
You can also choose the parameter `small_constant` which is added to all Patankar-weight denominators
to avoid divisions by zero. To display the default value for data type `type` evaluate
to avoid divisions by zero. To display the default value for data type `type` evaluate
`MPRK43II(gamma).small_constant_function(type)`, where `type` can be, e.g.,
`Float64`.

Expand Down Expand Up @@ -1262,7 +1270,9 @@ function alg_cache(alg::Union{MPRK43I, MPRK43II}, u, rate_prototype, ::Type{uElt
# not be altered, since it is needed to compute the adaptive time step
# size.
linprob = LinearProblem(P3, _vec(tmp))
linsolve = init(linprob, alg.linsolve, alias_A = true, alias_b = true,
linsolve = init(linprob, alg.linsolve,
alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
alias_b = true),
assumptions = LinearSolve.OperatorAssumptions(true))
MPRK43ConservativeCache(tmp, tmp2, P, P2, P3, σ, tab, linsolve)
elseif f isa PDSFunction
Expand All @@ -1271,7 +1281,9 @@ function alg_cache(alg::Union{MPRK43I, MPRK43II}, u, rate_prototype, ::Type{uElt
D3 = similar(u)

linprob = LinearProblem(P3, _vec(tmp))
linsolve = init(linprob, alg.linsolve, alias_A = true, alias_b = true,
linsolve = init(linprob, alg.linsolve,
alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
alias_b = true),
assumptions = LinearSolve.OperatorAssumptions(true))

MPRK43Cache(tmp, tmp2, P, P2, P3, D, D2, D3, σ, tab, linsolve)
Expand Down
18 changes: 13 additions & 5 deletions src/sspmprk.jl
Original file line number Diff line number Diff line change
Expand Up @@ -240,15 +240,19 @@ function alg_cache(alg::SSPMPRK22, u, rate_prototype, ::Type{uEltypeNoUnits},

if f isa ConservativePDSFunction
linprob = LinearProblem(P2, _vec(tmp))
linsolve = init(linprob, alg.linsolve, alias_A = true, alias_b = true,
linsolve = init(linprob, alg.linsolve,
alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
alias_b = true),
assumptions = LinearSolve.OperatorAssumptions(true))

SSPMPRK22ConservativeCache(tmp, P, P2, σ,
tab, #MPRK22ConstantCache
linsolve)
elseif f isa PDSFunction
linprob = LinearProblem(P2, _vec(tmp))
linsolve = init(linprob, alg.linsolve, alias_A = true, alias_b = true,
linsolve = init(linprob, alg.linsolve,
alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
alias_b = true),
assumptions = LinearSolve.OperatorAssumptions(true))

SSPMPRK22Cache(tmp, P, P2,
Expand Down Expand Up @@ -466,7 +470,7 @@ You can optionally choose the linear solver to be used by passing an
algorithm from [LinearSolve.jl](https://github.com/SciML/LinearSolve.jl)
as keyword argument `linsolve`.
You can also choose the parameter `small_constant` which is added to all Patankar-weight denominators
to avoid divisions by zero. To display the default value for data type `type` evaluate
to avoid divisions by zero. To display the default value for data type `type` evaluate
`SSPMPRK43.small_constant_function(type)`, where `type` can be, e.g.,
`Float64`.

Expand Down Expand Up @@ -775,7 +779,9 @@ function alg_cache(alg::SSPMPRK43, u, rate_prototype, ::Type{uEltypeNoUnits},

if f isa ConservativePDSFunction
linprob = LinearProblem(P3, _vec(tmp))
linsolve = init(linprob, alg.linsolve, alias_A = true, alias_b = true,
linsolve = init(linprob, alg.linsolve,
alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
alias_b = true),
assumptions = LinearSolve.OperatorAssumptions(true))
SSPMPRK43ConservativeCache(tmp, tmp2, P, P2, P3, σ, ρ, tab, linsolve)
elseif f isa PDSFunction
Expand All @@ -784,7 +790,9 @@ function alg_cache(alg::SSPMPRK43, u, rate_prototype, ::Type{uEltypeNoUnits},
D3 = similar(u)

linprob = LinearProblem(P3, _vec(tmp))
linsolve = init(linprob, alg.linsolve, alias_A = true, alias_b = true,
linsolve = init(linprob, alg.linsolve,
alias = LinearSolve.LinearAliasSpecifier(; alias_A = true,
alias_b = true),
assumptions = LinearSolve.OperatorAssumptions(true))

SSPMPRK43Cache(tmp, tmp2, P, P2, P3, D, D2, D3, σ, ρ, tab, linsolve)
Expand Down
6 changes: 4 additions & 2 deletions test/Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ OrdinaryDiffEqTsit5 = "b1df2697-797e-41e3-8120-5422d3b24e4a"
OrdinaryDiffEqVerner = "79d7bb75-1356-48c1-b8c0-6832512096c2"
Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
RecipesBase = "3cdcf5f2-1ef4-517c-9805-6587b60abb01"
RecursiveFactorization = "f2c3362d-daeb-58d1-803e-2bc74f2840b4"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
Expand All @@ -22,14 +23,15 @@ ADTypes = "1.12"
Aqua = "0.8"
ExplicitImports = "1.0.1"
LinearAlgebra = "1"
LinearSolve = "2.32"
LinearSolve = "3.7.1"
OrdinaryDiffEqLowOrderRK = "1.2"
OrdinaryDiffEqRosenbrock = "1.4"
OrdinaryDiffEqRosenbrock = "1.5"
OrdinaryDiffEqSDIRK = "1.2"
OrdinaryDiffEqTsit5 = "1.1"
OrdinaryDiffEqVerner = "1.1"
Plots = "1.25.11"
RecipesBase = "1.3.4"
RecursiveFactorization = "0.2.23"
SparseArrays = "1"
StaticArrays = "1.9.7"
Statistics = "1"
Expand Down
50 changes: 21 additions & 29 deletions test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,9 @@ using OrdinaryDiffEqTsit5: Tsit5
using OrdinaryDiffEqVerner: Vern7, Vern9
using PositiveIntegrators

using LinearSolve: RFLUFactorization, LUFactorization, KrylovJL_GMRES
# load RecursiveFactorization to get RFLUFactorization
using RecursiveFactorization: RecursiveFactorization
using LinearSolve: RFLUFactorization, LUFactorization, KLUFactorization, KrylovJL_GMRES

using Aqua: Aqua
using RecipesBase: RecipesBase # only for Aqua tests
Expand Down Expand Up @@ -1667,18 +1669,12 @@ end
prob_sparse_op = PDSProblem(prod, dest, u0, tspan;
p_prototype = P_sparse)

sol_tridiagonal_ip = solve(prob_tridiagonal_ip, alg;
dt)
sol_tridiagonal_op = solve(prob_tridiagonal_op, alg;
dt)
sol_dense_ip = solve(prob_dense_ip, alg;
dt)
sol_dense_op = solve(prob_dense_op, alg;
dt)
sol_sparse_ip = solve(prob_sparse_ip, alg;
dt)
sol_sparse_op = solve(prob_sparse_op, alg;
dt)
sol_tridiagonal_ip = solve(prob_tridiagonal_ip, alg; dt)
sol_tridiagonal_op = solve(prob_tridiagonal_op, alg; dt)
sol_dense_ip = solve(prob_dense_ip, alg; dt)
sol_dense_op = solve(prob_dense_op, alg; dt)
sol_sparse_ip = solve(prob_sparse_ip, alg; dt)
sol_sparse_op = solve(prob_sparse_op, alg; dt)

@test isapprox(sol_tridiagonal_ip.t, sol_tridiagonal_op.t; rtol)
@test isapprox(sol_dense_ip.t, sol_dense_op.t; rtol)
Expand Down Expand Up @@ -1751,7 +1747,7 @@ end
p_prototype = P_dense)
prob_sparse2 = PDSProblem(prod_sparse!, dest!, u0, tspan;
p_prototype = P_sparse)
#solve and test
# solve and test
for alg in (MPE(), MPRK22(0.5), MPRK22(1.0), MPRK43I(1.0, 0.5),
MPRK43I(0.5, 0.75),
MPRK43II(2.0 / 3.0), MPRK43II(0.5), SSPMPRK22(0.5, 1.0),
Expand Down Expand Up @@ -2026,7 +2022,7 @@ end

# Check that approximations, and thus the Patankar weights,
# remain positive to avoid division by zero.
@testset "Positvity check" begin
@testset "Positivity check" begin
# For this problem u[1] decreases monotonically to 0 very fast.
# We perform 10^5 steps and check that u[end] does not contain any NaNs
u0 = [0.9, 0.1]
Expand Down Expand Up @@ -2293,9 +2289,9 @@ end
labelsB = ["B_1"; "B_2"; "B_3"; "B_4"; "B_5"]
dts = (last(prob.tspan) - first(prob.tspan)) / 10.0 * 0.5 .^ (4:13)
alg_ref = Vern7()
wp = work_precision_fixed(prob, [alg; alg; alg; alg; alg], labelsA, dts,
wp = work_precision_fixed(prob, algs, labelsA, dts,
alg_ref)
work_precision_fixed!(wp, prob, [alg; alg; alg; alg; alg], labelsB, dts,
work_precision_fixed!(wp, prob, algs, labelsB, dts,
alg_ref)

# check that errors agree
Expand Down Expand Up @@ -2328,12 +2324,10 @@ end
abstols = 1 ./ 10 .^ (4:8)
reltols = 1 ./ 10 .^ (3:7)
alg_ref = Vern7()
wp = work_precision_adaptive(prob, [alg; alg; alg; alg; alg], labelsA,
abstols,
reltols, alg_ref)
work_precision_adaptive!(wp, prob, [alg; alg; alg; alg; alg], labelsB,
abstols,
reltols, alg_ref)
wp = work_precision_adaptive(prob, algs, labelsA,
abstols, reltols, alg_ref)
work_precision_adaptive!(wp, prob, algs, labelsB,
abstols, reltols, alg_ref)

# check that errors agree
for (i, _) in enumerate(abstols)
Expand Down Expand Up @@ -2361,12 +2355,10 @@ end
abstols = 1 ./ 10 .^ (4:8)
reltols = 1 ./ 10 .^ (3:7)
alg_ref = Rodas4P()
wp = work_precision_adaptive(prob, [alg; alg; alg; alg; alg], labelsA,
abstols,
reltols, alg_ref; adaptive_ref = true)
work_precision_adaptive!(wp, prob, [alg; alg; alg; alg; alg], labelsB,
abstols,
reltols, alg_ref; adaptive_ref = true)
wp = work_precision_adaptive(prob, algs, labelsA,
abstols, reltols, alg_ref; adaptive_ref = true)
work_precision_adaptive!(wp, prob, algs, labelsB,
abstols, reltols, alg_ref; adaptive_ref = true)

# check that errors agree
for (i, _) in enumerate(abstols)
Expand Down
Loading