Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
39 changes: 35 additions & 4 deletions src/basic.jl
Original file line number Diff line number Diff line change
Expand Up @@ -286,8 +286,18 @@ for T in SCALINGNUMBERTYPES[2:end]
end
end

Base.:-(L::AbstractSciMLOperator) = ScaledOperator(-true, L)
Base.:+(L::AbstractSciMLOperator) = L
Base.:-(L::AbstractSciMLOperator) = ScaledOperator(-true, L)

# Special cases for constant scalars. These simplify the structure when applicable
function Base.:-(L::ScaledOperator)
    # Negating a ScaledOperator: when the scaling coefficient is constant,
    # fold the sign directly into it so the wrapper structure stays flat.
    # Otherwise push the negation down into the wrapped operator, leaving
    # the (possibly time-dependent) coefficient untouched.
    if isconstant(L.λ)
        return ScaledOperator(-L.λ, L.L)
    end
    return ScaledOperator(L.λ, -L.L)
end
function Base.:-(L::MatrixOperator)
isconstant(L) && return MatrixOperator(-L.A)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this allocates though

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I agree, but this operation is usually performed once in the beginning when defining the operator. Then, the final operator is usually applied many times when solving an ODE or a linear problem.

This allows us to have a cleaner structure for the operator, which will be used repeatedly in a solver.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In the ODE it will allocate for every step which takes a new Jacobian.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could you make an explicit example? In my mind this is my workflow

some_f(a,u,p,t) = ...

# The allocations apply here, if the matrix is constant
A = ScalarOperator(rand(), update_function=some_f) * MatrixOperator(...) + ....

prob = ODEProblem(A, u0, tspan)
sol = solve(prob, Tsit5()) # It just updates the ScalarOperators. No allocations

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

that's not using the WOperator

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You'd have to do a case where someone directly writes jac_prototype = MatrixOperator(J) I think, then it should allocate the extra terms instead of doing the lazy computation that is expected.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ok now I'm doing

f = ODEFunction(brusselator_2d_loop; jac_prototype = MatrixOperator(float.(jac_sparsity)))
prob_ode_brusselator_2d_sparse = ODEProblem(f, u0, (0.0, 11.5), p)

function incompletelu(W, du, u, p, t, newW, Plprev, Prprev, solverdata)
    if newW === nothing || newW
        Pl = IncompleteLU.ilu(convert(AbstractMatrix, W), τ = 50.0)
    else
        Pl = Plprev
    end
    Pl, nothing
end

solve(prob_ode_brusselator_2d_sparse,
    KenCarp47(; linsolve = KrylovJL_GMRES(), precs = incompletelu,
    concrete_jac = true); save_everystep = false);

But the performance is still the same.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

And memory usage and memory peak? The issue I'm worried about is that -A not being in-place will make there be a new temporary, and so it doubles the memory requirement on the system. I think this will oom larger cases.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm actually trying

Base.:-(L::AbstractSciMLOperator) = (println("HELLO"); ScaledOperator(-true, L))

without the other method definitions, just to check that it is actually calling it. But this is not. So maybe we should try another case?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ok I have removed all the definitions that should allocate a new MatrixOperator, just to make it allocation-free.

return ScaledOperator(-true, L) # Going back to the generic case
end

function Base.convert(::Type{AbstractMatrix}, L::ScaledOperator)
convert(Number, L.λ) * convert(AbstractMatrix, L.L)
Expand Down Expand Up @@ -428,9 +438,11 @@ struct AddedOperator{T,

function AddedOperator(ops)
    # Validate at the API boundary: an AddedOperator must wrap at least one
    # operator. Use an explicit throw rather than `@assert`, which is meant
    # for internal invariants and may be disabled at higher optimization
    # levels.
    isempty(ops) && throw(ArgumentError("AddedOperator requires at least one operator"))
    # Flatten nested AddedOperators so the stored tuple contains only
    # non-Added leaves; this keeps the operator tree shallow when sums are
    # built incrementally.
    ops_flat = _flatten_added_operators(ops)
    _check_AddedOperator_sizes(ops_flat)
    # Element type is the promotion of all component element types.
    T = mapreduce(eltype, promote_type, ops_flat)
    new{T, typeof(ops_flat)}(ops_flat)
end
end

Expand All @@ -440,6 +452,25 @@ end

AddedOperator(L::AbstractSciMLOperator) = L

# Splice the `ops` tuples of any nested `AddedOperator` elements into a
# single flat tuple, leaving all other elements in place. Implemented as a
# @generated function so the flattening is resolved at compile time from the
# tuple's element types, with no runtime branching or allocation beyond the
# result tuple itself.
@generated function _flatten_added_operators(ops::Tuple)
    pieces = Expr[]
    for (i, T) in enumerate(ops.parameters)
        if T <: AddedOperator
            # Nested sum: splat its component tuple in place of the element.
            push!(pieces, :(ops[$i].ops...))
        else
            # Plain operator: keep it as-is.
            push!(pieces, :(ops[$i]))
        end
    end
    return :(tuple($(pieces...)))
end

@generated function _check_AddedOperator_sizes(ops::Tuple)
ops_types = ops.parameters
N = length(ops_types)
Expand Down
50 changes: 45 additions & 5 deletions test/basic.jl
Original file line number Diff line number Diff line change
Expand Up @@ -141,6 +141,35 @@ end
end
end

@testset "Unary +/-" begin
    A = MatrixOperator(rand(N, N))
    v = rand(N, K)

    # Unary plus is the identity and must not wrap the operator.
    @test +A === A

    # Unary minus on a constant MatrixOperator simplifies eagerly to a
    # plain MatrixOperator holding the negated matrix.
    negA = -A
    @test negA isa MatrixOperator
    @test negA * v ≈ -A.A * v

    # A time-dependent MatrixOperator cannot be simplified; the generic
    # fallback wraps it in a ScaledOperator instead.
    tfunc(A, u, p, t) = t
    At = MatrixOperator(A.A; update_func = tfunc)
    @test -At isa ScaledOperator

    # Negating a non-constant ScaledOperator pushes the sign into the
    # wrapped operator, leaving the updating scalar untouched.
    scaled_t = ScalarOperator(0.0, tfunc) * A
    @test !isconstant(scaled_t)
    neg_scaled = -scaled_t
    @test neg_scaled.λ isa ScalarOperator && neg_scaled.L isa MatrixOperator

    # Double negation round-trips back to a plain MatrixOperator.
    @test -(-A) isa MatrixOperator
    @test (-(-A)) * v ≈ A * v
end

@testset "ScaledOperator" begin
A = rand(N, N)
D = Diagonal(rand(N))
Expand Down Expand Up @@ -269,13 +298,24 @@ end
w_orig = copy(v)
@test mul!(v, op, u, α, β) ≈ α * (A + B) * u + β * w_orig

# ensure AddedOperator doesn't nest
# Test flattening of nested AddedOperators via direct constructor
A = MatrixOperator(rand(N, N))
L = A + (A + A) + A
B = MatrixOperator(rand(N, N))
C = MatrixOperator(rand(N, N))

# Create nested structure: (A + B) is an AddedOperator
AB = A + B
@test AB isa AddedOperator

# When we create AddedOperator((AB, C)), it should flatten
L = AddedOperator((AB, C))
@test L isa AddedOperator
for op in L.ops
@test !isa(op, AddedOperator)
end
@test length(L.ops) == 3 # Should have A, B, C (not AB and C)
@test all(op -> !isa(op, AddedOperator), L.ops)

# Verify correctness
test_vec = rand(N, K)
@test L * test_vec ≈ (A + B + C) * test_vec

## Time-Dependent Coefficients
for T in (Float32, Float64, ComplexF32, ComplexF64)
Expand Down
Loading