1
- # TODO : add keyword `shrink` to give a consistant result on Base
2
- # when this is done, then we can propose this change to upstream Base
1
# Because there exists the `FiniteDiffs.jl` package with quite different purposes,
# this module is not expected to be reexported.
module FiniteDiffs

using ImageCore
# `of_eltype` provides a lazy eltype-converting view; used by the diff routines below.
using ImageCore.MappedArrays: of_eltype

"""
Although stored as an array, an image can also be viewed as a function from discrete grid space
Zᴺ to continuous space R (or C if it is complex-valued). This module provides the discrete
version of gradient-related operators by viewing image arrays as functions.

This module provides:

- forward/backward difference [`fdiff`](@ref) are the Images-flavor of `Base.diff`
- gradient operator [`fgradient`](@ref) and its adjoint via keyword `adjoint=true`.
- divergence operator [`fdiv`](@ref) is the negative sum of the adjoint gradient operator of
  given vector fields.
- laplacian operator [`flaplacian`](@ref) is the divergence of the gradient fields.

Every function in this module has its in-place version.
"""
FiniteDiffs

export
    fdiff, fdiff!,
    fdiv, fdiv!,
    fgradient, fgradient!,
    flaplacian, flaplacian!

3
32
"""
4
33
fdiff(A::AbstractArray; dims::Int, rev=false, boundary=:periodic)
5
34
@@ -19,7 +48,7 @@ Take vector as an example, it computes `(A[2]-A[1], A[3]-A[2], ..., A[1]-A[end])
19
48
20
49
# Examples
21
50
22
- ```jldoctest; setup=:(using ImageBase: fdiff)
51
+ ```jldoctest; setup=:(using ImageBase.FiniteDiffs : fdiff)
23
52
julia> A = [2 4 8; 3 9 27; 4 16 64]
24
53
3×3 $(Matrix{Int}) :
25
54
2 4 8
@@ -106,3 +135,95 @@ _fdiff_default_dims(A::AbstractVector) = 1
106
135
# Promote a storage eltype to one that can hold difference results: plain number
# types pass through unchanged, fixed-point types widen via `floattype` (differences
# may be negative — presumably why the promotion exists; confirm against `fdiff`),
# and colorants promote their component type recursively.
maybe_floattype(::Type{T}) where {T} = T
maybe_floattype(::Type{T}) where {T<:FixedPoint} = floattype(T)
function maybe_floattype(::Type{CT}) where {CT<:Color}
    return base_color_type(CT){maybe_floattype(eltype(CT))}
end
138
+
139
+
140
+ """
141
+ fdiv(Vs...)
142
+
143
+ Compute the divergence of vector fields `Vs`.
144
+ """
145
+ function fdiv (V₁:: AbstractArray{T} , Vs:: AbstractArray{T} ...) where T
146
+ fdiv! (similar (first (V₁), maybe_floattype (eltype (V₁))), V₁, Vs... )
147
+ end
148
"""
    fdiv!(out, V₁, Vs...)

The in-place version of [`fdiv`](@ref): accumulate the divergence of the vector
fields `(V₁, Vs...)` into `out` and return `out`.
"""
function fdiv!(out::AbstractArray, V₁::AbstractArray, Vs::AbstractArray...)
    # This is the optimized version of `sum(v->fgradient(v; adjoint=true), (V₁, Vs...))`
    # by removing unnecessary memory allocations.
    fields = (V₁, Vs...)
    all(v -> axes(v) == axes(out), fields) || throw(ArgumentError("All axes of vector fields Vs and X should be the same."))

    # TODO(johnnychen94): for better performance, we can eliminate this buffer
    # allocation by fusing multiple `fdiff` in the inner loop.
    out .= fdiff(V₁; dims=1, rev=true)
    buf = similar(out)
    for (i, V) in enumerate(Vs)
        # Component i+1 of the field differentiates along dimension i+1.
        out .+= fdiff!(buf, V; dims=i + 1, rev=true)
    end
    return out
end
161
+
162
+ """
163
+ flaplacian(X::AbstractArray)
164
+
165
+ The discrete laplacian operator, i.e., the divergence of the gradient fields of `X`.
166
+ """
167
+ flaplacian (X:: AbstractArray ) = flaplacian! (similar (X, maybe_floattype (eltype (X))), X)
168
+
169
+ """
170
+ flaplacian!(out, X)
171
+ flaplacian!(out, ∇X::Tuple, X)
172
+
173
+ The in-place version of the laplacian operator [`flaplacian`](@ref).
174
+
175
+ !!! tips Non-allocating
176
+ This function will allocate a new set of memories to store the intermediate
177
+ gradient fields `∇X`, if you pre-allcoate the memory for `∇X`, then this function
178
+ will use it and is thus non-allcating.
179
+ """
180
+ flaplacian! (out, X:: AbstractArray ) = fdiv! (out, fgradient (X)... )
181
+ flaplacian! (out, ∇X:: Tuple , X:: AbstractArray ) = fdiv! (out, fgradient! (∇X, X)... )
182
+
183
+
184
+ """
185
+ fgradient(X::AbstractArray; adjoint=false) -> (∂₁X, ∂₂X, ..., ∂ₙX)
186
+
187
+ Computes the gradient fields of `X`. If `adjoint==true` then it computes the adjoint gradient
188
+ fields.
189
+
190
+ Each gradient vector is computed as forward difference along specific dimension, e.g.,
191
+ [`∂ᵢX = fdiff(X, dims=i)`](@ref fdiff).
192
+
193
+ Mathematically, the adjoint operator ∂ᵢ' of ∂ᵢ is defined as `<∂ᵢu, v> := <u, ∂ᵢ'v>`.
194
+
195
+ See also the in-place version [`fgradient!(X)`](@ref) to reuse the allocated memory.
196
+ """
197
+ function fgradient (X:: AbstractArray{T,N} ; adjoint= false ) where {T,N}
198
+ fgradient! (ntuple (i-> similar (X, maybe_floattype (T)), N), X; adjoint= adjoint)
199
+ end
200
+
201
+ """
202
+ fgradient!(∇X::Tuple, X::AbstractArray; adjoint=false)
203
+
204
+ The in-place version of (adjoint) gradient operator [`fgradient`](@ref).
205
+
206
+ The input `∇X = (∂₁X, ∂₂X, ..., ∂ₙX)` is a tuple of arrays that are similar to `X`, i.e.,
207
+ `eltype(∂ᵢX) == eltype(X)` and `axes(∂ᵢX) == axes(X)` for all `i`.
208
+ """
209
+ function fgradient! (∇X:: NTuple{N, <:AbstractArray} , X; adjoint= false ) where N
210
+ all (v-> axes (v) == axes (X), ∇X) || throw (ArgumentError (" All axes of vector fields ∇X and X should be the same." ))
211
+ for i in 1 : N
212
+ if adjoint
213
+ # the negative adjoint of gradient operator for forward difference is the backward difference
214
+ fdiff! (∇X[i], X, dims= i, rev= true )
215
+ @. ∇X[i] = - ∇X[i]
216
+ else
217
+ fdiff! (∇X[i], X, dims= i)
218
+ end
219
+ end
220
+ return ∇X
221
+ end
222
+
223
+
224
+
225
# Compatibility shim: `Base.isnothing` was introduced in Julia 1.1, so define a
# module-local fallback with identical semantics on older releases.
if VERSION < v"1.1"
    isnothing(x) = x === nothing
end
228
+
229
+ end # module
0 commit comments