1
- # TODO : add keyword `shrink` to give a consistant result on Base
2
- # when this is done, then we can propose this change to upstream Base
1
+ # Because there exists the `FiniteDiff.jl` package with quite different purposes,
2
+ # this module is not expected to be reexported.
3
module FiniteDiff

using ImageCore
# `of_eltype` lazily reinterprets an array with a different eltype.
using ImageCore.MappedArrays: of_eltype

"""
Although stored as an array, an image can also be viewed as a function from the discrete
grid space Zᴺ to the continuous space R (or C if it is complex-valued). This module
provides the discrete version of gradient-related operators by viewing image arrays as
functions.

This module provides:

- forward/backward difference [`fdiff`](@ref), the Images-flavor of `Base.diff`
- gradient operator [`fgradient`](@ref) and its adjoint via keyword `adjoint=true`
- divergence operator [`fdiv`](@ref), the negative sum of the adjoint gradient operator of
  given vector fields
- laplacian operator [`flaplacian`](@ref), the divergence of the gradient fields

Every function in this module has its in-place version.
"""
FiniteDiff

export
    fdiff, fdiff!,
    fdiv, fdiv!,
    fgradient, fgradient!,
    flaplacian, flaplacian!
30
+
31
+
3
32
"""
4
33
fdiff(A::AbstractArray; dims::Int, rev=false, boundary=:periodic)
5
34
@@ -19,7 +48,7 @@ Take vector as an example, it computes `(A[2]-A[1], A[3]-A[2], ..., A[1]-A[end])
19
48
20
49
# Examples
21
50
22
- ```jldoctest; setup=:(using ImageBase: fdiff)
51
+ ```jldoctest; setup=:(using ImageBase.FiniteDiff: fdiff)
23
52
julia> A = [2 4 8; 3 9 27; 4 16 64]
24
53
3×3 $(Matrix{Int}) :
25
54
2 4 8
@@ -106,3 +135,107 @@ _fdiff_default_dims(A::AbstractVector) = 1
106
135
# Pick a storage eltype capable of holding difference results (which may be
# negative or fractional). Plain numeric types pass through unchanged.
maybe_floattype(::Type{T}) where T = T
# FixedPoint storage (e.g. N0f8) cannot represent negative values; widen to float.
maybe_floattype(::Type{T}) where T<:FixedPoint = floattype(T)
# For colorants, promote the underlying storage eltype while keeping the color type.
maybe_floattype(::Type{CT}) where CT<:Color = base_color_type(CT){maybe_floattype(eltype(CT))}
138
+
139
+
140
"""
    fdiv(Vs...)

Compute the divergence of vector fields `Vs`.

See also [`fdiv!`](@ref) for the in-place version.
"""
fdiv(V₁::AbstractArray{T}, Vs::AbstractArray{T}...) where T =
    fdiv!(similar(V₁, maybe_floattype(T)), V₁, Vs...)
# Convenience method: accept the fields packed in a tuple (as `fgradient` returns).
fdiv(Vs::Tuple) = fdiv(Vs...)
151
+
152
"""
    fdiv!(out, Vs...)

The in-place version of the divergence operator [`fdiv`](@ref).
"""
function fdiv!(out::AbstractArray, V₁::AbstractArray, Vs::AbstractArray...)
    # This is the optimized version of `sum(v->fgradient(v; adjoint=true), (V₁, Vs...))`
    # by removing unnecessary memory allocations.
    all(v->axes(v) == axes(out), (V₁, Vs...)) || throw(ArgumentError("All axes of vector fields Vs and X should be the same."))

    # The first field writes directly into `out`; the remaining fields accumulate.
    out .= fdiff(V₁; dims=1, rev=true, boundary=:periodic)
    if !isempty(Vs)
        # TODO(johnnychen94): for better performance, we can eliminate this `tmp`
        # allocation by fusing multiple `fdiff` in the inner loop.
        tmp = similar(out)  # scratch buffer reused across the remaining dimensions
        for (i, V) in enumerate(Vs)
            # dims=i+1 because V₁ already covered dims=1
            out .+= fdiff!(tmp, V; dims=i+1, rev=true, boundary=:periodic)
        end
    end
    return out
end
# Convenience method: accept the fields packed in a tuple (as `fgradient` returns).
fdiv!(out::AbstractArray, Vs::Tuple) = fdiv!(out, Vs...)
171
+
172
"""
    flaplacian(X::AbstractArray)

The discrete laplacian operator, i.e., the divergence of the gradient fields of `X`.

See also [`flaplacian!`](@ref) for the in-place version.
"""
function flaplacian(X::AbstractArray)
    # Output eltype is widened so it can hold negative/fractional values.
    out = similar(X, maybe_floattype(eltype(X)))
    return flaplacian!(out, X)
end
180
+
181
"""
    flaplacian!(out, X)
    flaplacian!(out, ∇X::Tuple, X)

The in-place version of the laplacian operator [`flaplacian`](@ref).

!!! tip "Non-allocating"
    The two-argument form allocates a new set of memory to store the intermediate
    gradient fields `∇X`; if you pre-allocate the memory for `∇X` and use the
    three-argument form, then this function will use it and is thus non-allocating.
"""
flaplacian!(out, X::AbstractArray) = fdiv!(out, fgradient(X))
# Reuse caller-provided gradient buffers `∇X` to avoid intermediate allocations.
flaplacian!(out, ∇X::Tuple, X::AbstractArray) = fdiv!(out, fgradient!(∇X, X))
194
+
195
+
196
"""
    fgradient(X::AbstractArray; adjoint=false) -> (∂₁X, ∂₂X, ..., ∂ₙX)

Compute the gradient fields of `X`. If `adjoint==true` then it computes the adjoint
gradient fields.

Each gradient vector is computed as forward difference along specific dimension, e.g.,
[`∂ᵢX = fdiff(X, dims=i)`](@ref fdiff).

Mathematically, the adjoint operator ∂ᵢ' of ∂ᵢ is defined as `<∂ᵢu, v> := <u, ∂ᵢ'v>`.

See also the in-place version [`fgradient!(X)`](@ref) to reuse the allocated memory.
"""
function fgradient(X::AbstractArray{T,N}; adjoint=false) where {T,N}
    # One output buffer per dimension, with eltype widened for signed results.
    ∇X = ntuple(_ -> similar(X, maybe_floattype(T)), N)
    return fgradient!(∇X, X; adjoint=adjoint)
end
212
+
213
"""
    fgradient!(∇X::Tuple, X::AbstractArray; adjoint=false)

The in-place version of the (adjoint) gradient operator [`fgradient`](@ref).

The input `∇X = (∂₁X, ∂₂X, ..., ∂ₙX)` is a tuple of arrays that are similar to `X`, i.e.,
`eltype(∂ᵢX) == eltype(X)` and `axes(∂ᵢX) == axes(X)` for all `i`.
"""
function fgradient!(∇X::NTuple{N,<:AbstractArray}, X; adjoint=false) where N
    all(v->axes(v) == axes(X), ∇X) || throw(ArgumentError("All axes of vector fields ∇X and X should be the same."))
    for (i, ∂ᵢX) in enumerate(∇X)
        if adjoint
            # The adjoint of the forward difference is the negated backward difference,
            # so compute the backward difference and flip the sign in place.
            fdiff!(∂ᵢX, X, dims=i, rev=true)
            @. ∂ᵢX = -∂ᵢX
        else
            fdiff!(∂ᵢX, X, dims=i)
        end
    end
    return ∇X
end
234
+
235
+
236
+
237
# Compatibility shim: `Base.isnothing` was only introduced in Julia 1.1, so provide
# a module-local equivalent on older releases.
if VERSION < v"1.1"
    isnothing(x) = x === nothing
end
240
+
241
+ end # module
0 commit comments