1
- # TODO: add keyword `shrink` to give a consistent result on Base
2
- # when this is done, then we can propose this change to upstream Base
1
+ # Because there exists the `FiniteDiff.jl` package with quite different purposes,
2
+ # this module is not expected to be reexported.
3
+ module FiniteDiff
4
+
5
+ using ImageCore
6
+ using ImageCore.MappedArrays: of_eltype
7
+
8
+ """
9
+ Although stored as an array, an image can also be viewed as a function from the discrete
+ grid space Zᴺ to a continuous space: to R if it is a grayscale image, to C if it is a
+ complex-valued image (e.g., MRI raw data), to Rᴺ if it is a colorant image, and so on.
+ This module provides the discrete versions of gradient-related operators by viewing
+ image arrays as functions.
14
+
15
+ This module provides:
16
+
17
+ - forward/backward difference [`fdiff`](@ref) are the Images-flavor of `Base.diff`
18
+ - gradient operator [`fgradient`](@ref) and its adjoint via keyword `adjoint=true`.
19
+ - divergence operator [`fdiv`](@ref) computes the sum of discrete derivatives of vector
20
+ fields.
21
+ - laplacian operator [`flaplacian`](@ref) is the divergence of the gradient fields.
22
+
23
+ Every function in this module has its in-place version.
24
+ """
25
+ FiniteDiff
26
+
27
+ export
28
+ fdiff, fdiff!,
29
+ fdiv, fdiv!,
30
+ fgradient, fgradient!,
31
+ flaplacian, flaplacian!
32
+
33
+
3
34
"""
4
35
fdiff(A::AbstractArray; dims::Int, rev=false, boundary=:periodic)
5
36
@@ -19,7 +50,7 @@ Take vector as an example, it computes `(A[2]-A[1], A[3]-A[2], ..., A[1]-A[end])
19
50
20
51
# Examples
21
52
22
- ```jldoctest; setup=:(using ImageBase: fdiff)
53
+ ```jldoctest; setup=:(using ImageBase.FiniteDiff: fdiff)
23
54
julia> A = [2 4 8; 3 9 27; 4 16 64]
24
55
3×3 $(Matrix{Int}) :
25
56
2 4 8
@@ -106,3 +137,111 @@ _fdiff_default_dims(A::AbstractVector) = 1
106
137
# Promote the storage type so difference results do not overflow or truncate:
# fixed-point numbers and colorants built on them are lifted to floating point;
# all other types are kept as-is.
maybe_floattype(::Type{T}) where T = T
maybe_floattype(::Type{T}) where T<:FixedPoint = floattype(T)
maybe_floattype(::Type{CT}) where CT<:Color = base_color_type(CT){maybe_floattype(eltype(CT))}
140
+
141
+
142
"""
    fdiv(Vs...)

Compute the divergence of vector fields `Vs`.

See also [`fdiv!`](@ref) for the in-place version.
"""
fdiv(V₁::AbstractArray{T}, Vs::AbstractArray{T}...) where T =
    fdiv!(similar(V₁, maybe_floattype(T)), V₁, Vs...)
# Convenience method: accept the fields packed in a tuple, e.g., the output of `fgradient`.
fdiv(Vs::Tuple) = fdiv(Vs...)
153
+
154
"""
    fdiv!(out, Vs...)

The in-place version of the divergence operator [`fdiv`](@ref).

The result is written into `out`, whose axes must match those of every
vector field in `Vs`.
"""
function fdiv!(out::AbstractArray, V₁::AbstractArray, Vs::AbstractArray...)
    # This is the optimized version of `sum(v->fgradient(v; adjoint=true), (V₁, Vs...))`
    # by removing unnecessary memory allocations.
    all(v->axes(v) == axes(out), (V₁, Vs...)) || throw(ArgumentError("All axes of vector fields Vs and `out` should be the same."))

    # TODO(johnnychen94): for better performance, we can eliminate this `tmp` allocation
    # by fusing multiple `fdiff` in the inner loop.
    out .= fdiff(V₁; dims=1, rev=true, boundary=:periodic)
    tmp = similar(out)
    for i in 1:length(Vs)
        # The i-th remaining field contributes a backward difference along dims=i+1.
        out .+= fdiff!(tmp, Vs[i]; dims=i+1, rev=true, boundary=:periodic)
    end
    return out
end
# Convenience method: accept the fields packed in a tuple, e.g., the output of `fgradient!`.
fdiv!(out::AbstractArray, Vs::Tuple) = fdiv!(out, Vs...)
173
+
174
"""
    flaplacian(X::AbstractArray)

The discrete laplacian operator, i.e., the divergence of the gradient fields of `X`.

See also [`flaplacian!`](@ref) for the in-place version.
"""
function flaplacian(X::AbstractArray)
    # Allocate the output with a float-capable eltype so differences are representable.
    out = similar(X, maybe_floattype(eltype(X)))
    return flaplacian!(out, X)
end
182
+
183
"""
    flaplacian!(out, X)
    flaplacian!(out, ∇X::Tuple, X)

The in-place version of the laplacian operator [`flaplacian`](@ref).

!!! tip Avoiding allocations
    The two-argument method allocates memory to store the intermediate
    gradient fields `∇X`. When calling this repeatedly with images of consistent
    size and type, consider the three-argument form with pre-allocated memory
    for `∇X`, which eliminates that allocation inside this function.
"""
function flaplacian!(out, X::AbstractArray)
    return fdiv!(out, fgradient(X))
end
function flaplacian!(out, ∇X::Tuple, X::AbstractArray)
    return fdiv!(out, fgradient!(∇X, X))
end
197
+
198
+
199
"""
    fgradient(X::AbstractArray; adjoint=false) -> (∂₁X, ∂₂X, ..., ∂ₙX)

Compute the gradient fields of `X`; with `adjoint=true` the adjoint gradient
fields are computed instead.

Each gradient component is a forward difference along the corresponding
dimension, e.g., [`∂ᵢX = fdiff(X, dims=i)`](@ref fdiff).

Mathematically, the adjoint operator ∂ᵢ' of ∂ᵢ is defined as `<∂ᵢu, v> := <u, ∂ᵢ'v>`.

See also the in-place version [`fgradient!(X)`](@ref) to reuse the allocated memory.
"""
function fgradient(X::AbstractArray{T,N}; adjoint::Bool=false) where {T,N}
    # One output array per dimension, allocated with a float-capable eltype.
    ∇X = ntuple(_ -> similar(X, maybe_floattype(T)), N)
    return fgradient!(∇X, X; adjoint=adjoint)
end
215
+
216
"""
    fgradient!(∇X::Tuple, X::AbstractArray; adjoint=false)

The in-place version of the (adjoint) gradient operator [`fgradient`](@ref).

The input `∇X = (∂₁X, ∂₂X, ..., ∂ₙX)` is a tuple of arrays similar to `X`:
`axes(∂ᵢX) == axes(X)` for all `i`, and each `eltype(∂ᵢX)` must be able to hold
the difference values (e.g., `maybe_floattype(eltype(X))`, which is what
[`fgradient`](@ref) allocates; it differs from `eltype(X)` for fixed-point and
colorant eltypes).
"""
function fgradient!(∇X::NTuple{N,<:AbstractArray}, X; adjoint::Bool=false) where N
    all(v->axes(v) == axes(X), ∇X) || throw(ArgumentError("All axes of vector fields ∇X and X should be the same."))
    for i in 1:N
        if adjoint
            # The negative adjoint of the gradient operator for forward difference is
            # the backward difference, see also:
            # Getreuer, Pascal. "Rudin-Osher-Fatemi total variation denoising using split Bregman." _Image Processing On Line_ 2 (2012): 74-95.
            fdiff!(∇X[i], X, dims=i, rev=true)
            # TODO(johnnychen94): ideally we can get avoid flipping the signs for better performance.
            @. ∇X[i] = -∇X[i]
        else
            fdiff!(∇X[i], X, dims=i)
        end
    end
    return ∇X
end
240
+
241
+
242
+
243
# Compat shim: `isnothing` was added to Base in Julia 1.1, so provide a local
# definition only on older versions.
if VERSION < v"1.1"
    isnothing(x) = x === nothing
end
246
+
247
+ end # module
0 commit comments