diff --git a/.gitignore b/.gitignore
index b067edd..e5de18e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,4 @@
-/Manifest.toml
+Manifest.toml
+docs/build/
+docs/site/
+docs/src/generated/
diff --git a/docs/Project.toml b/docs/Project.toml
new file mode 100644
index 0000000..5fe21cd
--- /dev/null
+++ b/docs/Project.toml
@@ -0,0 +1,11 @@
+[deps]
+CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0"
+Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
+FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341"
+ImageGeoms = "9ee76f2b-840d-4475-b6d6-e485c9297852"
+ImagePhantoms = "71a99df6-f52c-4da1-bd2a-69d6f37f3252"
+LinearOperatorCollection = "a4a2c56f-fead-462a-a3ab-85921a5f2575"
+Literate = "98b081ad-f1c9-55d3-8b20-4c87d4299306"
+NFFT = "efe261a4-0d2b-5849-be55-fc731d526b0d"
+RadonKA = "86de8297-835b-47df-b249-c04e8db91db5"
+Wavelets = "29a6e085-ba6d-5f35-a997-948ac2efa89a"
diff --git a/docs/make.jl b/docs/make.jl
new file mode 100644
index 0000000..689f9e5
--- /dev/null
+++ b/docs/make.jl
@@ -0,0 +1,60 @@
+using Documenter, Literate # Documentation
+using RadonKA, Wavelets, NFFT, FFTW # Extensions
+using CairoMakie, ImageGeoms, ImagePhantoms # Documentation Example Packages
+
+# Generate examples
+OUTPUT_BASE = joinpath(@__DIR__(), "src", "generated")
+INPUT_BASE = joinpath(@__DIR__(), "src", "literate")
+for (root, dirs, files) in walkdir(INPUT_BASE)
+  for dir in dirs
+    OUTPUT = joinpath(OUTPUT_BASE, dir)
+    INPUT = joinpath(INPUT_BASE, dir)
+    for file in filter(f -> endswith(f, ".jl"), readdir(INPUT))
+      Literate.markdown(joinpath(INPUT, file), OUTPUT)
+    end
+  end
+end
+
+modules = [LinearOperatorCollection,
+  isdefined(Base, :get_extension) ? Base.get_extension(LinearOperatorCollection, :LinearOperatorFFTWExt) : LinearOperatorCollection.LinearOperatorFFTWExt,
+  isdefined(Base, :get_extension) ? Base.get_extension(LinearOperatorCollection, :LinearOperatorNFFTExt) : LinearOperatorCollection.LinearOperatorNFFTExt,
+  isdefined(Base, :get_extension) ? Base.get_extension(LinearOperatorCollection, :LinearOperatorRadonKAExt) : LinearOperatorCollection.LinearOperatorRadonKAExt,
+  isdefined(Base, :get_extension) ? Base.get_extension(LinearOperatorCollection, :LinearOperatorWaveletExt) : LinearOperatorCollection.LinearOperatorWaveletExt]
+
+makedocs(
+    format = Documenter.HTML(prettyurls=get(ENV, "CI", "false") == "true",
+        canonical="https://github.com/JuliaImageRecon/LinearOperatorCollection.jl",
+        assets=String[],
+        collapselevel=1,
+    ),
+    modules = modules,
+    sitename = "LinearOperatorCollection",
+    authors = "Tobias Knopp, Niklas Hackelberg and Contributors",
+    pages = [
+        "Home" => "index.md",
+        "Getting Started" => "generated/tutorials/overview.md",
+        "Tutorials" => Any[
+            "Weighting Operator" => "generated/tutorials/weighting.md",
+            "FFT Operator" => "generated/tutorials/fft.md",
+            "Diagonal Operator" => "generated/tutorials/diagonal.md",
+            "Gradient Operator" => "generated/tutorials/gradient.md",
+            #"Sampling Operator" => "generated/tutorials/sampling.md",
+            #"NFFT Operator" => "generated/tutorials/nfft.md",
+            "Wavelet Operator" => "generated/tutorials/wavelet.md",
+            "Radon Operator" => "generated/tutorials/radon.md",
+            "Product Operator" => "generated/tutorials/product.md",
+            "Normal Operator" => "generated/tutorials/normal.md",
+        ],
+        "How to" => Any[
+            #"Implement Custom Operators" => "generated/howtos/custom.md",
+            "Enable GPU Acceleration" => "generated/howtos/gpu.md",
+        ],
+        #"Explanations" => Any[
+        #    "Operator Structure" => "operators.md",
+        #],
+        "Reference" => "references.md"
+    ],
+    warnonly = [:missing_docs]
+)
+
+deploydocs(repo = "github.com/JuliaImageRecon/LinearOperatorCollection.jl")
diff --git a/docs/src/index.md b/docs/src/index.md
new file mode 100644
index 0000000..bb9bc72
--- /dev/null
+++ b/docs/src/index.md
@@ -0,0 +1,54 @@
+# LinearOperatorCollection
+
+*Collection of linear operators for multi-dimensional signal and imaging tasks*
+
+
+## Introduction
+
+This package contains a collection of linear operators that are particularly useful for multi-dimensional signal and image processing tasks. Linear operators or linear maps behave like matrices in a matrix-vector product, but aren't necessarily matrices themselves. They can utilize more efficient algorithms and can defer their computation until they are multiplied with a vector.
+
+All operators provided by this package extend the types and methods of [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl). For example, this package
+provides operators for the FFT (Fast Fourier Transform) and its non-equidistant variant (NFFT), the DCT (Discrete Cosine Transform), and the Wavelet transform. This package, however, does not implement
+these transformations itself but uses established libraries for them.
+
+LinearOperatorCollection's main purpose is to provide a wrapper around low-level libraries like FFTW.jl and NFFT.jl, which allows using the transformations as linear operators, i.e., implementing `Op * x`, `adjoint(Op) * x` and the `mul!`-based in-place variants of the former.
+
+## Installation
+
+Within Julia, use the package manager to install this package:
+```julia
+using Pkg
+Pkg.add("LinearOperatorCollection")
+```
+This will install `LinearOperatorCollection` and a subset of the available operators.
+To keep the load time of this package low, many operators are implemented using package extensions.
+For instance, in order to get the `FFTOp`, one needs to install not only `LinearOperatorCollection` but also `FFTW` and load both in a Julia session:
+```julia
+Pkg.add("FFTW")
+using LinearOperatorCollection, FFTW
+```
+Small operators are implemented in LinearOperatorCollection directly.
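+For example, the `WeightingOp` used throughout the tutorials needs no extension at all. The following is a minimal sketch with an arbitrary weight vector, just to illustrate the point:
+```julia
+using LinearOperatorCollection
+
+w = [0.5, 1.0, 2.0]        # example weights
+W = WeightingOp(w)         # element-wise weighting, no extension package required
+W * [1.0, 1.0, 1.0]        # returns the weights themselves
+```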
+
+
+## License / Terms of Usage
+
+The source code of this project is licensed under the MIT license. This implies that
+you are free to use, share, and adapt it. However, please give appropriate credit
+by citing the project.
+
+## Contact
+
+If you have problems using the software, find mistakes, or have general questions please use
+the [issue tracker](https://github.com/JuliaImageRecon/LinearOperatorCollection.jl/issues) to contact us.
+
+## Related Packages
+
+There exist many related packages which also implement efficient and/or lazy operators:
+
+* [LinearOperators.jl](https://github.com/JuliaSmoothOptimizers/LinearOperators.jl)
+* [LinearMaps.jl](https://github.com/JuliaLinearAlgebra/LinearMaps.jl)
+* [LazyArrays.jl](https://github.com/JuliaArrays/LazyArrays.jl)
+* [BlockArrays.jl](https://github.com/JuliaArrays/BlockArrays.jl)
+* [Kronecker.jl](https://github.com/MichielStock/Kronecker.jl)
+
+Generally, it should be possible to combine operators and arrays from various packages.
diff --git a/docs/src/literate/howtos/custom.jl b/docs/src/literate/howtos/custom.jl
new file mode 100644
index 0000000..0931837
--- /dev/null
+++ b/docs/src/literate/howtos/custom.jl
@@ -0,0 +1,2 @@
+# # Implement Custom Operators
+# There are two different ways one can implement a custom LinearOperator. The first one is to directly implement an operator as a LinearOperator from LinearOperators.jl:
diff --git a/docs/src/literate/howtos/gpu.jl b/docs/src/literate/howtos/gpu.jl
new file mode 100644
index 0000000..d4f8985
--- /dev/null
+++ b/docs/src/literate/howtos/gpu.jl
@@ -0,0 +1,19 @@
+# # GPU Acceleration
+include("../../util.jl") #hide
+# GPU kernels generally require all their arguments to exist on the GPU. This is not necessarily the case for the matrix-free operators provided by LinearOperators or LinearOperatorCollection.
+# If a matrix-free operator is solely a function call and contains no internal array state, the operator is GPU compatible as long as the method has a GPU compatible implementation.
+
+# If the operator has internal fields required for its computation, such as temporary arrays for intermediate values or indices, then it needs to move those to the GPU.
+# Furthermore, if the operator needs to create a new array during its execution, e.g. because it is used in a non-in-place matrix-vector multiplication or it is combined with other operators, then the operator needs to specify
+# a storage type. LinearOperatorCollection has several GPU compatible operators, where the storage type is given by setting the `S` parameter:
+# ```julia
+# using CUDA # or AMDGPU, Metal, ...
+# image_gpu = cu(image)
+# ```
+using LinearOperatorCollection.LinearOperators
+image_gpu = image #hide
+storage = Complex.(similar(image_gpu, 0))
+fop = FFTOp(eltype(image_gpu), shape = (N, N), S = typeof(storage))
+LinearOperators.storage_type(fop) == typeof(storage)
+
+# GPU operators can be used just like the other operators. Note, however, that a GPU operator does not necessarily work with a CPU vector.
\ No newline at end of file
diff --git a/docs/src/literate/tutorials/diagonal.jl b/docs/src/literate/tutorials/diagonal.jl
new file mode 100644
index 0000000..43c3b05
--- /dev/null
+++ b/docs/src/literate/tutorials/diagonal.jl
@@ -0,0 +1,19 @@
+# # Block Diagonal Operator
+include("../../util.jl") #hide
+# This operator represents a block-diagonal matrix built from the given operators.
+# One can also provide a single operator and a number of blocks, as sketched below. In that case the given operator is repeated for each block.
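+# The replicated form could look like the following. This block is purely illustrative and assumes a
+# `DiagOp(op, nblocks)` method as described above; the concrete weights are arbitrary:
+wop_single = WeightingOp(fill(0.5, N))
+dop_repeated = DiagOp(wop_single, 3)
+size(dop_repeated) == (3N, 3N)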
+# In the case of stateful operators, one can supply a method for copying the operators.
+blocks = N
+ops = [WeightingOp(fill(i % 2, N)) for i = 1:N]
+dop = DiagOp(ops)
+typeof(dop)
+
+# We can retrieve the operators:
+typeof(dop.ops)
+
+# And visualize the result:
+fig = Figure()
+plot_image(fig[1,1], image, title = "Image")
+plot_image(fig[1,2], reshape(dop * vec(image), N, N), title = "Block Weighted")
+resize_to_layout!(fig)
+fig
\ No newline at end of file
diff --git a/docs/src/literate/tutorials/fft.jl b/docs/src/literate/tutorials/fft.jl
new file mode 100644
index 0000000..40acba6
--- /dev/null
+++ b/docs/src/literate/tutorials/fft.jl
@@ -0,0 +1,19 @@
+# # Fourier Operator
+include("../../util.jl") #hide
+# The Fourier operator and its related operators for the discrete cosine and sine transform are available
+# whenever FFTW.jl is loaded together with LinearOperatorCollection:
+using LinearOperatorCollection, FFTW
+fop = FFTOp(Complex{eltype(image)}, shape = (N, N))
+cop = DCTOp(eltype(image), shape = (N, N))
+sop = DSTOp(eltype(image), shape = (N, N))
+image_frequencies = reshape(fop * vec(image), N, N)
+image_cosine = reshape(cop * vec(image), N, N)
+image_sine = reshape(sop * vec(image), N, N)
+
+fig = Figure()
+plot_image(fig[1,1], image, title = "Image")
+plot_image(fig[1,2], abs.(image_frequencies) .+ eps(), title = "Frequency Domain", colorscale = log10)
+plot_image(fig[1,3], image_cosine, title = "Cosine")
+plot_image(fig[1,4], image_sine, title = "Sine")
+resize_to_layout!(fig)
+fig
\ No newline at end of file
diff --git a/docs/src/literate/tutorials/gradient.jl b/docs/src/literate/tutorials/gradient.jl
new file mode 100644
index 0000000..caa1e03
--- /dev/null
+++ b/docs/src/literate/tutorials/gradient.jl
@@ -0,0 +1,10 @@
+# # Gradient Operator
+include("../../util.jl") #hide
+# This operator computes a directional gradient along one or more dimensions of an array:
+gop = GradientOp(eltype(image); shape = (N, N), dims = 1)
+gradients = reshape(gop * vec(image), :, N)
+fig = Figure()
+plot_image(fig[1,1], image, title = "Image")
+plot_image(fig[1,2], gradients[:, :], title = "Gradient", colormap = :vik)
+resize_to_layout!(fig)
+fig
\ No newline at end of file
diff --git a/docs/src/literate/tutorials/nfft.jl b/docs/src/literate/tutorials/nfft.jl
new file mode 100644
index 0000000..b89a53e
--- /dev/null
+++ b/docs/src/literate/tutorials/nfft.jl
@@ -0,0 +1 @@
+include("../../util.jl") #hide
diff --git a/docs/src/literate/tutorials/normal.jl b/docs/src/literate/tutorials/normal.jl
new file mode 100644
index 0000000..38ecfed
--- /dev/null
+++ b/docs/src/literate/tutorials/normal.jl
@@ -0,0 +1,39 @@
+# # Normal Operator
+include("../../util.jl") #hide
+
+# This operator provides a lazy representation of the normal operator:
+# ```math
+# \begin{equation}
+# (\mathbf{A})^*\mathbf{A}
+# \end{equation}
+# ```
+# for some operator $\mathbf{A}$:
+using FFTW
+fop = op = FFTOp(ComplexF32, shape = (N, N))
+nop = NormalOp(eltype(fop), parent = fop)
+isapprox(nop * vec(image), vec(image))
+
+# And we can again access our original operator:
+typeof(nop.parent)
+
+# LinearOperatorCollection also provides an opinionated `normalOperator` function which tries to optimize the resulting normal operator.
+# As an example, consider the normal operator of a weighted Fourier operator:
+weights = Float32.(collect(range(0, 1, length = N*N)))
+wop = WeightingOp(weights)
+pop = ProdOp(wop, fop)
+nop = normalOperator(pop)
+typeof(nop.parent) == typeof(pop)
+# Note that the parent was changed. This is because the normal operator was optimized by initially computing the weights:
+# ```math
+# \begin{equation}
+# \tilde{\mathbf{W}} = \mathbf{W}^*\mathbf{W}
+# \end{equation}
+# ```
+# and then applying the following in each application of the operator:
+# ```math
+# \begin{equation}
+# \mathbf{A}^*\tilde{\mathbf{W}}\mathbf{A}
+# \end{equation}
+# ```
+
+# Other operators can define different optimization strategies for the `normalOperator` method.
\ No newline at end of file
diff --git a/docs/src/literate/tutorials/overview.jl b/docs/src/literate/tutorials/overview.jl
new file mode 100644
index 0000000..c9d3538
--- /dev/null
+++ b/docs/src/literate/tutorials/overview.jl
@@ -0,0 +1,46 @@
+# # Getting Started
+# To begin, we first need to load LinearOperatorCollection:
+using LinearOperatorCollection
+
+# If we require an operator which is implemented via a package extension, we also need to load
+# the package that implements the functionality of the operator:
+using FFTW
+
+# As an introduction, we will construct a two-dimensional FFT operator and apply it to an image.
+# To construct an operator we can either call its constructor directly:
+N = 256
+op = FFTOp(ComplexF32, shape = (N, N))
+typeof(op)
+
+# Or we can use the factory method:
+op = createLinearOperator(FFTOp{ComplexF64}, shape = (N, N))
+
+# We will use a Shepp-Logan phantom as an example image:
+using ImagePhantoms, ImageGeoms
+image = shepp_logan(N, SheppLoganToft())
+size(image)
+
+# Since our operators are only defined for matrix-vector products, we can't directly apply them to the two-dimensional image.
+# We first have to reshape the image into a vector:
+y = op * vec(image);
+
+# Afterwards we can reshape the result and visualize it with CairoMakie:
+image_freq = reshape(y, N, N)
+using CairoMakie
+function plot_image(figPos, img; title = "", width = 150, height = 150, colorscale = identity)
+  ax = CairoMakie.Axis(figPos[1, 1]; yreversed=true, title, width, height)
+  hidedecorations!(ax)
+  hm = heatmap!(ax, img, colorscale = colorscale)
+  Colorbar(figPos[1, 2], hm)
+end
+fig = Figure()
+plot_image(fig[1,1], image, title = "Image")
+plot_image(fig[1,2], abs.(image_freq), title = "Frequency Domain", colorscale = log10)
+resize_to_layout!(fig)
+fig
+
+# To perform the inverse Fourier transform we can simply use the adjoint of our operator:
+image_inverse = reshape(adjoint(op) * y, N, N)
+plot_image(fig[1,3], real.(image_inverse), title = "Image after Inverse")
+resize_to_layout!(fig)
+fig
diff --git a/docs/src/literate/tutorials/product.jl b/docs/src/literate/tutorials/product.jl
new file mode 100644
index 0000000..c1453ca
--- /dev/null
+++ b/docs/src/literate/tutorials/product.jl
@@ -0,0 +1,33 @@
+# # Product Operator
+include("../../util.jl") #hide
+# This operator describes the product or composition of two operators:
+weights = collect(range(0, 1, length = N*N))
+wop = WeightingOp(weights)
+fop = FFTOp(ComplexF64, shape = (N, N));
+# A feature of LinearOperators.jl is that operators can be cheaply transposed, conjugated and multiplied, and the combined operation is only evaluated in the case of a matrix-vector product.
+tmp_op = wop * fop
+tmp_freqs = tmp_op * vec(image)
+
+
+# As with the WeightingOp, the main difference of the product operator provided by LinearOperatorCollection is its dedicated type, which allows for code specialisation.
+pop = ProdOp(wop, fop)
+typeof(pop)
+# and the ability to retrieve the components:
+typeof(pop.A)
+# and
+typeof(pop.B)
+
+# Otherwise they compute the same thing:
+pop * vec(image) == tmp_op * vec(image)
+
+# Note that a product operator is not thread-safe.
+
+# We can again visualize our result:
+weighted_frequencies = reshape(pop * vec(image), N, N)
+image_inverse = reshape(adjoint(pop) * vec(weighted_frequencies), N, N)
+fig = Figure()
+plot_image(fig[1,1], image, title = "Image")
+plot_image(fig[1,2], abs.(weighted_frequencies) .+ eps(), title = "Frequency Domain", colorscale = log10)
+plot_image(fig[1,3], real.(image_inverse), title = "Image after Inverse")
+resize_to_layout!(fig)
+fig
\ No newline at end of file
diff --git a/docs/src/literate/tutorials/radon.jl b/docs/src/literate/tutorials/radon.jl
new file mode 100644
index 0000000..0f7062c
--- /dev/null
+++ b/docs/src/literate/tutorials/radon.jl
@@ -0,0 +1,13 @@
+# # Radon Operator
+include("../../util.jl") #hide
+# The Radon operator is available when loading RadonKA.jl together with LinearOperatorCollection:
+using RadonKA
+angles = collect(range(0, π, N))
+rop = RadonOp(eltype(image); angles, shape = size(image));
+sinogram = reshape(rop * vec(image), :, N)
+fig = Figure()
+plot_image(fig[1,1], image, title = "Image")
+plot_image(fig[1,2], sinogram, title = "Sinogram")
+plot_image(fig[1,3], reshape(adjoint(rop) * vec(sinogram), N, N), title = "Backprojection")
+resize_to_layout!(fig)
+fig
diff --git a/docs/src/literate/tutorials/sampling.jl b/docs/src/literate/tutorials/sampling.jl
new file mode 100644
index 0000000..62b641d
--- /dev/null
+++ b/docs/src/literate/tutorials/sampling.jl
@@ -0,0 +1,2 @@
+# # Sampling
+include("../../util.jl") #hide
diff --git a/docs/src/literate/tutorials/wavelet.jl b/docs/src/literate/tutorials/wavelet.jl
new file mode 100644
index 0000000..9563a38
--- /dev/null
+++ b/docs/src/literate/tutorials/wavelet.jl
@@ -0,0 +1,11 @@
+# # Wavelet Operator
+include("../../util.jl") #hide
+# The wavelet operator is available when loading Wavelets.jl together with LinearOperatorCollection:
+using Wavelets
+wop = WaveletOp(eltype(image), shape = (N, N))
+wavelet_image = reshape(wop * vec(image), N, N)
+fig = Figure()
+plot_image(fig[1,1], image, title = "Image")
+plot_image(fig[1,2], abs.(wavelet_image) .+ eps(), title = "Wavelet Image", colorscale = sqrt)
+resize_to_layout!(fig)
+fig
diff --git a/docs/src/literate/tutorials/weighting.jl b/docs/src/literate/tutorials/weighting.jl
new file mode 100644
index 0000000..e7c2d8b
--- /dev/null
+++ b/docs/src/literate/tutorials/weighting.jl
@@ -0,0 +1,20 @@
+# # Weighting Operator
+include("../../util.jl") #hide
+# The weighting operator implements a diagonal matrix which multiplies a vector index-wise with the given weights.
+# Such an operator is also implemented within LinearOperators.jl; however, here this operator has a dedicated type on which one can dispatch:
+weights = collect(range(0, 1, length = N*N))
+op = WeightingOp(weights)
+typeof(op)
+
+# One can retrieve the weights from the operator:
+op.weights == weights
+
+# Note that we didn't need to specify the element type of the operator here. In this case the eltype was derived from the provided weights.
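+# As a quick check of that claim (purely illustrative):
+eltype(op) == eltype(weights)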
+weighted_image = reshape(op * vec(image), N, N);
+
+# To visualize our weighted image, we will again use CairoMakie:
+fig = Figure()
+plot_image(fig[1,1], image, title = "Image")
+plot_image(fig[1,2], weighted_image, title = "Weighted Image")
+resize_to_layout!(fig)
+fig
\ No newline at end of file
diff --git a/docs/src/references.md b/docs/src/references.md
new file mode 100644
index 0000000..f525825
--- /dev/null
+++ b/docs/src/references.md
@@ -0,0 +1,24 @@
+```@docs
+SamplingOp
+WeightingOp
+DiagOp
+GradientOp
+ProdOp
+NormalOp
+normalOperator
+WaveletOp
+RadonOp
+NFFTOp
+FFTOp
+DCTOp
+DSTOp
+```
+
+
+
+Modules = [LinearOperatorCollection,
+isdefined(Base, :get_extension) ? Base.get_extension(LinearOperatorCollection, :LinearOperatorFFTWExt) : LinearOperatorCollection.LinearOperatorFFTWExt,
+isdefined(Base, :get_extension) ? Base.get_extension(LinearOperatorCollection, :LinearOperatorNFFTExt) : LinearOperatorCollection.LinearOperatorNFFTExt,
+isdefined(Base, :get_extension) ? Base.get_extension(LinearOperatorCollection, :LinearOperatorRadonKAExt) : LinearOperatorCollection.LinearOperatorRadonKAExt,
+isdefined(Base, :get_extension) ? Base.get_extension(LinearOperatorCollection, :LinearOperatorWaveletExt) : LinearOperatorCollection.LinearOperatorWaveletExt,
+]
diff --git a/docs/src/util.jl b/docs/src/util.jl
new file mode 100644
index 0000000..f9fe089
--- /dev/null
+++ b/docs/src/util.jl
@@ -0,0 +1,11 @@
+using CairoMakie, LinearOperatorCollection
+function plot_image(figPos, img; title = "", width = 150, height = 150, colorscale = identity, colormap = :viridis)
+  ax = CairoMakie.Axis(figPos[1, 1]; yreversed=true, title, width, height)
+  hidedecorations!(ax)
+  hm = heatmap!(ax, img, colorscale = colorscale, colormap = colormap)
+  Colorbar(figPos[1, 2], hm)
+end
+N = 256
+using ImagePhantoms, ImageGeoms
+image = shepp_logan(N, SheppLoganToft())
+nothing
\ No newline at end of file
diff --git a/ext/LinearOperatorFFTWExt/DCTOp.jl b/ext/LinearOperatorFFTWExt/DCTOp.jl
index 1f7743b..efe1ca7 100644
--- a/ext/LinearOperatorFFTWExt/DCTOp.jl
+++ b/ext/LinearOperatorFFTWExt/DCTOp.jl
@@ -34,7 +34,7 @@ returns a `DCTOpImpl <: AbstractLinearOperator` which performs a DCT on a given
 """
 function LinearOperatorCollection.DCTOp(T::Type; shape::Tuple, dcttype=2)
-  tmp=Array{Complex{real(T)}}(undef, shape)
+  tmp=Array{T}(undef, shape)
   if dcttype == 2
     plan = plan_dct!(tmp)
     iplan = plan_idct!(tmp)
diff --git a/ext/LinearOperatorFFTWExt/DSTOp.jl b/ext/LinearOperatorFFTWExt/DSTOp.jl
index 4cd4862..32e2ba9 100644
--- a/ext/LinearOperatorFFTWExt/DSTOp.jl
+++ b/ext/LinearOperatorFFTWExt/DSTOp.jl
@@ -32,7 +32,7 @@ returns a `LinearOperator` which performs a DST on a given input array.
 * `shape::Tuple` - size of the array to transform
 """
 function LinearOperatorCollection.DSTOp(T::Type; shape::Tuple)
-  tmp=Array{Complex{real(T)}}(undef, shape)
+  tmp=Array{T}(undef, shape)
   plan = FFTW.plan_r2r!(tmp,FFTW.RODFT10)
   iplan = FFTW.plan_r2r!(tmp,FFTW.RODFT01)
diff --git a/ext/LinearOperatorNFFTExt/NFFTOp.jl b/ext/LinearOperatorNFFTExt/NFFTOp.jl
index 17e185f..9662688 100644
--- a/ext/LinearOperatorNFFTExt/NFFTOp.jl
+++ b/ext/LinearOperatorNFFTExt/NFFTOp.jl
@@ -1,4 +1,15 @@
+"""
+    NFFTOpImpl(shape::Tuple, tr::Trajectory; kargs...)
+    NFFTOpImpl(shape::Tuple, tr::AbstractMatrix; kargs...)
+generates an `NFFTOpImpl` which evaluates the MRI Fourier signal encoding operator using the NFFT.
+
+# Arguments:
+* `shape::NTuple{D,Int64}` - size of image to encode/reconstruct
+* `tr` - Either a `Trajectory` object, or an `ND x Nsamples` matrix for an ND-dimensional (e.g. 2D or 3D) NFFT with `Nsamples` k-space samples
+* (`nodes=nothing`) - Array containing the trajectory nodes (redundant)
+* (`kargs`) - additional keyword arguments
+"""
 function LinearOperatorCollection.NFFTOp(::Type{T};
   shape::Tuple, nodes::AbstractMatrix{U}, toeplitz=false, oversamplingFactor=1.25, kernelSize=3, kargs...) where {U <: Number, T <: Number}
@@ -27,18 +38,6 @@ end
 
 LinearOperators.storage_type(op::NFFTOpImpl) = typeof(op.Mv5)
 
-"""
-  NFFTOpImpl(shape::Tuple, tr::Trajectory; kargs...)
-  NFFTOpImpl(shape::Tuple, tr::AbstractMatrix; kargs...)
-
-generates a `NFFTOpImpl` which evaluates the MRI Fourier signal encoding operator using the NFFT.
-
-# Arguments:
-* `shape::NTuple{D,Int64}` - size of image to encode/reconstruct
-* `tr` - Either a `Trajectory` object, or a `ND x Nsamples` matrix for an ND-dimenensional (e.g. 2D or 3D) NFFT with `Nsamples` k-space samples
-* (`nodes=nothing`) - Array containg the trajectory nodes (redundant)
-* (`kargs`) - additional keyword arguments
-"""
 function NFFTOpImpl(shape::Tuple, tr::AbstractMatrix{T}; toeplitz=false, oversamplingFactor=1.25,
   kernelSize=3, S = Vector{Complex{T}}, kargs...) where {T}
   baseArrayType = Base.typename(S).wrapper # https://github.com/JuliaLang/julia/issues/35543
diff --git a/ext/LinearOperatorRadonKAExt/RadonOp.jl b/ext/LinearOperatorRadonKAExt/RadonOp.jl
index e4d6132..954a201 100644
--- a/ext/LinearOperatorRadonKAExt/RadonOp.jl
+++ b/ext/LinearOperatorRadonKAExt/RadonOp.jl
@@ -1,3 +1,16 @@
+"""
+    RadonOp(::Type{T}; shape::NTuple{N, Int}, angles, geometry = RadonParallelCircle(shape[1], -(shape[1]-1)÷2:(shape[1]-1)÷2), μ = nothing, S = Vector{T}) where {T, N}
+
+Generates a `RadonOp` which evaluates the Radon transform operator and its adjoint (backprojection) for a given geometry and projection angles.
+
+# Arguments:
+* `T` - element type for the operator (e.g., `Float64`, `ComplexF32`)
+* `shape::NTuple{N, Int}` - size of the image
+* `angles` - array of projection angles
+* `geometry` - Radon geometry descriptor (default: parallel beam circle)
+* `μ` - optional attenuation map (for attenuated Radon transform)
+* `S` - storage type for internal vectors (default: `Vector{T}`)
+"""
 function LinearOperatorCollection.RadonOp(::Type{T}; shape::NTuple{N, Int}, angles,
   geometry = RadonParallelCircle(shape[1], -(shape[1]-1)÷2:(shape[1]-1)÷2), μ = nothing, S = Vector{T}) where {T, N}
   return RadonOpImpl(T; shape, angles, geometry, μ, S)
diff --git a/src/ProdOp.jl b/src/ProdOp.jl
index ccba07e..e33f5c3 100644
--- a/src/ProdOp.jl
+++ b/src/ProdOp.jl
@@ -1,12 +1,4 @@
 export ProdOp
-"""
-  `mutable struct ProdOp{T}`
-
-  struct describing the result of a composition/product of operators.
-  Describing the composition using a dedicated type has the advantage
-  that the latter can be made copyable. This is particularly relevant for
-  multi-threaded code
-"""
 mutable struct ProdOp{T,U,V, vecT <: AbstractVector{T}} <: AbstractLinearOperatorFromCollection{T}
   nrow :: Int
   ncol :: Int
@@ -31,7 +23,7 @@ end
 """
     ProdOp(A,B)
 
-composition/product of two Operators. Differs with * since it can handle normal operator
+composition/product of two Operators. Computes `A * (B * x)`. Differs from composition via `*` since it can handle normal operators
 and allows for specialisation on `A` and `B`.
 """
 function ProdOp(A, B)
   nrow = size(A, 1)
diff --git a/src/SamplingOp.jl b/src/SamplingOp.jl
index e67997f..e57b0d7 100644
--- a/src/SamplingOp.jl
+++ b/src/SamplingOp.jl
@@ -1,5 +1,15 @@
 export vectorizePattern
+"""
+    SamplingOp(pattern::Array{Int}, shape::Tuple)
+
+builds a `LinearOperator` which only returns the vector elements at positions
+indicated by pattern.
+
+# Arguments
+* `pattern::Array{Int}` - indices to sample
+* `shape::Tuple` - size of the array to sample
+"""
 function LinearOperatorCollection.SamplingOp(::Type{T};
   pattern::P, shape::Tuple=(), S = Vector{T}) where {P} where T <: Number
   if length(shape) == 0
@@ -18,16 +28,6 @@ function vectorizePattern(idx::T, shape::Tuple) where T<:AbstractArray{Int}
   return [ floor(Int,(i-1)/size(idx,1))*shape[1]+idx[i] for i = 1:length(idx) ]
 end
 
-"""
-    SamplingOp(pattern::Array{Int}, shape::Tuple)
-
-builds a `LinearOperator` which only returns the vector elements at positions
-indicated by pattern.
-
-# Arguents
-* `pattern::Array{Int}` - indices to sample
-* `shape::Tuple` - size of the array to sample
-"""
 function SamplingOpImpl(T::Type{<:Number}, pattern::AbstractArray{Int}, shape::Tuple; S = Vector{T})
   ndims(pattern)>1 ? idx = vectorizePattern(pattern, shape) : idx = pattern
   return opEye(T,length(idx), S = S)*opRestriction(idx, prod(shape); S = S)