From cabbb38e00e27de0fb6c5de21d9b39990219f0c3 Mon Sep 17 00:00:00 2001 From: MartinuzziFrancesco Date: Fri, 14 Mar 2025 10:00:59 +0100 Subject: [PATCH 1/9] generalizing initializers --- src/esn/esn_inits.jl | 90 +++++++++++++++++++++++++++----------------- 1 file changed, 55 insertions(+), 35 deletions(-) diff --git a/src/esn/esn_inits.jl b/src/esn/esn_inits.jl index 69047c62..71b68bfd 100644 --- a/src/esn/esn_inits.jl +++ b/src/esn/esn_inits.jl @@ -660,6 +660,7 @@ Create and return a delay line reservoir matrix [^rodan2010]. - `weight`: Determines the value of all connections in the reservoir. Default is 0.1. + - `shift`: delay line shift. Default is 1. - `return_sparse`: flag for returning a `sparse` matrix. Default is `false`. @@ -688,19 +689,29 @@ julia> res_matrix = delay_line(5, 5; weight=1) IEEE transactions on neural networks 22.1 (2010): 131-144. """ function delay_line(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight=T(0.1), return_sparse::Bool=false) where {T <: Number} + weight=T(0.1), shift::Int=1, return_sparse::Bool=false) where {T <: Number} throw_sparse_error(return_sparse) - reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) @assert length(dims) == 2&&dims[1] == dims[2] """\n The dimensions must define a square matrix (e.g., (100, 100)) - """ + """ + reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) + delay_line!(reservoir_matrix, weight, shift) + return return_init_as(Val(return_sparse), reservoir_matrix) +end - for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - 1) - reservoir_matrix[idx + 1, idx] = T(weight) +function delay_line!(reservoir_matrix::AbstractMatrix, weight::Number, + shift::Int) + for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - shift) + reservoir_matrix[idx + shift, idx] = weight end +end - return return_init_as(Val(return_sparse), reservoir_matrix) +function delay_line!(reservoir_matrix::AbstractMatrix, weight::AbstractVector, + shift::Int) + for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - shift) + reservoir_matrix[idx + shift, idx] = weight[idx] + end end """ @@ -752,16 +763,29 @@ julia> res_matrix = delay_line_backward(Float16, 5, 5) IEEE transactions on neural networks 22.1 (2010): 131-144. """ function delay_line_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight=T(0.1), fb_weight=T(0.2), return_sparse::Bool=false) where {T <: Number} + weight=T(0.1), fb_weight=T(0.2), shift::Int=1, fb_shift::Int=1, + return_sparse::Bool=false) where {T <: Number} throw_sparse_error(return_sparse) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) 
- for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - 1) - reservoir_matrix[idx + 1, idx] = T(weight) - reservoir_matrix[idx, idx + 1] = T(fb_weight) - end + delay_line!(reservoir_matrix, weight, shift) + backward_connection!(reservoir_matrix, fb_weight, fb_shift) return return_init_as(Val(return_sparse), reservoir_matrix) end +function backward_connection!(reservoir_matrix::AbstractMatrix, weight::Number, + shift::Int) + for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - shift) + reservoir_matrix[idx, idx + shift] = weight + end +end + +function backward_connection!(reservoir_matrix::AbstractMatrix, weight::AbstractVector, + shift::Int) + for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - shift) + reservoir_matrix[idx, idx + shift] = weight[idx] + end +end + """ cycle_jumps([rng], [T], dims...; cycle_weight=0.1, jump_weight=0.1, jump_size=3, return_sparse=false) @@ -817,12 +841,7 @@ function cycle_jumps(rng::AbstractRNG, ::Type{T}, dims::Integer...; throw_sparse_error(return_sparse) res_size = first(dims) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) - - for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - 1) - reservoir_matrix[idx + 1, idx] = T(cycle_weight) - end - - reservoir_matrix[1, res_size] = T(cycle_weight) + simple_cycle!(reservoir_matrix, cycle_weight) for idx in 1:jump_size:(res_size - jump_size) tmp = (idx + jump_size) % res_size @@ -884,13 +903,22 @@ function simple_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; weight=T(0.1), return_sparse::Bool=false) where {T <: Number} throw_sparse_error(return_sparse) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) + simple_cycle!(reservoir_matrix, weight) + return return_init_as(Val(return_sparse), reservoir_matrix) +end +function simple_cycle!(reservoir_matrix::AbstractMatrix, weight::Number) for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - 1) - reservoir_matrix[idx + 1, idx] = T(weight) + reservoir_matrix[idx + 1, idx] = weight end + reservoir_matrix[1, end] = weight +end - reservoir_matrix[1, dims[1]] = T(weight) - return return_init_as(Val(return_sparse), reservoir_matrix) +function simple_cycle!(reservoir_matrix::AbstractMatrix, weight::AbstractVector) + for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - 1) + reservoir_matrix[idx + 1, idx] = weight[idx] + end + reservoir_matrix[1, end] = weight[end] end """ @@ -1497,17 +1525,13 @@ julia> reservoir_matrix = selfloop_delayline_backward(5, 5; weight=0.3) International Journal of Computational Science and Engineering 19.3 (2019): 407-417. """ function selfloop_delayline_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight=T(0.1f0), selfloop_weight=T(0.1f0), - return_sparse::Bool=false) where {T <: Number} + shift::Int=1, fb_shift::Int=2, weight=T(0.1f0), fb_weight=weight, + selfloop_weight=T(0.1f0), return_sparse::Bool=false) where {T <: Number} throw_sparse_error(return_sparse) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) 
reservoir_matrix += T(selfloop_weight) .* I(dims[1]) - for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - 1) - reservoir_matrix[idx + 1, idx] = T(weight) - end - for idx in (first(axes(reservoir_matrix, 1))):(last(axes(reservoir_matrix, 1)) - 2) - reservoir_matrix[idx, idx + 2] = T(weight) - end + delay_line!(reservoir_matrix, weight, shift) + backward_connection!(reservoir_matrix, fb_weight, fb_shift) return return_init_as(Val(return_sparse), reservoir_matrix) end @@ -1573,14 +1597,12 @@ julia> reservoir_matrix = selfloop_forward_connection(5, 5; weight=0.5) International Journal of Computational Science and Engineering 19.3 (2019): 407-417. """ function selfloop_forward_connection(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight=T(0.1f0), selfloop_weight=T(0.1f0), + weight=T(0.1f0), selfloop_weight=T(0.1f0), shift::Int=2, return_sparse::Bool=false) where {T <: Number} throw_sparse_error(return_sparse) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) reservoir_matrix += T(selfloop_weight) .* I(dims[1]) - for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - 2) - reservoir_matrix[idx + 2, idx] = T(weight) - end + delay_line!(reservoir_matrix, weight, shift) return return_init_as(Val(return_sparse), reservoir_matrix) end @@ -1645,9 +1667,7 @@ function forward_connection(rng::AbstractRNG, ::Type{T}, dims::Integer...; weight=T(0.1f0), return_sparse::Bool=false) where {T <: Number} throw_sparse_error(return_sparse) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) - for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - 2) - reservoir_matrix[idx + 2, idx] = T(weight) - end + delay_line!(reservoir_matrix, weight, 2) return return_init_as(Val(return_sparse), reservoir_matrix) end From a268310e5f37cd0f05186b6e7194eb713de90ca8 Mon Sep 17 00:00:00 2001 From: MartinuzziFrancesco Date: Fri, 14 Mar 2025 12:40:38 +0100 Subject: [PATCH 2/9] better generics --- src/esn/esn_inits.jl | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/src/esn/esn_inits.jl b/src/esn/esn_inits.jl index 71b68bfd..90651b73 100644 --- a/src/esn/esn_inits.jl +++ b/src/esn/esn_inits.jl @@ -702,9 +702,8 @@ end function delay_line!(reservoir_matrix::AbstractMatrix, weight::Number, shift::Int) - for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - shift) - reservoir_matrix[idx + shift, idx] = weight - end + weights = fill(weight, size(reservoir_matrix, 1) - shift) + delay_line!(reservoir_matrix, weights, shift) end function delay_line!(reservoir_matrix::AbstractMatrix, weight::AbstractVector, @@ -774,9 +773,8 @@ end function backward_connection!(reservoir_matrix::AbstractMatrix, weight::Number, shift::Int) - for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - shift) - reservoir_matrix[idx, idx + shift] = weight - end + weights = fill(weight, size(reservoir_matrix, 1) - shift) + backward_connection!(reservoir_matrix, weights, shift) end function backward_connection!(reservoir_matrix::AbstractMatrix, weight::AbstractVector, @@ -842,17 +840,19 @@ function cycle_jumps(rng::AbstractRNG, ::Type{T}, dims::Integer...; res_size = first(dims) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) 
simple_cycle!(reservoir_matrix, cycle_weight) + add_jumps!(reservoir_matrix, cycle_weight, jump_size) + return return_init_as(Val(return_sparse), reservoir_matrix) +end - for idx in 1:jump_size:(res_size - jump_size) - tmp = (idx + jump_size) % res_size +function add_jumps!(reservoir_matrix::AbstractMatrix, weight::Number, jump_size::Int) + for idx in 1:jump_size:(size(reservoir_matrix, 1) - jump_size) + tmp = (idx + jump_size) % size(reservoir_matrix, 1) if tmp == 0 - tmp = res_size + tmp = size(reservoir_matrix, 1) end - reservoir_matrix[idx, tmp] = T(cycle_weight) - reservoir_matrix[tmp, idx] = T(cycle_weight) + reservoir_matrix[idx, tmp] = weight + reservoir_matrix[tmp, idx] = weight end - - return return_init_as(Val(return_sparse), reservoir_matrix) end """ @@ -908,10 +908,8 @@ function simple_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; end function simple_cycle!(reservoir_matrix::AbstractMatrix, weight::Number) - for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - 1) - reservoir_matrix[idx + 1, idx] = weight - end - reservoir_matrix[1, end] = weight + weights = fill(weight, size(reservoir_matrix, 1)) + simple_cycle!(reservoir_matrix, weights) end function simple_cycle!(reservoir_matrix::AbstractMatrix, weight::AbstractVector) From e011c243476becc4f9e927c542f5e518eccecfe3 Mon Sep 17 00:00:00 2001 From: MartinuzziFrancesco Date: Sat, 15 Mar 2025 11:34:12 +0100 Subject: [PATCH 3/9] generalized minimal building blocks --- src/ReservoirComputing.jl | 1 + src/esn/esn_inits.jl | 175 +++++++----------------------------- src/esn/inits_components.jl | 140 +++++++++++++++++++++++++++++ test/esn/test_inits.jl | 4 +- 4 files changed, 175 insertions(+), 145 deletions(-) create mode 100644 src/esn/inits_components.jl diff --git a/src/ReservoirComputing.jl b/src/ReservoirComputing.jl index 3a032246..2ab1f087 100644 --- a/src/ReservoirComputing.jl +++ b/src/ReservoirComputing.jl @@ -22,6 +22,7 @@ include("predict.jl") include("train/linear_regression.jl") #esn +include("esn/inits_components.jl") include("esn/esn_inits.jl") include("esn/esn_reservoir_drivers.jl") include("esn/esn.jl") diff --git a/src/esn/esn_inits.jl b/src/esn/esn_inits.jl index 90651b73..368e819d 100644 --- a/src/esn/esn_inits.jl +++ b/src/esn/esn_inits.jl @@ -1,35 +1,3 @@ -# dispatch over dense inits -function return_init_as(::Val{false}, layer_matrix::AbstractVecOrMat) - return layer_matrix -end - -# error for sparse inits with no SparseArrays.jl call - -function throw_sparse_error(return_sparse::Bool) - if return_sparse && !haskey(Base.loaded_modules, :SparseArrays) - error("""\n - Sparse output requested but SparseArrays.jl is not loaded. - Please load it with: - - using SparseArrays\n - """) - end -end - -## scale spectral radius - -function scale_radius!(reservoir_matrix::AbstractMatrix, radius::AbstractFloat) - rho_w = maximum(abs.(eigvals(reservoir_matrix))) - reservoir_matrix .*= radius / rho_w - if Inf in unique(reservoir_matrix) || -Inf in unique(reservoir_matrix) - error("""\n - Sparsity too low for size of the matrix. - Increase res_size or increase sparsity.\n - """) - end - return reservoir_matrix -end - ### input layers """ scaled_rand([rng], [T], dims...; @@ -274,51 +242,16 @@ julia> res_input = minimal_init(8, 3; p=0.8)# higher p -> more positive signs IEEE transactions on neural networks 22.1 (2010): 131-144. """ function minimal_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; - sampling_type::Symbol=:bernoulli, kwargs...) 
where {T <: Number} + weight::Number=T(0.1), sampling_type::Symbol=:bernoulli!, kwargs...) where {T <: + Number} res_size, in_size = dims - f_sample = getfield(@__MODULE__, sampling_type) - layer_matrix = f_sample(rng, T, res_size, in_size; kwargs...) - return layer_matrix -end - -function bernoulli(rng::AbstractRNG, ::Type{T}, res_size::Int, in_size::Int; - weight::Number=T(0.1), p::Number=T(0.5)) where {T <: Number} input_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size) - for idx in 1:res_size - for jdx in 1:in_size - if DeviceAgnostic.rand(rng, T) < p - input_matrix[idx, jdx] = T(weight) - else - input_matrix[idx, jdx] = -T(weight) - end - end - end + input_matrix .+= T(weight) + f_sample = getfield(@__MODULE__, sampling_type) + f_sample(rng, input_matrix; kwargs...) return input_matrix end -function irrational(rng::AbstractRNG, ::Type{T}, res_size::Int, in_size::Int; - irrational::Irrational=pi, start::Int=1, - weight::Number=T(0.1)) where {T <: Number} - setprecision(BigFloat, Int(ceil(log2(10) * (res_size * in_size + start + 1)))) - ir_string = string(BigFloat(irrational)) |> collect - deleteat!(ir_string, findall(x -> x == '.', ir_string)) - ir_array = DeviceAgnostic.zeros(rng, T, length(ir_string)) - input_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size) - - for idx in eachindex(ir_string) - ir_array[idx] = parse(Int, ir_string[idx]) - end - - for idx in 1:res_size - for jdx in 1:in_size - random_number = DeviceAgnostic.rand(rng, T) - input_matrix[idx, jdx] = random_number < 0.5 ? -T(weight) : T(weight) - end - end - - return T.(input_matrix) -end - @doc raw""" chebyshev_mapping([rng], [T], dims...; amplitude=one(T), sine_divisor=one(T), @@ -689,30 +622,18 @@ julia> res_matrix = delay_line(5, 5; weight=1) IEEE transactions on neural networks 22.1 (2010): 131-144. """ function delay_line(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight=T(0.1), shift::Int=1, return_sparse::Bool=false) where {T <: Number} + weight=T(0.1), shift::Int=1, return_sparse::Bool=false, + kwargs...) where {T <: Number} throw_sparse_error(return_sparse) @assert length(dims) == 2&&dims[1] == dims[2] """\n The dimensions must define a square matrix (e.g., (100, 100)) """ reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) - delay_line!(reservoir_matrix, weight, shift) + delay_line!(rng, reservoir_matrix, weight, shift; kwargs...) return return_init_as(Val(return_sparse), reservoir_matrix) end -function delay_line!(reservoir_matrix::AbstractMatrix, weight::Number, - shift::Int) - weights = fill(weight, size(reservoir_matrix, 1) - shift) - delay_line!(reservoir_matrix, weights, shift) -end - -function delay_line!(reservoir_matrix::AbstractMatrix, weight::AbstractVector, - shift::Int) - for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - shift) - reservoir_matrix[idx + shift, idx] = weight[idx] - end -end - """ delay_line_backward([rng], [T], dims...; weight=0.1, fb_weight=0.2, return_sparse=false) @@ -763,27 +684,15 @@ julia> res_matrix = delay_line_backward(Float16, 5, 5) """ function delay_line_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...; weight=T(0.1), fb_weight=T(0.2), shift::Int=1, fb_shift::Int=1, - return_sparse::Bool=false) where {T <: Number} + return_sparse::Bool=false, delay_kwargs::NamedTuple=NamedTuple(), + fb_kwargs::NamedTuple=NamedTuple()) where {T <: Number} throw_sparse_error(return_sparse) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) 
- delay_line!(reservoir_matrix, weight, shift) - backward_connection!(reservoir_matrix, fb_weight, fb_shift) + delay_line!(rng, reservoir_matrix, weight, shift; delay_kwargs...) + backward_connection!(rng, reservoir_matrix, fb_weight, fb_shift; fb_kwargs...) return return_init_as(Val(return_sparse), reservoir_matrix) end -function backward_connection!(reservoir_matrix::AbstractMatrix, weight::Number, - shift::Int) - weights = fill(weight, size(reservoir_matrix, 1) - shift) - backward_connection!(reservoir_matrix, weights, shift) -end - -function backward_connection!(reservoir_matrix::AbstractMatrix, weight::AbstractVector, - shift::Int) - for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - shift) - reservoir_matrix[idx, idx + shift] = weight[idx] - end -end - """ cycle_jumps([rng], [T], dims...; cycle_weight=0.1, jump_weight=0.1, jump_size=3, return_sparse=false) @@ -835,26 +744,17 @@ julia> res_matrix = cycle_jumps(5, 5; jump_size=2) """ function cycle_jumps(rng::AbstractRNG, ::Type{T}, dims::Integer...; cycle_weight::Number=T(0.1), jump_weight::Number=T(0.1), - jump_size::Int=3, return_sparse::Bool=false) where {T <: Number} + jump_size::Int=3, return_sparse::Bool=false, + cycle_kwargs::NamedTuple=NamedTuple(), jump_kwargs::NamedTuple=NamedTuple()) where {T <: + Number} throw_sparse_error(return_sparse) res_size = first(dims) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) - simple_cycle!(reservoir_matrix, cycle_weight) - add_jumps!(reservoir_matrix, cycle_weight, jump_size) + simple_cycle!(rng, reservoir_matrix, cycle_weight; cycle_kwargs...) + add_jumps!(rng, reservoir_matrix, cycle_weight, jump_size; jump_kwargs...) return return_init_as(Val(return_sparse), reservoir_matrix) end -function add_jumps!(reservoir_matrix::AbstractMatrix, weight::Number, jump_size::Int) - for idx in 1:jump_size:(size(reservoir_matrix, 1) - jump_size) - tmp = (idx + jump_size) % size(reservoir_matrix, 1) - if tmp == 0 - tmp = size(reservoir_matrix, 1) - end - reservoir_matrix[idx, tmp] = weight - reservoir_matrix[tmp, idx] = weight - end -end - """ simple_cycle([rng], [T], dims...; weight=0.1, return_sparse=false) @@ -900,25 +800,13 @@ julia> res_matrix = simple_cycle(5, 5; weight=11) IEEE transactions on neural networks 22.1 (2010): 131-144. """ function simple_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight=T(0.1), return_sparse::Bool=false) where {T <: Number} + weight=T(0.1), return_sparse::Bool=false, kwargs...) where {T <: Number} throw_sparse_error(return_sparse) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) - simple_cycle!(reservoir_matrix, weight) + simple_cycle!(rng, reservoir_matrix, weight; kwargs...) 
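+    # the in-place helper now builds the cycle and applies any requested sign sampling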
return return_init_as(Val(return_sparse), reservoir_matrix) end -function simple_cycle!(reservoir_matrix::AbstractMatrix, weight::Number) - weights = fill(weight, size(reservoir_matrix, 1)) - simple_cycle!(reservoir_matrix, weights) -end - -function simple_cycle!(reservoir_matrix::AbstractMatrix, weight::AbstractVector) - for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - 1) - reservoir_matrix[idx + 1, idx] = weight[idx] - end - reservoir_matrix[1, end] = weight[end] -end - """ pseudo_svd([rng], [T], dims...; max_value=1.0, sparsity=0.1, sorted=true, reverse_sort=false, @@ -1524,12 +1412,14 @@ julia> reservoir_matrix = selfloop_delayline_backward(5, 5; weight=0.3) """ function selfloop_delayline_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...; shift::Int=1, fb_shift::Int=2, weight=T(0.1f0), fb_weight=weight, - selfloop_weight=T(0.1f0), return_sparse::Bool=false) where {T <: Number} + selfloop_weight=T(0.1f0), return_sparse::Bool=false, + delay_kwargs::NamedTuple=NamedTuple(), fb_kwargs::NamedTuple=NamedTuple()) where {T <: + Number} throw_sparse_error(return_sparse) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) reservoir_matrix += T(selfloop_weight) .* I(dims[1]) - delay_line!(reservoir_matrix, weight, shift) - backward_connection!(reservoir_matrix, fb_weight, fb_shift) + delay_line!(rng, reservoir_matrix, weight, shift; delay_kwargs...) + backward_connection!(rng, reservoir_matrix, fb_weight, fb_shift; fb_kwargs...) return return_init_as(Val(return_sparse), reservoir_matrix) end @@ -1596,11 +1486,11 @@ julia> reservoir_matrix = selfloop_forward_connection(5, 5; weight=0.5) """ function selfloop_forward_connection(rng::AbstractRNG, ::Type{T}, dims::Integer...; weight=T(0.1f0), selfloop_weight=T(0.1f0), shift::Int=2, - return_sparse::Bool=false) where {T <: Number} + return_sparse::Bool=false, kwargs...) where {T <: Number} throw_sparse_error(return_sparse) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) reservoir_matrix += T(selfloop_weight) .* I(dims[1]) - delay_line!(reservoir_matrix, weight, shift) + delay_line!(rng, reservoir_matrix, weight, shift; kwargs...) return return_init_as(Val(return_sparse), reservoir_matrix) end @@ -1662,21 +1552,20 @@ julia> reservoir_matrix = forward_connection(5, 5; weight=0.5) International Journal of Computational Science and Engineering 19.3 (2019): 407-417. """ function forward_connection(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight=T(0.1f0), return_sparse::Bool=false) where {T <: Number} + weight=T(0.1f0), return_sparse::Bool=false, kwargs...) where {T <: Number} throw_sparse_error(return_sparse) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) - delay_line!(reservoir_matrix, weight, 2) + delay_line!(rng, reservoir_matrix, weight, 2; kwargs...) 
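+    # note: a forward connection is just a delay line with a fixed shift of 2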
return return_init_as(Val(return_sparse), reservoir_matrix) end ### fallbacks #fallbacks for initializers #eventually to remove once migrated to WeightInitializers.jl for initializer in (:rand_sparse, :delay_line, :delay_line_backward, :cycle_jumps, - :simple_cycle, :pseudo_svd, :chaotic_init, - :scaled_rand, :weighted_init, :informed_init, :minimal_init, :chebyshev_mapping, - :logistic_mapping, :modified_lm, :low_connectivity, :double_cycle, :selfloop_cycle, - :selfloop_feedback_cycle, :selfloop_delayline_backward, :selfloop_forward_connection, - :forward_connection) + :simple_cycle, :pseudo_svd, :chaotic_init, :scaled_rand, :weighted_init, + :informed_init, :minimal_init, :chebyshev_mapping, :logistic_mapping, :modified_lm, + :low_connectivity, :double_cycle, :selfloop_cycle, :selfloop_feedback_cycle, + :selfloop_delayline_backward, :selfloop_forward_connection, :forward_connection) @eval begin function ($initializer)(dims::Integer...; kwargs...) return $initializer(Utils.default_rng(), Float32, dims...; kwargs...) diff --git a/src/esn/inits_components.jl b/src/esn/inits_components.jl new file mode 100644 index 00000000..1b0aba4f --- /dev/null +++ b/src/esn/inits_components.jl @@ -0,0 +1,140 @@ +# dispatch over dense inits +function return_init_as(::Val{false}, layer_matrix::AbstractVecOrMat) + return layer_matrix +end + +# error for sparse inits with no SparseArrays.jl call +function throw_sparse_error(return_sparse::Bool) + if return_sparse && !haskey(Base.loaded_modules, :SparseArrays) + error("""\n + Sparse output requested but SparseArrays.jl is not loaded. + Please load it with: + + using SparseArrays\n + """) + end +end + +## scale spectral radius +function scale_radius!(reservoir_matrix::AbstractMatrix, radius::AbstractFloat) + rho_w = maximum(abs.(eigvals(reservoir_matrix))) + reservoir_matrix .*= radius / rho_w + if Inf in unique(reservoir_matrix) || -Inf in unique(reservoir_matrix) + error("""\n + Sparsity too low for size of the matrix. + Increase res_size or increase sparsity.\n + """) + end + return reservoir_matrix +end + +function default_sample!(rng::AbstractRNG, vecormat::AbstractVecOrMat) + return +end + +function bernoulli!(rng::AbstractRNG, vecormat::AbstractVecOrMat; p::Number=0.5) + for idx in eachindex(vecormat) + if rand(rng) > p + vecormat[idx] = -vecormat[idx] + end + end +end + +#TODO: @MartinuzziFrancesco maybe change name here #wait, for sure change name here +function irrational!(rng::AbstractRNG, vecormat::AbstractVecOrMat; + irrational::Irrational=pi, start::Int=1) + total_elements = length(vecormat) + setprecision(BigFloat, Int(ceil(log2(10) * (total_elements + start + 1)))) + ir_string = collect(string(BigFloat(irrational))) + deleteat!(ir_string, findall(x -> x == '.', ir_string)) + ir_array = zeros(Int, length(ir_string)) + for idx in eachindex(ir_string) + ir_array[idx] = parse(Int, ir_string[idx]) + end + + for idx in eachindex(vecormat) + digit_index = start + idx + if digit_index > length(ir_array) + error("Not enough digits available. Increase precision or adjust start.") + end + if isodd(ir_array[digit_index]) + vecormat[idx] = -vecormat[idx] + end + end + + return vecormat +end + +function delay_line!(rng::AbstractRNG, reservoir_matrix::AbstractMatrix, weight::Number, + shift::Int; kwargs...) + weights = fill(weight, size(reservoir_matrix, 1) - shift) + delay_line!(rng, reservoir_matrix, weights, shift; kwargs...) 
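+    # note: the scalar `weight` was expanded with `fill`, so the vector method
+    # below performs the actual writes for both call forms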
+end + +function delay_line!( + rng::AbstractRNG, reservoir_matrix::AbstractMatrix, weight::AbstractVector, + shift::Int; sampling_type=:default_sample!, kwargs...) + f_sample = getfield(@__MODULE__, sampling_type) + f_sample(rng, weight; kwargs...) + for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - shift) + reservoir_matrix[idx + shift, idx] = weight[idx] + end +end + +function backward_connection!( + rng::AbstractRNG, reservoir_matrix::AbstractMatrix, weight::Number, + shift::Int; kwargs...) + weights = fill(weight, size(reservoir_matrix, 1) - shift) + backward_connection!(rng, reservoir_matrix, weights, shift; kwargs...) +end + +function backward_connection!( + rng::AbstractRNG, reservoir_matrix::AbstractMatrix, weight::AbstractVector, + shift::Int; sampling_type=:default_sample!, kwargs...) + f_sample = getfield(@__MODULE__, sampling_type) + f_sample(rng, weight; kwargs...) + for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - shift) + reservoir_matrix[idx, idx + shift] = weight[idx] + end +end + +function simple_cycle!( + rng::AbstractRNG, reservoir_matrix::AbstractMatrix, weight::Number; kwargs...) + weights = fill(weight, size(reservoir_matrix, 1)) + simple_cycle!(rng, reservoir_matrix, weights; kwargs...) +end + +function simple_cycle!( + rng::AbstractRNG, reservoir_matrix::AbstractMatrix, weight::AbstractVector; + sampling_type=:default_sample!, kwargs...) + f_sample = getfield(@__MODULE__, sampling_type) + f_sample(rng, weight; kwargs...) + for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - 1) + reservoir_matrix[idx + 1, idx] = weight[idx] + end + reservoir_matrix[1, end] = weight[end] +end + +function add_jumps!(rng::AbstractRNG, reservoir_matrix::AbstractMatrix, + weight::Number, jump_size::Int; kwargs...) + weights = fill( + weight, length(collect(1:jump_size:(size(reservoir_matrix, 1) - jump_size)))) + add_jumps!(rng, reservoir_matrix, weights, jump_size; kwargs...) +end + +function add_jumps!(rng::AbstractRNG, reservoir_matrix::AbstractMatrix, + weight::AbstractVector, jump_size::Int; + sampling_type=:default_sample!, kwargs...) + f_sample = getfield(@__MODULE__, sampling_type) + f_sample(rng, weight; kwargs...) + w_idx = 1 + for idx in 1:jump_size:(size(reservoir_matrix, 1) - jump_size) + tmp = (idx + jump_size) % size(reservoir_matrix, 1) + if tmp == 0 + tmp = size(reservoir_matrix, 1) + end + reservoir_matrix[idx, tmp] = weight[w_idx] + reservoir_matrix[tmp, idx] = weight[w_idx] + w_idx += 1 + end +end diff --git a/test/esn/test_inits.jl b/test/esn/test_inits.jl index 9f475715..25d46bd0 100644 --- a/test/esn/test_inits.jl +++ b/test/esn/test_inits.jl @@ -38,7 +38,7 @@ input_inits = [ scaled_rand, weighted_init, minimal_init, - minimal_init(; sampling_type=:irrational), + minimal_init(; sampling_type=:irrational!), chebyshev_mapping, logistic_mapping, modified_lm(; factor=4) @@ -93,7 +93,7 @@ end @testset "Minimum complexity: $init" for init in [ minimal_init, - minimal_init(; sampling_type=:irrational) + minimal_init(; sampling_type=:irrational!) 
] dl = init(res_size, in_size) @test sort(unique(dl)) == Float32.([-0.1, 0.1]) From 35ba80fd87be8f0c844a9884a1619c6724083c82 Mon Sep 17 00:00:00 2001 From: MartinuzziFrancesco Date: Sun, 16 Mar 2025 16:29:03 +0100 Subject: [PATCH 4/9] final components, some docstring work --- docs/src/api/inits.md | 11 ++ src/ReservoirComputing.jl | 1 + src/esn/esn_inits.jl | 31 +++- src/esn/inits_components.jl | 336 ++++++++++++++++++++++++++++++++++-- test/esn/test_inits.jl | 4 +- 5 files changed, 359 insertions(+), 24 deletions(-) diff --git a/docs/src/api/inits.md b/docs/src/api/inits.md index eb9ef1b4..37cfa9b3 100644 --- a/docs/src/api/inits.md +++ b/docs/src/api/inits.md @@ -30,3 +30,14 @@ selfloop_forward_connection forward_connection ``` + +## Building functions + +```@docs + scale_radius! + delay_line! + backward_connection! + simple_cycle! + self_loop! + add_jumps! +``` diff --git a/src/ReservoirComputing.jl b/src/ReservoirComputing.jl index 2ab1f087..5dc51a52 100644 --- a/src/ReservoirComputing.jl +++ b/src/ReservoirComputing.jl @@ -43,6 +43,7 @@ export rand_sparse, delay_line, delay_line_backward, cycle_jumps, simple_cycle, pseudo_svd, chaotic_init, low_connectivity, double_cycle, selfloop_cycle, selfloop_feedback_cycle, selfloop_delayline_backward, selfloop_forward_connection, forward_connection +export scale_radius!, delay_line!, backward_connection!, simple_cycle!, add_jumps! export RNN, MRNN, GRU, GRUParams, FullyGated, Minimal export train export ESN, HybridESN, KnowledgeModel, DeepESN diff --git a/src/esn/esn_inits.jl b/src/esn/esn_inits.jl index 368e819d..ddf6ce2e 100644 --- a/src/esn/esn_inits.jl +++ b/src/esn/esn_inits.jl @@ -242,7 +242,7 @@ julia> res_input = minimal_init(8, 3; p=0.8)# higher p -> more positive signs IEEE transactions on neural networks 22.1 (2010): 131-144. """ function minimal_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight::Number=T(0.1), sampling_type::Symbol=:bernoulli!, kwargs...) where {T <: + weight::Number=T(0.1), sampling_type::Symbol=:bernoulli_sample!, kwargs...) where {T <: Number} res_size, in_size = dims input_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size) @@ -596,6 +596,18 @@ Create and return a delay line reservoir matrix [^rodan2010]. - `shift`: delay line shift. Default is 1. - `return_sparse`: flag for returning a `sparse` matrix. Default is `false`. + - `sampling_type`: Sampling that decides the distribution of `weight` negative numbers. + If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each + `weight` can be positive with a probability set by `positive_prob`. If set to + `:irrational_sample!` the `weight` is negative if the decimal number of the + irrational number chosen is odd. Default is `:no_sample`. + - `positive_prob`: probability of the `weight` being positive with `sampling_type` + set to `:bernoulli_sample!`. Default is 0.5 + - `irrational`: Irrational number whose decimals decide the sign of `weight`. + Default is `pi`. + - `start`: Which place after the decimal point the counting starts for the `irrational` + sign counting. Default is 1. + # Examples @@ -1258,11 +1270,11 @@ julia> reservoir_matrix = selfloop_cycle(5, 5; weight=0.2, selfloop_weight=0.5) """ function selfloop_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; cycle_weight=T(0.1f0), selfloop_weight=T(0.1f0), - return_sparse::Bool=false) where {T <: Number} + return_sparse::Bool=false, kwargs...) 
where {T <: Number} throw_sparse_error(return_sparse) reservoir_matrix = simple_cycle(rng, T, dims...; weight=T(cycle_weight), return_sparse=false) - reservoir_matrix += T(selfloop_weight) .* I(dims[1]) + self_loop!(rng, reservoir_matrix, selfloop_weight; kwargs...) return return_init_as(Val(return_sparse), reservoir_matrix) end @@ -1413,11 +1425,11 @@ julia> reservoir_matrix = selfloop_delayline_backward(5, 5; weight=0.3) function selfloop_delayline_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...; shift::Int=1, fb_shift::Int=2, weight=T(0.1f0), fb_weight=weight, selfloop_weight=T(0.1f0), return_sparse::Bool=false, - delay_kwargs::NamedTuple=NamedTuple(), fb_kwargs::NamedTuple=NamedTuple()) where {T <: - Number} + delay_kwargs::NamedTuple=NamedTuple(), fb_kwargs::NamedTuple=NamedTuple(), + selfloop_kwargs::NamedTuple=NamedTuple()) where {T <: Number} throw_sparse_error(return_sparse) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) - reservoir_matrix += T(selfloop_weight) .* I(dims[1]) + self_loop!(rng, reservoir_matrix, selfloop_weight; selfloop_kwargs...) delay_line!(rng, reservoir_matrix, weight, shift; delay_kwargs...) backward_connection!(rng, reservoir_matrix, fb_weight, fb_shift; fb_kwargs...) return return_init_as(Val(return_sparse), reservoir_matrix) @@ -1486,11 +1498,12 @@ julia> reservoir_matrix = selfloop_forward_connection(5, 5; weight=0.5) """ function selfloop_forward_connection(rng::AbstractRNG, ::Type{T}, dims::Integer...; weight=T(0.1f0), selfloop_weight=T(0.1f0), shift::Int=2, - return_sparse::Bool=false, kwargs...) where {T <: Number} + return_sparse::Bool=false, delay_kwargs::NamedTuple=NamedTuple(), + selfloop_kwargs::NamedTuple=NamedTuple()) where {T <: Number} throw_sparse_error(return_sparse) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) - reservoir_matrix += T(selfloop_weight) .* I(dims[1]) - delay_line!(rng, reservoir_matrix, weight, shift; kwargs...) + self_loop!(rng, reservoir_matrix, selfloop_weight; selfloop_kwargs...) + delay_line!(rng, reservoir_matrix, weight, shift; delay_kwargs...) return return_init_as(Val(return_sparse), reservoir_matrix) end diff --git a/src/esn/inits_components.jl b/src/esn/inits_components.jl index 1b0aba4f..656f388a 100644 --- a/src/esn/inits_components.jl +++ b/src/esn/inits_components.jl @@ -16,6 +16,17 @@ function throw_sparse_error(return_sparse::Bool) end ## scale spectral radius +""" + scale_radius!(matrix, radius) + +Scale the spectral radius of the given matrix to be equal to the +given radius + +# Arguments + + - `matrix`: Matrix to be scaled. 
+ - `radius`: desired radius to scale the given matrix to.
+"""
 function scale_radius!(reservoir_matrix::AbstractMatrix, radius::AbstractFloat)
     rho_w = maximum(abs.(eigvals(reservoir_matrix)))
     reservoir_matrix .*= radius / rho_w
     if Inf in unique(reservoir_matrix) || -Inf in unique(reservoir_matrix)
         error("""\n
             Sparsity too low for size of the matrix.
             Increase res_size or increase sparsity.\n
           """)
     end
     return reservoir_matrix
 end

-function default_sample!(rng::AbstractRNG, vecormat::AbstractVecOrMat)
-    return
+function no_sample(rng::AbstractRNG, vecormat::AbstractVecOrMat)
+    return vecormat
 end

-function bernoulli!(rng::AbstractRNG, vecormat::AbstractVecOrMat; p::Number=0.5)
+function bernoulli_sample!(rng::AbstractRNG, vecormat::AbstractVecOrMat; positive_prob::Number=0.5)
     for idx in eachindex(vecormat)
-        if rand(rng) > p
+        if rand(rng) > positive_prob
             vecormat[idx] = -vecormat[idx]
         end
     end
 end

 #TODO: @MartinuzziFrancesco maybe change name here #wait, for sure change name here
-function irrational!(rng::AbstractRNG, vecormat::AbstractVecOrMat;
+function irrational_sample!(rng::AbstractRNG, vecormat::AbstractVecOrMat;
         irrational::Irrational=pi, start::Int=1)

+"""
+    delay_line!([rng], reservoir_matrix, weight, shift;
+        sampling_type=:no_sample, irrational=pi, start=1,
+        positive_prob=0.5)
+
+Adds a delay line in the `reservoir_matrix`, with given `shift` and
+`weight`. The `weight` can be a single number or an array.
+
+# Arguments
+
+  - `rng`: Random number generator. Default is `Utils.default_rng()`
+    from WeightInitializers.
+  - `reservoir_matrix`: matrix to be changed.
+  - `weight`: weight to add as a delay line. Can be either a single number
+    or an array.
+  - `shift`: How far the delay line will be from the diagonal.
+
+# Keyword arguments
+
+  - `sampling_type`: Sampling that decides the distribution of `weight` negative numbers.
+    If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each
+    `weight` can be positive with a probability set by `positive_prob`. If set to
+    `:irrational_sample!` the `weight` is negative if the decimal number of the
+    irrational number chosen is odd. Default is `:no_sample`.
+  - `positive_prob`: probability of the `weight` being positive with `sampling_type`
+    set to `:bernoulli_sample!`. Default is 0.5
+  - `irrational`: Irrational number whose decimals decide the sign of `weight`.
+    Default is `pi`.
+  - `start`: Which place after the decimal point the counting starts for the `irrational`
+    sign counting. Default is 1.
+
+# Examples
+
+```jldoctest
+julia> matrix = zeros(Float32, 5, 5)
+5×5 Matrix{Float32}:
+ 0.0  0.0  0.0  0.0  0.0
+ 0.0  0.0  0.0  0.0  0.0
+ 0.0  0.0  0.0  0.0  0.0
+ 0.0  0.0  0.0  0.0  0.0
+ 0.0  0.0  0.0  0.0  0.0
+
+julia> delay_line!(matrix, 5.0, 2)
+5×5 Matrix{Float32}:
+ 0.0  0.0  0.0  0.0  0.0
+ 0.0  0.0  0.0  0.0  0.0
+ 5.0  0.0  0.0  0.0  0.0
+ 0.0  5.0  0.0  0.0  0.0
+ 0.0  0.0  5.0  0.0  0.0
+
+julia> delay_line!(matrix, 5.0, 2; sampling_type=:bernoulli_sample!)
+5×5 Matrix{Float32}:
+ 0.0   0.0  0.0  0.0  0.0
+ 0.0   0.0  0.0  0.0  0.0
+ 5.0   0.0  0.0  0.0  0.0
+ 0.0  -5.0  0.0  0.0  0.0
+ 0.0   0.0  5.0  0.0  0.0
+```
+"""
 function delay_line!(rng::AbstractRNG, reservoir_matrix::AbstractMatrix, weight::Number,
         shift::Int; kwargs...)
     weights = fill(weight, size(reservoir_matrix, 1) - shift)
-    delay_line!(rng, reservoir_matrix, weights, shift; kwargs...)
+    return delay_line!(rng, reservoir_matrix, weights, shift; kwargs...)
end

 function delay_line!(
         rng::AbstractRNG, reservoir_matrix::AbstractMatrix, weight::AbstractVector,
-        shift::Int; sampling_type=:default_sample!, kwargs...)
+        shift::Int; sampling_type=:no_sample, kwargs...)
     f_sample = getfield(@__MODULE__, sampling_type)
     f_sample(rng, weight; kwargs...)
     for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - shift)
         reservoir_matrix[idx + shift, idx] = weight[idx]
     end
+    return reservoir_matrix
 end

+"""
+    backward_connection!([rng], reservoir_matrix, weight, shift;
+        sampling_type=:no_sample, irrational=pi, start=1,
+        positive_prob=0.5)
+
+Adds a backward connection in the `reservoir_matrix`, with given `shift` and
+`weight`. The `weight` can be a single number or an array.
+
+# Arguments
+
+  - `rng`: Random number generator. Default is `Utils.default_rng()`
+    from WeightInitializers.
+  - `reservoir_matrix`: matrix to be changed.
+  - `weight`: weight to add as a backward connection. Can be either a single number
+    or an array.
+  - `shift`: How far the backward connection will be from the diagonal.
+
+# Keyword arguments
+
+  - `sampling_type`: Sampling that decides the distribution of `weight` negative numbers.
+    If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each
+    `weight` can be positive with a probability set by `positive_prob`. If set to
+    `:irrational_sample!` the `weight` is negative if the decimal number of the
+    irrational number chosen is odd. Default is `:no_sample`.
+  - `positive_prob`: probability of the `weight` being positive with `sampling_type`
+    set to `:bernoulli_sample!`. Default is 0.5
+  - `irrational`: Irrational number whose decimals decide the sign of `weight`.
+    Default is `pi`.
+  - `start`: Which place after the decimal point the counting starts for the `irrational`
+    sign counting. Default is 1.
+
+# Examples
+
+```jldoctest
+julia> matrix = zeros(Float32, 5, 5)
+5×5 Matrix{Float32}:
+ 0.0  0.0  0.0  0.0  0.0
+ 0.0  0.0  0.0  0.0  0.0
+ 0.0  0.0  0.0  0.0  0.0
+ 0.0  0.0  0.0  0.0  0.0
+ 0.0  0.0  0.0  0.0  0.0
+
+julia> backward_connection!(matrix, 3.0, 1)
+5×5 Matrix{Float32}:
+ 0.0  3.0  0.0  0.0  0.0
+ 0.0  0.0  3.0  0.0  0.0
+ 0.0  0.0  0.0  3.0  0.0
+ 0.0  0.0  0.0  0.0  3.0
+ 0.0  0.0  0.0  0.0  0.0
+
+julia> backward_connection!(matrix, 3.0, 1; sampling_type=:bernoulli_sample!)
+5×5 Matrix{Float32}:
+ 0.0  3.0   0.0  0.0   0.0
+ 0.0  0.0  -3.0  0.0   0.0
+ 0.0  0.0   0.0  3.0   0.0
+ 0.0  0.0   0.0  0.0  -3.0
+ 0.0  0.0   0.0  0.0   0.0
+```
+"""
 function backward_connection!(
         rng::AbstractRNG, reservoir_matrix::AbstractMatrix, weight::Number,
         shift::Int; kwargs...)
     weights = fill(weight, size(reservoir_matrix, 1) - shift)
-    backward_connection!(rng, reservoir_matrix, weights, shift; kwargs...)
+    return backward_connection!(rng, reservoir_matrix, weights, shift; kwargs...)
 end

 function backward_connection!(
         rng::AbstractRNG, reservoir_matrix::AbstractMatrix, weight::AbstractVector,
-        shift::Int; sampling_type=:default_sample!, kwargs...)
+        shift::Int; sampling_type=:no_sample, kwargs...)
     f_sample = getfield(@__MODULE__, sampling_type)
     f_sample(rng, weight; kwargs...)
     for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - shift)
         reservoir_matrix[idx, idx + shift] = weight[idx]
     end
+    return reservoir_matrix
 end

+"""
+    simple_cycle!([rng], reservoir_matrix, weight;
+        sampling_type=:no_sample, irrational=pi, start=1,
+        positive_prob=0.5)
+
+Adds a simple cycle in the `reservoir_matrix`, with given
+`weight`. The `weight` can be a single number or an array.
+
+# Arguments
+
+  - `rng`: Random number generator.
Default is `Utils.default_rng()` + from WeightInitializers. + - `reservoir_matrix`: matrix to be changed. + - `weight`: weight to add as a simple cycle. Can be either a single number + or an array. + +# Keyword arguments + + - `sampling_type`: Sampling that decides the distribution of `weight` negative numbers. + If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each + `weight` can be positive with a probability set by `positive_prob`. If set to + `:irrational_sample!` the `weight` is negative if the decimal number of the + irrational number chosen is odd. Default is `:no_sample`. + - `positive_prob`: probability of the `weight` being positive when `sampling_type` is + set to `:bernoulli_sample!`. Default is 0.5 + - `irrational`: Irrational number whose decimals decide the sign of `weight`. + Default is `pi`. + - `start`: Which place after the decimal point the counting starts for the `irrational` + sign counting. Default is 1. + +# Examples + +```jldoctest +julia> matrix = zeros(Float32, 5, 5) +5×5 Matrix{Float32}: + 0.0 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 0.0 + +julia> simple_cycle!(matrix, 1.0; sampling_type=:irrational_sample!) +5×5 Matrix{Float32}: + 0.0 0.0 0.0 0.0 -1.0 + -1.0 0.0 0.0 0.0 0.0 + 0.0 1.0 0.0 0.0 0.0 + 0.0 0.0 -1.0 0.0 0.0 + 0.0 0.0 0.0 -1.0 0.0 +``` +""" function simple_cycle!( rng::AbstractRNG, reservoir_matrix::AbstractMatrix, weight::Number; kwargs...) weights = fill(weight, size(reservoir_matrix, 1)) - simple_cycle!(rng, reservoir_matrix, weights; kwargs...) + return simple_cycle!(rng, reservoir_matrix, weights; kwargs...) end function simple_cycle!( rng::AbstractRNG, reservoir_matrix::AbstractMatrix, weight::AbstractVector; - sampling_type=:default_sample!, kwargs...) + sampling_type=:no_sample, kwargs...) f_sample = getfield(@__MODULE__, sampling_type) f_sample(rng, weight; kwargs...) for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - 1) reservoir_matrix[idx + 1, idx] = weight[idx] end reservoir_matrix[1, end] = weight[end] + return reservoir_matrix end +""" + add_jumps!([rng], reservoir_matrix, weight, jump_size; + sampling_type=:no_sample, irrational=pi, start=1, + positive_prob=0.5) + +Adds jumps to a given `reservoir_matrix` with chosen `weight` and determined `jump_size`. +`weight` can be either a number or an array. + +# Arguments + + - `rng`: Random number generator. Default is `Utils.default_rng()` + from WeightInitializers. + - `reservoir_matrix`: matrix to be changed. + - `weight`: weight to add as a simple cycle. Can be either a single number + or an array. + - `jump_size`: size of the jump's distance. + +# Keyword arguments + + - `sampling_type`: Sampling that decides the distribution of `weight` negative numbers. + If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each + `weight` can be positive with a probability set by `positive_prob`. If set to + `:irrational_sample!` the `weight` is negative if the decimal number of the + irrational number chosen is odd. Default is `:no_sample`. + - `positive_prob`: probability of the `weight` being positive when `sampling_type` is + set to `:bernoulli_sample!`. Default is 0.5 + - `irrational`: Irrational number whose decimals decide the sign of `weight`. + Default is `pi`. + - `start`: Which place after the decimal point the counting starts for the `irrational` + sign counting. Default is 1. 
+
+# Examples 
+
+```jldoctest
+julia> matrix = zeros(Float32, 5, 5)
+5×5 Matrix{Float32}:
+ 0.0  0.0  0.0  0.0  0.0
+ 0.0  0.0  0.0  0.0  0.0
+ 0.0  0.0  0.0  0.0  0.0
+ 0.0  0.0  0.0  0.0  0.0
+ 0.0  0.0  0.0  0.0  0.0
+
+julia> add_jumps!(matrix, 1.0, 2)
+5×5 Matrix{Float32}:
+ 0.0  0.0  1.0  0.0  0.0
+ 0.0  0.0  0.0  0.0  0.0
+ 1.0  0.0  0.0  0.0  1.0
+ 0.0  0.0  0.0  0.0  0.0
+ 0.0  0.0  1.0  0.0  0.0
+```
+"""
 function add_jumps!(rng::AbstractRNG, reservoir_matrix::AbstractMatrix,
         weight::Number, jump_size::Int; kwargs...)
     weights = fill(
         weight, length(collect(1:jump_size:(size(reservoir_matrix, 1) - jump_size))))
-    add_jumps!(rng, reservoir_matrix, weights, jump_size; kwargs...)
+    return add_jumps!(rng, reservoir_matrix, weights, jump_size; kwargs...)
 end

 function add_jumps!(rng::AbstractRNG, reservoir_matrix::AbstractMatrix,
         weight::AbstractVector, jump_size::Int;
-        sampling_type=:default_sample!, kwargs...)
+        sampling_type=:no_sample, kwargs...)
     f_sample = getfield(@__MODULE__, sampling_type)
     f_sample(rng, weight; kwargs...)
     w_idx = 1
     for idx in 1:jump_size:(size(reservoir_matrix, 1) - jump_size)
         tmp = (idx + jump_size) % size(reservoir_matrix, 1)
         if tmp == 0
             tmp = size(reservoir_matrix, 1)
         end
         reservoir_matrix[idx, tmp] = weight[w_idx]
         reservoir_matrix[tmp, idx] = weight[w_idx]
         w_idx += 1
     end
+    return reservoir_matrix
+end
+
+"""
+    self_loop!([rng], reservoir_matrix, weight;
+        sampling_type=:no_sample, irrational=pi, start=1,
+        positive_prob=0.5)
+
+Adds self loops to a given `reservoir_matrix` with chosen `weight`.
+`weight` can be either a number or an array.
+
+# Arguments
+
+  - `rng`: Random number generator. Default is `Utils.default_rng()`
+    from WeightInitializers.
+  - `reservoir_matrix`: matrix to be changed.
+  - `weight`: weight to add as a self loop. Can be either a single number
+    or an array.
+
+# Keyword arguments
+
+  - `sampling_type`: Sampling that decides the distribution of `weight` negative numbers.
+    If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each
+    `weight` can be positive with a probability set by `positive_prob`. If set to
+    `:irrational_sample!` the `weight` is negative if the decimal number of the
+    irrational number chosen is odd. Default is `:no_sample`.
+  - `positive_prob`: probability of the `weight` being positive when `sampling_type` is
+    set to `:bernoulli_sample!`. Default is 0.5
+  - `irrational`: Irrational number whose decimals decide the sign of `weight`.
+    Default is `pi`.
+  - `start`: Which place after the decimal point the counting starts for the `irrational`
+    sign counting. Default is 1.
+
+# Examples
+
+```jldoctest
+julia> matrix = zeros(Float32, 5, 5)
+5×5 Matrix{Float32}:
+ 0.0  0.0  0.0  0.0  0.0
+ 0.0  0.0  0.0  0.0  0.0
+ 0.0  0.0  0.0  0.0  0.0
+ 0.0  0.0  0.0  0.0  0.0
+ 0.0  0.0  0.0  0.0  0.0
+
+julia> self_loop!(matrix, 1.0)
+5×5 Matrix{Float32}:
+ 1.0  0.0  0.0  0.0  0.0
+ 0.0  1.0  0.0  0.0  0.0
+ 0.0  0.0  1.0  0.0  0.0
+ 0.0  0.0  0.0  1.0  0.0
+ 0.0  0.0  0.0  0.0  1.0
+```
+"""
+
+function self_loop!(rng::AbstractRNG, reservoir_matrix::AbstractMatrix,
+        weight::Number; kwargs...)
+    weights = fill(weight, size(reservoir_matrix, 1))
+    return self_loop!(rng, reservoir_matrix, weights; kwargs...)
+end
+
+function self_loop!(rng::AbstractRNG, reservoir_matrix::AbstractMatrix,
+        weight::AbstractVector; sampling_type=:no_sample, kwargs...)
+    f_sample = getfield(@__MODULE__, sampling_type)
+    f_sample(rng, weight; kwargs...)
+    for idx in axes(reservoir_matrix, 1)
+        reservoir_matrix[idx, idx] = weight[idx]
+    end
+    return reservoir_matrix
+end
+
+for init_component in (:delay_line!, :add_jumps!, :backward_connection!,
+ @eval begin + function ($init_component)(args...; kwargs...) + return $init_component(Utils.default_rng(), args...; kwargs...) + end + end end diff --git a/test/esn/test_inits.jl b/test/esn/test_inits.jl index 25d46bd0..568e44fe 100644 --- a/test/esn/test_inits.jl +++ b/test/esn/test_inits.jl @@ -38,7 +38,7 @@ input_inits = [ scaled_rand, weighted_init, minimal_init, - minimal_init(; sampling_type=:irrational!), + minimal_init(; sampling_type=:irrational_sample!), chebyshev_mapping, logistic_mapping, modified_lm(; factor=4) @@ -93,7 +93,7 @@ end @testset "Minimum complexity: $init" for init in [ minimal_init, - minimal_init(; sampling_type=:irrational!) + minimal_init(; sampling_type=:irrational_sample!) ] dl = init(res_size, in_size) @test sort(unique(dl)) == Float32.([-0.1, 0.1]) From 69524fd79b253000af992f4af9d8ce8dc20701b5 Mon Sep 17 00:00:00 2001 From: MartinuzziFrancesco Date: Mon, 17 Mar 2025 10:48:24 +0100 Subject: [PATCH 5/9] docs: added relevant docstrings for new init kwargs --- Project.toml | 2 +- docs/src/esn_tutorials/change_layers.md | 33 ++++++--- src/ReservoirComputing.jl | 3 +- src/esn/esn_inits.jl | 97 ++++++++++++++++++++++--- src/esn/inits_components.jl | 6 +- 5 files changed, 118 insertions(+), 23 deletions(-) diff --git a/Project.toml b/Project.toml index bfe8c110..260684c1 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "ReservoirComputing" uuid = "7c2d2b1e-3dd4-11ea-355a-8f6a8116e294" authors = ["Francesco Martinuzzi"] -version = "0.10.13" +version = "0.11.0" [deps] Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" diff --git a/docs/src/esn_tutorials/change_layers.md b/docs/src/esn_tutorials/change_layers.md index 7927e9c0..438f6b78 100644 --- a/docs/src/esn_tutorials/change_layers.md +++ b/docs/src/esn_tutorials/change_layers.md @@ -1,6 +1,10 @@ # Using different layers -A great deal of efforts in the ESNs field are devoted to finding an ideal construction for the reservoir matrices. ReservoirComputing.jl offers multiple implementation of reservoir and input matrices initializations found in the literature. The API is standardized, and follows by [WeightInitializers.jl](https://github.com/LuxDL/Lux.jl/tree/main/lib/WeightInitializers): +A great deal of efforts in the ESNs field are devoted to finding an ideal construction +for the reservoir matrices. ReservoirComputing.jl offers multiple implementation of +reservoir and input matrices initializations found in the literature. +The API is standardized, and follows +[WeightInitializers.jl](https://github.com/LuxDL/Lux.jl/tree/main/lib/WeightInitializers): ```julia weights = init(rng, dims...) @@ -22,9 +26,13 @@ Custom layers only need to follow these APIs to be compatible with ReservoirComp ## Example of minimally complex ESN -Using [^rodan2012] and [^rodan2010] as references this section will provide an example on how to change both the input layer and the reservoir for ESNs. +Using [^rodan2012] and [^rodan2010] as references this section will provide an +example on how to change both the input layer and the reservoir for ESNs. -The task for this example will be the one step ahead prediction of the Henon map. To obtain the data one can leverage the package [PredefinedDynamicalSystems.jl](https://juliadynamics.github.io/PredefinedDynamicalSystems.jl/dev/). The data is scaled to be between -1 and 1. +The task for this example will be the one step ahead prediction of the Henon map. 
+To obtain the data one can leverage the package +[PredefinedDynamicalSystems.jl](https://juliadynamics.github.io/PredefinedDynamicalSystems.jl/dev/). +The data is scaled to be between -1 and 1. ```@example minesn using PredefinedDynamicalSystems @@ -43,14 +51,16 @@ testing_input = data[:, (shift + train_len):(shift + train_len + predict_len - 1 testing_target = data[:, (shift + train_len + 1):(shift + train_len + predict_len)] ``` -Now it is possible to define the input layers and reservoirs we want to compare and run the comparison in a simple for loop. The accuracy will be tested using the mean squared deviation msd from StatsBase. +Now it is possible to define the input layers and reservoirs we want to compare and run +the comparison in a simple for loop. The accuracy will be tested using the mean squared +deviation msd from StatsBase. ```@example minesn using ReservoirComputing, StatsBase res_size = 300 -input_layer = [minimal_init(; weight=0.85, sampling_type=:irrational), - minimal_init(; weight=0.95, sampling_type=:irrational)] +input_layer = [minimal_init(; weight=0.85, sampling_type=:irrational_sample!), + minimal_init(; weight=0.95, sampling_type=:irrational_sample!)] reservoirs = [simple_cycle(; weight=0.7), cycle_jumps(; cycle_weight=0.7, jump_weight=0.2, jump_size=5)] @@ -64,9 +74,14 @@ for i in 1:length(reservoirs) end ``` -As it is possible to see, changing layers in ESN models is straightforward. Be sure to check the API documentation for a full list of reservoir and layers. +As it is possible to see, changing layers in ESN models is straightforward. +Be sure to check the API documentation for a full list of reservoir and layers. ## Bibliography -[^rodan2012]: Rodan, Ali, and Peter Tiňo. “Simple deterministically constructed cycle reservoirs with regular jumps.” Neural computation 24.7 (2012): 1822-1852. -[^rodan2010]: Rodan, Ali, and Peter Tiňo. “Minimum complexity echo state network.” IEEE transactions on neural networks 22.1 (2010): 131-144. +[^rodan2012]: Rodan, Ali, and Peter Tiňo. + “Simple deterministically constructed cycle reservoirs with regular jumps.” + Neural computation 24.7 (2012): 1822-1852. +[^rodan2010]: Rodan, Ali, and Peter Tiňo. + “Minimum complexity echo state network.” + IEEE transactions on neural networks 22.1 (2010): 131-144. diff --git a/src/ReservoirComputing.jl b/src/ReservoirComputing.jl index 5dc51a52..c0b02836 100644 --- a/src/ReservoirComputing.jl +++ b/src/ReservoirComputing.jl @@ -43,7 +43,8 @@ export rand_sparse, delay_line, delay_line_backward, cycle_jumps, simple_cycle, pseudo_svd, chaotic_init, low_connectivity, double_cycle, selfloop_cycle, selfloop_feedback_cycle, selfloop_delayline_backward, selfloop_forward_connection, forward_connection -export scale_radius!, delay_line!, backward_connection!, simple_cycle!, add_jumps! +export scale_radius!, delay_line!, backward_connection!, simple_cycle!, add_jumps!, + self_loop! export RNN, MRNN, GRU, GRUParams, FullyGated, Minimal export train export ESN, HybridESN, KnowledgeModel, DeepESN diff --git a/src/esn/esn_inits.jl b/src/esn/esn_inits.jl index ddf6ce2e..ab9739ee 100644 --- a/src/esn/esn_inits.jl +++ b/src/esn/esn_inits.jl @@ -243,7 +243,7 @@ julia> res_input = minimal_init(8, 3; p=0.8)# higher p -> more positive signs """ function minimal_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; weight::Number=T(0.1), sampling_type::Symbol=:bernoulli_sample!, kwargs...) 
where {T <: - Number} + Number} res_size, in_size = dims input_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size) input_matrix .+= T(weight) @@ -577,7 +577,8 @@ end """ delay_line([rng], [T], dims...; - weight=0.1, return_sparse=false) + weight=0.1, return_sparse=false, + kwargs...) Create and return a delay line reservoir matrix [^rodan2010]. @@ -608,7 +609,6 @@ Create and return a delay line reservoir matrix [^rodan2010]. - `start`: Which place after the decimal point the counting starts for the `irrational` sign counting. Default is 1. - # Examples ```jldoctest @@ -648,7 +648,8 @@ end """ delay_line_backward([rng], [T], dims...; - weight=0.1, fb_weight=0.2, return_sparse=false) + weight=0.1, fb_weight=0.2, return_sparse=false, + delay_kwargs=(), fb_kwargs=()) Create a delay line backward reservoir with the specified by `dims` and weights. Creates a matrix with backward connections as described in [^rodan2010]. @@ -669,6 +670,19 @@ Creates a matrix with backward connections as described in [^rodan2010]. in the reservoir. Default is 0.2 - `return_sparse`: flag for returning a `sparse` matrix. Default is `false`. + - `delay_kwargs` and `fb_kwargs`: named tuples that control the kwargs for the + delay line weight and feedback weights respectively. The kwargs are as follows: + + `sampling_type`: Sampling that decides the distribution of `weight` negative numbers. + If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each + `weight` can be positive with a probability set by `positive_prob`. If set to + `:irrational_sample!` the `weight` is negative if the decimal number of the + irrational number chosen is odd. Default is `:no_sample`. + + `positive_prob`: probability of the `weight` being positive when `sampling_type` is + set to `:bernoulli_sample!`. Default is 0.5 + + `irrational`: Irrational number whose decimals decide the sign of `weight`. + Default is `pi`. + + `start`: Which place after the decimal point the counting starts for the `irrational` + sign counting. Default is 1. # Examples @@ -707,7 +721,8 @@ end """ cycle_jumps([rng], [T], dims...; - cycle_weight=0.1, jump_weight=0.1, jump_size=3, return_sparse=false) + cycle_weight=0.1, jump_weight=0.1, jump_size=3, return_sparse=false, + cycle_kwargs=(), jump_kwargs=()) Create a cycle jumps reservoir [^Rodan2012]. @@ -729,6 +744,19 @@ Create a cycle jumps reservoir [^Rodan2012]. Default is 3. - `return_sparse`: flag for returning a `sparse` matrix. Default is `false`. + - `cycle_kwargs` and `jump_kwargs`: named tuples that control the kwargs for the + cycle and jump weights respectively. The kwargs are as follows: + + `sampling_type`: Sampling that decides the distribution of `weight` negative numbers. + If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each + `weight` can be positive with a probability set by `positive_prob`. If set to + `:irrational_sample!` the `weight` is negative if the decimal number of the + irrational number chosen is odd. Default is `:no_sample`. + + `positive_prob`: probability of the `weight` being positive when `sampling_type` is + set to `:bernoulli_sample!`. Default is 0.5 + + `irrational`: Irrational number whose decimals decide the sign of `weight`. + Default is `pi`. + + `start`: Which place after the decimal point the counting starts for the `irrational` + sign counting. Default is 1. 
# Examples
@@ -769,7 +797,8 @@ end
 """
     simple_cycle([rng], [T], dims...;
-        weight=0.1, return_sparse=false)
+        weight=0.1, return_sparse=false,
+        kwargs...)

 Create a simple cycle reservoir [^rodan2010].
@@ -786,6 +815,17 @@ Create a simple cycle reservoir [^rodan2010].
 Default is 0.1.
 - `return_sparse`: flag for returning a `sparse` matrix.
 Default is `false`.
+ - `sampling_type`: Sampling that decides the distribution of negative signs in `weight`.
+ If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each
+ `weight` can be positive with a probability set by `positive_prob`. If set to
+ `:irrational_sample!` the `weight` is negative if the corresponding decimal digit of
+ the chosen irrational number is odd. Default is `:no_sample`.
+ - `positive_prob`: probability of the `weight` being positive when `sampling_type` is
+ set to `:bernoulli_sample!`. Default is 0.5.
+ - `irrational`: Irrational number whose decimals decide the sign of `weight`.
+ Default is `pi`.
+ - `start`: Which place after the decimal point the counting starts for the `irrational`
+ sign counting. Default is 1.

 # Examples
@@ -1210,7 +1250,7 @@ end
 @doc raw"""
     selfloop_cycle([rng], [T], dims...;
         cycle_weight=0.1, selfloop_weight=0.1,
-        return_sparse=false)
+        return_sparse=false, kwargs...)

 Creates a simple cycle reservoir with the addition of self loops [^elsarraj2019].
@@ -1243,6 +1283,17 @@ W_{i,j} =
 Default is 0.1.
 - `return_sparse`: flag for returning a `sparse` matrix.
 Default is `false`.
+ - `sampling_type`: Sampling that decides the distribution of negative signs in `weight`.
+ If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each
+ `weight` can be positive with a probability set by `positive_prob`. If set to
+ `:irrational_sample!` the `weight` is negative if the corresponding decimal digit of
+ the chosen irrational number is odd. Default is `:no_sample`.
+ - `positive_prob`: probability of the `weight` being positive when `sampling_type` is
+ set to `:bernoulli_sample!`. Default is 0.5.
+ - `irrational`: Irrational number whose decimals decide the sign of `weight`.
+ Default is `pi`.
+ - `start`: Which place after the decimal point the counting starts for the `irrational`
+ sign counting. Default is 1.

 # Examples
@@ -1363,7 +1414,8 @@ end
 @doc raw"""
     selfloop_delayline_backward([rng], [T], dims...;
         weight=0.1, selfloop_weight=0.1,
-        return_sparse=false)
+        return_sparse=false, fb_kwargs=(), selfloop_kwargs=(),
+        delay_kwargs=())

 Creates a reservoir based on a delay line with the addition of self loops and
 backward connections shifted by one [^elsarraj2019].
@@ -1397,6 +1449,19 @@ W_{i,j} =
 Default is 0.1.
 - `return_sparse`: flag for returning a `sparse` matrix.
 Default is `false`.
+ - `delay_kwargs`, `selfloop_kwargs`, and `fb_kwargs`: named tuples that control the kwargs
+ for weight generation. The kwargs are as follows:
+ + `sampling_type`: Sampling that decides the distribution of negative signs in `weight`.
+ If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each
+ `weight` can be positive with a probability set by `positive_prob`. If set to
+ `:irrational_sample!` the `weight` is negative if the corresponding decimal digit of
+ the chosen irrational number is odd. Default is `:no_sample`.
+ + `positive_prob`: probability of the `weight` being positive when `sampling_type` is
+ set to `:bernoulli_sample!`. Default is 0.5.
+ + `irrational`: Irrational number whose decimals decide the sign of `weight`.
+ Default is `pi`.
+ + `start`: Which place after the decimal point the counting starts for the `irrational`
+ sign counting. Default is 1.

 # Examples
@@ -1438,7 +1503,8 @@ end
 @doc raw"""
     selfloop_forward_connection([rng], [T], dims...;
         weight=0.1, selfloop_weight=0.1,
-        return_sparse=false)
+        return_sparse=false, selfloop_kwargs=(),
+        delay_kwargs=())

 Creates a reservoir based on a forward connection of weights between even nodes
 with the addition of self loops [^elsarraj2019].
@@ -1471,6 +1537,19 @@ W_{i,j} =
 Default is 0.1.
 - `return_sparse`: flag for returning a `sparse` matrix.
 Default is `false`.
+ - `delay_kwargs` and `selfloop_kwargs`: named tuples that control the kwargs for the
+ delay line weights and self loop weights, respectively. The kwargs are as follows:
+ + `sampling_type`: Sampling that decides the distribution of negative signs in `weight`.
+ If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each
+ `weight` can be positive with a probability set by `positive_prob`. If set to
+ `:irrational_sample!` the `weight` is negative if the corresponding decimal digit of
+ the chosen irrational number is odd. Default is `:no_sample`.
+ + `positive_prob`: probability of the `weight` being positive when `sampling_type` is
+ set to `:bernoulli_sample!`. Default is 0.5.
+ + `irrational`: Irrational number whose decimals decide the sign of `weight`.
+ Default is `pi`.
+ + `start`: Which place after the decimal point the counting starts for the `irrational`
+ sign counting. Default is 1.

 # Examples
diff --git a/src/esn/inits_components.jl b/src/esn/inits_components.jl
index 656f388a..62eca3e6 100644
--- a/src/esn/inits_components.jl
+++ b/src/esn/inits_components.jl
@@ -43,7 +43,8 @@ function no_sample(rng::AbstractRNG, vecormat::AbstractVecOrMat)
     return vecormat
 end

-function bernoulli_sample!(rng::AbstractRNG, vecormat::AbstractVecOrMat; positive_prob::Number=0.5)
+function bernoulli_sample!(
+        rng::AbstractRNG, vecormat::AbstractVecOrMat; positive_prob::Number=0.5)
     for idx in eachindex(vecormat)
         if rand(rng) > positive_prob
             vecormat[idx] = -vecormat[idx]
@@ -328,7 +329,7 @@ Adds jumps to a given `reservoir_matrix` with chosen `weight` and determined `ju
 - `start`: Which place after the decimal point the counting starts for the `irrational`
 sign counting. Default is 1.

-# Examples 
+# Examples

 ```jldoctest
 julia> matrix = zeros(Float32, 5, 5)
@@ -423,7 +424,6 @@ julia> self_loop!(matrix, 1.0)
 0.0 0.0 0.0 0.0 1.0
 ```
 """
-
 function self_loop!(rng::AbstractRNG, reservoir_matrix::AbstractMatrix,
         weight::Number; kwargs...)
     weights = fill(weight, size(reservoir_matrix, 1))

From 8b483933c80b8e36daf2176fe2167a74d599008d Mon Sep 17 00:00:00 2001
From: MartinuzziFrancesco
Date: Mon, 17 Mar 2025 10:50:30 +0100
Subject: [PATCH 6/9] refactor: formatting

---
 src/esn/esn_inits.jl        | 52 ++++++++++++++++++++-----------------
 src/esn/inits_components.jl |  2 +-
 2 files changed, 29 insertions(+), 25 deletions(-)

diff --git a/src/esn/esn_inits.jl b/src/esn/esn_inits.jl
index ab9739ee..73463b6e 100644
--- a/src/esn/esn_inits.jl
+++ b/src/esn/esn_inits.jl
@@ -666,23 +666,25 @@ Creates a matrix with backward connections as described in [^rodan2010].
 - `weight`: The weight determines the absolute value of
 forward connections in the reservoir. Default is 0.1
+
 - `fb_weight`: Determines the absolute value of backward connections
 in the reservoir. Default is 0.2
 - `return_sparse`: flag for returning a `sparse` matrix.
 Default is `false`.
- - `delay_kwargs` and `fb_kwargs`: named tuples that control the kwargs for the
+ - `delay_kwargs` and `fb_kwargs`: named tuples that control the kwargs for the
 delay line weights and feedback weights, respectively. The kwargs are as follows:
- + `sampling_type`: Sampling that decides the distribution of negative signs in `weight`.
- If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each
- `weight` can be positive with a probability set by `positive_prob`. If set to
- `:irrational_sample!` the `weight` is negative if the corresponding decimal digit of
- the chosen irrational number is odd. Default is `:no_sample`.
- + `positive_prob`: probability of the `weight` being positive when `sampling_type` is
- set to `:bernoulli_sample!`. Default is 0.5.
- + `irrational`: Irrational number whose decimals decide the sign of `weight`.
- Default is `pi`.
- + `start`: Which place after the decimal point the counting starts for the `irrational`
- sign counting. Default is 1.
+
+ + `sampling_type`: Sampling that decides the distribution of negative signs in `weight`.
+ If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each
+ `weight` can be positive with a probability set by `positive_prob`. If set to
+ `:irrational_sample!` the `weight` is negative if the corresponding decimal digit of
+ the chosen irrational number is odd. Default is `:no_sample`.
+ + `positive_prob`: probability of the `weight` being positive when `sampling_type` is
+ set to `:bernoulli_sample!`. Default is 0.5.
+ + `irrational`: Irrational number whose decimals decide the sign of `weight`.
+ Default is `pi`.
+ + `start`: Which place after the decimal point the counting starts for the `irrational`
+ sign counting. Default is 1.

 # Examples
@@ -738,25 +740,27 @@ Create a cycle jumps reservoir [^Rodan2012].
 - `cycle_weight`: The weight of cycle connections.
 Default is 0.1.
+
 - `jump_weight`: The weight of jump connections.
 Default is 0.1.
 - `jump_size`: The number of steps between jump connections.
 Default is 3.
 - `return_sparse`: flag for returning a `sparse` matrix.
 Default is `false`.
- - `cycle_kwargs` and `jump_kwargs`: named tuples that control the kwargs for the
+ - `cycle_kwargs` and `jump_kwargs`: named tuples that control the kwargs for the
 cycle and jump weights, respectively. The kwargs are as follows:
- + `sampling_type`: Sampling that decides the distribution of negative signs in `weight`.
- If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each
- `weight` can be positive with a probability set by `positive_prob`. If set to
- `:irrational_sample!` the `weight` is negative if the corresponding decimal digit of
- the chosen irrational number is odd. Default is `:no_sample`.
+
+ + `sampling_type`: Sampling that decides the distribution of negative signs in `weight`.
+ If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each
+ `weight` can be positive with a probability set by `positive_prob`. If set to
+ `:irrational_sample!` the `weight` is negative if the corresponding decimal digit of
+ the chosen irrational number is odd. Default is `:no_sample`.
+ + `positive_prob`: probability of the `weight` being positive when `sampling_type` is
+ set to `:bernoulli_sample!`. Default is 0.5.
+ + `irrational`: Irrational number whose decimals decide the sign of `weight`.
+ Default is `pi`.
+ + `start`: Which place after the decimal point the counting starts for the `irrational`
+ sign counting. Default is 1.

 # Examples
diff --git a/src/esn/inits_components.jl b/src/esn/inits_components.jl
index 62eca3e6..91e66c2a 100644
--- a/src/esn/inits_components.jl
+++ b/src/esn/inits_components.jl
@@ -404,7 +404,7 @@ Adds jumps to a given `reservoir_matrix` with chosen `weight` and determined `ju
 - `start`: Which place after the decimal point the counting starts for the `irrational`
 sign counting. Default is 1.

-# Examples 
+# Examples

 ```jldoctest
 julia> matrix = zeros(Float32, 5, 5)

From d9f488a2686bf7f904f027c2986cdda35e6e5168 Mon Sep 17 00:00:00 2001
From: MartinuzziFrancesco
Date: Mon, 17 Mar 2025 14:04:55 +0100
Subject: [PATCH 7/9] chore: upgrade ReservoirComputing in docs toml

---
 docs/Project.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/Project.toml b/docs/Project.toml
index d372c202..3915bfc4 100644
--- a/docs/Project.toml
+++ b/docs/Project.toml
@@ -17,5 +17,5 @@ Documenter = "1"
 OrdinaryDiffEq = "6"
 Plots = "1"
 PredefinedDynamicalSystems = "1"
-ReservoirComputing = "0.10.5"
+ReservoirComputing = "0.11.0"
 StatsBase = "0.34.4"

From 37acc4707b830f685f431c3fa8669f24e8612d69 Mon Sep 17 00:00:00 2001
From: MartinuzziFrancesco
Date: Tue, 18 Mar 2025 10:31:01 +0100
Subject: [PATCH 8/9] docs: add more detailed kwargs for minimal inits

---
 src/esn/esn_inits.jl | 61 +++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 57 insertions(+), 4 deletions(-)

diff --git a/src/esn/esn_inits.jl b/src/esn/esn_inits.jl
index 73463b6e..2333edcd 100644
--- a/src/esn/esn_inits.jl
+++ b/src/esn/esn_inits.jl
@@ -593,6 +593,9 @@ Create and return a delay line reservoir matrix [^rodan2010].
 # Keyword arguments

 - `weight`: Determines the value of all connections in the reservoir.
+ This can be provided as a single value or an array. In case it is provided as an
+ array, please make sure that the length of the array matches the length of the sub-diagonal
+ you want to populate.
 Default is 0.1.
 - `shift`: delay line shift. Default is 1.
 - `return_sparse`: flag for returning a `sparse` matrix.
@@ -665,10 +668,17 @@ Creates a matrix with backward connections as described in [^rodan2010].
 # Keyword arguments

 - `weight`: The weight determines the absolute value of
- forward connections in the reservoir. Default is 0.1
+ forward connections in the reservoir.
+ This can be provided as a single value or an array. In case it is provided as an
+ array, please make sure that the length of the array matches the length of the sub-diagonal
+ you want to populate.
+ Default is 0.1
 - `fb_weight`: Determines the absolute value of backward connections
- in the reservoir. Default is 0.2
+ in the reservoir.
+ This can be provided as a single value or an array. In case it is provided as an
+ array, please make sure that the length of the array matches the length of the sub-diagonal
+ you want to populate.
+ Default is 0.2
 - `return_sparse`: flag for returning a `sparse` matrix.
 Default is `false`.
 - `delay_kwargs` and `fb_kwargs`: named tuples that control the kwargs for the
@@ -739,9 +749,14 @@ Create a cycle jumps reservoir [^Rodan2012].
 # Keyword arguments

 - `cycle_weight`: The weight of cycle connections.
+ This can be provided as a single value or an array. In case it is provided as an
+ array, please make sure that the length of the array matches the length of the cycle
+ you want to populate.
 Default is 0.1.
 - `jump_weight`: The weight of jump connections.
+ This can be provided as a single value or an array. In case it is provided as an
+ array, please make sure that the length of the array matches the length of the jumps
+ you want to populate.
 Default is 0.1.
 - `jump_size`: The number of steps between jump connections.
 Default is 3.
@@ -816,6 +831,9 @@ Create a simple cycle reservoir [^rodan2010].
 # Keyword arguments

 - `weight`: Weight of the connections in the reservoir matrix.
+ This can be provided as a single value or an array. In case it is provided as an
+ array, please make sure that the length of the array matches the length of the cycle
+ you want to populate.
 Default is 0.1.
 - `return_sparse`: flag for returning a `sparse` matrix.
 Default is `false`.
@@ -1282,8 +1300,14 @@ W_{i,j} =
 # Keyword arguments

 - `cycle_weight`: Weight of the cycle connections in the reservoir matrix.
+ This can be provided as a single value or an array. In case it is provided as an
+ array, please make sure that the length of the array matches the length of the cycle
+ you want to populate.
 Default is 0.1.
 - `selfloop_weight`: Weight of the self loops in the reservoir matrix.
+ This can be provided as a single value or an array. In case it is provided as an
+ array, please make sure that the length of the array matches the length of the diagonal
+ you want to populate.
 Default is 0.1.
 - `return_sparse`: flag for returning a `sparse` matrix.
 Default is `false`.
@@ -1366,6 +1390,9 @@ W_{i,j} =
 # Keyword arguments

 - `cycle_weight`: Weight of the cycle connections in the reservoir matrix.
+ This can be provided as a single value or an array. In case it is provided as an
+ array, please make sure that the length of the array matches the length of the cycle
+ you want to populate.
 Default is 0.1.
 - `selfloop_weight`: Weight of the self loops in the reservoir matrix.
 Default is 0.1.
@@ -1448,8 +1475,14 @@ W_{i,j} =
 # Keyword arguments

 - `weight`: Weight of the cycle connections in the reservoir matrix.
+ This can be provided as a single value or an array. In case it is provided as an
+ array, please make sure that the length of the array matches the length of the cycle
+ you want to populate.
 Default is 0.1.
 - `selfloop_weight`: Weight of the self loops in the reservoir matrix.
+ This can be provided as a single value or an array. In case it is provided as an
+ array, please make sure that the length of the array matches the length of the diagonal
+ you want to populate.
 Default is 0.1.
 - `return_sparse`: flag for returning a `sparse` matrix.
 Default is `false`.
@@ -1536,8 +1569,14 @@ W_{i,j} =
 # Keyword arguments

 - `weight`: Weight of the cycle connections in the reservoir matrix.
+ This can be provided as a single value or an array. In case it is provided as an
+ array, please make sure that the length of the array matches the length of the cycle
+ you want to populate.
 Default is 0.1.
 - `selfloop_weight`: Weight of the self loops in the reservoir matrix.
+ This can be provided as a single value or an array. In case it is provided as an
+ array, please make sure that the length of the array matches the length of the diagonal
+ you want to populate.
 Default is 0.1.
 - `return_sparse`: flag for returning a `sparse` matrix.
 Default is `false`.
@@ -1619,9 +1658,23 @@ W_{i,j} =
 # Keyword arguments

 - `weight`: Weight of the cycle connections in the reservoir matrix.
+ This can be provided as a single value or an array. In case it is provided as an
+ array, please make sure that the length of the array matches the length of the sub-diagonal
+ you want to populate.
 Default is 0.1.
 - `return_sparse`: flag for returning a `sparse` matrix.
 Default is `false`.
+ - `sampling_type`: Sampling that decides the distribution of negative signs in `weight`.
+ If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each
+ `weight` can be positive with a probability set by `positive_prob`. If set to
+ `:irrational_sample!` the `weight` is negative if the corresponding decimal digit of
+ the chosen irrational number is odd. Default is `:no_sample`.
+ - `positive_prob`: probability of the `weight` being positive when `sampling_type` is
+ set to `:bernoulli_sample!`. Default is 0.5.
+ - `irrational`: Irrational number whose decimals decide the sign of `weight`.
+ Default is `pi`.
+ - `start`: Which place after the decimal point the counting starts for the `irrational`
+ sign counting. Default is 1.

 # Examples

From 7da0a8a85211f4b7a728eae0483f5200edd6b3fe Mon Sep 17 00:00:00 2001
From: MartinuzziFrancesco
Date: Tue, 18 Mar 2025 12:02:46 +0100
Subject: [PATCH 9/9] refactor: formatting esn inits

---
 src/esn/esn_inits.jl | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/esn/esn_inits.jl b/src/esn/esn_inits.jl
index 2333edcd..10935bdd 100644
--- a/src/esn/esn_inits.jl
+++ b/src/esn/esn_inits.jl
@@ -668,11 +668,12 @@ Creates a matrix with backward connections as described in [^rodan2010].
 # Keyword arguments

 - `weight`: The weight determines the absolute value of
- forward connections in the reservoir.
+ forward connections in the reservoir.
 This can be provided as a single value or an array. In case it is provided as an
 array, please make sure that the length of the array matches the length of the sub-diagonal
 you want to populate.
 Default is 0.1
+
 - `fb_weight`: Determines the absolute value of backward connections
 in the reservoir.
 This can be provided as a single value or an array. In case it is provided as an
 array, please make sure that the length of the array matches the length of the sub-diagonal
 you want to populate.
 Default is 0.2
@@ -753,6 +754,7 @@ Create a cycle jumps reservoir [^Rodan2012].
 array, please make sure that the length of the array matches the length of the cycle
 you want to populate.
 Default is 0.1.
+
 - `jump_weight`: The weight of jump connections.
 This can be provided as a single value or an array. In case it is provided as an
 array, please make sure that the length of the array matches the length of the jumps
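To close, a minimal sketch of the array-valued weights documented throughout this patch series (the length rules follow the docstrings above; `dl_weights` and `cycle_weights` are illustrative names):

```julia
using ReservoirComputing

n = 5
# Per-connection weights on the delay line: with shift=1 the populated
# sub-diagonal has n - 1 entries, so the vector needs length n - 1.
dl_weights = collect(range(0.05, 0.25; length=n - 1))
res = delay_line(n, n; weight=dl_weights, shift=1)

# Simple cycle with one weight per cycle connection; the cycle closes
# back on the first node, so the vector needs length n.
cycle_weights = fill(0.1, n)
cycle_res = simple_cycle(n, n; weight=cycle_weights)
```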