Merged
2 changes: 1 addition & 1 deletion .github/workflows/Downgrade.yml
@@ -33,7 +33,7 @@ jobs:
version: ${{ matrix.version }}
- uses: julia-actions/julia-downgrade-compat@v1
with:
-skip: Pkg, TOML, Test, Random, LinearAlgebra, Statistics
+skip: Pkg, TOML, Test, Random, LinearAlgebra, Statistics, SparseArrays
- uses: julia-actions/cache@v2
with:
token: ${{ secrets.GITHUB_TOKEN }}
8 changes: 5 additions & 3 deletions Project.toml
@@ -1,7 +1,7 @@
name = "ReservoirComputing"
uuid = "7c2d2b1e-3dd4-11ea-355a-8f6a8116e294"
authors = ["Francesco Martinuzzi"]
version = "0.10.7"
version = "0.10.8"

[deps]
Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
@@ -11,6 +11,7 @@ LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
WeightInitializers = "d49dbf32-c5c2-4618-8acc-27bb2598ef2d"

@@ -35,6 +36,7 @@ NNlib = "0.9.26"
Random = "1.10"
Reexport = "1.2.2"
SafeTestsets = "0.1"
SparseArrays = "1.10"
Statistics = "1.10"
StatsBase = "0.34.4"
Test = "1"
@@ -46,10 +48,10 @@ Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa"
LIBSVM = "b1bec4e5-fd48-53fe-b0cb-9723c09d164b"
MLJLinearModels = "6ee0df7b-362f-4a72-a706-9e79364fb692"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[targets]
test = ["Aqua", "Test", "SafeTestsets", "Random", "DifferentialEquations", "MLJLinearModels", "LIBSVM", "Statistics"]
test = ["Aqua", "Test", "SafeTestsets", "DifferentialEquations",
"MLJLinearModels", "LIBSVM", "Statistics"]
1 change: 1 addition & 0 deletions src/ReservoirComputing.jl
@@ -7,6 +7,7 @@ using LinearAlgebra: eigvals, mul!, I
using NNlib: fast_act, sigmoid
using Random: Random, AbstractRNG
using Reexport: Reexport, @reexport
+using SparseArrays: sparse
using StatsBase: sample
using WeightInitializers: DeviceAgnostic, PartialFunction, Utils
@reexport using WeightInitializers
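The only code change in this file is the `sparse` import. For readers new to SparseArrays: `sparse` converts a dense matrix into compressed sparse column (CSC) storage, which is what the new `return_sparse` flag hands back from the initializers below. A minimal sketch of that standard-library behavior (not part of this diff):

```julia
using SparseArrays

dense = [0.0 0.1; 0.0 0.0]
sp = sparse(dense)       # 2×2 SparseMatrixCSC with 1 stored entry
nnz(sp)                  # 1: only the structural nonzero is stored
Matrix(sp) == dense      # true: converting back round-trips losslessly
```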
79 changes: 48 additions & 31 deletions src/esn/esn_inits.jl
@@ -1,6 +1,6 @@
### input layers
"""
-scaled_rand([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
+scaled_rand([rng], [T], dims...;
scaling=0.1)

Create and return a matrix with random values, uniformly distributed within
@@ -41,8 +41,8 @@ function scaled_rand(rng::AbstractRNG, ::Type{T}, dims::Integer...;
end

"""
-weighted_init([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
-scaling=0.1)
+weighted_init([rng], [T], dims...;
+scaling=0.1, return_sparse=true)

Create and return a matrix representing a weighted input layer.
This initializer generates a weighted input matrix with random non-zero
@@ -57,6 +57,8 @@ elements distributed uniformly within the range [-`scaling`, `scaling`] [^Lu2017]
- `dims`: Dimensions of the matrix. Should follow `res_size x in_size`.
- `scaling`: The scaling factor for the weight distribution.
Defaults to `0.1`.
+- `return_sparse`: flag for returning a `sparse` matrix.
+Default is `true`.

# Examples

@@ -77,7 +79,7 @@ julia> res_input = weighted_init(8, 3)
Chaos: An Interdisciplinary Journal of Nonlinear Science 27.4 (2017): 041102.
"""
function weighted_init(rng::AbstractRNG, ::Type{T}, dims::Integer...;
-scaling=T(0.1)) where {T <: Number}
+scaling=T(0.1), return_sparse::Bool=true) where {T <: Number}
approx_res_size, in_size = dims
res_size = Int(floor(approx_res_size / in_size) * in_size)
layer_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size)
@@ -88,11 +90,11 @@ function weighted_init(rng::AbstractRNG, ::Type{T}, dims::Integer...;
T(0.5)) .* (T(2) * scaling)
end

-return layer_matrix
+return return_sparse ? sparse(layer_matrix) : layer_matrix
end
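Based on the docstring's `weighted_init(8, 3)` example, a usage sketch of the new flag: the sparse return is now the default, so `return_sparse=false` restores the pre-0.10.8 dense behavior.

```julia
using ReservoirComputing, SparseArrays

res_input = weighted_init(8, 3)                       # SparseMatrixCSC by default
res_dense = weighted_init(8, 3; return_sparse=false)  # dense Matrix, as before
res_input isa SparseMatrixCSC                         # true
```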

"""
-informed_init([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
+informed_init([rng], [T], dims...;
scaling=0.1, model_in_size, gamma=0.5)

Create an input layer for informed echo state networks [^Pathak2018].
@@ -152,7 +154,7 @@ function informed_init(rng::AbstractRNG, ::Type{T}, dims::Integer...;
end

"""
-minimal_init([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
+minimal_init([rng], [T], dims...;
sampling_type=:bernoulli, weight=0.1, irrational=pi, start=1, p=0.5)

Create a layer matrix with uniform weights determined by `weight`. The sign difference
@@ -283,8 +285,8 @@ end
### reservoirs

"""
-rand_sparse([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
-radius=1.0, sparsity=0.1, std=1.0)
+rand_sparse([rng], [T], dims...;
+radius=1.0, sparsity=0.1, std=1.0, return_sparse=true)

Create and return a random sparse reservoir matrix.
The matrix will be of size specified by `dims`, with specified `sparsity`
@@ -301,6 +303,8 @@ and scaled spectral radius according to `radius`.
Defaults to 1.0.
- `sparsity`: The sparsity level of the reservoir matrix,
controlling the fraction of zero elements. Defaults to 0.1.
+- `return_sparse`: flag for returning a `sparse` matrix.
+Default is `true`.

# Examples

@@ -315,20 +319,22 @@ julia> res_matrix = rand_sparse(5, 5; sparsity=0.5)
```
"""
function rand_sparse(rng::AbstractRNG, ::Type{T}, dims::Integer...;
-radius=T(1.0), sparsity=T(0.1), std=T(1.0)) where {T <: Number}
+radius=T(1.0), sparsity=T(0.1), std=T(1.0),
+return_sparse::Bool=true) where {T <: Number}
lcl_sparsity = T(1) - sparsity #consistency with current implementations
reservoir_matrix = sparse_init(rng, T, dims...; sparsity=lcl_sparsity, std=std)
rho_w = maximum(abs.(eigvals(reservoir_matrix)))
reservoir_matrix .*= radius / rho_w
if Inf in unique(reservoir_matrix) || -Inf in unique(reservoir_matrix)
error("Sparsity too low for size of the matrix. Increase res_size or increase sparsity")
end
-return reservoir_matrix
+
+return return_sparse ? sparse(reservoir_matrix) : reservoir_matrix
end
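The rescaling step above pins the spectral radius of the matrix to `radius`; a quick check, mirroring the test suite's `check_radius` further down (`eigvals` has no sparse method, hence the `Matrix` conversion):

```julia
using ReservoirComputing, LinearAlgebra

W = rand_sparse(30, 30; radius=1.2, sparsity=0.1)
rho = maximum(abs.(eigvals(Matrix(W))))   # densify before eigvals
isapprox(rho, 1.2; atol=1e-5)             # true: spectral radius matches `radius`
```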

"""
-delay_line([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
-weight=0.1)
+delay_line([rng], [T], dims...;
+weight=0.1, return_sparse=true)

Create and return a delay line reservoir matrix [^Rodan2010].

@@ -341,6 +347,8 @@ Create and return a delay line reservoir matrix [^Rodan2010].
- `dims`: Dimensions of the reservoir matrix.
- `weight`: Determines the value of all connections in the reservoir.
Default is 0.1.
+- `return_sparse`: flag for returning a `sparse` matrix.
+Default is `true`.

# Examples

@@ -366,7 +374,7 @@ julia> res_matrix = delay_line(5, 5; weight=1)
IEEE transactions on neural networks 22.1 (2010): 131-144.
"""
function delay_line(rng::AbstractRNG, ::Type{T}, dims::Integer...;
-weight=T(0.1)) where {T <: Number}
+weight=T(0.1), return_sparse::Bool=true) where {T <: Number}
reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...)
@assert length(dims) == 2&&dims[1] == dims[2] "The dimensions
must define a square matrix (e.g., (100, 100))"
@@ -375,12 +383,12 @@ function delay_line(rng::AbstractRNG, ::Type{T}, dims::Integer...;
reservoir_matrix[i + 1, i] = weight
end

-return reservoir_matrix
+return return_sparse ? sparse(reservoir_matrix) : reservoir_matrix
end
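The loop above fills only the subdiagonal, so state `i` feeds state `i + 1` and nothing else. A sketch of the resulting structure, using `return_sparse=false` for easy inspection:

```julia
using ReservoirComputing

W = delay_line(4, 4; weight=0.5, return_sparse=false)
# W = [0.0  0.0  0.0  0.0
#      0.5  0.0  0.0  0.0
#      0.0  0.5  0.0  0.0
#      0.0  0.0  0.5  0.0]
```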

"""
-delay_line_backward([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
-weight = 0.1, fb_weight = 0.2)
+delay_line_backward([rng], [T], dims...;
+weight = 0.1, fb_weight = 0.2, return_sparse=true)

Create a delay line backward reservoir with the size specified by `dims` and weights.
Creates a matrix with backward connections as described in [^Rodan2010].
@@ -396,6 +404,8 @@ Creates a matrix with backward connections as described in [^Rodan2010].
forward connections in the reservoir. Default is 0.1.
- `fb_weight`: Determines the absolute value of backward connections
in the reservoir. Default is 0.2.
+- `return_sparse`: flag for returning a `sparse` matrix.
+Default is `true`.

# Examples

@@ -421,7 +431,7 @@ julia> res_matrix = delay_line_backward(Float16, 5, 5)
IEEE transactions on neural networks 22.1 (2010): 131-144.
"""
function delay_line_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...;
-weight=T(0.1), fb_weight=T(0.2)) where {T <: Number}
+weight=T(0.1), fb_weight=T(0.2), return_sparse::Bool=true) where {T <: Number}
res_size = first(dims)
reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...)

@@ -430,12 +440,12 @@ function delay_line_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...;
reservoir_matrix[i, i + 1] = fb_weight
end

-return reservoir_matrix
+return return_sparse ? sparse(reservoir_matrix) : reservoir_matrix
end

"""
-cycle_jumps([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
-cycle_weight = 0.1, jump_weight = 0.1, jump_size = 3)
+cycle_jumps([rng], [T], dims...;
+cycle_weight = 0.1, jump_weight = 0.1, jump_size = 3, return_sparse=true)

Create a cycle jumps reservoir with the specified dimensions,
cycle weight, jump weight, and jump size.
@@ -453,6 +463,8 @@ cycle weight, jump weight, and jump size.
Default is 0.1.
- `jump_size`: The number of steps between jump connections.
Default is 3.
+- `return_sparse`: flag for returning a `sparse` matrix.
+Default is `true`.

# Examples

Expand All @@ -479,7 +491,7 @@ julia> res_matrix = cycle_jumps(5, 5; jump_size=2)
"""
function cycle_jumps(rng::AbstractRNG, ::Type{T}, dims::Integer...;
cycle_weight::Number=T(0.1), jump_weight::Number=T(0.1),
-jump_size::Int=3) where {T <: Number}
+jump_size::Int=3, return_sparse::Bool=true) where {T <: Number}
res_size = first(dims)
reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...)

Expand All @@ -498,12 +510,12 @@ function cycle_jumps(rng::AbstractRNG, ::Type{T}, dims::Integer...;
reservoir_matrix[tmp, i] = jump_weight
end

-return reservoir_matrix
+return return_sparse ? sparse(reservoir_matrix) : reservoir_matrix
end

"""
-simple_cycle([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
-weight = 0.1)
+simple_cycle([rng], [T], dims...;
+weight = 0.1, return_sparse=true)

Create a simple cycle reservoir with the specified dimensions and weight.

@@ -515,6 +527,8 @@ Create a simple cycle reservoir with the specified dimensions and weight.
- `dims`: Dimensions of the reservoir matrix.
- `weight`: Weight of the connections in the reservoir matrix.
Default is 0.1.
+- `return_sparse`: flag for returning a `sparse` matrix.
+Default is `true`.

# Examples

Expand All @@ -540,20 +554,21 @@ julia> res_matrix = simple_cycle(5, 5; weight=11)
IEEE transactions on neural networks 22.1 (2010): 131-144.
"""
function simple_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...;
-weight=T(0.1)) where {T <: Number}
+weight=T(0.1), return_sparse::Bool=true) where {T <: Number}
reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...)

for i in 1:(dims[1] - 1)
reservoir_matrix[i + 1, i] = weight
end

reservoir_matrix[1, dims[1]] = weight
-return reservoir_matrix
+return return_sparse ? sparse(reservoir_matrix) : reservoir_matrix
end
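`simple_cycle` is the delay line plus the single wrap-around entry `W[1, end]` set above, which closes the ring. A small sketch:

```julia
using ReservoirComputing

W = simple_cycle(4, 4; weight=0.5, return_sparse=false)
# W = [0.0  0.0  0.0  0.5
#      0.5  0.0  0.0  0.0
#      0.0  0.5  0.0  0.0
#      0.0  0.0  0.5  0.0]
```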

"""
-pseudo_svd([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
-max_value=1.0, sparsity=0.1, sorted = true, reverse_sort = false)
+pseudo_svd([rng], [T], dims...;
+max_value=1.0, sparsity=0.1, sorted = true, reverse_sort = false,
+return_sparse=true)

Returns an initializer to build a sparse reservoir matrix with the given
`sparsity` by using a pseudo-SVD approach as described in [^yang].
@@ -573,6 +588,8 @@ Returns an initializer to build a sparse reservoir matrix with the given
creating the diagonal matrix. Default is `true`.
- `reverse_sort`: A boolean indicating whether to reverse the sorted
singular values. Default is `false`.
+- `return_sparse`: flag for returning a `sparse` matrix.
+Default is `true`.

# Examples

Expand All @@ -590,7 +607,7 @@ julia> res_matrix = pseudo_svd(5, 5)
"""
function pseudo_svd(rng::AbstractRNG, ::Type{T}, dims::Integer...;
max_value::Number=T(1.0), sparsity::Number=0.1, sorted::Bool=true,
-reverse_sort::Bool=false) where {T <: Number}
+reverse_sort::Bool=false, return_sparse::Bool=true) where {T <: Number}
reservoir_matrix = create_diag(rng, T, dims[1],
max_value;
sorted=sorted,
@@ -605,7 +622,7 @@ function pseudo_svd(rng::AbstractRNG, ::Type{T}, dims::Integer...;
tmp_sparsity = get_sparsity(reservoir_matrix, dims[1])
end

-return reservoir_matrix
+return return_sparse ? sparse(reservoir_matrix) : reservoir_matrix
end
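Since the pseudo-SVD construction starts from a diagonal of values bounded by `max_value` and, per [^yang], applies orthogonal rotations to it, the singular values of the result should stay within `[0, max_value]`. A hedged property check under that assumption (the rotation loop is folded out of this diff):

```julia
using ReservoirComputing, LinearAlgebra

W = Matrix(pseudo_svd(10, 10; max_value=1.0))
maximum(svdvals(W)) <= 1.0 + 1e-6   # expected true under the construction above
```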

#hacky workaround for the moment
7 changes: 4 additions & 3 deletions test/esn/test_inits.jl
@@ -1,6 +1,4 @@
-using ReservoirComputing
-using LinearAlgebra
-using Random
+using ReservoirComputing, LinearAlgebra, Random, SparseArrays

const res_size = 30
const in_size = 3
@@ -11,6 +9,9 @@ const jump_size = 3
const rng = Random.default_rng()

function check_radius(matrix, target_radius; tolerance=1e-5)
+if matrix isa SparseArrays.SparseMatrixCSC
+    matrix = Matrix(matrix)
+end
eigenvalues = eigvals(matrix)
spectral_radius = maximum(abs.(eigenvalues))
return isapprox(spectral_radius, target_radius; atol=tolerance)
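With the sparse-by-default initializers, `check_radius` now receives `SparseMatrixCSC` inputs, which the densify branch above handles before calling `eigvals`. A usage sketch with the constants defined at the top of this file:

```julia
using Test

W = rand_sparse(rng, Float32, res_size, res_size)
@test check_radius(W, 1.0)   # rand_sparse defaults to radius = 1.0
```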