diff --git a/.github/workflows/Downgrade.yml b/.github/workflows/Downgrade.yml index bbcfc4ba..7da86654 100644 --- a/.github/workflows/Downgrade.yml +++ b/.github/workflows/Downgrade.yml @@ -33,7 +33,7 @@ jobs: version: ${{ matrix.version }} - uses: julia-actions/julia-downgrade-compat@v1 with: - skip: Pkg, TOML, Test, Random, LinearAlgebra, Statistics + skip: Pkg, TOML, Test, Random, LinearAlgebra, Statistics, SparseArrays - uses: julia-actions/cache@v2 with: token: ${{ secrets.GITHUB_TOKEN }} diff --git a/Project.toml b/Project.toml index 61ae1901..773a172f 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "ReservoirComputing" uuid = "7c2d2b1e-3dd4-11ea-355a-8f6a8116e294" authors = ["Francesco Martinuzzi"] -version = "0.10.7" +version = "0.10.8" [deps] Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" @@ -11,6 +11,7 @@ LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" +SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" WeightInitializers = "d49dbf32-c5c2-4618-8acc-27bb2598ef2d" @@ -35,6 +36,7 @@ NNlib = "0.9.26" Random = "1.10" Reexport = "1.2.2" SafeTestsets = "0.1" +SparseArrays = "1.10" Statistics = "1.10" StatsBase = "0.34.4" Test = "1" @@ -46,10 +48,10 @@ Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" LIBSVM = "b1bec4e5-fd48-53fe-b0cb-9723c09d164b" MLJLinearModels = "6ee0df7b-362f-4a72-a706-9e79364fb692" -Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f" Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [targets] -test = ["Aqua", "Test", "SafeTestsets", "Random", "DifferentialEquations", "MLJLinearModels", "LIBSVM", "Statistics"] +test = ["Aqua", "Test", "SafeTestsets", "DifferentialEquations", + "MLJLinearModels", "LIBSVM", "Statistics"] diff --git a/src/ReservoirComputing.jl b/src/ReservoirComputing.jl index 447ce8fe..46fd362b 100644 --- a/src/ReservoirComputing.jl +++ b/src/ReservoirComputing.jl @@ -7,6 +7,7 @@ using LinearAlgebra: eigvals, mul!, I using NNlib: fast_act, sigmoid using Random: Random, AbstractRNG using Reexport: Reexport, @reexport +using SparseArrays: sparse using StatsBase: sample using WeightInitializers: DeviceAgnostic, PartialFunction, Utils @reexport using WeightInitializers diff --git a/src/esn/esn_inits.jl b/src/esn/esn_inits.jl index 929d23f3..79805381 100644 --- a/src/esn/esn_inits.jl +++ b/src/esn/esn_inits.jl @@ -1,6 +1,6 @@ ### input layers """ - scaled_rand([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...; + scaled_rand([rng], [T], dims...; scaling=0.1) Create and return a matrix with random values, uniformly distributed within @@ -41,8 +41,8 @@ function scaled_rand(rng::AbstractRNG, ::Type{T}, dims::Integer...; end """ - weighted_init([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...; - scaling=0.1) + weighted_init([rng], [T], dims...; + scaling=0.1, return_sparse=true) Create and return a matrix representing a weighted input layer. This initializer generates a weighted input matrix with random non-zero @@ -57,6 +57,8 @@ elements distributed uniformly within the range [-`scaling`, `scaling`] [^Lu2017 - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`. - `scaling`: The scaling factor for the weight distribution. Defaults to `0.1`. 
+ - `return_sparse`: flag for returning a `sparse` matrix. + Default is `true`. # Examples @@ -77,7 +79,7 @@ julia> res_input = weighted_init(8, 3) Chaos: An Interdisciplinary Journal of Nonlinear Science 27.4 (2017): 041102. """ function weighted_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; - scaling=T(0.1)) where {T <: Number} + scaling=T(0.1), return_sparse::Bool=true) where {T <: Number} approx_res_size, in_size = dims res_size = Int(floor(approx_res_size / in_size) * in_size) layer_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size) @@ -88,11 +90,11 @@ function weighted_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; T(0.5)) .* (T(2) * scaling) end - return layer_matrix + return return_sparse ? sparse(layer_matrix) : layer_matrix end """ - informed_init([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...; + informed_init([rng], [T], dims...; scaling=0.1, model_in_size, gamma=0.5) Create an input layer for informed echo state networks [^Pathak2018]. @@ -152,7 +154,7 @@ function informed_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; end """ - minimal_init([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...; + minimal_init([rng], [T], dims...; sampling_type=:bernoulli, weight=0.1, irrational=pi, start=1, p=0.5) Create a layer matrix with uniform weights determined by `weight`. The sign difference @@ -283,8 +285,8 @@ end ### reservoirs """ - rand_sparse([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...; - radius=1.0, sparsity=0.1, std=1.0) + rand_sparse([rng], [T], dims...; + radius=1.0, sparsity=0.1, std=1.0, return_sparse=true) Create and return a random sparse reservoir matrix. The matrix will be of size specified by `dims`, with specified `sparsity` @@ -301,6 +303,8 @@ and scaled spectral radius according to `radius`. Defaults to 1.0. - `sparsity`: The sparsity level of the reservoir matrix, controlling the fraction of zero elements. Defaults to 0.1. + - `return_sparse`: flag for returning a `sparse` matrix. + Default is `true`. # Examples @@ -315,7 +319,8 @@ julia> res_matrix = rand_sparse(5, 5; sparsity=0.5) ``` """ function rand_sparse(rng::AbstractRNG, ::Type{T}, dims::Integer...; - radius=T(1.0), sparsity=T(0.1), std=T(1.0)) where {T <: Number} + radius=T(1.0), sparsity=T(0.1), std=T(1.0), + return_sparse::Bool=true) where {T <: Number} lcl_sparsity = T(1) - sparsity #consistency with current implementations reservoir_matrix = sparse_init(rng, T, dims...; sparsity=lcl_sparsity, std=std) rho_w = maximum(abs.(eigvals(reservoir_matrix))) @@ -323,12 +328,13 @@ function rand_sparse(rng::AbstractRNG, ::Type{T}, dims::Integer...; if Inf in unique(reservoir_matrix) || -Inf in unique(reservoir_matrix) error("Sparsity too low for size of the matrix. Increase res_size or increase sparsity") end - return reservoir_matrix + + return return_sparse ? sparse(reservoir_matrix) : reservoir_matrix end """ - delay_line([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...; - weight=0.1) + delay_line([rng], [T], dims...; + weight=0.1, return_sparse=true) Create and return a delay line reservoir matrix [^Rodan2010]. @@ -341,6 +347,8 @@ Create and return a delay line reservoir matrix [^Rodan2010]. - `dims`: Dimensions of the reservoir matrix. - `weight`: Determines the value of all connections in the reservoir. Default is 0.1. + - `return_sparse`: flag for returning a `sparse` matrix. + Default is `true`. # Examples @@ -366,7 +374,7 @@ julia> res_matrix = delay_line(5, 5; weight=1) IEEE transactions on neural networks 22.1 (2010): 131-144. 
""" function delay_line(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight=T(0.1)) where {T <: Number} + weight=T(0.1), return_sparse::Bool=true) where {T <: Number} reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) @assert length(dims) == 2&&dims[1] == dims[2] "The dimensions must define a square matrix (e.g., (100, 100))" @@ -375,12 +383,12 @@ function delay_line(rng::AbstractRNG, ::Type{T}, dims::Integer...; reservoir_matrix[i + 1, i] = weight end - return reservoir_matrix + return return_sparse ? sparse(reservoir_matrix) : reservoir_matrix end """ - delay_line_backward([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...; - weight = 0.1, fb_weight = 0.2) + delay_line_backward([rng], [T], dims...; + weight = 0.1, fb_weight = 0.2, return_sparse=true) Create a delay line backward reservoir with the specified by `dims` and weights. Creates a matrix with backward connections as described in [^Rodan2010]. @@ -396,6 +404,8 @@ Creates a matrix with backward connections as described in [^Rodan2010]. forward connections in the reservoir. Default is 0.1 - `fb_weight`: Determines the absolute value of backward connections in the reservoir. Default is 0.2 + - `return_sparse`: flag for returning a `sparse` matrix. + Default is `true`. # Examples @@ -421,7 +431,7 @@ julia> res_matrix = delay_line_backward(Float16, 5, 5) IEEE transactions on neural networks 22.1 (2010): 131-144. """ function delay_line_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight=T(0.1), fb_weight=T(0.2)) where {T <: Number} + weight=T(0.1), fb_weight=T(0.2), return_sparse::Bool=true) where {T <: Number} res_size = first(dims) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) @@ -430,12 +440,12 @@ function delay_line_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...; reservoir_matrix[i, i + 1] = fb_weight end - return reservoir_matrix + return return_sparse ? sparse(reservoir_matrix) : reservoir_matrix end """ - cycle_jumps([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...; - cycle_weight = 0.1, jump_weight = 0.1, jump_size = 3) + cycle_jumps([rng], [T], dims...; + cycle_weight = 0.1, jump_weight = 0.1, jump_size = 3, return_sparse=true) Create a cycle jumps reservoir with the specified dimensions, cycle weight, jump weight, and jump size. @@ -453,6 +463,8 @@ cycle weight, jump weight, and jump size. Default is 0.1. - `jump_size`: The number of steps between jump connections. Default is 3. + - `return_sparse`: flag for returning a `sparse` matrix. + Default is `true`. # Examples @@ -479,7 +491,7 @@ julia> res_matrix = cycle_jumps(5, 5; jump_size=2) """ function cycle_jumps(rng::AbstractRNG, ::Type{T}, dims::Integer...; cycle_weight::Number=T(0.1), jump_weight::Number=T(0.1), - jump_size::Int=3) where {T <: Number} + jump_size::Int=3, return_sparse::Bool=true) where {T <: Number} res_size = first(dims) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) @@ -498,12 +510,12 @@ function cycle_jumps(rng::AbstractRNG, ::Type{T}, dims::Integer...; reservoir_matrix[tmp, i] = jump_weight end - return reservoir_matrix + return return_sparse ? sparse(reservoir_matrix) : reservoir_matrix end """ - simple_cycle([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...; - weight = 0.1) + simple_cycle([rng], [T], dims...; + weight = 0.1, return_sparse=true) Create a simple cycle reservoir with the specified dimensions and weight. @@ -515,6 +527,8 @@ Create a simple cycle reservoir with the specified dimensions and weight. - `dims`: Dimensions of the reservoir matrix. 
- `weight`: Weight of the connections in the reservoir matrix. Default is 0.1. + - `return_sparse`: flag for returning a `sparse` matrix. + Default is `true`. # Examples @@ -540,7 +554,7 @@ julia> res_matrix = simple_cycle(5, 5; weight=11) IEEE transactions on neural networks 22.1 (2010): 131-144. """ function simple_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight=T(0.1)) where {T <: Number} + weight=T(0.1), return_sparse::Bool=true) where {T <: Number} reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) for i in 1:(dims[1] - 1) @@ -548,12 +562,13 @@ function simple_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; end reservoir_matrix[1, dims[1]] = weight - return reservoir_matrix + return return_sparse ? sparse(reservoir_matrix) : reservoir_matrix end """ - pseudo_svd([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...; - max_value=1.0, sparsity=0.1, sorted = true, reverse_sort = false) + pseudo_svd([rng], [T], dims...; + max_value=1.0, sparsity=0.1, sorted = true, reverse_sort = false, + return_sparse=true) Returns an initializer to build a sparse reservoir matrix with the given `sparsity` by using a pseudo-SVD approach as described in [^yang]. @@ -573,6 +588,8 @@ Returns an initializer to build a sparse reservoir matrix with the given creating the diagonal matrix. Default is `true`. - `reverse_sort`: A boolean indicating whether to reverse the sorted singular values. Default is `false`. + - `return_sparse`: flag for returning a `sparse` matrix. + Default is `true`. # Examples @@ -590,7 +607,7 @@ julia> res_matrix = pseudo_svd(5, 5) """ function pseudo_svd(rng::AbstractRNG, ::Type{T}, dims::Integer...; max_value::Number=T(1.0), sparsity::Number=0.1, sorted::Bool=true, - reverse_sort::Bool=false) where {T <: Number} + reverse_sort::Bool=false, return_sparse::Bool=true) where {T <: Number} reservoir_matrix = create_diag(rng, T, dims[1], max_value; sorted=sorted, @@ -605,7 +622,7 @@ function pseudo_svd(rng::AbstractRNG, ::Type{T}, dims::Integer...; tmp_sparsity = get_sparsity(reservoir_matrix, dims[1]) end - return reservoir_matrix + return return_sparse ? sparse(reservoir_matrix) : reservoir_matrix end #hacky workaround for the moment diff --git a/test/esn/test_inits.jl b/test/esn/test_inits.jl index 779fe5e3..3a337f42 100644 --- a/test/esn/test_inits.jl +++ b/test/esn/test_inits.jl @@ -1,6 +1,4 @@ -using ReservoirComputing -using LinearAlgebra -using Random +using ReservoirComputing, LinearAlgebra, Random, SparseArrays const res_size = 30 const in_size = 3 @@ -11,6 +9,9 @@ const jump_size = 3 const rng = Random.default_rng() function check_radius(matrix, target_radius; tolerance=1e-5) + if matrix isa SparseArrays.SparseMatrixCSC + matrix = Matrix(matrix) + end eigenvalues = eigvals(matrix) spectral_radius = maximum(abs.(eigenvalues)) return isapprox(spectral_radius, target_radius; atol=tolerance)
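
A brief usage sketch of the `return_sparse` keyword added across these initializers. The calls mirror the docstring examples in the diff; the 5×5 sizes and variable names are illustrative only, not part of the patch:

```julia
# Minimal sketch of the new `return_sparse` behavior (illustrative values).
using ReservoirComputing, SparseArrays, LinearAlgebra

res_sparse = rand_sparse(5, 5; sparsity=0.5)        # SparseMatrixCSC by default now
res_dense  = rand_sparse(5, 5; return_sparse=false) # opt out to keep a dense Matrix

# As in the updated `check_radius` test helper, convert to dense before `eigvals`,
# since `eigvals` does not accept general sparse matrices.
rho = maximum(abs.(eigvals(Matrix(res_sparse))))
```

Defaulting `return_sparse` to `true` means downstream code that relies on dense-only routines (such as `eigvals`) needs an explicit `Matrix(...)` conversion, which is what the updated `check_radius` helper in `test/esn/test_inits.jl` does.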