Commit 105ae5a

fixing docstrings and tests, return_sparse true as default

1 parent: 5d88459

3 files changed: +46 -30 lines
Project.toml

Lines changed: 2 additions & 2 deletions

@@ -48,10 +48,10 @@ Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
 DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa"
 LIBSVM = "b1bec4e5-fd48-53fe-b0cb-9723c09d164b"
 MLJLinearModels = "6ee0df7b-362f-4a72-a706-9e79364fb692"
-Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
 Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 
 [targets]
-test = ["Aqua", "Test", "SafeTestsets", "Random", "DifferentialEquations", "MLJLinearModels", "LIBSVM", "Statistics"]
+test = ["Aqua", "Test", "SafeTestsets", "DifferentialEquations",
+    "MLJLinearModels", "LIBSVM", "Statistics"]

src/esn/esn_inits.jl

Lines changed: 40 additions & 25 deletions

@@ -1,6 +1,6 @@
 ### input layers
 """
-    scaled_rand([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
+    scaled_rand([rng], [T], dims...;
         scaling=0.1)
 
 Create and return a matrix with random values, uniformly distributed within
@@ -41,8 +41,8 @@ function scaled_rand(rng::AbstractRNG, ::Type{T}, dims::Integer...;
 end
 
 """
-    weighted_init([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
-        scaling=0.1)
+    weighted_init([rng], [T], dims...;
+        scaling=0.1, return_sparse=true)
 
 Create and return a matrix representing a weighted input layer.
 This initializer generates a weighted input matrix with random non-zero
@@ -57,6 +57,8 @@ elements distributed uniformly within the range [-`scaling`, `scaling`] [^Lu2017
   - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`.
   - `scaling`: The scaling factor for the weight distribution.
     Defaults to `0.1`.
+  - `return_sparse`: flag for returning a `sparse` matrix.
+    Default is `true`.
 
 # Examples
 
@@ -77,7 +79,7 @@ julia> res_input = weighted_init(8, 3)
     Chaos: An Interdisciplinary Journal of Nonlinear Science 27.4 (2017): 041102.
 """
 function weighted_init(rng::AbstractRNG, ::Type{T}, dims::Integer...;
-        scaling=T(0.1)) where {T <: Number}
+        scaling=T(0.1), return_sparse::Bool=true) where {T <: Number}
     approx_res_size, in_size = dims
     res_size = Int(floor(approx_res_size / in_size) * in_size)
     layer_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size)
@@ -88,11 +90,11 @@ function weighted_init(rng::AbstractRNG, ::Type{T}, dims::Integer...;
             T(0.5)) .* (T(2) * scaling)
     end
 
-    return layer_matrix
+    return return_sparse ? sparse(layer_matrix) : layer_matrix
 end
 
 """
-    informed_init([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
+    informed_init([rng], [T], dims...;
         scaling=0.1, model_in_size, gamma=0.5)
 
 Create an input layer for informed echo state networks [^Pathak2018].
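
With this hunk `weighted_init` now returns a `SparseMatrixCSC` by default rather than a dense `Matrix`. A minimal sketch of both call paths, assuming the exported API shown above:

    using ReservoirComputing, SparseArrays

    W_in = weighted_init(8, 3)                             # sparse by default now
    W_in_dense = weighted_init(8, 3; return_sparse=false)  # previous dense behavior

    issparse(W_in)        # true
    issparse(W_in_dense)  # false

Callers that mutate the input layer in place or rely on dense BLAS routines will want the explicit `return_sparse=false` opt-out.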
@@ -152,7 +154,7 @@ function informed_init(rng::AbstractRNG, ::Type{T}, dims::Integer...;
 end
 
 """
-    minimal_init([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
+    minimal_init([rng], [T], dims...;
         sampling_type=:bernoulli, weight=0.1, irrational=pi, start=1, p=0.5)
 
 Create a layer matrix with uniform weights determined by `weight`. The sign difference
@@ -283,8 +285,8 @@ end
 ### reservoirs
 
 """
-    rand_sparse([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
-        radius=1.0, sparsity=0.1, std=1.0)
+    rand_sparse([rng], [T], dims...;
+        radius=1.0, sparsity=0.1, std=1.0, return_sparse=true)
 
 Create and return a random sparse reservoir matrix.
 The matrix will be of size specified by `dims`, with specified `sparsity`
@@ -301,6 +303,8 @@ and scaled spectral radius according to `radius`.
     Defaults to 1.0.
   - `sparsity`: The sparsity level of the reservoir matrix,
     controlling the fraction of zero elements. Defaults to 0.1.
+  - `return_sparse`: flag for returning a `sparse` matrix.
+    Default is `true`.
 
 # Examples
 
@@ -316,7 +320,7 @@ julia> res_matrix = rand_sparse(5, 5; sparsity=0.5)
 """
 function rand_sparse(rng::AbstractRNG, ::Type{T}, dims::Integer...;
         radius=T(1.0), sparsity=T(0.1), std=T(1.0),
-        return_sparse::Bool=false) where {T <: Number}
+        return_sparse::Bool=true) where {T <: Number}
     lcl_sparsity = T(1) - sparsity #consistency with current implementations
     reservoir_matrix = sparse_init(rng, T, dims...; sparsity=lcl_sparsity, std=std)
     rho_w = maximum(abs.(eigvals(reservoir_matrix)))
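
`eigvals` is not defined for sparse matrices, so verifying the spectral radius of the now-sparse default return requires densifying first; this is exactly what the updated `check_radius` test helper at the bottom of this commit does. A short sketch, assuming the API above:

    using ReservoirComputing, LinearAlgebra, SparseArrays

    W = rand_sparse(5, 5; radius=1.2, sparsity=0.5)
    rho = maximum(abs.(eigvals(Matrix(W))))  # densify before the eigendecomposition
    isapprox(rho, 1.2; atol=1e-5)            # the matrix was rescaled to `radius`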
@@ -329,8 +333,8 @@ function rand_sparse(rng::AbstractRNG, ::Type{T}, dims::Integer...;
 end
 
 """
-    delay_line([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
-        weight=0.1)
+    delay_line([rng], [T], dims...;
+        weight=0.1, return_sparse=true)
 
 Create and return a delay line reservoir matrix [^Rodan2010].
 
@@ -343,6 +347,8 @@ Create and return a delay line reservoir matrix [^Rodan2010].
   - `dims`: Dimensions of the reservoir matrix.
   - `weight`: Determines the value of all connections in the reservoir.
     Default is 0.1.
+  - `return_sparse`: flag for returning a `sparse` matrix.
+    Default is `true`.
 
 # Examples
 
@@ -368,7 +374,7 @@ julia> res_matrix = delay_line(5, 5; weight=1)
     IEEE transactions on neural networks 22.1 (2010): 131-144.
 """
 function delay_line(rng::AbstractRNG, ::Type{T}, dims::Integer...;
-        weight=T(0.1), return_sparse::Bool=false) where {T <: Number}
+        weight=T(0.1), return_sparse::Bool=true) where {T <: Number}
     reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...)
     @assert length(dims) == 2&&dims[1] == dims[2] "The dimensions
     must define a square matrix (e.g., (100, 100))"
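
The same default flip applies to `delay_line` here and to `delay_line_backward` and `cycle_jumps` in the hunks that follow. A delay line populates a single off-diagonal, so the sparse form stores only n - 1 entries; a sketch, assuming the API above:

    using ReservoirComputing, SparseArrays

    W = delay_line(5, 5; weight=1)
    nnz(W)      # 4 stored entries for a 5 x 5 delay line
    findnz(W)   # rows, columns, and values of the shift connections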
@@ -381,8 +387,8 @@ function delay_line(rng::AbstractRNG, ::Type{T}, dims::Integer...;
 end
 
 """
-    delay_line_backward([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
-        weight = 0.1, fb_weight = 0.2)
+    delay_line_backward([rng], [T], dims...;
+        weight = 0.1, fb_weight = 0.2, return_sparse=true)
 
 Create a delay line backward reservoir with the specified by `dims` and weights.
 Creates a matrix with backward connections as described in [^Rodan2010].
@@ -398,6 +404,8 @@ Creates a matrix with backward connections as described in [^Rodan2010].
     forward connections in the reservoir. Default is 0.1
   - `fb_weight`: Determines the absolute value of backward connections
     in the reservoir. Default is 0.2
+  - `return_sparse`: flag for returning a `sparse` matrix.
+    Default is `true`.
 
 # Examples
 
@@ -423,7 +431,7 @@ julia> res_matrix = delay_line_backward(Float16, 5, 5)
     IEEE transactions on neural networks 22.1 (2010): 131-144.
 """
 function delay_line_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...;
-        weight=T(0.1), fb_weight=T(0.2), return_sparse::Bool=false) where {T <: Number}
+        weight=T(0.1), fb_weight=T(0.2), return_sparse::Bool=true) where {T <: Number}
     res_size = first(dims)
     reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...)
 
@@ -436,8 +444,8 @@ function delay_line_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...;
 end
 
 """
-    cycle_jumps([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
-        cycle_weight = 0.1, jump_weight = 0.1, jump_size = 3)
+    cycle_jumps([rng], [T], dims...;
+        cycle_weight = 0.1, jump_weight = 0.1, jump_size = 3, return_sparse=true)
 
 Create a cycle jumps reservoir with the specified dimensions,
 cycle weight, jump weight, and jump size.
@@ -455,6 +463,8 @@ cycle weight, jump weight, and jump size.
     Default is 0.1.
   - `jump_size`: The number of steps between jump connections.
     Default is 3.
+  - `return_sparse`: flag for returning a `sparse` matrix.
+    Default is `true`.
 
 # Examples
 
@@ -481,7 +491,7 @@ julia> res_matrix = cycle_jumps(5, 5; jump_size=2)
 """
 function cycle_jumps(rng::AbstractRNG, ::Type{T}, dims::Integer...;
         cycle_weight::Number=T(0.1), jump_weight::Number=T(0.1),
-        jump_size::Int=3, return_sparse::Bool=false) where {T <: Number}
+        jump_size::Int=3, return_sparse::Bool=true) where {T <: Number}
     res_size = first(dims)
     reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...)
 
@@ -504,8 +514,8 @@ function cycle_jumps(rng::AbstractRNG, ::Type{T}, dims::Integer...;
 end
 
 """
-    simple_cycle([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
-        weight = 0.1)
+    simple_cycle([rng], [T], dims...;
+        weight = 0.1, return_sparse=true)
 
 Create a simple cycle reservoir with the specified dimensions and weight.
 
@@ -517,6 +527,8 @@ Create a simple cycle reservoir with the specified dimensions and weight.
   - `dims`: Dimensions of the reservoir matrix.
   - `weight`: Weight of the connections in the reservoir matrix.
     Default is 0.1.
+  - `return_sparse`: flag for returning a `sparse` matrix.
+    Default is `true`.
 
 # Examples
 
@@ -542,7 +554,7 @@ julia> res_matrix = simple_cycle(5, 5; weight=11)
     IEEE transactions on neural networks 22.1 (2010): 131-144.
 """
 function simple_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...;
-        weight=T(0.1), return_sparse::Bool=false) where {T <: Number}
+        weight=T(0.1), return_sparse::Bool=true) where {T <: Number}
     reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...)
 
     for i in 1:(dims[1] - 1)
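
For ring topologies such as `simple_cycle` the sparse default is where the memory savings show up: an n-by-n cycle stores only n weights. A sketch under the same API assumptions (the entry count assumes the standard simple cycle construction with one connection per node):

    using ReservoirComputing, SparseArrays

    W = simple_cycle(100, 100)
    nnz(W)       # 100 stored entries rather than 10_000 dense values
    Matrix(W)    # densify explicitly when an algorithm requires it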
@@ -554,8 +566,9 @@ function simple_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...;
 end
 
 """
-    pseudo_svd([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
-        max_value=1.0, sparsity=0.1, sorted = true, reverse_sort = false)
+    pseudo_svd([rng], [T], dims...;
+        max_value=1.0, sparsity=0.1, sorted = true, reverse_sort = false,
+        return_sparse=true)
 
 Returns an initializer to build a sparse reservoir matrix with the given
 `sparsity` by using a pseudo-SVD approach as described in [^yang].
@@ -575,6 +588,8 @@ Returns an initializer to build a sparse reservoir matrix with the given
     creating the diagonal matrix. Default is `true`.
   - `reverse_sort`: A boolean indicating whether to reverse the sorted
     singular values. Default is `false`.
+  - `return_sparse`: flag for returning a `sparse` matrix.
+    Default is `true`.
 
 # Examples
 
@@ -592,7 +607,7 @@ julia> res_matrix = pseudo_svd(5, 5)
 """
 function pseudo_svd(rng::AbstractRNG, ::Type{T}, dims::Integer...;
         max_value::Number=T(1.0), sparsity::Number=0.1, sorted::Bool=true,
-        reverse_sort::Bool=false, return_sparse::Bool=false) where {T <: Number}
+        reverse_sort::Bool=false, return_sparse::Bool=true) where {T <: Number}
     reservoir_matrix = create_diag(rng, T, dims[1],
         max_value;
         sorted=sorted,
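
All of these initializers still accept the optional positional arguments that the shortened docstring signatures abbreviate as `[rng]` and `[T]`. A combined sketch, assuming the signatures shown throughout this file:

    using ReservoirComputing, Random

    rng = Random.default_rng()
    W_sparse = pseudo_svd(rng, Float64, 5, 5; sparsity=0.2)  # sparse by default
    W_dense = pseudo_svd(rng, Float64, 5, 5; sparsity=0.2,
        return_sparse=false)                                 # dense opt-out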

test/esn/test_inits.jl

Lines changed: 4 additions & 3 deletions

@@ -1,6 +1,4 @@
-using ReservoirComputing
-using LinearAlgebra
-using Random
+using ReservoirComputing, LinearAlgebra, Random, SparseArrays
 
 const res_size = 30
 const in_size = 3
@@ -11,6 +9,9 @@ const jump_size = 3
 const rng = Random.default_rng()
 
 function check_radius(matrix, target_radius; tolerance=1e-5)
+    if matrix isa SparseArrays.SparseMatrixCSC
+        matrix = Matrix(matrix)
+    end
     eigenvalues = eigvals(matrix)
     spectral_radius = maximum(abs.(eigenvalues))
     return isapprox(spectral_radius, target_radius; atol=tolerance)
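
With sparse returns now the default, matrices reaching `check_radius` can be `SparseMatrixCSC`, on which `eigvals` would fail; the added guard densifies them first. A hypothetical call, reusing the constants defined at the top of this test file:

    W = rand_sparse(rng, Float32, res_size, res_size; radius=1.2)
    check_radius(W, 1.2)  # true: the guard converts W before eigvals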
