diff --git a/.JuliaFormatter.toml b/.JuliaFormatter.toml
old mode 100644
new mode 100755
diff --git a/.buildkite/documentation.yml b/.buildkite/documentation.yml
old mode 100644
new mode 100755
diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
old mode 100644
new mode 100755
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
old mode 100644
new mode 100755
diff --git a/.github/workflows/CompatHelper.yml b/.github/workflows/CompatHelper.yml
old mode 100644
new mode 100755
diff --git a/.github/workflows/Downgrade.yml b/.github/workflows/Downgrade.yml
old mode 100644
new mode 100755
diff --git a/.github/workflows/FormatCheck.yml b/.github/workflows/FormatCheck.yml
old mode 100644
new mode 100755
diff --git a/.github/workflows/Invalidations.yml b/.github/workflows/Invalidations.yml
old mode 100644
new mode 100755
diff --git a/.github/workflows/TagBot.yml b/.github/workflows/TagBot.yml
old mode 100644
new mode 100755
diff --git a/.github/workflows/Tests.yml b/.github/workflows/Tests.yml
old mode 100644
new mode 100755
diff --git a/.gitignore b/.gitignore
old mode 100644
new mode 100755
diff --git a/.typos.toml b/.typos.toml
old mode 100644
new mode 100755
diff --git a/CITATION.bib b/CITATION.bib
old mode 100644
new mode 100755
diff --git a/LICENSE b/LICENSE
old mode 100644
new mode 100755
diff --git a/Project.toml b/Project.toml
old mode 100644
new mode 100755
index a7c35268..86eb9627
--- a/Project.toml
+++ b/Project.toml
@@ -1,19 +1,19 @@
name = "ReservoirComputing"
uuid = "7c2d2b1e-3dd4-11ea-355a-8f6a8116e294"
authors = ["Francesco Martinuzzi"]
-version = "0.10.4"
+version = "0.10.5"
[deps]
Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
CellularAutomata = "878138dc-5b27-11ea-1a71-cb95d38d6b29"
Distances = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7"
-Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
-Optim = "429524aa-4258-5aef-a3af-852621145aeb"
PartialFunctions = "570af359-4316-4cb7-8c74-252c00c2016b"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
+StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
WeightInitializers = "d49dbf32-c5c2-4618-8acc-27bb2598ef2d"
[weakdeps]
@@ -30,23 +30,25 @@ Aqua = "0.8"
CellularAutomata = "0.0.2"
DifferentialEquations = "7"
Distances = "0.10"
-Distributions = "0.25.36"
LIBSVM = "0.8"
LinearAlgebra = "1.10"
MLJLinearModels = "0.9.2, 0.10"
NNlib = "0.8.4, 0.9"
-Optim = "1"
PartialFunctions = "1.2"
Random = "1.10"
+Reexport = "1.2.2"
SafeTestsets = "0.1"
Statistics = "1.10"
+StatsBase = "0.34.4"
Test = "1"
-WeightInitializers = "0.1.6"
+WeightInitializers = "1.0.4"
julia = "1.10"
[extras]
Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa"
+LIBSVM = "b1bec4e5-fd48-53fe-b0cb-9723c09d164b"
+MLJLinearModels = "6ee0df7b-362f-4a72-a706-9e79364fb692"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
diff --git a/README.md b/README.md
old mode 100644
new mode 100755
index 82154140..ce952571
--- a/README.md
+++ b/README.md
@@ -1,25 +1,19 @@
-
-
[](https://julialang.zulipchat.com/#narrow/stream/279055-sciml-bridged)
[](https://docs.sciml.ai/ReservoirComputing/stable/)
[](https://arxiv.org/abs/2204.05117)
-
[](https://codecov.io/gh/SciML/ReservoirComputing.jl)
[](https://github.com/SciML/ReservoirComputing.jl/actions?query=workflow%3ACI)
[](https://buildkite.com/julialang/reservoircomputing-dot-jl)
-
[](https://github.com/SciML/ColPrac)
[](https://github.com/SciML/SciMLStyle)
-
# ReservoirComputing.jl
ReservoirComputing.jl provides an efficient, modular, and easy-to-use implementation of Reservoir Computing models such as Echo State Networks (ESNs). For information on using this package, please refer to the [stable documentation](https://docs.sciml.ai/ReservoirComputing/stable/). Use the [in-development documentation](https://docs.sciml.ai/ReservoirComputing/dev/) to take a look at features that have not yet been released.
-
## Quick Example
To illustrate the workflow of this library, we will show how to train an ESN to learn the dynamics of the Lorenz system. The first step is to gather the data. For `Generative` prediction, the target data must be one step ahead of the training data:
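A minimal sketch of that one-step-ahead split, assuming `data` holds the trajectory as a (variables × time) matrix; all names are illustrative:

```julia
# stand-in for a Lorenz trajectory; shape is (variables × time)
data = rand(Float32, 3, 6000)
shift, train_len = 300, 5000
input_data = data[:, shift:(shift + train_len - 1)]
target_data = data[:, (shift + 1):(shift + train_len)]   # one step ahead of the input
```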
diff --git a/docs/Project.toml b/docs/Project.toml
old mode 100644
new mode 100755
diff --git a/docs/make.jl b/docs/make.jl
old mode 100644
new mode 100755
diff --git a/docs/pages.jl b/docs/pages.jl
old mode 100644
new mode 100755
diff --git a/docs/src/api/esn.md b/docs/src/api/esn.md
old mode 100644
new mode 100755
diff --git a/docs/src/api/esn_drivers.md b/docs/src/api/esn_drivers.md
old mode 100644
new mode 100755
diff --git a/docs/src/api/predict.md b/docs/src/api/predict.md
old mode 100644
new mode 100755
diff --git a/docs/src/api/reca.md b/docs/src/api/reca.md
old mode 100644
new mode 100755
diff --git a/docs/src/api/states.md b/docs/src/api/states.md
old mode 100644
new mode 100755
diff --git a/docs/src/api/training.md b/docs/src/api/training.md
old mode 100644
new mode 100755
diff --git a/docs/src/assets/favicon.ico b/docs/src/assets/favicon.ico
old mode 100644
new mode 100755
diff --git a/docs/src/assets/logo.png b/docs/src/assets/logo.png
old mode 100644
new mode 100755
diff --git a/docs/src/esn_tutorials/change_layers.md b/docs/src/esn_tutorials/change_layers.md
old mode 100644
new mode 100755
diff --git a/docs/src/esn_tutorials/data/santafe_laser.txt b/docs/src/esn_tutorials/data/santafe_laser.txt
old mode 100644
new mode 100755
diff --git a/docs/src/esn_tutorials/deep_esn.md b/docs/src/esn_tutorials/deep_esn.md
old mode 100644
new mode 100755
diff --git a/docs/src/esn_tutorials/different_drivers.md b/docs/src/esn_tutorials/different_drivers.md
old mode 100644
new mode 100755
diff --git a/docs/src/esn_tutorials/different_training.md b/docs/src/esn_tutorials/different_training.md
old mode 100644
new mode 100755
diff --git a/docs/src/esn_tutorials/hybrid.md b/docs/src/esn_tutorials/hybrid.md
old mode 100644
new mode 100755
diff --git a/docs/src/esn_tutorials/lorenz_basic.md b/docs/src/esn_tutorials/lorenz_basic.md
old mode 100644
new mode 100755
diff --git a/docs/src/general/different_training.md b/docs/src/general/different_training.md
old mode 100644
new mode 100755
diff --git a/docs/src/general/predictive_generative.md b/docs/src/general/predictive_generative.md
old mode 100644
new mode 100755
diff --git a/docs/src/general/states_variation.md b/docs/src/general/states_variation.md
old mode 100644
new mode 100755
diff --git a/docs/src/index.md b/docs/src/index.md
old mode 100644
new mode 100755
diff --git a/docs/src/reca_tutorials/5bitinput.txt b/docs/src/reca_tutorials/5bitinput.txt
old mode 100644
new mode 100755
diff --git a/docs/src/reca_tutorials/5bitoutput.txt b/docs/src/reca_tutorials/5bitoutput.txt
old mode 100644
new mode 100755
diff --git a/docs/src/reca_tutorials/reca.md b/docs/src/reca_tutorials/reca.md
old mode 100644
new mode 100755
diff --git a/ext/RCLIBSVMExt.jl b/ext/RCLIBSVMExt.jl
old mode 100644
new mode 100755
diff --git a/ext/RCMLJLinearModelsExt.jl b/ext/RCMLJLinearModelsExt.jl
old mode 100644
new mode 100755
diff --git a/src/ReservoirComputing.jl b/src/ReservoirComputing.jl
old mode 100644
new mode 100755
index 8a9abab5..3ef8d25c
--- a/src/ReservoirComputing.jl
+++ b/src/ReservoirComputing.jl
@@ -3,28 +3,15 @@ module ReservoirComputing
using Adapt
using CellularAutomata
using Distances
-using Distributions
using LinearAlgebra
using NNlib
-using Optim
using PartialFunctions
using Random
+using Reexport: Reexport, @reexport
using Statistics
-using WeightInitializers
-
-export NLADefault, NLAT1, NLAT2, NLAT3
-export StandardStates, ExtendedStates, PaddedStates, PaddedExtendedStates
-export StandardRidge
-export scaled_rand, weighted_init, informed_init, minimal_init
-export rand_sparse, delay_line, delay_line_backward, cycle_jumps, simple_cycle, pseudo_svd
-export RNN, MRNN, GRU, GRUParams, FullyGated, Minimal
-export train
-export ESN
-export HybridESN, KnowledgeModel
-export DeepESN
-export RECA
-export RandomMapping, RandomMaps
-export Generative, Predictive, OutputLayer
+using StatsBase: sample
+using WeightInitializers: DeviceAgnostic, PartialFunction, Utils
+@reexport using WeightInitializers
#define global types
abstract type AbstractReservoirComputer end
@@ -104,34 +91,39 @@ function Predictive(prediction_data)
Predictive(prediction_data, prediction_len)
end
-__partial_apply(fn, inp) = fn$inp
-#fallbacks for initializers
+#fallbacks for initializers; eventually to be removed once fully migrated to WeightInitializers.jl
for initializer in (:rand_sparse, :delay_line, :delay_line_backward, :cycle_jumps,
:simple_cycle, :pseudo_svd,
:scaled_rand, :weighted_init, :informed_init, :minimal_init)
- NType = ifelse(initializer === :rand_sparse, Real, Number)
- @eval function ($initializer)(dims::Integer...; kwargs...)
- return $initializer(WeightInitializers._default_rng(), Float32, dims...; kwargs...)
- end
- @eval function ($initializer)(rng::AbstractRNG, dims::Integer...; kwargs...)
- return $initializer(rng, Float32, dims...; kwargs...)
- end
- @eval function ($initializer)(::Type{T},
- dims::Integer...; kwargs...) where {T <: $NType}
- return $initializer(WeightInitializers._default_rng(), T, dims...; kwargs...)
- end
- @eval function ($initializer)(rng::AbstractRNG; kwargs...)
- return __partial_apply($initializer, (rng, (; kwargs...)))
+ @eval begin
+ function ($initializer)(dims::Integer...; kwargs...)
+ return $initializer(Utils.default_rng(), Float32, dims...; kwargs...)
+ end
+ function ($initializer)(rng::AbstractRNG, dims::Integer...; kwargs...)
+ return $initializer(rng, Float32, dims...; kwargs...)
+ end
+ function ($initializer)(::Type{T}, dims::Integer...; kwargs...) where {T <: Number}
+ return $initializer(Utils.default_rng(), T, dims...; kwargs...)
+ end
+
+ # Partial application
+ function ($initializer)(rng::AbstractRNG; kwargs...)
+ return PartialFunction.Partial{Nothing}($initializer, rng, kwargs)
+ end
+ function ($initializer)(::Type{T}; kwargs...) where {T <: Number}
+ return PartialFunction.Partial{T}($initializer, nothing, kwargs)
+ end
+ function ($initializer)(rng::AbstractRNG, ::Type{T}; kwargs...) where {T <: Number}
+ return PartialFunction.Partial{T}($initializer, rng, kwargs)
+ end
+ function ($initializer)(; kwargs...)
+ return PartialFunction.Partial{Nothing}($initializer, nothing, kwargs)
+ end
end
- @eval function ($initializer)(rng::AbstractRNG,
- ::Type{T}; kwargs...) where {T <: $NType}
- return __partial_apply($initializer, ((rng, T), (; kwargs...)))
- end
- @eval ($initializer)(; kwargs...) = __partial_apply(
- $initializer, (; kwargs...))
end
+
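For reference, a minimal usage sketch of the partial-application forms above, assuming the returned `PartialFunction.Partial` objects are callable the same way as WeightInitializers' own initializers (names below are illustrative):

```julia
using Random, ReservoirComputing

rng = Xoshiro(42)
init = scaled_rand(; scaling = 0.2)   # kwargs captured, no matrix materialized yet
W_in = init(rng, 100, 3)              # rng (and the default Float32 eltype) supplied at call time
```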
#general
include("states.jl")
include("predict.jl")
@@ -158,4 +150,18 @@ if !isdefined(Base, :get_extension)
include("../ext/RCLIBSVMExt.jl")
end
+export NLADefault, NLAT1, NLAT2, NLAT3
+export StandardStates, ExtendedStates, PaddedStates, PaddedExtendedStates
+export StandardRidge
+export scaled_rand, weighted_init, informed_init, minimal_init
+export rand_sparse, delay_line, delay_line_backward, cycle_jumps, simple_cycle, pseudo_svd
+export RNN, MRNN, GRU, GRUParams, FullyGated, Minimal
+export train
+export ESN
+export HybridESN, KnowledgeModel
+export DeepESN
+export RECA, sample
+export RandomMapping, RandomMaps
+export Generative, Predictive, OutputLayer
+
end #module
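Since WeightInitializers is now `@reexport`-ed, its initializers become available directly from `ReservoirComputing`. A hedged sketch, assuming the WeightInitializers 1.x names and signatures:

```julia
using ReservoirComputing   # @reexport also brings in WeightInitializers' exports

W_in = sparse_init(Float32, 100, 3; sparsity = 0.9)    # from WeightInitializers
W_res = rand_sparse(Float32, 100, 100; radius = 1.2)   # ReservoirComputing initializer
```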
diff --git a/src/esn/deepesn.jl b/src/esn/deepesn.jl
old mode 100644
new mode 100755
index 636e0db1..cd8de8c0
--- a/src/esn/deepesn.jl
+++ b/src/esn/deepesn.jl
@@ -80,7 +80,7 @@ function DeepESN(train_data,
nla_type = NLADefault(),
states_type = StandardStates(),
washout::Int = 0,
- rng = WeightInitializers._default_rng(),
+ rng = Utils.default_rng(),
T = Float64,
matrix_type = typeof(train_data))
if states_type isa AbstractPaddedStates
diff --git a/src/esn/esn.jl b/src/esn/esn.jl
old mode 100644
new mode 100755
index f53939a5..1c1a7a65
--- a/src/esn/esn.jl
+++ b/src/esn/esn.jl
@@ -54,7 +54,7 @@ function ESN(train_data,
nla_type = NLADefault(),
states_type = StandardStates(),
washout = 0,
- rng = WeightInitializers._default_rng(),
+ rng = Utils.default_rng(),
T = Float32,
matrix_type = typeof(train_data))
if states_type isa AbstractPaddedStates
diff --git a/src/esn/esn_input_layers.jl b/src/esn/esn_input_layers.jl
old mode 100644
new mode 100755
index 6bbb5267..296f1768
--- a/src/esn/esn_input_layers.jl
+++ b/src/esn/esn_input_layers.jl
@@ -26,7 +26,8 @@ function scaled_rand(rng::AbstractRNG,
dims::Integer...;
scaling = T(0.1)) where {T <: Number}
res_size, in_size = dims
- layer_matrix = T.(rand(rng, Uniform(-scaling, scaling), res_size, in_size))
+ layer_matrix = (DeviceAgnostic.rand(rng, T, res_size, in_size) .- T(0.5)) .*
+ (T(2) * scaling)
return layer_matrix
end
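The replacement maps uniform `[0, 1)` draws affinely onto `[-scaling, scaling)`, matching the removed `Uniform(-scaling, scaling)` sampling up to endpoint handling; a quick check of that equivalence:

```julia
using Random

s = 0.1f0
u = rand(Xoshiro(0), Float32, 10_000)   # stand-in for DeviceAgnostic.rand draws
x = (u .- 0.5f0) .* (2f0 * s)           # same affine map as above
@assert all(-s .<= x .< s)              # uniform on [-s, s), like Uniform(-s, s)
```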
@@ -65,13 +66,12 @@ function weighted_init(rng::AbstractRNG,
scaling = T(0.1)) where {T <: Number}
approx_res_size, in_size = dims
res_size = Int(floor(approx_res_size / in_size) * in_size)
- layer_matrix = zeros(T, res_size, in_size)
+ layer_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size)
q = floor(Int, res_size / in_size)
for i in 1:in_size
- layer_matrix[((i - 1) * q + 1):((i) * q), i] = rand(rng,
- Uniform(-scaling, scaling),
- q)
+ layer_matrix[((i - 1) * q + 1):((i) * q), i] = (DeviceAgnostic.rand(rng, T, q) .-
+ T(0.5)) .* (T(2) * scaling)
end
return layer_matrix
@@ -113,25 +113,28 @@ function informed_init(rng::AbstractRNG, ::Type{T}, dims::Integer...;
throw(DimensionMismatch("in_size must be greater than model_in_size"))
end
- input_matrix = zeros(res_size, in_size)
- zero_connections = zeros(in_size)
+ input_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size)
+ zero_connections = DeviceAgnostic.zeros(rng, T, in_size)
num_for_state = floor(Int, res_size * gamma)
num_for_model = floor(Int, res_size * (1 - gamma))
for i in 1:num_for_state
    idxs = findall(Bool[zero_connections == input_matrix[i, :]
                        for i in 1:size(input_matrix, 1)])
- random_row_idx = idxs[rand(rng, 1:end)]
- random_clm_idx = range(1, state_size, step = 1)[rand(rng, 1:end)]
- input_matrix[random_row_idx, random_clm_idx] = rand(rng, Uniform(-scaling, scaling))
+        # rand_range (defined alongside the reservoir initializers) draws a uniform index in 1:n
+        random_row_idx = idxs[rand_range(rng, T, length(idxs))]
+        random_clm_idx = range(1, state_size, step = 1)[rand_range(rng, T, state_size)]
+ input_matrix[random_row_idx, random_clm_idx] = (DeviceAgnostic.rand(rng, T) -
+ T(0.5)) .* (T(2) * scaling)
end
for i in 1:num_for_model
    idxs = findall(Bool[zero_connections == input_matrix[i, :]
                        for i in 1:size(input_matrix, 1)])
- random_row_idx = idxs[rand(rng, 1:end)]
- random_clm_idx = range(state_size + 1, in_size, step = 1)[rand(rng, 1:end)]
- input_matrix[random_row_idx, random_clm_idx] = rand(rng, Uniform(-scaling, scaling))
+        random_row_idx = idxs[rand_range(rng, T, length(idxs))]
+        random_clm_idx = range(state_size + 1, in_size, step = 1)[rand_range(
+            rng, T, in_size - state_size)]
+ input_matrix[random_row_idx, random_clm_idx] = (DeviceAgnostic.rand(rng, T) -
+ T(0.5)) .* (T(2) * scaling)
end
return input_matrix
@@ -196,11 +199,14 @@ function _create_bernoulli(p::Number,
weight::Number,
rng::AbstractRNG,
::Type{T}) where {T <: Number}
- input_matrix = zeros(T, res_size, in_size)
+ input_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size)
for i in 1:res_size
for j in 1:in_size
- rand(rng, Bernoulli(p)) ? (input_matrix[i, j] = weight) :
- (input_matrix[i, j] = -weight)
+ if DeviceAgnostic.rand(rng, T) < p
+ input_matrix[i, j] = weight
+ else
+ input_matrix[i, j] = -weight
+ end
end
end
return input_matrix
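The comparison `DeviceAgnostic.rand(rng, T) < p` replaces the removed `Bernoulli(p)` draw: a uniform `[0, 1)` sample falls below `p` with probability exactly `p`. A quick check:

```julia
using Random, Statistics

rng = Xoshiro(1)
p = 0.3
hits = mean(rand(rng, Float32) < p for _ in 1:100_000)   # fraction of draws below p
@assert isapprox(hits, p; atol = 0.01)                   # true with probability ≈ p
```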
@@ -216,8 +222,8 @@ function _create_irrational(irrational::Irrational,
setprecision(BigFloat, Int(ceil(log2(10) * (res_size * in_size + start + 1))))
ir_string = string(BigFloat(irrational)) |> collect
deleteat!(ir_string, findall(x -> x == '.', ir_string))
- ir_array = zeros(length(ir_string))
- input_matrix = zeros(T, res_size, in_size)
+ ir_array = DeviceAgnostic.zeros(rng, T, length(ir_string))
+ input_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size)
for i in 1:length(ir_string)
ir_array[i] = parse(Int, ir_string[i])
@@ -225,7 +231,7 @@ function _create_irrational(irrational::Irrational,
for i in 1:res_size
for j in 1:in_size
- random_number = rand(rng, T)
+ random_number = DeviceAgnostic.rand(rng, T)
input_matrix[i, j] = random_number < 0.5 ? -weight : weight
end
end
diff --git a/src/esn/esn_predict.jl b/src/esn/esn_predict.jl
old mode 100644
new mode 100755
diff --git a/src/esn/esn_reservoir_drivers.jl b/src/esn/esn_reservoir_drivers.jl
old mode 100644
new mode 100755
diff --git a/src/esn/esn_reservoirs.jl b/src/esn/esn_reservoirs.jl
old mode 100644
new mode 100755
index ef151084..6ac4ed23
--- a/src/esn/esn_reservoirs.jl
+++ b/src/esn/esn_reservoirs.jl
@@ -66,7 +66,7 @@ function delay_line(rng::AbstractRNG,
::Type{T},
dims::Integer...;
weight = T(0.1)) where {T <: Number}
- reservoir_matrix = zeros(T, dims...)
+ reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...)
@assert length(dims) == 2&&dims[1] == dims[2] "The dimensions must define a square matrix (e.g., (100, 100))"
for i in 1:(dims[1] - 1)
@@ -107,7 +107,7 @@ function delay_line_backward(rng::AbstractRNG,
weight = T(0.1),
fb_weight = T(0.2)) where {T <: Number}
res_size = first(dims)
- reservoir_matrix = zeros(T, dims...)
+ reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...)
for i in 1:(res_size - 1)
reservoir_matrix[i + 1, i] = weight
@@ -148,7 +148,7 @@ function cycle_jumps(rng::AbstractRNG,
jump_weight::Number = T(0.1),
jump_size::Int = 3) where {T <: Number}
res_size = first(dims)
- reservoir_matrix = zeros(T, dims...)
+ reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...)
for i in 1:(res_size - 1)
reservoir_matrix[i + 1, i] = cycle_weight
@@ -194,7 +194,7 @@ function simple_cycle(rng::AbstractRNG,
::Type{T},
dims::Integer...;
weight = T(0.1)) where {T <: Number}
- reservoir_matrix = zeros(T, dims...)
+ reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...)
for i in 1:(dims[1] - 1)
reservoir_matrix[i + 1, i] = weight
@@ -237,38 +237,42 @@ function pseudo_svd(rng::AbstractRNG,
sparsity::Number = 0.1,
sorted::Bool = true,
reverse_sort::Bool = false) where {T <: Number}
- reservoir_matrix = create_diag(dims[1],
- max_value,
- T;
+ reservoir_matrix = create_diag(rng, T, dims[1],
+ max_value;
sorted = sorted,
reverse_sort = reverse_sort)
tmp_sparsity = get_sparsity(reservoir_matrix, dims[1])
while tmp_sparsity <= sparsity
- reservoir_matrix *= create_qmatrix(dims[1],
- rand(1:dims[1]),
- rand(1:dims[1]),
- rand(T) * T(2) - T(1),
- T)
+ reservoir_matrix *= create_qmatrix(rng, T, dims[1],
+ rand_range(rng, T, dims[1]),
+ rand_range(rng, T, dims[1]),
+ DeviceAgnostic.rand(rng, T) * T(2) - T(1))
tmp_sparsity = get_sparsity(reservoir_matrix, dims[1])
end
return reservoir_matrix
end
-function create_diag(dim::Number, max_value::Number, ::Type{T};
+#workaround: draw a uniform integer in 1:n using only DeviceAgnostic.rand
+function rand_range(rng, T, n::Int)
+ return Int(1 + floor(DeviceAgnostic.rand(rng, T) * n))
+end
+
+function create_diag(rng::AbstractRNG, ::Type{T}, dim::Number, max_value::Number;
sorted::Bool = true, reverse_sort::Bool = false) where {T <: Number}
- diagonal_matrix = zeros(T, dim, dim)
+ diagonal_matrix = DeviceAgnostic.zeros(rng, T, dim, dim)
if sorted == true
if reverse_sort == true
- diagonal_values = sort(rand(T, dim) .* max_value, rev = true)
+ diagonal_values = sort(
+ DeviceAgnostic.rand(rng, T, dim) .* max_value, rev = true)
diagonal_values[1] = max_value
else
- diagonal_values = sort(rand(T, dim) .* max_value)
+ diagonal_values = sort(DeviceAgnostic.rand(rng, T, dim) .* max_value)
diagonal_values[end] = max_value
end
else
- diagonal_values = rand(T, dim) .* max_value
+ diagonal_values = DeviceAgnostic.rand(rng, T, dim) .* max_value
end
for i in 1:dim
@@ -278,12 +282,11 @@ function create_diag(dim::Number, max_value::Number, ::Type{T};
return diagonal_matrix
end
-function create_qmatrix(dim::Number,
+function create_qmatrix(rng::AbstractRNG, ::Type{T}, dim::Number,
coord_i::Number,
coord_j::Number,
- theta::Number,
- ::Type{T}) where {T <: Number}
- qmatrix = zeros(T, dim, dim)
+ theta::Number) where {T <: Number}
+ qmatrix = DeviceAgnostic.zeros(rng, T, dim, dim)
for i in 1:dim
qmatrix[i, i] = 1.0
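The body of `create_qmatrix` continues beyond this hunk; the sketch below shows the kind of matrix it is assumed to build, an identity with a 2×2 rotation embedded in the `(coord_i, coord_j)` plane, so each factor applied in `pseudo_svd` is orthogonal and preserves the singular values set by `create_diag`. `givens_like` is an illustrative stand-in, not part of the package:

```julia
using LinearAlgebra

# illustrative stand-in for the assumed create_qmatrix output
function givens_like(::Type{T}, dim::Int, i::Int, j::Int, theta) where {T}
    Q = Matrix{T}(I, dim, dim)
    Q[i, i] = T(cos(theta)); Q[j, j] = T(cos(theta))
    Q[i, j] = T(-sin(theta)); Q[j, i] = T(sin(theta))
    return Q
end

Q = givens_like(Float64, 5, 2, 4, 0.3)
@assert Q' * Q ≈ I(5)   # orthogonal: multiplying by Q leaves the singular values unchanged
```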
diff --git a/src/esn/hybridesn.jl b/src/esn/hybridesn.jl
old mode 100644
new mode 100755
index ad134a9e..b766b013
--- a/src/esn/hybridesn.jl
+++ b/src/esn/hybridesn.jl
@@ -126,7 +126,7 @@ function HybridESN(model,
nla_type = NLADefault(),
states_type = StandardStates(),
washout = 0,
- rng = WeightInitializers._default_rng(),
+ rng = Utils.default_rng(),
T = Float32,
matrix_type = typeof(train_data))
train_data = vcat(train_data, model.model_data[:, 1:(end - 1)])
diff --git a/src/predict.jl b/src/predict.jl
old mode 100644
new mode 100755
diff --git a/src/reca/reca.jl b/src/reca/reca.jl
old mode 100644
new mode 100755
diff --git a/src/reca/reca_input_encodings.jl b/src/reca/reca_input_encodings.jl
old mode 100644
new mode 100755
diff --git a/src/states.jl b/src/states.jl
old mode 100644
new mode 100755
diff --git a/src/train/linear_regression.jl b/src/train/linear_regression.jl
old mode 100644
new mode 100755
diff --git a/test/esn/deepesn.jl b/test/esn/deepesn.jl
old mode 100644
new mode 100755
diff --git a/test/esn/test_drivers.jl b/test/esn/test_drivers.jl
old mode 100644
new mode 100755
diff --git a/test/esn/test_hybrid.jl b/test/esn/test_hybrid.jl
old mode 100644
new mode 100755
diff --git a/test/esn/test_inits.jl b/test/esn/test_inits.jl
old mode 100644
new mode 100755
diff --git a/test/esn/test_train.jl b/test/esn/test_train.jl
old mode 100644
new mode 100755
diff --git a/test/qa.jl b/test/qa.jl
old mode 100644
new mode 100755
diff --git a/test/reca/test_predictive.jl b/test/reca/test_predictive.jl
old mode 100644
new mode 100755
diff --git a/test/runtests.jl b/test/runtests.jl
old mode 100644
new mode 100755
index 27a8ed2c..8f051129
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -7,7 +7,7 @@ using Test
end
@testset "Echo State Networks" begin
- @safetestset "ESN Input Layers" include("esn/test_inits.jl")
+ @safetestset "ESN Initializers" include("esn/test_inits.jl")
@safetestset "ESN Train and Predict" include("esn/test_train.jl")
@safetestset "ESN Drivers" include("esn/test_drivers.jl")
@safetestset "Hybrid ESN" include("esn/test_hybrid.jl")
diff --git a/test/test_states.jl b/test/test_states.jl
old mode 100644
new mode 100755