1+ # ## input layers
"""
    scaled_rand([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
        scaling=0.1)

Create and return a matrix with random values, uniformly distributed within
a range defined by `scaling`.

# Arguments

  - `rng`: Random number generator. Default is `Utils.default_rng()`
    from WeightInitializers.
  - `T`: Type of the elements in the reservoir matrix.
    Default is `Float32`.
  - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`.
  - `scaling`: A scaling factor to define the range of the uniform distribution.
    The matrix elements will be randomly chosen from the
    range `[-scaling, scaling]`. Defaults to `0.1`.

# Examples

```jldoctest
julia> res_input = scaled_rand(8, 3)
8×3 Matrix{Float32}:
 -0.0669356  -0.0292692  -0.0188943
  0.0159724   0.004071   -0.0737949
  0.026355   -0.0191563   0.0714962
 -0.0177412   0.0279123   0.0892906
 -0.0184405   0.0567368   0.0190222
  0.0944272   0.0679244   0.0148647
 -0.0799005  -0.0891089  -0.0444782
 -0.0970182   0.0934286   0.03553
```
"""
function scaled_rand(rng::AbstractRNG, ::Type{T}, dims::Integer...;
        scaling=T(0.1)) where {T <: Number}
    res_size, in_size = dims
    # Shift U(0, 1) samples to U(-0.5, 0.5), then widen to [-scaling, scaling].
    centered = DeviceAgnostic.rand(rng, T, res_size, in_size) .- T(0.5)
    return centered .* (T(2) * scaling)
end
42+
"""
    weighted_init([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
        scaling=0.1)

Create and return a matrix representing a weighted input layer.
This initializer generates a weighted input matrix with random non-zero
elements distributed uniformly within the range [-`scaling`, `scaling`] [^Lu2017].

# Arguments

  - `rng`: Random number generator. Default is `Utils.default_rng()`
    from WeightInitializers.
  - `T`: Type of the elements in the reservoir matrix.
    Default is `Float32`.
  - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`.
  - `scaling`: The scaling factor for the weight distribution.
    Defaults to `0.1`.

# Examples

```jldoctest
julia> res_input = weighted_init(8, 3)
6×3 Matrix{Float32}:
  0.0452399   0.0          0.0
 -0.0348047   0.0          0.0
  0.0        -0.0386004    0.0
  0.0         0.00981022   0.0
  0.0         0.0          0.0577838
  0.0         0.0         -0.0562827
```

[^Lu2017]: Lu, Zhixin, et al.
    "Reservoir observers: Model-free inference of unmeasured variables in
    chaotic systems."
    Chaos: An Interdisciplinary Journal of Nonlinear Science 27.4 (2017): 041102.
"""
function weighted_init(rng::AbstractRNG, ::Type{T}, dims::Integer...;
        scaling=T(0.1)) where {T <: Number}
    approx_res_size, in_size = dims
    # Shrink the reservoir size down to the largest multiple of `in_size`
    # not exceeding the requested size, so each input gets an equal share
    # of `q` reservoir rows.
    q = floor(Int, approx_res_size / in_size)
    res_size = q * in_size
    layer_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size)

    for col in 1:in_size
        rows = ((col - 1) * q + 1):(col * q)
        layer_matrix[rows, col] = (DeviceAgnostic.rand(rng, T, q) .- T(0.5)) .*
                                  (T(2) * scaling)
    end

    return layer_matrix
end
93+
"""
    informed_init([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
        scaling=0.1, model_in_size, gamma=0.5)

Create an input layer for informed echo state networks [^Pathak2018].

# Arguments

  - `rng`: Random number generator. Default is `Utils.default_rng()`
    from WeightInitializers.
  - `T`: Type of the elements in the reservoir matrix.
    Default is `Float32`.
  - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`.
  - `scaling`: The scaling factor for the input matrix.
    Default is 0.1.
  - `model_in_size`: The size of the input model.
  - `gamma`: The gamma value. Default is 0.5.

# Examples

[^Pathak2018]: Pathak, Jaideep, et al. "Hybrid forecasting of chaotic processes:
    Using machine learning in conjunction with a knowledge-based model."
    Chaos: An Interdisciplinary Journal of Nonlinear Science 28.4 (2018).
"""
function informed_init(rng::AbstractRNG, ::Type{T}, dims::Integer...;
        scaling=T(0.1), model_in_size, gamma=T(0.5)) where {T <: Number}
    res_size, in_size = dims
    state_size = in_size - model_in_size

    if state_size <= 0
        throw(DimensionMismatch("in_size must be greater than model_in_size"))
    end

    input_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size)
    zero_connections = DeviceAgnostic.zeros(rng, T, in_size)
    # gamma splits the reservoir rows between the raw-state inputs and the
    # knowledge-based model inputs.
    num_for_state = floor(Int, res_size * gamma)
    num_for_model = floor(Int, res_size * (1 - gamma))

    # Assign `num_for_state` connections to the state section of the input
    # (columns 1:state_size), each placed on a row that is still all-zero.
    for _ in 1:num_for_state
        # `==` (not broadcast `.==`) compares whole rows, giving one Bool per row.
        idxs = findall(Bool[zero_connections == input_matrix[row, :]
                            for row in 1:size(input_matrix, 1)])
        random_row_idx = idxs[rand(rng, 1:length(idxs))]
        random_clm_idx = rand(rng, 1:state_size)
        input_matrix[random_row_idx, random_clm_idx] = (DeviceAgnostic.rand(rng, T) -
                                                        T(0.5)) * (T(2) * scaling)
    end

    # Assign `num_for_model` connections to the model section of the input
    # (columns state_size+1:in_size), again only on still-empty rows.
    for _ in 1:num_for_model
        idxs = findall(Bool[zero_connections == input_matrix[row, :]
                            for row in 1:size(input_matrix, 1)])
        random_row_idx = idxs[rand(rng, 1:length(idxs))]
        random_clm_idx = rand(rng, (state_size + 1):in_size)
        input_matrix[random_row_idx, random_clm_idx] = (DeviceAgnostic.rand(rng, T) -
                                                        T(0.5)) * (T(2) * scaling)
    end

    return input_matrix
end
153+
"""
    minimal_init([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
        sampling_type=:bernoulli, weight=0.1, irrational=pi, start=1, p=0.5)

Create a layer matrix with uniform weights determined by `weight`. The sign difference
is randomly determined by the `sampling` chosen.

# Arguments

  - `rng`: Random number generator. Default is `Utils.default_rng()`
    from WeightInitializers.
  - `T`: Type of the elements in the reservoir matrix.
    Default is `Float32`.
  - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`.
  - `weight`: The weight used to fill the layer matrix. Default is 0.1.
  - `sampling_type`: The sampling parameters used to generate the input matrix.
    Default is `:bernoulli`.
  - `irrational`: Irrational number chosen for sampling if `sampling_type=:irrational`.
    Default is `pi`.
  - `start`: Starting value for the irrational sample. Default is 1
  - `p`: Probability for the Bernoulli sampling. Lower probability increases negative
    value. Higher probability increases positive values. Default is 0.5

# Examples

```jldoctest
julia> res_input = minimal_init(8, 3)
8×3 Matrix{Float32}:
  0.1  -0.1   0.1
 -0.1   0.1   0.1
 -0.1  -0.1   0.1
 -0.1  -0.1  -0.1
  0.1   0.1   0.1
 -0.1  -0.1  -0.1
 -0.1  -0.1   0.1
  0.1  -0.1   0.1

julia> res_input = minimal_init(8, 3; sampling_type=:irrational)
8×3 Matrix{Float32}:
 -0.1   0.1  -0.1
  0.1  -0.1  -0.1
  0.1   0.1  -0.1
  0.1   0.1   0.1
 -0.1  -0.1  -0.1
  0.1   0.1   0.1
  0.1   0.1  -0.1
 -0.1   0.1  -0.1

julia> res_input = minimal_init(8, 3; p=0.1) # lower p -> more negative signs
8×3 Matrix{Float32}:
 -0.1  -0.1  -0.1
 -0.1  -0.1  -0.1
 -0.1  -0.1  -0.1
 -0.1  -0.1  -0.1
  0.1  -0.1  -0.1
 -0.1  -0.1  -0.1
 -0.1  -0.1  -0.1
 -0.1  -0.1  -0.1

julia> res_input = minimal_init(8, 3; p=0.8)# higher p -> more positive signs
8×3 Matrix{Float32}:
  0.1   0.1  0.1
 -0.1   0.1  0.1
 -0.1   0.1  0.1
  0.1   0.1  0.1
  0.1   0.1  0.1
  0.1  -0.1  0.1
 -0.1   0.1  0.1
  0.1   0.1  0.1
```
"""
function minimal_init(rng::AbstractRNG, ::Type{T}, dims::Integer...;
        sampling_type::Symbol=:bernoulli, weight::Number=T(0.1), irrational::Real=pi,
        start::Int=1, p::Number=T(0.5)) where {T <: Number}
    res_size, in_size = dims
    # Dispatch on the requested sampling scheme; each helper builds the full matrix.
    if sampling_type == :bernoulli
        return _create_bernoulli(p, res_size, in_size, weight, rng, T)
    elseif sampling_type == :irrational
        return _create_irrational(irrational, start, res_size, in_size, weight, rng, T)
    else
        error("Sampling type not allowed. Please use one of :bernoulli or :irrational")
    end
end
244+
# Build a res_size x in_size matrix whose entries all have magnitude `weight`;
# each sign is drawn independently: +weight with probability `p`, else -weight.
function _create_bernoulli(p::Number, res_size::Int, in_size::Int, weight::Number,
        rng::AbstractRNG, ::Type{T}) where {T <: Number}
    input_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size)
    # Row-major traversal (i outer, j inner) matches the RNG draw order.
    for i in 1:res_size, j in 1:in_size
        input_matrix[i, j] = DeviceAgnostic.rand(rng, T) < p ? weight : -weight
    end
    return input_matrix
end
259+
# Build a res_size x in_size matrix whose entries all have magnitude `weight`,
# with signs taken deterministically from the decimal expansion of `irrational`
# (skipping the first `start` digits): digits 0-4 -> -weight, 5-9 -> +weight.
# Fixes the previous version, which computed the digit array and then discarded
# it, drawing signs from `rng` instead — so `irrational` and `start` had no
# effect on the result.
function _create_irrational(irrational::Irrational, start::Int, res_size::Int,
        in_size::Int, weight::Number, rng::AbstractRNG,
        ::Type{T}) where {T <: Number}
    # One decimal digit is consumed per matrix entry, offset by `start`;
    # request enough BigFloat precision to cover all of them.
    needed_digits = res_size * in_size + start + 1
    setprecision(BigFloat, Int(ceil(log2(10) * needed_digits)))
    ir_digits = collect(string(BigFloat(irrational)))
    deleteat!(ir_digits, findall(==('.'), ir_digits))

    if length(ir_digits) < start + res_size * in_size
        throw(ArgumentError("not enough digits of the irrational number for a " *
                            "$(res_size)x$(in_size) matrix with start=$start"))
    end

    input_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size)
    for i in 1:res_size
        for j in 1:in_size
            digit = parse(Int, ir_digits[start + (i - 1) * in_size + j])
            input_matrix[i, j] = digit < 5 ? -weight : weight
        end
    end

    return input_matrix
end
282+
283+ # ## reservoirs
284+
1285"""
2286 rand_sparse([rng::AbstractRNG=Utils.default_rng()], [T=Float32], dims...;
3287 radius=1.0, sparsity=0.1, std=1.0)
# Ratio of nonzero to zero entries in the `dim x dim` matrix `M`.
# Uses `count(!iszero, M)` instead of materializing `M[M .!= 0]` twice,
# which allocated two filtered copies per call.
function get_sparsity(M, dim)
    num_nonzero = count(!iszero, M)
    return num_nonzero / (dim * dim - num_nonzero)
end
657+
# ## fallbacks
# fallbacks for initializers #eventually to remove once migrated to WeightInitializers.jl
for initializer in (:rand_sparse, :delay_line, :delay_line_backward, :cycle_jumps,
        :simple_cycle, :pseudo_svd,
        :scaled_rand, :weighted_init, :informed_init, :minimal_init)
    @eval begin
        # Convenience methods: fill in the default rng and/or Float32 eltype
        # when the caller supplies only `dims`.
        function ($initializer)(dims::Integer...; kwargs...)
            return $initializer(Utils.default_rng(), Float32, dims...; kwargs...)
        end
        function ($initializer)(rng::AbstractRNG, dims::Integer...; kwargs...)
            return $initializer(rng, Float32, dims...; kwargs...)
        end
        function ($initializer)(::Type{T}, dims::Integer...; kwargs...) where {T <: Number}
            return $initializer(Utils.default_rng(), T, dims...; kwargs...)
        end

        # Partial application: calling without `dims` returns a partially
        # applied initializer to be completed later with the dimensions.
        function ($initializer)(rng::AbstractRNG; kwargs...)
            return PartialFunction.Partial{Nothing}($initializer, rng, kwargs)
        end
        function ($initializer)(::Type{T}; kwargs...) where {T <: Number}
            return PartialFunction.Partial{T}($initializer, nothing, kwargs)
        end
        function ($initializer)(rng::AbstractRNG, ::Type{T}; kwargs...) where {T <: Number}
            return PartialFunction.Partial{T}($initializer, rng, kwargs)
        end
        function ($initializer)(; kwargs...)
            return PartialFunction.Partial{Nothing}($initializer, nothing, kwargs)
        end
    end
end
0 commit comments