diff --git a/.JuliaFormatter.toml b/.JuliaFormatter.toml old mode 100755 new mode 100644 index 959ad88a..f0d96e5e --- a/.JuliaFormatter.toml +++ b/.JuliaFormatter.toml @@ -1,3 +1,9 @@ style = "sciml" format_markdown = true -format_docstrings = true \ No newline at end of file +whitespace_in_kwargs = false +margin = 92 +indent = 4 +format_docstrings = true +separate_kwargs_with_semicolon = true +always_for_in = true +annotate_untyped_fields_with_any = false \ No newline at end of file diff --git a/.buildkite/documentation.yml b/.buildkite/documentation.yml old mode 100755 new mode 100644 diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml old mode 100755 new mode 100644 diff --git a/.github/dependabot.yml b/.github/dependabot.yml old mode 100755 new mode 100644 diff --git a/.github/workflows/CompatHelper.yml b/.github/workflows/CompatHelper.yml old mode 100755 new mode 100644 diff --git a/.github/workflows/Downgrade.yml b/.github/workflows/Downgrade.yml old mode 100755 new mode 100644 diff --git a/.github/workflows/FormatCheck.yml b/.github/workflows/FormatCheck.yml old mode 100755 new mode 100644 diff --git a/.github/workflows/Invalidations.yml b/.github/workflows/Invalidations.yml old mode 100755 new mode 100644 diff --git a/.github/workflows/TagBot.yml b/.github/workflows/TagBot.yml old mode 100755 new mode 100644 diff --git a/.github/workflows/Tests.yml b/.github/workflows/Tests.yml old mode 100755 new mode 100644 diff --git a/.gitignore b/.gitignore old mode 100755 new mode 100644 index f4587026..09c048f8 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,6 @@ *.jl.cov *.jl.mem .DS_Store -/Manifest.toml +Manifest.toml /dev/ docs/build \ No newline at end of file diff --git a/.typos.toml b/.typos.toml old mode 100755 new mode 100644 diff --git a/CITATION.bib b/CITATION.bib old mode 100755 new mode 100644 diff --git a/LICENSE b/LICENSE old mode 100755 new mode 100644 diff --git a/Project.toml b/Project.toml old mode 100755 new mode 100644 index 8f973f73..86eb9627 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "ReservoirComputing" uuid = "7c2d2b1e-3dd4-11ea-355a-8f6a8116e294" authors = ["Francesco Martinuzzi"] -version = "0.10.4" +version = "0.10.5" [deps] Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" diff --git a/README.md b/README.md old mode 100755 new mode 100644 index e52729d9..20f7cbf2 --- a/README.md +++ b/README.md @@ -11,11 +11,11 @@ [![Build status](https://badge.buildkite.com/db8f91b89a10ad79bbd1d9fdb1340e6f6602a1c0ed9496d4d0.svg)](https://buildkite.com/julialang/reservoircomputing-dot-jl) [![ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://img.shields.io/badge/ColPrac-Contributor%27s%20Guide-blueviolet)](https://github.com/SciML/ColPrac) [![SciML Code Style](https://img.shields.io/static/v1?label=code%20style&message=SciML&color=9558b2&labelColor=389826)](https://github.com/SciML/SciMLStyle) - # ReservoirComputing.jl ReservoirComputing.jl provides an efficient, modular and easy to use implementation of Reservoir Computing models such as Echo State Networks (ESNs). For information on using this package please refer to the [stable documentation](https://docs.sciml.ai/ReservoirComputing/stable/). Use the [in-development documentation](https://docs.sciml.ai/ReservoirComputing/dev/) to take a look at at not yet released features. + ## Quick Example To illustrate the workflow of this library we will showcase how it is possible to train an ESN to learn the dynamics of the Lorenz system. 
As a first step we will need to gather the data. For the `Generative` prediction we need the target data to be one step ahead of the training data: @@ -36,7 +36,7 @@ function lorenz(du, u, p, t) end #solve and take data prob = ODEProblem(lorenz, u0, tspan, p) -data = Array(solve(prob, ABM54(), dt = 0.02)) +data = Array(solve(prob, ABM54(); dt=0.02)) shift = 300 train_len = 5000 @@ -55,9 +55,9 @@ Now that we have the data we can initialize the ESN with the chosen parameters. input_size = 3 res_size = 300 esn = ESN(input_data, input_size, res_size; - reservoir = rand_sparse(; radius = 1.2, sparsity = 6 / res_size), - input_layer = weighted_init, - nla_type = NLAT2()) + reservoir=rand_sparse(; radius=1.2, sparsity=6 / res_size), + input_layer=weighted_init, + nla_type=NLAT2()) ``` The echo state network can now be trained and tested. If not specified, the training will always be ordinary least squares regression. The full range of training methods is detailed in the documentation. @@ -71,8 +71,8 @@ The data is returned as a matrix, `output` in the code above, that contains the ```julia using Plots -plot(transpose(output), layout = (3, 1), label = "predicted") -plot!(transpose(test), layout = (3, 1), label = "actual") +plot(transpose(output); layout=(3, 1), label="predicted") +plot!(transpose(test); layout=(3, 1), label="actual") ``` ![lorenz_basic](https://user-images.githubusercontent.com/10376688/166227371-8bffa318-5c49-401f-9c64-9c71980cb3f7.png) @@ -82,9 +82,9 @@ One can also visualize the phase space of the attractor and the comparison with ```julia plot(transpose(output)[:, 1], transpose(output)[:, 2], - transpose(output)[:, 3], - label = "predicted") -plot!(transpose(test)[:, 1], transpose(test)[:, 2], transpose(test)[:, 3], label = "actual") + transpose(output)[:, 3]; + label="predicted") +plot!(transpose(test)[:, 1], transpose(test)[:, 2], transpose(test)[:, 3]; label="actual") ``` ![lorenz_attractor](https://user-images.githubusercontent.com/10376688/81470281-5a34b580-91ea-11ea-9eea-d2b266da19f4.png) diff --git a/docs/Project.toml b/docs/Project.toml old mode 100755 new mode 100644 diff --git a/docs/make.jl b/docs/make.jl old mode 100755 new mode 100644 index 7d697952..cc19d2e4 --- a/docs/make.jl +++ b/docs/make.jl @@ -7,13 +7,13 @@ ENV["PLOTS_TEST"] = "true" ENV["GKSwstype"] = "100" include("pages.jl") -makedocs(modules = [ReservoirComputing], - sitename = "ReservoirComputing.jl", - clean = true, doctest = false, linkcheck = true, - warnonly = [:missing_docs], - format = Documenter.HTML(assets = ["assets/favicon.ico"], - canonical = "https://docs.sciml.ai/ReservoirComputing/stable/"), - pages = pages) +makedocs(; modules=[ReservoirComputing], + sitename="ReservoirComputing.jl", + clean=true, doctest=false, linkcheck=true, + warnonly=[:missing_docs], + format=Documenter.HTML(; assets=["assets/favicon.ico"], + canonical="https://docs.sciml.ai/ReservoirComputing/stable/"), + pages=pages) -deploydocs(repo = "github.com/SciML/ReservoirComputing.jl.git"; - push_preview = true) +deploydocs(; repo="github.com/SciML/ReservoirComputing.jl.git", + push_preview=true) diff --git a/docs/pages.jl b/docs/pages.jl old mode 100755 new mode 100644 index 28bbfae5..e4837a9f --- a/docs/pages.jl +++ b/docs/pages.jl @@ -17,7 +17,8 @@ pages = [ "States Modifications" => "api/states.md", "Prediction Types" => "api/predict.md", "Echo State Networks" => "api/esn.md", - #"ESN Layers" => "api/esn_layers.md", + "ESN Layers" => "api/inits.md", "ESN Drivers" => "api/esn_drivers.md", + "ESN Variations" =>
"api/esn_variations.md", "ReCA" => "api/reca.md"] ] diff --git a/docs/src/api/esn.md b/docs/src/api/esn.md old mode 100755 new mode 100644 diff --git a/docs/src/api/esn_drivers.md b/docs/src/api/esn_drivers.md old mode 100755 new mode 100644 diff --git a/docs/src/api/esn_variations.md b/docs/src/api/esn_variations.md new file mode 100644 index 00000000..c8df19e7 --- /dev/null +++ b/docs/src/api/esn_variations.md @@ -0,0 +1,14 @@ +# Echo State Networks variations + +## Deep ESN + +```@docs + DeepESN +``` + +## Hybrid ESN + +```@docs + HybridESN + KnowledgeModel +``` \ No newline at end of file diff --git a/docs/src/api/inits.md b/docs/src/api/inits.md new file mode 100644 index 00000000..83942c01 --- /dev/null +++ b/docs/src/api/inits.md @@ -0,0 +1,20 @@ +# Echo State Networks Initializers +## Input layers + +```@docs + scaled_rand + weighted_init + informed_init + minimal_init +``` + +## Reservoirs + +```@docs + rand_sparse + delay_line + delay_line_backward + cycle_jumps + simple_cycle + pseudo_svd +``` \ No newline at end of file diff --git a/docs/src/api/predict.md b/docs/src/api/predict.md old mode 100755 new mode 100644 diff --git a/docs/src/api/reca.md b/docs/src/api/reca.md old mode 100755 new mode 100644 diff --git a/docs/src/api/states.md b/docs/src/api/states.md old mode 100755 new mode 100644 index 5a4e5686..3a29d591 --- a/docs/src/api/states.md +++ b/docs/src/api/states.md @@ -17,3 +17,9 @@ NLAT2 NLAT3 ``` + +## Internals + +```@docs + ReservoirComputing.create_states +``` \ No newline at end of file diff --git a/docs/src/api/training.md b/docs/src/api/training.md old mode 100755 new mode 100644 diff --git a/docs/src/assets/favicon.ico b/docs/src/assets/favicon.ico old mode 100755 new mode 100644 diff --git a/docs/src/assets/logo.png b/docs/src/assets/logo.png old mode 100755 new mode 100644 diff --git a/docs/src/esn_tutorials/change_layers.md b/docs/src/esn_tutorials/change_layers.md old mode 100755 new mode 100644 index f10c869e..7927e9c0 --- a/docs/src/esn_tutorials/change_layers.md +++ b/docs/src/esn_tutorials/change_layers.md @@ -1,6 +1,6 @@ # Using different layers -A great deal of efforts in the ESNs field are devoted to finding an ideal construction for the reservoir matrices. ReservoirComputing.jl offers multiple implementation of reservoir and input matrices initializations found in the literature. The API is standardized, and follows by [WeightInitializers.jl](https://github.com/LuxDL/WeightInitializers.jl): +A great deal of efforts in the ESNs field are devoted to finding an ideal construction for the reservoir matrices. ReservoirComputing.jl offers multiple implementation of reservoir and input matrices initializations found in the literature. The API is standardized, and follows by [WeightInitializers.jl](https://github.com/LuxDL/Lux.jl/tree/main/lib/WeightInitializers): ```julia weights = init(rng, dims...) 
@@ -49,15 +49,15 @@ Now it is possible to define the input layers and reservoirs we want to compare using ReservoirComputing, StatsBase res_size = 300 -input_layer = [minimal_init(; weight = 0.85, sampling_type = :irrational), - minimal_init(; weight = 0.95, sampling_type = :irrational)] -reservoirs = [simple_cycle(; weight = 0.7), - cycle_jumps(; cycle_weight = 0.7, jump_weight = 0.2, jump_size = 5)] +input_layer = [minimal_init(; weight=0.85, sampling_type=:irrational), + minimal_init(; weight=0.95, sampling_type=:irrational)] +reservoirs = [simple_cycle(; weight=0.7), + cycle_jumps(; cycle_weight=0.7, jump_weight=0.2, jump_size=5)] for i in 1:length(reservoirs) esn = ESN(training_input, 2, res_size; - input_layer = input_layer[i], - reservoir = reservoirs[i]) + input_layer=input_layer[i], + reservoir=reservoirs[i]) wout = train(esn, training_target, StandardRidge(0.001)) output = esn(Predictive(testing_input), wout) println(msd(testing_target, output)) diff --git a/docs/src/esn_tutorials/data/santafe_laser.txt b/docs/src/esn_tutorials/data/santafe_laser.txt old mode 100755 new mode 100644 diff --git a/docs/src/esn_tutorials/deep_esn.md b/docs/src/esn_tutorials/deep_esn.md old mode 100755 new mode 100644 index 08f4e9f1..412edadf --- a/docs/src/esn_tutorials/deep_esn.md +++ b/docs/src/esn_tutorials/deep_esn.md @@ -2,7 +2,7 @@ Deep Echo State Network architectures started to gain some traction recently. In this guide, we illustrate how it is possible to use ReservoirComputing.jl to build a deep ESN. -The network implemented in this library is taken from [^1]. It works by stacking reservoirs on top of each other, feeding the output from one into the next. The states are obtained by merging all the inner states of the stacked reservoirs. For a more in-depth explanation, refer to the paper linked above. The full script for this example can be found [here](https://github.com/MartinuzziFrancesco/reservoir-computing-examples/blob/main/deep-esn/deepesn.jl). This example was run on Julia v1.7.2. +The network implemented in this library is taken from [^1]. It works by stacking reservoirs on top of each other, feeding the output from one into the next. The states are obtained by merging all the inner states of the stacked reservoirs. For a more in-depth explanation, refer to the paper linked above. ## Lorenz Example @@ -20,7 +20,7 @@ end #solve and take data prob = ODEProblem(lorenz!, [1.0, 0.0, 0.0], (0.0, 200.0)) -data = solve(prob, ABM54(), dt = 0.02) +data = solve(prob, ABM54(); dt=0.02) data = reduce(hcat, data.u) #determine shift length, training length and prediction length @@ -41,15 +41,15 @@ The construction of the ESN is also really similar. The only difference is that ```@example deep_lorenz using ReservoirComputing -reservoirs = [rand_sparse(; radius = 1.1, sparsity = 0.1), - rand_sparse(; radius = 1.2, sparsity = 0.1), - rand_sparse(; radius = 1.4, sparsity = 0.1)] +reservoirs = [rand_sparse(; radius=1.1, sparsity=0.1), + rand_sparse(; radius=1.2, sparsity=0.1), + rand_sparse(; radius=1.4, sparsity=0.1)] esn = DeepESN(input_data, 3, 200; - reservoir = reservoirs, - reservoir_driver = RNN(), - nla_type = NLADefault(), - states_type = StandardStates()) + reservoir=reservoirs, + reservoir_driver=RNN(), + nla_type=NLADefault(), + states_type=StandardStates()) ``` The input layer and bias can also be given as vectors, but of course, they have to be of the same size of the reservoirs vector. If they are not passed as a vector, the value passed will be used for all the layers in the deep ESN. 
@@ -75,17 +75,17 @@ lorenz_maxlyap = 0.9056 predict_ts = ts[(shift + train_len + 1):(shift + train_len + predict_len)] lyap_time = (predict_ts .- predict_ts[1]) * (1 / lorenz_maxlyap) -p1 = plot(lyap_time, [test_data[1, :] output[1, :]], label = ["actual" "predicted"], - ylabel = "x(t)", linewidth = 2.5, xticks = false, yticks = -15:15:15); -p2 = plot(lyap_time, [test_data[2, :] output[2, :]], label = ["actual" "predicted"], - ylabel = "y(t)", linewidth = 2.5, xticks = false, yticks = -20:20:20); -p3 = plot(lyap_time, [test_data[3, :] output[3, :]], label = ["actual" "predicted"], - ylabel = "z(t)", linewidth = 2.5, xlabel = "max(λ)*t", yticks = 10:15:40); - -plot(p1, p2, p3, plot_title = "Lorenz System Coordinates", - layout = (3, 1), xtickfontsize = 12, ytickfontsize = 12, xguidefontsize = 15, - yguidefontsize = 15, - legendfontsize = 12, titlefontsize = 20) +p1 = plot(lyap_time, [test_data[1, :] output[1, :]]; label=["actual" "predicted"], + ylabel="x(t)", linewidth=2.5, xticks=false, yticks=-15:15:15); +p2 = plot(lyap_time, [test_data[2, :] output[2, :]]; label=["actual" "predicted"], + ylabel="y(t)", linewidth=2.5, xticks=false, yticks=-20:20:20); +p3 = plot(lyap_time, [test_data[3, :] output[3, :]]; label=["actual" "predicted"], + ylabel="z(t)", linewidth=2.5, xlabel="max(λ)*t", yticks=10:15:40); + +plot(p1, p2, p3; plot_title="Lorenz System Coordinates", + layout=(3, 1), xtickfontsize=12, ytickfontsize=12, xguidefontsize=15, + yguidefontsize=15, + legendfontsize=12, titlefontsize=20) ``` ## Documentation diff --git a/docs/src/esn_tutorials/different_drivers.md b/docs/src/esn_tutorials/different_drivers.md old mode 100755 new mode 100644 index 5a6fc983..9b72fdf0 --- a/docs/src/esn_tutorials/different_drivers.md +++ b/docs/src/esn_tutorials/different_drivers.md @@ -14,7 +14,7 @@ where ``D`` is the number of activation functions and respective parameters chos The method to call to use the multiple activation function ESN is `MRNN(activation_function, leaky_coefficient, scaling_factor)`. The arguments can be used as both `args` and `kwargs`. `activation_function` and `scaling_factor` have to be vectors (or tuples) containing the chosen activation functions and respective scaling factors (``f_1,...,f_D`` and ``\lambda_1,...,\lambda_D`` following the nomenclature introduced above). The `leaky_coefficient` represents ``\alpha`` and it is a single value. -Starting with the example, the data used is based on the following function based on the DAFESN paper [^1]. A full script of the example is available [here](https://github.com/MartinuzziFrancesco/reservoir-computing-examples/blob/main/change_drivers/mrnn/mrnn.jl). This example was run on Julia v1.7.2. +Starting with the example, the data used is based on the following function based on the DAFESN paper [^1]. ```@example mrnn u(t) = sin(t) + sin(0.51 * t) + sin(0.22 * t) + sin(0.1002 * t) + sin(0.05343 * t) @@ -36,7 +36,7 @@ testing_target = reduce(hcat, data[(shift + train_len + 1):(shift + train_len + predict_len)]) ``` -To follow the paper more closely, it is necessary to define a couple of activation functions. The numbering of them follows the ones in the paper. Of course, one can also use any custom-defined function, available in the base language or any activation function from [NNlib](https://fluxml.ai/Flux.jl/stable/models/nnlib/#Activation-Functions). +To follow the paper more closely, it is necessary to define a couple of activation functions. The numbering of them follows the ones in the paper. 
Of course, one can also use any custom-defined function, available in the base language or any activation function from [NNlib](https://fluxml.ai/NNlib.jl/stable/reference/#Activation-Functions). ```@example mrnn f2(x) = (1 - exp(-x)) / (2 * (1 + exp(-x))) @@ -57,14 +57,14 @@ base_case = RNN(tanh, 0.85) #MRNN() test cases #Parameter given as kwargs -case3 = MRNN(activation_function = [tanh, f2], - leaky_coefficient = 0.85, - scaling_factor = [0.5, 0.3]) +case3 = MRNN(; activation_function=[tanh, f2], + leaky_coefficient=0.85, + scaling_factor=[0.5, 0.3]) #Parameter given as kwargs -case4 = MRNN(activation_function = [tanh, f3], - leaky_coefficient = 0.9, - scaling_factor = [0.45, 0.35]) +case4 = MRNN(; activation_function=[tanh, f3], + leaky_coefficient=0.9, + scaling_factor=[0.45, 0.35]) #Parameter given as args case5 = MRNN([tanh, f4], 0.9, [0.43, 0.13]) @@ -72,14 +72,14 @@ case5 = MRNN([tanh, f4], 0.9, [0.43, 0.13]) #tests test_cases = [base_case, case3, case4, case5] for case in test_cases - esn = ESN(training_input, 1, 100, - input_layer = weighted_init(; scaling = 0.3), - reservoir = rand_sparse(; radius = 0.4), - reservoir_driver = case, - states_type = ExtendedStates()) + esn = ESN(training_input, 1, 100; + input_layer=weighted_init(; scaling=0.3), + reservoir=rand_sparse(; radius=0.4), + reservoir_driver=case, + states_type=ExtendedStates()) wout = train(esn, training_target, StandardRidge(10e-6)) output = esn(Predictive(testing_input), wout) - println(rmsd(testing_target, output, normalize = true)) + println(rmsd(testing_target, output; normalize=true)) end ``` @@ -159,7 +159,7 @@ This variation can be obtained by setting `variation=Minimal()`. The `inner_laye ### Examples -To showcase the use of the `GRU()` method, this section will only illustrate the standard `FullyGated()` version. The full script for this example with the data can be found [here](https://github.com/MartinuzziFrancesco/reservoir-computing-examples/blob/main/change_drivers/gru/). +To showcase the use of the `GRU()` method, this section will only illustrate the standard `FullyGated()` version. The full script for this example with the data can be found [here](https://github.com/MartinuzziFrancesco/reservoir-computing-examples/tree/main/change_drivers/gru). The data used for this example is the Santa Fe laser dataset [^7] retrieved from [here](https://web.archive.org/web/20160427182805/http://www-psych.stanford.edu/%7Eandreas/Time-Series/SantaFe.html). The data is split to account for a next step prediction. @@ -187,19 +187,19 @@ res_radius = 1.4 Random.seed!(42) esn = ESN(training_input, 1, res_size; - reservoir = rand_sparse(; radius = res_radius), - reservoir_driver = GRU()) + reservoir=rand_sparse(; radius=res_radius), + reservoir_driver=GRU()) ``` The default inner reservoir and input layer for the GRU are the same defaults for the `reservoir` and `input_layer` of the ESN. One can use the explicit call if they choose to. 
```@example gru -gru = GRU(reservoir = [rand_sparse, +gru = GRU(; reservoir=[rand_sparse, rand_sparse], - inner_layer = [scaled_rand, scaled_rand]) + inner_layer=[scaled_rand, scaled_rand]) esn = ESN(training_input, 1, res_size; - reservoir = rand_sparse(; radius = res_radius), - reservoir_driver = gru) + reservoir=rand_sparse(; radius=res_radius), + reservoir_driver=gru) ``` The training and prediction can proceed as usual: @@ -215,14 +215,14 @@ The results can be plotted using Plots.jl ```@example gru using Plots -plot([testing_target' output'], label = ["actual" "predicted"], - plot_title = "Santa Fe Laser", - titlefontsize = 20, - legendfontsize = 12, - linewidth = 2.5, - xtickfontsize = 12, - ytickfontsize = 12, - size = (1080, 720)) +plot([testing_target' output']; label=["actual" "predicted"], + plot_title="Santa Fe Laser", + titlefontsize=20, + legendfontsize=12, + linewidth=2.5, + xtickfontsize=12, + ytickfontsize=12, + size=(1080, 720)) ``` It is interesting to see a comparison of the GRU driven ESN and the standard RNN driven ESN. Using the same parameters defined before it is possible to do the following @@ -231,8 +231,8 @@ It is interesting to see a comparison of the GRU driven ESN and the standard RNN using StatsBase esn_rnn = ESN(training_input, 1, res_size; - reservoir = rand_sparse(; radius = res_radius), - reservoir_driver = RNN()) + reservoir=rand_sparse(; radius=res_radius), + reservoir_driver=RNN()) output_layer = train(esn_rnn, training_target, training_method) output_rnn = esn_rnn(Predictive(testing_input), output_layer) diff --git a/docs/src/esn_tutorials/different_training.md b/docs/src/esn_tutorials/different_training.md old mode 100755 new mode 100644 diff --git a/docs/src/esn_tutorials/hybrid.md b/docs/src/esn_tutorials/hybrid.md old mode 100755 new mode 100644 index 88e79707..cd68fe6c --- a/docs/src/esn_tutorials/hybrid.md +++ b/docs/src/esn_tutorials/hybrid.md @@ -12,7 +12,7 @@ using DifferentialEquations u0 = [1.0, 0.0, 0.0] tspan = (0.0, 1000.0) datasize = 100000 -tsteps = range(tspan[1], tspan[2], length = datasize) +tsteps = range(tspan[1], tspan[2]; length=datasize) function lorenz(du, u, p, t) p = [10.0, 28.0, 8 / 3] @@ -22,7 +22,7 @@ function lorenz(du, u, p, t) end ode_prob = ODEProblem(lorenz, u0, tspan) -ode_sol = solve(ode_prob, saveat = tsteps) +ode_sol = solve(ode_prob; saveat=tsteps) ode_data = Array(ode_sol) train_len = 10000 @@ -40,9 +40,9 @@ tspan_train = (tspan[1], ode_sol.t[train_len]) To feed the data to the ESN, it is necessary to create a suitable function. 
```@example hybrid -function prior_model_data_generator(u0, tspan, tsteps, model = lorenz) +function prior_model_data_generator(u0, tspan, tsteps, model=lorenz) prob = ODEProblem(lorenz, u0, tspan) - sol = Array(solve(prob, saveat = tsteps)) + sol = Array(solve(prob; saveat=tsteps)) return sol end ``` @@ -61,7 +61,7 @@ hesn = HybridESN(km, input_data, in_size, res_size; - reservoir = rand_sparse) + reservoir=rand_sparse) ``` ## Training and Prediction @@ -81,17 +81,17 @@ lorenz_maxlyap = 0.9056 predict_ts = tsteps[(train_len + 1):(train_len + predict_len)] lyap_time = (predict_ts .- predict_ts[1]) * (1 / lorenz_maxlyap) -p1 = plot(lyap_time, [test_data[1, :] output[1, :]], label = ["actual" "predicted"], - ylabel = "x(t)", linewidth = 2.5, xticks = false, yticks = -15:15:15); -p2 = plot(lyap_time, [test_data[2, :] output[2, :]], label = ["actual" "predicted"], - ylabel = "y(t)", linewidth = 2.5, xticks = false, yticks = -20:20:20); -p3 = plot(lyap_time, [test_data[3, :] output[3, :]], label = ["actual" "predicted"], - ylabel = "z(t)", linewidth = 2.5, xlabel = "max(λ)*t", yticks = 10:15:40); - -plot(p1, p2, p3, plot_title = "Lorenz System Coordinates", - layout = (3, 1), xtickfontsize = 12, ytickfontsize = 12, xguidefontsize = 15, - yguidefontsize = 15, - legendfontsize = 12, titlefontsize = 20) +p1 = plot(lyap_time, [test_data[1, :] output[1, :]]; label=["actual" "predicted"], + ylabel="x(t)", linewidth=2.5, xticks=false, yticks=-15:15:15); +p2 = plot(lyap_time, [test_data[2, :] output[2, :]]; label=["actual" "predicted"], + ylabel="y(t)", linewidth=2.5, xticks=false, yticks=-20:20:20); +p3 = plot(lyap_time, [test_data[3, :] output[3, :]]; label=["actual" "predicted"], + ylabel="z(t)", linewidth=2.5, xlabel="max(λ)*t", yticks=10:15:40); + +plot(p1, p2, p3; plot_title="Lorenz System Coordinates", + layout=(3, 1), xtickfontsize=12, ytickfontsize=12, xguidefontsize=15, + yguidefontsize=15, + legendfontsize=12, titlefontsize=20) ``` ## Bibliography diff --git a/docs/src/esn_tutorials/lorenz_basic.md b/docs/src/esn_tutorials/lorenz_basic.md old mode 100755 new mode 100644 index 9b46a338..081d0ce9 --- a/docs/src/esn_tutorials/lorenz_basic.md +++ b/docs/src/esn_tutorials/lorenz_basic.md @@ -18,7 +18,7 @@ end #solve and take data prob = ODEProblem(lorenz!, [1.0, 0.0, 0.0], (0.0, 200.0)) -data = solve(prob, ABM54(), dt = 0.02) +data = solve(prob, ABM54(); dt=0.02) data = reduce(hcat, data.u) ``` @@ -54,11 +54,11 @@ input_scaling = 0.1 #build ESN struct esn = ESN(input_data, in_size, res_size; - reservoir = rand_sparse(; radius = res_radius, sparsity = res_sparsity), - input_layer = weighted_init(; scaling = input_scaling), - reservoir_driver = RNN(), - nla_type = NLADefault(), - states_type = StandardStates()) + reservoir=rand_sparse(; radius=res_radius, sparsity=res_sparsity), + input_layer=weighted_init(; scaling=input_scaling), + reservoir_driver=RNN(), + nla_type=NLADefault(), + states_type=StandardStates()) ``` Most of the parameters chosen here mirror the default ones, so a direct call is not necessary. The readme example is identical to this one, except for the explicit call. Going line by line to see what is happening, starting from `res_size`: this value determines the dimensions of the reservoir matrix. In this case, a size of 300 has been chosen, so the reservoir matrix will be 300 x 300. This is not always the case, since some input layer constructions can modify the dimensions of the reservoir, but in that case, everything is taken care of internally. 
@@ -113,17 +113,17 @@ lorenz_maxlyap = 0.9056 predict_ts = ts[(shift + train_len + 1):(shift + train_len + predict_len)] lyap_time = (predict_ts .- predict_ts[1]) * (1 / lorenz_maxlyap) -p1 = plot(lyap_time, [test_data[1, :] output[1, :]], label = ["actual" "predicted"], - ylabel = "x(t)", linewidth = 2.5, xticks = false, yticks = -15:15:15); -p2 = plot(lyap_time, [test_data[2, :] output[2, :]], label = ["actual" "predicted"], - ylabel = "y(t)", linewidth = 2.5, xticks = false, yticks = -20:20:20); -p3 = plot(lyap_time, [test_data[3, :] output[3, :]], label = ["actual" "predicted"], - ylabel = "z(t)", linewidth = 2.5, xlabel = "max(λ)*t", yticks = 10:15:40); - -plot(p1, p2, p3, plot_title = "Lorenz System Coordinates", - layout = (3, 1), xtickfontsize = 12, ytickfontsize = 12, xguidefontsize = 15, - yguidefontsize = 15, - legendfontsize = 12, titlefontsize = 20) +p1 = plot(lyap_time, [test_data[1, :] output[1, :]]; label=["actual" "predicted"], + ylabel="x(t)", linewidth=2.5, xticks=false, yticks=-15:15:15); +p2 = plot(lyap_time, [test_data[2, :] output[2, :]]; label=["actual" "predicted"], + ylabel="y(t)", linewidth=2.5, xticks=false, yticks=-20:20:20); +p3 = plot(lyap_time, [test_data[3, :] output[3, :]]; label=["actual" "predicted"], + ylabel="z(t)", linewidth=2.5, xlabel="max(λ)*t", yticks=10:15:40); + +plot(p1, p2, p3; plot_title="Lorenz System Coordinates", + layout=(3, 1), xtickfontsize=12, ytickfontsize=12, xguidefontsize=15, + yguidefontsize=15, + legendfontsize=12, titlefontsize=20) ``` ## Bibliography diff --git a/docs/src/general/different_training.md b/docs/src/general/different_training.md old mode 100755 new mode 100644 diff --git a/docs/src/general/predictive_generative.md b/docs/src/general/predictive_generative.md old mode 100755 new mode 100644 diff --git a/docs/src/general/states_variation.md b/docs/src/general/states_variation.md old mode 100755 new mode 100644 diff --git a/docs/src/index.md b/docs/src/index.md old mode 100755 new mode 100644 index ac8e1f66..320cdbb9 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -96,7 +96,7 @@ versioninfo() # hide ```@example using Pkg # hide -Pkg.status(; mode = PKGMODE_MANIFEST) # hide +Pkg.status(; mode=PKGMODE_MANIFEST) # hide ``` ```@raw html diff --git a/docs/src/reca_tutorials/5bitinput.txt b/docs/src/reca_tutorials/5bitinput.txt old mode 100755 new mode 100644 diff --git a/docs/src/reca_tutorials/5bitoutput.txt b/docs/src/reca_tutorials/5bitoutput.txt old mode 100755 new mode 100644 diff --git a/docs/src/reca_tutorials/reca.md b/docs/src/reca_tutorials/reca.md old mode 100755 new mode 100644 index e1e52c96..ac05b4fb --- a/docs/src/reca_tutorials/reca.md +++ b/docs/src/reca_tutorials/reca.md @@ -27,8 +27,8 @@ To define the ReCA model, it suffices to call: ```@example reca reca = RECA(input, ca; - generations = 16, - input_encoding = RandomMapping(16, 40)) + generations=16, + input_encoding=RandomMapping(16, 40)) ``` After this, the training can be performed with the chosen method. 
diff --git a/ext/RCLIBSVMExt.jl b/ext/RCLIBSVMExt.jl old mode 100755 new mode 100644 diff --git a/ext/RCMLJLinearModelsExt.jl b/ext/RCMLJLinearModelsExt.jl old mode 100755 new mode 100644 diff --git a/src/ReservoirComputing.jl b/src/ReservoirComputing.jl old mode 100755 new mode 100644 index 3ef8d25c..e5cc8b68 --- a/src/ReservoirComputing.jl +++ b/src/ReservoirComputing.jl @@ -91,7 +91,6 @@ function Predictive(prediction_data) Predictive(prediction_data, prediction_len) end - #fallbacks for initializers #eventually to remove once migrated to WeightInitializers.jl for initializer in (:rand_sparse, :delay_line, :delay_line_backward, :cycle_jumps, :simple_cycle, :pseudo_svd, @@ -123,7 +122,6 @@ for initializer in (:rand_sparse, :delay_line, :delay_line_backward, :cycle_jump end end - #general include("states.jl") include("predict.jl") diff --git a/src/esn/deepesn.jl b/src/esn/deepesn.jl old mode 100755 new mode 100644 index cd8de8c0..d5957acf --- a/src/esn/deepesn.jl +++ b/src/esn/deepesn.jl @@ -62,7 +62,7 @@ temporal features. train_data = [your_training_data_here] # Create a DeepESN with specific parameters -deepESN = DeepESN(train_data, 10, 100, depth = 3, washout = 100) +deepESN = DeepESN(train_data, 10, 100; depth=3, washout=100) # Proceed with training and prediction (pseudocode) train(deepESN, target_data) @@ -72,17 +72,17 @@ prediction = predict(deepESN, new_data) function DeepESN(train_data, in_size::Int, res_size::Int; - depth::Int = 2, - input_layer = fill(scaled_rand, depth), - bias = fill(zeros64, depth), - reservoir = fill(rand_sparse, depth), - reservoir_driver = RNN(), - nla_type = NLADefault(), - states_type = StandardStates(), - washout::Int = 0, - rng = Utils.default_rng(), - T = Float64, - matrix_type = typeof(train_data)) + depth::Int=2, + input_layer=fill(scaled_rand, depth), + bias=fill(zeros64, depth), + reservoir=fill(rand_sparse, depth), + reservoir_driver=RNN(), + nla_type=NLADefault(), + states_type=StandardStates(), + washout::Int=0, + rng=Utils.default_rng(), + T=Float64, + matrix_type=typeof(train_data)) if states_type isa AbstractPaddedStates in_size = size(train_data, 1) + 1 train_data = vcat(Adapt.adapt(matrix_type, ones(1, size(train_data, 2))), diff --git a/src/esn/esn.jl b/src/esn/esn.jl old mode 100755 new mode 100644 index 1c1a7a65..64d5d7b2 --- a/src/esn/esn.jl +++ b/src/esn/esn.jl @@ -41,22 +41,22 @@ using ReservoirComputing train_data = rand(10, 100) # 10 features, 100 time steps -esn = ESN(train_data, reservoir = RandSparseReservoir(200), washout = 10) +esn = ESN(train_data; reservoir=RandSparseReservoir(200), washout=10) ``` """ function ESN(train_data, in_size::Int, res_size::Int; - input_layer = scaled_rand, - reservoir = rand_sparse, - bias = zeros64, - reservoir_driver = RNN(), - nla_type = NLADefault(), - states_type = StandardStates(), - washout = 0, - rng = Utils.default_rng(), - T = Float32, - matrix_type = typeof(train_data)) + input_layer=scaled_rand, + reservoir=rand_sparse, + bias=zeros64, + reservoir_driver=RNN(), + nla_type=NLADefault(), + states_type=StandardStates(), + washout=0, + rng=Utils.default_rng(), + T=Float32, + matrix_type=typeof(train_data)) if states_type isa AbstractPaddedStates in_size = size(train_data, 1) + 1 train_data = vcat(Adapt.adapt(matrix_type, ones(1, size(train_data, 2))), @@ -78,7 +78,7 @@ end function (esn::AbstractEchoStateNetwork)(prediction::AbstractPrediction, output_layer::AbstractOutputLayer; - last_state = esn.states[:, [end]], + last_state=esn.states[:, [end]], kwargs...) 
pred_len = prediction.prediction_len @@ -111,19 +111,19 @@ The trained ESN model. The exact type and structure of the return value depends using ReservoirComputing # Initialize an ESN instance and target data -esn = ESN(train_data, reservoir = RandSparseReservoir(200), washout = 10) +esn = ESN(train_data; reservoir=RandSparseReservoir(200), washout=10) target_data = rand(size(train_data, 2)) # Train the ESN using the default training method trained_esn = train(esn, target_data) # Train the ESN using a custom training method -trained_esn = train(esn, target_data, training_method = StandardRidge(1.0)) +trained_esn = train(esn, target_data; training_method=StandardRidge(1.0)) ``` """ function train(esn::AbstractEchoStateNetwork, target_data, - training_method = StandardRidge(); + training_method=StandardRidge(); kwargs...) states_new = esn.states_type(esn.nla_type, esn.states, esn.train_data[:, 1:end]) diff --git a/src/esn/esn_input_layers.jl b/src/esn/esn_input_layers.jl old mode 100755 new mode 100644 index 296f1768..04487f96 --- a/src/esn/esn_input_layers.jl +++ b/src/esn/esn_input_layers.jl @@ -18,13 +18,13 @@ A matrix of type with dimensions specified by `dims`. Each element of the matrix ```julia rng = Random.default_rng() -matrix = scaled_rand(rng, Float64, (100, 50); scaling = 0.2) +matrix = scaled_rand(rng, Float64, (100, 50); scaling=0.2) ``` """ function scaled_rand(rng::AbstractRNG, ::Type{T}, dims::Integer...; - scaling = T(0.1)) where {T <: Number} + scaling=T(0.1)) where {T <: Number} res_size, in_size = dims layer_matrix = (DeviceAgnostic.rand(rng, T, res_size, in_size) .- T(0.5)) .* (T(2) * scaling) @@ -51,7 +51,7 @@ A matrix representing the weighted input layer as defined in [^Lu2017]. The matr ```julia rng = Random.default_rng() -input_layer = weighted_init(rng, Float64, (3, 300); scaling = 0.2) +input_layer = weighted_init(rng, Float64, (3, 300); scaling=0.2) ``` # References @@ -63,7 +63,7 @@ input_layer = weighted_init(rng, Float64, (3, 300); scaling = 0.2) function weighted_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; - scaling = T(0.1)) where {T <: Number} + scaling=T(0.1)) where {T <: Number} approx_res_size, in_size = dims res_size = Int(floor(approx_res_size / in_size) * in_size) layer_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size) @@ -101,11 +101,11 @@ Create a layer of a neural network. 
rng = Random.default_rng() dims = (100, 200) model_in_size = 50 -input_matrix = informed_init(rng, Float64, dims; model_in_size = model_in_size) +input_matrix = informed_init(rng, Float64, dims; model_in_size=model_in_size) ``` """ function informed_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; - scaling = T(0.1), model_in_size, gamma = T(0.5)) where {T <: Number} + scaling=T(0.1), model_in_size, gamma=T(0.5)) where {T <: Number} res_size, in_size = dims state_size = in_size - model_in_size @@ -122,7 +122,7 @@ function informed_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; idxs = findall(Bool[zero_connections .== input_matrix[i, :] for i in 1:size(input_matrix, 1)]) random_row_idx = idxs[DeviceAgnostic.rand(rng, T, 1:end)] - random_clm_idx = range(1, state_size, step = 1)[DeviceAgnostic.rand(rng, T, 1:end)] + random_clm_idx = range(1, state_size; step=1)[DeviceAgnostic.rand(rng, T, 1:end)] input_matrix[random_row_idx, random_clm_idx] = (DeviceAgnostic.rand(rng, T) - T(0.5)) .* (T(2) * scaling) end @@ -131,7 +131,7 @@ function informed_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; idxs = findall(Bool[zero_connections .== input_matrix[i, :] for i in 1:size(input_matrix, 1)]) random_row_idx = idxs[DeviceAgnostic.rand(rng, T, 1:end)] - random_clm_idx = range(state_size + 1, in_size, step = 1)[DeviceAgnostic.rand( + random_clm_idx = range(state_size + 1, in_size; step=1)[DeviceAgnostic.rand( rng, T, 1:end)] input_matrix[random_row_idx, random_clm_idx] = (DeviceAgnostic.rand(rng, T) - T(0.5)) .* (T(2) * scaling) @@ -166,16 +166,16 @@ using Random rng = Random.default_rng() dims = (3, 2) weight = 0.5 -layer_matrix = irrational_sample_init(rng, Float64, dims; weight = weight, - sampling = IrrationalSample(irrational = sqrt(2), start = 1)) +layer_matrix = irrational_sample_init(rng, Float64, dims; weight=weight, + sampling=IrrationalSample(; irrational=sqrt(2), start=1)) ``` """ function minimal_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; - sampling_type::Symbol = :bernoulli, - weight::Number = T(0.1), - irrational::Real = pi, - start::Int = 1, - p::Number = T(0.5)) where {T <: Number} + sampling_type::Symbol=:bernoulli, + weight::Number=T(0.1), + irrational::Real=pi, + start::Int=1, + p::Number=T(0.5)) where {T <: Number} res_size, in_size = dims if sampling_type == :bernoulli layer_matrix = _create_bernoulli(p, res_size, in_size, weight, rng, T) diff --git a/src/esn/esn_predict.jl b/src/esn/esn_predict.jl old mode 100755 new mode 100644 index cc7cdc5d..ba6c80da --- a/src/esn/esn_predict.jl +++ b/src/esn/esn_predict.jl @@ -3,8 +3,8 @@ function obtain_esn_prediction(esn, x, output_layer, args...; - initial_conditions = output_layer.last_value, - save_states = false) + initial_conditions=output_layer.last_value, + save_states=false) out_size = output_layer.out_size training_method = output_layer.training_method prediction_len = prediction.prediction_len @@ -33,8 +33,8 @@ function obtain_esn_prediction(esn, x, output_layer, args...; - initial_conditions = output_layer.last_value, - save_states = false) + initial_conditions=output_layer.last_value, + save_states=false) out_size = output_layer.out_size training_method = output_layer.training_method prediction_len = prediction.prediction_len diff --git a/src/esn/esn_reservoir_drivers.jl b/src/esn/esn_reservoir_drivers.jl old mode 100755 new mode 100644 index 76cabae6..52c1a2c3 --- a/src/esn/esn_reservoir_drivers.jl +++ b/src/esn/esn_reservoir_drivers.jl @@ -114,7 +114,7 @@ Returns a Recurrent Neural Network (RNN) initializer 
for the Echo State Network This function creates an RNN object with the specified activation function and leaky coefficient, which can be used as a reservoir driver in the ESN. """ -function RNN(; activation_function = NNlib.fast_act(tanh), leaky_coefficient = 1.0) +function RNN(; activation_function=NNlib.fast_act(tanh), leaky_coefficient=1.0) RNN(activation_function, leaky_coefficient) end @@ -185,9 +185,9 @@ This function creates an MRNN object with the specified activation functions, le "_A novel model of leaky integrator echo state network for time-series prediction._" Neurocomputing 159 (2015): 58-66. """ -function MRNN(; activation_function = [tanh, sigmoid], - leaky_coefficient = 1.0, - scaling_factor = fill(leaky_coefficient, length(activation_function))) +function MRNN(; activation_function=[tanh, sigmoid], + leaky_coefficient=1.0, + scaling_factor=fill(leaky_coefficient, length(activation_function))) @assert length(activation_function) == length(scaling_factor) return MRNN(activation_function, leaky_coefficient, scaling_factor) end @@ -290,11 +290,11 @@ A GRUParams object containing the parameters needed for the GRU-based reservoir "_Learning phrase representations using RNN encoder-decoder for statistical machine translation._" arXiv preprint arXiv:1406.1078 (2014). """ -function GRU(; activation_function = [NNlib.sigmoid, NNlib.sigmoid, tanh], - inner_layer = fill(scaled_rand, 2), - reservoir = fill(rand_sparse, 2), - bias = fill(scaled_rand, 2), - variant = FullyGated()) +function GRU(; activation_function=[NNlib.sigmoid, NNlib.sigmoid, tanh], + inner_layer=fill(scaled_rand, 2), + reservoir=fill(rand_sparse, 2), + bias=fill(scaled_rand, 2), + variant=FullyGated()) return GRU(activation_function, inner_layer, reservoir, bias, variant) end diff --git a/src/esn/esn_reservoirs.jl b/src/esn/esn_reservoirs.jl old mode 100755 new mode 100644 index 6ac4ed23..36276099 --- a/src/esn/esn_reservoirs.jl +++ b/src/esn/esn_reservoirs.jl @@ -22,11 +22,11 @@ This type of reservoir initialization is commonly used in ESNs for capturing tem function rand_sparse(rng::AbstractRNG, ::Type{T}, dims::Integer...; - radius = T(1.0), - sparsity = T(0.1), - std = T(1.0)) where {T <: Number} + radius=T(1.0), + sparsity=T(0.1), + std=T(1.0)) where {T <: Number} lcl_sparsity = T(1) - sparsity #consistency with current implementations - reservoir_matrix = sparse_init(rng, T, dims...; sparsity = lcl_sparsity, std = std) + reservoir_matrix = sparse_init(rng, T, dims...; sparsity=lcl_sparsity, std=std) rho_w = maximum(abs.(eigvals(reservoir_matrix))) reservoir_matrix .*= radius / rho_w if Inf in unique(reservoir_matrix) || -Inf in unique(reservoir_matrix) @@ -54,7 +54,7 @@ A delay line reservoir matrix with dimensions specified by `dims`. The matrix is # Example ```julia -reservoir = delay_line(Float64, 100, 100; weight = 0.2) +reservoir = delay_line(Float64, 100, 100; weight=0.2) ``` # References @@ -65,7 +65,7 @@ Rodan, Ali, and Peter Tino. "Minimum complexity echo state network." IEEE Transa function delay_line(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight = T(0.1)) where {T <: Number} + weight=T(0.1)) where {T <: Number} reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) @assert length(dims) == 2&&dims[1] == dims[2] "The dimensions must define a square matrix (e.g., (100, 100))" @@ -104,8 +104,8 @@ Reservoir matrix with the dimensions specified by `dims` and weights. 
function delay_line_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight = T(0.1), - fb_weight = T(0.2)) where {T <: Number} + weight=T(0.1), + fb_weight=T(0.2)) where {T <: Number} res_size = first(dims) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) @@ -144,9 +144,9 @@ Reservoir matrix with the specified dimensions, cycle weight, jump weight, and j function cycle_jumps(rng::AbstractRNG, ::Type{T}, dims::Integer...; - cycle_weight::Number = T(0.1), - jump_weight::Number = T(0.1), - jump_size::Int = 3) where {T <: Number} + cycle_weight::Number=T(0.1), + jump_weight::Number=T(0.1), + jump_size::Int=3) where {T <: Number} res_size = first(dims) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) @@ -193,7 +193,7 @@ Reservoir matrix with the dimensions specified by `dims` and weights. function simple_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight = T(0.1)) where {T <: Number} + weight=T(0.1)) where {T <: Number} reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) for i in 1:(dims[1] - 1) @@ -233,14 +233,14 @@ This reservoir initialization method, based on a pseudo-SVD approach, is inspire function pseudo_svd(rng::AbstractRNG, ::Type{T}, dims::Integer...; - max_value::Number = T(1.0), - sparsity::Number = 0.1, - sorted::Bool = true, - reverse_sort::Bool = false) where {T <: Number} + max_value::Number=T(1.0), + sparsity::Number=0.1, + sorted::Bool=true, + reverse_sort::Bool=false) where {T <: Number} reservoir_matrix = create_diag(rng, T, dims[1], max_value; - sorted = sorted, - reverse_sort = reverse_sort) + sorted=sorted, + reverse_sort=reverse_sort) tmp_sparsity = get_sparsity(reservoir_matrix, dims[1]) while tmp_sparsity <= sparsity @@ -260,12 +260,12 @@ function rand_range(rng, T, n::Int) end function create_diag(rng::AbstractRNG, ::Type{T}, dim::Number, max_value::Number; - sorted::Bool = true, reverse_sort::Bool = false) where {T <: Number} + sorted::Bool=true, reverse_sort::Bool=false) where {T <: Number} diagonal_matrix = DeviceAgnostic.zeros(rng, T, dim, dim) if sorted == true if reverse_sort == true diagonal_values = sort( - DeviceAgnostic.rand(rng, T, dim) .* max_value, rev = true) + DeviceAgnostic.rand(rng, T, dim) .* max_value; rev=true) diagonal_values[1] = max_value else diagonal_values = sort(DeviceAgnostic.rand(rng, T, dim) .* max_value) diff --git a/src/esn/hybridesn.jl b/src/esn/hybridesn.jl old mode 100755 new mode 100644 index b766b013..172d719f --- a/src/esn/hybridesn.jl +++ b/src/esn/hybridesn.jl @@ -47,7 +47,7 @@ Reference: Using Machine Learning in Conjunction with a Knowledge-Based Model" (2018). """ function KnowledgeModel(prior_model, u0, tspan, datasize) - trange = collect(range(tspan[1], tspan[2], length = datasize)) + trange = collect(range(tspan[1], tspan[2]; length=datasize)) dt = trange[2] - trange[1] tsteps = push!(trange, dt + trange[end]) tspan_new = (tspan[1], dt + tspan[2]) @@ -108,7 +108,7 @@ influence the network's behavior and learning capacity. 
km = KnowledgeModel(prior_model_function, u0, (0, 100), 1000) # Create a HybridESN -hesn = HybridESN(km, train_data, 10, 100; washout = 100) +hesn = HybridESN(km, train_data, 10, 100; washout=100) # Train and predict train(hesn, target_data) @@ -119,16 +119,16 @@ function HybridESN(model, train_data, in_size::Int, res_size::Int; - input_layer = scaled_rand, - reservoir = rand_sparse, - bias = zeros64, - reservoir_driver = RNN(), - nla_type = NLADefault(), - states_type = StandardStates(), - washout = 0, - rng = Utils.default_rng(), - T = Float32, - matrix_type = typeof(train_data)) + input_layer=scaled_rand, + reservoir=rand_sparse, + bias=zeros64, + reservoir_driver=RNN(), + nla_type=NLADefault(), + states_type=StandardStates(), + washout=0, + rng=Utils.default_rng(), + T=Float32, + matrix_type=typeof(train_data)) train_data = vcat(train_data, model.model_data[:, 1:(end - 1)]) if states_type isa AbstractPaddedStates @@ -155,7 +155,7 @@ end function (hesn::HybridESN)(prediction::AbstractPrediction, output_layer::AbstractOutputLayer; - last_state = hesn.states[:, [end]], + last_state=hesn.states[:, [end]], kwargs...) km = hesn.model pred_len = prediction.prediction_len @@ -174,7 +174,7 @@ end function train(hesn::HybridESN, target_data, - training_method = StandardRidge(); + training_method=StandardRidge(); kwargs...) states = vcat(hesn.states, hesn.model.model_data[:, 2:end]) states_new = hesn.states_type(hesn.nla_type, states, hesn.train_data[:, 1:end]) diff --git a/src/predict.jl b/src/predict.jl old mode 100755 new mode 100644 index 3ca98f0a..46b89076 --- a/src/predict.jl +++ b/src/predict.jl @@ -3,7 +3,7 @@ function obtain_prediction(rc::AbstractReservoirComputer, x, output_layer, args...; - initial_conditions = output_layer.last_value) + initial_conditions=output_layer.last_value) #x = last_state prediction_len = prediction.prediction_len train_method = output_layer.training_method diff --git a/src/reca/reca.jl b/src/reca/reca.jl old mode 100755 new mode 100644 index 8af5b2d3..385af83d --- a/src/reca/reca.jl +++ b/src/reca/reca.jl @@ -26,10 +26,10 @@ automata._” arXiv preprint arXiv:1703.02806 (2017). """ function RECA(train_data, automata; - generations = 8, - input_encoding = RandomMapping(), - nla_type = NLADefault(), - states_type = StandardStates()) + generations=8, + input_encoding=RandomMapping(), + nla_type=NLADefault(), + states_type=StandardStates()) in_size = size(train_data, 1) #res_size = obtain_res_size(input_encoding, generations) state_encoding = create_encoding(input_encoding, train_data, generations) @@ -39,7 +39,7 @@ function RECA(train_data, end #training dispatch -function train(reca::AbstractReca, target_data, training_method = StandardRidge; kwargs...) +function train(reca::AbstractReca, target_data, training_method=StandardRidge; kwargs...) states_new = reca.states_type(reca.nla_type, reca.states, reca.train_data) return train(training_method, Float32.(states_new), Float32.(target_data); kwargs...) end @@ -47,10 +47,10 @@ end #predict dispatch function (reca::RECA)(prediction, output_layer::AbstractOutputLayer, - initial_conditions = output_layer.last_value, - last_state = zeros(reca.input_encoding.ca_size)) + initial_conditions=output_layer.last_value, + last_state=zeros(reca.input_encoding.ca_size)) return obtain_prediction(reca, prediction, last_state, output_layer; - initial_conditions = initial_conditions) + initial_conditions=initial_conditions) end function next_state_prediction!(reca::RECA, x, out, i, args...) 
diff --git a/src/reca/reca_input_encodings.jl b/src/reca/reca_input_encodings.jl old mode 100755 new mode 100644 index a26fb6f1..ba9897cb --- a/src/reca/reca_input_encodings.jl +++ b/src/reca/reca_input_encodings.jl @@ -19,11 +19,11 @@ The detail of this implementation can be found in [1]. [1] Nichele, Stefano, and Andreas Molund. “Deep reservoir computing using cellular automata.” arXiv preprint arXiv:1703.02806 (2017). """ -function RandomMapping(; permutations = 8, expansion_size = 40) +function RandomMapping(; permutations=8, expansion_size=40) RandomMapping(permutations, expansion_size) end -function RandomMapping(permutations; expansion_size = 40) +function RandomMapping(permutations; expansion_size=40) RandomMapping(permutations, expansion_size) end @@ -102,5 +102,5 @@ function init_maps(input_size, permutations, mapped_vector_size) end function mapping(input_size, mapped_vector_size) - return sample(1:mapped_vector_size, input_size, replace = false) + return sample(1:mapped_vector_size, input_size; replace=false) end diff --git a/src/states.jl b/src/states.jl old mode 100755 new mode 100644 index 1de7952a..d240ff34 --- a/src/states.jl +++ b/src/states.jl @@ -53,7 +53,7 @@ enhancing the dimensionality and potentially improving the performance of the re This function is particularly useful in scenarios where adding a constant baseline to the states is necessary for the desired computational task. """ -function PaddedStates(; padding = 1.0) +function PaddedStates(; padding=1.0) return PaddedStates(padding) end @@ -68,7 +68,7 @@ This function is particularly useful for enhancing the reservoir's state represe where both extended contextual information and consistent baseline padding are crucial for the computational effectiveness of the reservoir computing model. """ -function PaddedExtendedStates(; padding = 1.0) +function PaddedExtendedStates(; padding=1.0) return PaddedExtendedStates(padding) end diff --git a/src/train/linear_regression.jl b/src/train/linear_regression.jl old mode 100755 new mode 100644 diff --git a/test/esn/deepesn.jl b/test/esn/deepesn.jl old mode 100755 new mode 100644 index 38e06814..67b3e9e5 --- a/test/esn/deepesn.jl +++ b/test/esn/deepesn.jl @@ -12,7 +12,7 @@ const reg = 10e-6 #test_types = [Float64, Float32, Float16] Random.seed!(77) -res = rand_sparse(; radius = 1.2, sparsity = 0.1) +res = rand_sparse(; radius=1.2, sparsity=0.1) esn = DeepESN(input_data, 1, res_size) output_layer = train(esn, target_data) diff --git a/test/esn/test_drivers.jl b/test/esn/test_drivers.jl old mode 100755 new mode 100644 index 7ad47f74..d0225992 --- a/test/esn/test_drivers.jl +++ b/test/esn/test_drivers.jl @@ -15,25 +15,25 @@ function test_esn(input_data, target_data, training_method, esn_config) esn = ESN(input_data, 1, res_size; esn_config...) 
output_layer = train(esn, target_data, training_method) - output = esn(Predictive(target_data), output_layer, initial_conditions = target_data[1]) + output = esn(Predictive(target_data), output_layer; initial_conditions=target_data[1]) @test mean(abs.(target_data .- output)) ./ mean(abs.(target_data)) < 0.15 end esn_configs = [ - Dict(:reservoir => rand_sparse(; radius = 1.2), - :reservoir_driver => GRU(variant = FullyGated(), - reservoir = [ - rand_sparse(; radius = 1.0, sparsity = 0.5), - rand_sparse(; radius = 1.2, sparsity = 0.1) + Dict(:reservoir => rand_sparse(; radius=1.2), + :reservoir_driver => GRU(; variant=FullyGated(), + reservoir=[ + rand_sparse(; radius=1.0, sparsity=0.5), + rand_sparse(; radius=1.2, sparsity=0.1) ])), - Dict(:reservoir => rand_sparse(; radius = 1.2), - :reservoir_driver => GRU(variant = Minimal(), - reservoir = rand_sparse(; radius = 1.0, sparsity = 0.5), - inner_layer = scaled_rand, - bias = scaled_rand)), - Dict(:reservoir => rand_sparse(; radius = 1.2), - :reservoir_driver => MRNN(activation_function = (tanh, sigmoid), - scaling_factor = (0.8, 0.1))) + Dict(:reservoir => rand_sparse(; radius=1.2), + :reservoir_driver => GRU(; variant=Minimal(), + reservoir=rand_sparse(; radius=1.0, sparsity=0.5), + inner_layer=scaled_rand, + bias=scaled_rand)), + Dict(:reservoir => rand_sparse(; radius=1.2), + :reservoir_driver => MRNN(; activation_function=(tanh, sigmoid), + scaling_factor=(0.8, 0.1))) ] @testset "Test Drivers: $config" for config in esn_configs diff --git a/test/esn/test_hybrid.jl b/test/esn/test_hybrid.jl old mode 100755 new mode 100644 index 415b1343..8d14f27a --- a/test/esn/test_hybrid.jl +++ b/test/esn/test_hybrid.jl @@ -3,7 +3,7 @@ using ReservoirComputing, DifferentialEquations, Statistics, Random u0 = [1.0, 0.0, 0.0] tspan = (0.0, 1000.0) datasize = 100000 -tsteps = range(tspan[1], tspan[2], length = datasize) +tsteps = range(tspan[1], tspan[2]; length=datasize) function lorenz(du, u, p, t) p = [10.0, 28.0, 8 / 3] @@ -12,16 +12,16 @@ function lorenz(du, u, p, t) du[3] = u[1] * u[2] - p[3] * u[3] end -function prior_model_data_generator(u0, tspan, tsteps, model = lorenz) +function prior_model_data_generator(u0, tspan, tsteps, model=lorenz) prob = ODEProblem(lorenz, u0, tspan) - sol = Array(solve(prob, saveat = tsteps)) + sol = Array(solve(prob; saveat=tsteps)) return sol end train_len = 10000 ode_prob = ODEProblem(lorenz, u0, tspan) -ode_sol = solve(ode_prob, saveat = tsteps) +ode_sol = solve(ode_prob; saveat=tsteps) ode_data = Array(ode_sol) input_data = ode_data[:, 1:train_len] target_data = ode_data[:, 2:(train_len + 1)] @@ -37,7 +37,7 @@ hesn = HybridESN(km, input_data, 3, 300; - reservoir = rand_sparse) + reservoir=rand_sparse) output_layer = train(hesn, target_data, StandardRidge(0.3)) diff --git a/test/esn/test_inits.jl b/test/esn/test_inits.jl old mode 100755 new mode 100644 index 96ecf4b5..779fe5e3 --- a/test/esn/test_inits.jl +++ b/test/esn/test_inits.jl @@ -10,10 +10,10 @@ const weight = 0.2 const jump_size = 3 const rng = Random.default_rng() -function check_radius(matrix, target_radius; tolerance = 1e-5) +function check_radius(matrix, target_radius; tolerance=1e-5) eigenvalues = eigvals(matrix) spectral_radius = maximum(abs.(eigenvalues)) - return isapprox(spectral_radius, target_radius, atol = tolerance) + return isapprox(spectral_radius, target_radius; atol=tolerance) end ft = [Float16, Float32, Float64] @@ -29,7 +29,7 @@ input_inits = [ scaled_rand, weighted_init, minimal_init, - minimal_init(; sampling_type = :irrational) + 
minimal_init(; sampling_type=:irrational) ] @testset "Reservoir Initializers" begin @@ -81,7 +81,7 @@ end @testset "Minimum complexity: $init" for init in [ minimal_init, - minimal_init(; sampling_type = :irrational) + minimal_init(; sampling_type=:irrational) ] dl = init(res_size, in_size) @test sort(unique(dl)) == Float32.([-0.1, 0.1]) diff --git a/test/esn/test_train.jl b/test/esn/test_train.jl old mode 100755 new mode 100644 index 8b418280..737fe44f --- a/test/esn/test_train.jl +++ b/test/esn/test_train.jl @@ -12,15 +12,15 @@ const reg = 10e-6 #test_types = [Float64, Float32, Float16] Random.seed!(77) -res = rand_sparse(; radius = 1.2, sparsity = 0.1) +res = rand_sparse(; radius=1.2, sparsity=0.1) esn = ESN(input_data, 1, res_size; - reservoir = res) + reservoir=res) # different models that implement a train dispatch # TODO add classification -linear_training = [StandardRidge(0.0), LinearRegression(; fit_intercept = false), - RidgeRegression(; fit_intercept = false), LassoRegression(; fit_intercept = false), - ElasticNetRegression(; fit_intercept = false), HuberRegression(; fit_intercept = false), - QuantileRegression(; fit_intercept = false), LADRegression(; fit_intercept = false)] +linear_training = [StandardRidge(0.0), LinearRegression(; fit_intercept=false), + RidgeRegression(; fit_intercept=false), LassoRegression(; fit_intercept=false), + ElasticNetRegression(; fit_intercept=false), HuberRegression(; fit_intercept=false), + QuantileRegression(; fit_intercept=false), LADRegression(; fit_intercept=false)] svm_training = [EpsilonSVR(), NuSVR()] # TODO check types diff --git a/test/qa.jl b/test/qa.jl old mode 100755 new mode 100644 index b1105f55..e009e2f1 --- a/test/qa.jl +++ b/test/qa.jl @@ -1,7 +1,7 @@ using ReservoirComputing, Aqua @testset "Aqua" begin Aqua.find_persistent_tasks_deps(ReservoirComputing) - Aqua.test_ambiguities(ReservoirComputing, recursive = false) + Aqua.test_ambiguities(ReservoirComputing; recursive=false) Aqua.test_deps_compat(ReservoirComputing) Aqua.test_piracies(ReservoirComputing) Aqua.test_project_extras(ReservoirComputing) diff --git a/test/reca/test_predictive.jl b/test/reca/test_predictive.jl old mode 100755 new mode 100644 index 1fb682c9..a45f04c3 --- a/test/reca/test_predictive.jl +++ b/test/reca/test_predictive.jl @@ -6,8 +6,8 @@ const g = 6 const rule = 90 reca = RECA(input, DCA(rule); - generations = g, - input_encoding = RandomMapping(6, 10)) + generations=g, + input_encoding=RandomMapping(6, 10)) output_layer = train(reca, output, StandardRidge(0.001)) prediction = reca(Predictive(input), output_layer) @@ -15,6 +15,6 @@ final_pred = convert(AbstractArray{Int}, prediction .> 0.5) @test final_pred == output rm1 = RandomMapping(6, 10) -rm2 = RandomMapping(6, expansion_size = 10) -rm3 = RandomMapping(permutations = 6, expansion_size = 10) +rm2 = RandomMapping(6; expansion_size=10) +rm3 = RandomMapping(; permutations=6, expansion_size=10) @test rm1 == rm2 == rm3 diff --git a/test/runtests.jl b/test/runtests.jl old mode 100755 new mode 100644 diff --git a/test/test_states.jl b/test/test_states.jl old mode 100755 new mode 100644 index cd776715..2eaab5fb --- a/test/test_states.jl +++ b/test/test_states.jl @@ -11,9 +11,9 @@ nlas = [(NLADefault(), test_array), (NLAT3(), [1, 2, 8, 4, 24, 6, 48, 8, 9])] pes = [(StandardStates(), test_array), - (PaddedStates(padding = padding), + (PaddedStates(; padding=padding), reshape(vcat(padding, test_array), length(test_array) + 1, 1)), - (PaddedExtendedStates(padding = padding), + (PaddedExtendedStates(; 
padding=padding), reshape(vcat(padding, extension, test_array), length(test_array) + length(extension) + 1, 1)),