From 95683a1a099cc40799f560e1e2b6b5bad639ccc0 Mon Sep 17 00:00:00 2001 From: MartinuzziFrancesco Date: Mon, 31 Mar 2025 14:16:10 +0200 Subject: [PATCH] style: comply with sciml style --- .JuliaFormatter.toml | 8 +- docs/make.jl | 22 ++-- src/esn/deepesn.jl | 14 +-- src/esn/esn.jl | 20 ++-- src/esn/esn_inits.jl | 184 +++++++++++++++---------------- src/esn/esn_predict.jl | 8 +- src/esn/esn_reservoir_drivers.jl | 18 +-- src/esn/hybridesn.jl | 19 ++-- src/esn/inits_components.jl | 24 ++-- src/predict.jl | 2 +- src/reca/reca.jl | 16 +-- src/reca/reca_input_encodings.jl | 4 +- src/states.jl | 4 +- test/esn/deepesn.jl | 4 +- test/esn/test_drivers.jl | 28 ++--- test/esn/test_hybrid.jl | 10 +- test/esn/test_inits.jl | 10 +- test/esn/test_train.jl | 12 +- test/qa.jl | 2 +- test/reca/test_predictive.jl | 8 +- test/test_states.jl | 4 +- 21 files changed, 208 insertions(+), 213 deletions(-) diff --git a/.JuliaFormatter.toml b/.JuliaFormatter.toml index 31e2a89b..d4bdeeed 100644 --- a/.JuliaFormatter.toml +++ b/.JuliaFormatter.toml @@ -1,9 +1,3 @@ style = "sciml" format_markdown = false -whitespace_in_kwargs = false -margin = 92 -indent = 4 -format_docstrings = true -separate_kwargs_with_semicolon = true -always_for_in = true -annotate_untyped_fields_with_any = false \ No newline at end of file +format_docstrings = true \ No newline at end of file diff --git a/docs/make.jl b/docs/make.jl index 3d928d49..7bbd16ca 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -1,22 +1,22 @@ using Documenter, ReservoirComputing -cp("./docs/Manifest.toml", "./docs/src/assets/Manifest.toml"; force=true) -cp("./docs/Project.toml", "./docs/src/assets/Project.toml"; force=true) +cp("./docs/Manifest.toml", "./docs/src/assets/Manifest.toml"; force = true) +cp("./docs/Project.toml", "./docs/src/assets/Project.toml"; force = true) ENV["PLOTS_TEST"] = "true" ENV["GKSwstype"] = "100" include("pages.jl") mathengine = Documenter.MathJax() -makedocs(; modules=[ReservoirComputing], - sitename="ReservoirComputing.jl", - clean=true, doctest=false, linkcheck=true, - format=Documenter.HTML(; +makedocs(; modules = [ReservoirComputing], + sitename = "ReservoirComputing.jl", + clean = true, doctest = false, linkcheck = true, + format = Documenter.HTML(; mathengine, - assets=["assets/favicon.ico"], - canonical="https://docs.sciml.ai/ReservoirComputing/stable/"), - pages=pages + assets = ["assets/favicon.ico"], + canonical = "https://docs.sciml.ai/ReservoirComputing/stable/"), + pages = pages ) -deploydocs(; repo="github.com/SciML/ReservoirComputing.jl.git", - push_preview=true) +deploydocs(; repo = "github.com/SciML/ReservoirComputing.jl.git", + push_preview = true) diff --git a/src/esn/deepesn.jl b/src/esn/deepesn.jl index ed599226..7d2adf95 100644 --- a/src/esn/deepesn.jl +++ b/src/esn/deepesn.jl @@ -59,15 +59,15 @@ enhanced by the depth provided by multiple reservoir layers. 
train_data = rand(Float32, 3, 100) # Create a DeepESN with specific parameters -deepESN = DeepESN(train_data, 3, 100; depth=3, washout=100) +deepESN = DeepESN(train_data, 3, 100; depth = 3, washout = 100) ``` """ -function DeepESN(train_data::AbstractArray, in_size::Int, res_size::Int; depth::Int=2, - input_layer=fill(scaled_rand, depth), bias=fill(zeros32, depth), - reservoir=fill(rand_sparse, depth), reservoir_driver::AbstractDriver=RNN(), - nla_type::NonLinearAlgorithm=NLADefault(), - states_type::AbstractStates=StandardStates(), washout::Int=0, - rng::AbstractRNG=Utils.default_rng(), matrix_type=typeof(train_data)) +function DeepESN(train_data::AbstractArray, in_size::Int, res_size::Int; depth::Int = 2, + input_layer = fill(scaled_rand, depth), bias = fill(zeros32, depth), + reservoir = fill(rand_sparse, depth), reservoir_driver::AbstractDriver = RNN(), + nla_type::NonLinearAlgorithm = NLADefault(), + states_type::AbstractStates = StandardStates(), washout::Int = 0, + rng::AbstractRNG = Utils.default_rng(), matrix_type = typeof(train_data)) if states_type isa AbstractPaddedStates in_size = size(train_data, 1) + 1 train_data = vcat(adapt(matrix_type, ones(1, size(train_data, 2))), diff --git a/src/esn/esn.jl b/src/esn/esn.jl index 8d1854b0..e443510d 100644 --- a/src/esn/esn.jl +++ b/src/esn/esn.jl @@ -50,17 +50,17 @@ julia> train_data = rand(Float32, 10, 100) # 10 features, 100 time steps 0.4463 0.334423 0.444679 0.311695 0.0494497 0.27171 0.214925 0.987182 0.898593 0.295241 0.233098 0.789699 0.453692 0.759205 -julia> esn = ESN(train_data, 10, 300; washout=10) +julia> esn = ESN(train_data, 10, 300; washout = 10) ESN(10 => 300) ``` """ function ESN(train_data::AbstractArray, in_size::Int, res_size::Int; - input_layer=scaled_rand, reservoir=rand_sparse, bias=zeros32, - reservoir_driver::AbstractDriver=RNN(), - nla_type::NonLinearAlgorithm=NLADefault(), - states_type::AbstractStates=StandardStates(), - washout::Int=0, rng::AbstractRNG=Utils.default_rng(), - matrix_type=typeof(train_data)) + input_layer = scaled_rand, reservoir = rand_sparse, bias = zeros32, + reservoir_driver::AbstractDriver = RNN(), + nla_type::NonLinearAlgorithm = NLADefault(), + states_type::AbstractStates = StandardStates(), + washout::Int = 0, rng::AbstractRNG = Utils.default_rng(), + matrix_type = typeof(train_data)) if states_type isa AbstractPaddedStates in_size = size(train_data, 1) + 1 train_data = vcat(adapt(matrix_type, ones(1, size(train_data, 2))), @@ -82,7 +82,7 @@ function ESN(train_data::AbstractArray, in_size::Int, res_size::Int; end function (esn::AbstractEchoStateNetwork)(prediction::AbstractPrediction, - output_layer::AbstractOutputLayer; last_state=esn.states[:, [end]], + output_layer::AbstractOutputLayer; last_state = esn.states[:, [end]], kwargs...) return obtain_esn_prediction(esn, prediction, last_state, output_layer; kwargs...) @@ -120,7 +120,7 @@ julia> train_data = rand(Float32, 10, 100) # 10 features, 100 time steps 0.133498 0.451058 0.0761995 0.90421 0.994212 0.332164 0.545112 0.214467 0.791524 0.124105 0.951805 0.947166 0.954244 0.889733 -julia> esn = ESN(train_data, 10, 300; washout=10) +julia> esn = ESN(train_data, 10, 300; washout = 10) ESN(10 => 300) julia> output_layer = train(esn, rand(Float32, 3, 90)) @@ -128,7 +128,7 @@ OutputLayer successfully trained with output size: 3 ``` """ function train(esn::AbstractEchoStateNetwork, target_data::AbstractArray, - training_method=StandardRidge(); kwargs...) + training_method = StandardRidge(); kwargs...) 
states_new = esn.states_type(esn.nla_type, esn.states, esn.train_data[:, 1:end]) return train(training_method, states_new, target_data; kwargs...) end diff --git a/src/esn/esn_inits.jl b/src/esn/esn_inits.jl index fdcd96de..de1f2713 100644 --- a/src/esn/esn_inits.jl +++ b/src/esn/esn_inits.jl @@ -36,7 +36,7 @@ julia> res_input = scaled_rand(8, 3) ``` """ function scaled_rand(rng::AbstractRNG, ::Type{T}, dims::Integer...; - scaling::Number=T(0.1)) where {T <: Number} + scaling::Number = T(0.1)) where {T <: Number} res_size, in_size = dims layer_matrix = (DeviceAgnostic.rand(rng, T, res_size, in_size) .- T(0.5)) .* (T(2) * T(scaling)) @@ -85,7 +85,7 @@ julia> res_input = weighted_init(8, 3) Chaos: An Interdisciplinary Journal of Nonlinear Science 27.4 (2017): 041102. """ function weighted_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; - scaling::Number=T(0.1), return_sparse::Bool=false) where {T <: Number} + scaling::Number = T(0.1), return_sparse::Bool = false) where {T <: Number} throw_sparse_error(return_sparse) approx_res_size, in_size = dims res_size = Int(floor(approx_res_size / in_size) * in_size) @@ -164,7 +164,7 @@ julia> res_input = weighted_minimal(8, 3) 0.0 0.0 0.1 0.0 0.0 0.1 -julia> res_input = weighted_minimal(9, 3; weight=0.99) +julia> res_input = weighted_minimal(9, 3; weight = 0.99) 9×3 Matrix{Float32}: 0.99 0.0 0.0 0.99 0.0 0.0 @@ -176,7 +176,7 @@ julia> res_input = weighted_minimal(9, 3; weight=0.99) 0.0 0.0 0.99 0.0 0.0 0.99 -julia> res_input = weighted_minimal(9, 3; sampling_type=:bernoulli_sample!) +julia> res_input = weighted_minimal(9, 3; sampling_type = :bernoulli_sample!) 9×3 Matrix{Float32}: 0.1 -0.0 -0.0 -0.1 -0.0 -0.0 @@ -195,8 +195,8 @@ julia> res_input = weighted_minimal(9, 3; sampling_type=:bernoulli_sample!) Chaos: An Interdisciplinary Journal of Nonlinear Science 27.4 (2017): 041102. """ function weighted_minimal(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight::Number=T(0.1), return_sparse::Bool=false, - sampling_type=:no_sample, kwargs...) where {T <: Number} + weight::Number = T(0.1), return_sparse::Bool = false, + sampling_type = :no_sample, kwargs...) where {T <: Number} throw_sparse_error(return_sparse) approx_res_size, in_size = dims res_size = Int(floor(approx_res_size / in_size) * in_size) @@ -240,8 +240,8 @@ Create an input layer for informed echo state networks [^pathak2018]. Chaos: An Interdisciplinary Journal of Nonlinear Science 28.4 (2018). 
""" function informed_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; - scaling::Number=T(0.1), model_in_size::Integer, - gamma::Number=T(0.5)) where {T <: Number} + scaling::Number = T(0.1), model_in_size::Integer, + gamma::Number = T(0.5)) where {T <: Number} res_size, in_size = dims state_size = in_size - model_in_size @@ -258,7 +258,7 @@ function informed_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; idxs = findall(Bool[zero_connections .== input_matrix[jdx, :] for jdx in axes(input_matrix, 1)]) random_row_idx = idxs[DeviceAgnostic.rand(rng, T, 1:end)] - random_clm_idx = range(1, state_size; step=1)[DeviceAgnostic.rand(rng, T, 1:end)] + random_clm_idx = range(1, state_size; step = 1)[DeviceAgnostic.rand(rng, T, 1:end)] input_matrix[random_row_idx, random_clm_idx] = (DeviceAgnostic.rand(rng, T) - T(0.5)) .* (T(2) * T(scaling)) end @@ -267,7 +267,7 @@ function informed_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; idxs = findall(Bool[zero_connections .== input_matrix[jdx, :] for jdx in axes(input_matrix, 1)]) random_row_idx = idxs[DeviceAgnostic.rand(rng, T, 1:end)] - random_clm_idx = range(state_size + 1, in_size; step=1)[DeviceAgnostic.rand( + random_clm_idx = range(state_size + 1, in_size; step = 1)[DeviceAgnostic.rand( rng, T, 1:end)] input_matrix[random_row_idx, random_clm_idx] = (DeviceAgnostic.rand(rng, T) - T(0.5)) .* (T(2) * T(scaling)) @@ -325,7 +325,7 @@ julia> res_input = minimal_init(8, 3) -0.1 -0.1 0.1 0.1 -0.1 0.1 -julia> res_input = minimal_init(8, 3; sampling_type=:irrational) +julia> res_input = minimal_init(8, 3; sampling_type = :irrational) 8×3 Matrix{Float32}: -0.1 0.1 -0.1 0.1 -0.1 -0.1 @@ -336,7 +336,7 @@ julia> res_input = minimal_init(8, 3; sampling_type=:irrational) 0.1 0.1 -0.1 -0.1 0.1 -0.1 -julia> res_input = minimal_init(8, 3; p=0.1) # lower p -> more negative signs +julia> res_input = minimal_init(8, 3; p = 0.1) # lower p -> more negative signs 8×3 Matrix{Float32}: -0.1 -0.1 -0.1 -0.1 -0.1 -0.1 @@ -347,7 +347,7 @@ julia> res_input = minimal_init(8, 3; p=0.1) # lower p -> more negative signs -0.1 -0.1 -0.1 -0.1 -0.1 -0.1 -julia> res_input = minimal_init(8, 3; p=0.8)# higher p -> more positive signs +julia> res_input = minimal_init(8, 3; p = 0.8)# higher p -> more positive signs 8×3 Matrix{Float32}: 0.1 0.1 0.1 -0.1 0.1 0.1 @@ -364,7 +364,7 @@ julia> res_input = minimal_init(8, 3; p=0.8)# higher p -> more positive signs IEEE transactions on neural networks 22.1 (2010): 131-144. """ function minimal_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight::Number=T(0.1), sampling_type::Symbol=:bernoulli_sample!, + weight::Number = T(0.1), sampling_type::Symbol = :bernoulli_sample!, kwargs...) where {T <: Number} res_size, in_size = dims input_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size) @@ -438,9 +438,9 @@ julia> input_matrix = chebyshev_mapping(10, 3) Neural Processing Letters 56.1 (2024): 30. """ function chebyshev_mapping(rng::AbstractRNG, ::Type{T}, dims::Integer...; - amplitude::AbstractFloat=one(T), sine_divisor::AbstractFloat=one(T), - chebyshev_parameter::AbstractFloat=one(T), - return_sparse::Bool=false) where {T <: Number} + amplitude::AbstractFloat = one(T), sine_divisor::AbstractFloat = one(T), + chebyshev_parameter::AbstractFloat = one(T), + return_sparse::Bool = false) where {T <: Number} throw_sparse_error(return_sparse) input_matrix = DeviceAgnostic.zeros(rng, T, dims...) n_rows, n_cols = dims[1], dims[2] @@ -518,9 +518,9 @@ julia> logistic_mapping(8, 3) Neurocomputing 489 (2022): 196-210. 
""" function logistic_mapping(rng::AbstractRNG, ::Type{T}, dims::Integer...; - amplitude::AbstractFloat=0.3, sine_divisor::AbstractFloat=5.9, - logistic_parameter::AbstractFloat=3.7, - return_sparse::Bool=false) where {T <: Number} + amplitude::AbstractFloat = 0.3, sine_divisor::AbstractFloat = 5.9, + logistic_parameter::AbstractFloat = 3.7, + return_sparse::Bool = false) where {T <: Number} throw_sparse_error(return_sparse) input_matrix = DeviceAgnostic.zeros(rng, T, dims...) num_rows, num_columns = dims[1], dims[2] @@ -620,9 +620,9 @@ julia> modified_lm(12, 4; factor=3) arXiv preprint arXiv:2501.15615 (2025). """ function modified_lm(rng::AbstractRNG, ::Type{T}, dims::Integer...; - factor::Integer, amplitude::AbstractFloat=0.3, - sine_divisor::AbstractFloat=5.9, logistic_parameter::AbstractFloat=2.35, - return_sparse::Bool=false) where {T <: Number} + factor::Integer, amplitude::AbstractFloat = 0.3, + sine_divisor::AbstractFloat = 5.9, logistic_parameter::AbstractFloat = 2.35, + return_sparse::Bool = false) where {T <: Number} throw_sparse_error(return_sparse) num_columns = dims[2] expected_num_rows = factor * num_columns @@ -678,7 +678,7 @@ and scaled spectral radius according to `radius`. # Examples ```jldoctest -julia> res_matrix = rand_sparse(5, 5; sparsity=0.5) +julia> res_matrix = rand_sparse(5, 5; sparsity = 0.5) 5×5 Matrix{Float32}: 0.0 0.0 0.0 0.0 0.0 0.0 0.794565 0.0 0.26164 0.0 @@ -688,12 +688,12 @@ julia> res_matrix = rand_sparse(5, 5; sparsity=0.5) ``` """ function rand_sparse(rng::AbstractRNG, ::Type{T}, dims::Integer...; - radius::Number=T(1.0), sparsity::Number=T(0.1), std::Number=T(1.0), - return_sparse::Bool=false) where {T <: Number} + radius::Number = T(1.0), sparsity::Number = T(0.1), std::Number = T(1.0), + return_sparse::Bool = false) where {T <: Number} throw_sparse_error(return_sparse) check_res_size(dims...) lcl_sparsity = T(1) - sparsity #consistency with current implementations - reservoir_matrix = sparse_init(rng, T, dims...; sparsity=lcl_sparsity, std=std) + reservoir_matrix = sparse_init(rng, T, dims...; sparsity = lcl_sparsity, std = std) reservoir_matrix = scale_radius!(reservoir_matrix, T(radius)) return return_init_as(Val(return_sparse), reservoir_matrix) end @@ -747,15 +747,15 @@ julia> res_matrix = pseudo_svd(5, 5) Neurocomputing 290 (2018): 148-160. """ function pseudo_svd(rng::AbstractRNG, ::Type{T}, dims::Integer...; - max_value::Number=T(1.0), sparsity::Number=0.1, sorted::Bool=true, - reverse_sort::Bool=false, return_sparse::Bool=false, - return_diag::Bool=false) where {T <: Number} + max_value::Number = T(1.0), sparsity::Number = 0.1, sorted::Bool = true, + reverse_sort::Bool = false, return_sparse::Bool = false, + return_diag::Bool = false) where {T <: Number} throw_sparse_error(return_sparse) check_res_size(dims...) 
reservoir_matrix = create_diag(rng, T, dims[1], T(max_value); - sorted=sorted, - reverse_sort=reverse_sort) + sorted = sorted, + reverse_sort = reverse_sort) tmp_sparsity = get_sparsity(reservoir_matrix, dims[1]) while tmp_sparsity <= sparsity @@ -779,12 +779,12 @@ function rand_range(rng, T, n::Int) end function create_diag(rng::AbstractRNG, ::Type{T}, dim::Number, max_value::Number; - sorted::Bool=true, reverse_sort::Bool=false) where {T <: Number} + sorted::Bool = true, reverse_sort::Bool = false) where {T <: Number} diagonal_matrix = DeviceAgnostic.zeros(rng, T, dim, dim) if sorted == true if reverse_sort == true diagonal_values = sort( - DeviceAgnostic.rand(rng, T, dim) .* max_value; rev=true) + DeviceAgnostic.rand(rng, T, dim) .* max_value; rev = true) diagonal_values[1] = max_value else diagonal_values = sort(DeviceAgnostic.rand(rng, T, dim) .* max_value) @@ -873,8 +873,8 @@ julia> res_matrix = chaotic_init(8, 8) Neural Processing Letters 56.1 (2024): 30. """ function chaotic_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; - extra_edge_probability::AbstractFloat=T(0.1), spectral_radius::AbstractFloat=one(T), - return_sparse::Bool=false) where {T <: Number} + extra_edge_probability::AbstractFloat = T(0.1), spectral_radius::AbstractFloat = one(T), + return_sparse::Bool = false) where {T <: Number} throw_sparse_error(return_sparse) check_res_size(dims...) requested_order = first(dims) @@ -902,7 +902,7 @@ function chaotic_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; random_weight_matrix = T(2) * rand(rng, T, actual_matrix_order, actual_matrix_order) .- T(1) adjacency_matrix = digital_chaotic_adjacency( - rng, chosen_bit_precision; extra_edge_probability=extra_edge_probability) + rng, chosen_bit_precision; extra_edge_probability = extra_edge_probability) reservoir_matrix = random_weight_matrix .* adjacency_matrix current_spectral_radius = maximum(abs, eigvals(reservoir_matrix)) if current_spectral_radius != 0 @@ -913,7 +913,7 @@ function chaotic_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; end function digital_chaotic_adjacency(rng::AbstractRNG, bit_precision::Integer; - extra_edge_probability::AbstractFloat=0.1) + extra_edge_probability::AbstractFloat = 0.1) matrix_order = 2^(2 * bit_precision) adjacency_matrix = DeviceAgnostic.zeros(rng, Int, matrix_order, matrix_order) for row_index in 1:(matrix_order - 1) @@ -967,8 +967,8 @@ otherwise, it generates a random connectivity pattern. Chaos: An Interdisciplinary Journal of Nonlinear Science 29.12 (2019). """ function low_connectivity(rng::AbstractRNG, ::Type{T}, dims::Integer...; - return_sparse::Bool=false, connected::Bool=false, - in_degree::Integer=1, kwargs...) where {T <: Number} + return_sparse::Bool = false, connected::Bool = false, + in_degree::Integer = 1, kwargs...) where {T <: Number} check_res_size(dims...) res_size = dims[1] if in_degree > res_size @@ -978,17 +978,17 @@ function low_connectivity(rng::AbstractRNG, ::Type{T}, dims::Integer...; end if in_degree == 1 reservoir_matrix = build_cycle( - Val(connected), rng, T, res_size; in_degree=in_degree, kwargs...) + Val(connected), rng, T, res_size; in_degree = in_degree, kwargs...) else reservoir_matrix = build_cycle( - Val(false), rng, T, res_size; in_degree=in_degree, kwargs...) + Val(false), rng, T, res_size; in_degree = in_degree, kwargs...) 
end return return_init_as(Val(return_sparse), reservoir_matrix) end function build_cycle(::Val{false}, rng::AbstractRNG, ::Type{T}, res_size::Int; - in_degree::Integer=1, radius::Number=T(1.0), cut_cycle::Bool=false) where {T <: - Number} + in_degree::Integer = 1, radius::Number = T(1.0), cut_cycle::Bool = false) where {T <: + Number} reservoir_matrix = DeviceAgnostic.zeros(rng, T, res_size, res_size) for idx in 1:res_size selected = randperm(rng, res_size)[1:in_degree] @@ -1001,8 +1001,8 @@ function build_cycle(::Val{false}, rng::AbstractRNG, ::Type{T}, res_size::Int; end function build_cycle(::Val{true}, rng::AbstractRNG, ::Type{T}, res_size::Int; - in_degree::Integer=1, radius::Number=T(1.0), cut_cycle::Bool=false) where {T <: - Number} + in_degree::Integer = 1, radius::Number = T(1.0), cut_cycle::Bool = false) where {T <: + Number} reservoir_matrix = DeviceAgnostic.zeros(rng, T, res_size, res_size) perm = randperm(rng, res_size) for idx in 1:(res_size - 1) @@ -1081,7 +1081,7 @@ julia> res_matrix = delay_line(5, 5) 0.0 0.0 0.1 0.0 0.0 0.0 0.0 0.0 0.1 0.0 -julia> res_matrix = delay_line(5, 5; weight=1) +julia> res_matrix = delay_line(5, 5; weight = 1) 5×5 Matrix{Float32}: 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 @@ -1095,8 +1095,8 @@ julia> res_matrix = delay_line(5, 5; weight=1) IEEE transactions on neural networks 22.1 (2010): 131-144. """ function delay_line(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight::Union{Number, AbstractVector}=T(0.1), shift::Integer=1, - return_sparse::Bool=false, kwargs...) where {T <: Number} + weight::Union{Number, AbstractVector} = T(0.1), shift::Integer = 1, + return_sparse::Bool = false, kwargs...) where {T <: Number} throw_sparse_error(return_sparse) check_res_size(dims...) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) @@ -1183,11 +1183,11 @@ julia> res_matrix = delay_line_backward(Float16, 5, 5) IEEE transactions on neural networks 22.1 (2010): 131-144. """ function delay_line_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight::Union{Number, AbstractVector}=T(0.1), - fb_weight::Union{Number, AbstractVector}=T(0.2), shift::Integer=1, - fb_shift::Integer=1, return_sparse::Bool=false, - delay_kwargs::NamedTuple=NamedTuple(), - fb_kwargs::NamedTuple=NamedTuple()) where {T <: Number} + weight::Union{Number, AbstractVector} = T(0.1), + fb_weight::Union{Number, AbstractVector} = T(0.2), shift::Integer = 1, + fb_shift::Integer = 1, return_sparse::Bool = false, + delay_kwargs::NamedTuple = NamedTuple(), + fb_kwargs::NamedTuple = NamedTuple()) where {T <: Number} throw_sparse_error(return_sparse) check_res_size(dims...) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) @@ -1258,7 +1258,7 @@ julia> res_matrix = cycle_jumps(5, 5) 0.1 0.0 0.1 0.0 0.0 0.0 0.0 0.0 0.1 0.0 -julia> res_matrix = cycle_jumps(5, 5; jump_size=2) +julia> res_matrix = cycle_jumps(5, 5; jump_size = 2) 5×5 Matrix{Float32}: 0.0 0.0 0.1 0.0 0.1 0.1 0.0 0.0 0.0 0.0 @@ -1272,11 +1272,11 @@ julia> res_matrix = cycle_jumps(5, 5; jump_size=2) Neural computation 24.7 (2012): 1822-1852. 
""" function cycle_jumps(rng::AbstractRNG, ::Type{T}, dims::Integer...; - cycle_weight::Union{Number, AbstractVector}=T(0.1), - jump_weight::Union{Number, AbstractVector}=T(0.1), - jump_size::Integer=3, return_sparse::Bool=false, - cycle_kwargs::NamedTuple=NamedTuple(), - jump_kwargs::NamedTuple=NamedTuple()) where {T <: Number} + cycle_weight::Union{Number, AbstractVector} = T(0.1), + jump_weight::Union{Number, AbstractVector} = T(0.1), + jump_size::Integer = 3, return_sparse::Bool = false, + cycle_kwargs::NamedTuple = NamedTuple(), + jump_kwargs::NamedTuple = NamedTuple()) where {T <: Number} throw_sparse_error(return_sparse) check_res_size(dims...) res_size = first(dims) @@ -1336,7 +1336,7 @@ julia> res_matrix = simple_cycle(5, 5) 0.0 0.0 0.1 0.0 0.0 0.0 0.0 0.0 0.1 0.0 -julia> res_matrix = simple_cycle(5, 5; weight=11) +julia> res_matrix = simple_cycle(5, 5; weight = 11) 5×5 Matrix{Float32}: 0.0 0.0 0.0 0.0 11.0 11.0 0.0 0.0 0.0 0.0 @@ -1350,8 +1350,8 @@ julia> res_matrix = simple_cycle(5, 5; weight=11) IEEE transactions on neural networks 22.1 (2010): 131-144. """ function simple_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight::Union{Number, AbstractVector}=T(0.1), - return_sparse::Bool=false, kwargs...) where {T <: Number} + weight::Union{Number, AbstractVector} = T(0.1), + return_sparse::Bool = false, kwargs...) where {T <: Number} throw_sparse_error(return_sparse) check_res_size(dims...) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) @@ -1385,7 +1385,7 @@ Creates a double cycle reservoir [^fu2023]. # Examples ```jldoctest -julia> reservoir_matrix = double_cycle(5, 5; cycle_weight=0.1, second_cycle_weight=0.3) +julia> reservoir_matrix = double_cycle(5, 5; cycle_weight = 0.1, second_cycle_weight = 0.3) 5×5 Matrix{Float32}: 0.0 0.3 0.0 0.0 0.3 0.1 0.0 0.3 0.0 0.0 @@ -1399,9 +1399,9 @@ julia> reservoir_matrix = double_cycle(5, 5; cycle_weight=0.1, second_cycle_weig Chaos: An Interdisciplinary Journal of Nonlinear Science 33.9 (2023). """ function double_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; - cycle_weight::Union{Number, AbstractVector}=T(0.1), - second_cycle_weight::Union{Number, AbstractVector}=T(0.1), - return_sparse::Bool=false) where {T <: Number} + cycle_weight::Union{Number, AbstractVector} = T(0.1), + second_cycle_weight::Union{Number, AbstractVector} = T(0.1), + return_sparse::Bool = false) where {T <: Number} throw_sparse_error(return_sparse) check_res_size(dims...) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) @@ -1464,7 +1464,7 @@ with cycles built on the definition by [^rodan2010]. # Examples ```jldoctest -julia> true_double_cycle(5, 5; cycle_weight=0.1, second_cycle_weight=0.3) +julia> true_double_cycle(5, 5; cycle_weight = 0.1, second_cycle_weight = 0.3) 5×5 Matrix{Float32}: 0.0 0.3 0.0 0.0 0.1 0.1 0.0 0.3 0.0 0.0 @@ -1481,10 +1481,10 @@ julia> true_double_cycle(5, 5; cycle_weight=0.1, second_cycle_weight=0.3) IEEE transactions on neural networks 22.1 (2010): 131-144. 
""" function true_double_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; - cycle_weight::Union{Number, AbstractVector}=T(0.1), - second_cycle_weight::Union{Number, AbstractVector}=T(0.1), - return_sparse::Bool=false, cycle_kwargs::NamedTuple=NamedTuple(), - second_cycle_kwargs::NamedTuple=NamedTuple()) where {T <: Number} + cycle_weight::Union{Number, AbstractVector} = T(0.1), + second_cycle_weight::Union{Number, AbstractVector} = T(0.1), + return_sparse::Bool = false, cycle_kwargs::NamedTuple = NamedTuple(), + second_cycle_kwargs::NamedTuple = NamedTuple()) where {T <: Number} throw_sparse_error(return_sparse) check_res_size(dims...) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) @@ -1577,13 +1577,13 @@ julia> reservoir_matrix = selfloop_cycle(5, 5; weight=0.2, selfloop_weight=0.5) International Journal of Computational Science and Engineering 19.3 (2019): 407-417. """ function selfloop_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; - cycle_weight::Union{Number, AbstractVector}=T(0.1f0), - selfloop_weight::Union{Number, AbstractVector}=T(0.1f0), - return_sparse::Bool=false, kwargs...) where {T <: Number} + cycle_weight::Union{Number, AbstractVector} = T(0.1f0), + selfloop_weight::Union{Number, AbstractVector} = T(0.1f0), + return_sparse::Bool = false, kwargs...) where {T <: Number} throw_sparse_error(return_sparse) check_res_size(dims...) reservoir_matrix = simple_cycle(rng, T, dims...; - weight=T.(cycle_weight), return_sparse=false) + weight = T.(cycle_weight), return_sparse = false) self_loop!(rng, reservoir_matrix, T.(selfloop_weight); kwargs...) return return_init_as(Val(return_sparse), reservoir_matrix) end @@ -1655,13 +1655,13 @@ julia> reservoir_matrix = selfloop_feedback_cycle(5, 5; self_loop_weight=0.5) International Journal of Computational Science and Engineering 19.3 (2019): 407-417. """ function selfloop_feedback_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; - cycle_weight::Union{Number, AbstractVector}=T(0.1f0), - selfloop_weight::Union{Number, AbstractVector}=T(0.1f0), - return_sparse::Bool=false) where {T <: Number} + cycle_weight::Union{Number, AbstractVector} = T(0.1f0), + selfloop_weight::Union{Number, AbstractVector} = T(0.1f0), + return_sparse::Bool = false) where {T <: Number} throw_sparse_error(return_sparse) check_res_size(dims...) reservoir_matrix = simple_cycle(rng, T, dims...; - weight=T.(cycle_weight), return_sparse=false) + weight = T.(cycle_weight), return_sparse = false) for idx in axes(reservoir_matrix, 1) if isodd(idx) reservoir_matrix[idx, idx] = T.(selfloop_weight) @@ -1769,13 +1769,13 @@ julia> reservoir_matrix = selfloop_delayline_backward(5, 5; weight=0.3) International Journal of Computational Science and Engineering 19.3 (2019): 407-417. 
""" function selfloop_delayline_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...; - shift::Integer=1, fb_shift::Integer=2, - weight::Union{Number, AbstractVector}=T(0.1f0), - fb_weight::Union{Number, AbstractVector}=weight, - selfloop_weight::Union{Number, AbstractVector}=T(0.1f0), - return_sparse::Bool=false, delay_kwargs::NamedTuple=NamedTuple(), - fb_kwargs::NamedTuple=NamedTuple(), - selfloop_kwargs::NamedTuple=NamedTuple()) where {T <: Number} + shift::Integer = 1, fb_shift::Integer = 2, + weight::Union{Number, AbstractVector} = T(0.1f0), + fb_weight::Union{Number, AbstractVector} = weight, + selfloop_weight::Union{Number, AbstractVector} = T(0.1f0), + return_sparse::Bool = false, delay_kwargs::NamedTuple = NamedTuple(), + fb_kwargs::NamedTuple = NamedTuple(), + selfloop_kwargs::NamedTuple = NamedTuple()) where {T <: Number} throw_sparse_error(return_sparse) check_res_size(dims...) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) @@ -1871,10 +1871,10 @@ julia> reservoir_matrix = selfloop_forward_connection(5, 5; weight=0.5) International Journal of Computational Science and Engineering 19.3 (2019): 407-417. """ function selfloop_forward_connection(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight::Union{Number, AbstractVector}=T(0.1f0), - selfloop_weight::Union{Number, AbstractVector}=T(0.1f0), shift::Integer=2, - return_sparse::Bool=false, delay_kwargs::NamedTuple=NamedTuple(), - selfloop_kwargs::NamedTuple=NamedTuple()) where {T <: Number} + weight::Union{Number, AbstractVector} = T(0.1f0), + selfloop_weight::Union{Number, AbstractVector} = T(0.1f0), shift::Integer = 2, + return_sparse::Bool = false, delay_kwargs::NamedTuple = NamedTuple(), + selfloop_kwargs::NamedTuple = NamedTuple()) where {T <: Number} throw_sparse_error(return_sparse) check_res_size(dims...) reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) @@ -1959,7 +1959,7 @@ julia> reservoir_matrix = forward_connection(5, 5; weight=0.5) International Journal of Computational Science and Engineering 19.3 (2019): 407-417. """ function forward_connection(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight::Union{Number, AbstractVector}=T(0.1f0), return_sparse::Bool=false, + weight::Union{Number, AbstractVector} = T(0.1f0), return_sparse::Bool = false, kwargs...) where {T <: Number} throw_sparse_error(return_sparse) check_res_size(dims...) diff --git a/src/esn/esn_predict.jl b/src/esn/esn_predict.jl index dc61c3c4..47636050 100644 --- a/src/esn/esn_predict.jl +++ b/src/esn/esn_predict.jl @@ -3,8 +3,8 @@ function obtain_esn_prediction(esn, x, output_layer, args...; - initial_conditions=output_layer.last_value, - save_states=false) + initial_conditions = output_layer.last_value, + save_states = false) out_size = output_layer.out_size training_method = output_layer.training_method prediction_len = prediction.prediction_len @@ -33,8 +33,8 @@ function obtain_esn_prediction(esn, x, output_layer, args...; - initial_conditions=output_layer.last_value, - save_states=false) + initial_conditions = output_layer.last_value, + save_states = false) out_size = output_layer.out_size training_method = output_layer.training_method prediction_len = prediction.prediction_len diff --git a/src/esn/esn_reservoir_drivers.jl b/src/esn/esn_reservoir_drivers.jl index e74f8f8c..f16e849a 100644 --- a/src/esn/esn_reservoir_drivers.jl +++ b/src/esn/esn_reservoir_drivers.jl @@ -100,7 +100,7 @@ echo state networks (`ESN`). - `leaky_coefficient`: The leaky coefficient used in the RNN. Defaults to 1.0. 
""" -function RNN(; activation_function=fast_act(tanh), leaky_coefficient=1.0) +function RNN(; activation_function = fast_act(tanh), leaky_coefficient = 1.0) return RNN(activation_function, leaky_coefficient) end @@ -178,9 +178,9 @@ in the ESN. "_A novel model of leaky integrator echo state network for time-series prediction._" Neurocomputing 159 (2015): 58-66. """ -function MRNN(; activation_function=[tanh, sigmoid], - leaky_coefficient=1.0, - scaling_factor=fill(leaky_coefficient, length(activation_function))) +function MRNN(; activation_function = [tanh, sigmoid], + leaky_coefficient = 1.0, + scaling_factor = fill(leaky_coefficient, length(activation_function))) @assert length(activation_function) == length(scaling_factor) return MRNN(activation_function, leaky_coefficient, scaling_factor) end @@ -272,11 +272,11 @@ This driver is based on the GRU architecture [^Cho2014]. "_Learning phrase representations using RNN encoder-decoder for statistical machine translation._" arXiv preprint arXiv:1406.1078 (2014). """ -function GRU(; activation_function=[sigmoid, sigmoid, tanh], - inner_layer=fill(scaled_rand, 2), - reservoir=fill(rand_sparse, 2), - bias=fill(scaled_rand, 2), - variant=FullyGated()) +function GRU(; activation_function = [sigmoid, sigmoid, tanh], + inner_layer = fill(scaled_rand, 2), + reservoir = fill(rand_sparse, 2), + bias = fill(scaled_rand, 2), + variant = FullyGated()) return GRU(activation_function, inner_layer, reservoir, bias, variant) end diff --git a/src/esn/hybridesn.jl b/src/esn/hybridesn.jl index 0c0a945e..a065e738 100644 --- a/src/esn/hybridesn.jl +++ b/src/esn/hybridesn.jl @@ -41,7 +41,7 @@ integrating a knowledge-based model (`prior_model`) with ESNs. Using Machine Learning in Conjunction with a Knowledge-Based Model" (2018). """ function KnowledgeModel(prior_model, u0, tspan, datasize) - trange = collect(range(tspan[1], tspan[2]; length=datasize)) + trange = collect(range(tspan[1], tspan[2]; length = datasize)) dt = trange[2] - trange[1] tsteps = push!(trange, dt + trange[end]) tspan_new = (tspan[1], dt + tspan[2]) @@ -94,12 +94,12 @@ traditional Echo State Networks with a predefined knowledge model [^Pathak2018]. Using Machine Learning in Conjunction with a Knowledge-Based Model" (2018). """ function HybridESN(model::KnowledgeModel, train_data::AbstractArray, - in_size::Int, res_size::Int; input_layer=scaled_rand, reservoir=rand_sparse, - bias=zeros32, reservoir_driver::AbstractDriver=RNN(), - nla_type::NonLinearAlgorithm=NLADefault(), - states_type::AbstractStates=StandardStates(), washout::Int=0, - rng::AbstractRNG=Utils.default_rng(), T=Float32, - matrix_type=typeof(train_data)) + in_size::Int, res_size::Int; input_layer = scaled_rand, reservoir = rand_sparse, + bias = zeros32, reservoir_driver::AbstractDriver = RNN(), + nla_type::NonLinearAlgorithm = NLADefault(), + states_type::AbstractStates = StandardStates(), washout::Int = 0, + rng::AbstractRNG = Utils.default_rng(), T = Float32, + matrix_type = typeof(train_data)) train_data = vcat(train_data, model.model_data[:, 1:(end - 1)]) if states_type isa AbstractPaddedStates @@ -125,7 +125,8 @@ function HybridESN(model::KnowledgeModel, train_data::AbstractArray, end function (hesn::HybridESN)(prediction::AbstractPrediction, - output_layer::AbstractOutputLayer; last_state::AbstractArray=hesn.states[:, [end]], + output_layer::AbstractOutputLayer; last_state::AbstractArray = hesn.states[ + :, [end]], kwargs...) 
km = hesn.model pred_len = prediction.prediction_len @@ -143,7 +144,7 @@ function (hesn::HybridESN)(prediction::AbstractPrediction, end function train(hesn::HybridESN, target_data::AbstractArray, - training_method=StandardRidge(); kwargs...) + training_method = StandardRidge(); kwargs...) states = vcat(hesn.states, hesn.model.model_data[:, 2:end]) states_new = hesn.states_type(hesn.nla_type, states, hesn.train_data[:, 1:end]) diff --git a/src/esn/inits_components.jl b/src/esn/inits_components.jl index ff90d348..b5ed7436 100644 --- a/src/esn/inits_components.jl +++ b/src/esn/inits_components.jl @@ -64,7 +64,7 @@ function no_sample(rng::AbstractRNG, vecormat::AbstractVecOrMat) end function regular_sample!(rng::AbstractRNG, vecormat::AbstractVecOrMat; - strides::Union{Integer, AbstractVector{<:Integer}}=2) + strides::Union{Integer, AbstractVector{<:Integer}} = 2) return _regular_sample!(rng, vecormat, strides) end @@ -91,7 +91,7 @@ function _regular_sample!( end function bernoulli_sample!( - rng::AbstractRNG, vecormat::AbstractVecOrMat; positive_prob::Number=0.5) + rng::AbstractRNG, vecormat::AbstractVecOrMat; positive_prob::Number = 0.5) for idx in eachindex(vecormat) if rand(rng) > positive_prob vecormat[idx] = -vecormat[idx] @@ -101,7 +101,7 @@ end #TODO: @MartinuzziFrancesco maybe change name here #wait, for sure change name here function irrational_sample!(rng::AbstractRNG, vecormat::AbstractVecOrMat; - irrational::Irrational=pi, start::Int=1) + irrational::Irrational = pi, start::Int = 1) total_elements = length(vecormat) setprecision(BigFloat, Int(ceil(log2(10) * (total_elements + start + 1)))) ir_string = collect(string(BigFloat(irrational))) @@ -195,7 +195,7 @@ end function delay_line!( rng::AbstractRNG, reservoir_matrix::AbstractMatrix, weight::AbstractVector, - shift::Integer; sampling_type=:no_sample, kwargs...) + shift::Integer; sampling_type = :no_sample, kwargs...) f_sample = getfield(@__MODULE__, sampling_type) f_sample(rng, weight; kwargs...) for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - shift) @@ -258,7 +258,7 @@ julia> backward_connection!(matrix, 3.0, 1) 0.0 0.0 0.0 0.0 3.0 0.0 0.0 0.0 0.0 0.0 -julia> backward_connection!(matrix, 3.0, 1; sampling_type=:bernoulli_sample!) +julia> backward_connection!(matrix, 3.0, 1; sampling_type = :bernoulli_sample!) 5×5 Matrix{Float32}: 0.0 3.0 0.0 0.0 0.0 0.0 0.0 -3.0 0.0 0.0 @@ -276,7 +276,7 @@ end function backward_connection!( rng::AbstractRNG, reservoir_matrix::AbstractMatrix, weight::AbstractVector, - shift::Integer; sampling_type=:no_sample, kwargs...) + shift::Integer; sampling_type = :no_sample, kwargs...) f_sample = getfield(@__MODULE__, sampling_type) f_sample(rng, weight; kwargs...) for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - shift) @@ -330,7 +330,7 @@ julia> matrix = zeros(Float32, 5, 5) 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -julia> simple_cycle!(matrix, 1.0; sampling_type=:irrational_sample!) +julia> simple_cycle!(matrix, 1.0; sampling_type = :irrational_sample!) 5×5 Matrix{Float32}: 0.0 0.0 0.0 0.0 -1.0 -1.0 0.0 0.0 0.0 0.0 @@ -347,7 +347,7 @@ end function simple_cycle!( rng::AbstractRNG, reservoir_matrix::AbstractMatrix, weight::AbstractVector; - sampling_type=:no_sample, kwargs...) + sampling_type = :no_sample, kwargs...) f_sample = getfield(@__MODULE__, sampling_type) f_sample(rng, weight; kwargs...) 
for idx in first(axes(reservoir_matrix, 1)):(last(axes(reservoir_matrix, 1)) - 1) @@ -402,7 +402,7 @@ julia> matrix = zeros(Float32, 5, 5) 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -julia> reverse_simple_cycle!(matrix, 1.0; sampling_type=:regular_sample!) +julia> reverse_simple_cycle!(matrix, 1.0; sampling_type = :regular_sample!) 5×5 Matrix{Float32}: 0.0 -1.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 @@ -419,7 +419,7 @@ end function reverse_simple_cycle!( rng::AbstractRNG, reservoir_matrix::AbstractMatrix, weight::AbstractVector; - sampling_type=:no_sample, kwargs...) + sampling_type = :no_sample, kwargs...) f_sample = getfield(@__MODULE__, sampling_type) f_sample(rng, weight; kwargs...) for idx in (first(axes(reservoir_matrix, 1)) + 1):last(axes(reservoir_matrix, 1)) @@ -493,7 +493,7 @@ end function add_jumps!(rng::AbstractRNG, reservoir_matrix::AbstractMatrix, weight::AbstractVector, jump_size::Integer; - sampling_type=:no_sample, kwargs...) + sampling_type = :no_sample, kwargs...) f_sample = getfield(@__MODULE__, sampling_type) f_sample(rng, weight; kwargs...) w_idx = 1 @@ -570,7 +570,7 @@ function self_loop!(rng::AbstractRNG, reservoir_matrix::AbstractMatrix, end function self_loop!(rng::AbstractRNG, reservoir_matrix::AbstractMatrix, - weight::AbstractVector; sampling_type=:no_sample, kwargs...) + weight::AbstractVector; sampling_type = :no_sample, kwargs...) f_sample = getfield(@__MODULE__, sampling_type) f_sample(rng, weight; kwargs...) for idx in axes(reservoir_matrix, 1) diff --git a/src/predict.jl b/src/predict.jl index a60fa992..81b3bab6 100644 --- a/src/predict.jl +++ b/src/predict.jl @@ -68,7 +68,7 @@ end function obtain_prediction(rc::AbstractReservoirComputer, prediction::Generative, x, output_layer::AbstractOutputLayer, args...; - initial_conditions=output_layer.last_value) + initial_conditions = output_layer.last_value) #x = last_state prediction_len = prediction.prediction_len train_method = output_layer.training_method diff --git a/src/reca/reca.jl b/src/reca/reca.jl index 385af83d..8af5b2d3 100644 --- a/src/reca/reca.jl +++ b/src/reca/reca.jl @@ -26,10 +26,10 @@ automata._” arXiv preprint arXiv:1703.02806 (2017). """ function RECA(train_data, automata; - generations=8, - input_encoding=RandomMapping(), - nla_type=NLADefault(), - states_type=StandardStates()) + generations = 8, + input_encoding = RandomMapping(), + nla_type = NLADefault(), + states_type = StandardStates()) in_size = size(train_data, 1) #res_size = obtain_res_size(input_encoding, generations) state_encoding = create_encoding(input_encoding, train_data, generations) @@ -39,7 +39,7 @@ function RECA(train_data, end #training dispatch -function train(reca::AbstractReca, target_data, training_method=StandardRidge; kwargs...) +function train(reca::AbstractReca, target_data, training_method = StandardRidge; kwargs...) states_new = reca.states_type(reca.nla_type, reca.states, reca.train_data) return train(training_method, Float32.(states_new), Float32.(target_data); kwargs...) end @@ -47,10 +47,10 @@ end #predict dispatch function (reca::RECA)(prediction, output_layer::AbstractOutputLayer, - initial_conditions=output_layer.last_value, - last_state=zeros(reca.input_encoding.ca_size)) + initial_conditions = output_layer.last_value, + last_state = zeros(reca.input_encoding.ca_size)) return obtain_prediction(reca, prediction, last_state, output_layer; - initial_conditions=initial_conditions) + initial_conditions = initial_conditions) end function next_state_prediction!(reca::RECA, x, out, i, args...) 
diff --git a/src/reca/reca_input_encodings.jl b/src/reca/reca_input_encodings.jl index 92297ba5..dd29d72e 100644 --- a/src/reca/reca_input_encodings.jl +++ b/src/reca/reca_input_encodings.jl @@ -19,11 +19,11 @@ The detail of this implementation can be found in [1]. [1] Nichele, Stefano, and Andreas Molund. “Deep reservoir computing using cellular automata.” arXiv preprint arXiv:1703.02806 (2017). """ -function RandomMapping(; permutations=8, expansion_size=40) +function RandomMapping(; permutations = 8, expansion_size = 40) RandomMapping(permutations, expansion_size) end -function RandomMapping(permutations; expansion_size=40) +function RandomMapping(permutations; expansion_size = 40) RandomMapping(permutations, expansion_size) end diff --git a/src/states.jl b/src/states.jl index 9a4d7705..a0e85123 100644 --- a/src/states.jl +++ b/src/states.jl @@ -194,7 +194,7 @@ struct PaddedStates{T} <: AbstractPaddedStates padding::T end -function PaddedStates(; padding=1.0) +function PaddedStates(; padding = 1.0) return PaddedStates(padding) end @@ -272,7 +272,7 @@ struct PaddedExtendedStates{T} <: AbstractPaddedStates padding::T end -function PaddedExtendedStates(; padding=1.0) +function PaddedExtendedStates(; padding = 1.0) return PaddedExtendedStates(padding) end diff --git a/test/esn/deepesn.jl b/test/esn/deepesn.jl index 70250028..815d16c8 100644 --- a/test/esn/deepesn.jl +++ b/test/esn/deepesn.jl @@ -15,9 +15,9 @@ zeros_types = [zeros64, zeros32, zeros16] for (tidx, t) in enumerate(test_types) Random.seed!(77) - res = rand_sparse(; radius=1.2, sparsity=0.1) + res = rand_sparse(; radius = 1.2, sparsity = 0.1) esn = DeepESN(t.(input_data), 1, res_size; - bias=fill(zeros_types[tidx], 2)) + bias = fill(zeros_types[tidx], 2)) output_layer = train(esn, t.(target_data)) output = esn(Generative(length(test_data)), output_layer) diff --git a/test/esn/test_drivers.jl b/test/esn/test_drivers.jl index d0225992..fe288ea4 100644 --- a/test/esn/test_drivers.jl +++ b/test/esn/test_drivers.jl @@ -15,25 +15,25 @@ function test_esn(input_data, target_data, training_method, esn_config) esn = ESN(input_data, 1, res_size; esn_config...) 
output_layer = train(esn, target_data, training_method) - output = esn(Predictive(target_data), output_layer; initial_conditions=target_data[1]) + output = esn(Predictive(target_data), output_layer; initial_conditions = target_data[1]) @test mean(abs.(target_data .- output)) ./ mean(abs.(target_data)) < 0.15 end esn_configs = [ - Dict(:reservoir => rand_sparse(; radius=1.2), - :reservoir_driver => GRU(; variant=FullyGated(), - reservoir=[ - rand_sparse(; radius=1.0, sparsity=0.5), - rand_sparse(; radius=1.2, sparsity=0.1) + Dict(:reservoir => rand_sparse(; radius = 1.2), + :reservoir_driver => GRU(; variant = FullyGated(), + reservoir = [ + rand_sparse(; radius = 1.0, sparsity = 0.5), + rand_sparse(; radius = 1.2, sparsity = 0.1) ])), - Dict(:reservoir => rand_sparse(; radius=1.2), - :reservoir_driver => GRU(; variant=Minimal(), - reservoir=rand_sparse(; radius=1.0, sparsity=0.5), - inner_layer=scaled_rand, - bias=scaled_rand)), - Dict(:reservoir => rand_sparse(; radius=1.2), - :reservoir_driver => MRNN(; activation_function=(tanh, sigmoid), - scaling_factor=(0.8, 0.1))) + Dict(:reservoir => rand_sparse(; radius = 1.2), + :reservoir_driver => GRU(; variant = Minimal(), + reservoir = rand_sparse(; radius = 1.0, sparsity = 0.5), + inner_layer = scaled_rand, + bias = scaled_rand)), + Dict(:reservoir => rand_sparse(; radius = 1.2), + :reservoir_driver => MRNN(; activation_function = (tanh, sigmoid), + scaling_factor = (0.8, 0.1))) ] @testset "Test Drivers: $config" for config in esn_configs diff --git a/test/esn/test_hybrid.jl b/test/esn/test_hybrid.jl index 8d14f27a..8b1eab3b 100644 --- a/test/esn/test_hybrid.jl +++ b/test/esn/test_hybrid.jl @@ -3,7 +3,7 @@ using ReservoirComputing, DifferentialEquations, Statistics, Random u0 = [1.0, 0.0, 0.0] tspan = (0.0, 1000.0) datasize = 100000 -tsteps = range(tspan[1], tspan[2]; length=datasize) +tsteps = range(tspan[1], tspan[2]; length = datasize) function lorenz(du, u, p, t) p = [10.0, 28.0, 8 / 3] @@ -12,16 +12,16 @@ function lorenz(du, u, p, t) du[3] = u[1] * u[2] - p[3] * u[3] end -function prior_model_data_generator(u0, tspan, tsteps, model=lorenz) +function prior_model_data_generator(u0, tspan, tsteps, model = lorenz) prob = ODEProblem(lorenz, u0, tspan) - sol = Array(solve(prob; saveat=tsteps)) + sol = Array(solve(prob; saveat = tsteps)) return sol end train_len = 10000 ode_prob = ODEProblem(lorenz, u0, tspan) -ode_sol = solve(ode_prob; saveat=tsteps) +ode_sol = solve(ode_prob; saveat = tsteps) ode_data = Array(ode_sol) input_data = ode_data[:, 1:train_len] target_data = ode_data[:, 2:(train_len + 1)] @@ -37,7 +37,7 @@ hesn = HybridESN(km, input_data, 3, 300; - reservoir=rand_sparse) + reservoir = rand_sparse) output_layer = train(hesn, target_data, StandardRidge(0.3)) diff --git a/test/esn/test_inits.jl b/test/esn/test_inits.jl index 29095473..18e15430 100644 --- a/test/esn/test_inits.jl +++ b/test/esn/test_inits.jl @@ -8,13 +8,13 @@ const weight = 0.2 const jump_size = 3 const rng = Random.default_rng() -function check_radius(matrix, target_radius; tolerance=1e-5) +function check_radius(matrix, target_radius; tolerance = 1e-5) if matrix isa SparseArrays.SparseMatrixCSC matrix = Matrix(matrix) end eigenvalues = eigvals(matrix) spectral_radius = maximum(abs.(eigenvalues)) - return isapprox(spectral_radius, target_radius; atol=tolerance) + return isapprox(spectral_radius, target_radius; atol = tolerance) end ft = [Float16, Float32, Float64] @@ -40,10 +40,10 @@ input_inits = [ weighted_init, weighted_minimal, minimal_init, - minimal_init(; 
sampling_type=:irrational_sample!), + minimal_init(; sampling_type = :irrational_sample!), chebyshev_mapping, logistic_mapping, - modified_lm(; factor=4) + modified_lm(; factor = 4) ] @testset "Reservoir Initializers" begin @@ -95,7 +95,7 @@ end @testset "Minimum complexity: $init" for init in [ minimal_init, - minimal_init(; sampling_type=:irrational_sample!) + minimal_init(; sampling_type = :irrational_sample!) ] dl = init(res_size, in_size) @test sort(unique(dl)) == Float32.([-0.1, 0.1]) diff --git a/test/esn/test_train.jl b/test/esn/test_train.jl index 737fe44f..8b418280 100644 --- a/test/esn/test_train.jl +++ b/test/esn/test_train.jl @@ -12,15 +12,15 @@ const reg = 10e-6 #test_types = [Float64, Float32, Float16] Random.seed!(77) -res = rand_sparse(; radius=1.2, sparsity=0.1) +res = rand_sparse(; radius = 1.2, sparsity = 0.1) esn = ESN(input_data, 1, res_size; - reservoir=res) + reservoir = res) # different models that implement a train dispatch # TODO add classification -linear_training = [StandardRidge(0.0), LinearRegression(; fit_intercept=false), - RidgeRegression(; fit_intercept=false), LassoRegression(; fit_intercept=false), - ElasticNetRegression(; fit_intercept=false), HuberRegression(; fit_intercept=false), - QuantileRegression(; fit_intercept=false), LADRegression(; fit_intercept=false)] +linear_training = [StandardRidge(0.0), LinearRegression(; fit_intercept = false), + RidgeRegression(; fit_intercept = false), LassoRegression(; fit_intercept = false), + ElasticNetRegression(; fit_intercept = false), HuberRegression(; fit_intercept = false), + QuantileRegression(; fit_intercept = false), LADRegression(; fit_intercept = false)] svm_training = [EpsilonSVR(), NuSVR()] # TODO check types diff --git a/test/qa.jl b/test/qa.jl index e009e2f1..62aa6f27 100644 --- a/test/qa.jl +++ b/test/qa.jl @@ -1,7 +1,7 @@ using ReservoirComputing, Aqua @testset "Aqua" begin Aqua.find_persistent_tasks_deps(ReservoirComputing) - Aqua.test_ambiguities(ReservoirComputing; recursive=false) + Aqua.test_ambiguities(ReservoirComputing; recursive = false) Aqua.test_deps_compat(ReservoirComputing) Aqua.test_piracies(ReservoirComputing) Aqua.test_project_extras(ReservoirComputing) diff --git a/test/reca/test_predictive.jl b/test/reca/test_predictive.jl index a45f04c3..44ff4706 100644 --- a/test/reca/test_predictive.jl +++ b/test/reca/test_predictive.jl @@ -6,8 +6,8 @@ const g = 6 const rule = 90 reca = RECA(input, DCA(rule); - generations=g, - input_encoding=RandomMapping(6, 10)) + generations = g, + input_encoding = RandomMapping(6, 10)) output_layer = train(reca, output, StandardRidge(0.001)) prediction = reca(Predictive(input), output_layer) @@ -15,6 +15,6 @@ final_pred = convert(AbstractArray{Int}, prediction .> 0.5) @test final_pred == output rm1 = RandomMapping(6, 10) -rm2 = RandomMapping(6; expansion_size=10) -rm3 = RandomMapping(; permutations=6, expansion_size=10) +rm2 = RandomMapping(6; expansion_size = 10) +rm3 = RandomMapping(; permutations = 6, expansion_size = 10) @test rm1 == rm2 == rm3 diff --git a/test/test_states.jl b/test/test_states.jl index 5fe5c653..5e4f1f44 100644 --- a/test/test_states.jl +++ b/test/test_states.jl @@ -13,9 +13,9 @@ nlas = [(NLADefault(), test_array), (ExtendedSquare(), [1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 4, 9, 16, 25, 36, 49, 64, 81])] pes = [(StandardStates(), test_array), - (PaddedStates(; padding=padding), + (PaddedStates(; padding = padding), vcat(test_array, padding)), - (PaddedExtendedStates(; padding=padding), + (PaddedExtendedStates(; padding = padding), 
vcat(test_array, padding, extension)), (ExtendedStates(), vcat(test_array, extension))]
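Illustrative sketch (not part of the patch): the trimmed `.JuliaFormatter.toml` above keeps only `style = "sciml"`, `format_markdown = false`, and `format_docstrings = true`, and the remaining hunks are the formatting changes that follow from relying on the style's defaults instead of the dropped overrides. Assuming JuliaFormatter.jl is installed, the same reformatting could be reproduced from the repository root roughly as follows:

```julia
# Sketch only, not part of the commit: re-run the formatter with the
# repository's .JuliaFormatter.toml (style = "sciml"). `format` rewrites
# files in place and returns true if everything was already formatted.
using JuliaFormatter

format(".")
```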