8 changes: 1 addition & 7 deletions .JuliaFormatter.toml
@@ -1,9 +1,3 @@
style = "sciml"
format_markdown = false
whitespace_in_kwargs = false
margin = 92
indent = 4
format_docstrings = true
separate_kwargs_with_semicolon = true
always_for_in = true
annotate_untyped_fields_with_any = false
format_docstrings = true
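Note on the change above: the trimmed config keeps only `style = "sciml"`, `format_markdown = false`, and `format_docstrings = true`, so keyword-argument spacing now follows the SciML style defaults, which is what produces the `force=true` to `force = true` churn in the rest of this diff. Below is a minimal sketch of re-running the formatter against the new config, assuming JuliaFormatter.jl is installed in the active environment; the path is illustrative.

```julia
# Sketch: re-apply the repository's .JuliaFormatter.toml after the change.
# Assumes JuliaFormatter.jl is available in the active environment.
using JuliaFormatter

# format(path) picks up the nearest .JuliaFormatter.toml, so only
# style = "sciml", format_markdown = false, and format_docstrings = true
# are set explicitly; everything else follows the SciML style defaults.
format(".")
```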
22 changes: 11 additions & 11 deletions docs/make.jl
@@ -1,22 +1,22 @@
 using Documenter, ReservoirComputing

-cp("./docs/Manifest.toml", "./docs/src/assets/Manifest.toml"; force=true)
-cp("./docs/Project.toml", "./docs/src/assets/Project.toml"; force=true)
+cp("./docs/Manifest.toml", "./docs/src/assets/Manifest.toml"; force = true)
+cp("./docs/Project.toml", "./docs/src/assets/Project.toml"; force = true)

 ENV["PLOTS_TEST"] = "true"
 ENV["GKSwstype"] = "100"
 include("pages.jl")
 mathengine = Documenter.MathJax()

-makedocs(; modules=[ReservoirComputing],
-    sitename="ReservoirComputing.jl",
-    clean=true, doctest=false, linkcheck=true,
-    format=Documenter.HTML(;
+makedocs(; modules = [ReservoirComputing],
+    sitename = "ReservoirComputing.jl",
+    clean = true, doctest = false, linkcheck = true,
+    format = Documenter.HTML(;
         mathengine,
-        assets=["assets/favicon.ico"],
-        canonical="https://docs.sciml.ai/ReservoirComputing/stable/"),
-    pages=pages
+        assets = ["assets/favicon.ico"],
+        canonical = "https://docs.sciml.ai/ReservoirComputing/stable/"),
+    pages = pages
 )

-deploydocs(; repo="github.com/SciML/ReservoirComputing.jl.git",
-    push_preview=true)
+deploydocs(; repo = "github.com/SciML/ReservoirComputing.jl.git",
+    push_preview = true)
14 changes: 7 additions & 7 deletions src/esn/deepesn.jl
@@ -59,15 +59,15 @@ enhanced by the depth provided by multiple reservoir layers.
 train_data = rand(Float32, 3, 100)

 # Create a DeepESN with specific parameters
-deepESN = DeepESN(train_data, 3, 100; depth=3, washout=100)
+deepESN = DeepESN(train_data, 3, 100; depth = 3, washout = 100)
 ```
 """
-function DeepESN(train_data::AbstractArray, in_size::Int, res_size::Int; depth::Int=2,
-        input_layer=fill(scaled_rand, depth), bias=fill(zeros32, depth),
-        reservoir=fill(rand_sparse, depth), reservoir_driver::AbstractDriver=RNN(),
-        nla_type::NonLinearAlgorithm=NLADefault(),
-        states_type::AbstractStates=StandardStates(), washout::Int=0,
-        rng::AbstractRNG=Utils.default_rng(), matrix_type=typeof(train_data))
+function DeepESN(train_data::AbstractArray, in_size::Int, res_size::Int; depth::Int = 2,
+        input_layer = fill(scaled_rand, depth), bias = fill(zeros32, depth),
+        reservoir = fill(rand_sparse, depth), reservoir_driver::AbstractDriver = RNN(),
+        nla_type::NonLinearAlgorithm = NLADefault(),
+        states_type::AbstractStates = StandardStates(), washout::Int = 0,
+        rng::AbstractRNG = Utils.default_rng(), matrix_type = typeof(train_data))
     if states_type isa AbstractPaddedStates
         in_size = size(train_data, 1) + 1
         train_data = vcat(adapt(matrix_type, ones(1, size(train_data, 2))),
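For reference, a hedged sketch of how the per-layer keywords in the signature above are meant to be used: one initializer per reservoir layer. The initializers `scaled_rand` and `rand_sparse` are the defaults already shown in the diff (assumed here to be exported by the package), and the sizes mirror the docstring example.

```julia
# Sketch based on the DeepESN signature above: one initializer per layer.
# Assumes ReservoirComputing.jl exports the initializers used as defaults.
using ReservoirComputing

train_data = rand(Float32, 3, 100)   # 3 features, 100 time steps, as in the docstring

deep_esn = DeepESN(train_data, 3, 100; depth = 3,
    input_layer = fill(scaled_rand, 3),   # one input initializer per reservoir layer
    reservoir = fill(rand_sparse, 3),     # one reservoir initializer per layer
    washout = 100)                        # value taken from the docstring example
```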
20 changes: 10 additions & 10 deletions src/esn/esn.jl
@@ -50,17 +50,17 @@ julia> train_data = rand(Float32, 10, 100) # 10 features, 100 time steps
 0.4463 0.334423 0.444679 0.311695 0.0494497 0.27171 0.214925
 0.987182 0.898593 0.295241 0.233098 0.789699 0.453692 0.759205

-julia> esn = ESN(train_data, 10, 300; washout=10)
+julia> esn = ESN(train_data, 10, 300; washout = 10)
 ESN(10 => 300)
 ```
 """
 function ESN(train_data::AbstractArray, in_size::Int, res_size::Int;
-        input_layer=scaled_rand, reservoir=rand_sparse, bias=zeros32,
-        reservoir_driver::AbstractDriver=RNN(),
-        nla_type::NonLinearAlgorithm=NLADefault(),
-        states_type::AbstractStates=StandardStates(),
-        washout::Int=0, rng::AbstractRNG=Utils.default_rng(),
-        matrix_type=typeof(train_data))
+        input_layer = scaled_rand, reservoir = rand_sparse, bias = zeros32,
+        reservoir_driver::AbstractDriver = RNN(),
+        nla_type::NonLinearAlgorithm = NLADefault(),
+        states_type::AbstractStates = StandardStates(),
+        washout::Int = 0, rng::AbstractRNG = Utils.default_rng(),
+        matrix_type = typeof(train_data))
     if states_type isa AbstractPaddedStates
         in_size = size(train_data, 1) + 1
         train_data = vcat(adapt(matrix_type, ones(1, size(train_data, 2))),
@@ -82,7 +82,7 @@ function ESN(train_data::AbstractArray, in_size::Int, res_size::Int;
 end

 function (esn::AbstractEchoStateNetwork)(prediction::AbstractPrediction,
-        output_layer::AbstractOutputLayer; last_state=esn.states[:, [end]],
+        output_layer::AbstractOutputLayer; last_state = esn.states[:, [end]],
         kwargs...)
     return obtain_esn_prediction(esn, prediction, last_state, output_layer;
         kwargs...)
@@ -120,15 +120,15 @@ julia> train_data = rand(Float32, 10, 100) # 10 features, 100 time steps
 0.133498 0.451058 0.0761995 0.90421 0.994212 0.332164 0.545112
 0.214467 0.791524 0.124105 0.951805 0.947166 0.954244 0.889733

-julia> esn = ESN(train_data, 10, 300; washout=10)
+julia> esn = ESN(train_data, 10, 300; washout = 10)
 ESN(10 => 300)

 julia> output_layer = train(esn, rand(Float32, 3, 90))
 OutputLayer successfully trained with output size: 3
 ```
 """
 function train(esn::AbstractEchoStateNetwork, target_data::AbstractArray,
-        training_method=StandardRidge(); kwargs...)
+        training_method = StandardRidge(); kwargs...)
     states_new = esn.states_type(esn.nla_type, esn.states, esn.train_data[:, 1:end])
     return train(training_method, states_new, target_data; kwargs...)
 end
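Putting the two docstring examples above together, here is a minimal end-to-end sketch. Sizes mirror the docstrings; `Generative` (an autoregressive prediction type from the package docs) and the forecast length of 50 are assumptions not shown in this diff.

```julia
# End-to-end sketch assembled from the docstring examples in this diff.
# Generative(n) is assumed here for an n-step autoregressive forecast.
using ReservoirComputing

train_data = rand(Float32, 10, 100)            # 10 features, 100 time steps
target_data = rand(Float32, 3, 90)             # 3 targets, aligned after washout = 10

esn = ESN(train_data, 10, 300; washout = 10)   # ESN(10 => 300), as in the docstring
output_layer = train(esn, target_data)         # training_method defaults to StandardRidge()

prediction = esn(Generative(50), output_layer) # run the trained ESN forward for 50 steps
```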