From 82de2ade0f7c817cc265a1254a6885ca88c2f628 Mon Sep 17 00:00:00 2001 From: MartinuzziFrancesco Date: Sat, 29 Nov 2025 11:33:09 +0100 Subject: [PATCH 1/4] docs: better fleshing out of the docstrings --- src/states.jl | 272 ++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 232 insertions(+), 40 deletions(-) diff --git a/src/states.jl b/src/states.jl index 37b1e354..74d56e88 100644 --- a/src/states.jl +++ b/src/states.jl @@ -12,15 +12,96 @@ return new_states end -""" -Pad(padding) +@doc raw""" + Pad(padding=1.0) + +Padding layer that appends a constant value to the state (and hence to the +layer output). -Padding layer that adds `padding` (either a number or an array) at the -end of a state. +```math +\tilde{x} = \begin{bmatrix} x \\ \text{padding} \end{bmatrix} +``` ## Arguments - `padding`: value to append. Default is 1.0. + +## Forward + + pad(state) + +## Arguments + + - `state`: The reservoir computing state. + +## Returns + + - A vector or matrix with chosen `padding` added, thus increasing the size by 1. 
+ +## Examples + +```jldoctest pad +julia> pad = Pad(1.0) +(::Pad{Float64}) (generic function with 2 methods) + +julia> x_old = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] +10-element Vector{Int64}: + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + +julia> x_new = pad(x_old) +11-element Vector{Int64}: + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 1 +``` + +```jldoctest pad +julia> mat_old = [1 2 3; + 4 5 6; + 7 8 9; + 10 11 12; + 13 14 15; + 16 17 18; + 19 20 21] +7×3 Matrix{Int64}: + 1 2 3 + 4 5 6 + 7 8 9 + 10 11 12 + 13 14 15 + 16 17 18 + 19 20 21 + + + julia> mat_new = pad(mat_old) + 8×3 Matrix{Int64}: + 1 2 3 + 4 5 6 + 7 8 9 + 10 11 12 + 13 14 15 + 16 17 18 + 19 20 21 + 1 1 1 +``` + """ struct Pad{P} <: Function padding::P @@ -39,15 +120,26 @@ function (pad::Pad)(x_old::AbstractMatrix) return vcat(x_old, row) end -""" -Pad(padding) +@doc raw""" + Extend(op) + +Wrapper layer that concatenates the reservoir state produced by `op` with the +input that `Extend` receives. + +For an input vector or matrix `x` and a wrapped layer producing state `s`, +`Extend` computes: -Wrapper layer that concatenates the reservoir state at that -point with the input that it receives. +```math +\begin{bmatrix} +x \\ +s +\end{bmatrix} +``` ## Arguments - - `op`: wrapped layer + - `op`: the wrapped layer whose output state will be concatenated with the input. + ## Examples @@ -88,13 +180,12 @@ end Base.show(io::IO, ex::Extend) = print(io, "Extend(", ex.op, ")") @doc raw""" - NLAT1(x) + NLAT1() `NLAT1` implements the T₁ transformation algorithm introduced in [Chattopadhyay2020](@cite) and [Pathak2017](@cite). The T₁ algorithm squares elements of the input array, targeting every second row. - ```math \tilde{r}_{i,j} = \begin{cases} @@ -102,9 +193,30 @@ elements of the input array, targeting every second row. r_{i,j}, & \text{if } j \text{ is even}. \end{cases} ``` -# Example -```jldoctest +## Arguments + +None + +## Forward + + nlat1(state) + +## Arguments + + - `state`: The reservoir computing state. 
+ +## Returns + + - A vector or matrix with transformed elements according to NLAT1, + with same dimensionality as the original. + +## Example + +```jldoctest nlat1 +julia> nlat1 = NLAT1() +NLAT1 (generic function with 3 methods) + julia> x_old = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] 10-element Vector{Int64}: 0 @@ -118,7 +230,7 @@ julia> x_old = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] 8 9 -julia> n_new = NLAT1(x_old) +julia> n_new = nlat1(x_old) 10-element Vector{Int64}: 0 1 @@ -130,7 +242,9 @@ julia> n_new = NLAT1(x_old) 7 64 9 +``` +```jldoctest nlat1 julia> mat_old = [1 2 3; 4 5 6; 7 8 9; @@ -147,7 +261,7 @@ julia> mat_old = [1 2 3; 16 17 18 19 20 21 -julia> mat_new = NLAT1(mat_old) +julia> mat_new = nlat1(mat_old) 7×3 Matrix{Int64}: 1 4 9 4 5 6 @@ -188,9 +302,30 @@ reservoir states by multiplying each odd-indexed row r_{i,j}, & \text{if } j \text{ is 1 or even}. \end{cases} ``` -# Example -```jldoctest +## Arguments + +None + +## Forward + + nlat2(state) + +## Arguments + + - `state`: The reservoir computing state. + +## Returns + + - A vector or matrix with transformed elements according to NLAT2, + with same dimensionality as the original. + +## Example + +```jldoctest nlat2 +julia> nlat2 = NLAT2() +NLAT2 (generic function with 3 methods) + julia> x_old = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] 10-element Vector{Int64}: 0 @@ -204,7 +339,7 @@ julia> x_old = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] 8 9 -julia> n_new = NLAT2(x_old) +julia> n_new = nlat2(x_old) 10-element Vector{Int64}: 0 1 @@ -217,6 +352,9 @@ julia> n_new = NLAT2(x_old) 42 9 +``` + +```jldoctest nlat2 julia> mat_old = [1 2 3; 4 5 6; 7 8 9; @@ -233,7 +371,7 @@ julia> mat_old = [1 2 3; 16 17 18 19 20 21 -julia> mat_new = NLAT2(mat_old) +julia> mat_new = nlat2(mat_old) 7×3 Matrix{Int64}: 1 2 3 4 5 6 @@ -260,7 +398,7 @@ NLAT2(x_old::AbstractMatrix) = _apply_tomatrix(NLAT2, x_old) NLAT2() = NLAT2 @doc raw""" - NLAT3() + NLAT3(x) Implements the T₃ transformation algorithm as detailed in [Chattopadhyay2020](@cite). 
This algorithm modifies the reservoir's states by @@ -274,9 +412,30 @@ r_{i,j-1} \times r_{i,j+1}, & \text{if } j > 1 \text{ is odd}; \\ r_{i,j}, & \text{if } j = 1 \text{ or even.} \end{cases} ``` -# Example -```jldoctest +## Arguments + +None + +## Forward + + nlat3(state) + +## Arguments + + - `state`: The reservoir computing state. + +## Returns + + - A vector or matrix with transformed elements according to NLAT3, + with same dimensionality as the original. + +## Example + +```jldoctest nlat3 +julia> nlat2 = NLAT3() +NLAT3 (generic function with 3 methods) + julia> x_old = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] 10-element Vector{Int64}: 0 @@ -290,7 +449,7 @@ julia> x_old = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] 8 9 -julia> n_new = NLAT3(x_old) +julia> n_new = nlat2(x_old) 10-element Vector{Int64}: 0 1 @@ -303,6 +462,9 @@ julia> n_new = NLAT3(x_old) 63 9 +``` + +```jldoctest nlat3 julia> mat_old = [1 2 3; 4 5 6; 7 8 9; @@ -319,7 +481,7 @@ julia> mat_old = [1 2 3; 16 17 18 19 20 21 -julia> mat_new = NLAT3(mat_old) +julia> mat_new = nlat2(mat_old) 7×3 Matrix{Int64}: 1 2 3 4 5 6 @@ -350,8 +512,6 @@ NLAT3() = NLAT3 Implement a partial squaring of the states as described in [Barbosa2021](@cite). -# Equations - ```math \begin{equation} g(r_i) = @@ -362,12 +522,28 @@ Implement a partial squaring of the states as described in [Barbosa2021](@cite). \end{equation} ``` -# Examples +## Arguments -```jldoctest -julia> ps = PartialSquare(0.6) -PartialSquare(0.6) + - `eta`: Percentage of elements of the input vector to be squared. + +## Forward + + partialsq(state) + +## Arguments + + - `state`: The reservoir computing state. + +## Returns + + - A vector or matrix with partial square components, + with same dimensionality as the original. 
+## Example + +```jldoctest partialsq +julia> partialsq = PartialSquare(0.6) +PartialSquare(0.6) julia> x_old = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] 10-element Vector{Int64}: @@ -382,7 +558,7 @@ julia> x_old = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] 8 9 -julia> x_new = ps(x_old) +julia> x_new = partialsq(x_old) 10-element Vector{Int64}: 0 1 @@ -421,9 +597,7 @@ end Extension of the Lu initialization proposed in [Herteux2020](@cite). The state vector is extended with the squared elements of the initial -state - -# Equations +state. ```math \begin{equation} @@ -431,9 +605,30 @@ state \end{equation} ``` -# Examples +## Arguments + +None + +## Forward + + extendedsq(state) + +## Arguments + + - `state`: The reservoir computing state. + +## Returns + + - A vector or matrix with the original elements + concatenated with the squared elements. + Dimensionality is double of the original. + +## Example + +```jldoctest extendedsq +julia> extendedsq = ExtendedSquare() +ExtendedSquare() -```jldoctest julia> x_old = [1, 2, 3, 4, 5, 6, 7, 8, 9] 9-element Vector{Int64}: 1 @@ -446,10 +641,7 @@ julia> x_old = [1, 2, 3, 4, 5, 6, 7, 8, 9] 8 9 -julia> es = ExtendedSquare() -ExtendedSquare() - -julia> x_new = es(x_old) +julia> x_new = extendedsq(x_old) 18-element Vector{Int64}: 1 2 From e96dd3ed4b071df0b0fb3ab2151ec2d776284a13 Mon Sep 17 00:00:00 2001 From: MartinuzziFrancesco Date: Sat, 29 Nov 2025 16:35:32 +0100 Subject: [PATCH 2/4] docs: improve docstrings for input inits --- src/ReservoirComputing.jl | 3 +- src/inits/inits_components.jl | 24 + src/inits/inits_input.jl | 826 ++++++++++++++++++ .../{esn_inits.jl => inits_reservoir.jl} | 688 --------------- 4 files changed, 852 insertions(+), 689 deletions(-) create mode 100644 src/inits/inits_input.jl rename src/inits/{esn_inits.jl => inits_reservoir.jl} (69%) diff --git a/src/ReservoirComputing.jl b/src/ReservoirComputing.jl index e80cfa77..868d5a39 100644 --- a/src/ReservoirComputing.jl +++ b/src/ReservoirComputing.jl @@ -32,7 +32,8 @@ 
include("predict.jl") include("train.jl") #initializers include("inits/inits_components.jl") -include("inits/esn_inits.jl") +include("inits/inits_input.jl") +include("inits/inits_reservoir.jl") #full models include("models/esn_generics.jl") include("models/esn.jl") diff --git a/src/inits/inits_components.jl b/src/inits/inits_components.jl index 35248af5..ed889ffa 100644 --- a/src/inits/inits_components.jl +++ b/src/inits/inits_components.jl @@ -1,3 +1,27 @@ +function apply_scale!(input_matrix::AbstractArray, scaling::Number, ::Type{T}) where {T} + @. input_matrix = (input_matrix - T(0.5)) * (T(2) * T(scaling)) + return input_matrix +end + +function apply_scale!(input_matrix::AbstractArray, + scaling::Tuple{<:Number, <:Number}, ::Type{T}) where {T} + lower, upper = T(scaling[1]), T(scaling[2]) + @assert lower res_input = scaled_rand(8, 3) +8×3 Matrix{Float32}: + -0.0669356 -0.0292692 -0.0188943 + 0.0159724 0.004071 -0.0737949 + 0.026355 -0.0191563 0.0714962 + -0.0177412 0.0279123 0.0892906 + -0.0184405 0.0567368 0.0190222 + 0.0944272 0.0679244 0.0148647 + -0.0799005 -0.0891089 -0.0444782 + -0.0970182 0.0934286 0.03553 +``` + +Scaling with a tuple, providing lower and upper bound of the uniform distribution +from which the weights will be sampled: + +```jldoctest scaledrand +julia> res_input = scaled_rand(8, 3, scaling = (0.1, 0.15)) +8×3 Matrix{Float32}: + 0.108266 0.117683 0.120276 + 0.128993 0.126018 0.106551 + 0.131589 0.120211 0.142874 + 0.120565 0.131978 0.147323 + 0.12039 0.139184 0.129756 + 0.148607 0.141981 0.128716 + 0.105025 0.102723 0.11388 + 0.100745 0.148357 0.133882 +``` + +Scaling with a vector of scalars, where each provides the upper bound and its +negative provides the lower bound. 
Each column is scaled in order: first element +provides bounds for the first column, and so on: + +```jldoctest +julia> res_input = scaled_rand(8, 3, scaling = [0.1, 0.2, 0.3]) +8×3 Matrix{Float32}: + -0.0669356 -0.0585384 -0.0566828 + 0.0159724 0.00814199 -0.221385 + 0.026355 -0.0383126 0.214489 + -0.0177412 0.0558246 0.267872 + -0.0184405 0.113474 0.0570667 + 0.0944272 0.135849 0.0445941 + -0.0799005 -0.178218 -0.133435 + -0.0970182 0.186857 0.10659 +``` + +Scaling with a vector of tuples, each providing both upper and lower bound. +Each column is scaled in order: first element +provides bounds for the first column, and so on: + +```jldoctest scaledrand +julia> res_input = scaled_rand(8, 3, scaling = [(0.1, 0.2), (-0.2, -0.1), (0.3, 0.5)]) +8×3 Matrix{Float32}: + 0.116532 -0.164635 0.381106 + 0.157986 -0.147965 0.326205 + 0.163177 -0.159578 0.471496 + 0.141129 -0.136044 0.489291 + 0.14078 -0.121632 0.419022 + 0.197214 -0.116038 0.414865 + 0.11005 -0.194554 0.355522 + 0.101491 -0.103286 0.43553 +``` +""" +function scaled_rand(rng::AbstractRNG, ::Type{T}, dims::Integer...; + scaling::Union{Number, Tuple, Vector} = T(0.1)) where {T <: Number} + res_size, in_size = dims + layer_matrix = DeviceAgnostic.rand(rng, T, res_size, in_size) + apply_scale!(layer_matrix, scaling, T) + return layer_matrix +end + +""" + weighted_init([rng], [T], dims...; + scaling=0.1, return_sparse=false) + +Create and return a weighted input layer matrix. In this matrix, each +of the input signals `in_size` connects to the reservoir nodes +`res_size`/`in_size`. The nonzero entries are distributed uniformly +within a range defined by `scaling` [Lu2017](@cite). + +Please note that this initializer computes its own reservoir size! If +the computed reservoir size is different than the provided one it will raise a +warning. + +## Arguments + + - `rng`: Random number generator. Default is `Utils.default_rng()` + from WeightInitializers. + - `T`: Type of the elements in the reservoir matrix. 
+ Default is `Float32`. + - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`. + +## Keyword arguments + + - `scaling`: A scaling factor to define the range of the uniform distribution. + The factor can be passed in three different ways: + + + A single number. In this case, the matrix elements will be randomly + chosen from the range `[-scaling, scaling]`. Default option, with + a the scaling value set to `0.1`. + + A tuple `(lower, upper)`. The values define the range of the distribution. + the matrix elements will be randomly created and scaled the range + `[lower, upper]`. + + A vector of length = `in_size`. In this case, the columns will be + scaled individually by the entries of the vector. The entries can + be numbers or tuples, which will mirror the behavior described above. + + - `return_sparse`: flag for returning a `sparse` matrix. + Default is `false`. + +## Examples + +Standard call with scaling provided by a scalar: + +```jldoctest weightedinit +julia> res_input = weighted_init(9, 3; scaling=0.1) +9×3 Matrix{Float32}: + 0.0452399 0.0 0.0 + -0.0348047 0.0 0.0 + -0.0386004 0.0 0.0 + 0.0 0.0577838 0.0 + 0.0 -0.0562827 0.0 + 0.0 0.0441522 0.0 + 0.0 0.0 0.00627948 + 0.0 0.0 -0.0293777 + 0.0 0.0 -0.0352914 +``` + +Scaling with a tuple, providing lower and upper bound of the uniform distribution +from which the weights will be sampled: + +```jldoctest weightedinit +julia> res_input = weighted_init(9, 3; scaling=(0.1, 0.5)) +9×3 Matrix{Float32}: + 0.39048 0.0 0.0 + 0.230391 0.0 0.0 + 0.222799 0.0 0.0 + 0.0 0.415568 0.0 + 0.0 0.187435 0.0 + 0.0 0.388304 0.0 + 0.0 0.0 0.312559 + 0.0 0.0 0.241245 + 0.0 0.0 0.229417 +``` + +Scaling with a vector of scalars, where each provides the upper bound and its +negative provides the lower bound. 
Each column is scaled in order: first element +provides bounds for the first column, and so on: + +```jldoctest weightedinit +julia> res_input = weighted_init(9, 3; scaling=[0.1, 0.5, 0.9]) +9×3 Matrix{Float32}: + 0.0452399 0.0 0.0 + -0.0348047 0.0 0.0 + -0.0386004 0.0 0.0 + 0.0 0.288919 0.0 + 0.0 -0.281413 0.0 + 0.0 0.220761 0.0 + 0.0 0.0 0.0565153 + 0.0 0.0 -0.264399 + 0.0 0.0 -0.317622 +``` + +Scaling with a vector of tuples, each providing both upper and lower bound. +Each column is scaled in order: first element +provides bounds for the first column, and so on: + +```jldoctest weightedinit +julia> res_input = weighted_init(9, 3; scaling=[(0.1, 0.2), (-0.2, -0.1), (0.3, 0.5)]) +9×3 Matrix{Float32}: + 0.17262 0.0 0.0 + 0.132598 0.0 0.0 + 0.1307 0.0 0.0 + 0.0 -0.121108 0.0 + 0.0 -0.178141 0.0 + 0.0 -0.127924 0.0 + 0.0 0.0 0.40628 + 0.0 0.0 0.370622 + 0.0 0.0 0.364709 +``` + +Example of matrix size change: + +```jldoctest weightedinit +julia> res_input = weighted_init(8, 3) +┌ Warning: Reservoir size has changed! +│ +│ Computed reservoir size (6) does not equal the provided reservoir size (8). +│ +│ Using computed value (6). Make sure to modify the reservoir initializer accordingly. 
+│ +└ @ ReservoirComputing ~/.julia/dev/ReservoirComputing/src/inits/inits_components.jl:20 +6×3 Matrix{Float32}: + 0.0452399 0.0 0.0 + -0.0348047 0.0 0.0 + 0.0 -0.0386004 0.0 + 0.0 0.00981022 0.0 + 0.0 0.0 0.0577838 + 0.0 0.0 -0.0562827 +``` + +Return sparse: + +```jldoctest weightedinit +julia> using SparseArrays + +julia> res_input = weighted_init(9, 3; return_sparse = true) +9×3 SparseMatrixCSC{Float32, Int64} with 9 stored entries: + 0.0452399 ⋅ ⋅ + -0.0348047 ⋅ ⋅ + -0.0386004 ⋅ ⋅ + ⋅ 0.0577838 ⋅ + ⋅ -0.0562827 ⋅ + ⋅ 0.0441522 ⋅ + ⋅ ⋅ 0.00627948 + ⋅ ⋅ -0.0293777 + ⋅ ⋅ -0.0352914 +``` +""" +function weighted_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; + scaling::Union{Number, Tuple, Vector} = T(0.1), + return_sparse::Bool = false) where {T <: Number} + + throw_sparse_error(return_sparse) + approx_res_size, in_size = dims + res_size = Int(floor(approx_res_size / in_size) * in_size) + check_modified_ressize(res_size, approx_res_size) + + layer_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size) + q = floor(Int, res_size / in_size) + + for idx in 1:in_size + sc_rand = DeviceAgnostic.rand(rng, T, q) + new_scaling = scaling isa AbstractVector ? scaling[idx] : scaling + apply_scale!(sc_rand, new_scaling, T) + layer_matrix[((idx - 1) * q + 1):((idx) * q), idx] = sc_rand + end + + return return_init_as(Val(return_sparse), layer_matrix) +end + +""" + weighted_minimal([rng], [T], dims...; + weight=0.1, return_sparse=false, + sampling_type=:no_sample) + +Create and return a minimal weighted input layer matrix. +This initializer generates a weighted input matrix with equal, deterministic +elements in the same construction as [`weighted_minimal]`(@ref), +inspired by [Lu2017](@cite). + +Please note that this initializer computes its own reservoir size! If +the computed reservoir size is different than the provided one it will raise a +warning. + +## Arguments + + - `rng`: Random number generator. Default is `Utils.default_rng()` + from WeightInitializers. 
+ - `T`: Type of the elements in the reservoir matrix. + Default is `Float32`. + - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`. + +## Keyword arguments + + - `weight`: The value for all the weights in the input matrix. + Defaults to `0.1`. + - `return_sparse`: flag for returning a `sparse` matrix. + Default is `false`. + - `sampling_type`: Sampling that decides the distribution of `weight` negative numbers. + If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each + `weight` can be positive with a probability set by `positive_prob`. If set to + `:irrational_sample!` the `weight` is negative if the decimal number of the + irrational number chosen is odd. If set to `:regular_sample!`, each weight will be + assigned a negative sign after the chosen `strides`. `strides` can be a single + number or an array. Default is `:no_sample`. + - `positive_prob`: probability of the `weight` being positive when `sampling_type` is + set to `:bernoulli_sample!`. Default is 0.5. + - `irrational`: Irrational number whose decimals decide the sign of `weight`. + Default is `pi`. + - `start`: Which place after the decimal point the counting starts for the `irrational` + sign counting. Default is 1. + - `strides`: number of strides for assigning negative value to a weight. It can be an + integer or an array. Default is 2. + +## Examples + +Standard call, changing the init weight: + +```jldoctest weightedminimal +julia> res_input = weighted_minimal(9, 3; weight = 0.99) +9×3 Matrix{Float32}: + 0.99 0.0 0.0 + 0.99 0.0 0.0 + 0.99 0.0 0.0 + 0.0 0.99 0.0 + 0.0 0.99 0.0 + 0.0 0.99 0.0 + 0.0 0.0 0.99 + 0.0 0.0 0.99 + 0.0 0.0 0.99 + ``` + + Random sign for each weight, drawn from a bernoulli distribution: + + ```jldoctest weightedminimal +julia> res_input = weighted_minimal(9, 3; sampling_type = :bernoulli_sample!) 
+9×3 Matrix{Float32}: + 0.1 -0.0 -0.0 + -0.1 -0.0 -0.0 + 0.1 -0.0 0.0 + -0.0 0.1 0.0 + 0.0 0.1 -0.0 + 0.0 0.1 0.0 + -0.0 -0.0 -0.1 + -0.0 -0.0 0.1 + 0.0 -0.0 0.1 +``` + +Example of different reservoir size for the initializer: + +```jldoctest weightedminimal +julia> res_input = weighted_minimal(8, 3) +┌ Warning: Reservoir size has changed! +│ +│ Computed reservoir size (6) does not equal the provided reservoir size (8). +│ +│ Using computed value (6). Make sure to modify the reservoir initializer accordingly. +│ +└ @ ReservoirComputing ~/.julia/dev/ReservoirComputing/src/esn/esn_inits.jl:159 +6×3 Matrix{Float32}: + 0.1 0.0 0.0 + 0.1 0.0 0.0 + 0.0 0.1 0.0 + 0.0 0.1 0.0 + 0.0 0.0 0.1 + 0.0 0.0 0.1 +``` +""" +function weighted_minimal(rng::AbstractRNG, ::Type{T}, dims::Integer...; + weight::Number = T(0.1), return_sparse::Bool = false, + sampling_type = :no_sample, kwargs...) where {T <: Number} + throw_sparse_error(return_sparse) + approx_res_size, in_size = dims + res_size = Int(floor(approx_res_size / in_size) * in_size) + check_modified_ressize(res_size, approx_res_size) + layer_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size) + q = floor(Int, res_size / in_size) + + for idx in 1:in_size + layer_matrix[((idx - 1) * q + 1):((idx) * q), idx] = T(weight) .* ones(T, q) + end + f_sample = getfield(@__MODULE__, sampling_type) + f_sample(rng, layer_matrix; kwargs...) + return return_init_as(Val(return_sparse), layer_matrix) +end + +""" + informed_init([rng], [T], dims...; + scaling=0.1, model_in_size, gamma=0.5) + +Create an input layer for informed echo state +networks [Pathak2018](@cite). + +## Arguments + + - `rng`: Random number generator. Default is `Utils.default_rng()` + from WeightInitializers. + - `T`: Type of the elements in the reservoir matrix. + Default is `Float32`. + - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`. + +## Keyword arguments + + - `scaling`: The scaling factor for the input matrix. + Default is 0.1. 
+ - `model_in_size`: The size of the input model. + - `gamma`: The gamma value. Default is 0.5. + +## Examples +""" +function informed_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; + scaling::Number = T(0.1), model_in_size::Integer, + gamma::Number = T(0.5)) where {T <: Number} + res_size, in_size = dims + state_size = in_size - model_in_size + + if state_size <= 0 + throw(DimensionMismatch("in_size must be greater than model_in_size")) + end + + input_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size) + zero_connections = DeviceAgnostic.zeros(rng, T, in_size) + num_for_state = floor(Int, res_size * gamma) + num_for_model = floor(Int, res_size * (1 - gamma)) + + same_as_zero_row(jdx::Int) = zero_connections == @view(input_matrix[jdx, :]) + + for _ in 1:num_for_state + idxs = findall(same_as_zero_row, axes(input_matrix, 1)) + isempty(idxs) && break + + random_row_idx = rand(rng, idxs) + random_clm_idx = rand(rng, 1:state_size) + + input_matrix[random_row_idx, random_clm_idx] = (DeviceAgnostic.rand(rng, T) - + T(0.5)) * (T(2) * T(scaling)) + end + + for _ in 1:num_for_model + idxs = findall(same_as_zero_row, axes(input_matrix, 1)) + isempty(idxs) && break + + random_row_idx = rand(rng, idxs) + random_clm_idx = rand(rng, (state_size + 1):in_size) + + input_matrix[random_row_idx, random_clm_idx] = (DeviceAgnostic.rand(rng, T) - + T(0.5)) * (T(2) * T(scaling)) + end + + return input_matrix +end + +""" + minimal_init([rng], [T], dims...; + sampling_type=:bernoulli_sample!, weight=0.1, irrational=pi, + start=1, p=0.5) + +Create a dense matrix with same weights magnitudes determined by +`weight` [Rodan2011](@cite). The sign difference is randomly +determined by the `sampling` chosen. + +## Arguments + + - `rng`: Random number generator. Default is `Utils.default_rng()` + from WeightInitializers. + - `T`: Type of the elements in the reservoir matrix. + Default is `Float32`. + - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`. 
+
+## Keyword arguments
+
+  - `weight`: The weight used to fill the layer matrix. Default is 0.1.
+  - `sampling_type`: Sampling that decides the distribution of `weight` negative numbers.
+    If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each
+    `weight` can be positive with a probability set by `positive_prob`. If set to
+    `:irrational_sample!` the `weight` is negative if the decimal number of the
+    irrational number chosen is odd. If set to `:regular_sample!`, each weight will be
+    assigned a negative sign after the chosen `strides`. `strides` can be a single
+    number or an array. Default is `:bernoulli_sample!`.
+  - `positive_prob`: probability of the `weight` being positive when `sampling_type` is
+    set to `:bernoulli_sample!`. Default is 0.5.
+  - `irrational`: Irrational number whose decimals decide the sign of `weight`.
+    Default is `pi`.
+  - `start`: Which place after the decimal point the counting starts for the `irrational`
+    sign counting. Default is 1.
+  - `strides`: number of strides for assigning negative value to a weight. It can be an
+    integer or an array. Default is 2.
+ +## Examples + +Standard call: + +```jldoctest minimalinit +julia> res_input = minimal_init(8, 3) +8×3 Matrix{Float32}: + 0.1 -0.1 0.1 + -0.1 0.1 0.1 + -0.1 -0.1 0.1 + -0.1 -0.1 -0.1 + 0.1 0.1 0.1 + -0.1 -0.1 -0.1 + -0.1 -0.1 0.1 + 0.1 -0.1 0.1 +``` + +Sampling weight sign from an irrational number: + +```jldoctest minimalinit +julia> res_input = minimal_init(8, 3; sampling_type = :irrational) +8×3 Matrix{Float32}: + -0.1 0.1 -0.1 + 0.1 -0.1 -0.1 + 0.1 0.1 -0.1 + 0.1 0.1 0.1 + -0.1 -0.1 -0.1 + 0.1 0.1 0.1 + 0.1 0.1 -0.1 + -0.1 0.1 -0.1 + ``` + + Changing probability for the negative sign + + ```jldoctest minimalinit +julia> res_input = minimal_init(8, 3; p = 0.1) # lower p -> more negative signs +8×3 Matrix{Float32}: + -0.1 -0.1 -0.1 + -0.1 -0.1 -0.1 + -0.1 -0.1 -0.1 + -0.1 -0.1 -0.1 + 0.1 -0.1 -0.1 + -0.1 -0.1 -0.1 + -0.1 -0.1 -0.1 + -0.1 -0.1 -0.1 + + +julia> res_input = minimal_init(8, 3; p = 0.8)# higher p -> more positive signs +8×3 Matrix{Float32}: + 0.1 0.1 0.1 + -0.1 0.1 0.1 + -0.1 0.1 0.1 + 0.1 0.1 0.1 + 0.1 0.1 0.1 + 0.1 -0.1 0.1 + -0.1 0.1 0.1 + 0.1 0.1 0.1 +``` +""" +function minimal_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; + weight::Number = T(0.1), sampling_type::Symbol = :bernoulli_sample!, + kwargs...) where {T <: Number} + res_size, in_size = dims + input_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size) + input_matrix .+= T(weight) + f_sample = getfield(@__MODULE__, sampling_type) + f_sample(rng, input_matrix; kwargs...) + return input_matrix +end + +@doc raw""" + chebyshev_mapping([rng], [T], dims...; + amplitude=one(T), sine_divisor=one(T), + chebyshev_parameter=one(T), return_sparse=false) + +Generate a Chebyshev-mapped matrix [Xie2024](@cite). +The first row is initialized +using a sine function and subsequent rows are iteratively generated +via the Chebyshev mapping. 
The first row is defined as:
+
+```math
+    W[1, j] = \text{amplitude} \cdot \sin(j \cdot \pi / (\text{sine_divisor}
+    \cdot \text{n_cols}))
+```
+
+for j = 1, 2, …, n_cols (with n_cols typically equal to K+1, where K is the
+number of input layer neurons). Subsequent rows are generated by
+applying:
+
+```math
+    W[i+1, j] = \cos( \text{chebyshev_parameter} \cdot \arccos(W[i, j]))
+```
+
+## Arguments
+
+  - `rng`: Random number generator. Default is `Utils.default_rng()`
+    from WeightInitializers.
+  - `T`: Type of the elements in the reservoir matrix.
+    Default is `Float32`.
+  - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`.
+    `res_size` is assumed to be K+1.
+
+## Keyword arguments
+
+  - `amplitude`: Scaling factor used to initialize the first row.
+    This parameter adjusts the amplitude of the sine function. Default value is one.
+  - `sine_divisor`: Divisor applied in the sine function's phase. Default value is one.
+  - `chebyshev_parameter`: Control parameter for the Chebyshev mapping in
+    subsequent rows. This parameter influences the distribution of the
+    matrix elements. Default is one.
+  - `return_sparse`: If `true`, the function returns the matrix as a sparse matrix.
+    Default is `false`.
+ +## Examples + +```jldoctest +julia> input_matrix = chebyshev_mapping(10, 3) +10×3 Matrix{Float32}: + 0.866025 0.866025 1.22465f-16 + 0.866025 0.866025 -4.37114f-8 + 0.866025 0.866025 -4.37114f-8 + 0.866025 0.866025 -4.37114f-8 + 0.866025 0.866025 -4.37114f-8 + 0.866025 0.866025 -4.37114f-8 + 0.866025 0.866025 -4.37114f-8 + 0.866025 0.866025 -4.37114f-8 + 0.866025 0.866025 -4.37114f-8 + 0.866025 0.866025 -4.37114f-8 +``` +""" +function chebyshev_mapping(rng::AbstractRNG, ::Type{T}, dims::Integer...; + amplitude::AbstractFloat = one(T), sine_divisor::AbstractFloat = one(T), + chebyshev_parameter::AbstractFloat = one(T), + return_sparse::Bool = false) where {T <: Number} + throw_sparse_error(return_sparse) + input_matrix = DeviceAgnostic.zeros(rng, T, dims...) + n_rows, n_cols = dims[1], dims[2] + + for idx_cols in 1:n_cols + input_matrix[1, idx_cols] = amplitude * sin(idx_cols * pi / (sine_divisor * n_cols)) + end + for idx_rows in 2:n_rows + for idx_cols in 1:n_cols + input_matrix[idx_rows, idx_cols] = cos(chebyshev_parameter * acos(input_matrix[ + idx_rows - 1, idx_cols])) + end + end + + return return_init_as(Val(return_sparse), input_matrix) +end + +@doc raw""" + logistic_mapping([rng], [T], dims...; + amplitude=0.3, sine_divisor=5.9, logistic_parameter=3.7, + return_sparse=false) + +Generate an input weight matrix using a logistic mapping [Wang2022](@cite) +The first row is initialized using a sine function: + +```math + W[1, j] = \text{amplitude} \cdot \sin(j \cdot \pi / + (\text{sine_divisor} \cdot in_size)) +``` + +for each input index `j`, with `in_size` being the number of columns +provided in `dims`. Subsequent rows are generated recursively using +the logistic map recurrence: + +```math + W[i+1, j] = \text{logistic_parameter} \cdot W(i, j) \cdot (1 - W[i, j]) +``` + +## Arguments + - `rng`: Random number generator. Default is `Utils.default_rng()` + from WeightInitializers. + - `T`: Type of the elements in the reservoir matrix. + Default is `Float32`. 
+ - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`. + +## Keyword arguments + + - `amplitude`: Scaling parameter used in the sine initialization of the + first row. Default is 0.3. + - `sine_divisor`: Parameter used to adjust the phase in the sine initialization. + Default is 5.9. + - `logistic_parameter`: The parameter in the logistic mapping recurrence that + governs the dynamics. Default is 3.7. + - `return_sparse`: If `true`, returns the resulting matrix as a sparse matrix. + Default is `false`. + +## Examples + +```jldoctest +julia> logistic_mapping(8, 3) +8×3 Matrix{Float32}: + 0.0529682 0.104272 0.1523 + 0.185602 0.345578 0.477687 + 0.559268 0.836769 0.923158 + 0.912003 0.50537 0.262468 + 0.296938 0.924893 0.716241 + 0.772434 0.257023 0.751987 + 0.650385 0.70656 0.69006 + 0.841322 0.767132 0.791346 + +``` +""" +function logistic_mapping(rng::AbstractRNG, ::Type{T}, dims::Integer...; + amplitude::AbstractFloat = 0.3, sine_divisor::AbstractFloat = 5.9, + logistic_parameter::AbstractFloat = 3.7, + return_sparse::Bool = false) where {T <: Number} + throw_sparse_error(return_sparse) + input_matrix = DeviceAgnostic.zeros(rng, T, dims...) + num_rows, num_columns = dims[1], dims[2] + for idx_col in 1:num_columns + input_matrix[1, idx_col] = amplitude * + sin(idx_col * pi / (sine_divisor * num_columns)) + end + for idx_row in 2:num_rows + for idx_col in 1:num_columns + previous_value = input_matrix[idx_row - 1, idx_col] + input_matrix[idx_row, idx_col] = logistic_parameter * previous_value * + (1 - previous_value) + end + end + + return return_init_as(Val(return_sparse), input_matrix) +end + +@doc raw""" + modified_lm([rng], [T], dims...; + factor, amplitude=0.3, sine_divisor=5.9, logistic_parameter=2.35, + return_sparse=false) + +Generate a input weight matrix based on the logistic mapping [Viehweg2025](@cite). +Thematrix is built so that each input is transformed into a high-dimensional feature +space via a recursive logistic map. 
For each input, a chain of weights is generated +as follows: +- The first element of the chain is initialized using a sine function: + +```math + W[1,j] = \text{amplitude} \cdot \sin( (j \cdot \pi) / + (\text{factor} \cdot \text{n} \cdot \text{sine_divisor}) ) +``` + where `j` is the index corresponding to the input and `n` is the number of inputs. + +- Subsequent elements are recursively computed using the logistic mapping: + +```math + W[i+1,j] = \text{logistic_parameter} \cdot W[i,j] \cdot (1 - W[i,j]) +``` + +The resulting matrix has dimensions `(factor * in_size) x in_size`, where +`in_size` corresponds to the number of columns provided in `dims`. +If the provided number of rows does not match `factor * in_size` +the number of rows is overridden. + +## Arguments + - `rng`: Random number generator. Default is `Utils.default_rng()` + from WeightInitializers. + - `T`: Type of the elements in the reservoir matrix. + Default is `Float32`. + - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`. + +## Keyword arguments + + - `factor`: The number of logistic map iterations (chain length) per input, + determining the number of rows per input. + - `amplitude`: Scaling parameter A for the sine-based initialization of + the first element in each logistic chain. Default is 0.3. + - `sine_divisor`: Parameter B used to adjust the phase in the sine initialization. + Default is 5.9. + - `logistic_parameter`: The parameter r in the logistic recurrence that governs + the chain dynamics. Default is 2.35. + - `return_sparse`: If `true`, returns the resulting matrix as a sparse matrix. + Default is `false`. 
+ +## Examples + +```jldoctest +julia> modified_lm(20, 10; factor=2) +20×10 SparseArrays.SparseMatrixCSC{Float32, Int64} with 18 stored entries: +⎡⢠⠀⠀⠀⠀⎤ +⎢⠀⢣⠀⠀⠀⎥ +⎢⠀⠀⢣⠀⠀⎥ +⎢⠀⠀⠀⢣⠀⎥ +⎣⠀⠀⠀⠀⢣⎦ + +julia> modified_lm(12, 4; factor=3) +12×4 SparseArrays.SparseMatrixCSC{Float32, Int64} with 9 stored entries: + ⋅ ⋅ ⋅ ⋅ + ⋅ ⋅ ⋅ ⋅ + ⋅ ⋅ ⋅ ⋅ + ⋅ 0.0133075 ⋅ ⋅ + ⋅ 0.0308564 ⋅ ⋅ + ⋅ 0.070275 ⋅ ⋅ + ⋅ ⋅ 0.0265887 ⋅ + ⋅ ⋅ 0.0608222 ⋅ + ⋅ ⋅ 0.134239 ⋅ + ⋅ ⋅ ⋅ 0.0398177 + ⋅ ⋅ ⋅ 0.0898457 + ⋅ ⋅ ⋅ 0.192168 + +``` +""" +function modified_lm(rng::AbstractRNG, ::Type{T}, dims::Integer...; + factor::Integer, amplitude::AbstractFloat = 0.3, + sine_divisor::AbstractFloat = 5.9, logistic_parameter::AbstractFloat = 2.35, + return_sparse::Bool = false) where {T <: Number} + throw_sparse_error(return_sparse) + num_columns = dims[2] + expected_num_rows = factor * num_columns + if dims[1] != expected_num_rows + @warn """\n + Provided dims[1] ($(dims[1])) is not equal to factor*num_columns ($expected_num_rows). + Overriding number of rows to $expected_num_rows. 
\n + """ + end + output_matrix = DeviceAgnostic.zeros(rng, T, expected_num_rows, num_columns) + for idx_col in 1:num_columns + base_row = (idx_col - 1) * factor + 1 + output_matrix[base_row, idx_col] = amplitude * sin(((idx_col - 1) * pi) / + (factor * num_columns * sine_divisor)) + for jdx in 1:(factor - 1) + current_row = base_row + jdx + previous_value = output_matrix[current_row - 1, idx_col] + output_matrix[current_row, idx_col] = logistic_parameter * previous_value * + (1 - previous_value) + end + end + + return return_init_as(Val(return_sparse), output_matrix) +end diff --git a/src/inits/esn_inits.jl b/src/inits/inits_reservoir.jl similarity index 69% rename from src/inits/esn_inits.jl rename to src/inits/inits_reservoir.jl index c137ea45..dca05877 100644 --- a/src/inits/esn_inits.jl +++ b/src/inits/inits_reservoir.jl @@ -1,691 +1,3 @@ -### input layers -""" - scaled_rand([rng], [T], dims...; - scaling=0.1) - -Create and return a matrix with random values, uniformly distributed within -a range defined by `scaling`. - -# Arguments - - - `rng`: Random number generator. Default is `Utils.default_rng()` - from WeightInitializers. - - `T`: Type of the elements in the reservoir matrix. - Default is `Float32`. - - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`. - -# Keyword arguments - - - `scaling`: A scaling factor to define the range of the uniform distribution. - The factor can be passed in three different ways: - - + A single number. In this case, the matrix elements will be randomly - chosen from the range `[-scaling, scaling]`. Default option, with - a the scaling value set to `0.1`. - + A tuple `(lower, upper)`. The values define the range of the distribution. - + A vector. In this case, the columns will be scaled individually by the - entries of the vector. The entries can be numbers or tuples, which will mirror - the behavior described above. 
- -# Examples - -```jldoctest -julia> res_input = scaled_rand(8, 3) -8×3 Matrix{Float32}: - -0.0669356 -0.0292692 -0.0188943 - 0.0159724 0.004071 -0.0737949 - 0.026355 -0.0191563 0.0714962 - -0.0177412 0.0279123 0.0892906 - -0.0184405 0.0567368 0.0190222 - 0.0944272 0.0679244 0.0148647 - -0.0799005 -0.0891089 -0.0444782 - -0.0970182 0.0934286 0.03553 - -julia> tt = scaled_rand(5, 3, scaling = (0.1, 0.15)) -5×3 Matrix{Float32}: - 0.13631 0.110929 0.116177 - 0.116299 0.136038 0.119713 - 0.11535 0.144712 0.110029 - 0.127453 0.12657 0.147656 - 0.139446 0.117656 0.104712 -``` - -Example with vector: - -```jldoctest -julia> tt = scaled_rand(5, 3, scaling = [0.1, 0.2, 0.3]) -5×3 Matrix{Float32}: - 0.0452399 -0.112565 -0.105874 - -0.0348047 0.0883044 -0.0634468 - -0.0386004 0.157698 -0.179648 - 0.00981022 0.012559 0.271875 - 0.0577838 -0.0587553 -0.243451 - -julia> tt = scaled_rand(5, 3, scaling = [(0.1, 0.2), (-0.2, -0.1), (0.3, 0.5)]) -5×3 Matrix{Float32}: - 0.17262 -0.178141 0.364709 - 0.132598 -0.127924 0.378851 - 0.1307 -0.110575 0.340117 - 0.154905 -0.14686 0.490625 - 0.178892 -0.164689 0.31885 -``` -""" -function scaled_rand(rng::AbstractRNG, ::Type{T}, dims::Integer...; - scaling::Union{Number, Tuple, Vector} = T(0.1)) where {T <: Number} - res_size, in_size = dims - layer_matrix = DeviceAgnostic.rand(rng, T, res_size, in_size) - apply_scale!(layer_matrix, scaling, T) - return layer_matrix -end - -function apply_scale!(input_matrix::AbstractArray, scaling::Number, ::Type{T}) where {T} - @. 
input_matrix = (input_matrix - T(0.5)) * (T(2) * T(scaling)) - return input_matrix -end - -function apply_scale!(input_matrix::AbstractArray, - scaling::Tuple{<:Number, <:Number}, ::Type{T}) where {T} - lower, upper = T(scaling[1]), T(scaling[2]) - @assert lower res_input = weighted_init(8, 3) -6×3 Matrix{Float32}: - 0.0452399 0.0 0.0 - -0.0348047 0.0 0.0 - 0.0 -0.0386004 0.0 - 0.0 0.00981022 0.0 - 0.0 0.0 0.0577838 - 0.0 0.0 -0.0562827 -``` -""" -function weighted_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; - scaling::Number = T(0.1), return_sparse::Bool = false) where {T <: Number} - throw_sparse_error(return_sparse) - approx_res_size, in_size = dims - res_size = Int(floor(approx_res_size / in_size) * in_size) - check_modified_ressize(res_size, approx_res_size) - layer_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size) - q = floor(Int, res_size / in_size) - - for idx in 1:in_size - sc_rand = (DeviceAgnostic.rand(rng, T, q) .- T(0.5)) .* (T(2) * T(scaling)) - layer_matrix[((idx - 1) * q + 1):((idx) * q), idx] = sc_rand - end - - return return_init_as(Val(return_sparse), layer_matrix) -end - -""" - weighted_minimal([rng], [T], dims...; - weight=0.1, return_sparse=false, - sampling_type=:no_sample) - -Create and return a minimal weighted input layer matrix. -This initializer generates a weighted input matrix with equal, deterministic -elements in the same construction as [`weighted_minimal]`(@ref), -inspired by [Lu2017](@cite). - -Please note that this initializer computes its own reservoir size! If -the computed reservoir size is different than the provided one it will raise a -warning. - -# Arguments - - - `rng`: Random number generator. Default is `Utils.default_rng()` - from WeightInitializers. - - `T`: Type of the elements in the reservoir matrix. - Default is `Float32`. - - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`. - -# Keyword arguments - - - `weight`: The value for all the weights in the input matrix. 
- Defaults to `0.1`. - - `return_sparse`: flag for returning a `sparse` matrix. - Default is `false`. - - `sampling_type`: Sampling that decides the distribution of `weight` negative numbers. - If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each - `weight` can be positive with a probability set by `positive_prob`. If set to - `:irrational_sample!` the `weight` is negative if the decimal number of the - irrational number chosen is odd. If set to `:regular_sample!`, each weight will be - assigned a negative sign after the chosen `strides`. `strides` can be a single - number or an array. Default is `:no_sample`. - - `positive_prob`: probability of the `weight` being positive when `sampling_type` is - set to `:bernoulli_sample!`. Default is 0.5. - - `irrational`: Irrational number whose decimals decide the sign of `weight`. - Default is `pi`. - - `start`: Which place after the decimal point the counting starts for the `irrational` - sign counting. Default is 1. - - `strides`: number of strides for assigning negative value to a weight. It can be an - integer or an array. Default is 2. - -# Examples - -```jldoctest -julia> res_input = weighted_minimal(8, 3) -┌ Warning: Reservoir size has changed! -│ -│ Computed reservoir size (6) does not equal the provided reservoir size (8). -│ -│ Using computed value (6). Make sure to modify the reservoir initializer accordingly. -│ -└ @ ReservoirComputing ~/.julia/dev/ReservoirComputing/src/esn/esn_inits.jl:159 -6×3 Matrix{Float32}: - 0.1 0.0 0.0 - 0.1 0.0 0.0 - 0.0 0.1 0.0 - 0.0 0.1 0.0 - 0.0 0.0 0.1 - 0.0 0.0 0.1 - -julia> res_input = weighted_minimal(9, 3; weight = 0.99) -9×3 Matrix{Float32}: - 0.99 0.0 0.0 - 0.99 0.0 0.0 - 0.99 0.0 0.0 - 0.0 0.99 0.0 - 0.0 0.99 0.0 - 0.0 0.99 0.0 - 0.0 0.0 0.99 - 0.0 0.0 0.99 - 0.0 0.0 0.99 - -julia> res_input = weighted_minimal(9, 3; sampling_type = :bernoulli_sample!) 
-9×3 Matrix{Float32}: - 0.1 -0.0 -0.0 - -0.1 -0.0 -0.0 - 0.1 -0.0 0.0 - -0.0 0.1 0.0 - 0.0 0.1 -0.0 - 0.0 0.1 0.0 - -0.0 -0.0 -0.1 - -0.0 -0.0 0.1 - 0.0 -0.0 0.1 -``` -""" -function weighted_minimal(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight::Number = T(0.1), return_sparse::Bool = false, - sampling_type = :no_sample, kwargs...) where {T <: Number} - throw_sparse_error(return_sparse) - approx_res_size, in_size = dims - res_size = Int(floor(approx_res_size / in_size) * in_size) - check_modified_ressize(res_size, approx_res_size) - layer_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size) - q = floor(Int, res_size / in_size) - - for idx in 1:in_size - layer_matrix[((idx - 1) * q + 1):((idx) * q), idx] = T(weight) .* ones(T, q) - end - f_sample = getfield(@__MODULE__, sampling_type) - f_sample(rng, layer_matrix; kwargs...) - return return_init_as(Val(return_sparse), layer_matrix) -end - -""" - informed_init([rng], [T], dims...; - scaling=0.1, model_in_size, gamma=0.5) - -Create an input layer for informed echo state -networks [Pathak2018](@cite). - -# Arguments - - - `rng`: Random number generator. Default is `Utils.default_rng()` - from WeightInitializers. - - `T`: Type of the elements in the reservoir matrix. - Default is `Float32`. - - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`. - -# Keyword arguments - - - `scaling`: The scaling factor for the input matrix. - Default is 0.1. - - `model_in_size`: The size of the input model. - - `gamma`: The gamma value. Default is 0.5. 
- -# Examples -""" -function informed_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; - scaling::Number = T(0.1), model_in_size::Integer, - gamma::Number = T(0.5)) where {T <: Number} - res_size, in_size = dims - state_size = in_size - model_in_size - - if state_size <= 0 - throw(DimensionMismatch("in_size must be greater than model_in_size")) - end - - input_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size) - zero_connections = DeviceAgnostic.zeros(rng, T, in_size) - num_for_state = floor(Int, res_size * gamma) - num_for_model = floor(Int, res_size * (1 - gamma)) - - same_as_zero_row(jdx::Int) = zero_connections == @view(input_matrix[jdx, :]) - - for _ in 1:num_for_state - idxs = findall(same_as_zero_row, axes(input_matrix, 1)) - isempty(idxs) && break - - random_row_idx = rand(rng, idxs) - random_clm_idx = rand(rng, 1:state_size) - - input_matrix[random_row_idx, random_clm_idx] = (DeviceAgnostic.rand(rng, T) - - T(0.5)) * (T(2) * T(scaling)) - end - - for _ in 1:num_for_model - idxs = findall(same_as_zero_row, axes(input_matrix, 1)) - isempty(idxs) && break - - random_row_idx = rand(rng, idxs) - random_clm_idx = rand(rng, (state_size + 1):in_size) - - input_matrix[random_row_idx, random_clm_idx] = (DeviceAgnostic.rand(rng, T) - - T(0.5)) * (T(2) * T(scaling)) - end - - return input_matrix -end - -""" - minimal_init([rng], [T], dims...; - sampling_type=:bernoulli_sample!, weight=0.1, irrational=pi, - start=1, p=0.5) - -Create a layer matrix with uniform weights determined by -`weight` [Rodan2011](@cite). The sign difference is randomly -determined by the `sampling` chosen. - -# Arguments - - - `rng`: Random number generator. Default is `Utils.default_rng()` - from WeightInitializers. - - `T`: Type of the elements in the reservoir matrix. - Default is `Float32`. - - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`. - -# Keyword arguments - - - `weight`: The weight used to fill the layer matrix. Default is 0.1. 
- - `sampling_type`: Sampling that decides the distribution of `weight` negative numbers. - If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each - `weight` can be positive with a probability set by `positive_prob`. If set to - `:irrational_sample!` the `weight` is negative if the decimal number of the - irrational number chosen is odd. If set to `:regular_sample!`, each weight will be - assigned a negative sign after the chosen `strides`. `strides` can be a single - number or an array. Default is `:no_sample`. - - `positive_prob`: probability of the `weight` being positive when `sampling_type` is - set to `:bernoulli_sample!`. Default is 0.5. - - `irrational`: Irrational number whose decimals decide the sign of `weight`. - Default is `pi`. - - `start`: Which place after the decimal point the counting starts for the `irrational` - sign counting. Default is 1. - - `strides`: number of strides for assigning negative value to a weight. It can be an - integer or an array. Default is 2. 
- -# Examples - -```jldoctest -julia> res_input = minimal_init(8, 3) -8×3 Matrix{Float32}: - 0.1 -0.1 0.1 - -0.1 0.1 0.1 - -0.1 -0.1 0.1 - -0.1 -0.1 -0.1 - 0.1 0.1 0.1 - -0.1 -0.1 -0.1 - -0.1 -0.1 0.1 - 0.1 -0.1 0.1 - -julia> res_input = minimal_init(8, 3; sampling_type = :irrational) -8×3 Matrix{Float32}: - -0.1 0.1 -0.1 - 0.1 -0.1 -0.1 - 0.1 0.1 -0.1 - 0.1 0.1 0.1 - -0.1 -0.1 -0.1 - 0.1 0.1 0.1 - 0.1 0.1 -0.1 - -0.1 0.1 -0.1 - -julia> res_input = minimal_init(8, 3; p = 0.1) # lower p -> more negative signs -8×3 Matrix{Float32}: - -0.1 -0.1 -0.1 - -0.1 -0.1 -0.1 - -0.1 -0.1 -0.1 - -0.1 -0.1 -0.1 - 0.1 -0.1 -0.1 - -0.1 -0.1 -0.1 - -0.1 -0.1 -0.1 - -0.1 -0.1 -0.1 - -julia> res_input = minimal_init(8, 3; p = 0.8)# higher p -> more positive signs -8×3 Matrix{Float32}: - 0.1 0.1 0.1 - -0.1 0.1 0.1 - -0.1 0.1 0.1 - 0.1 0.1 0.1 - 0.1 0.1 0.1 - 0.1 -0.1 0.1 - -0.1 0.1 0.1 - 0.1 0.1 0.1 -``` -""" -function minimal_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight::Number = T(0.1), sampling_type::Symbol = :bernoulli_sample!, - kwargs...) where {T <: Number} - res_size, in_size = dims - input_matrix = DeviceAgnostic.zeros(rng, T, res_size, in_size) - input_matrix .+= T(weight) - f_sample = getfield(@__MODULE__, sampling_type) - f_sample(rng, input_matrix; kwargs...) - return input_matrix -end - -@doc raw""" - chebyshev_mapping([rng], [T], dims...; - amplitude=one(T), sine_divisor=one(T), - chebyshev_parameter=one(T), return_sparse=false) - -Generate a Chebyshev-mapped matrix [Xie2024](@cite). -The first row is initialized -using a sine function and subsequent rows are iteratively generated -via the Chebyshev mapping. The first row is defined as: - -```math - W[1, j] = \text{amplitude} \cdot \sin(j \cdot \pi / (\text{sine_divisor} - \cdot \text{n_cols})) -``` - -for j = 1, 2, …, n_cols (with n_cols typically equal to K+1, where K is the number of input layer neurons). 
-Subsequent rows are generated by applying the mapping: - -```math - W[i+1, j] = \cos( \text{chebyshev_parameter} \cdot \acos(W[pi, j])) -``` - -# Arguments - - - `rng`: Random number generator. Default is `Utils.default_rng()` - from WeightInitializers. - - `T`: Type of the elements in the reservoir matrix. - Default is `Float32`. - - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`. - `res_size` is assumed to be K+1. - -# Keyword arguments - - - `amplitude`: Scaling factor used to initialize the first row. - This parameter adjusts the amplitude of the sine function. Default value is one. - - `sine_divisor`: Divisor applied in the sine function's phase. Default value is one. - - `chebyshev_parameter`: Control parameter for the Chebyshev mapping in - subsequent rows. This parameter influences the distribution of the - matrix elements. Default is one. - - `return_sparse`: If `true`, the function returns the matrix as a sparse matrix. - Default is `false`. - -# Examples - -```jldoctest -julia> input_matrix = chebyshev_mapping(10, 3) -10×3 Matrix{Float32}: - 0.866025 0.866025 1.22465f-16 - 0.866025 0.866025 -4.37114f-8 - 0.866025 0.866025 -4.37114f-8 - 0.866025 0.866025 -4.37114f-8 - 0.866025 0.866025 -4.37114f-8 - 0.866025 0.866025 -4.37114f-8 - 0.866025 0.866025 -4.37114f-8 - 0.866025 0.866025 -4.37114f-8 - 0.866025 0.866025 -4.37114f-8 - 0.866025 0.866025 -4.37114f-8 -``` -""" -function chebyshev_mapping(rng::AbstractRNG, ::Type{T}, dims::Integer...; - amplitude::AbstractFloat = one(T), sine_divisor::AbstractFloat = one(T), - chebyshev_parameter::AbstractFloat = one(T), - return_sparse::Bool = false) where {T <: Number} - throw_sparse_error(return_sparse) - input_matrix = DeviceAgnostic.zeros(rng, T, dims...) 
- n_rows, n_cols = dims[1], dims[2] - - for idx_cols in 1:n_cols - input_matrix[1, idx_cols] = amplitude * sin(idx_cols * pi / (sine_divisor * n_cols)) - end - for idx_rows in 2:n_rows - for idx_cols in 1:n_cols - input_matrix[idx_rows, idx_cols] = cos(chebyshev_parameter * acos(input_matrix[ - idx_rows - 1, idx_cols])) - end - end - - return return_init_as(Val(return_sparse), input_matrix) -end - -@doc raw""" - logistic_mapping([rng], [T], dims...; - amplitude=0.3, sine_divisor=5.9, logistic_parameter=3.7, - return_sparse=false) - -Generate an input weight matrix using a logistic mapping [Wang2022](@cite) -The first row is initialized using a sine function: - -```math - W[1, j] = \text{amplitude} \cdot \sin(j \cdot \pi / - (\text{sine_divisor} \cdot in_size)) -``` - -for each input index `j`, with `in_size` being the number of columns provided in `dims`. Subsequent rows -are generated recursively using the logistic map recurrence: - -```math - W[i+1, j] = \text{logistic_parameter} \cdot W(i, j) \cdot (1 - W[i, j]) -``` - -# Arguments - - `rng`: Random number generator. Default is `Utils.default_rng()` - from WeightInitializers. - - `T`: Type of the elements in the reservoir matrix. - Default is `Float32`. - - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`. - -# Keyword arguments - - - `amplitude`: Scaling parameter used in the sine initialization of the - first row. Default is 0.3. - - `sine_divisor`: Parameter used to adjust the phase in the sine initialization. - Default is 5.9. - - `logistic_parameter`: The parameter in the logistic mapping recurrence that - governs the dynamics. Default is 3.7. - - `return_sparse`: If `true`, returns the resulting matrix as a sparse matrix. - Default is `false`. 
- -# Examples - -```jldoctest -julia> logistic_mapping(8, 3) -8×3 Matrix{Float32}: - 0.0529682 0.104272 0.1523 - 0.185602 0.345578 0.477687 - 0.559268 0.836769 0.923158 - 0.912003 0.50537 0.262468 - 0.296938 0.924893 0.716241 - 0.772434 0.257023 0.751987 - 0.650385 0.70656 0.69006 - 0.841322 0.767132 0.791346 - -``` -""" -function logistic_mapping(rng::AbstractRNG, ::Type{T}, dims::Integer...; - amplitude::AbstractFloat = 0.3, sine_divisor::AbstractFloat = 5.9, - logistic_parameter::AbstractFloat = 3.7, - return_sparse::Bool = false) where {T <: Number} - throw_sparse_error(return_sparse) - input_matrix = DeviceAgnostic.zeros(rng, T, dims...) - num_rows, num_columns = dims[1], dims[2] - for idx_col in 1:num_columns - input_matrix[1, idx_col] = amplitude * - sin(idx_col * pi / (sine_divisor * num_columns)) - end - for idx_row in 2:num_rows - for idx_col in 1:num_columns - previous_value = input_matrix[idx_row - 1, idx_col] - input_matrix[idx_row, idx_col] = logistic_parameter * previous_value * - (1 - previous_value) - end - end - - return return_init_as(Val(return_sparse), input_matrix) -end - -@doc raw""" - modified_lm([rng], [T], dims...; - factor, amplitude=0.3, sine_divisor=5.9, logistic_parameter=2.35, - return_sparse=false) - -Generate a input weight matrix based on the logistic mapping [Viehweg2025](@cite). -Thematrix is built so that each input is transformed into a high-dimensional feature -space via a recursive logistic map. For each input, a chain of weights is generated -as follows: -- The first element of the chain is initialized using a sine function: - -```math - W[1,j] = \text{amplitude} \cdot \sin( (j \cdot \pi) / - (\text{factor} \cdot \text{n} \cdot \text{sine_divisor}) ) -``` - where `j` is the index corresponding to the input and `n` is the number of inputs. 
- -- Subsequent elements are recursively computed using the logistic mapping: - -```math - W[i+1,j] = \text{logistic_parameter} \cdot W[i,j] \cdot (1 - W[i,j]) -``` - -The resulting matrix has dimensions `(factor * in_size) x in_size`, where -`in_size` corresponds to the number of columns provided in `dims`. -If the provided number of rows does not match `factor * in_size` -the number of rows is overridden. - -# Arguments - - `rng`: Random number generator. Default is `Utils.default_rng()` - from WeightInitializers. - - `T`: Type of the elements in the reservoir matrix. - Default is `Float32`. - - `dims`: Dimensions of the matrix. Should follow `res_size x in_size`. - -# Keyword arguments - - - `factor`: The number of logistic map iterations (chain length) per input, - determining the number of rows per input. - - `amplitude`: Scaling parameter A for the sine-based initialization of - the first element in each logistic chain. Default is 0.3. - - `sine_divisor`: Parameter B used to adjust the phase in the sine initialization. - Default is 5.9. - - `logistic_parameter`: The parameter r in the logistic recurrence that governs - the chain dynamics. Default is 2.35. - - `return_sparse`: If `true`, returns the resulting matrix as a sparse matrix. - Default is `false`. 
- -# Examples - -```jldoctest -julia> modified_lm(20, 10; factor=2) -20×10 SparseArrays.SparseMatrixCSC{Float32, Int64} with 18 stored entries: -⎡⢠⠀⠀⠀⠀⎤ -⎢⠀⢣⠀⠀⠀⎥ -⎢⠀⠀⢣⠀⠀⎥ -⎢⠀⠀⠀⢣⠀⎥ -⎣⠀⠀⠀⠀⢣⎦ - -julia> modified_lm(12, 4; factor=3) -12×4 SparseArrays.SparseMatrixCSC{Float32, Int64} with 9 stored entries: - ⋅ ⋅ ⋅ ⋅ - ⋅ ⋅ ⋅ ⋅ - ⋅ ⋅ ⋅ ⋅ - ⋅ 0.0133075 ⋅ ⋅ - ⋅ 0.0308564 ⋅ ⋅ - ⋅ 0.070275 ⋅ ⋅ - ⋅ ⋅ 0.0265887 ⋅ - ⋅ ⋅ 0.0608222 ⋅ - ⋅ ⋅ 0.134239 ⋅ - ⋅ ⋅ ⋅ 0.0398177 - ⋅ ⋅ ⋅ 0.0898457 - ⋅ ⋅ ⋅ 0.192168 - -``` -""" -function modified_lm(rng::AbstractRNG, ::Type{T}, dims::Integer...; - factor::Integer, amplitude::AbstractFloat = 0.3, - sine_divisor::AbstractFloat = 5.9, logistic_parameter::AbstractFloat = 2.35, - return_sparse::Bool = false) where {T <: Number} - throw_sparse_error(return_sparse) - num_columns = dims[2] - expected_num_rows = factor * num_columns - if dims[1] != expected_num_rows - @warn """\n - Provided dims[1] ($(dims[1])) is not equal to factor*num_columns ($expected_num_rows). - Overriding number of rows to $expected_num_rows. 
\n - """ - end - output_matrix = DeviceAgnostic.zeros(rng, T, expected_num_rows, num_columns) - for idx_col in 1:num_columns - base_row = (idx_col - 1) * factor + 1 - output_matrix[base_row, idx_col] = amplitude * sin(((idx_col - 1) * pi) / - (factor * num_columns * sine_divisor)) - for jdx in 1:(factor - 1) - current_row = base_row + jdx - previous_value = output_matrix[current_row - 1, idx_col] - output_matrix[current_row, idx_col] = logistic_parameter * previous_value * - (1 - previous_value) - end - end - - return return_init_as(Val(return_sparse), output_matrix) -end - -### reservoirs - """ rand_sparse([rng], [T], dims...; radius=1.0, sparsity=0.1, std=1.0, return_sparse=false) From 4106497d874a77d682624748257c675f92c8040d Mon Sep 17 00:00:00 2001 From: MartinuzziFrancesco Date: Sun, 30 Nov 2025 18:39:26 +0100 Subject: [PATCH 3/4] docs: improve docstrings for reservoir inits --- src/inits/inits_reservoir.jl | 521 ++++++++++++++++++++++++++--------- 1 file changed, 384 insertions(+), 137 deletions(-) diff --git a/src/inits/inits_reservoir.jl b/src/inits/inits_reservoir.jl index dca05877..8bc51b73 100644 --- a/src/inits/inits_reservoir.jl +++ b/src/inits/inits_reservoir.jl @@ -6,26 +6,29 @@ Create and return a random sparse reservoir matrix. The matrix will be of size specified by `dims`, with specified `sparsity` and scaled spectral radius according to `radius`. -# Arguments +## Arguments - - `rng`: Random number generator. Default is `Utils.default_rng()` - from WeightInitializers. + - `rng`: Random number generator. Default is `Utils.default_rng()`from + [WeightInitializers](https://lux.csail.mit.edu/stable/api/Building_Blocks/WeightInitializers). - `T`: Type of the elements in the reservoir matrix. Default is `Float32`. - `dims`: Dimensions of the reservoir matrix. -# Keyword arguments +## Keyword arguments - `radius`: The desired spectral radius of the reservoir. Defaults to 1.0. 
- `sparsity`: The sparsity level of the reservoir matrix, controlling the fraction of zero elements. Defaults to 0.1. - `return_sparse`: flag for returning a `sparse` matrix. + `true` requires `SparseArrays` to be loaded. Default is `false`. -# Examples +## Examples -```jldoctest +Changing the sparsity: + +```jldoctest randsparse julia> res_matrix = rand_sparse(5, 5; sparsity = 0.5) 5×5 Matrix{Float32}: 0.0 0.0 0.0 0.0 0.0 @@ -33,6 +36,36 @@ julia> res_matrix = rand_sparse(5, 5; sparsity = 0.5) 0.0 0.0 -0.931294 0.0 0.553706 0.723235 -0.524727 0.0 0.0 0.0 1.23723 0.0 0.181824 -1.5478 0.465328 + +julia> res_matrix = rand_sparse(5, 5; sparsity = 0.2) +5×5 Matrix{Float32}: + 0.0 0.0 0.0 0.0 0.0 + 0.0 0.853184 0.0 0.0 0.0 + 0.0 0.0 -1.0 0.0 0.0 + 0.776591 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 -1.66199 0.499657 + +julia> res_matrix = rand_sparse(5, 5; sparsity = 0.8) +5×5 Matrix{Float32}: + 0.0 0.229011 0.625026 -0.660061 -1.39078 + -0.295761 0.32544 0.0 0.107163 0.0 + 0.766352 1.44836 -0.381442 -0.435473 0.226788 + 0.296224 -0.214919 0.00956791 0.0 0.210393 + 0.506746 0.0 0.0744718 -0.633951 0.19059 +``` + +Returning a sparse matrix: + +```jldoctest randsparse +julia> using SparseArrays + +julia> res_matrix = rand_sparse(5, 5; sparsity = 0.4, return_sparse=true) +5×5 SparseMatrixCSC{Float32, Int64} with 10 stored entries: + ⋅ ⋅ ⋅ ⋅ ⋅ + ⋅ 0.794565 ⋅ 0.26164 ⋅ + ⋅ ⋅ -0.931294 ⋅ 0.553706 + 0.723235 -0.524727 ⋅ ⋅ ⋅ + 1.23723 ⋅ 0.181824 -1.5478 0.465328 ``` """ function rand_sparse(rng::AbstractRNG, ::Type{T}, dims::Integer...; @@ -54,15 +87,15 @@ end Returns an initializer to build a sparse reservoir matrix with the given `sparsity` by using a pseudo-SVD approach as described in [Yang2018](@cite). -# Arguments +## Arguments - - `rng`: Random number generator. Default is `Utils.default_rng()` - from WeightInitializers. + - `rng`: Random number generator. 
Default is `Utils.default_rng()`from + [WeightInitializers](https://lux.csail.mit.edu/stable/api/Building_Blocks/WeightInitializers). - `T`: Type of the elements in the reservoir matrix. Default is `Float32`. - `dims`: Dimensions of the reservoir matrix. -# Keyword arguments +## Keyword arguments - `max_value`: The maximum absolute value of elements in the matrix. Default is 1.0 @@ -73,14 +106,17 @@ Returns an initializer to build a sparse reservoir matrix with the given - `reverse_sort`: A boolean indicating whether to reverse the sorted singular values. Default is `false`. - `return_sparse`: flag for returning a `sparse` matrix. + `true` requires `SparseArrays` to be loaded. Default is `false`. - `return_diag`: flag for returning a `Diagonal` matrix. If both `return_diag` and `return_sparse` are set to `true` priority is given to `return_diag`. Default is `false`. -# Examples +## Examples -```jldoctest +Default call: + +```jldoctest psvd julia> res_matrix = pseudo_svd(5, 5) 5×5 Matrix{Float32}: 0.306998 0.0 0.0 0.0 0.0 @@ -89,10 +125,57 @@ julia> res_matrix = pseudo_svd(5, 5) 0.0 0.0 0.0 0.726199 0.0 0.0 0.0 0.0 0.0 1.0 ``` + +With reversed sorting: + +```jldoctest psvd +julia> pseudo_svd(5, 5; reverse_sort=true) +5×5 Matrix{Float32}: + 1.0 0.0 0.0 0.0 0.0 + 0.0 0.726199 0.0 0.0 0.0 + 0.0 0.0 0.549051 0.0 0.0 + 0.0 0.0 0.0 0.325977 0.0 + 0.0 0.0 0.0 0.0 0.306998 +``` + +With no sorting + +```jldoctest psvd +julia> pseudo_svd(5, 5; sorted=false) +5×5 Matrix{Float32}: + 0.726199 0.0 0.0 0.0 0.0 + 0.0 0.325977 0.0 0.0 0.0 + 0.0 0.0 0.306998 0.0 0.0 + 0.0 0.0 0.0 0.549051 0.0 + 0.0 0.0 0.0 0.0 0.788919 +``` + +Returning as a `Diagonal` or a `sparse` matrix: + +```jldoctest psvd +julia> pseudo_svd(5, 5; return_diag=true) +5×5 LinearAlgebra.Diagonal{Float32, Vector{Float32}}: + 0.306998 ⋅ ⋅ ⋅ ⋅ + ⋅ 0.325977 ⋅ ⋅ ⋅ + ⋅ ⋅ 0.549051 ⋅ ⋅ + ⋅ ⋅ ⋅ 0.726199 ⋅ + ⋅ ⋅ ⋅ ⋅ 1.0 + +julia> using SparseArrays + +julia> pseudo_svd(5, 5; return_sparse=true) +5×5 SparseMatrixCSC{Float32, 
Int64} with 5 stored entries: + 0.306998 ⋅ ⋅ ⋅ ⋅ + ⋅ 0.325977 ⋅ ⋅ ⋅ + ⋅ ⋅ 0.549051 ⋅ ⋅ + ⋅ ⋅ ⋅ 0.726199 ⋅ + ⋅ ⋅ ⋅ ⋅ 1.0 +``` + """ function pseudo_svd(rng::AbstractRNG, ::Type{T}, dims::Integer...; max_value::Number = T(1), - sparsity::Number = 0.1, + sparsity::Number = 0.1f0, sorted::Bool = true, reverse_sort::Bool = false, return_sparse::Bool = false, @@ -177,24 +260,25 @@ matrix based on a digital chaotic system operating at finite precision. If the requested matrix order does not exactly match a valid order the closest valid order is used. -# Arguments +## Arguments - - `rng`: Random number generator. Default is `Utils.default_rng()` - from WeightInitializers. + - `rng`: Random number generator. Default is `Utils.default_rng()`from + [WeightInitializers](https://lux.csail.mit.edu/stable/api/Building_Blocks/WeightInitializers). - `T`: Type of the elements in the reservoir matrix. Default is `Float32`. - `dims`: Dimensions of the reservoir matrix. -# Keyword arguments +## Keyword arguments - `extra_edge_probability`: Probability of adding extra random edges in the adjacency matrix to enhance connectivity. Default is 0.1. - `desired_spectral_radius`: The target spectral radius for the reservoir matrix. Default is one. - - `return_sparse`: If `true`, the function returns the - reservoir matrix as a sparse matrix. Default is `false`. + - `return_sparse`: flag for returning a `sparse` matrix. + `true` requires `SparseArrays` to be loaded. + Default is `false`. -# Examples +## Examples ```jldoctest julia> res_matrix = chaotic_init(8, 8) @@ -213,7 +297,7 @@ julia> res_matrix = chaotic_init(8, 8) ``` """ function chaotic_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; - extra_edge_probability::AbstractFloat = T(0.1), spectral_radius::AbstractFloat = one(T), + extra_edge_probability::AbstractFloat = T(0.1f0), spectral_radius::AbstractFloat = one(T), return_sparse::Bool = false) where {T <: Number} throw_sparse_error(return_sparse) check_res_size(dims...) 
@@ -282,18 +366,16 @@ for each node [Griffith2019](@cite). When `in_degree` is 1, the function can enf
 a fully connected cycle if `connected` is `true`; otherwise, it generates
 a random connectivity pattern.
 
-# Arguments
+## Arguments
 
-  - `rng`: Random number generator. Default is `Utils.default_rng()`
-    from WeightInitializers.
+  - `rng`: Random number generator. Default is `Utils.default_rng()` from
+    [WeightInitializers](https://lux.csail.mit.edu/stable/api/Building_Blocks/WeightInitializers).
   - `T`: Type of the elements in the reservoir matrix.
     Default is `Float32`.
   - `dims`: Dimensions of the reservoir matrix.
 
-# Keyword Arguments
+## Keyword arguments
 
-  - `return_sparse`: If `true`, the function returns the
-    reservoir matrix as a sparse matrix. Default is `false`.
   - `connected`: For `in_degree == 1`, if `true` a connected cycle is enforced.
     Default is `false`.
   - `in_degree`: The number of incoming connections per node.
@@ -302,6 +384,26 @@ otherwise, it generates a random connectivity pattern.
     Defaults to 1.0.
   - `cut_cycle`: If `true`, removes one edge from the cycle to cut it.
     Default is `false`.
+  - `return_sparse`: flag for returning a `sparse` matrix.
+    `true` requires `SparseArrays` to be loaded.
+    Default is `false`.
+ +## Examples + +```jldoctest lowcon +julia> low_connectivity(10, 10) +10×10 Matrix{Float32}: + 0.0 0.0 0.0 … 0.0 0.0 0.2207 + 0.0 0.0 0.0 0.0 0.0 0.564821 + 0.318999 0.0 0.0 0.0 0.0 0.0 + 0.670023 0.0 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 1.79705 0.0 0.0 + 0.0 -1.95711 0.0 … 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 0.0 0.0 + 0.0 0.0 -0.650657 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 0.0 -1.0 +``` """ function low_connectivity(rng::AbstractRNG, ::Type{T}, dims::Integer...; return_sparse::Bool = false, connected::Bool = false, @@ -324,7 +426,7 @@ function low_connectivity(rng::AbstractRNG, ::Type{T}, dims::Integer...; end function build_cycle(::Val{false}, rng::AbstractRNG, ::Type{T}, res_size::Int; - in_degree::Integer = 1, radius::Number = T(1.0), cut_cycle::Bool = false) where {T <: + in_degree::Integer = 1, radius::Number = T(1.0f0), cut_cycle::Bool = false) where {T <: Number} reservoir_matrix = DeviceAgnostic.zeros(rng, T, res_size, res_size) for idx in 1:res_size @@ -338,7 +440,7 @@ function build_cycle(::Val{false}, rng::AbstractRNG, ::Type{T}, res_size::Int; end function build_cycle(::Val{true}, rng::AbstractRNG, ::Type{T}, res_size::Int; - in_degree::Integer = 1, radius::Number = T(1.0), cut_cycle::Bool = false) where {T <: + in_degree::Integer = 1, radius::Number = T(1.0f0), cut_cycle::Bool = false) where {T <: Number} reservoir_matrix = DeviceAgnostic.zeros(rng, T, res_size, res_size) perm = randperm(rng, res_size) @@ -366,22 +468,30 @@ function cut_cycle_edge!( return reservoir_matrix end -""" +@doc raw""" delay_line([rng], [T], dims...; delay_weight=0.1, delay_shift=1, return_sparse=false, kwargs...) Create and return a delay line reservoir matrix [Rodan2011](@cite). -# Arguments +```math +W_{i,j} = +\begin{cases} + r, & \text{if } i = j + 1, j \in [1, D_{\mathrm{res}} - 1], \\[6pt] + 0, & \text{otherwise.} +\end{cases} +``` + +## Arguments - - `rng`: Random number generator. Default is `Utils.default_rng()` - from WeightInitializers. 
+ - `rng`: Random number generator. Default is `Utils.default_rng()`from + [WeightInitializers](https://lux.csail.mit.edu/stable/api/Building_Blocks/WeightInitializers). - `T`: Type of the elements in the reservoir matrix. Default is `Float32`. - `dims`: Dimensions of the reservoir matrix. -# Keyword arguments +## Keyword arguments - `delay_weight`: Determines the value of all connections in the reservoir. This can be provided as a single value or an array. In case it is provided as an @@ -390,6 +500,7 @@ Create and return a delay line reservoir matrix [Rodan2011](@cite). Default is 0.1. - `delay_shift`: delay line shift. Default is 1. - `return_sparse`: flag for returning a `sparse` matrix. + `true` requires `SparseArrays` to be loaded. Default is `false`. - `sampling_type`: Sampling that decides the distribution of `weight` negative numbers. If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each @@ -407,7 +518,7 @@ Create and return a delay line reservoir matrix [Rodan2011](@cite). - `strides`: number of strides for assigning negative value to a weight. It can be an integer or an array. Default is 2. -# Examples +## Examples ```jldoctest julia> res_matrix = delay_line(5, 5) @@ -436,7 +547,7 @@ julia> res_matrix = delay_line(5, 5; delay_weight = 1, delay_shift = 3) ``` """ function delay_line(rng::AbstractRNG, ::Type{T}, dims::Integer...; - delay_weight::Union{Number, AbstractVector} = T(0.1), delay_shift::Integer = 1, + delay_weight::Union{Number, AbstractVector} = T(0.1f0), delay_shift::Integer = 1, return_sparse::Bool = false, kwargs...) where {T <: Number} throw_sparse_error(return_sparse) check_res_size(dims...) 
@@ -445,7 +556,7 @@ function delay_line(rng::AbstractRNG, ::Type{T}, dims::Integer...; return return_init_as(Val(return_sparse), reservoir_matrix) end -""" +@doc raw""" delayline_backward([rng], [T], dims...; delay_weight=0.1, fb_weight=0.1, delay_shift=1, fb_shift=1, return_sparse=false, @@ -454,15 +565,24 @@ end Create a delay line backward reservoir with the specified by `dims` and weights. Creates a matrix with backward connections as described in [Rodan2011](@cite). -# Arguments +```math +W_{i,j} = +\begin{cases} + r, & \text{if } i = j + 1,\;\; j \in [1, D_{\mathrm{res}} - 1], \\[4pt] + b, & \text{if } j = i + 1,\;\; i \in [1, D_{\mathrm{res}} - 1], \\[6pt] + 0, & \text{otherwise.} +\end{cases} +``` - - `rng`: Random number generator. Default is `Utils.default_rng()` - from WeightInitializers. +## Arguments + + - `rng`: Random number generator. Default is `Utils.default_rng()`from + [WeightInitializers](https://lux.csail.mit.edu/stable/api/Building_Blocks/WeightInitializers). - `T`: Type of the elements in the reservoir matrix. Default is `Float32`. - `dims`: Dimensions of the reservoir matrix. -# Keyword arguments +## Keyword arguments - `delay_weight`: The weight determines the absolute value of forward connections in the reservoir. @@ -481,6 +601,7 @@ Creates a matrix with backward connections as described in [Rodan2011](@cite). Default is 1. - `delay_shift`: delay line shift relative to the diagonal. Default is 1. - `return_sparse`: flag for returning a `sparse` matrix. + `true` requires `SparseArrays` to be loaded. Default is `false`. - `delay_kwargs` and `fb_kwargs`: named tuples that control the kwargs for the delay line weight and feedback weights respectively. The kwargs are as follows: @@ -501,7 +622,7 @@ Creates a matrix with backward connections as described in [Rodan2011](@cite). + `strides`: number of strides for assigning negative value to a weight. It can be an integer or an array. Default is 2. 
-# Examples +## Examples ```jldoctest julia> res_matrix = delayline_backward(5, 5) @@ -530,8 +651,8 @@ julia> res_matrix = delayline_backward(5, 5; delay_shift = 3, fb_weight = rand(F ``` """ function delayline_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...; - delay_weight::Union{Number, AbstractVector} = T(0.1), - fb_weight::Union{Number, AbstractVector} = T(0.1), delay_shift::Integer = 1, + delay_weight::Union{Number, AbstractVector} = T(0.1f0), + fb_weight::Union{Number, AbstractVector} = T(0.1f0), delay_shift::Integer = 1, fb_shift::Integer = 1, return_sparse::Bool = false, delay_kwargs::NamedTuple = NamedTuple(), fb_kwargs::NamedTuple = NamedTuple()) where {T <: Number} @@ -543,22 +664,35 @@ function delayline_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...; return return_init_as(Val(return_sparse), reservoir_matrix) end -""" +@doc raw""" cycle_jumps([rng], [T], dims...; cycle_weight=0.1, jump_weight=0.1, jump_size=3, return_sparse=false, cycle_kwargs=(), jump_kwargs=()) -Create a cycle jumps reservoir [Rodan2012](@cite). +Create a cycle reservoir with jumps [Rodan2012](@cite). -# Arguments +```math +W_{i,j} = +\begin{cases} + r, & \text{if } i = j + 1,\;\; j \in [1, D_{\mathrm{res}} - 1], \\[4pt] + r, & \text{if } i = 1,\;\; j = D_{\mathrm{res}}, \\[8pt] + r_j, & \text{if } i = j + \ell, \\[4pt] + r_j, & \text{if } j = i + \ell, \\[4pt] + r_j, & \text{if } (i,j) = (1+\ell, 1), \\[4pt] + r_j, & \text{if } (i,j) = (1,\, D_{\mathrm{res}}+1-\ell), \\[8pt] + 0, & \text{otherwise.} +\end{cases} +``` - - `rng`: Random number generator. Default is `Utils.default_rng()` - from WeightInitializers. +## Arguments + + - `rng`: Random number generator. Default is `Utils.default_rng()`from + [WeightInitializers](https://lux.csail.mit.edu/stable/api/Building_Blocks/WeightInitializers). - `T`: Type of the elements in the reservoir matrix. Default is `Float32`. - `dims`: Dimensions of the reservoir matrix. 
-# Keyword arguments +## Keyword arguments - `cycle_weight`: The weight of cycle connections. This can be provided as a single value or an array. In case it is provided as an @@ -574,6 +708,7 @@ Create a cycle jumps reservoir [Rodan2012](@cite). - `jump_size`: The number of steps between jump connections. Default is 3. - `return_sparse`: flag for returning a `sparse` matrix. + `true` requires `SparseArrays` to be loaded. Default is `false`. - `cycle_kwargs` and `jump_kwargs`: named tuples that control the kwargs for the cycle and jump weights respectively. The kwargs are as follows: @@ -594,7 +729,7 @@ Create a cycle jumps reservoir [Rodan2012](@cite). + `strides`: number of strides for assigning negative value to a weight. It can be an integer or an array. Default is 2. -# Examples +## Examples ```jldoctest julia> res_matrix = cycle_jumps(5, 5) @@ -615,8 +750,8 @@ julia> res_matrix = cycle_jumps(5, 5; jump_size = 2) ``` """ function cycle_jumps(rng::AbstractRNG, ::Type{T}, dims::Integer...; - cycle_weight::Union{Number, AbstractVector} = T(0.1), - jump_weight::Union{Number, AbstractVector} = T(0.1), + cycle_weight::Union{Number, AbstractVector} = T(0.1f0), + jump_weight::Union{Number, AbstractVector} = T(0.1f0), jump_size::Integer = 3, return_sparse::Bool = false, cycle_kwargs::NamedTuple = NamedTuple(), jump_kwargs::NamedTuple = NamedTuple()) where {T <: Number} @@ -629,21 +764,30 @@ function cycle_jumps(rng::AbstractRNG, ::Type{T}, dims::Integer...; return return_init_as(Val(return_sparse), reservoir_matrix) end -""" +@doc raw""" simple_cycle([rng], [T], dims...; cycle_weight=0.1, return_sparse=false, kwargs...) Create a simple cycle reservoir [Rodan2011](@cite). -# Arguments +```math +W_{i,j} = +\begin{cases} + r, & \text{if } i = j + 1,\;\; j \in [1, D_{\mathrm{res}} - 1], \\[4pt] + r, & \text{if } i = 1,\;\; j = D_{\mathrm{res}}, \\[6pt] + 0, & \text{otherwise.} +\end{cases} +``` + +## Arguments - - `rng`: Random number generator. 
Default is `Utils.default_rng()` - from WeightInitializers. + - `rng`: Random number generator. Default is `Utils.default_rng()`from + [WeightInitializers](https://lux.csail.mit.edu/stable/api/Building_Blocks/WeightInitializers). - `T`: Type of the elements in the reservoir matrix. Default is `Float32`. - `dims`: Dimensions of the reservoir matrix. -# Keyword arguments +## Keyword arguments - `cycle_weight`: Weight of the connections in the reservoir matrix. This can be provided as a single value or an array. In case it is provided as an @@ -651,6 +795,7 @@ Create a simple cycle reservoir [Rodan2011](@cite). you want to populate. Default is 0.1. - `return_sparse`: flag for returning a `sparse` matrix. + `true` requires `SparseArrays` to be loaded. Default is `false`. - `sampling_type`: Sampling that decides the distribution of `cycle_weight` negative numbers. If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each @@ -668,7 +813,7 @@ Create a simple cycle reservoir [Rodan2011](@cite). - `strides`: number of strides for assigning negative value to a weight. It can be an integer or an array. Default is 2. -# Examples +## Examples ```jldoctest julia> res_matrix = simple_cycle(5, 5) @@ -689,7 +834,7 @@ julia> res_matrix = simple_cycle(5, 5; weight = 11) ``` """ function simple_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; - cycle_weight::Union{Number, AbstractVector} = T(0.1), + cycle_weight::Union{Number, AbstractVector} = T(0.1f0), return_sparse::Bool = false, kwargs...) where {T <: Number} throw_sparse_error(return_sparse) check_res_size(dims...) @@ -698,30 +843,42 @@ function simple_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; return return_init_as(Val(return_sparse), reservoir_matrix) end -""" +@doc raw""" double_cycle([rng], [T], dims...; cycle_weight=0.1, second_cycle_weight=0.1, return_sparse=false) Creates a double cycle reservoir [Fu2023](@cite). 
-# Arguments +```math +W_{i,j} = +\begin{cases} + r_1, & \text{if } i = j + 1,\;\; j \in [1, D_{\mathrm{res}} - 1], \\[4pt] + r_1, & \text{if } i = D_{\mathrm{res}},\;\; j = 1, \\[6pt] + r_2, & \text{if } i = 1,\;\; j = D_{\mathrm{res}}, \\[6pt] + r_2, & \text{if } j = i + 1,\;\; i \in [1, D_{\mathrm{res}} - 1], \\[4pt] + 0, & \text{otherwise.} +\end{cases} +``` - - `rng`: Random number generator. Default is `Utils.default_rng()` - from WeightInitializers. +## Arguments + + - `rng`: Random number generator. Default is `Utils.default_rng()`from + [WeightInitializers](https://lux.csail.mit.edu/stable/api/Building_Blocks/WeightInitializers). - `T`: Type of the elements in the reservoir matrix. Default is `Float32`. - `dims`: Dimensions of the reservoir matrix. -# Keyword arguments +## Keyword arguments - `cycle_weight`: Weight of the upper cycle connections in the reservoir matrix. Default is 0.1. - `second_cycle_weight`: Weight of the lower cycle connections in the reservoir matrix. Default is 0.1. - `return_sparse`: flag for returning a `sparse` matrix. + `true` requires `SparseArrays` to be loaded. Default is `false`. -# Examples +## Examples ```jldoctest julia> reservoir_matrix = double_cycle(5, 5; cycle_weight = 0.1, second_cycle_weight = 0.3) @@ -734,8 +891,8 @@ julia> reservoir_matrix = double_cycle(5, 5; cycle_weight = 0.1, second_cycle_we ``` """ function double_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; - cycle_weight::Union{Number, AbstractVector} = T(0.1), - second_cycle_weight::Union{Number, AbstractVector} = T(0.1), + cycle_weight::Union{Number, AbstractVector} = T(0.1f0), + second_cycle_weight::Union{Number, AbstractVector} = T(0.1f0), return_sparse::Bool = false) where {T <: Number} throw_sparse_error(return_sparse) check_res_size(dims...) 
@@ -753,7 +910,7 @@ function double_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; return return_init_as(Val(return_sparse), reservoir_matrix) end -""" +@doc raw""" true_doublecycle([rng], [T], dims...; cycle_weight=0.1, second_cycle_weight=0.1, return_sparse=false, cycle_kwargs=(), second_cycle_kwargs=()) @@ -761,14 +918,25 @@ end Creates a true double cycle reservoir, ispired by [Fu2023](@cite), with cycles built on the definition by [Rodan2011](@cite). -# Arguments +```math +W_{i,j} = +\begin{cases} + r_1, & \text{if } i = j + 1,\;\; j \in [1, D_{\mathrm{res}} - 1], \\[4pt] + r_1, & \text{if } i = 1,\;\; j = D_{\mathrm{res}}, \\[6pt] + r_2, & \text{if } j = i + 1,\;\; i \in [1, D_{\mathrm{res}} - 1], \\[4pt] + r_2, & \text{if } i = D_{\mathrm{res}},\;\; j = 1, \\[6pt] + 0, & \text{otherwise.} +\end{cases} +``` - - `rng`: Random number generator. Default is `Utils.default_rng()` - from WeightInitializers. +## Arguments + + - `rng`: Random number generator. Default is `Utils.default_rng()`from + [WeightInitializers](https://lux.csail.mit.edu/stable/api/Building_Blocks/WeightInitializers). - `T`: Type of the elements in the reservoir matrix. Default is `Float32`. - `dims`: Dimensions of the reservoir matrix. -# Keyword arguments +## Keyword arguments - `cycle_weight`: Weight of the upper cycle connections in the reservoir matrix. Default is 0.1. @@ -776,6 +944,7 @@ with cycles built on the definition by [Rodan2011](@cite). - `second_cycle_weight`: Weight of the lower cycle connections in the reservoir matrix. Default is 0.1. - `return_sparse`: flag for returning a `sparse` matrix. + `true` requires `SparseArrays` to be loaded. Default is `false`. - `cycle_kwargs`, and `second_cycle_kwargs`: named tuples that control the kwargs for the weights generation. The kwargs are as follows: @@ -796,7 +965,7 @@ with cycles built on the definition by [Rodan2011](@cite). + `strides`: number of strides for assigning negative value to a weight. 
It can be an integer or an array. Default is 2. -# Examples +## Examples ```jldoctest julia> true_doublecycle(5, 5; cycle_weight = 0.1, second_cycle_weight = 0.3) @@ -809,8 +978,8 @@ julia> true_doublecycle(5, 5; cycle_weight = 0.1, second_cycle_weight = 0.3) ``` """ function true_doublecycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; - cycle_weight::Union{Number, AbstractVector} = T(0.1), - second_cycle_weight::Union{Number, AbstractVector} = T(0.1), + cycle_weight::Union{Number, AbstractVector} = T(0.1f0), + second_cycle_weight::Union{Number, AbstractVector} = T(0.1f0), return_sparse::Bool = false, cycle_kwargs::NamedTuple = NamedTuple(), second_cycle_kwargs::NamedTuple = NamedTuple()) where {T <: Number} throw_sparse_error(return_sparse) @@ -832,8 +1001,6 @@ addition of self loops [Elsarraj2019](@cite). This architecture is referred to as TP1 in the original paper. -# Equations - ```math W_{i,j} = \begin{cases} @@ -844,14 +1011,14 @@ W_{i,j} = \end{cases} ``` -# Arguments +## Arguments - - `rng`: Random number generator. Default is `Utils.default_rng()` - from WeightInitializers. + - `rng`: Random number generator. Default is `Utils.default_rng()`from + [WeightInitializers](https://lux.csail.mit.edu/stable/api/Building_Blocks/WeightInitializers). - `T`: Type of the elements in the reservoir matrix. Default is `Float32`. - `dims`: Dimensions of the reservoir matrix. -# Keyword arguments +## Keyword arguments - `cycle_weight`: Weight of the cycle connections in the reservoir matrix. This can be provided as a single value or an array. In case it is provided as an @@ -864,6 +1031,7 @@ W_{i,j} = you want to populate. Default is 0.1. - `return_sparse`: flag for returning a `sparse` matrix. + `true` requires `SparseArrays` to be loaded. Default is `false`. - `sampling_type`: Sampling that decides the distribution of `weight` negative numbers. If set to `:no_sample` the sign is unchanged. 
If set to `:bernoulli_sample!` then each @@ -881,7 +1049,7 @@ W_{i,j} = - `strides`: number of strides for assigning negative value to a weight. It can be an integer or an array. Default is 2. -# Examples +## Examples ```jldoctest julia> reservoir_matrix = selfloop_cycle(5, 5) @@ -923,8 +1091,6 @@ self loops on odd neurons [Elsarraj2019](@cite). This architecture is referred to as TP2 in the original paper. -# Equations - ```math W_{i,j} = \begin{cases} @@ -936,14 +1102,14 @@ W_{i,j} = \end{cases} ``` -# Arguments +## Arguments - - `rng`: Random number generator. Default is `Utils.default_rng()` - from WeightInitializers. + - `rng`: Random number generator. Default is `Utils.default_rng()`from + [WeightInitializers](https://lux.csail.mit.edu/stable/api/Building_Blocks/WeightInitializers). - `T`: Type of the elements in the reservoir matrix. Default is `Float32`. - `dims`: Dimensions of the reservoir matrix. -# Keyword arguments +## Keyword arguments - `cycle_weight`: Weight of the cycle connections in the reservoir matrix. This can be provided as a single value or an array. In case it is provided as an @@ -955,9 +1121,10 @@ W_{i,j} = - `fb_weight`: Weight of the self loops in the reservoir matrix. Default is 0.1. - `return_sparse`: flag for returning a `sparse` matrix. + `true` requires `SparseArrays` to be loaded. Default is `false`. -# Examples +## Examples ```jldoctest julia> reservoir_matrix = selfloop_backward_cycle(5, 5) @@ -1010,8 +1177,6 @@ backward connections shifted by one [Elsarraj2019](@cite). This architecture is referred to as TP3 in the original paper. -# Equations - ```math W_{i,j} = \begin{cases} @@ -1022,14 +1187,14 @@ W_{i,j} = \end{cases} ``` -# Arguments +## Arguments - - `rng`: Random number generator. Default is `Utils.default_rng()` - from WeightInitializers. + - `rng`: Random number generator. Default is `Utils.default_rng()`from + [WeightInitializers](https://lux.csail.mit.edu/stable/api/Building_Blocks/WeightInitializers). 
- `T`: Type of the elements in the reservoir matrix. Default is `Float32`. - `dims`: Dimensions of the reservoir matrix. -# Keyword arguments +## Keyword arguments - `delay_weight`: Weight of the delay line connections in the reservoir matrix. This can be provided as a single value or an array. In case it is provided as an @@ -1050,6 +1215,7 @@ W_{i,j} = Default is 1. - `delay_shift`: delay line shift relative to the diagonal. Default is 1. - `return_sparse`: flag for returning a `sparse` matrix. + `true` requires `SparseArrays` to be loaded. Default is `false`. - `delay_kwargs`, `selfloop_kwargs`, and `fb_kwargs`: named tuples that control the kwargs for the weights generation. The kwargs are as follows: @@ -1070,7 +1236,7 @@ W_{i,j} = + `strides`: number of strides for assigning negative value to a weight. It can be an integer or an array. Default is 2. -# Examples +## Examples ```jldoctest julia> reservoir_matrix = selfloop_delayline_backward(5, 5) @@ -1118,8 +1284,6 @@ with the addition of self loops [Elsarraj2019](@cite). This architecture is referred to as TP4 in the original paper. -# Equations - ```math W_{i,j} = \begin{cases} @@ -1129,14 +1293,14 @@ W_{i,j} = \end{cases} ``` -# Arguments +## Arguments - - `rng`: Random number generator. Default is `Utils.default_rng()` - from WeightInitializers. + - `rng`: Random number generator. Default is `Utils.default_rng()`from + [WeightInitializers](https://lux.csail.mit.edu/stable/api/Building_Blocks/WeightInitializers). - `T`: Type of the elements in the reservoir matrix. Default is `Float32`. - `dims`: Dimensions of the reservoir matrix. -# Keyword arguments +## Keyword arguments - `forward_weight`: Weight of the forward connections in the reservoir matrix. This can be provided as a single value or an array. In case it is provided as an @@ -1149,6 +1313,7 @@ W_{i,j} = you want to populate. Default is 0.1. - `return_sparse`: flag for returning a `sparse` matrix. + `true` requires `SparseArrays` to be loaded. 
Default is `false`. - `delay_kwargs` and `selfloop_kwargs`: named tuples that control the kwargs for the delay line weight and self loop weights respectively. The kwargs are as follows: @@ -1169,7 +1334,7 @@ W_{i,j} = + `strides`: number of strides for assigning negative value to a weight. It can be an integer or an array. Default is 2. -# Examples +## Examples ```jldoctest julia> reservoir_matrix = selfloop_forwardconnection(5, 5) @@ -1211,8 +1376,6 @@ Creates a reservoir based on a forward connection of weights [Elsarraj2019](@cit This architecture is referred to as TP5 in the original paper. -# Equations - ```math W_{i,j} = \begin{cases} @@ -1221,14 +1384,14 @@ W_{i,j} = \end{cases} ``` -# Arguments +## Arguments - - `rng`: Random number generator. Default is `Utils.default_rng()` - from WeightInitializers. + - `rng`: Random number generator. Default is `Utils.default_rng()`from + [WeightInitializers](https://lux.csail.mit.edu/stable/api/Building_Blocks/WeightInitializers). - `T`: Type of the elements in the reservoir matrix. Default is `Float32`. - `dims`: Dimensions of the reservoir matrix. -# Keyword arguments +## Keyword arguments - `forward_weight`: Weight of the cycle connections in the reservoir matrix. This can be provided as a single value or an array. In case it is provided as an @@ -1236,6 +1399,7 @@ W_{i,j} = you want to populate. Default is 0.1. - `return_sparse`: flag for returning a `sparse` matrix. + `true` requires `SparseArrays` to be loaded. Default is `false`. - `sampling_type`: Sampling that decides the distribution of `forward_weight` negative numbers. If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each @@ -1253,9 +1417,11 @@ W_{i,j} = - `strides`: number of strides for assigning negative value to a weight. It can be an integer or an array. Default is 2. 
-# Examples +## Examples -```jldoctest +Default kwargs: + +```jldoctest forcon julia> reservoir_matrix = forward_connection(5, 5) 5×5 Matrix{Float32}: 0.0 0.0 0.0 0.0 0.0 @@ -1263,15 +1429,76 @@ julia> reservoir_matrix = forward_connection(5, 5) 0.1 0.0 0.0 0.0 0.0 0.0 0.1 0.0 0.0 0.0 0.0 0.0 0.1 0.0 0.0 +``` + +Changing the weights magnitudes to a different unique value: -julia> reservoir_matrix = forward_connection(5, 5; forward_weight=0.5) +```jldoctest forcon +julia> forward_connection(5, 5; forward_weight=0.99) 5×5 Matrix{Float32}: - 0.0 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 0.0 - 0.5 0.0 0.0 0.0 0.0 - 0.0 0.5 0.0 0.0 0.0 - 0.0 0.0 0.5 0.0 0.0 + 0.0 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 0.0 + 0.99 0.0 0.0 0.0 0.0 + 0.0 0.99 0.0 0.0 0.0 + 0.0 0.0 0.99 0.0 0.0 ``` + +Changing the weights signs with different sampling techniques: + +```jldoctest forcon +julia> forward_connection(5, 5; sampling_type=:irrational_sample!) +5×5 Matrix{Float32}: + 0.0 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 0.0 + -0.1 0.0 0.0 0.0 0.0 + 0.0 0.1 0.0 0.0 0.0 + 0.0 0.0 -0.1 0.0 0.0 + +julia> forward_connection(5, 5; sampling_type=:irrational_sample!) +5×5 Matrix{Float32}: + 0.0 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 0.0 + -0.1 0.0 0.0 0.0 0.0 + 0.0 0.1 0.0 0.0 0.0 + 0.0 0.0 -0.1 0.0 0.0 +``` + +Changing the weights to random numbers. 
Note that the length of the given array
+must be at least as long as the subdiagonal one wants to fill:
+
+```jldoctest forcon
+julia> reservoir_matrix = forward_connection(5, 5; forward_weight=rand(Float32, 3))
+5×5 Matrix{Float32}:
+ 0.0       0.0       0.0       0.0  0.0
+ 0.0       0.0       0.0       0.0  0.0
+ 0.274221  0.0       0.0       0.0  0.0
+ 0.0       0.111511  0.0       0.0  0.0
+ 0.0       0.0       0.618345  0.0  0.0
+```
+
+Returning a sparse matrix. Note that the `return_sparse=true`
+keyword requires `SparseArrays` to be loaded, so the example
+below imports it first:
+
+
+```jldoctest forcon
+julia> using SparseArrays
+
+julia> reservoir_matrix = forward_connection(10, 10; return_sparse=true)
+10×10 SparseMatrixCSC{Float32, Int64} with 8 stored entries:
+  ⋅    ⋅    ⋅    ⋅    ⋅    ⋅    ⋅    ⋅    ⋅   ⋅
+  ⋅    ⋅    ⋅    ⋅    ⋅    ⋅    ⋅    ⋅    ⋅   ⋅
+ 0.1   ⋅    ⋅    ⋅    ⋅    ⋅    ⋅    ⋅    ⋅   ⋅
+  ⋅   0.1   ⋅    ⋅    ⋅    ⋅    ⋅    ⋅    ⋅   ⋅
+  ⋅    ⋅   0.1   ⋅    ⋅    ⋅    ⋅    ⋅    ⋅   ⋅
+  ⋅    ⋅    ⋅   0.1   ⋅    ⋅    ⋅    ⋅    ⋅   ⋅
+  ⋅    ⋅    ⋅    ⋅   0.1   ⋅    ⋅    ⋅    ⋅   ⋅
+  ⋅    ⋅    ⋅    ⋅    ⋅   0.1   ⋅    ⋅    ⋅   ⋅
+  ⋅    ⋅    ⋅    ⋅    ⋅    ⋅   0.1   ⋅    ⋅   ⋅
+  ⋅    ⋅    ⋅    ⋅    ⋅    ⋅    ⋅   0.1   ⋅   ⋅
+```
+
+
 """
 function forward_connection(rng::AbstractRNG, ::Type{T}, dims::Integer...;
         forward_weight::Union{Number, AbstractVector} = T(0.1f0), return_sparse::Bool = false,
         kwargs...) where {T <: Number}
@@ -1299,15 +1526,17 @@ Each block may be filled with
 ```math
 W_{i,j} =
 \begin{cases}
-    w_b, & \text{if }\left\lfloor\frac{i-1}{s}\right\rfloor = \left\lfloor\frac{j-1}{s}\right\rfloor = b,\;
-    s = \text{block\_size},\; b=0,\dots,nb-1, \\
+    w_b, & \text{if }\left\lfloor\frac{i-1}{s}\right\rfloor =
+    \left\lfloor\frac{j-1}{s}\right\rfloor = b,\;
+    s = \text{block_size},\; b=0,\dots,nb-1, \\
     0, & \text{otherwise,}
 \end{cases}
 ```

 # Arguments

-  - `rng`: Random number generator. Default is `Utils.default_rng()`.
+  - `rng`: Random number generator. Default is `Utils.default_rng()` from
+    WeightInitializers.
   - `T`: Element type of the matrix. Default is `Float32`.
   - `dims`: Dimensions of the output matrix (must be two-dimensional).
@@ -1324,22 +1553,40 @@ W_{i,j} = # Examples -```jldoctest -# 4×4 with two 2×2 blocks of 1.0 -julia> W1 = block_diagonal(4, 4; block_size=2) -4×4 Matrix{Float32}: - 1.0 1.0 0.0 0.0 - 1.0 1.0 0.0 0.0 - 0.0 0.0 1.0 1.0 - 0.0 0.0 1.0 1.0 - -# per-block weights [0.5, 2.0] -julia> W2 = block_diagonal(4, 4; block_size=2, block_weight=[0.5, 2.0]) -4×4 Matrix{Float32}: - 0.5 0.5 0.0 0.0 - 0.5 0.5 0.0 0.0 - 0.0 0.0 2.0 2.0 - 0.0 0.0 2.0 2.0 +Changing the block size + +```jldoctest blockdiag +julia> res_matrix = block_diagonal(10, 10; block_size=2) +10×10 Matrix{Float32}: + 1.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 + 1.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 + 0.0 0.0 1.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 + 0.0 0.0 1.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 1.0 1.0 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 1.0 1.0 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 0.0 0.0 1.0 1.0 0.0 0.0 + 0.0 0.0 0.0 0.0 0.0 0.0 1.0 1.0 0.0 0.0 + 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 1.0 + 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 1.0 +``` + +Changing the weights, per block. Please note that you have to +know the number of blocks that you are going to have +(which usually is `res_size`/`block_size`). 
+
+```jldoctest blockdiag
+julia> res_matrix = block_diagonal(10, 10; block_size=2, block_weight=[0.5, 2.0, -0.99, 1.0, -99.0])
+10×10 Matrix{Float32}:
+ 0.5  0.5  0.0  0.0   0.0    0.0   0.0  0.0    0.0    0.0
+ 0.5  0.5  0.0  0.0   0.0    0.0   0.0  0.0    0.0    0.0
+ 0.0  0.0  2.0  2.0   0.0    0.0   0.0  0.0    0.0    0.0
+ 0.0  0.0  2.0  2.0   0.0    0.0   0.0  0.0    0.0    0.0
+ 0.0  0.0  0.0  0.0  -0.99  -0.99  0.0  0.0    0.0    0.0
+ 0.0  0.0  0.0  0.0  -0.99  -0.99  0.0  0.0    0.0    0.0
+ 0.0  0.0  0.0  0.0   0.0    0.0   1.0  1.0    0.0    0.0
+ 0.0  0.0  0.0  0.0   0.0    0.0   1.0  1.0    0.0    0.0
+ 0.0  0.0  0.0  0.0   0.0    0.0   0.0  0.0  -99.0  -99.0
+ 0.0  0.0  0.0  0.0   0.0    0.0   0.0  0.0  -99.0  -99.0
+```
 """
 function block_diagonal(rng::AbstractRNG, ::Type{T}, dims::Integer...;

From 3bb59da77679f0ce756f322ac13d7ea3a382d068 Mon Sep 17 00:00:00 2001
From: MartinuzziFrancesco
Date: Mon, 1 Dec 2025 17:28:41 +0100
Subject: [PATCH 4/4] docs: improve docstrings for second batch of minimal inits

---
 src/inits/inits_input.jl     |  78 ++---
 src/inits/inits_reservoir.jl | 649 ++++++++++++++++++++++++++++++-----
 2 files changed, 605 insertions(+), 122 deletions(-)

diff --git a/src/inits/inits_input.jl b/src/inits/inits_input.jl
index dfe2fefc..f097a5f7 100644
--- a/src/inits/inits_input.jl
+++ b/src/inits/inits_input.jl
@@ -133,22 +133,22 @@ warning.
       + A single number. In this case, the matrix elements will be randomly chosen
         from the range `[-scaling, scaling]`. Default option, with a the scaling
         value set to `0.1`.
+      + A tuple `(lower, upper)`. The values define the range of the distribution.
+        The matrix elements will be randomly created and scaled to the range
+        `[lower, upper]`.
       + A vector of length = `in_size`. In this case, the columns will be scaled
         individually by the entries of the vector. The entries can be numbers or
         tuples, which will mirror the behavior described above.
-
-  - `return_sparse`: flag for returning a `sparse` matrix.
-    Default is `false`.
+    + `return_sparse`: flag for returning a `sparse` matrix.
+      Default is `false`.
## Examples Standard call with scaling provided by a scalar: ```jldoctest weightedinit -julia> res_input = weighted_init(9, 3; scaling=0.1) +julia> res_input = weighted_init(9, 3; scaling = 0.1) 9×3 Matrix{Float32}: 0.0452399 0.0 0.0 -0.0348047 0.0 0.0 @@ -165,7 +165,7 @@ Scaling with a tuple, providing lower and upper bound of the uniform distributio from which the weights will be sampled: ```jldoctest weightedinit -julia> res_input = weighted_init(9, 3; scaling=(0.1, 0.5)) +julia> res_input = weighted_init(9, 3; scaling = (0.1, 0.5)) 9×3 Matrix{Float32}: 0.39048 0.0 0.0 0.230391 0.0 0.0 @@ -183,7 +183,7 @@ negative provides the lower bound. Each column is scaled in order: first element provides bounds for the first column, and so on: ```jldoctest weightedinit -julia> res_input = weighted_init(9, 3; scaling=[0.1, 0.5, 0.9]) +julia> res_input = weighted_init(9, 3; scaling = [0.1, 0.5, 0.9]) 9×3 Matrix{Float32}: 0.0452399 0.0 0.0 -0.0348047 0.0 0.0 @@ -201,7 +201,7 @@ Each column is scaled in order: first element provides bounds for the first column, and so on: ```jldoctest weightedinit -julia> res_input = weighted_init(9, 3; scaling=[(0.1, 0.2), (-0.2, -0.1), (0.3, 0.5)]) +julia> res_input = weighted_init(9, 3; scaling = [(0.1, 0.2), (-0.2, -0.1), (0.3, 0.5)]) 9×3 Matrix{Float32}: 0.17262 0.0 0.0 0.132598 0.0 0.0 @@ -255,7 +255,6 @@ julia> res_input = weighted_init(9, 3; return_sparse = true) function weighted_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; scaling::Union{Number, Tuple, Vector} = T(0.1), return_sparse::Bool = false) where {T <: Number} - throw_sparse_error(return_sparse) approx_res_size, in_size = dims res_size = Int(floor(approx_res_size / in_size) * in_size) @@ -334,22 +333,22 @@ julia> res_input = weighted_minimal(9, 3; weight = 0.99) 0.0 0.0 0.99 0.0 0.0 0.99 0.0 0.0 0.99 - ``` +``` - Random sign for each weight, drawn from a bernoulli distribution: +Random sign for each weight, drawn from a bernoulli distribution: - ```jldoctest 
weightedminimal +```jldoctest weightedminimal julia> res_input = weighted_minimal(9, 3; sampling_type = :bernoulli_sample!) 9×3 Matrix{Float32}: - 0.1 -0.0 -0.0 - -0.1 -0.0 -0.0 - 0.1 -0.0 0.0 - -0.0 0.1 0.0 - 0.0 0.1 -0.0 - 0.0 0.1 0.0 - -0.0 -0.0 -0.1 - -0.0 -0.0 0.1 - 0.0 -0.0 0.1 + 0.1 -0.0 -0.0 +-0.1 -0.0 -0.0 + 0.1 -0.0 0.0 +-0.0 0.1 0.0 + 0.0 0.1 -0.0 + 0.0 0.1 0.0 +-0.0 -0.0 -0.1 +-0.0 -0.0 0.1 + 0.0 -0.0 0.1 ``` Example of different reservoir size for the initializer: @@ -522,33 +521,32 @@ julia> res_input = minimal_init(8, 3; sampling_type = :irrational) 0.1 0.1 0.1 0.1 0.1 -0.1 -0.1 0.1 -0.1 - ``` +``` - Changing probability for the negative sign +Changing probability for the negative sign - ```jldoctest minimalinit +```jldoctest minimalinit julia> res_input = minimal_init(8, 3; p = 0.1) # lower p -> more negative signs 8×3 Matrix{Float32}: - -0.1 -0.1 -0.1 - -0.1 -0.1 -0.1 - -0.1 -0.1 -0.1 - -0.1 -0.1 -0.1 - 0.1 -0.1 -0.1 - -0.1 -0.1 -0.1 - -0.1 -0.1 -0.1 - -0.1 -0.1 -0.1 - +-0.1 -0.1 -0.1 +-0.1 -0.1 -0.1 +-0.1 -0.1 -0.1 +-0.1 -0.1 -0.1 + 0.1 -0.1 -0.1 +-0.1 -0.1 -0.1 +-0.1 -0.1 -0.1 +-0.1 -0.1 -0.1 julia> res_input = minimal_init(8, 3; p = 0.8)# higher p -> more positive signs 8×3 Matrix{Float32}: - 0.1 0.1 0.1 - -0.1 0.1 0.1 - -0.1 0.1 0.1 - 0.1 0.1 0.1 - 0.1 0.1 0.1 - 0.1 -0.1 0.1 - -0.1 0.1 0.1 - 0.1 0.1 0.1 + 0.1 0.1 0.1 +-0.1 0.1 0.1 +-0.1 0.1 0.1 + 0.1 0.1 0.1 + 0.1 0.1 0.1 + 0.1 -0.1 0.1 +-0.1 0.1 0.1 + 0.1 0.1 0.1 ``` """ function minimal_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; diff --git a/src/inits/inits_reservoir.jl b/src/inits/inits_reservoir.jl index 8bc51b73..7b3a3979 100644 --- a/src/inits/inits_reservoir.jl +++ b/src/inits/inits_reservoir.jl @@ -59,7 +59,7 @@ Returning a sparse matrix: ```jldoctest randsparse julia> using SparseArrays -julia> res_matrix = rand_sparse(5, 5; sparsity = 0.4, return_sparse=true) +julia> res_matrix = rand_sparse(5, 5; sparsity = 0.4, return_sparse = true) 5×5 SparseMatrixCSC{Float32, Int64} with 
10 stored entries: ⋅ ⋅ ⋅ ⋅ ⋅ ⋅ 0.794565 ⋅ 0.26164 ⋅ @@ -129,7 +129,7 @@ julia> res_matrix = pseudo_svd(5, 5) With reversed sorting: ```jldoctest psvd -julia> pseudo_svd(5, 5; reverse_sort=true) +julia> pseudo_svd(5, 5; reverse_sort = true) 5×5 Matrix{Float32}: 1.0 0.0 0.0 0.0 0.0 0.0 0.726199 0.0 0.0 0.0 @@ -141,7 +141,7 @@ julia> pseudo_svd(5, 5; reverse_sort=true) With no sorting ```jldoctest psvd -julia> pseudo_svd(5, 5; sorted=false) +julia> pseudo_svd(5, 5; sorted = false) 5×5 Matrix{Float32}: 0.726199 0.0 0.0 0.0 0.0 0.0 0.325977 0.0 0.0 0.0 @@ -153,7 +153,7 @@ julia> pseudo_svd(5, 5; sorted=false) Returning as a `Diagonal` or a `sparse` matrix: ```jldoctest psvd -julia> pseudo_svd(5, 5; return_diag=true) +julia> pseudo_svd(5, 5; return_diag = true) 5×5 LinearAlgebra.Diagonal{Float32, Vector{Float32}}: 0.306998 ⋅ ⋅ ⋅ ⋅ ⋅ 0.325977 ⋅ ⋅ ⋅ @@ -163,7 +163,7 @@ julia> pseudo_svd(5, 5; return_diag=true) julia> using SparseArrays -julia> pseudo_svd(5, 5; return_sparse=true) +julia> pseudo_svd(5, 5; return_sparse = true) 5×5 SparseMatrixCSC{Float32, Int64} with 5 stored entries: 0.306998 ⋅ ⋅ ⋅ ⋅ ⋅ 0.325977 ⋅ ⋅ ⋅ @@ -171,7 +171,6 @@ julia> pseudo_svd(5, 5; return_sparse=true) ⋅ ⋅ ⋅ 0.726199 ⋅ ⋅ ⋅ ⋅ ⋅ 1.0 ``` - """ function pseudo_svd(rng::AbstractRNG, ::Type{T}, dims::Integer...; max_value::Number = T(1), @@ -427,7 +426,7 @@ end function build_cycle(::Val{false}, rng::AbstractRNG, ::Type{T}, res_size::Int; in_degree::Integer = 1, radius::Number = T(1.0f0), cut_cycle::Bool = false) where {T <: - Number} + Number} reservoir_matrix = DeviceAgnostic.zeros(rng, T, res_size, res_size) for idx in 1:res_size selected = randperm(rng, res_size)[1:in_degree] @@ -441,7 +440,7 @@ end function build_cycle(::Val{true}, rng::AbstractRNG, ::Type{T}, res_size::Int; in_degree::Integer = 1, radius::Number = T(1.0f0), cut_cycle::Bool = false) where {T <: - Number} + Number} reservoir_matrix = DeviceAgnostic.zeros(rng, T, res_size, res_size) perm = randperm(rng, res_size) for idx in 
1:(res_size - 1) @@ -520,7 +519,9 @@ W_{i,j} = ## Examples -```jldoctest +Default call: + +```jldoctest delayline julia> res_matrix = delay_line(5, 5) 5×5 Matrix{Float32}: 0.0 0.0 0.0 0.0 0.0 @@ -528,7 +529,11 @@ julia> res_matrix = delay_line(5, 5) 0.0 0.1 0.0 0.0 0.0 0.0 0.0 0.1 0.0 0.0 0.0 0.0 0.0 0.1 0.0 +``` +Changing weights: + +```jldoctest delayline julia> res_matrix = delay_line(5, 5; delay_weight = 1) 5×5 Matrix{Float32}: 0.0 0.0 0.0 0.0 0.0 @@ -536,14 +541,64 @@ julia> res_matrix = delay_line(5, 5; delay_weight = 1) 0.0 1.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 +``` -julia> res_matrix = delay_line(5, 5; delay_weight = 1, delay_shift = 3) +Changing weights to a custom array: + +```jldoctest delayline +julia> res_matrix = delay_line(5, 5; delay_weight = rand(Float32, 4)) +5×5 Matrix{Float32}: + 0.0 0.0 0.0 0.0 0.0 + 0.398408 0.0 0.0 0.0 0.0 + 0.0 0.624473 0.0 0.0 0.0 + 0.0 0.0 0.66302 0.0 0.0 + 0.0 0.0 0.0 0.0780818 0.0 +``` + +Changing sign of the weights with different samplings: + +```jldoctest delayline +julia> res_matrix = delay_line(5, 5; sampling_type=:irrational_sample!) +5×5 Matrix{Float32}: + 0.0 0.0 0.0 0.0 0.0 + -0.1 0.0 0.0 0.0 0.0 + 0.0 0.1 0.0 0.0 0.0 + 0.0 0.0 -0.1 0.0 0.0 + 0.0 0.0 0.0 -0.1 0.0 + +julia> res_matrix = delay_line(5, 5; sampling_type=:bernoulli_sample!) 
+5×5 Matrix{Float32}: + 0.0 0.0 0.0 0.0 0.0 + 0.1 0.0 0.0 0.0 0.0 + 0.0 -0.1 0.0 0.0 0.0 + 0.0 0.0 0.1 0.0 0.0 + 0.0 0.0 0.0 -0.1 0.0 +``` + +Shifting the delay line: + +```jldoctest delayline +julia> res_matrix = delay_line(5, 5; delay_shift = 3) 5×5 Matrix{Float32}: 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 - 1.0 0.0 0.0 0.0 0.0 - 0.0 1.0 0.0 0.0 0.0 + 0.1 0.0 0.0 0.0 0.0 + 0.0 0.1 0.0 0.0 0.0 +``` + +Returning as sparse: + +```jldoctest delayline +julia> using SparseArrays + +julia> res_matrix = delay_line(5, 5; return_sparse=true) +5×5 SparseMatrixCSC{Float32, Int64} with 4 stored entries: + ⋅ ⋅ ⋅ ⋅ ⋅ + 0.1 ⋅ ⋅ ⋅ ⋅ + ⋅ 0.1 ⋅ ⋅ ⋅ + ⋅ ⋅ 0.1 ⋅ ⋅ + ⋅ ⋅ ⋅ 0.1 ⋅ ``` """ function delay_line(rng::AbstractRNG, ::Type{T}, dims::Integer...; @@ -624,7 +679,9 @@ W_{i,j} = ## Examples -```jldoctest +Default call: + +```jldoctest dlbackward julia> res_matrix = delayline_backward(5, 5) 5×5 Matrix{Float32}: 0.0 0.1 0.0 0.0 0.0 @@ -632,22 +689,76 @@ julia> res_matrix = delayline_backward(5, 5) 0.0 0.1 0.0 0.1 0.0 0.0 0.0 0.1 0.0 0.1 0.0 0.0 0.0 0.1 0.0 +``` -julia> res_matrix = delayline_backward(5, 5; delay_shift = 3) +Changing weights: + +```jldoctest dlbackward +julia> res_matrix = delayline_backward(5, 5; delay_weight = 0.99, fb_weight=-1.0) +5×5 Matrix{Float32}: + 0.0 -1.0 0.0 0.0 0.0 + 0.99 0.0 -1.0 0.0 0.0 + 0.0 0.99 0.0 -1.0 0.0 + 0.0 0.0 0.99 0.0 -1.0 + 0.0 0.0 0.0 0.99 0.0 +``` + +Changing weights to custom arrays: + +```jldoctest dlbackward +julia> res_matrix = delayline_backward(5, 5; delay_weight = rand(4), fb_weight=.-rand(4)) +5×5 Matrix{Float32}: + 0.0 -0.294809 0.0 0.0 0.0 + 0.736006 0.0 -0.449479 0.0 0.0 + 0.0 0.10892 0.0 -0.60118 0.0 + 0.0 0.0 0.482435 0.0 -0.673392 + 0.0 0.0 0.0 0.177982 0.0 +``` + +Changing sign of the weights with different samplings: + +```jldoctest dlbackward +julia> res_matrix = delayline_backward(5, 5; delay_kwargs=(;sampling_type=:irrational_sample!)) +5×5 Matrix{Float32}: + 0.0 0.1 0.0 0.0 0.0 + -0.1 0.0 0.1 0.0 0.0 
+ 0.0 0.1 0.0 0.1 0.0 + 0.0 0.0 -0.1 0.0 0.1 + 0.0 0.0 0.0 -0.1 0.0 + +julia> res_matrix = delayline_backward(5, 5; fb_kwargs=(;sampling_type=:bernoulli_sample!)) +5×5 Matrix{Float32}: + 0.0 0.1 0.0 0.0 0.0 + 0.1 0.0 -0.1 0.0 0.0 + 0.0 0.1 0.0 0.1 0.0 + 0.0 0.0 0.1 0.0 -0.1 + 0.0 0.0 0.0 0.1 0.0 +``` + +Shifting: + +```jldoctest dlbackward +julia> res_matrix = delayline_backward(5, 5; delay_shift=3, fb_shift=2) 5×5 Matrix{Float32}: - 0.0 0.1 0.0 0.0 0.0 0.0 0.0 0.1 0.0 0.0 0.0 0.0 0.0 0.1 0.0 - 0.1 0.0 0.0 0.0 0.1 + 0.0 0.0 0.0 0.0 0.1 + 0.1 0.0 0.0 0.0 0.0 0.0 0.1 0.0 0.0 0.0 +``` -julia> res_matrix = delayline_backward(5, 5; delay_shift = 3, fb_weight = rand(Float32, 4)) -5×5 Matrix{Float32}: - 0.0 0.393622 0.0 0.0 0.0 - 0.0 0.0 0.21916 0.0 0.0 - 0.0 0.0 0.0 0.895871 0.0 - 0.1 0.0 0.0 0.0 0.654846 - 0.0 0.1 0.0 0.0 0.0 +Returning as sparse: + +```jldoctest dlbackward +julia> using SparseArrays + +julia> res_matrix = delayline_backward(5, 5; return_sparse=true) +5×5 SparseMatrixCSC{Float32, Int64} with 8 stored entries: + ⋅ 0.1 ⋅ ⋅ ⋅ + 0.1 ⋅ 0.1 ⋅ ⋅ + ⋅ 0.1 ⋅ 0.1 ⋅ + ⋅ ⋅ 0.1 ⋅ 0.1 + ⋅ ⋅ ⋅ 0.1 ⋅ ``` """ function delayline_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...; @@ -731,7 +842,9 @@ W_{i,j} = ## Examples -```jldoctest +Default call: + +```jldoctest cyclejumps julia> res_matrix = cycle_jumps(5, 5) 5×5 Matrix{Float32}: 0.0 0.0 0.0 0.1 0.1 @@ -739,7 +852,55 @@ julia> res_matrix = cycle_jumps(5, 5) 0.0 0.1 0.0 0.0 0.0 0.1 0.0 0.1 0.0 0.0 0.0 0.0 0.0 0.1 0.0 +``` + +Changing weights: + +```jldoctest cyclejumps +julia> res_matrix = cycle_jumps(5, 5; jump_weight = 2, cycle_weight=-1) +5×5 Matrix{Float32}: + 0.0 0.0 0.0 2.0 -1.0 +-1.0 0.0 0.0 0.0 0.0 + 0.0 -1.0 0.0 0.0 0.0 + 2.0 0.0 -1.0 0.0 0.0 + 0.0 0.0 0.0 -1.0 0.0 +``` +Changing weights to custom arrays: + +```jldoctest cyclejumps +julia> res_matrix = cycle_jumps(5, 5; jump_weight = .-rand(3), cycle_weight=rand(5)) +5×5 Matrix{Float32}: + 0.0 0.0 0.0 -0.453905 0.443731 + 0.434804 0.0 0.0 0.0 0.0 + 0.0 
0.520551 0.0 0.0 0.0 + -0.453905 0.0 0.0665751 0.0 0.0 + 0.0 0.0 0.0 0.57811 0.0 +``` + +Changing sign of the weights with different samplings: + +```jldoctest cyclejumps +julia> res_matrix = cycle_jumps(5, 5; cycle_kwargs = (;sampling_type=:bernoulli_sample!)) +5×5 Matrix{Float32}: + 0.0 0.0 0.0 0.1 0.1 + 0.1 0.0 0.0 0.0 0.0 + 0.0 -0.1 0.0 0.0 0.0 + 0.1 0.0 0.1 0.0 0.0 + 0.0 0.0 0.0 -0.1 0.0 + +julia> res_matrix = cycle_jumps(5, 5; jump_kwargs = (;sampling_type=:irrational_sample!)) +5×5 Matrix{Float32}: + 0.0 0.0 0.0 -0.1 0.1 + 0.1 0.0 0.0 0.0 0.0 + 0.0 0.1 0.0 0.0 0.0 + -0.1 0.0 0.1 0.0 0.0 + 0.0 0.0 0.0 0.1 0.0 +``` + +Changing cycle jumps length: + +```jldoctest cyclejumps julia> res_matrix = cycle_jumps(5, 5; jump_size = 2) 5×5 Matrix{Float32}: 0.0 0.0 0.1 0.0 0.1 @@ -747,7 +908,30 @@ julia> res_matrix = cycle_jumps(5, 5; jump_size = 2) 0.1 0.1 0.0 0.0 0.1 0.0 0.0 0.1 0.0 0.0 0.0 0.0 0.1 0.1 0.0 + +julia> res_matrix = cycle_jumps(5, 5; jump_size = 4) +5×5 Matrix{Float32}: + 0.0 0.0 0.0 0.0 0.1 + 0.1 0.0 0.0 0.0 0.0 + 0.0 0.1 0.0 0.0 0.0 + 0.0 0.0 0.1 0.0 0.0 + 0.1 0.0 0.0 0.1 0.0 ``` + +Return as a sparse matrix: + +```jldoctest cyclejumps +julia> using SparseArrays + +julia> res_matrix = cycle_jumps(5, 5; return_sparse=true) +5×5 SparseMatrixCSC{Float32, Int64} with 7 stored entries: + ⋅ ⋅ ⋅ 0.1 0.1 + 0.1 ⋅ ⋅ ⋅ ⋅ + ⋅ 0.1 ⋅ ⋅ ⋅ + 0.1 ⋅ 0.1 ⋅ ⋅ + ⋅ ⋅ ⋅ 0.1 ⋅ +``` + """ function cycle_jumps(rng::AbstractRNG, ::Type{T}, dims::Integer...; cycle_weight::Union{Number, AbstractVector} = T(0.1f0), @@ -815,7 +999,9 @@ W_{i,j} = ## Examples -```jldoctest +Default call: + +```jldoctest scycle julia> res_matrix = simple_cycle(5, 5) 5×5 Matrix{Float32}: 0.0 0.0 0.0 0.0 0.1 @@ -823,14 +1009,64 @@ julia> res_matrix = simple_cycle(5, 5) 0.0 0.1 0.0 0.0 0.0 0.0 0.0 0.1 0.0 0.0 0.0 0.0 0.0 0.1 0.0 +``` + +Changing weights: + +```jldoctest scycle +julia> res_matrix = simple_cycle(5, 5; cycle_weight=0.99) +5×5 Matrix{Float32}: + 0.0 0.0 0.0 0.0 0.99 + 0.99 0.0 0.0 0.0 0.0 + 0.0 
0.99 0.0 0.0 0.0 + 0.0 0.0 0.99 0.0 0.0 + 0.0 0.0 0.0 0.99 0.0 +``` + +Changing weights to a custom array: + +```jldoctest scycle +julia> res_matrix = simple_cycle(5, 5; cycle_weight=rand(5)) +5×5 Matrix{Float32}: + 0.0 0.0 0.0 0.0 0.471823 + 0.534782 0.0 0.0 0.0 0.0 + 0.0 0.0764598 0.0 0.0 0.0 + 0.0 0.0 0.507883 0.0 0.0 + 0.0 0.0 0.0 0.546656 0.0 +``` + +Changing sign of the weights with different samplings: + +```jldoctest scycle +julia> res_matrix = simple_cycle(5, 5; sampling_type=:irrational_sample!) +5×5 Matrix{Float32}: + 0.0 0.0 0.0 0.0 -0.1 + -0.1 0.0 0.0 0.0 0.0 + 0.0 0.1 0.0 0.0 0.0 + 0.0 0.0 -0.1 0.0 0.0 + 0.0 0.0 0.0 -0.1 0.0 -julia> res_matrix = simple_cycle(5, 5; weight = 11) +julia> res_matrix = simple_cycle(5, 5; sampling_type=:bernoulli_sample!) 5×5 Matrix{Float32}: - 0.0 0.0 0.0 0.0 11.0 - 11.0 0.0 0.0 0.0 0.0 - 0.0 11.0 0.0 0.0 0.0 - 0.0 0.0 11.0 0.0 0.0 - 0.0 0.0 0.0 11.0 0.0 + 0.0 0.0 0.0 0.0 0.1 + 0.1 0.0 0.0 0.0 0.0 + 0.0 -0.1 0.0 0.0 0.0 + 0.0 0.0 0.1 0.0 0.0 + 0.0 0.0 0.0 -0.1 0.0 +``` + +Returning as sparse: + +```jldoctest scycle +julia> using SparseArrays + +julia> res_matrix = simple_cycle(5, 5; return_sparse=true) +5×5 SparseMatrixCSC{Float32, Int64} with 5 stored entries: + ⋅ ⋅ ⋅ ⋅ 0.1 + 0.1 ⋅ ⋅ ⋅ ⋅ + ⋅ 0.1 ⋅ ⋅ ⋅ + ⋅ ⋅ 0.1 ⋅ ⋅ + ⋅ ⋅ ⋅ 0.1 ⋅ ``` """ function simple_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; @@ -880,15 +1116,29 @@ W_{i,j} = ## Examples -```jldoctest -julia> reservoir_matrix = double_cycle(5, 5; cycle_weight = 0.1, second_cycle_weight = 0.3) +Default call: + +```jldoctest dcycle +julia> res_matrix = double_cycle(5, 5) 5×5 Matrix{Float32}: - 0.0 0.3 0.0 0.0 0.3 - 0.1 0.0 0.3 0.0 0.0 - 0.0 0.1 0.0 0.3 0.0 - 0.0 0.0 0.1 0.0 0.3 + 0.0 0.1 0.0 0.0 0.1 + 0.1 0.0 0.1 0.0 0.0 + 0.0 0.1 0.0 0.1 0.0 + 0.0 0.0 0.1 0.0 0.1 0.1 0.0 0.0 0.1 0.0 ``` + +Changing weights: + +```jldoctest dcycle +julia> res_matrix = double_cycle(5, 5; cycle_weight = -0.1, second_cycle_weight = 0.3) +5×5 Matrix{Float32}: + 0.0 0.3 0.0 0.0 0.3 + 
-0.1 0.0 0.3 0.0 0.0 + 0.0 -0.1 0.0 0.3 0.0 + 0.0 0.0 -0.1 0.0 0.3 + -0.1 0.0 0.0 -0.1 0.0 +``` """ function double_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; cycle_weight::Union{Number, AbstractVector} = T(0.1f0), @@ -967,8 +1217,22 @@ W_{i,j} = ## Examples -```jldoctest -julia> true_doublecycle(5, 5; cycle_weight = 0.1, second_cycle_weight = 0.3) +Default call: + +```jldoctest tdcycle +julia> res_matrix = true_doublecycle(5, 5) +5×5 Matrix{Float32}: + 0.0 0.1 0.0 0.0 0.1 + 0.1 0.0 0.1 0.0 0.0 + 0.0 0.1 0.0 0.1 0.0 + 0.0 0.0 0.1 0.0 0.1 + 0.1 0.0 0.0 0.1 0.0 +``` + +Changing weights: + +```jldoctest tdcycle +julia> res_matrix = true_doublecycle(5, 5; cycle_weight = 0.1, second_cycle_weight = 0.3) 5×5 Matrix{Float32}: 0.0 0.3 0.0 0.0 0.1 0.1 0.0 0.3 0.0 0.0 @@ -976,6 +1240,50 @@ julia> true_doublecycle(5, 5; cycle_weight = 0.1, second_cycle_weight = 0.3) 0.0 0.0 0.1 0.0 0.3 0.3 0.0 0.0 0.1 0.0 ``` + +Changing weights to custom arrays: + +```jldoctest tdcycle +julia> res_matrix = true_doublecycle(5, 5; cycle_weight = rand(5), second_cycle_weight = .-rand(5)) +5×5 Matrix{Float32}: + 0.0 -0.647066 0.0 0.0 0.604095 + 0.6687 0.0 -0.853307 0.0 0.0 + 0.0 0.40399 0.0 -0.565928 0.0 + 0.0 0.0 0.960196 0.0 -0.120321 + -0.120321 0.0 0.0 0.874008 0.0 +``` + +Changing sign of the weights with different samplings: + +```jldoctest tdcycle +julia> res_matrix = true_doublecycle(5, 5; cycle_kwargs=(;sampling_type=:irrational_sample!)) +5×5 Matrix{Float32}: + 0.0 0.1 0.0 0.0 -0.1 + -0.1 0.0 0.1 0.0 0.0 + 0.0 0.1 0.0 0.1 0.0 + 0.0 0.0 -0.1 0.0 0.1 + 0.1 0.0 0.0 -0.1 0.0 + +julia> res_matrix = true_doublecycle(5, 5; second_cycle_kwargs=(;sampling_type=:bernoulli_sample!)) +5×5 Matrix{Float32}: + 0.0 -0.1 0.0 0.0 0.1 + 0.1 0.0 0.1 0.0 0.0 + 0.0 0.1 0.0 -0.1 0.0 + 0.0 0.0 0.1 0.0 0.1 + 0.1 0.0 0.0 0.1 0.0 +``` + +Returning as sparse: + +```jldoctest tdcycle +julia> res_matrix = true_doublecycle(5, 5; return_sparse=true) +5×5 SparseMatrixCSC{Float32, Int64} with 10 stored 
entries:
+ ⋅ 0.1 ⋅ ⋅ 0.1
+ 0.1 ⋅ 0.1 ⋅ ⋅
+ ⋅ 0.1 ⋅ 0.1 ⋅
+ ⋅ ⋅ 0.1 ⋅ 0.1
+ 0.1 ⋅ ⋅ 0.1 ⋅
+```
 """
 function true_doublecycle(rng::AbstractRNG, ::Type{T}, dims::Integer...;
 cycle_weight::Union{Number, AbstractVector} = T(0.1f0),
@@ -1033,51 +1341,107 @@ W_{i,j} =
 - `return_sparse`: flag for returning a `sparse` matrix.
 `true` requires `SparseArrays` to be loaded. Default is `false`.
- - `sampling_type`: Sampling that decides the distribution of `weight` negative numbers.
- If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each
- `weight` can be positive with a probability set by `positive_prob`. If set to
- `:irrational_sample!` the `weight` is negative if the decimal number of the
- irrational number chosen is odd. If set to `:regular_sample!`, each weight will be
- assigned a negative sign after the chosen `strides`. `strides` can be a single
- number or an array. Default is `:no_sample`.
- - `positive_prob`: probability of the `weight` being positive when `sampling_type` is
- set to `:bernoulli_sample!`. Default is 0.5.
- - `irrational`: Irrational number whose decimals decide the sign of `weight`.
- Default is `pi`.
- - `start`: Which place after the decimal point the counting starts for the `irrational`
- sign counting. Default is 1.
- - `strides`: number of strides for assigning negative value to a weight. It can be an
- integer or an array. Default is 2.
+ - `cycle_kwargs` and `selfloop_kwargs`: named tuples that control the kwargs for the
+ cycle and self-loop weights respectively. The kwargs are as follows:
+
+ + `sampling_type`: Sampling that decides the distribution of `weight` negative numbers.
+ If set to `:no_sample` the sign is unchanged. If set to `:bernoulli_sample!` then each
+ `weight` can be positive with a probability set by `positive_prob`. If set to
+ `:irrational_sample!` the `weight` is negative if the decimal number of the
+ irrational number chosen is odd. 
If set to `:regular_sample!`, each weight will be
+ assigned a negative sign after the chosen `strides`. `strides` can be a single
+ number or an array. Default is `:no_sample`.
+ + `positive_prob`: probability of the `weight` being positive when `sampling_type` is
+ set to `:bernoulli_sample!`. Default is 0.5.
+ + `irrational`: Irrational number whose decimals decide the sign of `weight`.
+ Default is `pi`.
+ + `start`: Which place after the decimal point the counting starts for the `irrational`
+ sign counting. Default is 1.
+ + `strides`: number of strides for assigning negative value to a weight. It can be an
+ integer or an array. Default is 2.
 ## Examples
-```jldoctest
-julia> reservoir_matrix = selfloop_cycle(5, 5)
+Default call:
+
+```jldoctest slcycle
+julia> res_matrix = selfloop_cycle(5, 5)
 5×5 Matrix{Float32}:
 0.1 0.0 0.0 0.0 0.1
 0.1 0.1 0.0 0.0 0.0
 0.0 0.1 0.1 0.0 0.0
 0.0 0.0 0.1 0.1 0.0
 0.0 0.0 0.0 0.1 0.1
+```
+
+Changing weights:
+
+```jldoctest slcycle
+julia> res_matrix = selfloop_cycle(5, 5; cycle_weight=-0.2, selfloop_weight=0.5)
+5×5 Matrix{Float32}:
+ 0.5 0.0 0.0 0.0 -0.2
+ -0.2 0.5 0.0 0.0 0.0
+ 0.0 -0.2 0.5 0.0 0.0
+ 0.0 0.0 -0.2 0.5 0.0
+ 0.0 0.0 0.0 -0.2 0.5
+```
+
+Changing weights to custom arrays:
+
+```jldoctest slcycle
+julia> res_matrix = selfloop_cycle(5, 5; cycle_weight=rand(5), selfloop_weight=.-rand(5))
+5×5 Matrix{Float32}:
+ -0.902546 0.0 0.0 0.0 0.0987988
+ 0.911585 -0.968998 0.0 0.0 0.0
+ 0.0 0.00149246 -0.613033 0.0 0.0
+ 0.0 0.0 0.777804 -0.727024 0.0
+ 0.0 0.0 0.0 0.00441047 -0.310635
+```
+
+Changing sign of the weights with different samplings:
+
+```jldoctest slcycle
+julia> res_matrix = selfloop_cycle(5, 5; cycle_kwargs=(;sampling_type=:irrational_sample!))
+5×5 Matrix{Float32}:
+ 0.1 0.0 0.0 0.0 -0.1
+ -0.1 0.1 0.0 0.0 0.0
+ 0.0 0.1 0.1 0.0 0.0
+ 0.0 0.0 -0.1 0.1 0.0
+ 0.0 0.0 0.0 -0.1 0.1
-julia> reservoir_matrix = selfloop_cycle(5, 5; weight=0.2, selfloop_weight=0.5)
+julia> res_matrix = 
selfloop_cycle(5, 5; selfloop_kwargs=(;sampling_type=:bernoulli_sample!)) 5×5 Matrix{Float32}: - 0.5 0.0 0.0 0.0 0.2 - 0.2 0.5 0.0 0.0 0.0 - 0.0 0.2 0.5 0.0 0.0 - 0.0 0.0 0.2 0.5 0.0 - 0.0 0.0 0.0 0.2 0.5 + 0.1 0.0 0.0 0.0 0.1 + 0.1 -0.1 0.0 0.0 0.0 + 0.0 0.1 0.1 0.0 0.0 + 0.0 0.0 0.1 -0.1 0.0 + 0.0 0.0 0.0 0.1 0.1 +``` + +Returning as sparse: + +```jldoctest slcycle +julia> using SparseArrays + +julia> res_matrix = selfloop_cycle(5, 5; return_sparse=true) +5×5 SparseMatrixCSC{Float32, Int64} with 10 stored entries: + 0.1 ⋅ ⋅ ⋅ 0.1 + 0.1 0.1 ⋅ ⋅ ⋅ + ⋅ 0.1 0.1 ⋅ ⋅ + ⋅ ⋅ 0.1 0.1 ⋅ + ⋅ ⋅ ⋅ 0.1 0.1 ``` """ function selfloop_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; cycle_weight::Union{Number, AbstractVector} = T(0.1f0), selfloop_weight::Union{Number, AbstractVector} = T(0.1f0), - return_sparse::Bool = false, kwargs...) where {T <: Number} + return_sparse::Bool = false, selfloop_kwargs::NamedTuple = NamedTuple(), + cycle_kwargs::NamedTuple = NamedTuple()) where {T <: Number} throw_sparse_error(return_sparse) check_res_size(dims...) - reservoir_matrix = simple_cycle(rng, T, dims...; - cycle_weight = T.(cycle_weight), return_sparse = false) - self_loop!(rng, reservoir_matrix, T.(selfloop_weight); kwargs...) + reservoir_matrix = DeviceAgnostic.zeros(rng, T, dims...) + self_loop!(rng, reservoir_matrix, T.(selfloop_weight); selfloop_kwargs...) + simple_cycle!(rng, reservoir_matrix, T.(cycle_weight); cycle_kwargs...) 
return return_init_as(Val(return_sparse), reservoir_matrix) end @@ -1238,22 +1602,93 @@ W_{i,j} = ## Examples -```jldoctest -julia> reservoir_matrix = selfloop_delayline_backward(5, 5) +Default call: + +```jldoctest sldlfb +julia> res_matrix = selfloop_delayline_backward(5, 5) 5×5 Matrix{Float32}: 0.1 0.0 0.1 0.0 0.0 0.1 0.1 0.0 0.1 0.0 0.0 0.1 0.1 0.0 0.1 0.0 0.0 0.1 0.1 0.0 0.0 0.0 0.0 0.1 0.1 +``` -julia> reservoir_matrix = selfloop_delayline_backward(5, 5; weight=0.3) +Changing weights: + +```jldoctest sldlfb +julia> res_matrix = selfloop_delayline_backward(5, 5; selfloop_weight=0.3, fb_weight=0.99, delay_weight=-0.5) 5×5 Matrix{Float32}: - 0.1 0.0 0.3 0.0 0.0 - 0.3 0.1 0.0 0.3 0.0 - 0.0 0.3 0.1 0.0 0.3 - 0.0 0.0 0.3 0.1 0.0 - 0.0 0.0 0.0 0.3 0.1 + 0.3 0.0 0.99 0.0 0.0 + -0.5 0.3 0.0 0.99 0.0 + 0.0 -0.5 0.3 0.0 0.99 + 0.0 0.0 -0.5 0.3 0.0 + 0.0 0.0 0.0 -0.5 0.3 +``` + +Changing weights to custom arrays: +```jldoctest sldlfb +julia> res_matrix = selfloop_delayline_backward(5, 5; selfloop_weight=randn(5), fb_weight=rand(5), delay_weight=-rand(5)) +5×5 Matrix{Float32}: + -1.22847 0.0 0.384073 0.0 0.0 + -0.699175 2.63937 0.0 0.345408 0.0 + 0.0 -0.5171 -0.452312 0.0 0.0205082 + 0.0 0.0 -0.193893 1.45921 0.0 + 0.0 0.0 0.0 -0.453015 -1.43402 +``` + +Changing sign of the weights with different samplings: + +```jldoctest sldlfb +julia> res_matrix = selfloop_delayline_backward(5, 5; selfloop_kwargs=(;sampling_type=:irrational_sample!)) +5×5 Matrix{Float32}: + -0.1 0.0 0.1 0.0 0.0 + 0.1 0.1 0.0 0.1 0.0 + 0.0 0.1 -0.1 0.0 0.1 + 0.0 0.0 0.1 -0.1 0.0 + 0.0 0.0 0.0 0.1 -0.1 + +julia> res_matrix = selfloop_delayline_backward(5, 5; delay_kwargs=(;sampling_type=:bernoulli_sample!)) +5×5 Matrix{Float32}: + 0.1 0.0 0.1 0.0 0.0 + 0.1 0.1 0.0 0.1 0.0 + 0.0 -0.1 0.1 0.0 0.1 + 0.0 0.0 0.1 0.1 0.0 + 0.0 0.0 0.0 -0.1 0.1 + +julia> res_matrix = selfloop_delayline_backward(5, 5; fb_kwargs=(;sampling_type=:regular_sample!)) +5×5 Matrix{Float32}: + 0.1 0.0 0.1 0.0 0.0 + 0.1 0.1 0.0 -0.1 0.0 
+ 0.0 0.1 0.1 0.0 0.1 + 0.0 0.0 0.1 0.1 0.0 + 0.0 0.0 0.0 0.1 0.1 +``` + +Shifting the delay and the backward line: + +```jldoctest sldlfb +julia> res_matrix = selfloop_delayline_backward(5, 5; delay_shift=3, fb_shift=2) +5×5 Matrix{Float32}: + 0.1 0.0 0.1 0.0 0.0 + 0.0 0.1 0.0 0.1 0.0 + 0.0 0.0 0.1 0.0 0.1 + 0.1 0.0 0.0 0.1 0.0 + 0.0 0.1 0.0 0.0 0.1 +``` + +Returning as sparse: + +```jldoctest sldlfb +julia> using SparseArrays + +julia> res_matrix = selfloop_delayline_backward(5, 5; return_sparse=true) +5×5 SparseMatrixCSC{Float32, Int64} with 12 stored entries: + 0.1 ⋅ 0.1 ⋅ ⋅ + 0.1 0.1 ⋅ 0.1 ⋅ + ⋅ 0.1 0.1 ⋅ 0.1 + ⋅ ⋅ 0.1 0.1 ⋅ + ⋅ ⋅ ⋅ 0.1 0.1 ``` """ function selfloop_delayline_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...; @@ -1336,22 +1771,72 @@ W_{i,j} = ## Examples -```jldoctest -julia> reservoir_matrix = selfloop_forwardconnection(5, 5) +Default call: + +```jldoctest slfc +julia> res_matrix = selfloop_forwardconnection(5, 5) 5×5 Matrix{Float32}: 0.1 0.0 0.0 0.0 0.0 0.0 0.1 0.0 0.0 0.0 0.1 0.0 0.1 0.0 0.0 0.0 0.1 0.0 0.1 0.0 0.0 0.0 0.1 0.0 0.1 +``` -julia> reservoir_matrix = selfloop_forwardconnection(5, 5; forward_weight=0.5) +Changing weights: + +```jldoctest slfc +julia> res_matrix = selfloop_forwardconnection(5, 5; forward_weight=0.5, selfloop_weight=0.99) 5×5 Matrix{Float32}: - 0.1 0.0 0.0 0.0 0.0 - 0.0 0.1 0.0 0.0 0.0 - 0.5 0.0 0.1 0.0 0.0 - 0.0 0.5 0.0 0.1 0.0 - 0.0 0.0 0.5 0.0 0.1 + 0.99 0.0 0.0 0.0 0.0 + 0.0 0.99 0.0 0.0 0.0 + 0.5 0.0 0.99 0.0 0.0 + 0.0 0.5 0.0 0.99 0.0 + 0.0 0.0 0.5 0.0 0.99 +``` + +Changing weights to custom arrays: + +```jldoctest slfc +julia> res_matrix = selfloop_forwardconnection(5, 5; forward_weight=rand(5), selfloop_weight=.-rand(5)) +5×5 Matrix{Float32}: + -0.0420509 0.0 0.0 0.0 0.0 + 0.0 -0.116113 0.0 0.0 0.0 + 0.69173 0.0 -0.513592 0.0 0.0 + 0.0 0.522245 0.0 -0.199966 0.0 + 0.0 0.0 0.784556 0.0 -0.918653 +``` + +```jldoctest slfc +julia> res_matrix = selfloop_forwardconnection(5, 5; 
delay_kwargs=(;sampling_type=:irrational_sample!)) +5×5 Matrix{Float32}: + 0.1 0.0 0.0 0.0 0.0 + 0.0 0.1 0.0 0.0 0.0 + -0.1 0.0 0.1 0.0 0.0 + 0.0 0.1 0.0 0.1 0.0 + 0.0 0.0 -0.1 0.0 0.1 + +julia> res_matrix = selfloop_forwardconnection(5, 5; selfloop_kwargs=(;sampling_type=:bernoulli_sample!)) +5×5 Matrix{Float32}: + 0.1 0.0 0.0 0.0 0.0 + 0.0 -0.1 0.0 0.0 0.0 + 0.1 0.0 0.1 0.0 0.0 + 0.0 0.1 0.0 -0.1 0.0 + 0.0 0.0 0.1 0.0 0.1 +``` + +Returning as sparse: + +```jldoctest slfc +julia> using SparseArrays + +julia> res_matrix = selfloop_forwardconnection(5, 5; return_sparse=true) +5×5 SparseMatrixCSC{Float32, Int64} with 8 stored entries: + 0.1 ⋅ ⋅ ⋅ ⋅ + ⋅ 0.1 ⋅ ⋅ ⋅ + 0.1 ⋅ 0.1 ⋅ ⋅ + ⋅ 0.1 ⋅ 0.1 ⋅ + ⋅ ⋅ 0.1 ⋅ 0.1 ``` """ function selfloop_forwardconnection(rng::AbstractRNG, ::Type{T}, dims::Integer...;