Washout period
nschaetti committed Jan 30, 2019
1 parent 5201f71 commit e6bebb8
Showing 3 changed files with 11 additions and 26 deletions.
5 changes: 3 additions & 2 deletions echotorch/nn/ESN.py
@@ -42,7 +42,7 @@ def __init__(self, input_dim, hidden_dim, output_dim, spectral_radius=0.9, bias_
                 w=None, w_in=None, w_bias=None, w_fdb=None, sparsity=None, input_set=[1.0, -1.0], w_sparsity=None,
                 nonlin_func=torch.tanh, learning_algo='inv', ridge_param=0.0, create_cell=True,
                 feedbacks=False, with_bias=True, wfdb_sparsity=None, normalize_feedbacks=False,
-                softmax_output=False, seed=None):
+                softmax_output=False, seed=None, washout=0):
        """
        Constructor
        :param input_dim: Inputs dimension.
@@ -68,6 +68,7 @@ def __init__(self, input_dim, hidden_dim, output_dim, spectral_radius=0.9, bias_
        self.feedbacks = feedbacks
        self.with_bias = with_bias
        self.normalize_feedbacks = normalize_feedbacks
+       self.washout = washout

        # Recurrent layer
        if create_cell:
@@ -168,7 +169,7 @@ def forward(self, u, y=None, reset_state=True):
        # end if

        # Learning algo
-       return self.output(hidden_states, y)
+       return self.output(hidden_states[:, self.washout:], y)
    # end forward

    # Finish training
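
For context, the slicing added to forward() trims the first self.washout reservoir states of each sequence along the time axis before the readout is learned. A minimal stand-alone sketch of that operation (tensor shapes and the washout value are illustrative, not taken from the repository):

import torch

# Toy stand-in for the reservoir states gathered in forward():
# shape (batch, time, hidden_dim).
hidden_states = torch.randn(2, 100, 50)
washout = 20

# Same slicing as the new return statement: drop the initial transient,
# keep time steps washout..end for every sequence in the batch.
trimmed = hidden_states[:, washout:]
print(trimmed.shape)  # torch.Size([2, 80, 50])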
4 changes: 2 additions & 2 deletions echotorch/nn/LiESN.py
@@ -40,7 +40,7 @@ def __init__(self, input_dim, hidden_dim, output_dim, spectral_radius=0.9,
                 bias_scaling=0, input_scaling=1.0, w=None, w_in=None, w_bias=None, sparsity=None,
                 input_set=[1.0, -1.0], w_sparsity=None, nonlin_func=torch.tanh, learning_algo='inv', ridge_param=0.0,
                 leaky_rate=1.0, train_leaky_rate=False, feedbacks=False, wfdb_sparsity=None,
-                normalize_feedbacks=False, softmax_output=False, seed=None):
+                normalize_feedbacks=False, softmax_output=False, seed=None, washout=0):
        """
        Constructor
        :param input_dim:
@@ -68,7 +68,7 @@ def __init__(self, input_dim, hidden_dim, output_dim, spectral_radius=0.9,
                                   w_sparsity=w_sparsity, nonlin_func=nonlin_func, learning_algo=learning_algo,
                                   ridge_param=ridge_param, create_cell=False, feedbacks=feedbacks,
                                   wfdb_sparsity=wfdb_sparsity, normalize_feedbacks=normalize_feedbacks,
-                                  softmax_output=softmax_output, seed=seed)
+                                  softmax_output=softmax_output, seed=seed, washout=washout)

        # Recurrent layer
        self.esn_cell = LiESNCell(leaky_rate, train_leaky_rate, input_dim, hidden_dim, spectral_radius=spectral_radius,
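
Since LiESN simply forwards the new keyword to the ESN base class, a washout can be requested at construction time. A minimal usage sketch, assuming LiESN is importable from echotorch.nn as elsewhere in the package (sizes and values are illustrative):

import echotorch.nn as etnn

# Hypothetical dimensions; only the washout keyword is specific to this commit.
liesn = etnn.LiESN(
    input_dim=1,
    hidden_dim=100,
    output_dim=1,
    leaky_rate=0.5,
    washout=20  # passed through to ESN.__init__ as shown above
)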
28 changes: 6 additions & 22 deletions echotorch/nn/LiESNCell.py
@@ -46,8 +46,7 @@ def __init__(self, leaky_rate=1.0, train_leaky_rate=False, *args, **kwargs):
        :param train_leaky_rate: Train leaky rate as parameter? (default: False)
        """
        super(LiESNCell, self).__init__(*args, **kwargs)
-       """print("W")
-       print(self.w)"""
+
        # Params
        if train_leaky_rate:
            self.leaky_rate = nn.Parameter(torch.Tensor(1).fill_(leaky_rate), requires_grad=True)
@@ -92,12 +91,10 @@ def forward(self, u, y=None, w_out=None, reset_state=True):

                # Compute input layer
                u_win = self.w_in.mv(ut)
-               # print("u_win")
-               # print(u_win)
+
                # Apply W to x
                x_w = self.w.mv(self.hidden)
-               # print("x_w")
-               # print(x_w)
+
                # Feedback or not
                if self.feedbacks and self.training and y is not None:
                    # Current target
@@ -108,7 +105,6 @@ def forward(self, u, y=None, w_out=None, reset_state=True):

                    # Add everything
                    x = u_win + x_w + y_wfdb + self.w_bias
-                   # x = u_win + x_w + self.w_bias
                elif self.feedbacks and not self.training and w_out is not None:
                    # Add bias
                    bias_hidden = torch.cat((Variable(torch.ones(1)), self.hidden), dim=0)
@@ -128,32 +124,20 @@ def forward(self, u, y=None, w_out=None, reset_state=True):

                    # Add everything
                    x = u_win + x_w + y_wfdb + self.w_bias
-                   # x = u_win + x_w + self.w_bias
                else:
                    # Add everything
                    x = u_win + x_w + self.w_bias
                # end if
-               # print("u_win + x_w + bias")
-               # print(x)
+
                # Apply activation function
                x = self.nonlin_func(x)
-               # print("non lin")
-               # print(x)
+
                # Add to outputs
                self.hidden.data = (self.hidden.mul(1.0 - self.leaky_rate) + x.view(self.output_dim).mul(self.leaky_rate)).data
-               # print("leak")
-               # print(self.hidden.data)
+
                # New last state
                outputs[b, t] = self.hidden
-               """if t == 2:
-                   exit()
-               # end if
-               print("")
-               print("")"""
            # end for
-           """print(outputs[0])
-           plt.imshow(outputs[0].t().numpy(), cmap='Greys')
-           plt.show()"""
        # end for

        return outputs
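
The debug prints removed above surrounded the leaky-integration update that LiESNCell still performs at every time step. A self-contained restatement of that update for the no-feedback branch (all names and sizes below are local to this sketch):

import torch

hidden_dim, input_dim = 50, 1
leaky_rate = 0.5
w_in = torch.randn(hidden_dim, input_dim)   # input weights
w = torch.randn(hidden_dim, hidden_dim)     # recurrent weights
w_bias = torch.randn(hidden_dim)            # bias vector
hidden = torch.zeros(hidden_dim)            # previous reservoir state
ut = torch.randn(input_dim)                 # current input sample

# Pre-activation: input term + recurrent term + bias (no feedback).
x = torch.tanh(w_in.mv(ut) + w.mv(hidden) + w_bias)

# Leaky integration: blend the previous state with the new activation,
# mirroring the self.hidden update kept in forward().
hidden = hidden.mul(1.0 - leaky_rate) + x.mul(leaky_rate)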
