Commit

Issue with feedbacks in ESNCell (#3)
nschaetti committed Jun 28, 2018
1 parent 6f059bf commit 928e8b4
Showing 4 changed files with 113 additions and 4 deletions.
echotorch/nn/ESN.py (2 changes: 1 addition & 1 deletion)
@@ -163,7 +163,7 @@ def forward(self, u, y=None):
         if self.feedbacks and self.training:
             hidden_states = self.esn_cell(u, y)
         elif self.feedbacks and not self.training:
-            hidden_states = self.esn_cell(u, w_out=self.w_out)
+            hidden_states = self.esn_cell(u, w_out=self.output.w_out)
         else:
             hidden_states = self.esn_cell(u)
         # end if
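The fix points the generation branch at the readout's weights: the trained output matrix lives on the readout module (self.output, the RRCell in EchoTorch), not on the ESN wrapper itself, so self.w_out did not exist. A minimal sketch of the dispatch logic, using stand-in classes (StubReadout, StubESN are illustrative only, not EchoTorch classes):

# Sketch only: stand-in classes, not the real EchoTorch modules.
import torch


class StubReadout(object):
    """Owns the trained output weights, like the RRCell readout does."""
    def __init__(self, hidden_dim, output_dim):
        self.w_out = torch.zeros(hidden_dim + 1, output_dim)


class StubESN(object):
    def __init__(self, hidden_dim=100, output_dim=1, feedbacks=True):
        self.feedbacks = feedbacks
        self.training = True
        self.output = StubReadout(hidden_dim, output_dim)

    def forward(self, u, y=None):
        if self.feedbacks and self.training:
            # Training: the true targets y are fed back into the cell.
            return "esn_cell(u, y)"
        elif self.feedbacks and not self.training:
            # Generation: hand the trained readout weights to the cell so it
            # can compute its own feedback; they live on self.output.
            return "esn_cell(u, w_out={})".format(tuple(self.output.w_out.size()))
        else:
            return "esn_cell(u)"


esn = StubESN()
esn.training = False
print(esn.forward(u=None))  # -> esn_cell(u, w_out=(101, 1))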
echotorch/nn/ESNCell.py (5 changes: 4 additions & 1 deletion)
@@ -143,8 +143,11 @@ def forward(self, u, y=None, w_out=None):
             # Add everything
             x = u_win + x_w + y_wfdb + self.w_bias
         elif self.feedbacks and not self.training and w_out is not None:
+            # Add bias
+            bias_hidden = torch.cat((Variable(torch.ones(1)), self.hidden), dim=0)
+
             # Compute past output
-            yt = w_out.mv(self.hidden)
+            yt = w_out.t().mv(bias_hidden)

             # Normalize
             if self.normalize_feedbacks:
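The corrected feedback term prepends the constant bias input to the reservoir state and transposes w_out before the matrix-vector product. A shape check, under the assumption that the readout weights are stored as (hidden_dim + 1) x output_dim with the bias row first:

# Shape sketch for the feedback computation above; dimensions are assumptions
# based on a readout trained on bias-augmented reservoir states.
import torch

hidden_dim, output_dim = 100, 1
hidden = torch.rand(hidden_dim)                  # current reservoir state x_t
w_out = torch.rand(hidden_dim + 1, output_dim)   # readout: (1 + hidden) -> output

# Prepend the constant bias, mirroring how the readout was trained.
bias_hidden = torch.cat((torch.ones(1), hidden), dim=0)   # shape: (hidden_dim + 1,)

# w_out.t() has shape (output_dim, hidden_dim + 1), so mv() yields the past output y_t.
yt = w_out.t().mv(bias_hidden)                   # shape: (output_dim,)
print(yt.shape)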
echotorch/nn/RRCell.py (7 changes: 5 additions & 2 deletions)
@@ -81,9 +81,12 @@ def reset(self):
         Reset learning
         :return:
         """
-        self.xTx.data = torch.zeros(self.x_size, self.x_size)
+        """self.xTx.data = torch.zeros(self.x_size, self.x_size)
         self.xTy.data = torch.zeros(self.x_size, self.output_dim)
-        self.w_out.data = torch.zeros(1, self.input_dim)
+        self.w_out.data = torch.zeros(1, self.input_dim)"""
+        self.xTx.data.fill_(0.0)
+        self.xTy.data.fill_(0.0)
+        self.w_out.data.fill_(0.0)

         # Training mode again
         self.train(True)
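The new reset fills the existing tensors in place instead of rebinding .data to freshly allocated zeros; that keeps shape, dtype and device intact and avoids the old bug of re-creating w_out with shape (1, input_dim). A small sketch with stand-in tensors (not the real RRCell buffers):

# Sketch of why the in-place reset is safer; plain tensors stand in for the
# RRCell's accumulators and output weights.
import torch

x_size, output_dim = 101, 1
xTx = torch.zeros(x_size, x_size)
xTy = torch.zeros(x_size, output_dim)
w_out = torch.zeros(x_size, output_dim)

# In-place fill keeps the existing tensor objects, so any module that already
# holds a reference keeps seeing the reset values, and shapes never change.
for t in (xTx, xTy, w_out):
    t.fill_(0.0)

# Rebinding .data to torch.zeros(1, input_dim), as the commented-out code did,
# would instead replace w_out with a tensor of a different shape.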
examples/generation/narma10_esn_feedbacks.py (new file, 103 additions)
@@ -0,0 +1,103 @@
# -*- coding: utf-8 -*-
#
# File : examples/generation/narma10_esn_feedbacks.py
# Description : NARMA-10 prediction with an ESN using output feedbacks.
# Date : 26th of January, 2018
#
# This file is part of EchoTorch. EchoTorch is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Nils Schaetti <nils.schaetti@unine.ch>


# Imports
import torch
from echotorch.datasets.NARMADataset import NARMADataset
import echotorch.nn as etnn
import echotorch.utils
from torch.autograd import Variable
from torch.utils.data.dataloader import DataLoader
import numpy as np
import mdp

# Dataset params
train_sample_length = 5000
test_sample_length = 1000
n_train_samples = 1
n_test_samples = 1
batch_size = 1

# Reservoir params
spectral_radius = 0.9
leaky_rate = 1.0
input_dim = 1
n_hidden = 100

# Use CUDA?
use_cuda = False
use_cuda = torch.cuda.is_available() if use_cuda else False

# Manual seed
mdp.numx.random.seed(1)
np.random.seed(2)
torch.manual_seed(1)

# NARMA-10 dataset
narma10_train_dataset = NARMADataset(train_sample_length, n_train_samples, system_order=10, seed=1)
narma10_test_dataset = NARMADataset(test_sample_length, n_test_samples, system_order=10, seed=10)

# Data loader
trainloader = DataLoader(narma10_train_dataset, batch_size=batch_size, shuffle=False, num_workers=2)
testloader = DataLoader(narma10_test_dataset, batch_size=batch_size, shuffle=False, num_workers=2)

# ESN with output feedbacks
esn = etnn.ESN(
    input_dim=input_dim,
    hidden_dim=n_hidden,
    output_dim=1,
    spectral_radius=spectral_radius,
    learning_algo='inv',
    # leaky_rate=leaky_rate,
    feedbacks=True
)
if use_cuda:
    esn.cuda()
# end if

# For each batch
for data in trainloader:
    # Inputs and outputs
    inputs, targets = data

    # To variable
    inputs, targets = Variable(inputs), Variable(targets)
    if use_cuda: inputs, targets = inputs.cuda(), targets.cuda()

    # Accumulate xTx and xTy
    esn(inputs, targets)
# end for

# Finalize training
esn.finalize()

# Test MSE
dataiter = iter(testloader)
test_u, test_y = dataiter.next()
test_u, test_y = Variable(test_u), Variable(test_y)
gen_u = Variable(torch.zeros(batch_size, test_sample_length, input_dim))
if use_cuda: test_u, test_y, gen_u = test_u.cuda(), test_y.cuda(), gen_u.cuda()
y_predicted = esn(test_u)
print(u"Test MSE: {}".format(echotorch.utils.mse(y_predicted.data, test_y.data)))
print(u"Test NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, test_y.data)))
print(u"")

y_generated = esn(gen_u)
print(y_generated)
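A possible follow-up, not part of the committed example, to inspect the closed-loop output visually (assumes matplotlib is installed and that y_generated has shape (batch, time, output_dim), as returned by the ESN above):

# Optional plot of the feedback-generated sequence (assumption: matplotlib available).
import matplotlib.pyplot as plt

plt.plot(y_generated[0, :, 0].data.cpu().numpy())
plt.title(u"ESN output generated with output feedbacks")
plt.xlabel(u"Time step")
plt.show()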
