Skip to content

Commit

Permalink
Feed Forward Neural Networks Added!
Browse files Browse the repository at this point in the history
  • Loading branch information
MelJan committed May 25, 2019
1 parent 44b1fc4 commit 22a60d9
Show file tree
Hide file tree
Showing 7 changed files with 1,464 additions and 19 deletions.
10 changes: 3 additions & 7 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,15 +10,11 @@ its extensive use of unittests assures a high level of reliability and correctne

News
''''''''''''''''''''''''''''''''''''''''''''''''''''
- Auto-encoder module added, including denoising, sparse, contractive, and slowness AEs
- Unittests and examples added
- Tutorials added
- Feed Forward neural networks will be added

- Upcoming (mid-term): Feed Forward neural networks will be added

- Future: MDP integration
- Future: Deep Boltzmann machines will be added
- Future: RBM/DBM in tensorFlow
- Future: RBM/DBM in tensorFlow / PyTorch


Features index
''''''''''''''''''''''''''''''''''''''''''''''''''''
Expand Down
17 changes: 8 additions & 9 deletions pydeep/base/basicstructure.py
Original file line number Diff line number Diff line change
Expand Up @@ -303,8 +303,7 @@ def _add_hidden_units(self,
new_weights = numx.random.randn(self.input_dim, num_new_hiddens) * initial_weights
else:
new_weights = initial_weights
self.w = numx.array(numx.insert(self.w, numx.ones(num_new_hiddens) * position, new_weights, axis=1),
self.dtype)
self.w = numx.array(numx.insert(self.w, numx.array(numx.ones(num_new_hiddens) * position,dtype=int), new_weights, axis=1),self.dtype)

# AUTO -> Initialized to Hidden range mean
# Scalar -> Initialized to given value
Expand All @@ -316,7 +315,7 @@ def _add_hidden_units(self,
new_oh = numx.zeros((1, num_new_hiddens)) + initial_offsets
else:
new_oh = initial_offsets
self.oh = numx.array(numx.insert(self.oh, numx.ones(num_new_hiddens) * position, new_oh, axis=1), self.dtype)
self.oh = numx.array(numx.insert(self.oh, numx.array(numx.ones(num_new_hiddens) * position,dtype=int), new_oh, axis=1),self.dtype)

# AUTO -> Initialized to randn()*0.01
# Scalar -> Initialized to given value + randn()*0.01
Expand All @@ -331,7 +330,7 @@ def _add_hidden_units(self,
new_bias = initial_bias + numx.zeros((1, num_new_hiddens))
else:
new_bias = numx.array(initial_bias, dtype=self.dtype)
self.bh = numx.array(numx.insert(self.bh, numx.ones(num_new_hiddens) * position, new_bias, axis=1), self.dtype)
self.bh = numx.array(numx.insert(self.bh, numx.array(numx.ones(num_new_hiddens) * position,dtype=int), new_bias, axis=1), self.dtype)

self.output_dim = self.w.shape[1]

Expand Down Expand Up @@ -382,9 +381,9 @@ def _add_visible_units(self,
data = numx.concatenate(data)
new_data_mean = data.mean(axis=0).reshape(1, num_new_visibles)
new_data_std = data.std(axis=0).reshape(1, num_new_visibles)
self._data_mean = numx.array(numx.insert(self._data_mean, numx.ones(num_new_visibles) * position,
self._data_mean = numx.array(numx.insert(self._data_mean, numx.array(numx.ones(num_new_visibles)* position, dtype=int),
new_data_mean, axis=1), self.dtype)
self._data_std = numx.array(numx.insert(self._data_std, numx.ones(num_new_visibles) * position,
self._data_std = numx.array(numx.insert(self._data_std, numx.array(numx.ones(num_new_visibles) * position, dtype=int),
new_data_std, axis=1), self.dtype)

# AUTO -> Small random values out of
Expand All @@ -400,7 +399,7 @@ def _add_visible_units(self,
new_weights = numx.random.randn(num_new_visibles, self.output_dim) * initial_weights
else:
new_weights = initial_weights
self.w = numx.array(numx.insert(self.w, numx.ones(num_new_visibles) * position, new_weights, axis=0),
self.w = numx.array(numx.insert(self.w, numx.array(numx.ones(num_new_visibles) * position,dtype = int), new_weights, axis=0),
self.dtype)

if initial_offsets is 'AUTO':
Expand All @@ -413,7 +412,7 @@ def _add_visible_units(self,
new_ov = numx.zeros((1, num_new_visibles)) + initial_offsets
else:
new_ov = initial_offsets
self.ov = numx.array(numx.insert(self.ov, numx.ones(num_new_visibles) * position, new_ov, axis=1), self.dtype)
self.ov = numx.array(numx.insert(self.ov, numx.array(numx.ones(num_new_visibles) * position,dtype = int), new_ov, axis=1), self.dtype)

# AUTO -> data != None -> Initialized to the inverse sigmoid of
# data mean
Expand All @@ -430,7 +429,7 @@ def _add_visible_units(self,
new_bias = numx.zeros((1, num_new_visibles)) + initial_bias
else:
new_bias = initial_bias
self.bv = numx.array(numx.insert(self.bv, numx.ones(num_new_visibles) * position, new_bias, axis=1), self.dtype)
self.bv = numx.array(numx.insert(self.bv, numx.array(numx.ones(num_new_visibles) * position,dtype = int), new_bias, axis=1), self.dtype)
self.input_dim = self.w.shape[0]

def _remove_visible_units(self, indices):
Expand Down
33 changes: 33 additions & 0 deletions pydeep/fnn/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
''' Package initializer for ``pydeep.fnn`` (feed-forward neural networks).

    Declares the public sub-modules of the package via ``__all__`` so that
    ``from pydeep.fnn import *`` exposes ``layer``, ``model`` and ``trainer``.

    :Version:
        1.0

    :Date:
        08.02.2016

    :Author:
        Jan Melchior

    :Contact:
        JanMelchior@gmx.de

    :License:

        Copyright (C) 2016 Jan Melchior

        This program is free software: you can redistribute it and/or modify
        it under the terms of the GNU General Public License as published by
        the Free Software Foundation, either version 3 of the License, or
        (at your option) any later version.

        This program is distributed in the hope that it will be useful,
        but WITHOUT ANY WARRANTY; without even the implied warranty of
        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
        GNU General Public License for more details.

        You should have received a copy of the GNU General Public License
        along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
# Public API of the fnn package: one module per concern (layers, the
# network model, and the training loop).
__all__ = ["layer", "model", "trainer"]

0 comments on commit 22a60d9

Please sign in to comment.