diff --git a/CHANGELOG.md b/CHANGELOG.md index f5fc6c3..8d781e2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,10 @@ # Change Log All changes to 'hpelm' toolbox will be documented in this file. +## [1.0.4] - 22-03-2016 +### Fixed +- Fixed a bug with "icount" size + ## [1.0.3] - 15-03-2016 ### Fixed - HPELM loading, various small errors diff --git a/hpelm/hp_elm.py b/hpelm/hp_elm.py index 71b6b9e..9424b99 100755 --- a/hpelm/hp_elm.py +++ b/hpelm/hp_elm.py @@ -128,7 +128,7 @@ def add_data(self, fX, fT, istart=0, icount=np.inf, fHH=None, fHT=None): N = X.shape[0] _prepare_fHH(fHH, fHT, self.nnet, self.precision) # custom range adjustments - icount = min(istart + icount, N) + icount = min(icount, N - istart) nb = int(np.ceil(float(icount) / self.batch)) # number of batches # weighted classification initialization @@ -199,7 +199,7 @@ def predict(self, fX, fY=None, istart=0, icount=np.inf): X, _ = self._checkdata(fX, None) N = X.shape[0] # custom range adjustments - icount = min(istart + icount, N) + icount = min(icount, N - istart) nb = int(np.ceil(float(icount) / self.batch)) # number of batches # make file to store results if isinstance(fY, basestring): @@ -256,7 +256,7 @@ def project(self, fX, fH=None, istart=0, icount=np.inf): X, _ = self._checkdata(fX, None) N = X.shape[0] # custom range adjustments - icount = min(istart + icount, N) + icount = min(icount, N - istart) nb = int(np.ceil(float(icount) / self.batch)) # number of batches # make file to store results if isinstance(fH, basestring): @@ -335,7 +335,7 @@ def _error(self, T, Y, istart=0, icount=np.inf): icount (int): number of samples to process """ N = T.shape[0] - icount = min(istart + icount, N) + icount = min(icount, N - istart) nb = int(np.ceil(float(icount) / self.batch)) # number of batches if self.classification == "c": @@ -490,7 +490,7 @@ def add_data_async(self, fX, fT, istart=0, icount=np.inf, fHH=None, fHT=None): # TODO: adapt for GPU solver _prepare_fHH(fHH, fHT, self.nnet, 
self.precision) # custom range adjustments - icount = min(istart + icount, N) + icount = min(icount, N - istart) nb = int(np.ceil(float(icount) / self.batch)) # weighted classification initialization @@ -567,7 +567,7 @@ def predict_async(self, fX, fY, istart=0, icount=np.inf): X, _ = self._checkdata(fX, None) N = X.shape[0] # custom range adjustments - icount = min(istart + icount, N) + icount = min(icount, N - istart) nb = int(np.ceil(float(icount) / self.batch)) # number of batches # make file to store results make_hdf5((icount, self.nnet.outputs), fY) diff --git a/setup.py b/setup.py index 45e691d..c84911d 100755 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ def readme(): # sphinx-apidoc -f -o docs hpelm; cd docs; make html; cd ../ setup(name='hpelm', - version='1.0.3', + version='1.0.4', description='High-Performance implementation of an Extreme Learning Machine', long_description=readme(), classifiers=[