This repository has been archived by the owner on Aug 2, 2022. It is now read-only.

Added lazy loading of the signals submodule functions #15

Merged · 2 commits · Dec 30, 2016

8 changes: 8 additions & 0 deletions biosppy/__init__.py
@@ -11,3 +11,11 @@

# get version
from .version import version as __version__

+# Allow lazy loading
+from biosppy.signals import ecg
+from biosppy.signals import eda
+from biosppy.signals import eeg
+from biosppy.signals import emg
+from biosppy.signals import bvp
+from biosppy.signals import resp
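
For context, the practical effect of the added imports: a plain top-level import now also loads the signal submodules, so both biosppy.ecg and biosppy.signals.ecg resolve without extra import statements. A minimal sketch of the resulting behavior, assuming biosppy is installed:

import biosppy

# "from biosppy.signals import ecg" in __init__.py binds the submodule both
# on the package (biosppy.ecg) and on biosppy.signals, so one import suffices
print(biosppy.ecg)
print(biosppy.signals.ecg.ecg)  # the module's main processing function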
60 changes: 30 additions & 30 deletions biosppy/biometrics.py
@@ -247,7 +247,7 @@ def list_subjects(self):

"""

-subjects = self._subject2label.keys()
+subjects = list(self._subject2label.keys())

return subjects
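
This keys() wrapping is the recurring pattern in the rest of this diff: under Python 3, dict.keys(), dict.values(), and dict.items() return view objects rather than lists, so any call site that indexes, slices, or concatenates the result needs an explicit list(). A plain-Python illustration (no biosppy required):

d = {'a': 1, 'b': 2}

keys = d.keys()        # Python 3: a dict_keys view, not a list
# keys[0]              # TypeError: 'dict_keys' object is not subscriptable

keys = list(d.keys())  # materialize the view
print(keys[0])         # indexing now works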

@@ -356,7 +356,7 @@ def batch_train(self, data=None):
if data is None:
raise TypeError("Please specify the data to train.")

-for sub, val in data.iteritems():
+for sub, val in data.items():
if val is None:
try:
self.dismiss(sub, deferred=True)
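
The iteritems() change above is the other recurring fix: iteritems() was removed in Python 3, while items() exists on both versions (a list on Python 2, a view on Python 3; either is fine for a single pass like this loop). A quick check, using a hypothetical data dict:

data = {'subject1': [1, 2], 'subject2': None}

for sub, val in data.items():  # portable across Python 2 and 3
    print(sub, val)

# data.iteritems()             # AttributeError under Python 3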
@@ -399,10 +399,10 @@ def update_thresholds(self, fraction=1.):

# gather data to test
data = {}
-for subject, label in self._subject2label.iteritems():
+for subject, label in self._subject2label.items():
# select a random fraction of the training data
aux = self.io_load(label)
-indx = range(len(aux))
+indx = list(range(len(aux)))
use, _ = utils.random_fraction(indx, fraction, sort=True)

data[subject] = aux[use]
@@ -411,7 +411,7 @@ def update_thresholds(self, fraction=1.):
_, res = self.evaluate(data, ths)

# choose thresholds at EER
-for subject, label in self._subject2label.iteritems():
+for subject, label in self._subject2label.items():
EER_auth = res['subject'][subject]['authentication']['rates']['EER']
self.set_auth_thr(label, EER_auth[self.EER_IDX, 0], ready=True)

@@ -639,7 +639,7 @@ def evaluate(self, data, thresholds=None, show=False):
thresholds = self.get_thresholds()

# get subjects
-subjects = filter(lambda item: self.check_subject(item), data.keys())
+subjects = [item for item in list(data.keys()) if self.check_subject(item)]
if len(subjects) == 0:
raise ValueError("No enrolled subjects in test set.")
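
The filter() rewrite follows the same logic: Python 3's filter() returns a lazy iterator with no len(), which would break the length check just above, so a list comprehension keeps the result concrete. A standalone illustration:

names = ['a', 'b', 'c']
kept = filter(lambda s: s != 'b', names)
# len(kept)                            # TypeError on Python 3: filter object has no len()

kept = [s for s in names if s != 'b']
print(len(kept))                       # 2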

@@ -722,15 +722,15 @@ def cross_validation(cls, data, labels, cv, thresholds=None, **kwargs):
lbl = labels[item]
train_idx[lbl].append(item)

-train_data = {sub: data[idx] for sub, idx in train_idx.iteritems()}
+train_data = {sub: data[idx] for sub, idx in train_idx.items()}

# test data set
test_idx = collections.defaultdict(list)
for item in test:
lbl = labels[item]
test_idx[lbl].append(item)

-test_data = {sub: data[idx] for sub, idx in test_idx.iteritems()}
+test_data = {sub: data[idx] for sub, idx in test_idx.items()}

# instantiate classifier
clf = cls(**kwargs)
@@ -832,8 +832,8 @@ def _prepare(self, data, targets=None):

# target class labels
if targets is None:
-targets = self._subject2label.values()
-elif isinstance(targets, basestring):
+targets = list(self._subject2label.values())
+elif isinstance(targets, str):
targets = [targets]

return data
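
basestring existed only in Python 2, as the common base of str and unicode; Python 3 keeps only str, hence the rewritten isinstance checks in this file. If dual 2/3 support were a goal, the usual shim would look like the sketch below (an illustration, not something this PR adds):

try:
    string_types = basestring  # Python 2: covers str and unicode
except NameError:
    string_types = str         # Python 3: basestring no longer exists

targets = 'subject1'
if isinstance(targets, string_types):
    targets = [targets]        # normalize a single label to a list
print(targets)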
@@ -975,7 +975,7 @@ def _authenticate(self, data, label, threshold):
# select based on subject label
aux = []
ns = len(dists)
-for i in xrange(ns):
+for i in range(ns):
aux.append(dists[i, train_labels[i, :] == label])

dists = np.array(aux)
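
The xrange() substitutions, here and throughout the remaining files, are the safest of these changes: Python 3's range() is itself lazy, so a bare rename suffices inside loops; only code that needs a real list (like the indx assignment earlier in this diff) gains a list() wrapper. For reference:

r = range(5)    # Python 3: a lazy range object, much like the old xrange
print(list(r))  # [0, 1, 2, 3, 4]
print(r[2])     # unlike generic iterators, range supports indexing and len()
# xrange(5)     # NameError under Python 3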
@@ -984,7 +984,7 @@ def _authenticate(self, data, label, threshold):
dists = dists[:, :self.k]

decision = np.zeros(ns, dtype='bool')
-for i in xrange(ns):
+for i in range(ns):
# compare distances to threshold
count = np.sum(dists[i, :] <= threshold)

@@ -1014,8 +1014,8 @@ def _get_thresholds(self):
return np.linspace(self.min_thr, 1., 100)

maxD = []
-for _ in xrange(3):
-for label in self._subject2label.values():
+for _ in range(3):
+for label in list(self._subject2label.values()):
# randomly select samples
aux = self.io_load(label)
ind = np.random.randint(0, aux.shape[0], 3)
Expand Down Expand Up @@ -1064,7 +1064,7 @@ def _identify(self, data, threshold=None):
ns = len(dists)

labels = []
-for i in xrange(ns):
+for i in range(ns):
lbl, _ = majority_rule(train_labels[i, :], random=True)

# compare distances to threshold
@@ -1102,8 +1102,8 @@ def _prepare(self, data, targets=None):

# target class labels
if targets is None:
-targets = self._subject2label.values()
-elif isinstance(targets, basestring):
+targets = list(self._subject2label.values())
+elif isinstance(targets, str):
targets = [targets]

dists = []
@@ -1415,7 +1415,7 @@ def _authenticate(self, data, label, threshold):
aux = aux[sel, :]

decision = []
-for i in xrange(ns):
+for i in range(ns):
# determine majority
predMax, count = majority_rule(aux[:, i], random=True)
rate = float(count) / norm
@@ -1483,7 +1483,7 @@ def _identify(self, data, threshold=None):
norm = 1.0

labels = []
-for i in xrange(ns):
+for i in range(ns):
# determine majority
predMax, count = majority_rule(aux[:, i], random=True)
rate = float(count) / norm
@@ -1524,11 +1524,11 @@ def _prepare(self, data, targets=None):

# target class labels
if self._nbSubjects == 1:
-pairs = self._models.keys()
+pairs = list(self._models.keys())
else:
if targets is None:
-pairs = self._models.keys()
-elif isinstance(targets, basestring):
+pairs = list(self._models.keys())
+elif isinstance(targets, str):
labels = list(
set(self._subject2label.values()) - set([targets]))
pairs = [[targets, lbl] for lbl in labels]
@@ -1563,9 +1563,9 @@ def _train(self, enroll=None, dismiss=None):
dismiss = []

# process dismiss
-pairs = self._models.keys()
+pairs = list(self._models.keys())
for t in dismiss:
-pairs = filter(lambda p: t in p, pairs)
+pairs = [p for p in pairs if t in p]

for p in pairs:
self._del_clf(p)
@@ -1590,11 +1590,11 @@

# check singles
if self._nbSubjects == 1:
-label = self._subject2label.values()[0]
+label = list(self._subject2label.values())[0]
X = self.io_load(label)
self._get_single_clf(X, label)
elif self._nbSubjects > 1:
-aux = filter(lambda p: '' in p, self._models.keys())
+aux = [p for p in list(self._models.keys()) if '' in p]
if len(aux) != 0:
for p in aux:
self._del_clf(p)
@@ -1850,7 +1850,7 @@ def get_subject_results(results=None,
R = np.zeros(nth, dtype='float')
CM = []

-for i in xrange(nth): # for each threshold
+for i in range(nth): # for each threshold
# authentication
for k, lbl in enumerate(subject_idx): # for each subject
subject_tst = subjects[k]
@@ -2163,7 +2163,7 @@ def combination(results=None, weights=None):
weights = {}

# compile results to find all classes
-vec = results.values()
+vec = list(results.values())
if len(vec) == 0:
raise CombinationError("No keys found.")

@@ -2182,13 +2182,13 @@
# multi-class
counts = np.zeros(nb, dtype='float')

-for n in results.iterkeys():
+for n in results.keys():
# ensure array
res = np.array(results[n])
ns = float(len(res))

# get count for each unique class
-for i in xrange(nb):
+for i in range(nb):
aux = float(np.sum(res == unq[i]))
w = weights.get(n, 1.)
counts[i] += ((aux / ns) * w)
18 changes: 9 additions & 9 deletions biosppy/clustering.py
@@ -134,7 +134,7 @@ def hierarchical(data=None,
'ward', 'weighted']:
raise ValueError("Unknown linkage criterion '%r'." % linkage)

-if not isinstance(metric, basestring):
+if not isinstance(metric, str):
raise TypeError("Please specify the distance metric as a string.")

N = len(data)
@@ -405,19 +405,19 @@ def create_coassoc(ensemble=None, N=None):
nparts = len(ensemble)
assoc = 0
for part in ensemble:
-nsamples = np.array([len(part[key]) for key in part.iterkeys()])
+nsamples = np.array([len(part[key]) for key in part.keys()])
dim = np.sum(nsamples * (nsamples - 1)) / 2

I = np.zeros(dim)
J = np.zeros(dim)
X = np.ones(dim)
ntriplets = 0

-for v in part.itervalues():
+for v in part.values():
nb = len(v)
if nb > 0:
-for h in xrange(nb):
-for f in xrange(h + 1, nb):
+for h in range(nb):
+for f in range(h + 1, nb):
I[ntriplets] = v[h]
J[ntriplets] = v[f]
ntriplets += 1
@@ -539,7 +539,7 @@ def mdist_templates(data=None,
clusters = {0: np.arange(len(data), dtype='int')}

# cluster labels
-ks = clusters.keys()
+ks = list(clusters.keys())

# remove the outliers' cluster, if present
if '-1' in ks:
@@ -631,7 +631,7 @@ def centroid_templates(data=None, clusters=None, ntemplates=1):
raise TypeError("Please specify a data clustering.")

# cluster labels
-ks = clusters.keys()
+ks = list(clusters.keys())

# remove the outliers' cluster, if present
if '-1' in ks:
@@ -839,7 +839,7 @@ def outliers_dmean(data=None,
outliers.append(i)

outliers = np.unique(outliers)
-normal = np.setdiff1d(range(len(data)), outliers, assume_unique=True)
+normal = np.setdiff1d(list(range(len(data))), outliers, assume_unique=True)

# output
clusters = {-1: outliers, 0: normal}
@@ -987,7 +987,7 @@ def _merge_clusters(clusters):

"""

-keys = clusters.keys()
+keys = list(clusters.keys())

# outliers
if -1 in keys:
4 changes: 2 additions & 2 deletions biosppy/metrics.py
@@ -83,7 +83,7 @@ def pdist(X, metric='euclidean', p=2, w=None, V=None, VI=None):

"""

-if isinstance(metric, basestring):
+if isinstance(metric, str):
if metric == 'pcosine':
metric = pcosine

@@ -127,7 +127,7 @@ def cdist(XA, XB, metric='euclidean', p=2, V=None, VI=None, w=None):

"""

-if isinstance(metric, basestring):
+if isinstance(metric, str):
if metric == 'pcosine':
metric = pcosine

10 changes: 5 additions & 5 deletions biosppy/plotting.py
@@ -770,7 +770,7 @@ def _plot_multichannel(ts=None,

# check labels
if labels is None:
-labels = ['Ch. %d' % i for i in xrange(nch)]
+labels = ['Ch. %d' % i for i in range(nch)]

if nch < nrows:
nrows = nch
@@ -794,7 +794,7 @@
ax0.grid()
axs = {(0, 0): ax0}

-for i in xrange(1, nch - 1):
+for i in range(1, nch - 1):
a = i % nrows
b = int(np.floor(i / float(nrows)))
ax = fig.add_subplot(gs[a, b], sharex=ax0)
@@ -822,7 +822,7 @@ def _plot_multichannel(ts=None,
if xlabel is not None:
ax.set_xlabel(xlabel)

-for b in xrange(0, ncols - 1):
+for b in range(0, ncols - 1):
a = nrows - 1
ax = axs[(a, b)]
ax.set_xlabel(xlabel)
@@ -1042,7 +1042,7 @@ def plot_biometrics(assessment=None, eer_idx=None, path=None, show=False):
id_ax = fig.add_subplot(122)

# subject results
-for sub in assessment['subject'].iterkeys():
+for sub in assessment['subject'].keys():
auth_rates = assessment['subject'][sub]['authentication']['rates']
_ = _plot_rates(ths, auth_rates, ['FAR', 'FRR'],
lw=MINOR_LW,
@@ -1134,7 +1134,7 @@ def plot_clustering(data=None, clusters=None, path=None, show=False):
ymin, ymax = _yscaling(data, alpha=1.2)

# determine number of clusters
-keys = clusters.keys()
+keys = list(clusters.keys())
nc = len(keys)

if nc <= 4:
2 changes: 1 addition & 1 deletion biosppy/signals/bvp.py
@@ -181,7 +181,7 @@ def find_onsets(signal=None, sampling_rate=1000., sm_size=None, size=None,

# analyze between maxima of 2nd derivative of ss
detected = False
-for i in xrange(1, len(dpidx) + 1):
+for i in range(1, len(dpidx) + 1):
try:
v, u = dpidx[i - 1], dpidx[i]
except IndexError: