Skip to content

Commit

Permalink
Metrics
Browse files Browse the repository at this point in the history
  • Loading branch information
xehivs committed Dec 19, 2019
1 parent 0adc027 commit 0f481dc
Show file tree
Hide file tree
Showing 8 changed files with 55 additions and 14 deletions.
23 changes: 23 additions & 0 deletions doc/api.rst
Original file line number Diff line number Diff line change
Expand Up @@ -68,3 +68,26 @@ API
ensembles.OOB
ensembles.UOB
ensembles.WAE

:mod:`strlearn.metrics`: Metrics
====================================================

.. automodule:: strlearn.metrics
:no-members:
:no-inherited-members:

.. currentmodule:: strlearn

.. autosummary::
:toctree: generated/
:template: function.rst

metrics.binary_confusion_matrix
metrics.specificity
metrics.recall
metrics.precision
metrics.fbeta_score
metrics.f1_score
metrics.balanced_accuracy_score
metrics.geometric_mean_score_1
metrics.geometric_mean_score_2
2 changes: 1 addition & 1 deletion doc/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ The ``stream-learn`` module is a set of tools necessary for processing data stre
- `evaluators <evaluators.html>`_ - containing classes for running experiments on stream data in accordance with the Test-Then-Train and Prequential methodology.
- `classifiers <classifiers.html>`_ - containing sample stream classifiers,
- `ensembles <ensembles.html>`_ - containing standard ensemble models for stream data classification,
- `utils <evaluators.html>`_ - containing typical classification quality metrics in data streams.
- `metrics <metrics.html>`_ - containing typical classification quality metrics in data streams.

You can read more about each module in the User Guide.

Expand Down
2 changes: 1 addition & 1 deletion examples/plot_basic.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
stream = sl.streams.StreamGenerator(n_chunks=30, n_drifts=1)

# Select vector of metrics
metrics = [sl.metrics.bac, sl.metrics.f1_score]
metrics = [sl.metrics.balanced_accuracy_score, sl.metrics.f1_score]

# Initialize evaluator with given metrics
evaluator = sl.evaluators.TestThenTrain(metrics)
Expand Down
4 changes: 2 additions & 2 deletions strlearn/evaluators/Prequential.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

import numpy as np
from sklearn.metrics import accuracy_score
from ..metrics import bac
from ..metrics import balanced_accuracy_score
from sklearn.base import ClassifierMixin


Expand Down Expand Up @@ -44,7 +44,7 @@ class Prequential:
[0.925 0.92567027 0.9250634 0.92567027 0.92610837]]
"""

def __init__(self, metrics=(accuracy_score, bac)):
def __init__(self, metrics=(accuracy_score, balanced_accuracy_score)):
if isinstance(metrics, (list, tuple)):
self.metrics = metrics
else:
Expand Down
6 changes: 3 additions & 3 deletions strlearn/evaluators/TestThenTrain.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

import numpy as np
from sklearn.metrics import accuracy_score
from ..metrics import bac
from ..metrics import balanced_accuracy_score
from sklearn.base import ClassifierMixin


Expand All @@ -20,7 +20,7 @@ class TestThenTrain:
The class labels.
scores_ : array-like, shape (stream.n_chunks, 5)
Values of accuracy_score, roc_auc_score,
geometric_mean_score, bac and f_score for
geometric_mean_score, balanced_accuracy_score and f_score for
each processed data chunk.
Examples
Expand All @@ -41,7 +41,7 @@ class TestThenTrain:
[0.935 0.93569212 0.93540766 0.93569212 0.93467337]]
"""

def __init__(self, metrics=(accuracy_score, bac)):
def __init__(self, metrics=(accuracy_score, balanced_accuracy_score)):
if isinstance(metrics, (list, tuple)):
self.metrics = metrics
else:
Expand Down
2 changes: 1 addition & 1 deletion strlearn/metrics/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
f1_score,
recall,
precision,
bac,
balanced_accuracy_score,
geometric_mean_score_1,
geometric_mean_score_2,
specificity,
Expand Down
4 changes: 2 additions & 2 deletions strlearn/metrics/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ def f1_score(y_true, y_pred):
return fbeta_score(y_true, y_pred, 1)


def bac(y_true, y_pred):
def balanced_accuracy_score(y_true, y_pred):
"""
Calculates the balanced accuracy score.
Expand All @@ -103,7 +103,7 @@ def bac(y_true, y_pred):
Returns
-------
bac : float
balanced_accuracy_score : float
"""
spe, rec = specificity(y_true, y_pred), recall(y_true, y_pred)
return np.nan_to_num((rec + spe) / 2)
Expand Down
26 changes: 22 additions & 4 deletions strlearn/tests/test_evaluators.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
import sys
import strlearn as sl
from sklearn.metrics import accuracy_score, roc_auc_score
from ..metrics import bac, f1_score, geometric_mean_score_1
from ..metrics import balanced_accuracy_score, f1_score, geometric_mean_score_1

sys.path.insert(0, "../..")

Expand All @@ -24,7 +24,13 @@ def test_TTT_single_clf():
def test_TTT_custom_metrics():
stream = get_stream()
clf = sl.classifiers.AccumulatedSamplesClassifier()
metrics = [accuracy_score, roc_auc_score, geometric_mean_score_1, bac, f1_score]
metrics = [
accuracy_score,
roc_auc_score,
geometric_mean_score_1,
balanced_accuracy_score,
f1_score,
]
evaluator = sl.evaluators.TestThenTrain(metrics=metrics)
evaluator.process(stream, clf)

Expand All @@ -46,7 +52,13 @@ def test_TTT_multiple_clfs():
sl.classifiers.AccumulatedSamplesClassifier(),
sl.classifiers.AccumulatedSamplesClassifier(),
]
metrics = [accuracy_score, roc_auc_score, geometric_mean_score_1, bac, f1_score]
metrics = [
accuracy_score,
roc_auc_score,
geometric_mean_score_1,
balanced_accuracy_score,
f1_score,
]
evaluator = sl.evaluators.TestThenTrain(metrics=metrics)
evaluator.process(stream, clfs)

Expand All @@ -59,7 +71,13 @@ def test_P_multiple_clfs():
sl.classifiers.AccumulatedSamplesClassifier(),
sl.classifiers.AccumulatedSamplesClassifier(),
]
metrics = [accuracy_score, roc_auc_score, geometric_mean_score_1, bac, f1_score]
metrics = [
accuracy_score,
roc_auc_score,
geometric_mean_score_1,
balanced_accuracy_score,
f1_score,
]
evaluator = sl.evaluators.Prequential(metrics=metrics)
evaluator.process(stream, clfs)

Expand Down

0 comments on commit 0f481dc

Please sign in to comment.