adding usage examples
dvatterott committed Jan 5, 2019
1 parent 4ae77c9 commit f460cc9
Showing 4 changed files with 141 additions and 18 deletions.

docs/usage.rst: 125 additions, 2 deletions
@@ -2,6 +2,129 @@
Examples
=====

-To use predeval in a project::
+Example of using the ContinuousEvaluator::

-    import predeval
+.. code:: ipython3
from predeval import ContinuousEvaluator
# create continuous sample.
# this might typically be your model's output from a training data-set
from numpy.random import uniform, seed
seed(1234)
model_output = uniform(0, 100, size=(1000,))
# create evaluator object
ce = ContinuousEvaluator(model_output)
ce.update_param('minimum', 0) # we know our data should not be less than 0
ce.update_param('maximum', 100) # we also know our data should not be greater than 100
# this might typically be your production model's output
new_model_output = uniform(0, 100, size=(1000,))
# check whether the new output is different than expected
test_results = ce.check_data(new_model_output)
.. parsed-literal::
Passed min check; min observed=0.022724991417177876
Passed max check; max observed=99.80687158469324
Passed mean check; mean observed=48.234379605277816 (Expected 50.8804672605415 +- 58.93838342088574)
Passed std check; std observed=29.579104190514 (Expected 29.46919171044287 +- 14.734595855221436)
Passed ks check; test statistic=0.051000000000000045, p=0.14408243524623565
.. code:: ipython3
# print test outputs. note we will not generate assertion errors on failure.
from predeval import evaluate_tests
evaluate_tests(test_results, assert_test=False)
.. parsed-literal::
Passed min test.
Passed max test.
Passed mean test.
Passed std test.
Passed ks test.
.. code:: ipython3
changed_model_output = uniform(0, 100, size=(1000,)) + 20
changed_test_results = ce.check_data(changed_model_output)
.. parsed-literal::
Passed min check; min observed=20.004308527071295
Failed max check; max observed=119.7728425105031
Passed mean check; mean observed=70.78355620677603 (Expected 50.8804672605415 +- 58.93838342088574)
Passed std check; std observed=28.94443741932546 (Expected 29.46919171044287 +- 14.734595855221436)
Failed ks check; test statistic=0.21699999999999997, p=4.182182152969388e-21
.. code:: ipython3
evaluate_tests(changed_test_results, assert_test=False)
.. parsed-literal::
Passed min test.
Failed max test.
Passed mean test.
Passed std test.
Failed ks test.
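
The examples above pass ``assert_test=False`` so failures are only printed. As a sketch (not part of the committed docs), ``assert_test=True`` should instead raise an ``AssertionError`` when a test fails, which is useful in an automated pipeline::

    # hypothetical usage: assert_test=True is expected to raise rather than just print
    try:
        evaluate_tests(changed_test_results, assert_test=True)
    except AssertionError as err:
        print('Model output looks different than expected: {}'.format(err))
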
Example of using the CategoricalEvaluator::

.. code:: ipython3
from predeval import CategoricalEvaluator
# create categorical sample.
# this might typically be your model's output from a training data-set
from numpy.random import choice, seed
seed(1234)
model_output = choice([0, 1, 2], size=(1000,))
# create evaluator object
ce = CategoricalEvaluator(model_output)
# this might typically be your production model's output
new_model_output = choice([0, 1, 2], size=(1000,))
# check whether the new output is different than expected
test_results = ce.check_data(new_model_output)
.. parsed-literal::
Passed chi2 check; test statistic=0.7317191804740675, p=0.6936001826101796
Passed min check; observed=[0 1 2] (Expected [0, 1, 2])
.. code:: ipython3
# print test outputs. note we will not generate assertion errors on failure.
from predeval import evaluate_tests
evaluate_tests(test_results, assert_test=False)
.. parsed-literal::
Passed chi2 test.
Passed exist test.
.. code:: ipython3
changed_model_output = choice([0, 1, 2], size=(1000,))
changed_model_output[:200] = 0
changed_test_results = ce.check_data(changed_model_output)
.. parsed-literal::
Failed chi2 check; test statistic=59.06552162818124, p=1.493086411779028e-13
Passed min check; observed=[0 1 2] (Expected [0, 1, 2])
.. code:: ipython3
evaluate_tests(changed_test_results, assert_test=False)
.. parsed-literal::
Failed chi2 test.
Passed exist test.
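
The chi-square threshold behind the ``chi2_test`` assertion can also be tuned. A sketch, assuming the ``chi2_stat`` keyword visible in the ``categorical.py`` change below and the ``update_param`` method used earlier::

    # hypothetical: loosen the chi2 test-statistic threshold (this commit changes the default to 2)
    ce = CategoricalEvaluator(model_output, chi2_stat=5)
    # or adjust it on an existing evaluator, assuming update_param accepts any assertion_params key
    ce.update_param('chi2_stat', 5)
    test_results = ce.check_data(new_model_output)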

predeval/categorical.py: 8 additions, 8 deletions
@@ -76,14 +76,14 @@ def __init__(
super(CategoricalEvaluator, self).__init__(ref_data, verbose=verbose)

# ---- Fill in Assertion Parameters ---- #
-self.assertion_params_ = {
+self._assertion_params_ = {
'chi2_test': None,
'cat_exists': None,
}

-assert isinstance(kwargs.get('chi2_stat', 0.2),
+assert isinstance(kwargs.get('chi2_stat', 2),
Real), 'expected number, input chi2_test_stat is not a number'
-self.assertion_params_['chi2_stat'] = kwargs.get('chi2_stat', 0.2)
+self._assertion_params_['chi2_stat'] = kwargs.get('chi2_stat', 2)

# ---- create list of assertions to test ---- #
self._possible_assertions_ = {
@@ -93,26 +93,26 @@ def __init__(

# ---- create list of assertions to test ---- #
assertions = ['chi2_test', 'exist'] if assertions is None else assertions
-self.assertions_ = self._check_assertion_types(assertions)
+self._assertions_ = self._check_assertion_types(assertions)

# ---- populate assertion tests with reference data ---- #
-for i in self.assertions_:
+for i in self._assertions_:
self._possible_assertions[i][0](self.ref_data)

# ---- populate list of tests to run and run tests ---- #
-self._tests_ = [self._possible_assertions_[i][1] for i in self.assertions_]
+self._tests_ = [self._possible_assertions_[i][1] for i in self._assertions_]

@property
def assertion_params(self):
-return self.assertion_params_
+return self._assertion_params_

@property
def _possible_assertions(self):
return self._possible_assertions_

@property
def assertions(self):
-return self.assertions_
+return self._assertions_

@property
def _tests(self):
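
These renames move the evaluator's internal state to underscore-prefixed attributes while keeping the read-only properties, so external code keeps using the public names. A sketch of that access (the values are illustrative)::

    ce = CategoricalEvaluator(model_output)
    ce.assertion_params['chi2_stat']   # -> 2, the new default threshold
    ce.assertions                      # -> ['chi2_test', 'exist']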

predeval/continuous.py: 7 additions, 7 deletions
@@ -50,7 +50,7 @@ def __init__(
super(ContinuousEvaluator, self).__init__(ref_data, verbose=verbose)

# ---- Fill in Assertion Parameters ---- #
-self.assertion_params_ = {
+self._assertion_params_ = {
'minimum': kwargs.get('min', None),
'maximum': kwargs.get('max', None),
'mean': kwargs.get('mean', None),
@@ -60,7 +60,7 @@ def __init__(

assert isinstance(kwargs.get('ks_stat', 0.2),
Real), 'expected number, input ks_test_stat is not a number'
-self.assertion_params_['ks_stat'] = kwargs.get('ks_stat', 0.2)
+self._assertion_params_['ks_stat'] = kwargs.get('ks_stat', 0.2)

# ---- create list of assertions to test ---- #
self._possible_assertions_ = {
@@ -73,29 +73,29 @@ def __init__(

# ---- create list of assertions to test ---- #
assertions = ['min', 'max', 'mean', 'std', 'ks_test'] if assertions is None else assertions
-self.assertions_ = self._check_assertion_types(assertions)
+self._assertions_ = self._check_assertion_types(assertions)

# ---- populate assertion tests with reference data ---- #
-for i in self.assertions_:
+for i in self._assertions_:
self._possible_assertions[i][0](self.ref_data)

if ('std' not in assertions) and ('mean' in assertions):
self._possible_assertions['std'][0](self.ref_data)

# ---- populate list of tests to run and run tests ---- #
-self._tests_ = [self._possible_assertions_[i][1] for i in self.assertions_]
+self._tests_ = [self._possible_assertions_[i][1] for i in self._assertions_]

@property
def assertion_params(self):
-return self.assertion_params_
+return self._assertion_params_

@property
def _possible_assertions(self):
return self._possible_assertions_

@property
def assertions(self):
-return self.assertions_
+return self._assertions_

@property
def _tests(self):
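
The continuous evaluator gets the same rename, and the hunk above also shows which keyword arguments the constructor accepts. A sketch of configuring the checks up front instead of via ``update_param`` (keyword names taken from the ``kwargs.get`` calls above)::

    # run only the chosen assertions and seed their thresholds at construction time
    ce = ContinuousEvaluator(
        model_output,
        assertions=['min', 'max', 'ks_test'],
        min=0,
        max=100,
        ks_stat=0.2,  # threshold for the Kolmogorov-Smirnov test statistic
    )
    test_results = ce.check_data(new_model_output)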

predeval/parent.py: 1 addition, 1 deletion
@@ -95,7 +95,7 @@ def check_data(self, test_data):
assert len(test_data.shape) == 1, 'Input data not a single vector'
output = []
for funs in self._tests:
-output.append(funs(comparison_data=test_data))
+output.append(funs(test_data))
return output

def update_param(self, param_key, param_value):
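
With this change ``check_data`` simply validates that the input is a one-dimensional vector and calls each configured test on it positionally. A sketch of the calling pattern (the ``ravel`` step is illustrative, not from the docs)::

    import numpy as np

    # check_data asserts a single vector, so flatten any (n, 1) model output first
    predictions = np.asarray(new_model_output).ravel()
    results = ce.check_data(predictions)        # one result per configured test
    evaluate_tests(results, assert_test=False)  # prints a Passed/Failed line per test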
