Commit

increase code coverage
sdpython committed Dec 10, 2021
1 parent 526ebb5 commit 1393e89
Showing 22 changed files with 192 additions and 21 deletions.
6 changes: 1 addition & 5 deletions _doc/sphinxdoc/source/index.rst
@@ -66,12 +66,8 @@ Among the tools this package implements, you may find:
.. toctree::
:maxdepth: 1

tutorial_onnx/index
tutorial_skl/index
tutorial_bench/index
tutorial_training/index
tutorials
api/apis
onnxmd/index
gyexamples/index
all_notebooks
license
34 changes: 34 additions & 0 deletions _doc/sphinxdoc/source/tutorials.rst
@@ -0,0 +1,34 @@

Tutorials
=========

.. contents::
:local:

ONNX ecosystem
++++++++++++++

The following tutorials introduce the :epkg:`ONNX` ecosystem. They walk the
user through the ONNX specifications, how to execute an ONNX graph,
how to create one, how to convert a model from :epkg:`scikit-learn`,
and how to train it with :epkg:`onnxruntime-training`.

.. toctree::
:maxdepth: 2

tutorial_onnx/index
tutorial_skl/index
tutorial_bench/index
tutorial_training/index

Current documentation of ONNX and onnxruntime
+++++++++++++++++++++++++++++++++++++++++++++

Most of the documentation related to :epkg:`onnx` and :epkg:`onnxruntime`
is written in :epkg:`markdown`. The following section is an attempt
to render it and make it searchable.

.. toctree::
:maxdepth: 2

onnxmd/index
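The tutorials listed in this new page cover the round trip from a scikit-learn model to an ONNX graph executed by onnxruntime. A minimal sketch of that round trip, using skl2onnx and onnxruntime directly; the model and data below are illustrative and not part of this commit:

import numpy
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
from skl2onnx import to_onnx
from onnxruntime import InferenceSession

# train a small scikit-learn model
X, y = make_regression(100, n_features=10, bias=2, random_state=0)
X = X.astype(numpy.float32)
reg = LinearRegression()
reg.fit(X, y)

# convert it into an ONNX graph
onx = to_onnx(reg, X)

# execute the graph with onnxruntime
sess = InferenceSession(onx.SerializeToString(),
                        providers=['CPUExecutionProvider'])
print(sess.run(None, {'X': X[:5]})[0].ravel())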
1 change: 1 addition & 0 deletions _unittests/ut_documentation/_test_example.txt
@@ -0,0 +1 @@
plot_orttraining_benchmark_torch.py
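This text file is the glue of the new coverage check: each test_documentation_examples_* test below appends the name of every example it executes, and test_documentation_check_coverage.py then compares that list against the plot_*.py scripts in _doc/examples. A condensed sketch of that round trip (the helper names and paths are illustrative, not part of the commit):

import os

# each example runner appends the script it just executed
def record_example(log_path, name):
    with open(log_path, "a", encoding="utf-8") as f:
        f.write(name + "\n")

# the coverage check then diffs the log against the example folder
def missing_examples(log_path, examples_folder):
    with open(log_path, "r", encoding="utf-8") as f:
        done = set(line for line in f.read().split("\n")
                   if os.path.splitext(line)[-1] == ".py")
    found = set(name for name in os.listdir(examples_folder)
                if name.startswith("plot_") and name.endswith(".py"))
    return found - done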
35 changes: 35 additions & 0 deletions _unittests/ut_documentation/test_documentation_check_coverage.py
@@ -0,0 +1,35 @@
# -*- coding: utf-8 -*-
"""
@brief test log(time=2800s)
"""
import os
import unittest
from pyquickhelper.pycode import ExtTestCase


class TestDocumentationCheckCoverage(ExtTestCase):

def test_notebook_artificiel(self):
with open(
os.path.join(os.path.dirname(__file__),
"_test_example.txt"), "r", encoding='utf-8') as f:
lines = f.read().split('\n')

this = os.path.abspath(os.path.dirname(__file__))
fold = os.path.normpath(
os.path.join(this, '..', '..', '_doc', 'examples'))
found = os.listdir(fold)

done = set(_ for _ in lines if os.path.splitext(_)[-1] == '.py')
found = set(_ for _ in found
if (os.path.splitext(_)[-1] == '.py' and
_.startswith('plot_')))
if len(done) != len(found):
missing = found - done
raise AssertionError(
"Following example were not tested:\n%s."
"" % "\n".join(sorted(missing)))


if __name__ == "__main__":
unittest.main()
@@ -115,6 +115,11 @@ def test_documentation_examples_lightgbm(self):
finally:
if sys.path[0] == fold:
del sys.path[0]
with open(
os.path.join(os.path.dirname(__file__),
"_test_example.txt"), "a",
encoding='utf-8') as f:
f.write(name + "\n")
tested += 1
if tested == 0:
raise RuntimeError("No example was tested.")
@@ -106,6 +106,11 @@ def test_documentation_examples_training(self):
finally:
if sys.path[0] == fold:
del sys.path[0]
with open(
os.path.join(os.path.dirname(__file__),
"_test_example.txt"), "a",
encoding='utf-8') as f:
f.write(name + "\n")
tested += 1
if tested == 0:
raise RuntimeError("No example was tested.")
@@ -120,6 +120,11 @@ def test_documentation_examples_training_fwbw(self):
finally:
if sys.path[0] == fold:
del sys.path[0]
with open(
os.path.join(os.path.dirname(__file__),
"_test_example.txt"), "a",
encoding='utf-8') as f:
f.write(name + "\n")
tested += 1
if tested == 0:
raise RuntimeError("No example was tested.")
@@ -120,6 +120,11 @@ def test_documentation_examples_training_torch(self):
finally:
if sys.path[0] == fold:
del sys.path[0]
with open(
os.path.join(os.path.dirname(__file__),
"_test_example.txt"), "a",
encoding='utf-8') as f:
f.write(name + "\n")
tested += 1
if tested == 0:
raise RuntimeError("No example was tested.")
5 changes: 5 additions & 0 deletions _unittests/ut_documentation/test_documentation_examples_u.py
@@ -108,6 +108,11 @@ def test_documentation_examples_u(self):
finally:
if sys.path[0] == fold:
del sys.path[0]
with open(
os.path.join(os.path.dirname(__file__),
"_test_example.txt"), "a",
encoding='utf-8') as f:
f.write(name + "\n")
tested += 1
if tested == 0:
raise RuntimeError("No example was tested.")
5 changes: 5 additions & 0 deletions _unittests/ut_documentation/test_documentation_examples_u_.py
@@ -94,6 +94,11 @@ def test_documentation_examplesU_(self):
finally:
if sys.path[0] == fold:
del sys.path[0]
with open(
os.path.join(os.path.dirname(__file__),
"_test_example.txt"), "a",
encoding='utf-8') as f:
f.write(name + "\n")
tested += 1
if tested == 0:
raise RuntimeError("No example was tested.")
7 changes: 7 additions & 0 deletions _unittests/ut_training/test_data_loader.py
@@ -57,6 +57,13 @@ def test_ort_data_loader_numpy(self):
data.desc,
[((100, 10), numpy.float64), ((100, 1), numpy.float64)])

def test_ort_data_loader_numpy_exc(self):
X, y = make_regression( # pylint: disable=W0632
100, n_features=10, bias=2)
self.assertRaise(
lambda: OrtDataLoader(X, y, batch_size=5, device='cpu2'),
Exception)

def test_ort_data_loader_pickle(self):
X, y = make_regression( # pylint: disable=W0632
100, n_features=10, bias=2)
5 changes: 5 additions & 0 deletions _unittests/ut_training/test_learning_rate.py
@@ -24,6 +24,11 @@ def test_learning_rate_sgd_regressor_default(self):
self.assertEqual(val[0], 0.01)
self.assertGreater(val[-1], 0.001)

def test_learning_rate_sgd_regressor_exc(self):
self.assertRaise(
lambda: LearningRateSGDRegressor(learning_rate='EXC'),
ValueError)

def test_learning_rate_sgd_regressor_optimal(self):
cllr = LearningRateSGDRegressor(learning_rate='optimal')
val = list(cllr.loop())
23 changes: 23 additions & 0 deletions _unittests/ut_training/test_optimizers.py
@@ -14,6 +14,7 @@
from mlprodict.onnx_conv import to_onnx
from onnxcustom import __max_supported_opset__ as opset
from onnxcustom.training.sgd_learning_rate import LearningRateSGDRegressor
from onnxcustom.training import ConvergenceError
try:
from onnxruntime import TrainingSession
except ImportError:
@@ -53,6 +54,28 @@ def test_ort_gradient_optimizers_use_numpy(self):
self.assertGreater(len(losses), 1)
self.assertFalse(any(map(numpy.isnan, losses)))

@unittest.skipIf(TrainingSession is None, reason="not training")
def test_ort_gradient_optimizers_use_numpy_nan(self):
from onnxcustom.utils.onnx_orttraining import add_loss_output
from onnxcustom.training.optimizers import OrtGradientOptimizer
X, y = make_regression( # pylint: disable=W0632
100, n_features=10, bias=2, random_state=0)
X = X.astype(numpy.float32)
y = y.astype(numpy.float32)
X_train, _, y_train, __ = train_test_split(X, y)
reg = LinearRegression()
reg.fit(X_train, y_train)
reg.coef_ = reg.coef_.reshape((1, -1))
onx = to_onnx(reg, X_train, target_opset=opset,
black_op={'LinearRegressor'})
set_model_props(onx, {'info': 'unit test'})
onx_loss = add_loss_output(onx)
inits = ['intercept', 'coef']
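# learning_rate=1e3 is deliberately far too large so that training
# diverges and fit raises ConvergenceError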
train_session = OrtGradientOptimizer(
onx_loss, inits, learning_rate=1e3)
self.assertRaise(lambda: train_session.fit(X, y, use_numpy=True),
ConvergenceError)

@unittest.skipIf(TrainingSession is None, reason="not training")
def test_ort_gradient_optimizers_use_numpy_pickle(self):
from onnxcustom.utils.onnx_orttraining import add_loss_output
9 changes: 9 additions & 0 deletions _unittests/ut_training/test_orttraining_forward_backward.py
@@ -43,8 +43,17 @@ def forward_no_training(self):
black_op={'LinearRegressor'})

# starts testing
self.assertRaise(
lambda: OrtGradientForwardBackward(
onx, debug=True, enable_logging=True, weights_to_train=[]),
ValueError)
self.assertRaise(
lambda: OrtGradientForwardBackward(
onx, debug=True, enable_logging=True, providers=['NONE']),
ValueError)
forback = OrtGradientForwardBackward(
onx, debug=True, enable_logging=True)
self.assertEqual(repr(forback), "OrtGradientForwardBackward(...)")
self.assertTrue(hasattr(forback, 'cls_type_'))
self.assertEqual(forback.cls_type_._onx_inp,
['X', 'coef', 'intercept'])
1 change: 1 addition & 0 deletions _unittests/ut_utils/test_onnx_helper.py
@@ -37,6 +37,7 @@ def test_dtype_to_var_type(self):
self.assertEqual(dtype_to_var_type(numpy.float32), FloatTensorType)
self.assertEqual(dtype_to_var_type(numpy.float64), DoubleTensorType)
self.assertRaise(lambda: dtype_to_var_type(numpy.int64), ValueError)
self.assertEqual(proto_type_to_dtype('tensor(double)'), numpy.float64)

def test_proto_type_to_dtype(self):
self.assertEqual(proto_type_to_dtype(1), numpy.float32)
29 changes: 29 additions & 0 deletions _unittests/ut_utils/test_onnxruntime_helper.py
@@ -0,0 +1,29 @@
"""
@brief test log(time=1s)
"""
import unittest
from pyquickhelper.pycode import ExtTestCase
from onnxcustom.utils.onnxruntime_helper import (
device_to_provider, provider_to_device, get_ort_device_type)


class TestOnnxRuntimeHelper(ExtTestCase):

def test_provider_to_device(self):
self.assertEqual(provider_to_device('CPUExecutionProvider'), 'cpu')
self.assertEqual(provider_to_device('CUDAExecutionProvider'), 'cuda')
self.assertRaise(lambda: provider_to_device('NONE'), ValueError)

def test_device_to_provider(self):
self.assertEqual(device_to_provider('cpu'), 'CPUExecutionProvider')
self.assertEqual(device_to_provider('gpu'), 'CUDAExecutionProvider')
self.assertRaise(lambda: device_to_provider('NONE'), ValueError)

def test_get_ort_device_type(self):
self.assertEqual(get_ort_device_type('cpu'), 0)
self.assertEqual(get_ort_device_type('cuda'), 1)
self.assertRaise(lambda: get_ort_device_type('none'), ValueError)


if __name__ == "__main__":
unittest.main()
1 change: 1 addition & 0 deletions onnxcustom/training/__init__.py
@@ -2,3 +2,4 @@
@file
@brief Shortcuts to *training*.
"""
from .excs import ConvergenceError # noqa
4 changes: 2 additions & 2 deletions onnxcustom/training/data_loader.py
@@ -93,8 +93,8 @@ def iter_numpy(self):
batch. The function yields :epkg:`OrtValue`.
"""
if self.device not in ('Cpu', 'cpu'):
raise RuntimeError(
"Only CPU device is allowed if numpy array are requested "
raise RuntimeError( # pragma: no cover
"Only CPU device is allowed if numpy arrays are requested "
"not %r." % self.device)
N = 0
b = len(self) - self.batch_size
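For context, OrtDataLoader (the class defined in this module) wraps numpy arrays into batches of OrtValue. A minimal usage sketch based on the unit tests above; the import path follows the file shown here, and the exact structure yielded by iter_numpy is an assumption:

from sklearn.datasets import make_regression
from onnxcustom.training.data_loader import OrtDataLoader

X, y = make_regression(100, n_features=10, bias=2)
data = OrtDataLoader(X, y, batch_size=5, device='cpu')
print(data.desc)  # shapes and dtypes of the stored arrays

# iterate over the CPU batches; each item is assumed here to be a pair
# of OrtValue (features, target) produced by iter_numpy
for batch in data.iter_numpy():
    pass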
2 changes: 1 addition & 1 deletion onnxcustom/training/optimizers.py
@@ -227,7 +227,7 @@ def _bind_input_ortvalue(self, name, bind, c_ortvalue):
shape=c_ortvalue.shape,
buffer_ptr=c_ortvalue.__array_interface__['data'][0])
else:
raise TypeError(
raise TypeError( # pragma: no cover
"Unable to bind type %r for name %r." % (
type(c_ortvalue), name))

4 changes: 2 additions & 2 deletions onnxcustom/training/optimizers_partial.py
@@ -170,7 +170,7 @@ def _build_loss_function(self):
if self.enable_logging:
self._logger = logging.getLogger("onnxcustom")
else:
self._logger = None # pragma: no cover
self._logger = None

def fit(self, X, y, X_val=None, y_val=None, use_numpy=False):
"""
@@ -352,7 +352,7 @@ def _bind_output_ortvalue(self, name, bind, c_ortvalue):
shape=c_ortvalue.shape(),
buffer_ptr=c_ortvalue.data_ptr())
else:
raise TypeError(
raise TypeError( # pragma: no cover
"Unable to bind type %r for name %r." % (
type(c_ortvalue), name))

12 changes: 6 additions & 6 deletions onnxcustom/training/ortgradient.py
@@ -116,7 +116,7 @@ def __init__(self, onnx_model, weights_to_train=None,
"input_names and provider_options must have the same length.")

if list(sorted(self.weights_to_train)) != self.weights_to_train:
raise ValueError(
raise ValueError( # pragma: no cover
"List of weights to train must be sorted but %r is not. "
"You shoud use function onnx_rename_weights to do that "
"before calling this class." % self.weights_to_train)
@@ -181,7 +181,7 @@ def _init_next(self):

if (len(config.initializer_names) != # noqa
len(config.initializer_names_to_train)):
raise RuntimeError(
raise RuntimeError( # pragma: no cover
"Unable to automatically fill "
"OrtModuleGraphBuilderConfiguration, mismatch between "
"%r and %r (initializer_names=%r)." % (
@@ -278,8 +278,8 @@ def get_initializer(self, name, exc=True):
if exc:
raise RuntimeError(
"Unable to find name %r in %r." % (
name, list(
i.name for i in self.onnx_model.graph.initializer)))
name,
list(i.name for i in self.onnx_model.graph.initializer)))
return None

def _create_onnx_graphs(self):
@@ -479,7 +479,7 @@ def _create_onnx_graphs(self):
})

if len(kwargs['_onx_inp']) != len(kwargs['_onx_out']):
raise RuntimeError(
raise RuntimeError( # pragma: no cover
"Gradient input and output are inconsistant: "
"%r != %r" % (kwargs['_onx_inp'], kwargs['_onx_out']))
return kwargs
@@ -561,7 +561,7 @@ def _validate_(tensors):
vect.push_back(C_OrtValue.ortvalue_from_numpy(t, dev))
if debug:
if len(vect) != len(tensors):
raise RuntimeError(
raise RuntimeError( # pragma: no cover
"Unexpected array length %d != %d (len(devices)=%d)." % (
len(vect), len(tensors), len(devices)))
_validate_(vect)
10 changes: 5 additions & 5 deletions onnxcustom/utils/onnxruntime_helper.py
@@ -3,7 +3,7 @@
@brief Onnxruntime helper.
"""
from onnxruntime.capi._pybind_state import ( # pylint: disable=E0611
OrtDevice)
OrtDevice as C_OrtDevice)


def device_to_provider(device_name):
@@ -50,14 +50,14 @@

def get_ort_device_type(device):
"""
Converts device into :epkg:`OrtDevice`.
Converts device into :epkg:`C_OrtDevice`.
:param device: string
:return: :epkg:`OrtDevice`
:return: :epkg:`C_OrtDevice`
"""
device_type = device if isinstance(device, str) else device.type
if device_type == 'cuda':
return OrtDevice.cuda()
return C_OrtDevice.cuda()
if device_type == 'cpu':
return OrtDevice.cpu()
return C_OrtDevice.cpu()
raise ValueError('Unsupported device type: %r.' % device_type)
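
A short sketch of how these helpers fit together with onnxruntime; the model file name is hypothetical and only serves as an illustration:

from onnxruntime import InferenceSession
from onnxcustom.utils.onnxruntime_helper import (
    device_to_provider, provider_to_device)

provider = device_to_provider('cpu')      # 'CPUExecutionProvider'
sess = InferenceSession("model.onnx", providers=[provider])
print(provider_to_device(sess.get_providers()[0]))  # 'cpu'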
