Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion CHANGELOGS.rst
Original file line number Diff line number Diff line change
@@ -1,10 +1,15 @@
Change Logs
===========

0.3.2
0.3.3
+++++

* :pr:`104`: add code rendering when converting a model into code
* :pr:`103`: fix import issue with the latest onnx version

0.3.2
+++++

* :pr:`101`: fix as_tensor in onnx_text_plot_tree

0.3.1
Expand Down
7 changes: 7 additions & 0 deletions _unittests/ut_light_api/test_backend_export.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,12 +4,15 @@
from difflib import unified_diff
import packaging.version as pv
import numpy
import ml_dtypes
from numpy.testing import assert_allclose
from onnx.defs import onnx_opset_version
import onnx.backend.base
import onnx.backend.test
import onnx.shape_inference
import onnx.version_converter
import onnx.helper as oh
import onnx.numpy_helper as onh
from onnx import ModelProto, TensorProto, __version__ as onnx_version
from onnx.helper import (
make_function,
Expand Down Expand Up @@ -94,6 +97,10 @@ def run(

locs = {
"np": numpy,
"ml_dtypes": ml_dtypes,
"onnx": onnx,
"oh": oh,
"onh": onh,
"to_array": to_array,
"to_array_extended": to_array_extended,
"from_array": from_array,
Expand Down
49 changes: 27 additions & 22 deletions _unittests/ut_translate_api/test_translate.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
import unittest
from textwrap import dedent
import numpy as np
from onnx import ModelProto, TensorProto
import onnx
from onnx.defs import onnx_opset_version
from onnx.reference import ReferenceEvaluator
from onnx_array_api.ext_test_case import ExtTestCase
from onnx_array_api.light_api import start, g
from onnx_array_api.translate_api import translate
from onnx_array_api.translate_api import translate, translate_header
from onnx_array_api.translate_api.base_emitter import EventType

OPSET_API = min(19, onnx_opset_version() - 1)
Expand All @@ -18,9 +18,14 @@ def test_event_type(self):
EventType.to_str(EventType.INITIALIZER), "EventType.INITIALIZER"
)

def test_translate_header(self):
for f in ["light", "onnx", "builder"]:
translate_header(f)
self.assertRaise(lambda: translate_header("NONE"), ValueError)

def test_exp(self):
onx = start(opset=19).vin("X").Exp().rename("Y").vout().to_onnx()
self.assertIsInstance(onx, ModelProto)
self.assertIsInstance(onx, onnx.ModelProto)
self.assertIn("Exp", str(onx))
ref = ReferenceEvaluator(onx)
a = np.arange(10).astype(np.float32)
Expand All @@ -32,25 +37,25 @@ def test_exp(self):
"""
(
start(opset=19)
.vin('X', elem_type=TensorProto.FLOAT)
.vin('X', elem_type=onnx.TensorProto.FLOAT)
.bring('X')
.Exp()
.rename('Y')
.bring('Y')
.vout(elem_type=TensorProto.FLOAT)
.vout(elem_type=onnx.TensorProto.FLOAT)
.to_onnx()
)"""
).strip("\n")
self.assertEqual(expected, code)

onx2 = (
start(opset=19)
.vin("X", elem_type=TensorProto.FLOAT)
.vin("X", elem_type=onnx.TensorProto.FLOAT)
.bring("X")
.Exp()
.rename("Y")
.bring("Y")
.vout(elem_type=TensorProto.FLOAT)
.vout(elem_type=onnx.TensorProto.FLOAT)
.to_onnx()
)
ref = ReferenceEvaluator(onx2)
Expand All @@ -68,7 +73,7 @@ def test_transpose(self):
.vout()
.to_onnx()
)
self.assertIsInstance(onx, ModelProto)
self.assertIsInstance(onx, onnx.ModelProto)
self.assertIn("Transpose", str(onx))
ref = ReferenceEvaluator(onx)
a = np.arange(10).astype(np.float32)
Expand All @@ -82,15 +87,15 @@ def test_transpose(self):
start(opset=19)
.cst(np.array([-1, 1], dtype=np.int64))
.rename('r')
.vin('X', elem_type=TensorProto.FLOAT)
.vin('X', elem_type=onnx.TensorProto.FLOAT)
.bring('X', 'r')
.Reshape()
.rename('r0_0')
.bring('r0_0')
.Transpose(perm=[1, 0])
.rename('Y')
.bring('Y')
.vout(elem_type=TensorProto.FLOAT)
.vout(elem_type=onnx.TensorProto.FLOAT)
.to_onnx()
)"""
).strip("\n")
Expand All @@ -107,7 +112,7 @@ def test_topk_reverse(self):
.vout()
.to_onnx()
)
self.assertIsInstance(onx, ModelProto)
self.assertIsInstance(onx, onnx.ModelProto)
ref = ReferenceEvaluator(onx)
x = np.array([[0, 1, 2, 3], [9, 8, 7, 6]], dtype=np.float32)
k = np.array([2], dtype=np.int64)
Expand All @@ -120,15 +125,15 @@ def test_topk_reverse(self):
"""
(
start(opset=19)
.vin('X', elem_type=TensorProto.FLOAT)
.vin('K', elem_type=TensorProto.INT64)
.vin('X', elem_type=onnx.TensorProto.FLOAT)
.vin('K', elem_type=onnx.TensorProto.INT64)
.bring('X', 'K')
.TopK(axis=-1, largest=0, sorted=1)
.rename('Values', 'Indices')
.bring('Values')
.vout(elem_type=TensorProto.FLOAT)
.vout(elem_type=onnx.TensorProto.FLOAT)
.bring('Indices')
.vout(elem_type=TensorProto.FLOAT)
.vout(elem_type=onnx.TensorProto.FLOAT)
.to_onnx()
)"""
).strip("\n")
Expand All @@ -152,7 +157,7 @@ def test_export_if(self):
.to_onnx()
)

self.assertIsInstance(onx, ModelProto)
self.assertIsInstance(onx, onnx.ModelProto)
ref = ReferenceEvaluator(onx)
x = np.array([[0, 1, 2, 3], [9, 8, 7, 6]], dtype=np.float32)
k = np.array([2], dtype=np.int64)
Expand All @@ -162,19 +167,19 @@ def test_export_if(self):
code = translate(onx)
selse = (
"g().cst(np.array([0], dtype=np.int64)).rename('Z')."
"bring('Z').vout(elem_type=TensorProto.FLOAT)"
"bring('Z').vout(elem_type=onnx.TensorProto.FLOAT)"
)
sthen = (
"g().cst(np.array([1], dtype=np.int64)).rename('Z')."
"bring('Z').vout(elem_type=TensorProto.FLOAT)"
"bring('Z').vout(elem_type=onnx.TensorProto.FLOAT)"
)
expected = dedent(
f"""
(
start(opset=19)
.cst(np.array([0.0], dtype=np.float32))
.rename('r')
.vin('X', elem_type=TensorProto.FLOAT)
.vin('X', elem_type=onnx.TensorProto.FLOAT)
.bring('X')
.ReduceSum(keepdims=1, noop_with_empty_axes=0)
.rename('Xs')
Expand All @@ -185,7 +190,7 @@ def test_export_if(self):
.If(else_branch={selse}, then_branch={sthen})
.rename('W')
.bring('W')
.vout(elem_type=TensorProto.FLOAT)
.vout(elem_type=onnx.TensorProto.FLOAT)
.to_onnx()
)"""
).strip("\n")
Expand All @@ -210,15 +215,15 @@ def test_aionnxml(self):
start(opset=19, opsets={'ai.onnx.ml': 3})
.cst(np.array([-1, 1], dtype=np.int64))
.rename('r')
.vin('X', elem_type=TensorProto.FLOAT)
.vin('X', elem_type=onnx.TensorProto.FLOAT)
.bring('X', 'r')
.Reshape()
.rename('USE')
.bring('USE')
.ai.onnx.ml.Normalizer(norm='MAX')
.rename('Y')
.bring('Y')
.vout(elem_type=TensorProto.FLOAT)
.vout(elem_type=onnx.TensorProto.FLOAT)
.to_onnx()
)"""
).strip("\n")
Expand Down
46 changes: 23 additions & 23 deletions _unittests/ut_translate_api/test_translate_builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
from textwrap import dedent
import numpy as np
import onnx.helper as oh
from onnx import ModelProto, TensorProto
import onnx
from onnx.checker import check_model
from onnx.defs import onnx_opset_version
from onnx.reference import ReferenceEvaluator
Expand All @@ -22,7 +22,7 @@ def setUp(self):

def test_exp(self):
onx = start(opset=19, ir_version=10).vin("X").Exp().rename("Y").vout().to_onnx()
self.assertIsInstance(onx, ModelProto)
self.assertIsInstance(onx, onnx.ModelProto)
self.assertIn("Exp", str(onx))
ref = ReferenceEvaluator(onx)
a = np.arange(10).astype(np.float32)
Expand All @@ -42,9 +42,9 @@ def light_api(
return Y

g = GraphBuilder({'': 19}, ir_version=10)
g.make_tensor_input("X", TensorProto.FLOAT, ())
g.make_tensor_input("X", onnx.TensorProto.FLOAT, ())
light_api(g.op, "X")
g.make_tensor_output("Y", TensorProto.FLOAT, ()__SUFFIX__)
g.make_tensor_output("Y", onnx.TensorProto.FLOAT, ()__SUFFIX__)
model = g.to_onnx()
"""
)
Expand All @@ -62,10 +62,10 @@ def light_api(
return Y

g2 = GraphBuilder({"": 19})
g2.make_tensor_input("X", TensorProto.FLOAT, ("A",))
g2.make_tensor_input("X", onnx.TensorProto.FLOAT, ("A",))
light_api(g2.op, "X")
g2.make_tensor_output(
"Y", TensorProto.FLOAT, ("A",), is_dimension=False, indexed=False
"Y", onnx.TensorProto.FLOAT, ("A",), is_dimension=False, indexed=False
)
onx2 = g2.to_onnx()

Expand Down Expand Up @@ -99,9 +99,9 @@ def light_api(
return Y

g = GraphBuilder({'': 19}, ir_version=10)
g.make_tensor_input("X", TensorProto.FLOAT, ())
g.make_tensor_input("X", onnx.TensorProto.FLOAT, ())
light_api(g.op, "X")
g.make_tensor_output("Y", TensorProto.FLOAT, ()__SUFFIX__)
g.make_tensor_output("Y", onnx.TensorProto.FLOAT, ()__SUFFIX__)
model = g.to_onnx()
"""
)
Expand All @@ -122,16 +122,16 @@ def light_api(
return Y

g = GraphBuilder({"": 21})
X = g.make_tensor_input("X", TensorProto.FLOAT, ())
X = g.make_tensor_input("X", onnx.TensorProto.FLOAT, ())
light_api(g.op, X)
g.make_tensor_output("Y", TensorProto.FLOAT, ())
g.make_tensor_output("Y", onnx.TensorProto.FLOAT, ())
model = g.to_onnx()
self.assertNotEmpty(model)
check_model(model)

def test_exp_f(self):
onx = start(opset=19, ir_version=10).vin("X").Exp().rename("Y").vout().to_onnx()
self.assertIsInstance(onx, ModelProto)
self.assertIsInstance(onx, onnx.ModelProto)
self.assertIn("Exp", str(onx))
ref = ReferenceEvaluator(onx)
a = np.arange(10).astype(np.float32)
Expand All @@ -155,9 +155,9 @@ def light_api(

def mm() -> "ModelProto":
g = GraphBuilder({'': 19}, ir_version=10)
g.make_tensor_input("X", TensorProto.FLOAT, ())
g.make_tensor_input("X", onnx.TensorProto.FLOAT, ())
light_api(g.op, "X")
g.make_tensor_output("Y", TensorProto.FLOAT, ()__SUFFIX__)
g.make_tensor_output("Y", onnx.TensorProto.FLOAT, ()__SUFFIX__)
model = g.to_onnx()
return model

Expand All @@ -179,10 +179,10 @@ def light_api(
return Y

g2 = GraphBuilder({"": 19})
g2.make_tensor_input("X", TensorProto.FLOAT, ("A",))
g2.make_tensor_input("X", onnx.TensorProto.FLOAT, ("A",))
light_api(g2.op, "X")
g2.make_tensor_output(
"Y", TensorProto.FLOAT, ("A",), is_dimension=False, indexed=False
"Y", onnx.TensorProto.FLOAT, ("A",), is_dimension=False, indexed=False
)
onx2 = g2.to_onnx()

Expand Down Expand Up @@ -216,11 +216,11 @@ def test_local_function(self):
],
"example",
[
oh.make_tensor_value_info("X", TensorProto.FLOAT, [None, None]),
oh.make_tensor_value_info("A", TensorProto.FLOAT, [None, None]),
oh.make_tensor_value_info("B", TensorProto.FLOAT, [None, None]),
oh.make_tensor_value_info("X", onnx.TensorProto.FLOAT, [None, None]),
oh.make_tensor_value_info("A", onnx.TensorProto.FLOAT, [None, None]),
oh.make_tensor_value_info("B", onnx.TensorProto.FLOAT, [None, None]),
],
[oh.make_tensor_value_info("Y", TensorProto.FLOAT, None)],
[oh.make_tensor_value_info("Y", onnx.TensorProto.FLOAT, None)],
)

onnx_model = oh.make_model(
Expand Down Expand Up @@ -262,11 +262,11 @@ def make_custom_LinearRegression(g: "GraphBuilder"):

def mm() -> "ModelProto":
g = GraphBuilder({'': 14, 'custom': 1}, ir_version=10)
g.make_tensor_input("X", TensorProto.FLOAT, ('', ''))
g.make_tensor_input("A", TensorProto.FLOAT, ('', ''))
g.make_tensor_input("B", TensorProto.FLOAT, ('', ''))
g.make_tensor_input("X", onnx.TensorProto.FLOAT, ('', ''))
g.make_tensor_input("A", onnx.TensorProto.FLOAT, ('', ''))
g.make_tensor_input("B", onnx.TensorProto.FLOAT, ('', ''))
example(g.op, "X", "A", "B")
g.make_tensor_output("Y", TensorProto.FLOAT, ()__SUFFIX__)
g.make_tensor_output("Y", onnx.TensorProto.FLOAT, ()__SUFFIX__)
make_custom_LinearRegression(g)
model = g.to_onnx()
return model
Expand Down
Loading
Loading