4 changes: 2 additions & 2 deletions .github/workflows/ci.yml
@@ -8,7 +8,7 @@ jobs:
    runs-on: ubuntu-latest
    strategy:
      matrix:
-        os: [ubuntu-20.04]
+        os: [ubuntu-24.04]
    steps:
      - name: Check out repository
        uses: actions/checkout@v2
@@ -56,7 +56,7 @@ jobs:
    needs: python-setup
    strategy:
      matrix:
-        os: [ubuntu-20.04]
+        os: [ubuntu-24.04]
        pytest_target: [models, layers]
    name: test-${{ matrix.pytest_target }}
    steps:
9 changes: 7 additions & 2 deletions onnx2kerastl/operation_layers.py
@@ -231,14 +231,19 @@ def convert_reduce_min(node, params, layers, lambda_func, node_name, keras_name):
         axes = params.get("axes")
     elif len(node.input) == 2:
         axes = layers.get(node.input[1])
+    else:
+        axes = None
     noop_with_empty_axes = bool(params.get("noop_with_empty_axes", False))
     keepdims = params.get("keepdims", True)
     if noop_with_empty_axes and params.get("axes") is None:
         layers[node_name] = layers[node.input[0]]
     else:
-        layers[node_name] = tf_math_reduce_min(layers[node.input[0]], axis=axes, keepdims=keepdims,
-                                               tf_name=f"{params['cleaned_name']}_min")
+        if axes is None:
+            layers[node_name] = tf_math_reduce_min(layers[node.input[0]], keepdims=keepdims,
+                                                   tf_name=f"{params['cleaned_name']}_min")
+        else:
+            layers[node_name] = tf_math_reduce_min(layers[node.input[0]], axis=axes, keepdims=keepdims,
+                                                   tf_name=f"{params['cleaned_name']}_min")


 def convert_reduce_prod(node, params, layers, lambda_func, node_name, keras_name):
     """
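For context on the new ReduceMin branches: in ONNX, a ReduceMin with no axes reduces over every dimension (opset 18 moved axes from a node attribute to an optional second input, which is why the converter checks both places), and when noop_with_empty_axes is set with no axes given, the op passes its input through unchanged. Below is a minimal sketch of the underlying semantics using plain tf.math.reduce_min rather than the project's tf_math_reduce_min wrapper; note that in plain TensorFlow axis=None and omitting axis are equivalent, so the two-call split above presumably accommodates how the wrapper handles its arguments.

import numpy as np
import tensorflow as tf

x = np.array([[3.0, 1.0, 2.0],
              [0.0, 5.0, 4.0]], dtype=np.float32)

# No axes: ReduceMin collapses every dimension to the global minimum.
print(tf.math.reduce_min(x, keepdims=True).numpy())            # [[0.]]

# Explicit axes: reduce only along the requested dimension.
print(tf.math.reduce_min(x, axis=1, keepdims=True).numpy())    # [[1.] [0.]], shape (2, 1)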
114 changes: 36 additions & 78 deletions poetry.lock

Large diffs are not rendered by default.

3 changes: 1 addition & 2 deletions pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "onnx2kerastl"
-version = "0.0.165"
+version = "0.0.168"
 description = ""
 authors = ["dorhar <doron.harnoy@tensorleap.ai>"]
 license = "MIT"
@@ -17,7 +17,6 @@
 fvcore = "^0.1.5.post20221221"
 boto3 = "^1.24.22"
 tensorflow-io-gcs-filesystem = "0.34.0"
 keras-data-format-converter = "0.1.22"
-optimum = "1.23.3"

 [tool.poetry.dev-dependencies]
 pytest = "^7.1.2"
34 changes: 34 additions & 0 deletions test/models/private_tests/test_dinov2.py
@@ -0,0 +1,34 @@
import numpy as np
import onnx
from onnx2kerastl import onnx_to_keras
from keras_data_format_converter import convert_channels_first_to_last
import onnxruntime as ort
import torch
import pytest
from test.models.private_tests.aws_utils import aws_s3_download


@pytest.mark.parametrize('aws_s3_download', [["dinov2/", "dinov2/", False]], indirect=True)
def test_dinov2(aws_s3_download):
# The export code below is kept commented out in case we upgrade Python
# batch_size = 1
# dinov2_vits14 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14')
# wm = wrapper_model(dinov2_vits14).to('cpu')
# wm.eval()
# dummy_input = torch.FloatTensor(np.random.uniform(0, 1, (batch_size, 3, 224, 224)))
# torch.onnx.export(wm, dummy_input, "dino-2-test.onnx", input_names=['img'],
# output_names=['vit_out'])
np_input = list(np.random.rand(1, 3, 224, 224))
onnx_path = f'{aws_s3_download}/dino-2-test.onnx'
onnx_model = onnx.load(onnx_path)
keras_model = onnx_to_keras(onnx_model, ['img', 'masks'], allow_partial_compilation=False)
flipped_model = convert_channels_first_to_last(keras_model.converted_model, should_transform_inputs_and_outputs=False)
ort_session = ort.InferenceSession(onnx_path)
keras_res = flipped_model(np.array(np_input))
res = ort_session.run(
['vit_out'],
input_feed={"img": np.array(np_input).astype(np.float32)}
)
t_mean, t_max = (res[0]-keras_res).__abs__().numpy().mean(), (res[0]-keras_res).__abs__().numpy().max()
assert t_mean < 5e-2
assert t_max < 0.4
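The assertions above bound the mean and max absolute error between the ONNX Runtime reference output and the converted Keras model. A minimal sketch of that comparison metric as a standalone helper (abs_error_stats is a hypothetical name, assuming both outputs are NumPy arrays of the same shape):

import numpy as np

def abs_error_stats(reference: np.ndarray, converted: np.ndarray):
    # Element-wise absolute difference between the two backends' outputs.
    diff = np.abs(reference - converted)
    return diff.mean(), diff.max()

The looser max bound (0.4 vs. 5e-2 for the mean) presumably tolerates a few outlier activations while still catching systematic conversion errors.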
39 changes: 0 additions & 39 deletions test/models/test_dinov2.py

This file was deleted.

2 changes: 1 addition & 1 deletion test/models/test_llama_sentiment_analysis.py
@@ -8,7 +8,7 @@
 from onnx2kerastl import onnx_to_keras
 from keras_data_format_converter import convert_channels_first_to_last
 from onnx2kerastl.customonnxlayer import onnx_custom_objects_map
-from test.utils import export_torch_to_onnx_optimum
+#from test.utils import export_torch_to_onnx_optimum


 @pytest.mark.skip(reason="Fails on CI but works locally (might be too big?)")
58 changes: 29 additions & 29 deletions test/utils.py
@@ -7,7 +7,7 @@

 from onnx2kerastl import onnx_to_keras
 from onnx2kerastl.utils import check_torch_keras_error
-from optimum.exporters.onnx import main_export
+#from optimum.exporters.onnx import main_export

 NP_SEED = 42

@@ -58,31 +58,31 @@ def is_lambda_layers_exist(model: Model):
     return any(isinstance(layer, Lambda) for layer in model.layers)


-def export_torch_to_onnx_optimum(model_name: str, model_output_path: str, task="causal-lm"):
-    """
-    This function takes a model as input (Huggingface name or local path), creates a folder, and saves the ONNX model there.
-    It uses the optimum library.
-    NOTE: For the llama model the maximum absolute difference of the logits is larger than 1e-5; it shouldn't matter much!
-    Args:
-        model_name: model path (local or HF name)
-        model_output_path: output folder path
-        task: model task
-
-    Returns:
-        creates the onnx model in the output folder path
-    """
-    main_export(
-        model_name_or_path=model_name,
-        task=task,
-        output=model_output_path,
-        opset=None,
-        device="cpu",
-        dtype=None,
-        pad_token_id=None,
-        trust_remote_code=False,
-        do_validation=True,
-        framework=None,
-        no_post_process=False,
-        model_kwargs=None,
-        atol=1e-5
-    )
+# def export_torch_to_onnx_optimum(model_name: str, model_output_path: str, task="causal-lm"):
+#     """
+#     This function takes a model as input (Huggingface name or local path), creates a folder, and saves the ONNX model there.
+#     It uses the optimum library.
+#     NOTE: For the llama model the maximum absolute difference of the logits is larger than 1e-5; it shouldn't matter much!
+#     Args:
+#         model_name: model path (local or HF name)
+#         model_output_path: output folder path
+#         task: model task
+#
+#     Returns:
+#         creates the onnx model in the output folder path
+#     """
+#     main_export(
+#         model_name_or_path=model_name,
+#         task=task,
+#         output=model_output_path,
+#         opset=None,
+#         device="cpu",
+#         dtype=None,
+#         pad_token_id=None,
+#         trust_remote_code=False,
+#         do_validation=True,
+#         framework=None,
+#         no_post_process=False,
+#         model_kwargs=None,
+#         atol=1e-5
+#     )