diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7ce42889..2d57149a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - os: [ubuntu-20.04] + os: [ubuntu-24.04] steps: - name: Check out repository uses: actions/checkout@v2 @@ -56,7 +56,7 @@ jobs: needs: python-setup strategy: matrix: - os: [ubuntu-20.04] + os: [ubuntu-24.04] pytest_target: [models, layers] name: test-${{ matrix.pytest_target }} steps: diff --git a/onnx2kerastl/operation_layers.py b/onnx2kerastl/operation_layers.py index 0aea8ddf..dd4455f8 100644 --- a/onnx2kerastl/operation_layers.py +++ b/onnx2kerastl/operation_layers.py @@ -231,14 +231,19 @@ def convert_reduce_min(node, params, layers, lambda_func, node_name, keras_name) axes = params.get("axes") elif len(node.input) == 2: axes = layers.get(node.input[1]) + else: + axes = None noop_with_empty_axes = bool(params.get("noop_with_empty_axes", False)) keepdims = params.get("keepdims", True) if noop_with_empty_axes and params.get("axes") is None: layers[node_name] = layers[node.input[0]] else: - layers[node_name] = tf_math_reduce_min(layers[node.input[0]], axis=axes, keepdims=keepdims, + if axes is None: + layers[node_name] = tf_math_reduce_min(layers[node.input[0]], keepdims=keepdims, + tf_name=f"{params['cleaned_name']}_min") + else: + layers[node_name] = tf_math_reduce_min(layers[node.input[0]], axis=axes, keepdims=keepdims, tf_name=f"{params['cleaned_name']}_min") - def convert_reduce_prod(node, params, layers, lambda_func, node_name, keras_name): """ diff --git a/poetry.lock b/poetry.lock index 7bd46317..3f542c50 100644 --- a/poetry.lock +++ b/poetry.lock @@ -19,7 +19,7 @@ version = "2.4.4" description = "Happy Eyeballs for asyncio" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "aiohappyeyeballs-2.4.4-py3-none-any.whl", hash = "sha256:a980909d50efcd44795c4afeca523296716d50cd756ddca6af8c65b996e27de8"}, {file = "aiohappyeyeballs-2.4.4.tar.gz", hash = "sha256:5fdd7d87889c63183afc18ce9271f9b0a7d32c2303e394468dd45d514a757745"}, @@ -31,7 +31,7 @@ version = "3.10.11" description = "Async http client/server framework (asyncio)" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "aiohttp-3.10.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5077b1a5f40ffa3ba1f40d537d3bec4383988ee51fbba6b74aa8fb1bc466599e"}, {file = "aiohttp-3.10.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8d6a14a4d93b5b3c2891fca94fa9d41b2322a68194422bef0dd5ec1e57d7d298"}, @@ -144,7 +144,7 @@ version = "1.3.1" description = "aiosignal: a list of registered asynchronous callbacks" optional = false python-versions = ">=3.7" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, @@ -176,7 +176,7 @@ version = "5.0.1" description = "Timeout context manager for asyncio programs" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "async_timeout-5.0.1-py3-none-any.whl", hash = "sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c"}, {file = "async_timeout-5.0.1.tar.gz", hash = "sha256:d9321a7a3d5a6a5e187e824d2fa0793ce379a202935782d555d6e9d2735677d3"}, @@ -188,7 +188,7 @@ version = 
"25.1.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "attrs-25.1.0-py3-none-any.whl", hash = "sha256:c75a69e28a550a7e93789579c22aa26b0f5b83b75dc4e08fe092980051e1090a"}, {file = "attrs-25.1.0.tar.gz", hash = "sha256:1c97078a80c814273a76b2a298a932eb681c87415c11dee0a6921de7f1b02c3e"}, @@ -284,6 +284,7 @@ files = [ {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"}, {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"}, ] +markers = {main = "platform_machine == \"x86_64\" or platform_machine == \"arm64\""} [[package]] name = "cffi" @@ -466,6 +467,7 @@ files = [ {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"}, {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"}, ] +markers = {main = "platform_machine == \"x86_64\" or platform_machine == \"arm64\""} [[package]] name = "colorama" @@ -486,7 +488,7 @@ version = "15.0.1" description = "Colored terminal output for Python's logging module" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934"}, {file = "coloredlogs-15.0.1.tar.gz", hash = "sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0"}, @@ -504,7 +506,7 @@ version = "2.14.4" description = "HuggingFace community-driven open-source library of datasets" optional = false python-versions = ">=3.8.0" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "datasets-2.14.4-py3-none-any.whl", hash = "sha256:29336bd316a7d827ccd4da2236596279b20ca2ac78f64c04c9483da7cbc2459b"}, {file = "datasets-2.14.4.tar.gz", hash = "sha256:ef29c2b5841de488cd343cfc26ab979bff77efa4d2285af51f1ad7db5c46a83b"}, @@ -559,7 +561,7 @@ version = "0.3.7" description = "serialize all of Python" optional = false python-versions = ">=3.7" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "dill-0.3.7-py3-none-any.whl", hash = "sha256:76b122c08ef4ce2eedcd4d1abd8e641114bfc6c2867f49f3c41facf65bf19f5e"}, {file = "dill-0.3.7.tar.gz", hash = "sha256:cc1c8b182eb3013e24bd475ff2e9295af86c1a38eb1aff128dac8962a9ce3c03"}, @@ -589,7 +591,7 @@ version = "3.16.1" description = "A platform independent file lock." 
optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, @@ -619,7 +621,7 @@ version = "1.5.0" description = "A list-like structure which implements collections.abc.MutableSequence" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5b6a66c18b5b9dd261ca98dffcb826a525334b2f29e7caa54e182255c5f6a65a"}, {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d1b3eb7b05ea246510b43a7e53ed1653e55c2121019a97e60cad7efb881a97bb"}, @@ -721,7 +723,7 @@ version = "2025.2.0" description = "File-system specification" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "fsspec-2025.2.0-py3-none-any.whl", hash = "sha256:9de2ad9ce1f85e1931858535bc882543171d197001a0a5eb2ddc04f1781ab95b"}, {file = "fsspec-2025.2.0.tar.gz", hash = "sha256:1c24b16eaa0a1798afa0337aa0db9b256718ab2a89c425371f5628d22c3b6afd"}, @@ -968,7 +970,7 @@ version = "0.28.1" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "huggingface_hub-0.28.1-py3-none-any.whl", hash = "sha256:aa6b9a3ffdae939b72c464dbb0d7f99f56e649b55c3d52406f49e0a5a620c0a7"}, {file = "huggingface_hub-0.28.1.tar.gz", hash = "sha256:893471090c98e3b6efbdfdacafe4052b20b84d59866fb6f54c33d9af18c303ae"}, @@ -1003,7 +1005,7 @@ version = "10.0" description = "Human friendly output for text interfaces using Python" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "humanfriendly-10.0-py2.py3-none-any.whl", hash = "sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477"}, {file = "humanfriendly-10.0.tar.gz", hash = "sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc"}, @@ -1023,6 +1025,7 @@ files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] +markers = {main = "platform_machine == \"x86_64\" or platform_machine == \"arm64\""} [package.extras] all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] @@ -1410,7 +1413,7 @@ version = "1.3.0" description = "Python library for arbitrary-precision floating-point arithmetic" optional = false python-versions = "*" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, @@ -1502,7 +1505,7 @@ version = "6.1.0" description = "multidict implementation" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"}, {file = 
"multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"}, @@ -1607,7 +1610,7 @@ version = "0.70.15" description = "better multiprocessing and multithreading in Python" optional = false python-versions = ">=3.7" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "multiprocess-0.70.15-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:aa36c7ed16f508091438687fe9baa393a7a8e206731d321e443745e743a0d4e5"}, {file = "multiprocess-0.70.15-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:20e024018c46d0d1602024c613007ac948f9754659e3853b0aa705e83f6931d8"}, @@ -1866,53 +1869,6 @@ files = [ {file = "opt_einsum-3.4.0.tar.gz", hash = "sha256:96ca72f1b886d148241348783498194c577fa30a8faac108586b14f1ba4473ac"}, ] -[[package]] -name = "optimum" -version = "1.23.3" -description = "Optimum Library is an extension of the Hugging Face Transformers library, providing a framework to integrate third-party libraries from Hardware Partners and interface with their specific functionality." -optional = false -python-versions = ">=3.7.0" -groups = ["main"] -files = [ - {file = "optimum-1.23.3-py3-none-any.whl", hash = "sha256:ac34b497310e74e919e8eb3bc01cfea48bca304ade3e3ce8a7707d125120001a"}, - {file = "optimum-1.23.3.tar.gz", hash = "sha256:2089bd73d1232686473a80effd53800f8a8c385c02126e80d35c07227c1b9bf5"}, -] - -[package.dependencies] -coloredlogs = "*" -datasets = "*" -huggingface-hub = ">=0.8.0" -numpy = "*" -packaging = "*" -sympy = "*" -torch = ">=1.11" -transformers = ">=4.29" - -[package.extras] -amd = ["optimum-amd"] -benchmark = ["evaluate (>=0.2.0)", "optuna", "scikit-learn", "seqeval", "torchvision", "tqdm"] -dev = ["Pillow", "accelerate", "black (>=23.1,<24.0)", "diffusers (>=0.17.0)", "einops", "parameterized", "pytest (<=8.0.0)", "pytest-xdist", "requests", "rjieba", "ruff (==0.1.5)", "sacremoses", "scikit-learn", "sentencepiece", "timm", "torchaudio", "torchvision"] -diffusers = ["diffusers"] -doc-build = ["accelerate"] -exporters = ["onnx", "onnxruntime", "timm", "transformers (<4.47.0)"] -exporters-gpu = ["onnx", "onnxruntime-gpu", "timm", "transformers (<4.47.0)"] -exporters-tf = ["datasets (<=2.16)", "h5py", "numpy (<1.24.0)", "onnx", "onnxruntime", "tensorflow (>=2.4,<=2.12.1)", "tf2onnx", "timm", "transformers (>=4.26,<4.38)"] -furiosa = ["optimum-furiosa"] -graphcore = ["optimum-graphcore"] -habana = ["optimum-habana", "transformers (>=4.45.0,<4.46.0)"] -intel = ["optimum-intel (>=1.18.0)"] -ipex = ["optimum-intel[ipex] (>=1.18.0)"] -neural-compressor = ["optimum-intel[neural-compressor] (>=1.18.0)"] -neuron = ["optimum-neuron[neuron] (>=0.0.20)", "transformers (>=4.36.2,<4.42.0)"] -neuronx = ["optimum-neuron[neuronx] (>=0.0.20)", "transformers (>=4.36.2,<4.42.0)"] -nncf = ["optimum-intel[nncf] (>=1.18.0)"] -onnxruntime = ["datasets (>=1.2.1)", "evaluate", "onnx", "onnxruntime (>=1.11.0)", "protobuf (>=3.20.1)", "transformers (<4.47.0)"] -onnxruntime-gpu = ["accelerate", "datasets (>=1.2.1)", "evaluate", "onnx", "onnxruntime-gpu (>=1.11.0)", "protobuf (>=3.20.1)", "transformers (<4.47.0)"] -openvino = ["optimum-intel[openvino] (>=1.18.0)"] -quality = ["black (>=23.1,<24.0)", "ruff (==0.1.5)"] -quanto = ["optimum-quanto (>=0.2.4)"] -tests = ["Pillow", "accelerate", "diffusers (>=0.17.0)", "einops", "parameterized", "pytest (<=8.0.0)", "pytest-xdist", "requests", "rjieba", "sacremoses", "scikit-learn", "sentencepiece", "timm", "torchaudio", "torchvision"] - [[package]] name = "packaging" version = 
"24.2" @@ -1924,6 +1880,7 @@ files = [ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, ] +markers = {main = "platform_machine == \"x86_64\" or platform_machine == \"arm64\""} [[package]] name = "pandas" @@ -1931,7 +1888,7 @@ version = "2.0.3" description = "Powerful data structures for data analysis, time series, and statistics" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "pandas-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4c7c9f27a4185304c7caf96dc7d91bc60bc162221152de697c98eb0b2648dd8"}, {file = "pandas-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f167beed68918d62bffb6ec64f2e1d8a7d297a038f86d4aed056b9493fca407f"}, @@ -2171,7 +2128,7 @@ version = "0.2.0" description = "Accelerated property cache" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "propcache-0.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c5869b8fd70b81835a6f187c5fdbe67917a04d7e52b6e7cc4e5fe39d55c39d58"}, {file = "propcache-0.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:952e0d9d07609d9c5be361f33b0d6d650cd2bae393aabb11d9b719364521984b"}, @@ -2369,7 +2326,7 @@ version = "17.0.0" description = "Python library for Apache Arrow" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "pyarrow-17.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a5c8b238d47e48812ee577ee20c9a2779e6a5904f1708ae240f53ecbee7c9f07"}, {file = "pyarrow-17.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db023dc4c6cae1015de9e198d41250688383c3f9af8f565370ab2b4cb5f62655"}, @@ -2462,7 +2419,7 @@ version = "3.5.4" description = "A python implementation of GNU readline." optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] markers = "sys_platform == \"win32\"" files = [ {file = "pyreadline3-3.5.4-py3-none-any.whl", hash = "sha256:eaf8e6cc3c49bcccf145fc6067ba8643d1df34d604a1ec0eccbf7a18e6d3fae6"}, @@ -2516,7 +2473,7 @@ version = "2025.1" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "pytz-2025.1-py2.py3-none-any.whl", hash = "sha256:89dd22dca55b46eac6eda23b2d72721bf1bdfef212645d81513ef5d03038de57"}, {file = "pytz-2025.1.tar.gz", hash = "sha256:c2db42be2a2518b28e65f9207c4d05e6ff547d1efa4086469ef855e4ab70178e"}, @@ -2620,7 +2577,7 @@ version = "2024.11.6" description = "Alternative regular expression module, to replace re." 
optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, @@ -2729,6 +2686,7 @@ files = [ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, ] +markers = {main = "platform_machine == \"x86_64\" or platform_machine == \"arm64\""} [package.dependencies] certifi = ">=2017.4.17" @@ -2800,7 +2758,7 @@ version = "0.5.2" description = "" optional = false python-versions = ">=3.7" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "safetensors-0.5.2-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:45b6092997ceb8aa3801693781a71a99909ab9cc776fbc3fa9322d29b1d3bef2"}, {file = "safetensors-0.5.2-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:6d0d6a8ee2215a440e1296b843edf44fd377b055ba350eaba74655a2fe2c4bae"}, @@ -3084,7 +3042,7 @@ version = "1.13.3" description = "Computer algebra system (CAS) in Python" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "sympy-1.13.3-py3-none-any.whl", hash = "sha256:54612cf55a62755ee71824ce692986f23c88ffa77207b30c1368eda4a7060f73"}, {file = "sympy-1.13.3.tar.gz", hash = "sha256:b27fd2c6530e0ab39e275fc9b683895367e51d5da91baa8d3d64db2565fec4d9"}, @@ -3380,7 +3338,7 @@ version = "0.20.3" description = "" optional = false python-versions = ">=3.7" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "tokenizers-0.20.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:31ccab28dbb1a9fe539787210b0026e22debeab1662970f61c2d921f7557f7e4"}, {file = "tokenizers-0.20.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c6361191f762bda98c773da418cf511cbaa0cb8d0a1196f16f8c0119bde68ff8"}, @@ -3552,7 +3510,7 @@ version = "1.12.1" description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" optional = false python-versions = ">=3.7.0" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "torch-1.12.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:9c038662db894a23e49e385df13d47b2a777ffd56d9bcd5b832593fab0a7e286"}, {file = "torch-1.12.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:4e1b9c14cf13fd2ab8d769529050629a0e68a6fc5cb8e84b4a3cc1dd8c4fe541"}, @@ -3646,7 +3604,7 @@ version = "4.46.3" description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" optional = false python-versions = ">=3.8.0" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "transformers-4.46.3-py3-none-any.whl", hash = "sha256:a12ef6f52841fd190a3e5602145b542d03507222f2c64ebb7ee92e8788093aef"}, {file = "transformers-4.46.3.tar.gz", hash = "sha256:8ee4b3ae943fe33e82afff8e837f4b052058b07ca9be3cb5b729ed31295f72cc"}, @@ -3753,7 +3711,7 @@ version = "2025.1" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "tzdata-2025.1-py2.py3-none-any.whl", hash = "sha256:7e127113816800496f027041c570f50bcd464a020098a3b6b199517772303639"}, {file = "tzdata-2025.1.tar.gz", hash = "sha256:24894909e88cdb28bd1636c6887801df64cb485bd593f2fd83ef29075a81d694"}, @@ 
-3914,7 +3872,7 @@ version = "3.5.0" description = "Python binding for xxHash" optional = false python-versions = ">=3.7" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "xxhash-3.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ece616532c499ee9afbb83078b1b952beffef121d989841f7f4b3dc5ac0fd212"}, {file = "xxhash-3.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3171f693dbc2cef6477054a665dc255d996646b4023fe56cb4db80e26f4cc520"}, @@ -4063,7 +4021,7 @@ version = "1.15.2" description = "Yet another URL library" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "yarl-1.15.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e4ee8b8639070ff246ad3649294336b06db37a94bdea0d09ea491603e0be73b8"}, {file = "yarl-1.15.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a7cf963a357c5f00cb55b1955df8bbe68d2f2f65de065160a1c26b85a1e44172"}, @@ -4194,4 +4152,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.1" python-versions = ">=3.8, <3.11" -content-hash = "f40eb267f2073255d1a5753db4ec40bf91f459bf1b83140f4a63b89d17c13eb8" +content-hash = "732146f5dfc8becf5f8ab348e666483456792ff1d127555f703768260a01d63b" diff --git a/pyproject.toml b/pyproject.toml index ca84608e..4f215a77 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "onnx2kerastl" -version = "0.0.165" +version = "0.0.168" description = "" authors = ["dorhar "] license = "MIT" @@ -17,7 +17,6 @@ fvcore = "^0.1.5.post20221221" boto3 = "^1.24.22" tensorflow-io-gcs-filesystem = "0.34.0" keras-data-format-converter = "0.1.22" -optimum = "1.23.3" [tool.poetry.dev-dependencies] pytest = "^7.1.2" diff --git a/test/models/private_tests/test_dinov2.py b/test/models/private_tests/test_dinov2.py new file mode 100644 index 00000000..1498d4b5 --- /dev/null +++ b/test/models/private_tests/test_dinov2.py @@ -0,0 +1,34 @@ +import numpy as np +import onnx +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last +import onnxruntime as ort +import torch +import pytest +from test.models.private_tests.aws_utils import aws_s3_download + + +@pytest.mark.parametrize('aws_s3_download', [["dinov2/", "dinov2/", False]], indirect=True) +def test_dinov2(aws_s3_download): + # This is commented out in case we'll upgrade python + # batch_size = 1 + # dinov2_vits14 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14') + # wm = wrapper_model(dinov2_vits14).to('cpu') + # wm.eval() + # dummy_input = torch.FloatTensor(np.random.uniform(0, 1, (batch_size, 3, 224, 224))) + # torch.onnx.export(wm, dummy_input, "dino-2-test.onnx", input_names=['img'], + # output_names=['vit_out']) + np_input = list(np.random.rand(1, 3, 224, 224)) + onnx_path = f'{aws_s3_download}/dino-2-test.onnx' + onnx_model = onnx.load(onnx_path) + keras_model = onnx_to_keras(onnx_model, ['img', 'masks'], allow_partial_compilation=False) + flipped_model = convert_channels_first_to_last(keras_model.converted_model, should_transform_inputs_and_outputs=False) + ort_session = ort.InferenceSession(onnx_path) + keras_res = flipped_model(np.array(np_input)) + res = ort_session.run( + ['vit_out'], + input_feed={"img": np.array(np_input).astype(np.float32)} + ) + t_mean, t_max = (res[0]-keras_res).__abs__().numpy().mean(), (res[0]-keras_res).__abs__().numpy().max() + assert t_mean < 5e-2 + assert t_max < 0.4 diff --git a/test/models/test_dinov2.py b/test/models/test_dinov2.py deleted file mode 100644 index 32f0e4ef..00000000 --- 
a/test/models/test_dinov2.py +++ /dev/null @@ -1,39 +0,0 @@ -import numpy as np -import onnx -from onnx2kerastl import onnx_to_keras -from keras_data_format_converter import convert_channels_first_to_last -import onnxruntime as ort -import torch - - -class wrapper_model(torch.nn.Module): - def __init__(self, model): - super().__init__() - self.inner_model = model - - def forward(self, tensor): - ff = self.inner_model(tensor) - return ff - - -def test_dinov2(): - batch_size = 1 - dinov2_vits14 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14') - wm = wrapper_model(dinov2_vits14).to('cpu') - wm.eval() - dummy_input = torch.FloatTensor(np.random.uniform(0, 1, (batch_size, 3, 224, 224))) - torch.onnx.export(wm, dummy_input, "dino-2-test.onnx", input_names=['img'], - output_names=['vit_out']) - np_input = list(np.random.rand(1, 3, 224, 224)) - onnx_model = onnx.load('dino-2-test.onnx') - keras_model = onnx_to_keras(onnx_model, ['img', 'masks'], allow_partial_compilation=False) - flipped_model = convert_channels_first_to_last(keras_model.converted_model, should_transform_inputs_and_outputs=False) - ort_session = ort.InferenceSession('dino-2-test.onnx') - keras_res = flipped_model(np.array(np_input)) - res = ort_session.run( - ['vit_out'], - input_feed={"img": np.array(np_input).astype(np.float32)} - ) - t_mean, t_max = (res[0]-keras_res).__abs__().numpy().mean(), (res[0]-keras_res).__abs__().numpy().max() - assert t_mean < 5e-2 - assert t_max < 0.4 diff --git a/test/models/test_llama_sentiment_analysis.py b/test/models/test_llama_sentiment_analysis.py index ffec36f1..addfd247 100644 --- a/test/models/test_llama_sentiment_analysis.py +++ b/test/models/test_llama_sentiment_analysis.py @@ -8,7 +8,7 @@ from onnx2kerastl import onnx_to_keras from keras_data_format_converter import convert_channels_first_to_last from onnx2kerastl.customonnxlayer import onnx_custom_objects_map -from test.utils import export_torch_to_onnx_optimum +#from test.utils import export_torch_to_onnx_optimum @pytest.mark.skip(reason="Fails on CI but works locally (might be too big?)") diff --git a/test/utils.py b/test/utils.py index f04fff55..b2a4d387 100644 --- a/test/utils.py +++ b/test/utils.py @@ -7,7 +7,7 @@ from onnx2kerastl import onnx_to_keras from onnx2kerastl.utils import check_torch_keras_error -from optimum.exporters.onnx import main_export +#from optimum.exporters.onnx import main_export NP_SEED = 42 @@ -58,31 +58,31 @@ def is_lambda_layers_exist(model: Model): return any(isinstance(layer, Lambda) for layer in model.layers) -def export_torch_to_onnx_optimum(model_name: str, model_output_path: str, task="causal-lm"): - """ - this function get a model as an input (Hugginface or local path), creates a folder and save the onnx model as output. - it uses the optimum library. - NOTE: For llama model the maximum absolute difference of the logits larget than 1e-5, it shouldnt be that important! 
- Args: - model_name: model path (local or HF name) - model_output_name: output folder path - task: model task - - Returns: - creates the onnx model in the output folder path - """ - main_export( - model_name_or_path=model_name, - task=task, - output=model_output_path, - opset=None, - device="cpu", - dtype=None, - pad_token_id=None, - trust_remote_code=False, - do_validation=True, - framework=None, - no_post_process=False, - model_kwargs=None, - atol = 1e-5 - ) \ No newline at end of file +# def export_torch_to_onnx_optimum(model_name: str, model_output_path: str, task="causal-lm"): +# """ +# This function gets a model as input (Hugging Face name or local path), creates a folder, and saves the ONNX model as output. +# It uses the optimum library. +# NOTE: For Llama models the maximum absolute difference of the logits is larger than 1e-5; this shouldn't matter much. +# Args: +# model_name: model path (local or HF name) +# model_output_path: output folder path +# task: model task +# +# Returns: +# creates the ONNX model in the output folder path +# """ +# main_export( +# model_name_or_path=model_name, +# task=task, +# output=model_output_path, +# opset=None, +# device="cpu", +# dtype=None, +# pad_token_id=None, +# trust_remote_code=False, +# do_validation=True, +# framework=None, +# no_post_process=False, +# model_kwargs=None, +# atol = 1e-5 +# ) \ No newline at end of file
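
For reference, a minimal sketch of the ReduceMin behavior that the convert_reduce_min change above handles. Plain tf.math.reduce_min stands in for the converter's tf_math_reduce_min wrapper (an assumption for illustration; the wrapper additionally threads a tf_name through):

import numpy as np
import tensorflow as tf

x = tf.constant(np.arange(12, dtype=np.float32).reshape(3, 4))

# axes resolved from the node attribute or the optional second input:
per_row = tf.math.reduce_min(x, axis=1, keepdims=True)   # shape (3, 1)

# no axes from either source: the patch sets axes = None and calls the
# reduction without an axis argument, so it runs over all dimensions.
# Previously this path could leave `axes` unset before it was used.
global_min = tf.math.reduce_min(x, keepdims=True)        # shape (1, 1)

print(per_row.numpy().ravel())   # [0. 4. 8.]
print(global_min.numpy())        # [[0.]]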