From 416d2f451bb2c0753ab73d0e33581c00741b2a53 Mon Sep 17 00:00:00 2001 From: liqun Fu Date: Wed, 3 Apr 2024 17:04:15 +0000 Subject: [PATCH 1/9] fix a skipped shape infer code (#6049) ### Description ### Motivation and Context Signed-off-by: Liqun Fu (cherry picked from commit fa0b8999bd4d0c7e4c478b5c8cb47b7ef5ab951a) --- onnx/shape_inference/implementation.cc | 36 +++++++++++++------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/onnx/shape_inference/implementation.cc b/onnx/shape_inference/implementation.cc index fab1faf27e8..8723dcd4fb3 100644 --- a/onnx/shape_inference/implementation.cc +++ b/onnx/shape_inference/implementation.cc @@ -488,29 +488,29 @@ class ShapeInferenceImplBase { ProcessCall(n, *(iter->second), ctx); } else { has_unsupported_op = true; + return; } } else { has_unsupported_op = true; + return; } - if (!has_unsupported_op) { - for (int i = 0; i < n.output_size(); ++i) { - // skip type and shape propagation for missing optional outputs. - if (!n.output(i).empty()) - UpdateType(n.output(i), ctx.getOutputType(i)); - } - // Constant values are tracked to improve inference/checking for subsequent nodes. - ProcessConstant(n); - // If data-propagation is enabled, partial-evaluation (aka data-propagation) is performed - // to improve inference/checking for subsequent nodes. - if (options.enable_data_propagation && schema && schema->has_data_propagation_function()) { - if (generated_shape_data_by_name == nullptr) { - fail_shape_inference( - "Container for generated shape data cannot be nullptr when enable_data_propagation option is set."); - } - DataPropagationContextImpl data_propagation_ctx( - n, value_types_by_name, input_data_by_name, *generated_shape_data_by_name); - schema->GetDataPropagationFunction()(data_propagation_ctx); + for (int i = 0; i < n.output_size(); ++i) { + // skip type and shape propagation for missing optional outputs. + if (!n.output(i).empty()) + UpdateType(n.output(i), ctx.getOutputType(i)); + } + // Constant values are tracked to improve inference/checking for subsequent nodes. + ProcessConstant(n); + // If data-propagation is enabled, partial-evaluation (aka data-propagation) is performed + // to improve inference/checking for subsequent nodes. + if (options.enable_data_propagation && schema && schema->has_data_propagation_function()) { + if (generated_shape_data_by_name == nullptr) { + fail_shape_inference( + "Container for generated shape data cannot be nullptr when enable_data_propagation option is set."); } + DataPropagationContextImpl data_propagation_ctx( + n, value_types_by_name, input_data_by_name, *generated_shape_data_by_name); + schema->GetDataPropagationFunction()(data_propagation_ctx); } } ONNX_CATCH(const ONNX_NAMESPACE::InferenceError& ex) { From 77b5255a1ee274ef18dae1715e5e31dc401853b7 Mon Sep 17 00:00:00 2001 From: Charles Volzka <42243335+cjvolzka@users.noreply.github.com> Date: Fri, 12 Apr 2024 11:43:15 -0500 Subject: [PATCH 2/9] Prevent crash on import after GCC 8 builds (#6048) ### Description Possible fix for https://github.com/onnx/onnx/issues/6047 based on https://stackoverflow.com/questions/33149878/experimentalfilesystem-linker-error/33159746#33159746 ### Motivation and Context I'm not a cmake expert so there may be a better way to do this. This did allow me to build ONNX 1.16 on RHEL 8. --------- Signed-off-by: Charles Volzka Signed-off-by: Charles Volzka <42243335+cjvolzka@users.noreply.github.com> Co-authored-by: Thiago Crepaldi Co-authored-by: G. 
Ramalingam (cherry picked from commit 3f24ef33ab6bd52c2a3c62846b31cf07b2e2cbb4) --- CMakeLists.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6d7ca846cf1..10a45e0c51f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -590,6 +590,11 @@ if(BUILD_ONNX_PYTHON) target_link_libraries(onnx_cpp2py_export PRIVATE "-Wl,--whole-archive" $ "-Wl,--no-whole-archive") + # Prevent "undefined symbol: _ZNSt10filesystem7__cxx114path14_M_split_cmptsEv" + # (std::filesystem::__cxx11::path::_M_split_cmpts()) on gcc 8 + if (CMAKE_CXX_STANDARD EQUAL 17 AND CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 9.0) + target_link_libraries(onnx_cpp2py_export PRIVATE "-lstdc++fs") + endif() set_target_properties(onnx_cpp2py_export PROPERTIES LINK_FLAGS "-Wl,--exclude-libs,ALL") endif() From fc1fa51c1673c2ba773a35c10d623ac0f9275ca2 Mon Sep 17 00:00:00 2001 From: "G. Ramalingam" Date: Tue, 23 Apr 2024 09:49:48 -0700 Subject: [PATCH 3/9] Add missing shape inference check for DequantizeLinear (#6080) Add missing `hasInputShape` condition in shape-inference for DequantizeLinear ... strangely missing in last two versions, but present in earlier version. Signed-off-by: G. Ramalingam (cherry picked from commit 5ecc0a9801b724bd71859ceacd1562cfed4c415c) --- onnx/defs/quantization/defs.cc | 3 +++ onnx/defs/quantization/old.cc | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/onnx/defs/quantization/defs.cc b/onnx/defs/quantization/defs.cc index 70b4a4dbab9..98c1154532d 100644 --- a/onnx/defs/quantization/defs.cc +++ b/onnx/defs/quantization/defs.cc @@ -200,6 +200,9 @@ ONNX_OPERATOR_SET_SCHEMA( .SetDoc(DequantizeLinear_ver21_doc) .TypeAndShapeInferenceFunction([](ONNX_NAMESPACE::InferenceContext& ctx) { propagateElemTypeFromInputToOutput(ctx, 1, 0); + if (!hasInputShape(ctx, 0)) { + return; + } auto& input_shape = getInputShape(ctx, 0); updateOutputShape(ctx, 0, input_shape); })); diff --git a/onnx/defs/quantization/old.cc b/onnx/defs/quantization/old.cc index 3f2d63843bb..d2f7cfd8d4a 100644 --- a/onnx/defs/quantization/old.cc +++ b/onnx/defs/quantization/old.cc @@ -130,6 +130,9 @@ ONNX_OPERATOR_SET_SCHEMA( .SetDoc(DequantizeLinear_ver19_doc) .TypeAndShapeInferenceFunction([](ONNX_NAMESPACE::InferenceContext& ctx) { propagateElemTypeFromInputToOutput(ctx, 1, 0); + if (!hasInputShape(ctx, 0)) { + return; + } auto& input_shape = getInputShape(ctx, 0); updateOutputShape(ctx, 0, input_shape); })); @@ -181,7 +184,6 @@ ONNX_OPERATOR_SET_SCHEMA( if (!hasInputShape(ctx, 0)) { return; } - auto& input_shape = getInputShape(ctx, 0); updateOutputShape(ctx, 0, input_shape); })); From 1f5283be4155d98305b72932655fdfda6fed9e05 Mon Sep 17 00:00:00 2001 From: Charles Volzka <42243335+cjvolzka@users.noreply.github.com> Date: Tue, 30 Apr 2024 08:35:05 -0500 Subject: [PATCH 4/9] Prepare for rel-1.16.1 branch (#6106) ### Description Prepare for rel-1.16.1 branch ### Motivation and Context Prepare ONNX 1.16.1 release Signed-off-by: Charles Volzka (cherry picked from commit d6f87121ba256ac6cc4d1da0463c300c278339d2) --- onnx/common/version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/onnx/common/version.h b/onnx/common/version.h index 088ed7c315b..4943afceb23 100644 --- a/onnx/common/version.h +++ b/onnx/common/version.h @@ -9,6 +9,6 @@ namespace ONNX_NAMESPACE { // Represents the most recent release version. Updated with every release. 
-constexpr const char* LAST_RELEASE_VERSION = "1.16.0"; +constexpr const char* LAST_RELEASE_VERSION = "1.16.1"; } // namespace ONNX_NAMESPACE From 1fc5eb8d3b5e28add5c486b43cc14ed7f8eaab22 Mon Sep 17 00:00:00 2001 From: Matthieu Darbois Date: Wed, 1 May 2024 22:33:13 +0200 Subject: [PATCH 5/9] chore(ci): build and test macOS universal2 wheels on macOS arm64 (#6117) ### Description Build and test macOS universal2 wheels on macOS arm64 Removes x86_64 wheels. ### Motivation and Context `macos-latest` runners moved to macOS 14 running on arm64 hardware. This is breaking the macOS release pipeline. This PR rewrites the macOS release pipeline in 2 parts: - Build universal2 wheels on `macos-latest` - Test both arm64 & x86_64 parts of the universal2 wheel on `macos-latest` The x86_64 wheels have been removed as the universal2 ones can be used on x86_64. close #6105 --------- Signed-off-by: Matthieu Darbois Signed-off-by: Justin Chu Co-authored-by: Justin Chu (cherry picked from commit 152988042cd8d1151883fa82fecf126b27f61e4d) --- .github/workflows/release_mac.yml | 85 +++++++++++++++++++------------ docs/CIPipelines.md | 2 +- setup.py | 4 +- 3 files changed, 55 insertions(+), 36 deletions(-) diff --git a/.github/workflows/release_mac.yml b/.github/workflows/release_mac.yml index a4878f9e70b..64701ee8afb 100644 --- a/.github/workflows/release_mac.yml +++ b/.github/workflows/release_mac.yml @@ -10,9 +10,9 @@ on: branches: [main, rel-*] workflow_dispatch: -# Use MACOSX_DEPLOYMENT_TARGET=10.15 to produce compatible wheel +# Use MACOSX_DEPLOYMENT_TARGET=11.0 to produce compatible wheel env: - MACOSX_DEPLOYMENT_TARGET: 10.15 + MACOSX_DEPLOYMENT_TARGET: 11.0 permissions: contents: read @@ -24,8 +24,6 @@ jobs: strategy: matrix: python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] - host-architecture: ['x64'] - target-architecture: ['x86_64', 'universal2'] steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -39,7 +37,6 @@ jobs: uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 with: python-version: ${{ matrix.python-version }} - architecture: ${{ matrix.host-architecture }} - name: Install Python dependencies run: | @@ -51,12 +48,7 @@ jobs: CC: "clang" CXX: "clang++" ONNX_ML: 1 - CMAKE_OSX_ARCHITECTURES: ${{ matrix.target-architecture == 'x86_64' && 'x86_64' || 'arm64;x86_64' }} - # Currently GitHub Action agent is using macos-11, we rename the wheels - # to use the MACOSX_DEPLOYMENT_TARGET - # Rename e.g. 
onnx-1.15.0-cp38-cp38-macosx_11_0_x86_64.whl - # to onnx-1.15.0-cp38-cp38-macosx_10_15_universal2.whl - ONNX_WHEEL_PLATFORM_NAME: macosx_10_15_${{ matrix.target-architecture }} + CMAKE_OSX_ARCHITECTURES: "arm64;x86_64" CMAKE_ARGS: "-DONNX_USE_LITE_PROTO=ON" run: | # Install Protobuf from source @@ -68,47 +60,73 @@ jobs: fi python -m build --wheel - for file in dist/*.whl; do - python -m pip install --upgrade $file; - done - - name: Test the installed wheel + - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + with: + name: macos-wheel-${{ matrix.python-version }} + path: dist + + test: + needs: build + runs-on: macos-latest + strategy: + matrix: + python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] + target-architecture: ['x86_64', 'arm64'] + + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Python dependencies run: | - pytest + arch -${{ matrix.target-architecture }} python -m pip install -q --upgrade pip + arch -${{ matrix.target-architecture }} python -m pip install -q -r requirements-release.txt - - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + - uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7 with: - name: wheels + name: macos-wheel-${{ matrix.python-version }} path: dist + - name: Test the wheel + run: | + arch -${{ matrix.target-architecture }} python -m pip install --upgrade dist/*.whl + arch -${{ matrix.target-architecture }} pytest + - name: Upload wheel to PyPI weekly - if: github.event_name == 'schedule' # Only triggered by weekly event + if: github.event_name == 'schedule' && matrix.target-architecture == 'arm64' # Only triggered by weekly event run: | twine upload --verbose dist/*.whl --repository-url https://upload.pypi.org/legacy/ -u ${{ secrets.ONNXWEEKLY_USERNAME }} -p ${{ secrets.ONNXWEEKLY_TOKEN }} - name: Verify ONNX with the latest numpy if: ${{ always() }} run: | - python -m pip uninstall -y numpy onnx && python -m pip install numpy - for file in dist/*.whl; do python -m pip install --upgrade $file; done - pytest + arch -${{ matrix.target-architecture }} python -m pip uninstall -y numpy onnx + arch -${{ matrix.target-architecture }} python -m pip install numpy + arch -${{ matrix.target-architecture }} python -m pip install --upgrade dist/*.whl + arch -${{ matrix.target-architecture }} pytest - name: Verify ONNX with the latest protobuf if: ${{ always() }} run: | - python -m pip uninstall -y protobuf onnx && python -m pip install protobuf - for file in dist/*.whl; do python -m pip install --upgrade $file; done - pytest + arch -${{ matrix.target-architecture }} python -m pip uninstall -y protobuf onnx + arch -${{ matrix.target-architecture }} python -m pip install protobuf + arch -${{ matrix.target-architecture }} python -m pip install --upgrade dist/*.whl + arch -${{ matrix.target-architecture }} pytest - name: Verify ONNX with the minimumly supported packages - if: ${{ always() }} + if: always() && (matrix.target-architecture == 'x86_64' || (matrix.python-version != '3.8' && matrix.python-version != '3.9')) run: | - python -m pip uninstall -y numpy protobuf onnx && python -m pip install -r requirements-min.txt - for file in dist/*.whl; do python -m pip install --upgrade $file; done - pytest + arch -${{ 
matrix.target-architecture }} python -m pip uninstall -y numpy protobuf onnx + arch -${{ matrix.target-architecture }} python -m pip install -r requirements-min.txt + arch -${{ matrix.target-architecture }} python -m pip install --upgrade dist/*.whl + arch -${{ matrix.target-architecture }} pytest # Only triggered by weekly event on certain CI - name: Build and upload source distribution to PyPI weekly - if: github.event_name == 'schedule' && matrix.python-version == '3.10' && matrix.target-architecture == 'x86_64' + if: github.event_name == 'schedule' && matrix.python-version == '3.10' && matrix.target-architecture == 'arm64' run: | # Build and upload source distribution to PyPI git clean -xdf @@ -125,9 +143,10 @@ jobs: - name: Verify ONNX with ONNX Runtime PyPI package if: matrix.python-version != '3.12' run: | - python -m pip uninstall -y protobuf numpy && python -m pip install -q -r requirements-release.txt - python -m pip install -q onnxruntime + arch -${{ matrix.target-architecture }} python -m pip uninstall -y protobuf numpy + arch -${{ matrix.target-architecture }} python -m pip install -q -r requirements-release.txt + arch -${{ matrix.target-architecture }} python -m pip install -q onnxruntime export ORT_MAX_IR_SUPPORTED_VERSION=9 export ORT_MAX_ML_OPSET_SUPPORTED_VERSION=3 export ORT_MAX_ONNX_OPSET_SUPPORTED_VERSION=20 - pytest + arch -${{ matrix.target-architecture }} pytest diff --git a/docs/CIPipelines.md b/docs/CIPipelines.md index 8f7a44b0cc6..89d3abfc79e 100644 --- a/docs/CIPipelines.md +++ b/docs/CIPipelines.md @@ -19,7 +19,7 @@ SPDX-License-Identifier: Apache-2.0 [WindowsRelease](/.github/workflows/release_win.yml) |
 • Main branch<br/> • Release branch<br/> • Weekly(1)<br/>|<br/> • Latest Windows<br/> • x86 and x64<br/> • ONNX_USE_LITE_PROTO=ON<br/> • ONNX_USE_PROTOBUF_SHARED_LIBS=OFF<br/> • ONNX_ML=1<br/> • ONNX_USE_MSVC_STATIC_RUNTIME=OFF<br/>|<br/> • Release Windows wheel<br/> • Release onnx-weekly package<br/> • Verify with different dependency versions - latest and min supported numpy version, latest and min supported protobuf version(2)<br/> • Verify ONNX with the latest [ONNX Runtime PyPI package](https://pypi.org/project/onnxruntime/)(3).<br/>|
 [LinuxRelease_aarch64](/.github/workflows/release_linux_aarch64.yml) |<br/> • Main branch<br/> • Release branch<br/> • Weekly<br/>|<br/> • Latest manylinux2014_aarch64<br/> • ONNX_USE_PROTOBUF_SHARED_LIBS=OFF<br/> • ONNX_ML=1<br/> • ONNX_USE_LITE_PROTO=ON<br/>|<br/> • Release Linux aarch64 wheel<br/> • Release onnx-weekly package<br/> • Verify with different dependency versions - latest numpy version, latest and min supported protobuf version<br/> • Verify ONNX with the latest ONNX Runtime PyPI package<br/>|
 [LinuxRelease_x86_64](/.github/workflows/release_linux_x86_64.yml) |<br/> • Main branch<br/> • Release branch<br/> • Weekly<br/>|<br/> • Latest LinuxRelease_x86_64<br/> • ONNX_USE_PROTOBUF_SHARED_LIBS=OFF<br/> • ONNX_ML=1<br/> • ONNX_USE_LITE_PROTO=ON<br/>|<br/> • Release Linux x86_64 wheel<br/> • Release onnx-weekly package<br/> • Test TEST_HUB=1(4)<br/> • Verify with different dependency versions - latest numpy version, latest and min supported protobuf version<br/> • Verify ONNX with the latest ONNX Runtime PyPI package.<br/>|
-[MacRelease](/.github/workflows/release_win.yml) |<br/> • Main branch<br/> • Release branch<br/> • Weekly<br/>|<br/> • macos-11<br/> • MACOSX_DEPLOYMENT_TARGET=10.12(5)<br/> • ONNX_USE_PROTOBUF_SHARED_LIBS=OFF<br/> • ONNX_ML=1<br/> • ONNX_USE_LITE_PROTO=ON<br/>|<br/> • Release Mac wheel<br/> • Release onnx-weekly package<br/> • Verify with different dependency versions - latest numpy version, latest and min supported protobuf version<br/> • Verify ONNX with the latest ONNX Runtime PyPI package.<br/> • Test source distribution generation<br/> • Test build with source distribution<br/> • Release onnx-weekly source distribution<br/>|
+[MacRelease](/.github/workflows/release_mac.yml) |<br/> • Main branch<br/> • Release branch<br/> • Weekly<br/>|<br/> • macos-latest<br/> • MACOSX_DEPLOYMENT_TARGET=11.0<br/> • ONNX_USE_PROTOBUF_SHARED_LIBS=OFF<br/> • ONNX_ML=1<br/> • ONNX_USE_LITE_PROTO=ON<br/>|<br/> • Release Mac wheel<br/> • Release onnx-weekly package<br/> • Verify with different dependency versions - latest numpy version, latest and min supported protobuf version<br/> • Verify ONNX with the latest ONNX Runtime PyPI package.<br/> • Test source distribution generation<br/> • Test build with source distribution<br/> • Release onnx-weekly source distribution<br/>|
 [Weekly CI with latest onnx.checker](/.github/workflows/weekly_mac_ci.yml) | weekly(6) |<br/> • macos-latest<br/> • MACOSX_DEPLOYMENT_TARGET=10.12<br/> • ONNX_USE_PROTOBUF_SHARED_LIBS=OFF<br/> • ONNX_ML=1<br/>|<br/> • Test latest ONNX checker<br/> • Test latest ONNX shape inference<br/> • With all models from [onnx/models](https://github.com/onnx/models)(7)<br/>|
 [Reuse](/.github/workflows/reuse.yml) | Every PR | |<br/> • Checks for Copyright and License header<br/> • More information can be found at: https://reuse.software/<br/> • If no license is to be added, or the checker does not recognize it, it must be configured under .reuse/dep5.<br/>|
 [Dependabot](/.github/dependabot.yml) |<br/> • Main branch<br/> • weekly<br/>| |<br/> • Create PRs for new dependency versions (will occur more often because e.g. GitHub actions are pinned to commit hashes due to security best practices and not just to a version number).<br/>
| diff --git a/setup.py b/setup.py index 541085cf1b0..eaf8b65284f 100644 --- a/setup.py +++ b/setup.py @@ -50,8 +50,8 @@ DEBUG = os.getenv("DEBUG", "0") == "1" COVERAGE = os.getenv("COVERAGE", "0") == "1" -# Customize the wheel plat-name, usually needed for MacOS builds. -# See usage in .github/workflows/release_mac.yml +# Customize the wheel plat-name; sometimes useful for MacOS builds. +# See https://github.com/onnx/onnx/pull/6117 ONNX_WHEEL_PLATFORM_NAME = os.getenv("ONNX_WHEEL_PLATFORM_NAME") ################################################################################ From 00f1891eb7f025a58a099ca251516b7d0b706703 Mon Sep 17 00:00:00 2001 From: Adrian Lizarraga Date: Fri, 3 May 2024 01:45:26 -0700 Subject: [PATCH 6/9] Fix input names for quantize/dequantize ONNX backend tests (#6122) ### Description https://github.com/onnx/onnx/issues/6123 The input pb files for the following ONNX backend node tests have incorrect names that do not match the ONNX model: - test_dequantizelinear_uint4 - test_dequantizelinear_int4 - test_quantizelinear_e4m3fn - test_quantizelinear_e5m2 - test_quantizelinear_uint4 - test_quantizelinear_int4 ### Motivation and Context The mismatch in input names causes issues when running these backend tests via ONNX Runtime. --------- Signed-off-by: Adrian Lizarraga (cherry picked from commit 3bddb4d5acef8e72e3012ca016d4a9ae7431932e) --- docs/Operators.md | 12 ++++++------ docs/TestCoverage.md | 12 ++++++------ onnx/backend/test/case/node/dequantizelinear.py | 4 ++-- onnx/backend/test/case/node/quantizelinear.py | 8 ++++---- .../test_data_set_0/input_2.pb | 3 +-- .../test_data_set_0/input_2.pb | 3 +-- .../test_data_set_0/input_2.pb | Bin 19 -> 21 bytes .../test_data_set_0/input_2.pb | Bin 19 -> 21 bytes .../test_data_set_0/input_2.pb | 3 +-- .../test_data_set_0/input_2.pb | 3 +-- 10 files changed, 22 insertions(+), 26 deletions(-) diff --git a/docs/Operators.md b/docs/Operators.md index 83fb2ba38b4..acee9b02f75 100644 --- a/docs/Operators.md +++ b/docs/Operators.md @@ -7681,7 +7681,7 @@ node = onnx.helper.make_node( # scalar zero point and scale x = make_tensor("x", TensorProto.INT4, [5], [0, 1, 7, -4, -8]) x_scale = np.float32(2) -x_zero_point = make_tensor("zero_point", TensorProto.INT4, (1,), [1]) +x_zero_point = make_tensor("x_zero_point", TensorProto.INT4, (1,), [1]) y = np.array([-2, 0, 12, -10, -18], dtype=np.float32) expect( @@ -7735,7 +7735,7 @@ node = onnx.helper.make_node( # scalar zero point and scale x = make_tensor("x", TensorProto.UINT4, [5], [0, 1, 7, 10, 15]) x_scale = np.float32(2) -x_zero_point = make_tensor("zero_point", TensorProto.UINT4, (1,), [1]) +x_zero_point = make_tensor("x_zero_point", TensorProto.UINT4, (1,), [1]) y = np.array([-2, 0, 12, 18, 28], dtype=np.float32) expect( @@ -20495,7 +20495,7 @@ node = onnx.helper.make_node( x = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32) y_scale = np.float32(2) -y_zero_point = make_tensor("zero_point", TensorProto.FLOAT8E4M3FN, [1], [0]) +y_zero_point = make_tensor("y_zero_point", TensorProto.FLOAT8E4M3FN, [1], [0]) y = make_tensor("y", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, 96]) expect( @@ -20521,7 +20521,7 @@ node = onnx.helper.make_node( x = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32) y_scale = np.float32(2) -y_zero_point = make_tensor("zero_point", TensorProto.FLOAT8E5M2, [1], [0.0]) +y_zero_point = make_tensor("y_zero_point", TensorProto.FLOAT8E5M2, [1], [0.0]) y = make_tensor("y", TensorProto.FLOAT8E5M2, [5], [0, 0.5, 1, 49152, 96]) expect( @@ -20620,7 
+20620,7 @@ x = np.array( y_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32) y_zero_point = make_tensor( - "zero_point", TensorProto.INT4, y_scale.shape, np.ones_like(y_scale) + "y_zero_point", TensorProto.INT4, y_scale.shape, np.ones_like(y_scale) ) y = make_tensor( "y", TensorProto.INT4, x.shape, [1, 2, 3, 5, -8, -6, 3, 4, 4, 5, 5, 7] @@ -20740,7 +20740,7 @@ x = np.array( y_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32) y_zero_point = make_tensor( - "zero_point", TensorProto.UINT4, y_scale.shape, np.ones_like(y_scale) + "y_zero_point", TensorProto.UINT4, y_scale.shape, np.ones_like(y_scale) ) y = make_tensor( "y", TensorProto.UINT4, x.shape, [1, 2, 3, 5, -1, -1, 3, 4, 4, 5, 5, 11] diff --git a/docs/TestCoverage.md b/docs/TestCoverage.md index 76dc2ad3a07..5073093808e 100644 --- a/docs/TestCoverage.md +++ b/docs/TestCoverage.md @@ -5451,7 +5451,7 @@ node = onnx.helper.make_node( # scalar zero point and scale x = make_tensor("x", TensorProto.INT4, [5], [0, 1, 7, -4, -8]) x_scale = np.float32(2) -x_zero_point = make_tensor("zero_point", TensorProto.INT4, (1,), [1]) +x_zero_point = make_tensor("x_zero_point", TensorProto.INT4, (1,), [1]) y = np.array([-2, 0, 12, -10, -18], dtype=np.float32) expect( @@ -5501,7 +5501,7 @@ node = onnx.helper.make_node( # scalar zero point and scale x = make_tensor("x", TensorProto.UINT4, [5], [0, 1, 7, 10, 15]) x_scale = np.float32(2) -x_zero_point = make_tensor("zero_point", TensorProto.UINT4, (1,), [1]) +x_zero_point = make_tensor("x_zero_point", TensorProto.UINT4, (1,), [1]) y = np.array([-2, 0, 12, 18, 28], dtype=np.float32) expect( @@ -13957,7 +13957,7 @@ node = onnx.helper.make_node( x = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32) y_scale = np.float32(2) -y_zero_point = make_tensor("zero_point", TensorProto.FLOAT8E4M3FN, [1], [0]) +y_zero_point = make_tensor("y_zero_point", TensorProto.FLOAT8E4M3FN, [1], [0]) y = make_tensor("y", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, 96]) expect( @@ -13981,7 +13981,7 @@ node = onnx.helper.make_node( x = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32) y_scale = np.float32(2) -y_zero_point = make_tensor("zero_point", TensorProto.FLOAT8E5M2, [1], [0.0]) +y_zero_point = make_tensor("y_zero_point", TensorProto.FLOAT8E5M2, [1], [0.0]) y = make_tensor("y", TensorProto.FLOAT8E5M2, [5], [0, 0.5, 1, 49152, 96]) expect( @@ -14076,7 +14076,7 @@ x = np.array( y_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32) y_zero_point = make_tensor( - "zero_point", TensorProto.INT4, y_scale.shape, np.ones_like(y_scale) + "y_zero_point", TensorProto.INT4, y_scale.shape, np.ones_like(y_scale) ) y = make_tensor( "y", TensorProto.INT4, x.shape, [1, 2, 3, 5, -8, -6, 3, 4, 4, 5, 5, 7] @@ -14190,7 +14190,7 @@ x = np.array( y_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32) y_zero_point = make_tensor( - "zero_point", TensorProto.UINT4, y_scale.shape, np.ones_like(y_scale) + "y_zero_point", TensorProto.UINT4, y_scale.shape, np.ones_like(y_scale) ) y = make_tensor( "y", TensorProto.UINT4, x.shape, [1, 2, 3, 5, -1, -1, 3, 4, 4, 5, 5, 11] diff --git a/onnx/backend/test/case/node/dequantizelinear.py b/onnx/backend/test/case/node/dequantizelinear.py index c504b558f0e..b9ed5627e5f 100644 --- a/onnx/backend/test/case/node/dequantizelinear.py +++ b/onnx/backend/test/case/node/dequantizelinear.py @@ -202,7 +202,7 @@ def export_uint4() -> None: # scalar zero point and scale x = make_tensor("x", TensorProto.UINT4, [5], [0, 1, 7, 10, 15]) x_scale = np.float32(2) - x_zero_point = make_tensor("zero_point", 
TensorProto.UINT4, (1,), [1]) + x_zero_point = make_tensor("x_zero_point", TensorProto.UINT4, (1,), [1]) y = np.array([-2, 0, 12, 18, 28], dtype=np.float32) expect( @@ -224,7 +224,7 @@ def export_int4() -> None: # scalar zero point and scale x = make_tensor("x", TensorProto.INT4, [5], [0, 1, 7, -4, -8]) x_scale = np.float32(2) - x_zero_point = make_tensor("zero_point", TensorProto.INT4, (1,), [1]) + x_zero_point = make_tensor("x_zero_point", TensorProto.INT4, (1,), [1]) y = np.array([-2, 0, 12, -10, -18], dtype=np.float32) expect( diff --git a/onnx/backend/test/case/node/quantizelinear.py b/onnx/backend/test/case/node/quantizelinear.py index f2749dc4db0..69208fab804 100644 --- a/onnx/backend/test/case/node/quantizelinear.py +++ b/onnx/backend/test/case/node/quantizelinear.py @@ -73,7 +73,7 @@ def export_e4m3fn() -> None: x = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32) y_scale = np.float32(2) - y_zero_point = make_tensor("zero_point", TensorProto.FLOAT8E4M3FN, [1], [0]) + y_zero_point = make_tensor("y_zero_point", TensorProto.FLOAT8E4M3FN, [1], [0]) y = make_tensor("y", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, 96]) expect( @@ -93,7 +93,7 @@ def export_e5m2() -> None: x = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32) y_scale = np.float32(2) - y_zero_point = make_tensor("zero_point", TensorProto.FLOAT8E5M2, [1], [0.0]) + y_zero_point = make_tensor("y_zero_point", TensorProto.FLOAT8E5M2, [1], [0.0]) y = make_tensor("y", TensorProto.FLOAT8E5M2, [5], [0, 0.5, 1, 49152, 96]) expect( @@ -230,7 +230,7 @@ def export_uint4() -> None: y_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32) y_zero_point = make_tensor( - "zero_point", TensorProto.UINT4, y_scale.shape, np.ones_like(y_scale) + "y_zero_point", TensorProto.UINT4, y_scale.shape, np.ones_like(y_scale) ) y = make_tensor( "y", TensorProto.UINT4, x.shape, [1, 2, 3, 5, -1, -1, 3, 4, 4, 5, 5, 11] @@ -262,7 +262,7 @@ def export_int4() -> None: y_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32) y_zero_point = make_tensor( - "zero_point", TensorProto.INT4, y_scale.shape, np.ones_like(y_scale) + "y_zero_point", TensorProto.INT4, y_scale.shape, np.ones_like(y_scale) ) y = make_tensor( "y", TensorProto.INT4, x.shape, [1, 2, 3, 5, -8, -6, 3, 4, 4, 5, 5, 7] diff --git a/onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/input_2.pb b/onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/input_2.pb index 27697b35887..c0d4733202c 100644 --- a/onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/input_2.pb +++ b/onnx/backend/test/data/node/test_dequantizelinear_int4/test_data_set_0/input_2.pb @@ -1,2 +1 @@ -*B -zero_point \ No newline at end of file +*B x_zero_point \ No newline at end of file diff --git a/onnx/backend/test/data/node/test_dequantizelinear_uint4/test_data_set_0/input_2.pb b/onnx/backend/test/data/node/test_dequantizelinear_uint4/test_data_set_0/input_2.pb index 8876e08c6db..118b7d9a7ee 100644 --- a/onnx/backend/test/data/node/test_dequantizelinear_uint4/test_data_set_0/input_2.pb +++ b/onnx/backend/test/data/node/test_dequantizelinear_uint4/test_data_set_0/input_2.pb @@ -1,2 +1 @@ -*B -zero_point \ No newline at end of file +*B x_zero_point \ No newline at end of file diff --git a/onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/input_2.pb b/onnx/backend/test/data/node/test_quantizelinear_e4m3fn/test_data_set_0/input_2.pb index a6e11c419bfcc419d1ad6e5132b0affc3eb8b524..012db71244298f72c48ca400874a2a62a477353b 100644 GIT 
binary patch literal 21 ccmd;J6cE&6WN_lCjIT;9%8xI|&&(?U04t#d#Q*>R literal 19 acmd;J6cE&6WN_lDN-fHdFUZf#D**r?f&}XT diff --git a/onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/input_2.pb b/onnx/backend/test/data/node/test_quantizelinear_e5m2/test_data_set_0/input_2.pb index 8749ea311c59cb722a6f7bb960c2fbab886d95cb..b7eca6fbcfed5524285075d7d03e894d25c122f9 100644 GIT binary patch literal 21 ccmd;J6cE;8WN_lCjIT;9%8xI|&&(?U04v1>#{d8T literal 19 acmd;J6cE;8WN_lDN-fHdFUZf#D**r?q6F;# diff --git a/onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/input_2.pb b/onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/input_2.pb index 9fe0fc5b846..7a0bc58621b 100644 --- a/onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/input_2.pb +++ b/onnx/backend/test/data/node/test_quantizelinear_int4/test_data_set_0/input_2.pb @@ -1,2 +1 @@ -*B -zero_point \ No newline at end of file +*B y_zero_point \ No newline at end of file diff --git a/onnx/backend/test/data/node/test_quantizelinear_uint4/test_data_set_0/input_2.pb b/onnx/backend/test/data/node/test_quantizelinear_uint4/test_data_set_0/input_2.pb index 412ac47a2c0..b0fdf1565af 100644 --- a/onnx/backend/test/data/node/test_quantizelinear_uint4/test_data_set_0/input_2.pb +++ b/onnx/backend/test/data/node/test_quantizelinear_uint4/test_data_set_0/input_2.pb @@ -1,2 +1 @@ -*B -zero_point \ No newline at end of file +*B y_zero_point \ No newline at end of file From e59d51c8f16bd2ad5d585d7063ba9f10855c7761 Mon Sep 17 00:00:00 2001 From: Matthieu Darbois Date: Sat, 4 May 2024 19:28:04 +0200 Subject: [PATCH 7/9] fix(ci): install python dependencies with `--only-binary :all:` in manylinux (#6120) ### Description Only install python dependencies from wheels in manylinux. ### Motivation and Context The latest version of google-re2 dropped support for manylinux2014. google-re2 fails to build from sources thus failing the Linux release pipelines. This PR forces installation from wheels on manylinux which thus fallback on the latest version of google-re2 with a wheel for manylinux2014. Another solution would be to bump the manylinux images to manylinux_2_28 but that could have impact on downstream users (e.g. impossible to install wheel from rh7, amazon linux 2, ubuntu 18.04). 
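As a rough sketch of what the flag changes (the command below is illustrative, not taken from this patch): with `--only-binary :all:`, pip is not allowed to fall back to building an sdist, so a dependency that publishes no manylinux2014 wheel fails immediately instead of being compiled inside the container.

```sh
# Wheels only: pip errors out on any requirement that would need a
# source build (for example a google-re2 release without a
# manylinux2014 wheel) instead of compiling it in the container.
python -m pip install --only-binary :all: -r requirements-release.txt
```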
Signed-off-by: Matthieu Darbois Co-authored-by: Justin Chu (cherry picked from commit 9f6f3a03d40f0396eec8d273ade249424943e179) --- .github/workflows/manylinux/entrypoint.sh | 4 ++-- .github/workflows/release_linux_aarch64.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/manylinux/entrypoint.sh b/.github/workflows/manylinux/entrypoint.sh index 9e10c2acf56..76b206ce2e9 100644 --- a/.github/workflows/manylinux/entrypoint.sh +++ b/.github/workflows/manylinux/entrypoint.sh @@ -16,12 +16,12 @@ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib # Compile wheels # Need to be updated if there is a new Python Version if [ "$(uname -m)" == "aarch64" ]; then - PIP_INSTALL_COMMAND="$PY_VERSION -m pip install --no-cache-dir -q" + PIP_INSTALL_COMMAND="$PY_VERSION -m pip install --only-binary :all: --no-cache-dir -q" PYTHON_COMMAND="$PY_VERSION" else declare -A python_map=( ["3.8"]="cp38-cp38" ["3.9"]="cp39-cp39" ["3.10"]="cp310-cp310" ["3.11"]="cp311-cp311" ["3.12"]="cp312-cp312") PY_VER=${python_map[$PY_VERSION]} - PIP_INSTALL_COMMAND="/opt/python/${PY_VER}/bin/pip install --no-cache-dir -q" + PIP_INSTALL_COMMAND="/opt/python/${PY_VER}/bin/pip install --only-binary :all: --no-cache-dir -q" PYTHON_COMMAND="/opt/python/${PY_VER}/bin/python" fi diff --git a/.github/workflows/release_linux_aarch64.yml b/.github/workflows/release_linux_aarch64.yml index 6a0d6e3eaeb..daa17b79f11 100644 --- a/.github/workflows/release_linux_aarch64.yml +++ b/.github/workflows/release_linux_aarch64.yml @@ -46,7 +46,7 @@ jobs: ${{ env.img }} \ bash -exc '${{ env.py }} -m pip install -q virtualenv && ${{ env.py }} -m venv .env && \ source .env/bin/activate && \ - ${{ env.py }} -m pip install -q -r requirements-release.txt && \ + ${{ env.py }} -m pip install --only-binary :all: -q -r requirements-release.txt && \ yum install -y protobuf-compiler protobuf-devel deactivate' @@ -70,7 +70,7 @@ jobs: bash -exc '\ source .env/bin/activate && \ python -m pip install -q --upgrade pip && \ - python -m pip install -q -r requirements-release.txt && \ + python -m pip install --only-binary :all: -q -r requirements-release.txt && \ pip install dist/*manylinux2014_aarch64.whl && \ pytest && \ deactivate' From 0559c8b02e9cdfc6c4a99428cbb3ebfbdec21c69 Mon Sep 17 00:00:00 2001 From: Matthieu Darbois Date: Mon, 6 May 2024 18:09:27 +0200 Subject: [PATCH 8/9] fix: install google-re2 with `--only-binary` option (#6129) ### Description Only install google-re2 from wheels. ### Motivation and Context The latest version of google-re2 dropped support for manylinux2014. google-re2 fails to build from sources thus failing the Linux release pipelines. This PR forces installation from wheels on manylinux which thus fallback on the latest version of google-re2 with a wheel for manylinux2014. It reverts https://github.com/onnx/onnx/pull/6120 which forced all dependencies to be available as wheels (psutil is not which forbids installing recent versions of jupyter/ipykernel). Another solution would be to bump the manylinux images to manylinux_2_28 but that could have impact on downstream users (e.g. impossible to install wheel from rh7, amazon linux 2, ubuntu 18.04). 
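For contrast, a sketch of the narrower form this patch switches to (again illustrative, not taken from the diff): naming a package instead of `:all:` limits the wheel-only rule to that package, so the rest of `requirements-release.txt` (psutil included) may still build from source.

```sh
# Wheel-only for google-re2 alone; every other requirement may still
# fall back to an sdist build when no compatible wheel exists.
python -m pip install --only-binary google-re2 -r requirements-release.txt
```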
Signed-off-by: Matthieu Darbois (cherry picked from commit aa2cdacb7c2854d9f46853be0b248759a335becb) --- .github/workflows/manylinux/entrypoint.sh | 4 ++-- .github/workflows/release_linux_aarch64.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/manylinux/entrypoint.sh b/.github/workflows/manylinux/entrypoint.sh index 76b206ce2e9..4431212bc5e 100644 --- a/.github/workflows/manylinux/entrypoint.sh +++ b/.github/workflows/manylinux/entrypoint.sh @@ -16,12 +16,12 @@ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib # Compile wheels # Need to be updated if there is a new Python Version if [ "$(uname -m)" == "aarch64" ]; then - PIP_INSTALL_COMMAND="$PY_VERSION -m pip install --only-binary :all: --no-cache-dir -q" + PIP_INSTALL_COMMAND="$PY_VERSION -m pip install --only-binary google-re2 --no-cache-dir -q" PYTHON_COMMAND="$PY_VERSION" else declare -A python_map=( ["3.8"]="cp38-cp38" ["3.9"]="cp39-cp39" ["3.10"]="cp310-cp310" ["3.11"]="cp311-cp311" ["3.12"]="cp312-cp312") PY_VER=${python_map[$PY_VERSION]} - PIP_INSTALL_COMMAND="/opt/python/${PY_VER}/bin/pip install --only-binary :all: --no-cache-dir -q" + PIP_INSTALL_COMMAND="/opt/python/${PY_VER}/bin/pip install --only-binary google-re2 --no-cache-dir -q" PYTHON_COMMAND="/opt/python/${PY_VER}/bin/python" fi diff --git a/.github/workflows/release_linux_aarch64.yml b/.github/workflows/release_linux_aarch64.yml index daa17b79f11..34dd12e15aa 100644 --- a/.github/workflows/release_linux_aarch64.yml +++ b/.github/workflows/release_linux_aarch64.yml @@ -46,7 +46,7 @@ jobs: ${{ env.img }} \ bash -exc '${{ env.py }} -m pip install -q virtualenv && ${{ env.py }} -m venv .env && \ source .env/bin/activate && \ - ${{ env.py }} -m pip install --only-binary :all: -q -r requirements-release.txt && \ + ${{ env.py }} -m pip install -q --only-binary google-re2 -r requirements-release.txt && \ yum install -y protobuf-compiler protobuf-devel deactivate' @@ -70,7 +70,7 @@ jobs: bash -exc '\ source .env/bin/activate && \ python -m pip install -q --upgrade pip && \ - python -m pip install --only-binary :all: -q -r requirements-release.txt && \ + python -m pip install -q --only-binary google-re2 -r requirements-release.txt && \ pip install dist/*manylinux2014_aarch64.whl && \ pytest && \ deactivate' From 84fe50ae87ebb0619f309605313f7e4457dd4bcd Mon Sep 17 00:00:00 2001 From: Justin Chu Date: Fri, 12 Apr 2024 09:08:47 -0700 Subject: [PATCH 9/9] Migrate CI to use Github Actions (#6075) Migrate CI to use Github Actions. Update Google Test to a newer version (some version in 2022). The latest commit has errors I don't know how to fix. 
Use `FetchContent` instead of `ExternalProject` to bring in googletest according to its documentation https://google.github.io/googletest/quickstart-cmake.html Fixes https://github.com/onnx/onnx/issues/6074 Fixes https://github.com/onnx/onnx/issues/6073 --------- Signed-off-by: Justin Chu (cherry picked from commit 284b12415638085fbc664b10b07ea3913d6d21b7) --- .azure-pipelines/Linux-CI.yml | 154 --------------- .azure-pipelines/MacOS-CI.yml | 138 -------------- .azure-pipelines/Windows-CI.yml | 142 -------------- .github/workflows/main.yml | 200 ++++++++++++++++++++ .github/workflows/release_linux_aarch64.yml | 4 + .github/workflows/release_linux_x86_64.yml | 4 + .github/workflows/release_mac.yml | 4 + .github/workflows/release_win.yml | 4 + CMakeLists.txt | 8 +- cmake/external/googletest.cmake | 49 +---- cmake/unittest.cmake | 10 +- 11 files changed, 230 insertions(+), 487 deletions(-) delete mode 100644 .azure-pipelines/Linux-CI.yml delete mode 100644 .azure-pipelines/MacOS-CI.yml delete mode 100644 .azure-pipelines/Windows-CI.yml create mode 100644 .github/workflows/main.yml diff --git a/.azure-pipelines/Linux-CI.yml b/.azure-pipelines/Linux-CI.yml deleted file mode 100644 index c1d24d6e739..00000000000 --- a/.azure-pipelines/Linux-CI.yml +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright (c) ONNX Project Contributors -# -# SPDX-License-Identifier: Apache-2.0 - -schedules: -- cron: '0 0 * * *' - displayName: Nightly Linux CI in main branch - branches: - include: - - main - -trigger: -- gh-readonly-queue/** - -jobs: -- job: 'Test' - pool: - vmImage: 'Ubuntu-20.04' - strategy: - matrix: - py312-internal-protobuf: - python.version: '3.12' - onnx_ml: 1 - build_type: 'Release' - documentation: 0 - protobuf_type: 'Internal' - py311-ml-debug-external-protobuf: - python.version: '3.11' - onnx_ml: 1 - build_type: 'Debug' - documentation: 0 - protobuf_type: 'External' - py310-internal-protobuf: - python.version: '3.10' - onnx_ml: 0 - build_type: 'Release' - documentation: 0 - protobuf_type: 'Internal' - py39-ml-doc-external-protobuf: - python.version: '3.9' - onnx_ml: 1 - build_type: 'Release' - documentation: 1 - protobuf_type: 'External' - py38-internal-protobuf: - python.version: '3.8' - onnx_ml: 0 - build_type: 'Release' - documentation: 0 - protobuf_type: 'Internal' - maxParallel: 4 - - steps: - - task: UsePythonVersion@0 - inputs: - versionSpec: '$(python.version)' - addToPath: true - - - script: | - python -m pip -q install virtualenv - python -m virtualenv venv - source venv/bin/activate - - if [ '$(protobuf_type)' == 'External' ]; then - sudo apt-get install libprotobuf-dev protobuf-compiler - elif [ '$(protobuf_type)' == 'Internal' ]; then - echo "Use the internal protobuf build" - fi - - python -m pip install --upgrade pip - python -m pip install -r requirements-release.txt - # still test protobuf==3.20.2 at least in a CI - python -m pip install protobuf==3.20.2 - - sudo apt-get install -qq -o=Dpkg::Use-Pty=0 -y --no-install-recommends dos2unix - - git submodule update --init --recursive - export ONNX_BUILD_TESTS=1 - if [ '$(build_type)' == 'Debug' ]; then - export DEBUG=1 - fi - if [ '$(onnx_ml)' == '1' ]; then - export ONNX_ML=1 - fi - export CMAKE_ARGS="-DONNX_WERROR=ON -DONNX_USE_PROTOBUF_SHARED_LIBS=ON" - # Enable more sanitizers - export CMAKE_ARGS="${CMAKE_ARGS} -DCMAKE_CXX_FLAGS='-fsanitize=undefined -fno-sanitize-recover=all '" - pip install -e ".[reference]" -v - displayName: 'Install ONNX and dependencies' - - - script: | - source venv/bin/activate - - pytest -sv --cov=onnx 
--cov-report=xml --cov-append --cov-branch --junit-xml pytest.xml -n auto --dist loadscope - if [ $? -ne 0 ]; then - echo "pytest failed" - exit 1 - fi - - # onnx c++ API tests - export LD_LIBRARY_PATH="./.setuptools-cmake-build/:$LD_LIBRARY_PATH" - ./.setuptools-cmake-build/onnx_gtests - if [ $? -ne 0 ]; then - echo "onnx_gtests failed" - exit 1 - fi - - displayName: 'Run ONNX tests' - - - script: | - curl -Os https://uploader.codecov.io/latest/linux/codecov - chmod +x codecov - ./codecov - - continueOnError: true - displayName: 'Upload to codecov' - - - script: | - source venv/bin/activate - python onnx/backend/test/cmd_tools.py generate-data --clean - git status - # Skip *output_*.pb because NumPy functions might behave differently on different platforms - # Skip test_log's input.pb because it uses np.random, which might behave differently on different platforms - git diff --exit-code -- . ':!onnx/onnx-data.proto' ':!onnx/onnx-data.proto3' ':!*output_*.pb' ':!*input_*.pb' - if [ $? -ne 0 ]; then - echo "git diff for test generation returned failures. Please check updated node test files" - exit 1 - fi - git diff --exit-code --diff-filter=ADR -- . ':!onnx/onnx-data.proto' ':!onnx/onnx-data.proto3' - if [ $? -ne 0 ]; then - echo "Test generation returned failures. Please check the number of node test files (input_*.pb or output_*.pb)" - exit 1 - fi - - pip uninstall -y pillow - python onnx/backend/test/cmd_tools.py generate-data --clean - git status - # Verify test generation without pillow for ImageDecoder, it should directly use frozen data - git diff --exit-code -- . ':!onnx/onnx-data.proto' ':!onnx/onnx-data.proto3' ':!*output_*.pb' ':!*input_*.pb' - if [ $? -ne 0 ]; then - echo "git diff for test generation without pillow returned failures. Please check updated node test files" - exit 1 - fi - - displayName: Test backend test data - - - script: | - if [ '$(documentation)' == '1' ]; then - source venv/bin/activate - pip install -r docs/docsgen/source/requirements.txt - cd docs/docsgen && make text - fi - displayName: Test documentation - continueOnError: true # the documentation generates errors due to operators documentation diff --git a/.azure-pipelines/MacOS-CI.yml b/.azure-pipelines/MacOS-CI.yml deleted file mode 100644 index 6e2bf282515..00000000000 --- a/.azure-pipelines/MacOS-CI.yml +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright (c) ONNX Project Contributors -# -# SPDX-License-Identifier: Apache-2.0 - -schedules: -- cron: '0 0 * * *' - displayName: Nightly MacOS CI in main branch - branches: - include: - - main - -trigger: -- gh-readonly-queue/** - -jobs: -- job: 'Test' - pool: - vmImage: 'macOS-11' - strategy: - matrix: - py312-internal-protobuf: - python.version: '3.12' - onnx_ml: 1 - build_type: 'Release' - documentation: 0 - protobuf_type: 'Internal' - py311-external-protobuf: - python.version: '3.11' - onnx_ml: 0 - build_type: 'Release' - protobuf_type: 'External' - onnx_lite: 0 - py310-lite-internal-protobuf: - python.version: '3.10' - onnx_ml: 0 - build_type: 'Release' - protobuf_type: 'Internal' - onnx_lite: 1 - py39-ml-lite-external-protobuf: - python.version: '3.9' - onnx_ml: 1 - build_type: 'Release' - protobuf_type: 'External' - onnx_lite: 1 - py38-ml-debug-external-protobuf: - python.version: '3.8' - onnx_ml: 1 - build_type: 'Debug' - protobuf_type: 'External' - onnx_lite: 0 - maxParallel: 4 - - steps: - - task: UsePythonVersion@0 - inputs: - versionSpec: '$(python.version)' - - - script: | - # Install Protobuf from source - export NUM_CORES=`sysctl -n hw.logicalcpu` 
- if [ '$(build_type)' == 'Debug' ]; then - export DEBUG=1 - fi - if [ '$(protobuf_type)' == 'External' ]; then - source workflow_scripts/protobuf/build_protobuf_unix.sh $NUM_CORES $(pwd)/protobuf/protobuf_install $(build_type) - elif [ '$(protobuf_type)' == 'Internal' ]; then - echo "Use the internal protobuf build" - fi - - git submodule update --init --recursive - python -m pip install --upgrade pip - python -m pip install -r requirements-release.txt - - if [ '$(onnx_ml)' == '1' ]; then - export ONNX_ML=1 - fi - export ONNX_BUILD_TESTS=1 - export CMAKE_ARGS="-DONNX_WERROR=ON" - if [ '$(onnx_lite)' == '1' ]; then - export CMAKE_ARGS="${CMAKE_ARGS} -DONNX_USE_LITE_PROTO=ON" - fi - pip install -e ".[reference]" -v - displayName: 'Install dependencies and ONNX' - - - script: | - pytest -n auto --dist loadscope - if [ $? -ne 0 ]; then - echo "pytest failed" - exit 1 - fi - - python -m pip install onnxruntime - export ORT_MAX_IR_SUPPORTED_VERSION=9 - export ORT_MAX_ML_OPSET_SUPPORTED_VERSION=3 - export ORT_MAX_ONNX_OPSET_SUPPORTED_VERSION=20 - pytest -n auto --dist loadscope - if [ $? -ne 0 ]; then - echo "pytest failed when testing onnx with onnxruntime" - exit 1 - fi - - # onnx c++ API tests - export LD_LIBRARY_PATH="./.setuptools-cmake-build/:$LD_LIBRARY_PATH" - ./.setuptools-cmake-build/onnx_gtests - if [ $? -ne 0 ]; then - echo "onnx_gtests failed" - exit 1 - fi - - python onnx/backend/test/cmd_tools.py generate-data --clean - git status - git diff --exit-code -- . ':!onnx/onnx-data.proto' ':!onnx/onnx-data.proto3' ':!*output_*.pb' ':!*input_*.pb' - if [ $? -ne 0 ]; then - echo "git diff for test generation returned failures. Please check updated node test files" - exit 1 - fi - git diff --exit-code --diff-filter=ADR -- . ':!onnx/onnx-data.proto' ':!onnx/onnx-data.proto3' - if [ $? -ne 0 ]; then - echo "Test generation returned failures. Please check the number of node test files (input_*.pb or output_*.pb)" - exit 1 - fi - - pip uninstall -y pillow - python onnx/backend/test/cmd_tools.py generate-data --clean - git status - git diff --exit-code -- . ':!onnx/onnx-data.proto' ':!onnx/onnx-data.proto3' ':!*output_*.pb' ':!*input_*.pb' - if [ $? -ne 0 ]; then - echo "git diff for test generation without pillow returned failures. Please check updated node test files" - exit 1 - fi - # Internal Protobuf won't have other untrack files like protobuf/ - if [ '$(protobuf_type)' == 'Internal' ]; then - if [[ $(git ls-files --others --exclude-standard) ]]; then - echo "Some test-generated files not included in the PR. Did you forget to add any test-generated files?" 
- exit 1 - fi - fi - displayName: 'Run ONNX Tests' diff --git a/.azure-pipelines/Windows-CI.yml b/.azure-pipelines/Windows-CI.yml deleted file mode 100644 index f30716be376..00000000000 --- a/.azure-pipelines/Windows-CI.yml +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright (c) ONNX Project Contributors -# -# SPDX-License-Identifier: Apache-2.0 - -schedules: -- cron: '0 0 * * *' - displayName: Nightly Windows CI in main branch - branches: - include: - - main - -trigger: -- gh-readonly-queue/** - -jobs: - -- job: 'Test' - pool: - vmImage: 'windows-2019' - strategy: - matrix: - py312-internal-protobuf: - python.version: '3.12' - onnx_ml: 1 - build_type: 'Release' - documentation: 0 - protobuf_type: 'Internal' - py311-ml-external-protobuf: - python.version: '3.11' - onnx_ml: 1 - onnx_verify_proto: 0 - protobuf_type: 'External' - py310_verify_proto_internal_protobuf: - python.version: '3.10' - onnx_ml: 0 - onnx_verify_proto: 1 - protobuf_type: 'Internal' - py39_ml_external_protobuf: - python.version: '3.9' - onnx_ml: 1 - onnx_verify_proto: 0 - protobuf_type: 'External' - py38_verify_proto_internal_protobuf: - python.version: '3.8' - onnx_ml: 0 - onnx_verify_proto: 1 - protobuf_type: 'Internal' - maxParallel: 4 - - steps: - - task: UsePythonVersion@0 - inputs: - versionSpec: '$(python.version)' - architecture: 'x64' - - - powershell: Write-Host "##vso[task.prependpath]$env:CONDA\Scripts" - displayName: Add conda to PATH - - - script: | - conda create --yes --quiet --name py$(python.version) python=$(python.version) - if '$(protobuf_type)' == 'External' ( - conda install -n py$(python.version) -y -c conda-forge numpy libprotobuf==3.21.5 - ) else ( - conda install -n py$(python.version) -y -c conda-forge numpy - ) - displayName: Create Anaconda environment - - - powershell: echo "##vso[task.setvariable variable=CMAKE_PREFIX_PATH]$env:CONDA/envs/py$(python.version)/Library" - displayName: Set CMAKE_PREFIX_PATH - - - script: | - call activate py$(python.version) - python -m pip install --upgrade pip - python -m pip install -r requirements-release.txt - - git submodule update --init --recursive - set ONNX_BUILD_TESTS=1 - set ONNX_ML=$(onnx_ml) - set ONNX_VERIFY_PROTO_3=$(onnx_verify_proto) - if '$(protobuf_type)' == 'External' ( - set CMAKE_ARGS=-DONNX_USE_PROTOBUF_SHARED_LIBS=ON -DONNX_USE_LITE_PROTO=ON -DONNX_WERROR=ON - ) else ( - REM Disable ONNX_WERROR since the new protobuf version has build warnings that are treated as errors. - set CMAKE_ARGS=-DONNX_USE_PROTOBUF_SHARED_LIBS=OFF -DONNX_USE_LITE_PROTO=ON -DONNX_WERROR=OFF - ) - - pip install -e ".[reference]" -v - pytest -n auto --dist loadscope - IF NOT %ERRORLEVEL% EQU 0 ( - @echo "pytest failed" - EXIT 1 - ) - - python onnx/backend/test/cmd_tools.py generate-data --clean - git status - git diff --exit-code -- . :!onnx/onnx-data.proto :!onnx/onnx-data.proto3 :!*output_*.pb :!*input_*.pb - IF NOT %ERRORLEVEL% EQU 0 ( - @echo "git diff for test generation returned failures. Please check updated node test files" - EXIT 1 - ) - git diff --exit-code --diff-filter=ADR -- . :!onnx/onnx-data.proto :!onnx/onnx-data.proto3 - IF NOT %ERRORLEVEL% EQU 0 ( - @echo "Test generation returned failures. Please check the number of node test files (input_*.pb or output_*.pb)." - EXIT 1 - ) - - pip uninstall -y pillow - python onnx/backend/test/cmd_tools.py generate-data --clean - git status - git diff --exit-code -- . 
:!onnx/onnx-data.proto :!onnx/onnx-data.proto3 :!*output_*.pb :!*input_*.pb - IF NOT %ERRORLEVEL% EQU 0 ( - @echo "git diff for test generation without pillow returned failures. Please check updated node test files" - EXIT 1 - ) - - rm -rf .setuptools-cmake-build - if '$(protobuf_type)' == 'External' ( - conda install -y -c conda-forge libprotobuf=3.20 - ) - displayName: Install and test ONNX - - - script: | - if '$(protobuf_type)' == 'External' ( - call activate py$(python.version) - python -m pip install protobuf==3.20.2 - - set ONNX_BUILD_TESTS=1 - set ONNX_ML=$(onnx_ml) - set CMAKE_ARGS=-DONNX_USE_PROTOBUF_SHARED_LIBS=ON -DONNX_USE_LITE_PROTO=ON -DONNX_WERROR=ON - python -m pip install -r requirements-release.txt - pip uninstall -y onnx - pip install . - - pytest -n auto --dist loadscope - IF NOT %ERRORLEVEL% EQU 0 ( - @echo "pytest failed when testing onnx with libprotobuf=3.20" - EXIT 1 - ) - ) else ( - REM Skip the test for externally installing lower version of Protobuf. - ) - displayName: Test Protobuf 3.20 from conda-forge diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 00000000000..b84942233aa --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,200 @@ +name: CI + +on: + push: + branches: + - main + pull_request: + merge_group: + workflow_dispatch: + +permissions: # set top-level default permissions as security best practice + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event_name == 'workflow_dispatch' }} + cancel-in-progress: true + +jobs: + test: + name: "Test" + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest,macos-latest] + python_version: ['3.12', '3.11', '3.10', '3.9', '3.8'] + include: + - python_version: '3.12' + onnx_ml: 1 + debug_build: 1 + documentation: 1 + protobuf_type: 'Internal' + - python_version: '3.11' + onnx_ml: 1 + debug_build: 0 + documentation: 0 + protobuf_type: 'External' + - python_version: '3.10' + onnx_ml: 0 + debug_build: 0 + documentation: 0 + protobuf_type: 'Internal' + - python_version: '3.9' + onnx_ml: 1 + debug_build: 0 + documentation: 0 + protobuf_type: 'External' + - python_version: '3.8' + onnx_ml: 0 + debug_build: 0 + documentation: 0 + protobuf_type: 'Internal' + runs-on: ${{ matrix.os }} + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python_version }} + + - name: Show versions + run: | + python --version + cmake --version + + - name: Install external protobuf - Linux + if: matrix.protobuf_type == 'External' && matrix.os == 'ubuntu-latest' + run: | + sudo apt-get install libprotobuf-dev protobuf-compiler + + - name: Install external protobuf - MacOS + if: matrix.protobuf_type == 'External' && matrix.os == 'macos-latest' + run: | + source workflow_scripts/protobuf/build_protobuf_unix.sh 3 $(pwd)/protobuf/protobuf_install + + - name: Set up MSBuild + if: matrix.os == 'windows-latest' + uses: microsoft/setup-msbuild@6fb02220983dee41ce7ae257b6f4d8f9bf5ed4ce # v2.0.0 + with: + msbuild-architecture: ${{ matrix.architecture }} + + - name: Install external protobuf - Windows + if: matrix.protobuf_type == 'External' && matrix.os == 'windows-latest' + run: | + workflow_scripts/protobuf/build_protobuf_win.ps1 + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install -r requirements-release.txt + python -m pip install protobuf==3.20.2 + git submodule update --init 
--recursive + + - name: Build and install ONNX - Linux + if: matrix.os == 'ubuntu-latest' + run: | + if [ "${{ matrix.protobuf_type }}" == "External" ]; then + export CMAKE_ARGS="$CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DONNX_USE_PROTOBUF_SHARED_LIBS=ON" + fi + pip install -e ".[reference]" -v + env: + DEBUG: ${{ matrix.debug_build }} + ONNX_ML: ${{ matrix.onnx_ml }} + ONNX_BUILD_TESTS: 1 + CMAKE_ARGS: "-DONNX_WERROR=ON -DCMAKE_CXX_FLAGS='-fsanitize=undefined -fno-sanitize-recover=all '" + + - name: Build and install ONNX - MacOS + if: matrix.os == 'macos-latest' + run: | + pip install -e ".[reference]" -v + env: + DEBUG: ${{ matrix.debug_build }} + ONNX_ML: ${{ matrix.onnx_ml }} + ONNX_BUILD_TESTS: 1 + CMAKE_ARGS: "-DONNX_WERROR=ON" + + - name: Build and install ONNX - Windows + if: matrix.os == 'windows-latest' + run: | + pip install -e ".[reference]" -v + env: + DEBUG: ${{ matrix.debug_build }} + ONNX_ML: ${{ matrix.onnx_ml }} + ONNX_BUILD_TESTS: 1 + CMAKE_ARGS: "-DONNX_WERROR=OFF -DONNX_USE_PROTOBUF_SHARED_LIBS=OFF -DONNX_USE_LITE_PROTO=ON" + + - name: Run Python tests + run: | + pytest -sv --cov=onnx --cov-report=xml --cov-append --cov-branch --junit-xml pytest.xml -n auto --dist loadscope + + - name: Run C++ tests + if: matrix.os == 'ubuntu-latest' || matrix.os == 'macos-latest' + run: | + export LD_LIBRARY_PATH="./.setuptools-cmake-build/:$LD_LIBRARY_PATH" + ./.setuptools-cmake-build/onnx_gtests + + - name: Upload coverage to Codecov + if: always() + uses: codecov/codecov-action@v4 + with: + token: ${{ secrets.CODECOV_TOKEN }} + + - name: Upload Test Results + if: always() + uses: actions/upload-artifact@v3 + with: + name: Test Results (${{ matrix.os }}-{{ matrix.python_version }}) + path: pytest.xml + + - name: Test backend test data + if: matrix.documentation == 1 && matrix.os == 'ubuntu-latest' + run: | + python onnx/backend/test/cmd_tools.py generate-data --clean + git status + git diff --exit-code -- . ':!onnx/onnx-data.proto' ':!onnx/onnx-data.proto3' ':!*output_*.pb' ':!*input_*.pb' + if [ $? -ne 0 ]; then + echo "git diff for test generation returned failures. Please check updated node test files" + exit 1 + fi + git diff --exit-code --diff-filter=ADR -- . ':!onnx/onnx-data.proto' ':!onnx/onnx-data.proto3' + if [ $? -ne 0 ]; then + echo "Test generation returned failures. Please check the number of node test files (input_*.pb or output_*.pb)" + exit 1 + fi + pip uninstall -y pillow + python onnx/backend/test/cmd_tools.py generate-data --clean + git status + git diff --exit-code -- . ':!onnx/onnx-data.proto' ':!onnx/onnx-data.proto3' ':!*output_*.pb' ':!*input_*.pb' + if [ $? -ne 0 ]; then + echo "git diff for test generation without pillow returned failures. 
Please check updated node test files" + exit 1 + fi + + - name: Test documentation + if: matrix.documentation == 1 + run: | + pip install -r docs/docsgen/source/requirements.txt + cd docs/docsgen && make text + continue-on-error: true + + publish-test-results: + name: "Publish Test Results to Github" + needs: test + runs-on: ubuntu-latest + permissions: + checks: write + # only needed unless run with comment_mode: off + pull-requests: write + if: always() + steps: + - name: Download Artifacts + uses: actions/download-artifact@v3 + with: + path: artifacts + + - name: Publish Test Results + uses: EnricoMi/publish-unit-test-result-action@v2 + with: + files: "artifacts/**/*.xml" diff --git a/.github/workflows/release_linux_aarch64.yml b/.github/workflows/release_linux_aarch64.yml index 34dd12e15aa..092e2e52999 100644 --- a/.github/workflows/release_linux_aarch64.yml +++ b/.github/workflows/release_linux_aarch64.yml @@ -13,6 +13,10 @@ on: permissions: # set top-level default permissions as security best practice contents: read +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event_name == 'workflow_dispatch' }} + cancel-in-progress: true + jobs: build: if: github.event_name != 'pull_request' || startsWith( github.base_ref, 'rel-') || contains( github.event.pull_request.labels.*.name, 'run release CIs') diff --git a/.github/workflows/release_linux_x86_64.yml b/.github/workflows/release_linux_x86_64.yml index 408c56479a5..8be53986f9f 100644 --- a/.github/workflows/release_linux_x86_64.yml +++ b/.github/workflows/release_linux_x86_64.yml @@ -13,6 +13,10 @@ on: permissions: # set top-level default permissions as security best practice contents: read +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event_name == 'workflow_dispatch' }} + cancel-in-progress: true + jobs: build: if: github.event_name != 'pull_request' || startsWith( github.base_ref, 'rel-') || contains( github.event.pull_request.labels.*.name, 'run release CIs') diff --git a/.github/workflows/release_mac.yml b/.github/workflows/release_mac.yml index 64701ee8afb..4872cf8e154 100644 --- a/.github/workflows/release_mac.yml +++ b/.github/workflows/release_mac.yml @@ -17,6 +17,10 @@ env: permissions: contents: read +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event_name == 'workflow_dispatch' }} + cancel-in-progress: true + jobs: build: if: github.event_name != 'pull_request' || startsWith( github.base_ref, 'rel-') || contains( github.event.pull_request.labels.*.name, 'run release CIs') diff --git a/.github/workflows/release_win.yml b/.github/workflows/release_win.yml index 05a321de784..c609d6e3378 100644 --- a/.github/workflows/release_win.yml +++ b/.github/workflows/release_win.yml @@ -13,6 +13,10 @@ on: permissions: # set top-level default permissions as security best practice contents: read +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event_name == 'workflow_dispatch' }} + cancel-in-progress: true + jobs: build: if: github.event_name != 'pull_request' || startsWith( github.base_ref, 'rel-') || contains( github.event.pull_request.labels.*.name, 'run release CIs') diff --git a/CMakeLists.txt b/CMakeLists.txt index 10a45e0c51f..b666eec4809 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,5 +1,5 @@ # Minimum CMake required -cmake_minimum_required(VERSION 3.1) +cmake_minimum_required(VERSION 3.14) include(cmake/Utils.cmake) # Set default build type if(NOT CMAKE_BUILD_TYPE) @@ -120,13 +120,11 @@ endif() if(ONNX_BUILD_TESTS) 
find_package(GTest) - if(GTest_FOUND) - set(googletest_INCLUDE_DIRS) - set(googletest_STATIC_LIBRARIES GTest::gtest) - else() + if(NOT GTest_FOUND) list(APPEND CMAKE_MODULE_PATH ${ONNX_ROOT}/cmake/external) include(googletest) endif() + set(googletest_STATIC_LIBRARIES GTest::gtest) endif() if((ONNX_USE_LITE_PROTO AND TARGET protobuf::libprotobuf-lite) OR ((NOT ONNX_USE_LITE_PROTO) AND TARGET protobuf::libprotobuf)) diff --git a/cmake/external/googletest.cmake b/cmake/external/googletest.cmake index e5a89965585..9f88d3c9b79 100644 --- a/cmake/external/googletest.cmake +++ b/cmake/external/googletest.cmake @@ -2,45 +2,12 @@ # # SPDX-License-Identifier: Apache-2.0 -include (ExternalProject) - -set(googletest_INCLUDE_DIRS ${CMAKE_CURRENT_BINARY_DIR}/googletest/src/googletest/googletest/include) -set(googletest_URL https://github.com/google/googletest.git) -set(googletest_BUILD ${CMAKE_CURRENT_BINARY_DIR}/googletest/) -set(googletest_TAG e93da23920e5b6887d6a6a291c3a59f83f5b579e) -#0fe96607d85cf3a25ac40da369db62bbee2939a5 - -if(WIN32 AND CMAKE_BUILD_TYPE MATCHES Debug) - set(googletest_STATIC_LIBRARIES - ${CMAKE_CURRENT_BINARY_DIR}/googletest/src/googletest/googletest/Debug/gtestd.lib) -elseif(WIN32 AND NOT CMAKE_BUILD_TYPE MATCHES Debug) - set(googletest_STATIC_LIBRARIES - ${CMAKE_CURRENT_BINARY_DIR}/googletest/src/googletest/googletest/Release/gtest.lib) -elseif(CMAKE_BUILD_TYPE MATCHES Debug) - set(googletest_STATIC_LIBRARIES - ${CMAKE_CURRENT_BINARY_DIR}/googletest/src/googletest/googletest/libgtestd.a) -else() - set(googletest_STATIC_LIBRARIES - ${CMAKE_CURRENT_BINARY_DIR}/googletest/src/googletest/googletest/libgtest.a) -endif() - -if(ONNX_USE_MSVC_STATIC_RUNTIME) - set(ONNX_USE_MSVC_SHARED_RUNTIME OFF) -else() - set(ONNX_USE_MSVC_SHARED_RUNTIME ON) -endif() -ExternalProject_Add(googletest - PREFIX googletest - GIT_REPOSITORY ${googletest_URL} - GIT_TAG ${googletest_TAG} - DOWNLOAD_DIR "${DOWNLOAD_LOCATION}" - BUILD_IN_SOURCE 1 - BUILD_COMMAND ${CMAKE_COMMAND} --build . --config ${CMAKE_BUILD_TYPE} --target gtest - INSTALL_COMMAND "" - CMAKE_CACHE_ARGS - -DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE} - -DBUILD_GMOCK:BOOL=OFF - -DBUILD_GTEST:BOOL=ON - -Dgtest_force_shared_crt:BOOL=${ONNX_USE_MSVC_SHARED_RUNTIME} - BUILD_BYPRODUCTS ${googletest_STATIC_LIBRARIES} +include(FetchContent) +FetchContent_Declare( + googletest + # Specify the commit you depend on and update it regularly. 
+ URL https://github.com/google/googletest/archive/5376968f6948923e2411081fd9372e71a59d8e77.zip ) +# For Windows: Prevent overriding the parent project's compiler/linker settings +set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) +FetchContent_MakeAvailable(googletest) diff --git a/cmake/unittest.cmake b/cmake/unittest.cmake index cc825a0344e..33fa7f683b6 100644 --- a/cmake/unittest.cmake +++ b/cmake/unittest.cmake @@ -24,13 +24,9 @@ function(AddTest) add_executable(${_UT_TARGET} ${_UT_SOURCES}) add_dependencies(${_UT_TARGET} onnx onnx_proto) - if(NOT GTest_FOUND) - add_dependencies(${_UT_TARGET} googletest) - endif() target_include_directories(${_UT_TARGET} - PUBLIC ${googletest_INCLUDE_DIRS} - ${ONNX_INCLUDE_DIRS} + PUBLIC ${ONNX_INCLUDE_DIRS} ${PROTOBUF_INCLUDE_DIRS} ${ONNX_ROOT} ${CMAKE_CURRENT_BINARY_DIR}) @@ -56,9 +52,9 @@ function(AddTest) # unsigned from include\google\protob # uf\wire_format_lite.h /wd4244 # 'argument': conversion from 'google:: - # protobuf::uint64' to 'int', possible + # protobuf::uint64' to 'int', possible # loss of data - /wd4267 # Conversion from 'size_t' to 'int', + /wd4267 # Conversion from 'size_t' to 'int', # possible loss of data /wd4996 # The second parameter is ignored. )