diff --git a/CMakeLists.txt b/CMakeLists.txt index a9b583471ad..801e6baec8b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -280,6 +280,7 @@ endif() # Generate lib to register quantized ops if(REGISTER_QUANTIZED_OPS) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/kernels/quantized) + list(APPEND _libs quantized_ops_lib) endif() # ios can only build library but not binary diff --git a/examples/executor_runner/targets.bzl b/examples/executor_runner/targets.bzl index 5b6280adc31..5e313e2583c 100644 --- a/examples/executor_runner/targets.bzl +++ b/examples/executor_runner/targets.bzl @@ -27,13 +27,14 @@ def define_common_targets(): ) register_custom_op = native.read_config("executorch", "register_custom_op", "0") - + register_quantized_ops = native.read_config("executorch", "register_quantized_ops", "0") + custom_ops_lib = [] if register_custom_op == "1": - custom_ops_lib = ["//executorch/examples/custom_ops:lib_1"] + custom_ops_lib.append("//executorch/examples/custom_ops:lib_1") elif register_custom_op == "2": - custom_ops_lib = ["//executorch/examples/custom_ops:lib_2"] - else: - custom_ops_lib = [] + custom_ops_lib.append("//executorch/examples/custom_ops:lib_2") + if register_quantized_ops == "1": + custom_ops_lib.append("//executorch/kernels/quantized:generated_lib") # Test driver for models, uses all portable kernels and a demo backend. This # is intended to have minimal dependencies. If you want a runner that links diff --git a/examples/quantization/test_quantize.sh b/examples/quantization/test_quantize.sh index 17d572f7372..6431a9454dc 100644 --- a/examples/quantization/test_quantize.sh +++ b/examples/quantization/test_quantize.sh @@ -7,7 +7,7 @@ # Test the end-to-end quantization flow. 
-set -e +set -eu get_shared_lib_ext() { UNAME=$(uname) @@ -30,6 +30,14 @@ test_buck2_quantization() { echo "Run example.py" ${PYTHON_EXECUTABLE} -m "examples.quantization.example" --so_library="$SO_LIB" --model_name="$1" + + echo 'Running executor_runner' + buck2 run //examples/executor_runner:executor_runner \ + --config=executorch.register_quantized_ops=1 -- --model_path="./$1.pte" + # TODO: verify the runner's output, not just its exit status (set -e only catches failures) + + echo "Removing $1.pte" + rm "./$1.pte" } test_cmake_quantization() { @@ -52,6 +60,13 @@ test_cmake_quantization() { echo "Run example.py, shared library $SO_LIB" ${PYTHON_EXECUTABLE} -m "examples.quantization.example" --so_library="$SO_LIB" --model_name="$1" + + echo 'Running executor_runner' + cmake-out/executor_runner --model_path="./$1.pte" + # TODO: verify the runner's output, not just its exit status (set -e only catches failures) + + echo "Removing $1.pte" + rm "./$1.pte" } if [[ -z $PYTHON_EXECUTABLE ]]; diff --git a/kernels/quantized/CMakeLists.txt b/kernels/quantized/CMakeLists.txt index aa037129d5e..acea70c899d 100644 --- a/kernels/quantized/CMakeLists.txt +++ b/kernels/quantized/CMakeLists.txt @@ -52,5 +52,5 @@ gen_custom_ops_aot_lib("quantized_ops_aot_lib" "${_quantized_sources}") # Build a library for _quantized_kernels_srcs # -# quantized_kernels: Pure-C++ kernel library for quantized ops -gen_operators_lib("quantized_kernels" "${_quantized_kernels__srcs}") +# quantized_ops_lib: Register quantized ops kernels into the ExecuTorch runtime +gen_operators_lib("quantized_ops_lib" "${_quantized_kernels__srcs}")