diff --git a/examples/README.md b/examples/README.md
index f4a46cec5b3..98d30e87ba8 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -60,9 +60,17 @@ Here is the [Quantization Flow Docs](/docs/website/docs/tutorials/quantization_f
 
 You can run quantization test with the following command:
 ```bash
-python3 -m examples.quantization.example --model_name "mv2" # for MobileNetv2
+python3 -m examples.quantization.example --model_name "mv2" --so-library "<path/to/shared/library>" # for MobileNetv2
 ```
-It will print both the original model after capture and quantized model.
+
+Note that the shared library passed to `example.py` is required to register the out variants of the quantized operators (e.g., `quantized_decomposed::add.out`) into EXIR. To build this library, run the following command if using buck2:
+```bash
+buck2 build //kernels/quantized:aot_lib --show-output
+```
+
+If using CMake, follow the instructions in `test_quantize.sh` to build it; the default path is `cmake-out/kernels/quantized/libquantized_ops_lib.so`.
+
+This command will print both the original model after capture and the quantized model.
 
 The flow produces a quantized model that could be lowered through partitioner or the runtime directly.
 
diff --git a/examples/custom_ops/custom_ops_2.py b/examples/custom_ops/custom_ops_2.py
index af7baeacb96..fd085f3d07b 100644
--- a/examples/custom_ops/custom_ops_2.py
+++ b/examples/custom_ops/custom_ops_2.py
@@ -37,7 +37,24 @@ def main():
         help="Provide path to so library. E.g., cmake-out/examples/custom_ops/libcustom_ops_aot_lib.so",
     )
     args = parser.parse_args()
-    torch.ops.load_library(args.so_library)
+    # See if we have custom op my_ops::mul4.out registered
+    has_out_ops = True
+    try:
+        op = torch.ops.my_ops.mul4.out
+    except AttributeError:
+        print("No registered custom op my_ops::mul4.out")
+        has_out_ops = False
+    if not has_out_ops:
+        if args.so_library:
+            torch.ops.load_library(args.so_library)
+        else:
+            raise RuntimeError(
+                "Need to specify shared library path to register custom op my_ops::mul4.out into "
+                "EXIR. The required shared library is defined as `custom_ops_aot_lib` in "
+                "examples/custom_ops/CMakeLists.txt if you are using CMake build, or `custom_ops_aot_lib_2` in "
+                "examples/custom_ops/targets.bzl for buck2. One example path would be cmake-out/examples/custom_ops/"
+                "libcustom_ops_aot_lib.[so|dylib]."
+            )
     print(args.so_library)
 
     main()
diff --git a/examples/quantization/example.py b/examples/quantization/example.py
index edbbd4441e3..4ee1dc9495b 100644
--- a/examples/quantization/example.py
+++ b/examples/quantization/example.py
@@ -99,8 +99,24 @@ def verify_xnnpack_quantizer_matching_fx_quant_model(model_name, model, example_
     )
     args = parser.parse_args()
 
-    if args.so_library:
-        torch.ops.load_library(args.so_library)
+    # See if we have quantized op out variants registered
+    has_out_ops = True
+    try:
+        op = torch.ops.quantized_decomposed.add.out
+    except AttributeError:
+        print("No registered quantized ops")
+        has_out_ops = False
+    if not has_out_ops:
+        if args.so_library:
+            torch.ops.load_library(args.so_library)
+        else:
+            raise RuntimeError(
+                "Need to specify shared library path to register quantized ops (and their out variants) into "
+                "EXIR. The required shared library is defined as `quantized_ops_aot_lib` in "
+                "kernels/quantized/CMakeLists.txt if you are using CMake build, or `aot_lib` in "
+                "kernels/quantized/targets.bzl for buck2. One example path would be cmake-out/kernels/quantized/"
+                "libquantized_ops_aot_lib.[so|dylib]."
+            )
     if not args.verify and args.model_name not in MODEL_NAME_TO_OPTIONS:
         raise RuntimeError(
             f"Model {args.model_name} is not a valid name. or not quantizable right now, "
diff --git a/examples/quantization/test_quantize.sh b/examples/quantization/test_quantize.sh
index 6431a9454dc..93aef82566f 100644
--- a/examples/quantization/test_quantize.sh
+++ b/examples/quantization/test_quantize.sh
@@ -7,7 +7,7 @@
 
 # Test the end-to-end quantization flow.
 
-set -eu
+set -e
 
 get_shared_lib_ext() {
   UNAME=$(uname)
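
Note on the pattern introduced above: both `custom_ops_2.py` and `example.py` now probe `torch.ops` before loading the AOT shared library, relying on the fact that attribute access on an unregistered op raises `AttributeError`, and only fall back to `torch.ops.load_library` when the probe fails. A minimal sketch of that probe-then-load logic as a reusable helper follows; the function `ensure_ops_registered` is hypothetical and not part of this diff, while `torch.ops.load_library` is the real PyTorch API used in both files.

```python
import torch


def ensure_ops_registered(namespace: str, op_name: str, overload: str, so_library: str) -> None:
    """Load `so_library` only if `namespace::op_name.overload` is not yet registered.

    Hypothetical helper distilled from the pattern in this diff; not part of the PR.
    """
    try:
        # Attribute access on torch.ops raises AttributeError for unregistered ops.
        getattr(getattr(getattr(torch.ops, namespace), op_name), overload)
    except AttributeError:
        if not so_library:
            raise RuntimeError(
                f"{namespace}::{op_name}.{overload} is not registered; pass the path to the "
                "AOT shared library (e.g., the buck2 `aot_lib` target output, or the CMake "
                "`quantized_ops_aot_lib` build output) so it can be loaded."
            )
        torch.ops.load_library(so_library)


# Usage mirroring examples/quantization/example.py:
# ensure_ops_registered("quantized_decomposed", "add", "out", args.so_library)
```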