From fa59cb9ff71081e863f13d6bc523061b1f5b1ec8 Mon Sep 17 00:00:00 2001
From: Mengchi Zhang
Date: Wed, 16 Mar 2022 16:55:54 -0700
Subject: [PATCH] Replacing fp16 and int8 mode with enum type (#74338)

Summary:
X-link: https://github.com/pytorch/pytorch/pull/74338

X-link: https://github.com/pytorch/fx2trt/pull/24

Pull Request resolved: https://github.com/pytorch/benchmark/pull/805

Reviewed By: jasonjk-park

Differential Revision: D34929680

fbshipit-source-id: c7657a002aeb5ffc51ce7765a4c2778cb357ff32
---
 torchbenchmark/util/backends/fx2trt.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/torchbenchmark/util/backends/fx2trt.py b/torchbenchmark/util/backends/fx2trt.py
index b8165c9ea4..9ea4c09e07 100644
--- a/torchbenchmark/util/backends/fx2trt.py
+++ b/torchbenchmark/util/backends/fx2trt.py
@@ -62,11 +62,12 @@ def lower_to_trt(
     """
     from fx2trt_oss.fx import LowerSetting
     from fx2trt_oss.fx.lower import Lowerer
+    from fx2trt_oss.fx.utils import LowerPrecision
     lower_setting = LowerSetting(
         max_batch_size=max_batch_size,
         max_workspace_size=max_workspace_size,
         explicit_batch_dimension=explicit_batch_dimension,
-        fp16_mode=fp16_mode,
+        lower_precision=LowerPrecision.FP16,
         enable_fuse=enable_fuse,
         verbose_log=verbose_log,
         timing_cache_prefix=timing_cache_prefix,
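
For context, the hunk above hardcodes `LowerPrecision.FP16`, while the commit title describes replacing both the fp16 and int8 boolean modes with the enum. A caller that still exposes boolean flags would presumably map them onto the enum along these lines; this is a minimal sketch, not the patch's code, and the `pick_precision` helper, the `int8_mode` flag, and the `FP32`/`INT8` enum members are assumptions inferred from the commit title rather than shown in the diff.

```python
# Hypothetical sketch: translating legacy boolean mode flags into the
# LowerPrecision enum introduced by this patch. The FP32/INT8 members and
# the int8_mode flag are assumptions, not confirmed by the diff above.
from fx2trt_oss.fx.utils import LowerPrecision


def pick_precision(fp16_mode: bool = False, int8_mode: bool = False) -> LowerPrecision:
    """Map the old boolean flags onto a single LowerPrecision value."""
    if int8_mode:
        return LowerPrecision.INT8
    if fp16_mode:
        return LowerPrecision.FP16
    return LowerPrecision.FP32


# Usage (hypothetical): LowerSetting(lower_precision=pick_precision(fp16_mode=True), ...)
```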