From aa3fd3afe232377f290b2b2c0cf70cf42db09186 Mon Sep 17 00:00:00 2001
From: Yinghai Lu
Date: Tue, 8 Feb 2022 15:53:08 -0800
Subject: [PATCH] Move fx2trt out of PyTorch core (#72499)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/72499

Pull Request resolved: https://github.com/pytorch/benchmark/pull/740

Move fx2trt out of tree to remove bloat from PyTorch core. This is the first
and major step. Next, we will move acc_tracer out of the tree and rearrange
some fx passes.

Reviewed By: suo

Differential Revision: D34065866

fbshipit-source-id: a8df58e82155a5c741717cb48b0d56c325896c5e
---
 torchbenchmark/util/fx2trt.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/torchbenchmark/util/fx2trt.py b/torchbenchmark/util/fx2trt.py
index b1e624c511..1eb44c5e77 100644
--- a/torchbenchmark/util/fx2trt.py
+++ b/torchbenchmark/util/fx2trt.py
@@ -1,6 +1,6 @@
 import torch
-from torch.fx.experimental.fx2trt import LowerSetting
-from torch.fx.experimental.fx2trt.lower import Lowerer
+from fx2trt_oss.fx import LowerSetting
+from fx2trt_oss.fx.lower import Lowerer
 
 """
 The purpose of this example is to demostrate the onverall flow of lowering a PyTorch model
@@ -31,7 +31,7 @@ def lower_to_trt(
         explicit_batch_dimension: Use explicit batch dimension in TensorRT if set True, otherwise use implicit batch dimension.
         fp16_mode: fp16 config given to TRTModule.
         enable_fuse: Enable pass fusion during lowering if set to true. l=Lowering will try to find pattern defined
-            in torch.fx.experimental.fx2trt.passes from original module, and replace with optimized pass before apply lowering.
+            in fx2trt_oss.fx.passes from original module, and replace with optimized pass before apply lowering.
         verbose_log: Enable verbose log for TensorRT if set True.
         timing_cache_prefix: Timing cache file name for timing cache used by fx2trt.
         save_timing_cache: Update timing cache with current timing cache data if set to True.
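
Below is a minimal usage sketch (not part of the patch) showing how the
relocated imports might be exercised after this change. The fx2trt_oss.fx
module paths are taken directly from the diff; everything else is assumed:
the (module, inputs) positional ordering of lower_to_trt(), the availability
of torchvision, and a working CUDA + TensorRT + fx2trt_oss environment.

    import torch
    import torchvision.models as models

    # New out-of-tree import paths, exactly as introduced by the diff above;
    # importing them is a quick sanity check that the new package layout resolves.
    from fx2trt_oss.fx import LowerSetting
    from fx2trt_oss.fx.lower import Lowerer

    # Wrapper defined in the patched torchbenchmark/util/fx2trt.py; its exact
    # signature beyond the documented keyword arguments is an assumption.
    from torchbenchmark.util.fx2trt import lower_to_trt

    model = models.resnet18().cuda().eval()
    sample_inputs = [torch.randn(1, 3, 224, 224, device="cuda")]

    # Keyword names come from the docstring fragment in the second hunk.
    trt_model = lower_to_trt(
        model,
        sample_inputs,
        explicit_batch_dimension=True,  # explicit batch dimension in TensorRT
        fp16_mode=True,                 # fp16 config handed to TRTModule
        enable_fuse=True,               # pattern fusion from fx2trt_oss.fx.passes
        verbose_log=False,              # TensorRT verbose logging off
    )

    with torch.no_grad():
        result = trt_model(*sample_inputs)

The LowerSetting/Lowerer imports mirror the two lines changed in the first
hunk; lower_to_trt() appears to be the convenience wrapper the patched file
builds on top of them.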