diff --git a/.jenkins/pytorch/macos-test.sh b/.jenkins/pytorch/macos-test.sh
index 64bdf42a01092..a883f0d107a12 100755
--- a/.jenkins/pytorch/macos-test.sh
+++ b/.jenkins/pytorch/macos-test.sh
@@ -63,7 +63,7 @@ test_python_all() {
   # Increase default limit on open file handles from 256 to 1024
   ulimit -n 1024
 
-  python test/run_test.py --verbose --exclude test_jit_profiling test_jit_legacy test_jit_fuser_legacy test_jit_fuser_profiling test_jit_fuser_te test_tensorexpr --determine-from="$DETERMINE_FROM"
+  python test/run_test.py --verbose --exclude test_jit_simple test_jit_legacy test_jit_fuser_legacy --determine-from="$DETERMINE_FROM"
   assert_git_not_dirty
 }
diff --git a/.jenkins/pytorch/test.sh b/.jenkins/pytorch/test.sh
index 48cc3611dacdb..d4f3c5b9dd76e 100755
--- a/.jenkins/pytorch/test.sh
+++ b/.jenkins/pytorch/test.sh
@@ -143,8 +143,8 @@ test_python_nn() {
   assert_git_not_dirty
 }
 
-test_python_ge_config_profiling() {
-  time python test/run_test.py --include test_jit_profiling test_jit_fuser_profiling test_jit_fuser_te --verbose --determine-from="$DETERMINE_FROM"
+test_python_ge_config_simple() {
+  time python test/run_test.py --include test_jit_simple --verbose --determine-from="$DETERMINE_FROM"
   assert_git_not_dirty
 }
 
@@ -154,7 +154,7 @@ test_python_ge_config_legacy() {
 }
 
 test_python_all_except_nn() {
-  time python test/run_test.py --exclude test_nn test_jit_profiling test_jit_legacy test_jit_fuser_legacy test_jit_fuser_profiling test_jit_fuser_te test_tensorexpr --verbose --determine-from="$DETERMINE_FROM"
+  time python test/run_test.py --exclude test_nn test_jit_simple test_jit_legacy test_jit_fuser_legacy --verbose --determine-from="$DETERMINE_FROM"
   assert_git_not_dirty
 }
diff --git a/.jenkins/pytorch/win-test-helpers/test_python_all_except_nn.bat b/.jenkins/pytorch/win-test-helpers/test_python_all_except_nn.bat
index b0be5f4883b1c..042d116ff570c 100644
--- a/.jenkins/pytorch/win-test-helpers/test_python_all_except_nn.bat
+++ b/.jenkins/pytorch/win-test-helpers/test_python_all_except_nn.bat
@@ -1,3 +1,3 @@
 call %SCRIPT_HELPERS_DIR%\setup_pytorch_env.bat
-cd test && python run_test.py --exclude test_jit_profiling test_jit_legacy test_jit_fuser_legacy test_jit_fuser_profiling test_jit_fuser_te test_tensorexpr --verbose --determine-from="%1" && cd ..
+cd test && python run_test.py --exclude test_jit_legacy test_jit_fuser_legacy --verbose --determine-from="%1" && cd ..
 if ERRORLEVEL 1 exit /b 1
diff --git a/test/run_test.py b/test/run_test.py
index 2c32bc52af113..00d1a0c5989b3 100755
--- a/test/run_test.py
+++ b/test/run_test.py
@@ -57,10 +57,9 @@
     'test_type_hints',
     'test_utils',
     'test_namedtuple_return_api',
-    'test_jit_profiling',
+    'test_jit_simple',
     'test_jit_legacy',
     'test_jit_fuser_legacy',
-    'test_jit_fuser_profiling',
     'test_tensorboard',
     'test_namedtensor',
     'test_type_promotion',
diff --git a/test/test_jit_fuser_profiling.py b/test/test_jit_fuser_profiling.py
deleted file mode 100644
index a25839b4eb0d0..0000000000000
--- a/test/test_jit_fuser_profiling.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import sys
-sys.argv.append("--ge_config=profiling")
-from test_jit_fuser import *
-
-if __name__ == '__main__':
-    run_tests()
diff --git a/test/test_jit_profiling.py b/test/test_jit_profiling.py
deleted file mode 100644
index be02985e69a80..0000000000000
--- a/test/test_jit_profiling.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import sys
-sys.argv.append("--ge_config=profiling")
-from test_jit import *
-
-if __name__ == '__main__':
-    run_tests()
-    if not PY2:
-        import test_jit_py3
-        suite = unittest.findTestCases(test_jit_py3)
-        unittest.TextTestRunner().run(suite)
diff --git a/torch/csrc/jit/passes/tensorexpr_fuser.cpp b/torch/csrc/jit/passes/tensorexpr_fuser.cpp
index 84d2d76076622..ad6636b1c42f7 100644
--- a/torch/csrc/jit/passes/tensorexpr_fuser.cpp
+++ b/torch/csrc/jit/passes/tensorexpr_fuser.cpp
@@ -13,7 +13,7 @@ namespace torch {
 namespace jit {
 
-static bool texpr_fuser_enabled_ = false;
+static bool texpr_fuser_enabled_ = true;
 void setTensorExprFuserEnabled(bool val) {
   texpr_fuser_enabled_ = val;
 }
@@ -266,9 +266,9 @@ std::pair<graph_node_list::iterator, bool> scanNode(
 }
 
 void fuseTensorExprs(std::shared_ptr<Graph>& graph) {
-  //if (!tensorExprFuserEnabled()) {
-  //  return;
-  //}
+  if (!tensorExprFuserEnabled()) {
+    return;
+  }
   GRAPH_DUMP("Before TExprFuser: ", graph);
 
   // Get rid of dead code so that we don't waste effort fusing it.
diff --git a/torch/csrc/jit/runtime/graph_executor.cpp b/torch/csrc/jit/runtime/graph_executor.cpp
index 737b9c97a6a37..0bc41c2bb2744 100644
--- a/torch/csrc/jit/runtime/graph_executor.cpp
+++ b/torch/csrc/jit/runtime/graph_executor.cpp
@@ -779,8 +779,7 @@ void runNondiffOptimization(
   // Fuse the dequant - op - quant patterns into quantized ops
   QuantFusion(graph);
 
-  //FuseGraph(graph, strict_fuser_check);
-  // strict_fuser_check is synomous with ProfilingExecutor on
+  // strict_fuser_check is synonymous with ProfilingExecutor on
   // if `strict_fuser_check` is set to `true`, run TE by default
   // otherwise fallback to the legacy executor and legacy fuser
   if (strict_fuser_check) {
diff --git a/torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp b/torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp
index 45cdbd686bc07..a7c20284d8e49 100644
--- a/torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp
+++ b/torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp
@@ -39,7 +39,7 @@
 static std::atomic<bool> executor_mode{true};
 static std::atomic<bool> profiling_mode{false};
 #else
 static std::atomic<bool> executor_mode{true};
-static std::atomic<bool> profiling_mode{false};
+static std::atomic<bool> profiling_mode{true};
 #endif
 static std::atomic<size_t> num_profiled_runs{1};
diff --git a/torch/csrc/jit/tensorexpr/kernel.cpp b/torch/csrc/jit/tensorexpr/kernel.cpp
index af9898c0718b5..826799a196e17 100644
--- a/torch/csrc/jit/tensorexpr/kernel.cpp
+++ b/torch/csrc/jit/tensorexpr/kernel.cpp
@@ -397,17 +397,14 @@ Tensor* TensorExprKernel::computeFourOperand(
 Tensor* TensorExprKernel::computeValue(const torch::jit::Value* v) {
   switch (v->node()->kind()) {
     case aten::add: {
-      if (v->node()->inputs().size () > 2){
-        return computeTwoOperandWithAlpha(
-            "aten_add", v, [](const ExprHandle& lhs, const ExprHandle& rhs) {
-              return lhs + rhs;
-            });
-      }else{
-        return computeTwoOperand(
-            "aten_add", v, [](const ExprHandle& lhs, const ExprHandle& rhs) {
-              return lhs + rhs;
-            });
-      }
+      auto add_lambda = [](const ExprHandle& lhs, const ExprHandle& rhs) {
+        return lhs + rhs;
+      };
+      TORCH_INTERNAL_ASSERT(
+          v->node()->inputs().size() == 2 || v->node()->inputs().size() == 3);
+      return (v->node()->inputs().size() > 2)
+          ? computeTwoOperandWithAlpha("aten_add", v, add_lambda)
+          : computeTwoOperand("aten_add", v, add_lambda);
     } break;
 
     case aten::_cast_Float: {
diff --git a/torch/testing/_internal/common_utils.py b/torch/testing/_internal/common_utils.py
index 4ac7778a23c3d..db45db0590610 100644
--- a/torch/testing/_internal/common_utils.py
+++ b/torch/testing/_internal/common_utils.py
@@ -105,10 +105,10 @@ def prof_meth_call(*args, **kwargs):
 args, remaining = parser.parse_known_args()
 if args.ge_config == 'legacy':
     GRAPH_EXECUTOR = ProfilingMode.LEGACY
-elif args.ge_config == 'profiling':
-    GRAPH_EXECUTOR = ProfilingMode.PROFILING
-else:
+elif args.ge_config == 'simple':
     GRAPH_EXECUTOR = ProfilingMode.SIMPLE
+else:
+    GRAPH_EXECUTOR = ProfilingMode.PROFILING
 
 TEST_BAILOUTS = args.test_bailouts
 TEST_IN_SUBPROCESS = args.subprocess
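Net effect of the patch: the profiling graph executor and the tensorexpr (TE) fuser become the defaults, and the previous behaviors stay reachable through --ge_config=simple / --ge_config=legacy in the test harness. Below is a minimal sketch, not part of the patch, of how the flipped defaults could be exercised and reverted from Python, assuming the usual JIT toggles exposed through torch._C; the toy function f is hypothetical.

import torch

@torch.jit.script
def f(a, b):
    # a pointwise chain the TE fuser is expected to fuse into one kernel
    return (a + b) * a

a, b = torch.randn(1024), torch.randn(1024)
for _ in range(3):
    f(a, b)  # the profiling executor specializes after the profiled run(s)

# Opting back out of the defaults this patch flips on:
torch._C._jit_set_profiling_mode(False)       # profiling_mode back to false
torch._C._jit_set_texpr_fuser_enabled(False)  # texpr_fuser_enabled_ back to false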