From a1de00dce08ea37b0ed3de71e3e87722073b5949 Mon Sep 17 00:00:00 2001
From: Meghan Lele
Date: Tue, 29 Sep 2020 10:07:03 -0700
Subject: [PATCH] [pytorch] Replace "blacklist" in test/test_mobile_optimizer.py

Summary: This diff addresses https://github.com/pytorch/pytorch/issues/41443.
It is a clone of D23205313 which could not be imported from GitHub for strange
reasons.

Test Plan: Continuous integration.

Reviewed By: AshkanAliabadi

Differential Revision: D23967322

fbshipit-source-id: 1140c9de3c58fd155e40f4e21c7cdf9d927b2ad2
---
 test/test_mobile_optimizer.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/test/test_mobile_optimizer.py b/test/test_mobile_optimizer.py
index eae6175fb024..897c76d8c814 100644
--- a/test/test_mobile_optimizer.py
+++ b/test/test_mobile_optimizer.py
@@ -100,8 +100,8 @@ def forward(self, x):
         torch.testing.assert_allclose(initial_result, optimized_result, rtol=1e-2, atol=1e-3)
 
-        optimization_blacklist_no_prepack = {MobileOptimizerType.INSERT_FOLD_PREPACK_OPS}
-        optimized_scripted_model_no_prepack = optimize_for_mobile(scripted_model, optimization_blacklist_no_prepack)
+        optimization_blocklist_no_prepack = {MobileOptimizerType.INSERT_FOLD_PREPACK_OPS}
+        optimized_scripted_model_no_prepack = optimize_for_mobile(scripted_model, optimization_blocklist_no_prepack)
         optimized_result_no_prepack = optimized_scripted_model_no_prepack(input_data)
 
         FileCheck().check_count("Tensor = aten::conv2d", 1, exactly=True) \
@@ -118,14 +118,14 @@ def forward(self, x):
         FileCheck().check_count("prim::CallMethod[name=\"forward\"]", 2, exactly=True) \
             .run(str(get_forward(bn_scripted_module._c).graph))
 
-        optimization_blacklist_no_prepack = {MobileOptimizerType.INSERT_FOLD_PREPACK_OPS}
-        bn_fold_scripted_module = optimize_for_mobile(bn_scripted_module, optimization_blacklist_no_prepack)
+        optimization_blocklist_no_prepack = {MobileOptimizerType.INSERT_FOLD_PREPACK_OPS}
+        bn_fold_scripted_module = optimize_for_mobile(bn_scripted_module, optimization_blocklist_no_prepack)
         self.assertEqual(len(torch.jit.export_opnames(bn_fold_scripted_module)), 1)
         bn_input = torch.rand(1, 1, 6, 6)
         torch.testing.assert_allclose(bn_scripted_module(bn_input), bn_fold_scripted_module(bn_input), rtol=1e-2, atol=1e-3)
 
-        optimization_blacklist_no_fold_bn = {MobileOptimizerType.CONV_BN_FUSION}
-        no_bn_fold_scripted_module = optimize_for_mobile(bn_scripted_module, optimization_blacklist_no_fold_bn)
+        optimization_blocklist_no_fold_bn = {MobileOptimizerType.CONV_BN_FUSION}
+        no_bn_fold_scripted_module = optimize_for_mobile(bn_scripted_module, optimization_blocklist_no_fold_bn)
         FileCheck().check_count("aten::batch_norm", 1, exactly=True) \
             .run(str(get_forward_graph(no_bn_fold_scripted_module._c)))
         bn_input = torch.rand(1, 1, 6, 6)