From 33e82c02698ee9c57b8e7030582a5ec31c0e9049 Mon Sep 17 00:00:00 2001
From: Ailing Zhang
Date: Wed, 21 Oct 2020 19:35:11 -0700
Subject: [PATCH 1/3] Update error message to include link to readme. (#46613)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/46613

Test Plan: CI

Reviewed By: ezyang

Differential Revision: D24430852

fbshipit-source-id: 811e4d10508d47ef830d2b8445f11592f342461f
---
 tools/autograd/gen_variable_type.py | 7 +++++--
 tools/codegen/gen.py                | 5 ++++-
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/tools/autograd/gen_variable_type.py b/tools/autograd/gen_variable_type.py
index 480435054dd5..aa65a68dff87 100644
--- a/tools/autograd/gen_variable_type.py
+++ b/tools/autograd/gen_variable_type.py
@@ -723,8 +723,11 @@ def gen_variable_type_shard(out, aten_declarations, template_path, suffix, heade
         # If you want to register a kernel to Autograd, you must make the op abstract.
         # In other words, this op must have dispatch section in native_functions.yaml.
         if declaration['name'] in MANUAL_AUTOGRAD_AND_TRACER or declaration['derivative']:
-            msg = (f'Did you add a formula for {declaration["name"]}(or its functional variant) in derivatives.yaml?'
-                   f'If so please add a dispatch section for it with DefaultBackend in native_functions.yaml.')
+            msg = (f'There\'s a formula for {declaration["name"]}(or its functional variant) in derivatives.yaml. '
+                   f'It\'s required to add a dispatch section for it with explicit supported backends e.g CPU/CUDA '
+                   f'or DefaultBackend in native_functions.yaml. Please see '
+                   f'https://github.com/pytorch/pytorch/tree/master/aten/src/ATen/native#choosing-the-right-dispatch-keyword '
+                   f'for instructions to choose the right dispatch keyword.')
             assert declaration['abstract'], msg
 
         # Emit TraceType code
diff --git a/tools/codegen/gen.py b/tools/codegen/gen.py
index b1a690b795a6..1db3b93012f6 100644
--- a/tools/codegen/gen.py
+++ b/tools/codegen/gen.py
@@ -763,6 +763,9 @@ def compute_declaration_yaml(f: NativeFunction) -> object:
     is_factory_method = any(isinstance(a.argument, TensorOptionsArguments) for a in cpp_args) \
         and Variant.method not in f.variants
 
+    # Having only Math in dispatch section is equivalent to no dispatch section.
+    is_abstract = f.dispatch is not None and set(f.dispatch.keys()) != set({'Math'})  # type: ignore
+
     return OrderedDict([
         ('name', cpp.name(f.func)),
         ('operator_name', str(f.func.name.name)),
@@ -796,7 +799,7 @@ def compute_declaration_yaml(f: NativeFunction) -> object:
         # for the entry or not (as this affects whether or not the operation is
         # overrideable or not.) Once this all gets cleaned up, this
         # property will be obsolete.
-        ('abstract', f.dispatch is not None),
+        ('abstract', is_abstract),
         ('device_guard', f.device_guard),
         ('with_gil', False),
         ('deprecated', False),

From adbb50ea67ab87f2dd4a09698d2e229a83877e09 Mon Sep 17 00:00:00 2001
From: Rahul Nambiar
Date: Wed, 21 Oct 2020 19:59:47 -0700
Subject: [PATCH 2/3] Enabling alias annotation checks for all operations during autograd tests (#46601)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/46601

* except excluded tests and magic methods.

https://github.com/pytorch/pytorch/issues/38731

Previously, we'd only run these tests for inplace operations. Since this
covers a lot more tests, the following issues that came up while running
them were fixed:
- Updated the schema of conj() to reflect existing behaviour.
- Updated the deepEquals method in check_alias_annotation.cpp to re-use
  the overloaded == operator. The previous implementation did not cover
  all types of IValues.
- Corrected the order in which inputs are passed during autograd testing
  of 'view' & 'reshape'.
- Subbed out aten::ger with the func it's aliased to, aten::outer, for
  testing. The alias annotation checking code doesn't handle aliased
  operators properly.

ghstack-source-id: 114830903

Test Plan: Ran all tests in test:jit and verified they pass.

Reviewed By: eellison

Differential Revision: D24424955

fbshipit-source-id: 382d7e2585911b81b1573f21fff1d54a5e9a2054
---
 aten/src/ATen/native/native_functions.yaml  |  2 +-
 .../check_backward_compatibility.py         |  1 +
 test/test_jit.py                            |  2 +-
 torch/csrc/jit/passes/normalize_ops.cpp     | 55 ++++++++++---------
 torch/csrc/jit/passes/normalize_ops.h       |  2 +
 .../passes/utils/check_alias_annotation.cpp | 23 ++++----
 .../_internal/common_methods_invocations.py |  4 +-
 7 files changed, 49 insertions(+), 40 deletions(-)

diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index 8b668d827120..cf0defa32454 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -314,7 +314,7 @@
   use_c10_dispatcher: full
   variants: function
 
-- func: conj(Tensor self) -> Tensor
+- func: conj(Tensor(a) self) -> Tensor(a)
   use_c10_dispatcher: full
   variants: function, method
diff --git a/test/backward_compatibility/check_backward_compatibility.py b/test/backward_compatibility/check_backward_compatibility.py
index 10cd793f96c9..73f77d698bc1 100644
--- a/test/backward_compatibility/check_backward_compatibility.py
+++ b/test/backward_compatibility/check_backward_compatibility.py
@@ -128,6 +128,7 @@
     ("aten::_foreach_addcdiv_", datetime.date(2020, 10, 15)),
     ("aten::_foreach_addcdiv", datetime.date(2020, 10, 15)),
     ("aten::_foreach_addcmul", datetime.date(2020, 10, 15)),
+    ("aten::conj", datetime.date(2020, 11, 10)),
 ]
 
 def allow_listed(schema, allow_list):
diff --git a/test/test_jit.py b/test/test_jit.py
index e36558207a2f..f7583432adaf 100644
--- a/test/test_jit.py
+++ b/test/test_jit.py
@@ -15734,7 +15734,7 @@ def fn(*inputs, **kwargs):
                                         check_types=check_types)
 
                 # alias annotation testing
-                if is_inplace and test_name not in EXCLUDE_SCRIPT:
+                if not is_magic_method and test_name not in EXCLUDE_SCRIPT:
                     check_alias_annotation(name, (self_variable,) + args_variable, kwargs_variable)
 
             check(name)
diff --git a/torch/csrc/jit/passes/normalize_ops.cpp b/torch/csrc/jit/passes/normalize_ops.cpp
index 75ad2a4499ce..9d9ac0203b90 100644
--- a/torch/csrc/jit/passes/normalize_ops.cpp
+++ b/torch/csrc/jit/passes/normalize_ops.cpp
@@ -6,30 +6,6 @@ namespace jit {
 
 namespace {
 
-// map from op alias -> normalized op
-static const std::unordered_map<Symbol, Symbol> alias_map = {
-    {aten::absolute, aten::abs}, {aten::absolute_, aten::abs_},
-    {aten::clip, aten::clamp}, {aten::clip_, aten::clamp_},
-    {aten::linalg_det, aten::det}, {aten::ger, aten::outer},
-    {aten::arccos, aten::acos}, {aten::arccos_, aten::acos_},
-    {aten::arcsin, aten::asin}, {aten::arcsin_, aten::asin_},
-    {aten::arctan, aten::atan}, {aten::arctan_, aten::atan_},
-    {aten::arccosh, aten::acosh}, {aten::arccosh_, aten::acosh_},
-    {aten::arcsinh, aten::asinh}, {aten::arcsinh_, aten::asinh_},
-    {aten::arctanh, aten::atanh}, {aten::arctanh_, aten::atanh_},
-    {aten::fix, aten::trunc}, {aten::fix_, aten::trunc_},
-    {aten::negative, aten::neg}, {aten::negative_, aten::neg_},
-    {aten::subtract, aten::sub}, {aten::subtract_, aten::sub_},
-    {aten::greater_equal, aten::ge}, {aten::greater_equal_, aten::ge_},
-    {aten::greater, aten::gt}, {aten::greater_, aten::gt_},
-    {aten::less_equal, aten::le}, {aten::less_equal_, aten::le_},
-    {aten::less, aten::lt}, {aten::less_, aten::lt_},
-    {aten::not_equal, aten::ne}, {aten::not_equal_, aten::ne_},
-    {aten::divide, aten::div}, {aten::divide_, aten::div_},
-    {aten::multiply, aten::mul}, {aten::multiply_, aten::mul_},
-    {aten::true_divide, aten::div}, {aten::true_divide_, aten::div_},
-};
-
 void replaceNodeWithNewSymbol(Node* node, Symbol new_symbol) {
   WithInsertPoint insert_guard{node};
   auto graph = node->owningGraph();
@@ -53,8 +29,8 @@ void replaceNodeWithNewSymbol(Node* node, Symbol new_symbol) {
 // difficult to consumer for downstream user of the IR, such as our own
 // optimization passes here, we convert op aliases into a standard form
 bool normalizeOpAliases(graph_node_list_iterator& iter) {
-  auto alias = alias_map.find(iter->kind());
-  if (alias != alias_map.end()) {
+  auto alias = getOperatorAliasMap().find(iter->kind());
+  if (alias != getOperatorAliasMap().end()) {
     replaceNodeWithNewSymbol(*iter, alias->second);
     iter.destroyCurrent();
     return true;
@@ -79,6 +55,33 @@ void NormalizeOps(Block* block) {
 
 } // namespace
 
+const std::unordered_map<Symbol, Symbol>& getOperatorAliasMap() {
+  // map from op alias -> normalized op
+  static const std::unordered_map<Symbol, Symbol> alias_map = {
+      {aten::absolute, aten::abs}, {aten::absolute_, aten::abs_},
+      {aten::clip, aten::clamp}, {aten::clip_, aten::clamp_},
+      {aten::linalg_det, aten::det}, {aten::ger, aten::outer},
+      {aten::arccos, aten::acos}, {aten::arccos_, aten::acos_},
+      {aten::arcsin, aten::asin}, {aten::arcsin_, aten::asin_},
+      {aten::arctan, aten::atan}, {aten::arctan_, aten::atan_},
+      {aten::arccosh, aten::acosh}, {aten::arccosh_, aten::acosh_},
+      {aten::arcsinh, aten::asinh}, {aten::arcsinh_, aten::asinh_},
+      {aten::arctanh, aten::atanh}, {aten::arctanh_, aten::atanh_},
+      {aten::fix, aten::trunc}, {aten::fix_, aten::trunc_},
+      {aten::negative, aten::neg}, {aten::negative_, aten::neg_},
+      {aten::subtract, aten::sub}, {aten::subtract_, aten::sub_},
+      {aten::greater_equal, aten::ge}, {aten::greater_equal_, aten::ge_},
+      {aten::greater, aten::gt}, {aten::greater_, aten::gt_},
+      {aten::less_equal, aten::le}, {aten::less_equal_, aten::le_},
+      {aten::less, aten::lt}, {aten::less_, aten::lt_},
+      {aten::not_equal, aten::ne}, {aten::not_equal_, aten::ne_},
+      {aten::divide, aten::div}, {aten::divide_, aten::div_},
+      {aten::multiply, aten::mul}, {aten::multiply_, aten::mul_},
+      {aten::true_divide, aten::div}, {aten::true_divide_, aten::div_},
+  };
+  return alias_map;
+}
+
 void NormalizeOps(const std::shared_ptr<Graph>& graph) {
   NormalizeOps(graph->block());
 }
diff --git a/torch/csrc/jit/passes/normalize_ops.h b/torch/csrc/jit/passes/normalize_ops.h
index 03963cd26a8b..4d630392ca47 100644
--- a/torch/csrc/jit/passes/normalize_ops.h
+++ b/torch/csrc/jit/passes/normalize_ops.h
@@ -12,5 +12,7 @@ namespace jit {
 
 // Currently only handles normalization of op aliases.
 TORCH_API void NormalizeOps(const std::shared_ptr<Graph>& graph);
 
+const std::unordered_map<Symbol, Symbol>& getOperatorAliasMap();
+
 } // namespace jit
 } // namespace torch
diff --git a/torch/csrc/jit/passes/utils/check_alias_annotation.cpp b/torch/csrc/jit/passes/utils/check_alias_annotation.cpp
index 20e4c6874e68..40aa7db7e5e0 100644
--- a/torch/csrc/jit/passes/utils/check_alias_annotation.cpp
+++ b/torch/csrc/jit/passes/utils/check_alias_annotation.cpp
@@ -1,5 +1,6 @@
 #include 
 #include 
+#include <torch/csrc/jit/passes/normalize_ops.h>
 #include 
 
 namespace torch {
@@ -61,19 +62,11 @@ Stack deepCopy(const Stack& stack) {
 }
 
 bool deepEquals(const IValue& lhs, const IValue& rhs) {
-  if (lhs.isInt() && rhs.isInt()) {
-    return lhs.toInt() == rhs.toInt();
-  } else if (lhs.isDouble() && rhs.isDouble()) {
-    return lhs.toDouble() == rhs.toDouble();
-  } else if (lhs.isNone() && rhs.isNone()) {
-    return true;
-  } else if (lhs.isIntList() && rhs.isIntList()) {
-    return lhs.toIntVector() == rhs.toIntVector();
-  } else if (lhs.isTensor() && rhs.isTensor()) {
+  if (lhs.isTensor() && rhs.isTensor()) {
     return lhs.toTensor().equal(rhs.toTensor());
   }
 
-  throw std::runtime_error("Deep equals not implemented for type");
+  return lhs == rhs;
 }
 
 struct AliasAndIValue {
@@ -146,6 +139,16 @@ const Node* findNodeForOp(
       return node;
     }
   }
+
+  // Check for alias-ed operator names
+  const auto aliasOp = torch::jit::getOperatorAliasMap().find(opName);
+  AT_ASSERT(aliasOp != torch::jit::getOperatorAliasMap().end());
+  for (const auto node : g.nodes()) {
+    if (node->kind() == aliasOp->second) {
+      return node;
+    }
+  }
+
   AT_ASSERT(false);
 }
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index 1c7b43d0be90..4152d0aa0ce3 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -586,13 +586,13 @@ def method_tests():
         ('transpose', (S, S, S), (2, 0), '3d', (False,)),
         ('t', (1, 2), NO_ARGS, '', (False,)),
         ('view', (S, S, S), (S * S, S), '', (False,)),
-        ('view', (S, S, S), (torch.Size([S * S, S]),), 'size', (False,)),
+        ('view', (torch.Size([S * S, S]),), (S, S, S), 'size', (False,)),
         ('view', (S,), (S,), '1d', (False,)),
         ('view', (), (dont_convert(()),), 'scalar_to_scalar', (False,)),
         ('view', (), (1,), 'scalar_to_1d', (False,)),
         ('ravel', (S, S, S), NO_ARGS, '', (False,)),
         ('reshape', (S, S, S), (S * S, S), '', (False,)),
-        ('reshape', (S, S, S), (torch.Size([S * S, S]),), 'size', (False,)),
+        ('reshape', (torch.Size([S * S, S]),), (S, S, S), 'size', (False,)),
         ('reshape', (S,), (S,), '1d', (False,)),
         ('reshape', (), (dont_convert(()),), 'scalar_to_scalar', (False,)),
         ('reshape', (), (1,), 'scalar_to_1d', (False,)),

From db83ddcb8624a9eaae28f559e1b24d3621713c79 Mon Sep 17 00:00:00 2001
From: Brian Hirsh
Date: Wed, 21 Oct 2020 20:15:25 -0700
Subject: [PATCH 3/3] small doc fix (#46599)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/46599

Test Plan: Imported from OSS

Reviewed By: mrshenli

Differential Revision: D24426181

Pulled By: bdhirsh

fbshipit-source-id: d0900d5c43574c80f1bf614824eafd21ba6a9caf
---
 torch/distributed/distributed_c10d.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/torch/distributed/distributed_c10d.py b/torch/distributed/distributed_c10d.py
index 417116b621d5..4e68e402428f 100644
--- a/torch/distributed/distributed_c10d.py
+++ b/torch/distributed/distributed_c10d.py
@@ -44,7 +44,7 @@
 except ImportError:
     _GLOO_AVAILABLE = False
 
-# Some reduce ops are not supported by complex numbers.
+# Some reduce ops are not supported by complex numbers and will result in an error. # We currently provide complex support to the distributed API by viewing # complex tensors as real (torch.view_as_real), meaning that calling # these unsupported ops will return garbage values rather than error out.
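
Note: the "abstract" rule that PATCH 1/3 adds to tools/codegen/gen.py can be
summarized with a small standalone sketch. The helper name is_abstract and the
literal dispatch dicts below are illustrative assumptions, not code from the
patch; in the patch the same expression lives inline in
compute_declaration_yaml and reads the dispatch section parsed from
native_functions.yaml.

    # An operator is considered abstract only if its dispatch section
    # registers something beyond the Math alias key; a Math-only section is
    # treated the same as having no dispatch section at all.
    def is_abstract(dispatch):
        return dispatch is not None and set(dispatch.keys()) != {'Math'}

    assert is_abstract({'CPU': 'conj_cpu', 'CUDA': 'conj_cuda'})  # explicit backends
    assert is_abstract({'DefaultBackend': 'conj'})                # DefaultBackend also counts
    assert not is_abstract({'Math': 'conj'})                      # Math only -> not abstract
    assert not is_abstract(None)                                  # no dispatch section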