Update on "[py][vulkan][reland] Add is_vulkan to py api, add vulkan to device type parsing"


Summary:

Test Plan: Imported from OSS

Pulled By: IvanKobzarev

Differential Revision: [D24448984](https://our.internmc.facebook.com/intern/diff/D24448984)

Reland of the PR: #46511

The initial PR broke tests because they assert on the error message that this PR changes:

torch/testing/_internal/distributed/nn/api/remote_module_test.py

In this PR the test is updated accordingly.

[ghstack-poisoned]
IvanKobzarev committed Oct 22, 2020
2 parents 98ba6b5 + db83ddc commit 01f367f
Showing 10 changed files with 59 additions and 44 deletions.
2 changes: 1 addition & 1 deletion aten/src/ATen/native/native_functions.yaml
@@ -314,7 +314,7 @@
use_c10_dispatcher: full
variants: function

-- func: conj(Tensor self) -> Tensor
+- func: conj(Tensor(a) self) -> Tensor(a)
use_c10_dispatcher: full
variants: function, method

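Note on the schema change above: the Tensor(a) annotation tells the JIT's alias analysis that the returned tensor may share storage with self. A minimal Python illustration, assuming the behavior at this revision where conj on a real tensor may simply return self (the aliasing is permitted, not guaranteed):

    import torch

    x = torch.randn(3)   # real-valued input, so conj has nothing to compute
    y = x.conj()
    # With the Tensor(a) annotation, y is allowed to alias x's storage;
    # whether it actually does is implementation-defined.
    print(y.data_ptr() == x.data_ptr())
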
test/backward_compatibility/check_backward_compatibility.py
@@ -128,6 +128,7 @@
("aten::_foreach_addcdiv_", datetime.date(2020, 10, 15)),
("aten::_foreach_addcdiv", datetime.date(2020, 10, 15)),
("aten::_foreach_addcmul", datetime.date(2020, 10, 15)),
("aten::conj", datetime.date(2020, 11, 10)),
]

def allow_listed(schema, allow_list):
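
The body of allow_listed is elided above. Purely as an illustration of how a date-gated allowlist check could work (the names and details here are hypothetical, not the actual implementation):

    import datetime
    import re

    def allow_listed_sketch(schema_name, allow_list):
        # An entry suppresses the backward-compatibility check for matching
        # operator names until its expiry date passes.
        today = datetime.date.today()
        return any(
            re.match(pattern, schema_name) and today < expiry
            for pattern, expiry in allow_list
        )

    print(allow_listed_sketch(
        "aten::conj", [("aten::conj", datetime.date(2020, 11, 10))]))
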
2 changes: 1 addition & 1 deletion test/test_jit.py
@@ -15734,7 +15734,7 @@ def fn(*inputs, **kwargs):
check_types=check_types)

# alias annotation testing
-if is_inplace and test_name not in EXCLUDE_SCRIPT:
+if not is_magic_method and test_name not in EXCLUDE_SCRIPT:
check_alias_annotation(name, (self_variable,) + args_variable, kwargs_variable)

check(name)
7 changes: 5 additions & 2 deletions tools/autograd/gen_variable_type.py
@@ -723,8 +723,11 @@ def gen_variable_type_shard(out, aten_declarations, template_path, suffix, heade
# If you want to register a kernel to Autograd, you must make the op abstract.
# In other words, this op must have dispatch section in native_functions.yaml.
if declaration['name'] in MANUAL_AUTOGRAD_AND_TRACER or declaration['derivative']:
-msg = (f'Did you add a formula for {declaration["name"]}(or its functional variant) in derivatives.yaml?'
-f'If so please add a dispatch section for it with DefaultBackend in native_functions.yaml.')
+msg = (f'There\'s a formula for {declaration["name"]}(or its functional variant) in derivatives.yaml. '
+f'It\'s required to add a dispatch section for it with explicit supported backends e.g CPU/CUDA '
+f'or DefaultBackend in native_functions.yaml. Please see '
+f'https://github.com/pytorch/pytorch/tree/master/aten/src/ATen/native#choosing-the-right-dispatch-keyword '
+f'for instructions to choose the right dispatch keyword.')
assert declaration['abstract'], msg

# Emit TraceType code
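
The reworded message above guards a codegen invariant: an op with a derivative formula must be abstract. A condensed sketch of that check (hypothetical names, mirroring the logic in the diff):

    def assert_dispatch_for_derivative_sketch(name, has_derivative, is_abstract):
        # An op with a formula in derivatives.yaml (or a manual autograd
        # registration) must carry an explicit dispatch section.
        if has_derivative and not is_abstract:
            raise AssertionError(
                f"There's a formula for {name} in derivatives.yaml; add a "
                f"dispatch section with explicit backends (e.g. CPU/CUDA) or "
                f"DefaultBackend in native_functions.yaml.")
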
5 changes: 4 additions & 1 deletion tools/codegen/gen.py
@@ -763,6 +763,9 @@ def compute_declaration_yaml(f: NativeFunction) -> object:
is_factory_method = any(isinstance(a.argument, TensorOptionsArguments) for a in cpp_args) \
and Variant.method not in f.variants

+# Having only Math in dispatch section is equivalent to no dispatch section.
+is_abstract = f.dispatch is not None and set(f.dispatch.keys()) != set({'Math'})  # type: ignore

return OrderedDict([
('name', cpp.name(f.func)),
('operator_name', str(f.func.name.name)),
@@ -796,7 +799,7 @@ def compute_declaration_yaml(f: NativeFunction) -> object:
# for the entry or not (as this affects whether or not the operation is
# overrideable or not.) Once this all gets cleaned up, this
# property will be obsolete.
-('abstract', f.dispatch is not None),
+('abstract', is_abstract),
('device_guard', f.device_guard),
('with_gil', False),
('deprecated', False),
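
The is_abstract change above encodes a simple rule: a dispatch table whose only entry is Math behaves the same as having no dispatch table at all. A small sketch of the predicate, mirroring the diff:

    def is_abstract_sketch(dispatch):
        # No dispatch section, or a Math-only one, means the op is not abstract.
        return dispatch is not None and set(dispatch.keys()) != {'Math'}

    print(is_abstract_sketch(None))            # False: no dispatch section
    print(is_abstract_sketch({'Math': 'f'}))   # False: Math-only is equivalent
    print(is_abstract_sketch({'CPU': 'f'}))    # True: explicit backend kernel
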
55 changes: 29 additions & 26 deletions torch/csrc/jit/passes/normalize_ops.cpp
@@ -6,30 +6,6 @@ namespace jit {

namespace {

-// map from op alias -> normalized op
-static const std::unordered_map<Symbol, Symbol> alias_map = {
-{aten::absolute, aten::abs}, {aten::absolute_, aten::abs_},
-{aten::clip, aten::clamp}, {aten::clip_, aten::clamp_},
-{aten::linalg_det, aten::det}, {aten::ger, aten::outer},
-{aten::arccos, aten::acos}, {aten::arccos_, aten::acos_},
-{aten::arcsin, aten::asin}, {aten::arcsin_, aten::asin_},
-{aten::arctan, aten::atan}, {aten::arctan_, aten::atan_},
-{aten::arccosh, aten::acosh}, {aten::arccosh_, aten::acosh_},
-{aten::arcsinh, aten::asinh}, {aten::arcsinh_, aten::asinh_},
-{aten::arctanh, aten::atanh}, {aten::arctanh_, aten::atanh_},
-{aten::fix, aten::trunc}, {aten::fix_, aten::trunc_},
-{aten::negative, aten::neg}, {aten::negative_, aten::neg_},
-{aten::subtract, aten::sub}, {aten::subtract_, aten::sub_},
-{aten::greater_equal, aten::ge}, {aten::greater_equal_, aten::ge_},
-{aten::greater, aten::gt}, {aten::greater_, aten::gt_},
-{aten::less_equal, aten::le}, {aten::less_equal_, aten::le_},
-{aten::less, aten::lt}, {aten::less_, aten::lt_},
-{aten::not_equal, aten::ne}, {aten::not_equal_, aten::ne_},
-{aten::divide, aten::div}, {aten::divide_, aten::div_},
-{aten::multiply, aten::mul}, {aten::multiply_, aten::mul_},
-{aten::true_divide, aten::div}, {aten::true_divide_, aten::div_},
-};

void replaceNodeWithNewSymbol(Node* node, Symbol new_symbol) {
WithInsertPoint insert_guard{node};
auto graph = node->owningGraph();
@@ -53,8 +29,8 @@ void replaceNodeWithNewSymbol(Node* node, Symbol new_symbol) {
// difficult to consumer for downstream user of the IR, such as our own
// optimization passes here, we convert op aliases into a standard form
bool normalizeOpAliases(graph_node_list_iterator& iter) {
-auto alias = alias_map.find(iter->kind());
-if (alias != alias_map.end()) {
+auto alias = getOperatorAliasMap().find(iter->kind());
+if (alias != getOperatorAliasMap().end()) {
replaceNodeWithNewSymbol(*iter, alias->second);
iter.destroyCurrent();
return true;
@@ -79,6 +55,33 @@ void NormalizeOps(Block* block) {

} // namespace

+const std::unordered_map<Symbol, Symbol>& getOperatorAliasMap() {
+// map from op alias -> normalized op
+static const std::unordered_map<Symbol, Symbol> alias_map = {
+{aten::absolute, aten::abs}, {aten::absolute_, aten::abs_},
+{aten::clip, aten::clamp}, {aten::clip_, aten::clamp_},
+{aten::linalg_det, aten::det}, {aten::ger, aten::outer},
+{aten::arccos, aten::acos}, {aten::arccos_, aten::acos_},
+{aten::arcsin, aten::asin}, {aten::arcsin_, aten::asin_},
+{aten::arctan, aten::atan}, {aten::arctan_, aten::atan_},
+{aten::arccosh, aten::acosh}, {aten::arccosh_, aten::acosh_},
+{aten::arcsinh, aten::asinh}, {aten::arcsinh_, aten::asinh_},
+{aten::arctanh, aten::atanh}, {aten::arctanh_, aten::atanh_},
+{aten::fix, aten::trunc}, {aten::fix_, aten::trunc_},
+{aten::negative, aten::neg}, {aten::negative_, aten::neg_},
+{aten::subtract, aten::sub}, {aten::subtract_, aten::sub_},
+{aten::greater_equal, aten::ge}, {aten::greater_equal_, aten::ge_},
+{aten::greater, aten::gt}, {aten::greater_, aten::gt_},
+{aten::less_equal, aten::le}, {aten::less_equal_, aten::le_},
+{aten::less, aten::lt}, {aten::less_, aten::lt_},
+{aten::not_equal, aten::ne}, {aten::not_equal_, aten::ne_},
+{aten::divide, aten::div}, {aten::divide_, aten::div_},
+{aten::multiply, aten::mul}, {aten::multiply_, aten::mul_},
+{aten::true_divide, aten::div}, {aten::true_divide_, aten::div_},
+};
+return alias_map;
+}

void NormalizeOps(const std::shared_ptr<Graph>& graph) {
NormalizeOps(graph->block());
}
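
The map above is moved behind getOperatorAliasMap() so other code (see check_alias_annotation.cpp below) can reuse it. The effect of normalization, sketched from the Python side (exact graph text may vary between versions):

    import torch

    @torch.jit.script
    def f(x, y):
        return torch.multiply(x, y)   # aten::multiply is an alias of aten::mul

    # After the normalization pass runs, the graph is expected to contain
    # aten::mul rather than aten::multiply.
    print(f.graph)
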
2 changes: 2 additions & 0 deletions torch/csrc/jit/passes/normalize_ops.h
@@ -12,5 +12,7 @@ namespace jit {
// Currently only handles normalization of op aliases.
TORCH_API void NormalizeOps(const std::shared_ptr<Graph>& graph);

+const std::unordered_map<Symbol, Symbol>& getOperatorAliasMap();

} // namespace jit
} // namespace torch
23 changes: 13 additions & 10 deletions torch/csrc/jit/passes/utils/check_alias_annotation.cpp
@@ -1,5 +1,6 @@
#include <torch/csrc/jit/passes/utils/check_alias_annotation.h>
#include <torch/csrc/jit/passes/constant_propagation.h>
+#include <torch/csrc/jit/passes/normalize_ops.h>
#include <torch/csrc/jit/runtime/operator.h>

namespace torch {
@@ -61,19 +62,11 @@ Stack deepCopy(const Stack& stack) {
}

bool deepEquals(const IValue& lhs, const IValue& rhs) {
-if (lhs.isInt() && rhs.isInt()) {
-return lhs.toInt() == rhs.toInt();
-} else if (lhs.isDouble() && rhs.isDouble()) {
-return lhs.toDouble() == rhs.toDouble();
-} else if (lhs.isNone() && rhs.isNone()) {
-return true;
-} else if (lhs.isIntList() && rhs.isIntList()) {
-return lhs.toIntVector() == rhs.toIntVector();
-} else if (lhs.isTensor() && rhs.isTensor()) {
+if (lhs.isTensor() && rhs.isTensor()) {
return lhs.toTensor().equal(rhs.toTensor());
}

-throw std::runtime_error("Deep equals not implemented for type");
+return lhs == rhs;
}

struct AliasAndIValue {
@@ -146,6 +139,16 @@ const Node* findNodeForOp(
return node;
}
}

+// Check for alias-ed operator names
+const auto aliasOp = torch::jit::getOperatorAliasMap().find(opName);
+AT_ASSERT(aliasOp != torch::jit::getOperatorAliasMap().end());
+for (const auto node : g.nodes()) {
+if (node->kind() == aliasOp->second) {
+return node;
+}
+}

AT_ASSERT(false);
}

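The simplification above keeps a special case only for tensors (which need Tensor::equal for element-wise comparison) and defers every other type to IValue's operator==. A rough Python analogue:

    import torch

    def deep_equals_sketch(lhs, rhs):
        # Tensors compare by value; all other types fall back to ==.
        if isinstance(lhs, torch.Tensor) and isinstance(rhs, torch.Tensor):
            return lhs.equal(rhs)
        return lhs == rhs

    print(deep_equals_sketch(torch.ones(2), torch.ones(2)))  # True
    print(deep_equals_sketch([1, 2], [1, 2]))                # True
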
2 changes: 1 addition & 1 deletion torch/distributed/distributed_c10d.py
@@ -44,7 +44,7 @@
except ImportError:
_GLOO_AVAILABLE = False

-# Some reduce ops are not supported by complex numbers.
+# Some reduce ops are not supported by complex numbers and will result in an error.
# We currently provide complex support to the distributed API by viewing
# complex tensors as real (torch.view_as_real), meaning that calling
# these unsupported ops will return garbage values rather than error out.
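
For the comment above: torch.view_as_real reinterprets a complex tensor as a real one with a trailing dimension of size 2, which is how the distributed API currently handles complex inputs. A quick illustration:

    import torch

    z = torch.tensor([1 + 2j, 3 + 4j])
    r = torch.view_as_real(z)
    print(r.shape)   # torch.Size([2, 2]): [real, imag] pairs
    print(r)
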
4 changes: 2 additions & 2 deletions torch/testing/_internal/common_methods_invocations.py
@@ -586,13 +586,13 @@ def method_tests():
('transpose', (S, S, S), (2, 0), '3d', (False,)),
('t', (1, 2), NO_ARGS, '', (False,)),
('view', (S, S, S), (S * S, S), '', (False,)),
-('view', (S, S, S), (torch.Size([S * S, S]),), 'size', (False,)),
+('view', (torch.Size([S * S, S]),), (S, S, S), 'size', (False,)),
('view', (S,), (S,), '1d', (False,)),
('view', (), (dont_convert(()),), 'scalar_to_scalar', (False,)),
('view', (), (1,), 'scalar_to_1d', (False,)),
('ravel', (S, S, S), NO_ARGS, '', (False,)),
('reshape', (S, S, S), (S * S, S), '', (False,)),
-('reshape', (S, S, S), (torch.Size([S * S, S]),), 'size', (False,)),
+('reshape', (torch.Size([S * S, S]),), (S, S, S), 'size', (False,)),
('reshape', (S,), (S,), '1d', (False,)),
('reshape', (), (dont_convert(()),), 'scalar_to_scalar', (False,)),
('reshape', (), (1,), 'scalar_to_1d', (False,)),
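
The 'size' test variants above exercise passing a torch.Size rather than unpacked ints. A minimal standalone example of what they cover:

    import torch

    S = 5
    x = torch.randn(S * S, S)
    y = x.view(torch.Size([S, S, S]))      # view accepts a torch.Size
    z = x.reshape(torch.Size([S, S, S]))
    print(y.shape == z.shape == torch.Size([S, S, S]))  # True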
