From 42ee8e67d26cf920ca7dd0bed2b0b5835e1f355c Mon Sep 17 00:00:00 2001
From: Ivan Kobzarev
Date: Wed, 21 Oct 2020 08:43:22 -0700
Subject: [PATCH 1/2] [py][vulkan][reland] Add is_vulkan to py api, add vulkan
 to device type parsing

Summary:

Test Plan: Imported from OSS

Pulled By: IvanKobzarev

[ghstack-poisoned]
---
 c10/core/Device.cpp                                  |  5 +++--
 tools/pyi/gen_pyi.py                                 |  1 +
 torch/csrc/autograd/python_variable.cpp              | 12 ++++++++++++
 torch/csrc/jit/frontend/sugared_value.cpp            |  1 +
 torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp |  8 ++++++++
 torch/overrides.py                                   |  1 +
 .../distributed/nn/api/remote_module_test.py         |  2 +-
 7 files changed, 27 insertions(+), 3 deletions(-)

diff --git a/c10/core/Device.cpp b/c10/core/Device.cpp
index 60c40b516f45..dbe38e17f39d 100644
--- a/c10/core/Device.cpp
+++ b/c10/core/Device.cpp
@@ -30,7 +30,7 @@
 namespace c10 {
 namespace {
 DeviceType parse_type(const std::string& device_string) {
-  static const std::array<std::pair<std::string, DeviceType>, 10> types = {{
+  static const std::array<std::pair<std::string, DeviceType>, 11> types = {{
       {"cpu", DeviceType::CPU},
       {"cuda", DeviceType::CUDA},
       {"mkldnn", DeviceType::MKLDNN},
@@ -41,6 +41,7 @@
       {"fpga", DeviceType::FPGA},
       {"msnpu", DeviceType::MSNPU},
       {"xla", DeviceType::XLA},
+      {"vulkan", DeviceType::Vulkan},
   }};
   auto device = std::find_if(
       types.begin(),
@@ -52,7 +53,7 @@
     return device->second;
   }
   AT_ERROR(
-      "Expected one of cpu, cuda, mkldnn, opengl, opencl, ideep, hip, msnpu, xla device type at start of device string: ", device_string);
+      "Expected one of cpu, cuda, mkldnn, opengl, opencl, ideep, hip, msnpu, xla, vulkan device type at start of device string: ", device_string);
 }
 } // namespace

diff --git a/tools/pyi/gen_pyi.py b/tools/pyi/gen_pyi.py
index 701fd0652c60..1336621e1d56 100644
--- a/tools/pyi/gen_pyi.py
+++ b/tools/pyi/gen_pyi.py
@@ -577,6 +577,7 @@ def gen_pyi(declarations_path, out):
         'is_quantized': ['is_quantized: _bool'],
         'is_meta': ['is_meta: _bool'],
         'is_mkldnn': ['is_mkldnn: _bool'],
+        'is_vulkan': ['is_vulkan: _bool'],
         'storage_offset': ['def storage_offset(self) -> _int: ...'],
         'to': ['def to(self, dtype: _dtype, non_blocking: _bool=False, copy: _bool=False) -> Tensor: ...',
                'def to(self, device: Optional[Union[_device, str]]=None, dtype: Optional[_dtype]=None, '
diff --git a/torch/csrc/autograd/python_variable.cpp b/torch/csrc/autograd/python_variable.cpp
index d370f81f6c77..f0ab8e8308c6 100644
--- a/torch/csrc/autograd/python_variable.cpp
+++ b/torch/csrc/autograd/python_variable.cpp
@@ -568,6 +568,17 @@ PyObject *THPVariable_is_mkldnn(THPVariable *self, void *unused)
   END_HANDLE_TH_ERRORS
 }

+PyObject *THPVariable_is_vulkan(THPVariable *self, void *unused)
+{
+  HANDLE_TH_ERRORS
+  if (check_has_torch_function((PyObject *)self)) {
+    return handle_torch_function_getter(self, "is_vulkan");
+  }
+  auto& self_ = self->cdata;
+  return torch::autograd::utils::wrap(self_.is_vulkan());
+  END_HANDLE_TH_ERRORS
+}
+
 PyObject *THPVariable_is_quantized(THPVariable *self, void *unused)
 {
   HANDLE_TH_ERRORS
@@ -697,6 +708,7 @@ static struct PyGetSetDef THPVariable_properties[] = {
   {"is_cuda", (getter)THPVariable_is_cuda, nullptr, nullptr, nullptr},
   {"is_sparse", (getter)THPVariable_is_sparse, nullptr, nullptr, nullptr},
   {"is_mkldnn", (getter)THPVariable_is_mkldnn, nullptr, nullptr, nullptr},
+  {"is_vulkan", (getter)THPVariable_is_vulkan, nullptr, nullptr, nullptr},
   {"is_complex", (getter)THPVariable_is_complex, nullptr, nullptr, nullptr},
   {"is_quantized", (getter)THPVariable_is_quantized, nullptr, nullptr, nullptr},
   {"is_meta", (getter)THPVariable_is_meta, nullptr, nullptr, nullptr},
diff --git a/torch/csrc/jit/frontend/sugared_value.cpp b/torch/csrc/jit/frontend/sugared_value.cpp
index f4aed768fbf2..69e86716f72e 100644
--- a/torch/csrc/jit/frontend/sugared_value.cpp
+++ b/torch/csrc/jit/frontend/sugared_value.cpp
@@ -109,6 +109,7 @@ std::shared_ptr<SugaredValue> SimpleValue::attr(
       {"is_sparse", "prim"},
       {"is_mkldnn", "prim"},
       {"is_quantized", "prim"},
+      {"is_vulkan", "prim"},
       {"is_meta", "prim"},
       {"is_leaf", "aten"},
       {"requires_grad", "prim"},
diff --git a/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp b/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp
index f1af932abd80..4f409c74210c 100644
--- a/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp
+++ b/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp
@@ -272,6 +272,14 @@ RegisterOperators reg(
            push(stack, a.is_mkldnn());
          },
          aliasAnalysisFromSchema()),
+     Operator(
+         "prim::is_vulkan(Tensor a) -> bool",
+         [](Stack* stack) {
+           at::Tensor a;
+           pop(stack, a);
+           push(stack, a.is_vulkan());
+         },
+         aliasAnalysisFromSchema()),
      Operator(
          "prim::is_quantized(Tensor a) -> bool",
          [](Stack* stack) {
diff --git a/torch/overrides.py b/torch/overrides.py
index 595e499727f6..cbfaea0de460 100644
--- a/torch/overrides.py
+++ b/torch/overrides.py
@@ -817,6 +817,7 @@ def get_testing_overrides() -> Dict[Callable, Callable]:
         Tensor.is_mkldnn.__get__: lambda self: -1,
         Tensor.is_quantized.__get__: lambda self: -1,
         Tensor.is_sparse.__get__: lambda self: -1,
+        Tensor.is_vulkan.__get__: lambda self: -1,
         Tensor.layout.__get__: lambda self: -1,
         Tensor.name.__get__: lambda self: -1,
         Tensor.names.__get__: lambda self: -1,
diff --git a/torch/testing/_internal/distributed/nn/api/remote_module_test.py b/torch/testing/_internal/distributed/nn/api/remote_module_test.py
index 1b453bc12a06..9d3057469702 100644
--- a/torch/testing/_internal/distributed/nn/api/remote_module_test.py
+++ b/torch/testing/_internal/distributed/nn/api/remote_module_test.py
@@ -244,7 +244,7 @@ def test_invalid_devices(self):

         with self.assertRaisesRegex(
             RuntimeError,
-            r"Expected one of cpu, cuda, mkldnn, opengl, opencl, ideep, hip, msnpu, xla device type at start of device string",
+            r"Expected one of cpu, cuda, mkldnn, opengl, opencl, ideep, hip, msnpu, xla, vulkan device type at start of device string",
         ):
             list(
                 self._create_remote_module_iter(

From 3b26ac697ce67417ffb1d1f92ec9680ddc012939 Mon Sep 17 00:00:00 2001
From: Ivan Kobzarev
Date: Wed, 21 Oct 2020 08:50:57 -0700
Subject: [PATCH 2/2] Update on "[py][vulkan][reland] Add is_vulkan to py api,
 add vulkan to device type parsing"

Summary:

Test Plan: Imported from OSS

Pulled By: IvanKobzarev

Differential Revision: [D24448984](https://our.internmc.facebook.com/intern/diff/D24448984)

Reland of the PR: https://github.com/pytorch/pytorch/pull/46511

The initial PR broke tests because torch/testing/_internal/distributed/nn/api/remote_module_test.py
asserts the error message that the PR changed. The test is updated accordingly here.

[ghstack-poisoned]
---
 .../testing/_internal/distributed/nn/api/remote_module_test.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/torch/testing/_internal/distributed/nn/api/remote_module_test.py b/torch/testing/_internal/distributed/nn/api/remote_module_test.py
index 9d3057469702..da81b3b16e53 100644
--- a/torch/testing/_internal/distributed/nn/api/remote_module_test.py
+++ b/torch/testing/_internal/distributed/nn/api/remote_module_test.py
@@ -244,7 +244,8 @@ def test_invalid_devices(self):
 
         with self.assertRaisesRegex(
             RuntimeError,
-            r"Expected one of cpu, cuda, mkldnn, opengl, opencl, ideep, hip, msnpu, xla, vulkan device type at start of device string",
+            r"Expected one of cpu, cuda, mkldnn, opengl, opencl, ideep, hip, msnpu, xla, vulkan"
+            " device type at start of device string",
         ):
             list(
                 self._create_remote_module_iter(
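
For reference, a minimal sketch (not part of the patch) of how the surface added here could be exercised from Python. It assumes a PyTorch build that already contains these changes; actually allocating Vulkan-backed tensors additionally requires a Vulkan-enabled build (e.g. USE_VULKAN=1), which is not shown, so the property simply reports False on an ordinary CPU build.

import torch

# Device-string parsing now accepts "vulkan" (c10 parse_type change above).
dev = torch.device("vulkan")
print(dev.type)  # "vulkan"

# The new Tensor.is_vulkan property; False for an ordinary CPU tensor.
t = torch.ones(2, 2)
print(t.is_vulkan)  # False

# prim::is_vulkan makes the same property visible to TorchScript.
@torch.jit.script
def reports_vulkan(x: torch.Tensor) -> bool:
    return x.is_vulkan

print(reports_vulkan(t))  # False

The point of the patch is that "vulkan" is recognized as a device string and that is_vulkan exists at all (in eager, in the .pyi stubs, in __torch_function__ overrides, and in TorchScript); whether it ever returns True depends on the build.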