Skip to content

Commit

Permalink
[GPU] Introduce USE_PYTORCH_METAL
Browse files Browse the repository at this point in the history
Pull Request resolved: #46383

The old `USE_METAL` is actually being used by Caffe2. Here we introduce a new macro to enable Metal in PyTorch.
ghstack-source-id: 114384673

Differential Revision: [D24322018](https://our.internmc.facebook.com/intern/diff/D24322018/)

**NOTE FOR REVIEWERS**: This PR has internal Facebook specific changes or comments, please review them on [Phabricator](https://our.internmc.facebook.com/intern/diff/D24322018/)!
  • Loading branch information
xta0 committed Oct 15, 2020
1 parent 2d6fd22 commit 515a581
Show file tree
Hide file tree
Showing 6 changed files with 18 additions and 12 deletions.
3 changes: 2 additions & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,8 @@ option(USE_GLOG "Use GLOG" OFF)
option(USE_LEVELDB "Use LEVELDB" OFF)
option(USE_LITE_PROTO "Use lite protobuf instead of full." OFF)
option(USE_LMDB "Use LMDB" OFF)
option(USE_METAL "Use Metal for iOS build" OFF)
option(USE_METAL "Use Metal for Caffe2 iOS build" ON)
option(USE_PYTORCH_METAL "Use Metal for PyTorch iOS build" OFF)
option(USE_NATIVE_ARCH "Use -march=native" OFF)
cmake_dependent_option(
USE_NCCL "Use NCCL" ON
Expand Down
4 changes: 2 additions & 2 deletions aten/src/ATen/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,7 @@ else()
set(all_cpu_cpp ${all_cpu_cpp} ${vulkan_cpp})
endif()

if(USE_METAL)
if(USE_PYTORCH_METAL)
set(all_cpu_cpp ${all_cpu_cpp} ${metal_cpp} ${native_metal_srcs})
else()
set(all_cpu_cpp ${all_cpu_cpp} ${metal_cpp})
Expand Down Expand Up @@ -390,7 +390,7 @@ set(INSTALL_HEADERS ${base_h} ${ATen_CORE_HEADERS})
if(NOT INTERN_BUILD_MOBILE)
list(APPEND INSTALL_HEADERS ${native_h} ${native_cpu_h} ${native_quantized_h} ${cuda_h} ${native_cuda_h} ${native_hip_h} ${cudnn_h} ${hip_h} ${miopen_h})
else()
if(USE_METAL)
if(USE_PYTORCH_METAL)
list(APPEND INSTALL_HEADERS ${metal_h} ${native_metal_h})
endif()
endif()
Expand Down
4 changes: 2 additions & 2 deletions aten/src/ATen/native/metal/MetalAten.mm
Original file line number Diff line number Diff line change
Expand Up @@ -247,7 +247,7 @@ Tensor reshape(const Tensor& input, IntArrayRef shape) {

struct MetalImpl : public at::metal::MetalInterface {
bool is_metal_available() const override {
#if defined(USE_METAL)
#if defined(USE_PYTORCH_METAL)
return [[MPSCNNContext sharedInstance] available];
#else
return false;
Expand All @@ -260,7 +260,7 @@ bool is_metal_available() const override {
return native::metal::metal_copy_impl_(input, src);
}
};
#if defined(USE_METAL)
#if defined(USE_PYTORCH_METAL)
static at::metal::MetalImplRegistrar g_metal_impl(new MetalImpl());
#endif

Expand Down
1 change: 1 addition & 0 deletions cmake/Summary.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -107,6 +107,7 @@ function(caffe2_print_configuration_summary)
message(STATUS " LMDB version : ${LMDB_VERSION}")
endif()
message(STATUS " USE_METAL : ${USE_METAL}")
message(STATUS " USE_PYTORCH_METAL : ${USE_PYTORCH_METAL}")
message(STATUS " USE_MKL : ${CAFFE2_USE_MKL}")
message(STATUS " USE_MKLDNN : ${USE_MKLDNN}")
if(${CAFFE2_USE_MKLDNN})
Expand Down
2 changes: 1 addition & 1 deletion scripts/build_ios.sh
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ CMAKE_ARGS+=("-DUSE_LEVELDB=OFF")
CMAKE_ARGS+=("-DUSE_MPI=OFF")
CMAKE_ARGS+=("-DUSE_NUMPY=OFF")
CMAKE_ARGS+=("-DUSE_NNPACK=OFF")
CMAKE_ARGS+=("-DUSE_METAL=OFF")
CMAKE_ARGS+=("-DUSE_PYTORCH_METAL=OFF")
CMAKE_ARGS+=("-DUSE_MKLDNN=OFF")

# pthreads
Expand Down
16 changes: 10 additions & 6 deletions torch/csrc/jit/passes/metal_rewrite.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
namespace torch {
namespace jit {

#ifdef USE_METAL
#ifdef USE_PYTORCH_METAL

namespace {

Expand Down Expand Up @@ -196,27 +196,31 @@ script::Module metalOptimizeForMobile(
#else

void metalInsertPrePackedOps(std::shared_ptr<Graph>& graph) {
TORCH_INTERNAL_ASSERT("metal is not enabled. Please build with USE_METAL=1");
TORCH_INTERNAL_ASSERT(
"metal is not enabled. Please build with USE_PYTORCH_METAL=1");
}

void metalInsertPrePackedOps(script::Module& module) {
TORCH_INTERNAL_ASSERT("metal is not enabled. Please build with USE_METAL=1");
TORCH_INTERNAL_ASSERT(
"metal is not enabled. Please build with USE_PYTORCH_METAL=1");
}

TORCH_API void metalFusePrePackedConvWithClamp(script::Module& module) {
TORCH_INTERNAL_ASSERT("metal is not enabled. Please build with USE_METAL=1");
TORCH_INTERNAL_ASSERT(
"metal is not enabled. Please build with USE_PYTORCH_METAL=1");
}

TORCH_API void metalFoldPrePackingOps(script::Module& module) {
TORCH_INTERNAL_ASSERT("metal is not enabled. Please build with USE_METAL=1");
TORCH_INTERNAL_ASSERT(
"metal is not enabled. Please build with USE_PYTORCH_METAL=1");
}

script::Module metalOptimizeForMobile(
const script::Module& m,
const std::vector<std::string>& preserved_methods) {
TORCH_INTERNAL_ASSERT(
"Mobile optimization only available with metal at the moment. "
"metal is not enabled. Please build with USE_METAL=1");
"metal is not enabled. Please build with USE_PYTORCH_METAL=1");
return m;
}

Expand Down

0 comments on commit 515a581

Please sign in to comment.