Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -543,6 +543,7 @@ header_template_rule(
cc_library(
name = "aten_headers",
hdrs = [
"torch/csrc/Export.h",
"torch/csrc/WindowsTorchApiMacro.h",
"torch/csrc/jit/frontend/function_schema_parser.h",
] + glob([
Expand Down
3 changes: 1 addition & 2 deletions tools/autograd/templates/Functions.h
Original file line number Diff line number Diff line change
Expand Up @@ -6,11 +6,10 @@
#include <ATen/core/functional.h>
#include <ATen/TensorGeometry.h>

#include "torch/csrc/THP_export.h"
#include "torch/csrc/autograd/function.h"
#include "torch/csrc/autograd/variable.h"
#include "torch/csrc/autograd/saved_variable.h"
#include <torch/csrc/WindowsTorchApiMacro.h>
#include <torch/csrc/Export.h>

namespace torch { namespace autograd { namespace generated {

Expand Down
1 change: 1 addition & 0 deletions tools/build_variables.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,7 @@ def libtorch_generated_sources(gencode_pattern):
# copied from https://github.com/pytorch/pytorch/blob/f99a693cd9ff7a9b5fdc71357dac66b8192786d3/aten/src/ATen/core/CMakeLists.txt
jit_core_headers = [
"torch/csrc/utils/memory.h",
"torch/csrc/Export.h",
"torch/csrc/WindowsTorchApiMacro.h",
"torch/csrc/jit/frontend/source_range.h",
"torch/csrc/jit/serialization/callstack_debug_info_serialization.h",
Expand Down
7 changes: 3 additions & 4 deletions torch/csrc/Exceptions.h
Original file line number Diff line number Diff line change
Expand Up @@ -9,10 +9,9 @@

#include <c10/util/Exception.h>
#include <pybind11/pybind11.h>
#include <torch/csrc/THP_export.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/utils/auto_gil.h>
#include <torch/csrc/jit/runtime/jit_exception.h>
#include <torch/csrc/WindowsTorchApiMacro.h>
#include <c10/util/StringUtil.h>
#include <ATen/detail/FunctionTraits.h>

Expand Down Expand Up @@ -259,9 +258,9 @@ bool THPException_init(PyObject *module);

namespace torch {

THP_CLASS std::string processErrorMsg(std::string str);
TORCH_PYTHON_API std::string processErrorMsg(std::string str);

THP_API bool get_cpp_stacktraces_enabled();
TORCH_PYTHON_API bool get_cpp_stacktraces_enabled();

// Abstract base class for exceptions which translate to specific Python types
struct PyTorchError : public std::exception {
Expand Down
9 changes: 9 additions & 0 deletions torch/csrc/Export.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
#pragma once

// Brings in C10_EXPORT / C10_IMPORT, the platform-specific symbol
// visibility macros (dllexport/dllimport on Windows, default visibility
// attributes elsewhere).
#include <c10/macros/Export.h>

// TORCH_PYTHON_API annotates symbols that cross the Python-binding
// library boundary. When THP_BUILD_MAIN_LIB is defined (i.e. we are
// compiling the library that owns these symbols — presumably
// libtorch_python; confirm against the build files), symbols are
// exported; all other consumers import them.
#ifdef THP_BUILD_MAIN_LIB
#define TORCH_PYTHON_API C10_EXPORT
#else
#define TORCH_PYTHON_API C10_IMPORT
#endif
8 changes: 4 additions & 4 deletions torch/csrc/Generator.h
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
#pragma once

#include <torch/csrc/Export.h>
#include <torch/csrc/python_headers.h>
#include <ATen/ATen.h>

#include <torch/csrc/THP_export.h>

// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct THPGenerator {
Expand All @@ -14,16 +14,16 @@ struct THPGenerator {
// Creates a new Python object wrapping the default at::Generator. The reference is
// borrowed. The caller should ensure that the at::Generator object lifetime
lasts at least as long as the Python wrapper.
THP_API PyObject * THPGenerator_initDefaultGenerator(at::Generator cdata);
TORCH_PYTHON_API PyObject * THPGenerator_initDefaultGenerator(at::Generator cdata);

#define THPGenerator_Check(obj) \
PyObject_IsInstance(obj, THPGeneratorClass)

THP_API PyObject *THPGeneratorClass;
TORCH_PYTHON_API extern PyObject *THPGeneratorClass;
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why do you need to re-add the extern here? (and in other places)

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

extern is necessary for variables defined in another file, but it doesn't apply to classes or structs, which is why there were two macros: THP_API and THP_CLASS. Now we just have one and manually add extern. This is consistent with how all the other _API macros work.


bool THPGenerator_init(PyObject *module);

THP_API PyObject * THPGenerator_Wrap(at::Generator gen);
TORCH_PYTHON_API PyObject * THPGenerator_Wrap(at::Generator gen);

// Creates a new Python object for a Generator. The Generator must not already
// have a PyObject* associated with it.
Expand Down
2 changes: 1 addition & 1 deletion torch/csrc/THP.h
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
#include <TH/TH.h>
#include <TH/THTensor.hpp>

#include <torch/csrc/THP_export.h>
#include <torch/csrc/Export.h>

// Back-compatibility macros, Thanks to http://cx-oracle.sourceforge.net/
// define PyInt_* macros for Python 3.x. NB: We must include Python.h first,
Expand Down
17 changes: 0 additions & 17 deletions torch/csrc/THP_export.h

This file was deleted.

9 changes: 1 addition & 8 deletions torch/csrc/WindowsTorchApiMacro.h
Original file line number Diff line number Diff line change
@@ -1,9 +1,2 @@
#pragma once

#include <c10/macros/Export.h>

#ifdef _WIN32
#define TORCH_PYTHON_API
#else
#define TORCH_PYTHON_API TORCH_API
#endif
#include <torch/csrc/Export.h>
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we just delete this file? (can be in a follow up if you prefer)

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We should, but it's included everywhere in the torch folder so the diff is massive. Can do it in separate PR though.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Follow up PR sounds good!

2 changes: 1 addition & 1 deletion torch/csrc/autograd/python_saved_variable_hooks.h
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
#include <torch/csrc/autograd/python_variable.h>
#include <torch/csrc/autograd/saved_variable_hooks.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/THP_export.h>
#include <torch/csrc/Export.h>
#include <ATen/ATen.h>

namespace py = pybind11;
Expand Down
10 changes: 5 additions & 5 deletions torch/csrc/autograd/python_variable.h
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
#include <ATen/ATen.h>

#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/THP_export.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/Exceptions.h>

// Python object that backs torch.autograd.Variable
Expand All @@ -21,11 +21,11 @@ struct THPVariable {

TORCH_API void registerPythonTensorClass(const std::string& device, PyObject* python_tensor_class);

THP_API PyObject *THPVariableClass;
THP_API PyObject *ParameterClass;
TORCH_PYTHON_API extern PyObject *THPVariableClass;
TORCH_PYTHON_API extern PyObject *ParameterClass;

bool THPVariable_initModule(PyObject *module);
THP_API PyObject * THPVariable_Wrap(at::TensorBase var);
TORCH_PYTHON_API PyObject * THPVariable_Wrap(at::TensorBase var);

static inline bool THPVariable_CheckTypeExact(PyTypeObject* tp) {
// Check that a python object is a `Tensor`, but not a `Tensor` subclass.
Expand Down Expand Up @@ -61,4 +61,4 @@ inline const at::Tensor& THPVariable_Unpack(PyObject* obj) {
return THPVariable_Unpack(reinterpret_cast<THPVariable*>(obj));
}

THP_API c10::impl::PyInterpreter* getPyInterpreter();
TORCH_PYTHON_API c10::impl::PyInterpreter* getPyInterpreter();
2 changes: 1 addition & 1 deletion torch/csrc/autograd/python_variable_indexing.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

#include <torch/csrc/DynamicTypes.h>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/THP_export.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/python_variable.h>
#include <torch/csrc/autograd/utils/wrap_outputs.h>
Expand Down
6 changes: 3 additions & 3 deletions torch/csrc/distributed/c10d/comm.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
#include <ATen/ATen.h>
#include <ATen/core/ivalue.h>
#include <c10d/ProcessGroup.hpp>
#include <torch/csrc/Export.h>

namespace c10d {

Expand Down Expand Up @@ -85,7 +86,7 @@ class TORCH_API GradBucket {
// Requires implementing 1) `runHook` method that communicates gradients
// asynchronously, and 2) `parseHookResult` method that converts the hook
// result into a tensor.
class TORCH_PYTHON_API CommHookInterface {
class TORCH_API CommHookInterface {
public:
virtual ~CommHookInterface() = default;

Expand Down Expand Up @@ -121,9 +122,8 @@ inline at::Tensor parseCppCommHookResult(

// This CppCommHook interface only requires implementing runHook method that
// potentially uses a state.
// Still need TORCH_PYTHON_API instead of TORCH_API to support Windows platform.
template <typename T>
class TORCH_PYTHON_API CppCommHookInterface : public CommHookInterface {
class CppCommHookInterface : public CommHookInterface {
public:
explicit CppCommHookInterface(T& state) : state_(state) {}

Expand Down
2 changes: 1 addition & 1 deletion torch/csrc/generic/Storage.h
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@

#include <torch/csrc/StorageDefs.h>

THP_API PyObject * THPStorage_(New)(c10::intrusive_ptr<c10::StorageImpl> ptr);
TORCH_PYTHON_API PyObject * THPStorage_(New)(c10::intrusive_ptr<c10::StorageImpl> ptr);
extern PyObject *THPStorageClass;

#include <torch/csrc/Types.h>
Expand Down
4 changes: 2 additions & 2 deletions torch/csrc/utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -126,8 +126,8 @@
#define THPUtils_assert(cond, ...) THPUtils_assertRet(nullptr, cond, __VA_ARGS__)
#define THPUtils_assertRet(value, cond, ...) \
if (THP_EXPECT(!(cond), 0)) { THPUtils_setError(__VA_ARGS__); return value; }
THP_API void THPUtils_setError(const char *format, ...);
THP_API void THPUtils_invalidArguments(
TORCH_PYTHON_API void THPUtils_setError(const char *format, ...);
TORCH_PYTHON_API void THPUtils_invalidArguments(
PyObject *given_args, PyObject *given_kwargs,
const char *function_name, size_t num_options, ...);

Expand Down