Skip to content

Commit

Permalink
PyLong_{As/From}{Long/UnsignedLong} lint checks (#49280)
Browse files Browse the repository at this point in the history
Summary:
Fixes #45581

Pull Request resolved: #49280

Reviewed By: mruberry

Differential Revision: D25592330

Pulled By: ezyang

fbshipit-source-id: 5c16d6aed88ad1feaa7f129b4cd44c0561be2de2
  • Loading branch information
peterjc123 authored and facebook-github-bot committed Dec 17, 2020
1 parent c20b916 commit 815d383
Show file tree
Hide file tree
Showing 9 changed files with 84 additions and 26 deletions.
15 changes: 15 additions & 0 deletions .jenkins/pytorch/win-build.sh
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,21 @@ fi

export SCRIPT_HELPERS_DIR=$SCRIPT_PARENT_DIR/win-test-helpers

# Lint check: forbid raw PyLong_{From,As}{Unsigned,}Long calls under torch/.
# On Windows (LLP64) `long` is only 32 bits, so these APIs silently truncate
# 64-bit values; the THPUtils_* wrappers in python_numbers.h are safe.
# Temporarily drop -e/-x: grep exits non-zero when (desirably) nothing matches.
set +ex
grep -E -R 'PyLong_(From|As)(Unsigned|)Long\(' --exclude=python_numbers.h torch/
PYLONG_API_CHECK=$?
if [[ $PYLONG_API_CHECK == 0 ]]; then
  echo "Usage of PyLong_{From,As}{Unsigned}Long API may lead to overflow errors on Windows"
  echo "because \`sizeof(long) == 4\` and \`sizeof(unsigned long) == 4\`."
  echo "Please include \"torch/csrc/utils/python_numbers.h\" and use the corresponding APIs instead."
  echo "PyLong_FromLong -> THPUtils_packInt32 / THPUtils_packInt64"
  echo "PyLong_AsLong -> THPUtils_unpackInt (32-bit) / THPUtils_unpackLong (64-bit)"
  echo "PyLong_FromUnsignedLong -> THPUtils_packUInt32 / THPUtils_packUInt64"
  echo "PyLong_AsUnsignedLong -> THPUtils_unpackUInt32 / THPUtils_unpackUInt64"
  exit 1
fi
set -ex

$SCRIPT_HELPERS_DIR/build_pytorch.bat

assert_git_not_dirty
Expand Down
15 changes: 8 additions & 7 deletions torch/csrc/Module.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -153,27 +153,28 @@ static PyObject * THPModule_crashIfCsrcASAN(PyObject *module, PyObject *arg) {
"but got %s", THPUtils_typename(arg));
//NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays, modernize-avoid-c-arrays)
volatile char x[3];
x[static_cast<int>(THPUtils_unpackLong(arg))] = 0;
return PyLong_FromLong(x[0]);
x[THPUtils_unpackInt(arg)] = 0;
//NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
return THPUtils_packInt32(x[0]);
}

// Deliberately performs an operation UBSAN flags (float-to-int conversion of
// 1.0/x, undefined for x == 0) so tests can verify the sanitizer build works.
// NOTE(review): the scraped diff left both the pre- and post-image lines in
// place (two definitions of `x`, two returns); this keeps the post-image.
static PyObject * THPModule_crashIfCsrcUBSAN(PyObject *module, PyObject *arg) {
  THPUtils_assert(THPUtils_checkLong(arg), "crash_if_csrc_ubsan expects an int, "
          "but got %s", THPUtils_typename(arg));
  // THPUtils_unpackInt range-checks to 32 bits; raw PyLong_AsLong would be
  // unsafe on Windows where sizeof(long) == 4.
  int32_t x = THPUtils_unpackInt(arg);
  double y = 1.0 / x;
  return THPUtils_packInt32((int)y);
}

// Forwards to at::_crash_if_asan so tests can verify an ASAN-instrumented
// ATen build actually reports the crash.
// NOTE(review): the scraped diff retained both the old PyLong_FromLong line
// and the new THPUtils_packInt32 line; this keeps only the post-image.
static PyObject * THPModule_crashIfATenASAN(PyObject *module, PyObject *arg) {
  THPUtils_assert(THPUtils_checkLong(arg), "crash_if_aten_asan expects an int, "
          "but got %s", THPUtils_typename(arg));
  return THPUtils_packInt32(at::_crash_if_asan(THPUtils_unpackInt(arg)));
}

// Returns at::get_num_threads() as a Python int.
// NOTE(review): the scraped diff retained both the old and new return lines
// (duplicate returns); this keeps only the post-image THPUtils_packInt32 call,
// which is Windows-safe (sizeof(long) == 4 there).
static PyObject * THPModule_getNumThreads(PyObject *module, PyObject *noargs)
{
  return THPUtils_packInt32(at::get_num_threads());
}

static PyObject * THPModule_setNumThreads(PyObject *module, PyObject *arg)
Expand All @@ -188,7 +189,7 @@ static PyObject * THPModule_setNumThreads(PyObject *module, PyObject *arg)

// Returns at::get_num_interop_threads() as a Python int.
// NOTE(review): the scraped diff retained both the old and new return lines
// (duplicate returns); this keeps only the post-image THPUtils_packInt32 call.
static PyObject * THPModule_getNumInteropThreads(PyObject *module, PyObject *noargs)
{
  return THPUtils_packInt32(at::get_num_interop_threads());
}

static PyObject * THPModule_setNumInteropThreads(PyObject *module, PyObject *arg)
Expand Down
5 changes: 3 additions & 2 deletions torch/csrc/Size.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

#include <string>
#include <torch/csrc/utils/object_ptr.h>
#include <torch/csrc/utils/python_numbers.h>
#include <torch/csrc/utils/python_strings.h>
#include <torch/csrc/utils/python_tuples.h>

Expand Down Expand Up @@ -84,7 +85,7 @@ static PyObject * THPSize_repr(THPSize *self)
if (i != 0) {
repr += ", ";
}
repr += std::to_string(PyLong_AsLong(PyTuple_GET_ITEM(self, i)));
repr += std::to_string(THPUtils_unpackLong(PyTuple_GET_ITEM(self, i)));
}
repr += "])";
return THPUtils_packString(repr);
Expand Down Expand Up @@ -136,7 +137,7 @@ static PyObject *THPSize_numel(PyObject *_self, PyObject *noargs)
auto self = (THPSize*)_self;
int64_t numel = 1;
for (Py_ssize_t i = 0; i < PyTuple_Size((PyObject*)self); ++i) {
numel *= PyLong_AsLong(PyTuple_GET_ITEM(self, i));
numel *= THPUtils_unpackLong(PyTuple_GET_ITEM(self, i));
}
return THPUtils_packInt64(numel);
END_HANDLE_TH_ERRORS
Expand Down
3 changes: 2 additions & 1 deletion torch/csrc/autograd/python_cpp_function.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
#include <torch/csrc/autograd/python_hook.h>
#include <torch/csrc/autograd/python_anomaly_mode.h>
#include <pybind11/pybind11.h>
#include <torch/csrc/utils/python_numbers.h>
#include <torch/csrc/utils/python_strings.h>
#include <torch/csrc/DynamicTypes.h>
#include <torch/csrc/Exceptions.h>
Expand Down Expand Up @@ -114,7 +115,7 @@ PyObject* THPCppFunction_next_functions(THPCppFunction* self, PyObject* hook)
PyObject *py_fn = functionToPyObject(c_tuple.function);
if (!py_fn) return nullptr;
PyTuple_SET_ITEM(tuple.get(), 0, py_fn);
PyObject *py_idx = PyLong_FromLong(c_tuple.input_nr);
PyObject *py_idx = THPUtils_packUInt32(c_tuple.input_nr);
if (!py_idx) return nullptr;
PyTuple_SET_ITEM(tuple.get(), 1, py_idx);
PyTuple_SET_ITEM(py_functions.get(), i, tuple.release());
Expand Down
7 changes: 4 additions & 3 deletions torch/csrc/cuda/Module.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
#include <torch/csrc/CudaIPCTypes.h>
#include <torch/csrc/utils/pybind.h>
#include <torch/csrc/utils/cuda_lazy_init.h>
#include <torch/csrc/utils/python_numbers.h>
#include <torch/csrc/utils/python_strings.h>
#include <torch/csrc/cuda/python_comm.h>
#include <torch/csrc/Generator.h>
Expand Down Expand Up @@ -77,15 +78,15 @@ PyObject * THCPModule_getDevice_wrap(PyObject *self, PyObject *noargs)
HANDLE_TH_ERRORS
torch::utils::cuda_lazy_init();
auto device = static_cast<int>(c10::cuda::current_device());
return PyLong_FromLong(device);
return THPUtils_packInt32(device);
END_HANDLE_TH_ERRORS
}

// Returns the CUDA device count as a Python int. poison_fork() is called
// first so a later fork() in this process can be flagged as unsafe.
// NOTE(review): the scraped diff retained both the old PyLong_FromLong return
// and the new THPUtils_packUInt64 return; this keeps only the post-image.
PyObject * THCPModule_getDeviceCount_wrap(PyObject *self, PyObject *noargs)
{
  HANDLE_TH_ERRORS
  poison_fork();
  return THPUtils_packUInt64(at::cuda::device_count());
  END_HANDLE_TH_ERRORS
}

Expand Down Expand Up @@ -150,7 +151,7 @@ PyObject * THCPModule_setStream_wrap(PyObject *self, PyObject *obj)

// Returns the CUDA_VERSION the extension was compiled against as a Python int.
// NOTE(review): the scraped diff retained both the old and new return lines;
// this keeps only the post-image THPUtils_packInt64 call, which is immune to
// the 32-bit `long` on Windows.
PyObject * THCPModule_getCompiledVersion(PyObject *self, PyObject *noargs)
{
  return THPUtils_packInt64((int64_t) CUDA_VERSION);
}

PyObject * THCPModule_cudaHostAllocator(PyObject *_unused, PyObject *noargs)
Expand Down
3 changes: 2 additions & 1 deletion torch/csrc/cuda/Stream.cpp
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
#include <pybind11/pybind11.h>
#include <torch/csrc/cuda/Stream.h>
#include <torch/csrc/cuda/Module.h>
#include <torch/csrc/utils/python_numbers.h>
#include <torch/csrc/Device.h>
#include <torch/csrc/THP.h>

Expand Down Expand Up @@ -65,7 +66,7 @@ static PyObject * THCPStream_get_cuda_stream(THCPStream *self, void *unused) {

// Getter exposing the underlying CUDA stream's priority as a Python int.
// NOTE(review): the scraped diff retained both the old PyLong_FromLong line
// and the new THPUtils_packInt64 line; this keeps only the post-image.
static PyObject * THCPStream_get_priority(THCPStream *self, void *unused) {
  HANDLE_TH_ERRORS
  return THPUtils_packInt64(self->cuda_stream.priority());
  END_HANDLE_TH_ERRORS
}

Expand Down
7 changes: 4 additions & 3 deletions torch/csrc/generic/StorageMethods.cpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
#include <ATen/ATen.h>
#include <torch/csrc/utils/pycfunction_helpers.h>
#include <torch/csrc/utils/python_numbers.h>

#ifdef USE_CUDA
#include <cuda_runtime.h>
Expand All @@ -15,7 +16,7 @@ static PyObject * THPStorage_(size)(PyObject *_self, PyObject *noargs)
{
HANDLE_TH_ERRORS
auto self = (THPStorage*)_self;
return PyLong_FromLong(self->cdata->nbytes() / sizeof(scalar_t));
return THPUtils_packUInt64(self->cdata->nbytes() / sizeof(scalar_t));
END_HANDLE_TH_ERRORS
}

Expand Down Expand Up @@ -50,7 +51,7 @@ static PyObject * THPStorage_(elementSize)(PyObject *_self, PyObject *noargs)
{
HANDLE_TH_ERRORS
auto self = (THPStorage*)_self;
return PyLong_FromLong(THWStorage_(elementSize)(LIBRARY_STATE_NOARGS));
return THPUtils_packInt64(THWStorage_(elementSize)(LIBRARY_STATE_NOARGS));
END_HANDLE_TH_ERRORS
}

Expand Down Expand Up @@ -315,7 +316,7 @@ PyObject * THPStorage_(getDevice)(PyObject *_self, PyObject *noargs)
{
HANDLE_TH_ERRORS
auto self = (THPStorage*)_self;
return PyLong_FromLong(THCStorage_(getDevice)(LIBRARY_STATE self->cdata));
return THPUtils_packInt32(THCStorage_(getDevice)(LIBRARY_STATE self->cdata));
END_HANDLE_TH_ERRORS
}
#endif
Expand Down
19 changes: 10 additions & 9 deletions torch/csrc/generic/StorageSharing.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
#include <c10/cuda/CUDAGuard.h>
#endif

#include <torch/csrc/utils/python_numbers.h>
#include <random>

static PyObject * THPStorage_(sharedDecref)(PyObject *_self, PyObject *noargs)
Expand Down Expand Up @@ -95,7 +96,7 @@ static PyObject * THPStorage_(shareFilename)(PyObject *_self, PyObject *noargs)
if (!manager_handle) return nullptr;
THPObjectPtr storage_handle(PyBytes_FromString(ctx->filename()));
if (!storage_handle) return nullptr;
THPObjectPtr size(PyLong_FromLong(storage->nbytes() / sizeof(scalar_t)));
THPObjectPtr size(THPUtils_packUInt64(storage->nbytes() / sizeof(scalar_t)));
if (!size) return nullptr;

THPObjectPtr tuple(PyTuple_New(3));
Expand Down Expand Up @@ -172,9 +173,9 @@ static PyObject * THPStorage_(shareFd)(PyObject *_self, PyObject *noargs)
AT_ASSERT(ctx);
}

THPObjectPtr storage_handle(PyLong_FromLong(ctx->fd()));
THPObjectPtr storage_handle(THPUtils_packInt32(ctx->fd()));
if (!storage_handle) return nullptr;
THPObjectPtr size(PyLong_FromLong(storage->nbytes() / sizeof(scalar_t)));
THPObjectPtr size(THPUtils_packUInt64(storage->nbytes() / sizeof(scalar_t)));
if (!size) return nullptr;

THPObjectPtr tuple(PyTuple_New(2));
Expand Down Expand Up @@ -231,14 +232,14 @@ static PyObject * THPStorage_(shareCuda)(PyObject *_self, PyObject *noargs)

at::DeviceGuard device_guard(storage->device());
THPObjectPtr tuple(PyTuple_New(8));
THPObjectPtr device(PyLong_FromLong(storage->device().index()));
THPObjectPtr device(THPUtils_packInt32(storage->device().index()));
THPObjectPtr _handle(Py_None);
Py_INCREF(Py_None);
THPObjectPtr size_bytes(PyLong_FromLong(storage->nbytes()));
THPObjectPtr _offset_bytes(PyLong_FromLong(0));
THPObjectPtr size_bytes(THPUtils_packUInt64(storage->nbytes()));
THPObjectPtr _offset_bytes(THPUtils_packInt32(0));
THPObjectPtr _ref_counter(Py_None);
Py_INCREF(Py_None);
THPObjectPtr _ref_counter_offset(PyLong_FromLong(0));
THPObjectPtr _ref_counter_offset(THPUtils_packInt32(0));
THPObjectPtr _event_handle(Py_None);
Py_INCREF(Py_None);
THPObjectPtr _event_sync_required(Py_None);
Expand All @@ -261,7 +262,7 @@ static PyObject * THPStorage_(shareCuda)(PyObject *_self, PyObject *noargs)
auto sent_data = static_cast<torch::CudaIPCSentData*>(storage->data_ptr().get_context());
sent_data->set_original_ptr(std::move(old_data_ptr));
_ref_counter = PyBytes_FromString((sent_data->handle()).c_str());
_ref_counter_offset = PyLong_FromLong(sent_data->offset());
_ref_counter_offset = THPUtils_packInt64(sent_data->offset());


cudaIpcEventHandle_t ipc_event_handle;
Expand Down Expand Up @@ -515,7 +516,7 @@ PyObject * THPStorage_(sharedFd)(PyObject *_self, PyObject *noargs)
#endif

THPUtils_assert(ctx, "couldn't retrieve a shared file descriptor");
return PyLong_FromLong(ctx->fd());
return THPUtils_packInt32(ctx->fd());
END_HANDLE_TH_ERRORS
}

Expand Down
36 changes: 36 additions & 0 deletions torch/csrc/utils/python_numbers.h
Original file line number Diff line number Diff line change
Expand Up @@ -6,15 +6,24 @@
#include <torch/csrc/utils/object_ptr.h>
#include <torch/csrc/utils/tensor_numpy.h>
#include <cstdint>
#include <limits>
#include <stdexcept>

// largest integer that can be represented consecutively in a double
const int64_t DOUBLE_INT_MAX = 9007199254740992;

// Pack an int32_t into a new Python int. PyLong_FromLong is safe here even on
// Windows/LLP64 (sizeof(long) == 4) because the value is at most 32 bits.
inline PyObject* THPUtils_packInt32(int32_t value) {
  return PyLong_FromLong(value);
}

// Pack an int64_t into a new Python int. Uses the LongLong variant because
// plain PyLong_FromLong would truncate on Windows where long is 32-bit.
inline PyObject* THPUtils_packInt64(int64_t value) {
  return PyLong_FromLongLong(value);
}

// Pack a uint32_t into a new Python int; unsigned long is at least 32 bits on
// every supported platform, so no truncation is possible.
inline PyObject* THPUtils_packUInt32(uint32_t value) {
  return PyLong_FromUnsignedLong(value);
}

// Pack a uint64_t into a new Python int via the UnsignedLongLong variant,
// again to avoid the 32-bit unsigned long on Windows.
inline PyObject* THPUtils_packUInt64(uint64_t value) {
  return PyLong_FromUnsignedLongLong(value);
}
Expand All @@ -33,6 +42,22 @@ inline bool THPUtils_checkLong(PyObject* obj) {
return PyLong_Check(obj) && !PyBool_Check(obj);
}

// Unpack a Python int into an int32_t.
// Throws python_error if the object is not convertible, and
// std::runtime_error if the value does not fit in 32 bits (either because
// PyLong_AsLongAndOverflow reported overflow or the `long` value is outside
// the int32_t range — relevant on LP64 platforms where long is 64-bit).
inline int32_t THPUtils_unpackInt(PyObject* obj) {
  int overflow = 0;
  const long value = PyLong_AsLongAndOverflow(obj, &overflow);
  if (value == -1 && PyErr_Occurred()) {
    throw python_error();
  }
  const bool out_of_range = overflow != 0 ||
      value > std::numeric_limits<int32_t>::max() ||
      value < std::numeric_limits<int32_t>::min();
  if (out_of_range) {
    throw std::runtime_error("Overflow when unpacking long");
  }
  return static_cast<int32_t>(value);
}

inline int64_t THPUtils_unpackLong(PyObject* obj) {
int overflow;
long long value = PyLong_AsLongLongAndOverflow(obj, &overflow);
Expand All @@ -45,6 +70,17 @@ inline int64_t THPUtils_unpackLong(PyObject* obj) {
return (int64_t)value;
}

// Unpack a Python int into a uint32_t.
// Throws python_error if the conversion fails (e.g. negative or non-int
// input) and std::runtime_error if the value exceeds 32 bits, which can only
// happen where unsigned long is wider than 32 bits (LP64 platforms).
inline uint32_t THPUtils_unpackUInt32(PyObject* obj) {
  const unsigned long raw = PyLong_AsUnsignedLong(obj);
  if (PyErr_Occurred()) {
    throw python_error();
  }
  if (raw > std::numeric_limits<uint32_t>::max()) {
    throw std::runtime_error("Overflow when unpacking unsigned long");
  }
  return static_cast<uint32_t>(raw);
}

inline uint64_t THPUtils_unpackUInt64(PyObject* obj) {
unsigned long long value = PyLong_AsUnsignedLongLong(obj);
if (PyErr_Occurred()) {
Expand Down

0 comments on commit 815d383

Please sign in to comment.