Revert "turn on -Werror=unused-function in our Bazel CPU build"
pytorchmergebot committed Jun 10, 2022
1 parent 2e8312c commit bcd7a20
Showing 26 changed files with 373 additions and 83 deletions.
18 changes: 2 additions & 16 deletions .bazelrc
@@ -49,19 +49,5 @@ build:cpu-only --@rules_cuda//cuda:enable_cuda=False
# On the bright side, this means we don't have to more broadly apply
# the exceptions to an entire target.
build \
--per_file_copt='^//.*\.(cpp|cc)$'@-Werror=type-limits \
--per_file_copt=^//.*\.cu$@--compiler-options=-Werror=type-limits \
--per_file_copt='^//.*\.(cpp|cc)$'@-Werror=unused-function \
--per_file_copt=^//.*\.cu$@--compiler-options=-Werror=unused-function

build \
--per_file_copt=//:aten/src/ATen/RegisterCompositeExplicitAutograd.cpp@-Wno-error=unused-function \
--per_file_copt=//:aten/src/ATen/RegisterCompositeImplicitAutograd.cpp@-Wno-error=unused-function \
--per_file_copt=//:aten/src/ATen/RegisterMkldnnCPU.cpp$@-Wno-error=unused-function \
--per_file_copt=//:aten/src/ATen/RegisterNestedTensorCPU.cpp$@-Wno-error=unused-function \
--per_file_copt=//:aten/src/ATen/RegisterQuantizedCPU.cpp$@-Wno-error=unused-function \
--per_file_copt=//:aten/src/ATen/RegisterSparseCPU.cpp$@-Wno-error=unused-function \
--per_file_copt=//:aten/src/ATen/RegisterSparseCsrCPU.cpp$@-Wno-error=unused-function \
--per_file_copt=//:aten/src/ATen/RegisterZeroTensor.cpp$@-Wno-error=unused-function \
--per_file_copt=//:torch/csrc/lazy/generated/RegisterAutogradLazy.cpp@-Wno-error=unused-function \
--per_file_copt=//:torch/csrc/lazy/generated/RegisterLazy.cpp@-Wno-error=unused-function
--per_file_copt='^//.*\.(cpp|cc)$'@-Werror=type-limits \
--per_file_copt=^//.*\.cu$@--compiler-options=-Werror=type-limits
57 changes: 57 additions & 0 deletions aten/src/ATen/NamedTensorUtils.cpp
@@ -260,6 +260,33 @@ std::vector<Dimname> compute_diagonal_outnames(
return outnames;
}

// tensor_dotted_dim and other_dotted_dim are the dimensions of the two
// tensors that we contract together. Usually other_dotted_dim is 0
// and tensor_dotted_dim is the last dim of tensor, but there are some special
// cases like einsum and tensordot where one can contract arbitrary dims.
// NOLINTNEXTLINE(clang-diagnostic-unused-function)
static std::vector<Dimname> compute_dot_product_outnames(
DimnameList tensor_names,
int64_t tensor_dotted_dim,
DimnameList other_names,
int64_t other_dotted_dim) {
int64_t num_outnames = tensor_names.size() + other_names.size() - 2;
if (num_outnames == 0) {
return {};
}
std::vector<Dimname> outnames(num_outnames, Dimname::wildcard());
int64_t index = 0;
for (const auto j : c10::irange(static_cast<int64_t>(tensor_names.size()))) {
if (j == tensor_dotted_dim) continue;
outnames[index++] = tensor_names[j];
}
for (const auto j : c10::irange(static_cast<int64_t>(other_names.size()))) {
if (j == other_dotted_dim) continue;
outnames[index++] = other_names[j];
}
return outnames;
}

static void check_feature_names_are_distinct(
DimnameList self_names,
DimnameList other_names,
@@ -279,6 +306,36 @@ static void check_feature_names_are_distinct(
". Please rename the input tensors with `Tensor.rename` to prevent this.");
}

// NOLINTNEXTLINE(clang-diagnostic-unused-function)
static DimnameList batch_dims(DimnameList names) {
if (names.size() <= 2) {
return {};
}
return DimnameList(names.begin(), names.end() - 2);
}

// NOLINTNEXTLINE(clang-diagnostic-unused-function)
static DimnameList feature_dims(DimnameList names) {
if (names.size() <= 2) {
return names;
}
return DimnameList(names.end() - 2, 2);
}

// NOLINTNEXTLINE(clang-diagnostic-unused-function)
static bool are_distinct(DimnameList batch_dims, DimnameList feature_dims) {
for (const auto& target : feature_dims) {
if (target.isWildcard()) {
continue;
}
if (std::any_of(batch_dims.begin(), batch_dims.end(),
[&](const Dimname& dim) { return target == dim; })) {
return false;
}
}
return true;
}

static int64_t num_batch_dims(DimnameList names) {
if (names.size() <= 2) {
return 0;
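
The name-propagation rule in compute_dot_product_outnames above is: drop the contracted dimension from each operand's name list and concatenate what remains. A simplified standalone sketch of the same logic, with std::string standing in for at::Dimname (the _sketch name is illustrative only):

#include <string>
#include <vector>

std::vector<std::string> dot_outnames_sketch(
    const std::vector<std::string>& tensor_names, size_t tensor_dotted_dim,
    const std::vector<std::string>& other_names, size_t other_dotted_dim) {
  std::vector<std::string> out;
  for (size_t j = 0; j < tensor_names.size(); ++j)
    if (j != tensor_dotted_dim) out.push_back(tensor_names[j]);   // keep all but the contracted dim
  for (size_t j = 0; j < other_names.size(); ++j)
    if (j != other_dotted_dim) out.push_back(other_names[j]);
  return out;  // e.g. ({"A","B"}, 1, {"B","C"}, 0) -> {"A","C"}; two 1-D inputs -> {}
}
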
20 changes: 20 additions & 0 deletions aten/src/ATen/native/BinaryOps.cpp
@@ -12,6 +12,26 @@
#include <torch/library.h>

namespace at {
namespace native {

// These are still needed because we don't have C++ conversions from number
// types (int, float, etc.) to Tensor (only to Scalar). They're not exposed
// to Python.

static void check_convert(const Scalar& scalar, ScalarType scalarType) {
// Validate that it is possible to convert the scalar to the tensor dtype
// without overflow
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
at::ScalarType::Bool,
at::ScalarType::BFloat16,
at::ScalarType::Half,
at::ScalarType::ComplexHalf,
scalarType,
"check_convert",
[&] { scalar.to<scalar_t>(); });
}

} // namespace native

namespace meta {

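
check_convert above dispatches on the destination dtype and relies on scalar.to<scalar_t>() to throw when the value does not fit. The same overflow guard can be sketched without the dispatch machinery (checked_narrow is a hypothetical helper for illustration, not an ATen API):

#include <cstdint>
#include <limits>
#include <stdexcept>

// Throws if `value` cannot be represented in the narrower type To --
// the kind of failure check_convert is probing for.
template <typename To>
void checked_narrow(double value) {
  if (value > static_cast<double>(std::numeric_limits<To>::max()) ||
      value < static_cast<double>(std::numeric_limits<To>::lowest())) {
    throw std::overflow_error("value out of range for target dtype");
  }
}

// e.g. checked_narrow<int8_t>(300.0) throws, while checked_narrow<float>(300.0) does not.
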
12 changes: 12 additions & 0 deletions aten/src/ATen/native/ReduceOps.cpp
@@ -1111,6 +1111,18 @@ Tensor nansum(const Tensor& self, IntArrayRef dim, bool keepdim, c10::optional<S
return at::native::nansum_out(self, dim, keepdim, dtype, result);
}

static Tensor& prod_out_impl(Tensor& result, const Tensor& self, IntArrayRef dim,
bool keepdim, c10::optional<ScalarType> opt_dtype) {
ScalarType dtype = get_dtype_from_result(result, opt_dtype);
auto iter = make_reduction("prod", result, self, dim, keepdim, dtype);
if (iter.numel() == 0) {
result.fill_(1);
} else {
prod_stub(iter.device_type(), iter);
}
return result;
}

// NOTE: this could be implemented via diag and sum, but this has perf problems,
// see https://github.com/pytorch/pytorch/pull/47305,
Tensor trace_cpu(const Tensor& self) {
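
The prod_out_impl pattern above — fill the result with the identity when the reduction iterates over nothing, otherwise dispatch to the stub — keeps the reduction well-defined on empty inputs. A minimal standalone sketch of the same idea (plain std::vector instead of a TensorIterator; prod_sketch is a hypothetical name):

#include <vector>

// Product over a range; an empty range yields the multiplicative identity 1,
// mirroring how prod_out_impl fills the result when iter.numel() == 0.
double prod_sketch(const std::vector<double>& xs) {
  double acc = 1.0;                // identity element for product
  for (double x : xs) acc *= x;
  return acc;                      // {} -> 1.0, {2, 3, 4} -> 24.0
}
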
5 changes: 5 additions & 0 deletions aten/src/ATen/native/quantized/AffineQuantizer.cpp
@@ -35,6 +35,11 @@ void checkRoundingMode(const std::string& fn_name) {
return;
}

void checkCPUTensor(const std::string& fn_name, const Tensor& t) {
TORCH_CHECK(
t.device().type() == kCPU, fn_name, " only supports CPU device type.");
}

void checkFloatTensor(const std::string& fn_name, const Tensor& t) {
TORCH_CHECK(
t.scalar_type() == kFloat, fn_name, " expects a Float Tensor, got ",
8 changes: 5 additions & 3 deletions c10/test/util/exception_test.cpp
@@ -5,6 +5,9 @@
using c10::Error;

namespace {
bool throw_func() {
throw std::runtime_error("I'm throwing...");
}

template <class Functor>
inline void expectThrowsEq(Functor&& functor, const char* expectedMessage) {
@@ -23,10 +26,9 @@ TEST(ExceptionTest, TORCH_INTERNAL_ASSERT_DEBUG_ONLY) {
#ifdef NDEBUG
// NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
ASSERT_NO_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false));
// Does nothing - `throw ...` should not be evaluated
// Does nothing - `throw_func()` should not be evaluated
// NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
ASSERT_NO_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
(throw std::runtime_error("I'm throwing..."), true)));
ASSERT_NO_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(throw_func()));
#else
ASSERT_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false), c10::Error);
ASSERT_NO_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(true));
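
The throw_func change only matters because TORCH_INTERNAL_ASSERT_DEBUG_ONLY discards its condition entirely in release (NDEBUG) builds, so a throwing expression passed to it must not fire there. A minimal sketch of that macro shape (ASSERT_DEBUG_ONLY_SKETCH is hypothetical, not the real macro definition):

#include <stdexcept>

#ifdef NDEBUG
#define ASSERT_DEBUG_ONLY_SKETCH(cond) ((void)0)   // condition is never evaluated
#else
#define ASSERT_DEBUG_ONLY_SKETCH(cond) \
  do { if (!(cond)) throw std::runtime_error("assert failed"); } while (0)
#endif

bool throws_when_called() { throw std::runtime_error("boom"); }

// In an NDEBUG build the next line is a no-op; in a debug build it throws,
// which is exactly what the test above checks for.
// ASSERT_DEBUG_ONLY_SKETCH(throws_when_called());
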
25 changes: 25 additions & 0 deletions caffe2/ideep/operators/adam_op.cc
@@ -4,6 +4,31 @@ using namespace caffe2;

namespace {

// NOLINTNEXTLINE(clang-diagnostic-unused-function)
void adam_ideep_update(
int N,
const float* g,
const float* m,
const float* v,
float* ng,
float* nm,
float* nv,
float beta1,
float beta2,
float eps_hat,
float correction,
const float* lr) {
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (auto i = 0; i < N; ++i) {
float gi = g[i];
float mi = nm[i] = m[i] * beta1 + gi * (1 - beta1);
float vi = nv[i] = v[i] * beta2 + gi * gi * (1 - beta2);
ng[i] = lr[0] * correction * mi / (std::sqrt(vi) + eps_hat);
}
}

void adam_ideep_compute(
int N,
const float* w,
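
adam_ideep_update above is a plain elementwise Adam step: m ← β1·m + (1−β1)·g, v ← β2·v + (1−β2)·g², update = lr·correction·m/(√v + ε). A single-element sketch of the same arithmetic (adam_step_sketch and AdamState are illustrative names):

#include <cmath>

struct AdamState { float m = 0.f, v = 0.f; };

// One Adam step for a single parameter gradient g; mirrors the loop body above.
float adam_step_sketch(AdamState& s, float g, float lr, float beta1, float beta2,
                       float eps_hat, float correction) {
  s.m = s.m * beta1 + g * (1 - beta1);          // first-moment estimate
  s.v = s.v * beta2 + g * g * (1 - beta2);      // second-moment estimate
  return lr * correction * s.m / (std::sqrt(s.v) + eps_hat);
}
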
22 changes: 22 additions & 0 deletions caffe2/opt/onnxifi_transformer.cc
@@ -31,6 +31,28 @@ std::unordered_map<std::string, TensorShape> stripShapeInfoMap(
return shape_map;
}

// NOLINTNEXTLINE(clang-diagnostic-unused-function)
uint64_t onnxifiDataType(caffe2::TensorProto::DataType t) {
#define CAFFE2_TO_ONNXIFI_TYPE(x, y) \
case (caffe2::TensorProto::x): \
return y
switch (t) {
CAFFE2_TO_ONNXIFI_TYPE(FLOAT, ONNXIFI_DATATYPE_FLOAT32);
CAFFE2_TO_ONNXIFI_TYPE(INT8, ONNXIFI_DATATYPE_INT8);
CAFFE2_TO_ONNXIFI_TYPE(UINT8, ONNXIFI_DATATYPE_UINT8);
CAFFE2_TO_ONNXIFI_TYPE(INT16, ONNXIFI_DATATYPE_INT16);
CAFFE2_TO_ONNXIFI_TYPE(UINT16, ONNXIFI_DATATYPE_UINT16);
CAFFE2_TO_ONNXIFI_TYPE(INT32, ONNXIFI_DATATYPE_INT32);
CAFFE2_TO_ONNXIFI_TYPE(INT64, ONNXIFI_DATATYPE_INT64);
CAFFE2_TO_ONNXIFI_TYPE(FLOAT16, ONNXIFI_DATATYPE_FLOAT16);
default:
LOG(WARNING) << "Unsupported Caffe2 tensor type: " << t
<< ", fallback to FLOAT";
return ONNXIFI_DATATYPE_FLOAT32;
}
#undef CAFFE2_TO_ONNXIFI_TYPE
}

std::vector<::ONNX_NAMESPACE::ValueInfoProto> convertToValueInfo(
const std::vector<std::string>& names,
const std::unordered_map<std::string, TensorShape>& shape_hints,
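
CAFFE2_TO_ONNXIFI_TYPE above is a local X-macro: each invocation expands to a case label that returns the paired constant, and the default branch logs and falls back to FLOAT32. The same pattern in a stripped-down standalone form (the enums here are placeholders, not the real Caffe2/ONNXIFI values):

enum class SrcType { FLOAT, INT8, INT32 };
enum class DstType { F32, I8, I32 };

DstType map_type_sketch(SrcType t) {
#define MAP_TYPE(x, y) \
  case SrcType::x:     \
    return DstType::y
  switch (t) {
    MAP_TYPE(FLOAT, F32);
    MAP_TYPE(INT8, I8);
    MAP_TYPE(INT32, I32);
    default:
      return DstType::F32;  // fallback, analogous to ONNXIFI_DATATYPE_FLOAT32
  }
#undef MAP_TYPE
}
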
41 changes: 20 additions & 21 deletions test/cpp/jit/torch_python_test.cpp
@@ -34,30 +34,29 @@ void testEvalModeForLoadedModule() {
AT_ASSERT(module.attr("dropout").toModule().is_training());
}

// TODO: this test never ran before and is broken.
// void testSerializationInterop() {
// if (isSandcastle()) {
// // The module file to load is not generated in Sandcastle
// return;
// }
void testSerializationInterop() {
if (isSandcastle()) {
// The module file to load is not generated in Sandcastle
return;
}

// // This should be generated by `test/cpp/jit/tests_setup.py`
// std::ifstream input_stream("ivalue.pt");
// std::vector<char> input;
// input.insert(
// input.begin(),
// std::istream_iterator<char>(input_stream),
// std::istream_iterator<char>());
// IValue ivalue = pickle_load(input);
// This should be generated by `test/cpp/jit/tests_setup.py`
std::ifstream input_stream("ivalue.pt");
std::vector<char> input;
input.insert(
input.begin(),
std::istream_iterator<char>(input_stream),
std::istream_iterator<char>());
IValue ivalue = pickle_load(input);

// auto elements = ivalue.toTupleRef().elements();
// auto ones = torch::ones({2, 2});
// AT_ASSERT(ones.equal(elements.at(0).toTensor()));
auto elements = ivalue.toTupleRef().elements();
auto ones = torch::ones({2, 2});
AT_ASSERT(ones.equal(elements.at(0).toTensor()));

// // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
// auto twos = torch::ones({3, 5}) * 2;
// AT_ASSERT(twos.equal(elements.at(1).toTensor()));
// }
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto twos = torch::ones({3, 5}) * 2;
AT_ASSERT(twos.equal(elements.at(1).toTensor()));
}

void testTorchSaveError() {
if (isSandcastle()) {
4 changes: 2 additions & 2 deletions test/cpp/tensorexpr/test_conv.cpp
@@ -12,14 +12,14 @@ namespace jit {
namespace te = torch::jit::tensorexpr;
namespace F = torch::nn::functional;

#ifdef TORCH_ENABLE_LLVM

// Generate test data with few bits of precision, to minimize error
// accumulation from floating-point reordering.
static at::Tensor genTestData(c10::IntArrayRef args) {
return at::trunc(at::randn(args) * 256.0f) / 256.0f;
}

#ifdef TORCH_ENABLE_LLVM

TEST(Conv, DepthwiseConv2D) {
constexpr int N = 1, C = 72, H = 56, W = 56;
constexpr int K = 72, R = 3, S = 3;
4 changes: 0 additions & 4 deletions test/cpp/tensorexpr/tutorial.cpp
@@ -54,13 +54,9 @@

using namespace torch::jit::tensorexpr;

#ifdef TORCH_ENABLE_LLVM

// Helper function to print a snippet from a big multi-line string
static void printLinesToFrom(const std::string& input_str, int from, int to);

#endif

int main(int argc, char* argv[]) {
std::cout << "*** Structure of tensor expressions and statements ***"
<< std::endl;
16 changes: 16 additions & 0 deletions torch/csrc/DynamicTypes.cpp
@@ -28,6 +28,22 @@ std::array<THPDtype*, static_cast<int>(at::ScalarType::NumOptions)> dtype_regist

std::array<THPLayout*, static_cast<int>(at::Layout::NumOptions)> layout_registry = {};

at::Backend get_backend(bool is_cuda, bool is_sparse) {
if (is_cuda) {
if (is_sparse){
return at::Backend::SparseCUDA;
} else {
return at::Backend::CUDA;
}
} else {
if (is_sparse){
return at::Backend::SparseCPU;
} else {
return at::Backend::CPU;
}
}
}

at::DeprecatedTypeProperties* get_type_properties(at::DeviceType device_type, at::ScalarType scalarType) {
at::Backend backend;
if (device_type == at::kCPU) {
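
get_backend above is the 2×2 mapping (is_cuda, is_sparse) → {CPU, CUDA, SparseCPU, SparseCUDA}. A more compact equivalent, written against a placeholder enum rather than at::Backend:

enum class BackendSketch { CPU, CUDA, SparseCPU, SparseCUDA };

// Same mapping as get_backend, collapsed into ternaries.
BackendSketch get_backend_sketch(bool is_cuda, bool is_sparse) {
  if (is_cuda) return is_sparse ? BackendSketch::SparseCUDA : BackendSketch::CUDA;
  return is_sparse ? BackendSketch::SparseCPU : BackendSketch::CPU;
}
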
14 changes: 14 additions & 0 deletions torch/csrc/Storage.cpp
@@ -337,6 +337,20 @@ static PyObject * THPStorage_device(THPStorage* self, void *unused) {
END_HANDLE_TH_ERRORS
}

static PyObject * THPStorage_dtype(THPStorage *self, void *unused)
{
HANDLE_TH_ERRORS
return torch::autograd::utils::wrap(
torch::getTHPDtype(at::typeMetaToScalarType(
#ifdef THQUANTIZED
caffe2::TypeMeta::Make<quantized_t>()
#else
caffe2::TypeMeta::Make<uint8_t>()
#endif
)));
END_HANDLE_TH_ERRORS
}

typedef PyObject *(*getter)(PyObject *, void *);

// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables)
12 changes: 12 additions & 0 deletions torch/csrc/autograd/init.cpp
@@ -480,6 +480,18 @@ static PyObject * set_autocast_cpu_dtype(PyObject* _unused, PyObject *arg) {
END_HANDLE_TH_ERRORS
}

static const char* scalarTypeName(const at::ScalarType type) {
switch (type) {
#define DEFINE_CASE(ctype, name) \
case at::ScalarType::name: \
return #ctype;
AT_FORAUTOCAST_SCALAR_TYPES(DEFINE_CASE)
#undef DEFINE_CASE
default:
throw std::runtime_error("unknown scalar type for autocast");
}
}

static PyObject * get_autocast_gpu_dtype(PyObject* _unused, PyObject *arg){
HANDLE_TH_ERRORS
at::ScalarType current_dtype = at::autocast::get_autocast_gpu_dtype();
6 changes: 6 additions & 0 deletions torch/csrc/jit/codegen/cuda/codegen.cpp
@@ -27,6 +27,12 @@ std::string ptrType(DataType dt) {
return ss.str();
}

std::string refType(DataType dt) {
std::stringstream ss;
ss << dt << "&";
return ss.str();
}

//! Utility class to build an argument list
class ArgumentBuilder {
public:
