Skip to content

Commit

Permalink
Update on "Fix kthvalue error for scalar input"
Browse files — browse the repository at this point in the history
fixes #30818

Note that the median case was already fixed by #45847

[ghstack-poisoned]
  • Loading branch information
heitorschueroff committed Nov 10, 2020
2 parents 28c4fce + 22d2141 commit 280f8fe
Show file tree
Hide file tree
Showing 121 changed files with 5,392 additions and 3,144 deletions.
6 changes: 4 additions & 2 deletions .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -655,11 +655,13 @@ jobs:
echo "Retrieving test reports"
docker cp $id:/var/lib/jenkins/workspace/test/test-reports ./ || echo 'No test reports found!'
if [[ ${BUILD_ENVIRONMENT} == *"coverage"* ]]; then
echo "Retrieving C++ coverage report"
docker cp $id:/var/lib/jenkins/workspace/build/coverage.info ./test
fi
if [[ ${BUILD_ENVIRONMENT} == *"coverage"* || ${BUILD_ENVIRONMENT} == *"onnx"* ]]; then
echo "Retrieving Python coverage report"
docker cp $id:/var/lib/jenkins/workspace/test/.coverage ./test
docker cp $id:/var/lib/jenkins/workspace/test/coverage.xml ./test
echo "Retrieving C++ coverage report"
docker cp $id:/var/lib/jenkins/workspace/build/coverage.info ./test
python3 -mpip install codecov
python3 -mcodecov
fi
Expand Down
6 changes: 4 additions & 2 deletions .circleci/verbatim-sources/job-specs/pytorch-job-specs.yml
Original file line number Diff line number Diff line change
Expand Up @@ -217,11 +217,13 @@ jobs:
echo "Retrieving test reports"
docker cp $id:/var/lib/jenkins/workspace/test/test-reports ./ || echo 'No test reports found!'
if [[ ${BUILD_ENVIRONMENT} == *"coverage"* ]]; then
echo "Retrieving C++ coverage report"
docker cp $id:/var/lib/jenkins/workspace/build/coverage.info ./test
fi
if [[ ${BUILD_ENVIRONMENT} == *"coverage"* || ${BUILD_ENVIRONMENT} == *"onnx"* ]]; then
echo "Retrieving Python coverage report"
docker cp $id:/var/lib/jenkins/workspace/test/.coverage ./test
docker cp $id:/var/lib/jenkins/workspace/test/coverage.xml ./test
echo "Retrieving C++ coverage report"
docker cp $id:/var/lib/jenkins/workspace/build/coverage.info ./test
python3 -mpip install codecov
python3 -mcodecov
fi
Expand Down
1 change: 1 addition & 0 deletions .jenkins/caffe2/test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,7 @@ fi
# See comments on
# https://github.com/HypothesisWorks/hypothesis-python/commit/eadd62e467d6cee6216e71b391951ec25b4f5830
$MAYBE_SUDO pip -q uninstall -y hypothesis
$MAYBE_SUDO pip -q uninstall -y coverage
# "pip install hypothesis==3.44.6" from official server is unreliable on
# CircleCI, so we host a copy on S3 instead
$MAYBE_SUDO pip -q install attrs==18.1.0 -f https://s3.amazonaws.com/ossci-linux/wheels/attrs-18.1.0-py2.py3-none-any.whl
Expand Down
1 change: 1 addition & 0 deletions .jenkins/pytorch/codegen-test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ python -m tools.pyi.gen_pyi \
# autograd codegen (called by torch codegen but can run independently)
python -m tools.autograd.gen_autograd \
"$OUT"/torch/share/ATen/Declarations.yaml \
aten/src/ATen/native/native_functions.yaml \
"$OUT"/autograd \
tools/autograd

Expand Down
314 changes: 307 additions & 7 deletions aten/src/ATen/BatchingRegistrations.cpp

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
Expand Up @@ -323,8 +323,6 @@ namespace impl {
call_functor_with_args_from_stack_(Functor* functor, Stack* stack, std::index_sequence<ivalue_arg_indices...>) {
(void)(stack); // when sizeof...(ivalue_arg_indices) == 0, this argument would be unused and we have to silence the compiler warning.

constexpr size_t num_ivalue_args = sizeof...(ivalue_arg_indices);

/*
* For ops that take "Tensor&" as an argument, ivalue_to_arg would still return a "Tensor" by value
* and C++ doesn't allow us to call (*functor) with a temporary "Tensor" when it expects "Tensor&".
Expand All @@ -335,7 +333,7 @@ namespace impl {
using ArgTypes = typename guts::infer_function_traits_t<Functor>::parameter_types;
return (*functor)(reference_cast<guts::typelist::element_t<ivalue_arg_indices, ArgTypes>>(
ivalue_to_arg<std::decay_t<guts::typelist::element_t<ivalue_arg_indices, ArgTypes>>, AllowDeprecatedTypes>::call(
std::move(torch::jit::peek(*stack, ivalue_arg_indices, num_ivalue_args))
std::move(torch::jit::peek(*stack, ivalue_arg_indices, sizeof...(ivalue_arg_indices)))
))...);
}

Expand Down
28 changes: 28 additions & 0 deletions aten/src/ATen/native/ForeachOpsKernels.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -149,20 +149,48 @@ void foreach_tensor_##OP##_scalarlist_slow_(TensorList input, TensorList tensors

// Stamped-out slow (reference) CPU kernels for the _foreach_* operator
// families. Each macro invocation below expands to the corresponding
// foreach_tensor_<op>_* functions; the macro bodies are defined earlier
// in this file.

// Binary ops over two tensor lists that also take an alpha scalar.
FOREACH_BINARY_OP_LIST_ALPHA(add);
FOREACH_BINARY_OP_LIST_ALPHA(sub);

// Binary ops between a tensor list and a single scalar.
FOREACH_BINARY_OP_SCALAR(add);
FOREACH_BINARY_OP_SCALAR(sub);
FOREACH_BINARY_OP_SCALAR(mul);
FOREACH_BINARY_OP_SCALAR(div);

// Binary ops between a tensor list and a list of scalars (one per tensor).
FOREACH_BINARY_OP_SCALARLIST(add);
FOREACH_BINARY_OP_SCALARLIST(sub);
FOREACH_BINARY_OP_SCALARLIST(mul);
FOREACH_BINARY_OP_SCALARLIST(div);

// Binary ops over two tensor lists (no alpha).
FOREACH_BINARY_OP_LIST(mul);
FOREACH_BINARY_OP_LIST(div);

// Elementwise unary ops applied to every tensor in the list.
FOREACH_UNARY_OP(sqrt);
FOREACH_UNARY_OP(exp);
FOREACH_UNARY_OP(abs);
FOREACH_UNARY_OP(acos);
FOREACH_UNARY_OP(asin);
FOREACH_UNARY_OP(atan);
FOREACH_UNARY_OP(ceil);
FOREACH_UNARY_OP(cos);
FOREACH_UNARY_OP(cosh);
FOREACH_UNARY_OP(erf);
FOREACH_UNARY_OP(erfc);
FOREACH_UNARY_OP(expm1);
FOREACH_UNARY_OP(floor);
FOREACH_UNARY_OP(log);
FOREACH_UNARY_OP(log10);
FOREACH_UNARY_OP(log1p);
FOREACH_UNARY_OP(log2);
FOREACH_UNARY_OP(neg);
FOREACH_UNARY_OP(tan);
FOREACH_UNARY_OP(tanh);
FOREACH_UNARY_OP(sin);
FOREACH_UNARY_OP(sinh);
FOREACH_UNARY_OP(round);
FOREACH_UNARY_OP(lgamma);

// Pointwise ternary ops (e.g. input + scalar * t1 op t2) with a single scalar.
FOREACH_POINTWISE_OP_SCALAR(addcdiv);
FOREACH_POINTWISE_OP_SCALAR(addcmul);

// Pointwise ternary ops with a per-tensor scalar list.
FOREACH_POINTWISE_OP_SCALARLIST(addcdiv);
FOREACH_POINTWISE_OP_SCALARLIST(addcmul);


Expand Down
11 changes: 2 additions & 9 deletions aten/src/ATen/native/Resize.h
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
#pragma once

#include <ATen/ATen.h>
#include <ATen/native/ResizeCommon.h>
#include <TH/THTensor.hpp>

namespace at { namespace native {
Expand Down Expand Up @@ -51,15 +52,7 @@ inline TensorImpl* resize_impl_cpu_(
if (stride) {
self->set_sizes_and_strides(size, *stride);
// NB: storage size can be different from numel.
for (size_t dim = 0; dim < size.size(); ++dim) {
// FIXME: Don't rely on storage_size being negative because this
// may not be true for some edge cases.
if (size[dim] == 0) {
storage_size = 0;
break;
}
storage_size += (size[dim] - 1) * stride.value()[dim];
}
storage_size = storage_size_for(size, *stride);
} else {
self->set_sizes_contiguous(size);
storage_size = self->numel();
Expand Down
15 changes: 15 additions & 0 deletions aten/src/ATen/native/ResizeCommon.h
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,21 @@

namespace at { namespace native {

// Returns the minimum number of elements a storage must hold to back a
// tensor with the given sizes and strides:
//   1 + sum_d (size[d] - 1) * stride[d]
// If any dimension has size 0, the tensor holds no elements and the
// required storage size is 0.
inline int64_t storage_size_for(IntArrayRef size, IntArrayRef stride) {
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(size.size() == stride.size(),
      "storage_size_for(size, stride) requires that size and stride ",
      "have the same size as a precondition.");
  int64_t required = 1;
  for (size_t d = 0; d != size.size(); ++d) {
    if (size[d] == 0) {
      // Empty tensor: no storage needed at all.
      return 0;
    }
    // The furthest-reachable element along dim d is (size[d] - 1) strides away.
    required += (size[d] - 1) * stride[d];
  }
  return required;
}

inline Tensor& resize_named_tensor_(
Tensor& self,
IntArrayRef size,
Expand Down
9 changes: 9 additions & 0 deletions aten/src/ATen/native/TensorFactories.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -341,6 +341,15 @@ Tensor new_empty(
return at::empty(size, self.options().merge_in(options));
}

// Allocates an uninitialized tensor with the requested size and stride,
// inheriting dtype/device/layout from `self` except where `options`
// explicitly overrides them.
Tensor new_empty_strided(
    const Tensor& self,
    IntArrayRef size,
    IntArrayRef stride,
    const TensorOptions& options
) {
  // Caller-supplied options take precedence over self's defaults.
  const auto merged = self.options().merge_in(options);
  return at::empty_strided(size, stride, merged);
}

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eye ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Tensor eye(int64_t n, const TensorOptions& options) {
Expand Down
2 changes: 1 addition & 1 deletion aten/src/ATen/native/TensorShape.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1868,7 +1868,7 @@ Tensor diag(const Tensor& self, int64_t dimension) {
}

// Writes the diagonal of `self` (or, for a 1-D input, the matrix that
// embeds `self` on the given diagonal) into `result` and returns it.
// NOTE: the scraped diff had left both the old AT_DISPATCH_ALL_TYPES_AND
// opening and the new one in place, producing an unclosed macro call;
// only the post-change dispatch (which adds complex-type support) is kept.
Tensor& diag_cpu_out(Tensor &result, const Tensor& self, int64_t dimension) {
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(at::ScalarType::Bool, self.scalar_type(), "diag", [&] {
    apply_diag<scalar_t>(result, self, dimension);
  });
  return result;
Expand Down

0 comments on commit 280f8fe

Please sign in to comment.