Commit

Merge branch 'master' into upsampling-expose-recompute_scale_factor
vfdev-5 committed Oct 19, 2021
2 parents 27f53b3 + 0a07488 commit 5ae8b86
Showing 506 changed files with 8,494 additions and 4,243 deletions.
2 changes: 2 additions & 0 deletions .github/templates/linux_ci_workflow.yml.j2
@@ -223,6 +223,8 @@ jobs:
# Time out the test phase after !{{ timeout_after }} minutes
timeout-minutes: !{{ timeout_after }}
run: |
set -x

if [[ $TEST_CONFIG == 'multigpu' ]]; then
TEST_COMMAND=.jenkins/pytorch/multigpu-test.sh
elif [[ $BUILD_ENVIRONMENT == *onnx* ]]; then

Some generated files are not rendered by default.

2 changes: 2 additions & 0 deletions .github/workflows/generated-linux-bionic-py3.6-clang9.yml

2 changes: 2 additions & 0 deletions .github/workflows/generated-linux-xenial-py3.6-gcc5.4.yml

2 changes: 1 addition & 1 deletion .jenkins/pytorch/common_utils.sh
@@ -70,7 +70,7 @@ function file_diff_from_base() {

function get_bazel() {
# download bazel version
wget https://github.com/bazelbuild/bazel/releases/download/4.2.1/bazel-4.2.1-linux-x86_64 -O tools/bazel
wget https://ossci-linux.s3.amazonaws.com/bazel-4.2.1-linux-x86_64 -O tools/bazel
# verify content
echo '1a4f3a3ce292307bceeb44f459883859c793436d564b95319aacb8af1f20557c tools/bazel' | sha256sum --quiet -c

35 changes: 21 additions & 14 deletions .jenkins/pytorch/test.sh
@@ -4,12 +4,11 @@
# (This is set by default in the Docker images we build, so you don't
# need to set it yourself.

set -ex

# shellcheck disable=SC2034
COMPACT_JOB_NAME="${BUILD_ENVIRONMENT}"

# Get fully qualified path using realpath
CUSTOM_TEST_ARTIFACT_BUILD_DIR=$(realpath "${CUSTOM_TEST_ARTIFACT_BUILD_DIR:-${PWD}/../}")

TORCH_INSTALL_DIR=$(python -c "import site; print(site.getsitepackages()[0])")/torch
TORCH_BIN_DIR="$TORCH_INSTALL_DIR"/bin
TORCH_LIB_DIR="$TORCH_INSTALL_DIR"/lib
@@ -24,6 +23,12 @@ if [[ -n "${TEST_CONFIG}" ]]; then
BUILD_ENVIRONMENT="${BUILD_ENVIRONMENT}-${TEST_CONFIG}"
fi

# Get fully qualified path using realpath
if [[ "$BUILD_ENVIRONMENT" != *bazel* ]]; then
CUSTOM_TEST_ARTIFACT_BUILD_DIR=$(realpath "${CUSTOM_TEST_ARTIFACT_BUILD_DIR:-${PWD}/../}")
fi


# shellcheck source=./common.sh
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"

@@ -69,6 +74,9 @@ fi
if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
# Print GPU info
rocminfo | grep -E 'Name:.*\sgfx|Marketing'

# Manually set NUM_TEST_SHARDS since Jenkins doesn't do it
export NUM_TEST_SHARDS=2
fi

# --user breaks ppc64le builds and these packages are already in ppc64le docker
@@ -154,13 +162,12 @@ test_python_legacy_jit() {
assert_git_not_dirty
}

test_python_shard1() {
time python test/run_test.py --exclude-jit-executor --exclude-distributed-tests --shard 1 2 --verbose --determine-from="$DETERMINE_FROM"
assert_git_not_dirty
}

test_python_shard2() {
time python test/run_test.py --exclude-jit-executor --exclude-distributed-tests --shard 2 2 --verbose --determine-from="$DETERMINE_FROM"
test_python_shard() {
if [[ -z "$NUM_TEST_SHARDS" ]]; then
echo "NUM_TEST_SHARDS must be defined to run a Python test shard"
exit 1
fi
time python test/run_test.py --exclude-jit-executor --exclude-distributed-tests --shard "$1" "$NUM_TEST_SHARDS" --verbose --determine-from="$DETERMINE_FROM"
assert_git_not_dirty
}

@@ -415,7 +422,7 @@ test_xla() {
assert_git_not_dirty
}

# Do NOT run this test before any other tests, like test_python_shard1, etc.
# Do NOT run this test before any other tests, like test_python_shard, etc.
# Because this function uninstalls the torch built from branch, and install
# nightly version.
test_backward_compatibility() {
@@ -521,11 +528,11 @@ elif [[ "${BUILD_ENVIRONMENT}" == *-test1 || "${JOB_BASE_NAME}" == *-test1 || ("
fi
test_without_numpy
install_torchvision
test_python_shard1
test_python_shard 1
test_aten
elif [[ "${BUILD_ENVIRONMENT}" == *-test2 || "${JOB_BASE_NAME}" == *-test2 || ("${SHARD_NUMBER}" == 2 && $NUM_TEST_SHARDS -gt 1) ]]; then
install_torchvision
test_python_shard2
test_python_shard 2
test_libtorch
test_aot_compilation
test_custom_script_ops
@@ -535,7 +542,7 @@ elif [[ "${BUILD_ENVIRONMENT}" == *vulkan* ]]; then
test_vulkan
elif [[ "${BUILD_ENVIRONMENT}" == *-bazel-* ]]; then
test_bazel
elif [[ "${BUILD_ENVIRONMENT}" == *distributed* ]]; then
elif [[ "${BUILD_ENVIRONMENT}" == *distributed* || "${JOB_BASE_NAME}" == *distributed* ]]; then
test_distributed
test_rpc
elif [[ "${TEST_CONFIG}" = docs_test ]]; then
15 changes: 8 additions & 7 deletions android/pytorch_android/src/main/cpp/pytorch_jni_common.cpp
@@ -4,6 +4,7 @@
#include <string>

#include <c10/core/MemoryFormat.h>
#include <c10/util/irange.h>

#include <fbjni/ByteBuffer.h>
#include <fbjni/fbjni.h>
@@ -97,7 +98,7 @@ static at::Tensor newAtTensor(
std::vector<int64_t> shapeVec{};
shapeVec.reserve(rank);
auto numel = 1;
for (auto i = 0; i < rank; ++i) {
for (const auto i : c10::irange(rank)) {
shapeVec.push_back(shapeArr[i]);
numel *= shapeArr[i];
}
@@ -521,7 +522,7 @@ at::IValue JIValue::JIValueToAtIValue(

std::vector<at::IValue> elements;
elements.reserve(n);
for (auto i = 0; i < n; ++i) {
for (const auto i : c10::irange(n)) {
auto jivalue_element = jarray->getElement(i);
auto element = JIValue::JIValueToAtIValue(jivalue_element);
elements.push_back(std::move(element));
@@ -535,7 +536,7 @@ at::IValue JIValue::JIValueToAtIValue(
size_t n = jArrayPinned.size();
c10::List<bool> list{};
list.reserve(n);
for (size_t i = 0; i < n; ++i) {
for (const auto i : c10::irange(n)) {
list.push_back(jArrayPinned[i]);
}
return at::IValue{std::move(list)};
@@ -547,7 +548,7 @@ at::IValue JIValue::JIValueToAtIValue(
size_t n = jArrayPinned.size();
c10::List<int64_t> list{};
list.reserve(n);
for (size_t i = 0; i < n; ++i) {
for (const auto i : c10::irange(n)) {
list.push_back(jArrayPinned[i]);
}
return at::IValue{std::move(list)};
@@ -559,7 +560,7 @@ at::IValue JIValue::JIValueToAtIValue(
size_t n = jArrayPinned.size();
c10::List<double> list{};
list.reserve(n);
for (size_t i = 0; i < n; ++i) {
for (const auto i : c10::irange(n)) {
list.push_back(jArrayPinned[i]);
}
return at::IValue{std::move(list)};
@@ -572,7 +573,7 @@ at::IValue JIValue::JIValueToAtIValue(
size_t n = jArray->size();
c10::List<at::Tensor> list{};
list.reserve(n);
for (size_t i = 0; i < n; ++i) {
for (const auto i : c10::irange(n)) {
list.push_back(
TensorHybrid::newAtTensorFromJTensor(jArray->getElement(i)));
}
@@ -594,7 +595,7 @@ at::IValue JIValue::JIValueToAtIValue(
c10::impl::GenericList list{c10::unshapedType(first_element.type())};
list.reserve(n);
list.push_back(first_element);
for (auto i = 1; i < n; ++i) {
for (const auto i : c10::irange(1, n)) {
auto jivalue_element = jarray->getElement(i);
auto element = JIValue::JIValueToAtIValue(jivalue_element);
list.push_back(element);
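
Note: the hunks in this file (and in the ATen files below) all apply one mechanical change: manual index loops of the form "for (auto i = 0; i < n; ++i)" become range-based loops over c10::irange, which keeps the index const and avoids signed/unsigned mismatches. A minimal, self-contained sketch of the pattern follows; the shape values are made up for illustration and are not taken from this commit, and building it assumes the c10 headers and library are available.

#include <cstdint>
#include <iostream>
#include <vector>

#include <c10/util/irange.h>

int main() {
  std::vector<int64_t> shape{2, 3, 4};

  // c10::irange(n) iterates 0, 1, ..., n-1, like the rewritten loops above.
  int64_t numel = 1;
  for (const auto i : c10::irange(shape.size())) {
    numel *= shape[i];
  }
  std::cout << "numel = " << numel << std::endl;  // 24

  // The two-argument form c10::irange(begin, end) starts at begin, as in the
  // GenericList hunk above that skips the already-pushed first element.
  for (const auto i : c10::irange(1, shape.size())) {
    std::cout << "dim " << i << " = " << shape[i] << std::endl;
  }
  return 0;
}
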
5 changes: 3 additions & 2 deletions android/pytorch_android/src/main/cpp/pytorch_jni_lite.cpp
@@ -6,6 +6,7 @@
#include <fbjni/ByteBuffer.h>
#include <fbjni/fbjni.h>

#include <c10/util/irange.h>
#include <torch/csrc/jit/mobile/import.h>
#include <torch/csrc/jit/mobile/module.h>
#include <torch/script.h>
@@ -157,7 +158,7 @@ class PytorchJni : public facebook::jni::HybridClass<PytorchJni> {
std::vector<at::IValue> inputs{};
size_t n = jinputs->size();
inputs.reserve(n);
for (size_t i = 0; i < n; i++) {
for (const auto i : c10::irange(n)) {
at::IValue atIValue = JIValue::JIValueToAtIValue(jinputs->getElement(i));
if (at::kVulkan == deviceType_) {
inputs.push_back(
@@ -186,7 +187,7 @@ class PytorchJni : public facebook::jni::HybridClass<PytorchJni> {
std::vector<at::IValue> inputs{};
size_t n = jinputs->size();
inputs.reserve(n);
for (size_t i = 0; i < n; i++) {
for (const auto i : c10::irange(n)) {
at::IValue atIValue = JIValue::JIValueToAtIValue(jinputs->getElement(i));
if (at::kVulkan == deviceType_) {
inputs.push_back(
3 changes: 2 additions & 1 deletion aten/src/ATen/BatchingRegistrations.cpp
@@ -3,6 +3,7 @@
#include <ATen/BatchedFallback.h>
#include <ATen/native/ResizeCommon.h>
#include <ATen/ATen.h>
#include <c10/util/irange.h>

namespace at {

@@ -329,7 +330,7 @@ Tensor permute_batching_rule(const Tensor& self, IntArrayRef dims) {

VmapDimVector all_dims_physical;
all_dims_physical.reserve(self_physical.tensor().dim());
for (int64_t bdim = 0; bdim < self_physical.numBatchDims(); bdim++) {
for (const auto bdim : c10::irange(self_physical.numBatchDims())) {
all_dims_physical.push_back(bdim);
}
all_dims_physical.insert(
5 changes: 3 additions & 2 deletions aten/src/ATen/CPUApplyUtils.h
@@ -2,6 +2,7 @@

#include <ATen/Parallel.h>
#include <ATen/TensorUtils.h>
#include <c10/util/irange.h>
#include <limits>
#include <utility>
#include <cstring>
@@ -130,7 +131,7 @@ inline Tensor sort_strides(Tensor& tensor_) {
IntArrayRef strides = tensor_.strides();
std::vector<int64_t> indices;
indices.reserve(tensor_.ndimension());
for (int64_t i = 0; i < tensor_.ndimension(); i++) {
for (const auto i : c10::irange(tensor_.ndimension())) {
indices.push_back(i);
}
std::sort(indices.begin(), indices.end(), [&strides](int64_t i1, int64_t i2) {
@@ -196,7 +197,7 @@ inline bool _all_equal_numel(at::ArrayRef<Tensor> tensors) {
if (tensors.size() == 0)
return true;
int64_t all_numel = tensors[0].numel();
for (size_t i = 1; i < tensors.size(); i++) {
for (const auto i : c10::irange(1, tensors.size())) {
if (tensors[i].numel() != all_numel)
return false;
}
28 changes: 21 additions & 7 deletions aten/src/ATen/Context.cpp
@@ -62,18 +62,32 @@ bool Context::deterministicAlgorithms() const {
return _deterministic_algorithms;
}

void Context::setDeterministicAlgorithms(bool b) {
bool Context::deterministicAlgorithmsWarnOnly() const {
return _deterministic_algorithms_warn_only;
}

void Context::setDeterministicAlgorithms(bool b, bool warn_only=false) {
_deterministic_algorithms = b;
_deterministic_algorithms_warn_only = warn_only;
}

void Context::alertNotDeterministic(c10::string_view const& caller) {
if (globalContext().deterministicAlgorithms()) {
TORCH_CHECK(false,
caller, " does not have a deterministic implementation, but you set "
"'torch.use_deterministic_algorithms(True)'. You can turn off determinism ",
"just for this operation if that's acceptable for your application. You "
"can also file an issue at https://github.com/pytorch/pytorch/issues "
"to help us prioritize adding deterministic support for this operation.");
if (globalContext().deterministicAlgorithmsWarnOnly()) {
TORCH_WARN(
caller, " does not have a deterministic implementation, but you set "
"'torch.use_deterministic_algorithms(True, warn_only=True)'. "
"You can file an issue at https://github.com/pytorch/pytorch/issues "
"to help us prioritize adding deterministic support for this operation.");
} else {
TORCH_CHECK(false,
caller, " does not have a deterministic implementation, but you set "
"'torch.use_deterministic_algorithms(True)'. You can turn off "
"determinism just for this operation, or you can use the "
"'warn_only=True' option, if that's acceptable for your application. "
"You can also file an issue at https://github.com/pytorch/pytorch/issues "
"to help us prioritize adding deterministic support for this operation.");
}
}
}

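Note: the Context.cpp hunk above adds a warn-only mode for deterministic-algorithm enforcement: alertNotDeterministic still raises an error when deterministic algorithms are required, but only emits a warning when the new warn_only flag is set (the Python-level counterpart referenced in the messages is torch.use_deterministic_algorithms(True, warn_only=True)). A short sketch of how a kernel might hit this path follows; the kernel name is hypothetical, the snippet is not part of this commit, and it assumes the usual ATen headers with linkage against libtorch.

#include <ATen/Context.h>

// Hypothetical non-deterministic kernel, for illustration only.
void my_nondeterministic_kernel() {
  // With deterministic algorithms required this throws; with the new
  // warn_only mode it only prints a warning and continues.
  at::globalContext().alertNotDeterministic("my_nondeterministic_kernel");
  // ... non-deterministic work would go here ...
}

int main() {
  // Opt in to the warn-only behavior added by this commit.
  at::globalContext().setDeterministicAlgorithms(/*b=*/true, /*warn_only=*/true);
  my_nondeterministic_kernel();  // warns instead of throwing
  return 0;
}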
