Commit

Update base for Update on "Preserve python backtrace in autograd engine errors."

This PR attempts to address #42560 by capturing the appropriate
exception_ptr in the autograd engine and passing it over to the Future.

As part of this change, there is a significant change to the Future API: setError now accepts only an exception_ptr.
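
To make the shape of that API concrete, here is a minimal, self-contained C++ sketch of the exception_ptr flow. The ToyFuture class below is an illustrative stand-in, not PyTorch's actual c10 Future:

```cpp
#include <exception>
#include <iostream>
#include <stdexcept>
#include <utility>

// Toy stand-in for a Future whose setError takes a std::exception_ptr
// (mirroring the API change described above) instead of an error string.
class ToyFuture {
 public:
  void setError(std::exception_ptr eptr) { eptr_ = std::move(eptr); }

  // Rethrows the original exception object, preserving its type and payload.
  void wait() const {
    if (eptr_) {
      std::rethrow_exception(eptr_);
    }
  }

 private:
  std::exception_ptr eptr_;
};

int main() {
  ToyFuture fut;
  try {
    // Stand-in for a user-defined backward() raising an error.
    throw std::runtime_error("something");
  } catch (...) {
    // Capture the live exception rather than flattening it to a string,
    // so the consumer can rethrow it with full fidelity.
    fut.setError(std::current_exception());
  }

  try {
    fut.wait();
  } catch (const std::runtime_error& e) {
    std::cout << "rethrown: " << e.what() << std::endl;
  }
  return 0;
}
```

The point of the change is visible in the catch block: std::current_exception() captures the live exception object, so the consumer of the Future can rethrow it with its original type and message instead of a stringified copy.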

For the example in #42560, the exception trace would now look like:


```
> Traceback (most recent call last):
>   File "test_autograd.py", line 6914, in test_preserve_backtrace
>     Foo.apply(t).sum().backward()
>   File "torch/tensor.py", line 214, in backward
>     torch.autograd.backward(self, gradient, retain_graph, create_graph)
>   File "torch/autograd/__init__.py", line 127, in backward
>     allow_unreachable=True)  # allow_unreachable flag
>   File "torch/autograd/function.py", line 87, in apply
>     return self._forward_cls.backward(self, *args)
>   File "test_autograd.py", line 6910, in backward
>     raise ValueError("something")
> ValueError: something
```

Differential Revision: [D23365408](https://our.internmc.facebook.com/intern/diff/D23365408/)

[ghstack-poisoned]
pritamdamania committed Sep 1, 2020
2 parents 5017e21 + 0394c5a commit 8c3b3d0
Showing 115 changed files with 10,461 additions and 4,671 deletions.
9 changes: 2 additions & 7 deletions .circleci/scripts/cpp_doc_push_script.sh
@@ -47,16 +47,11 @@ sudo apt-get -y install doxygen
 # Generate ATen files
 pushd "${pt_checkout}"
 pip install -r requirements.txt
-time python aten/src/ATen/gen.py \
+time python -m tools.codegen.gen \
   -s aten/src/ATen \
-  -d build/aten/src/ATen \
-  aten/src/ATen/Declarations.cwrap \
-  aten/src/THCUNN/generic/THCUNN.h \
-  aten/src/ATen/nn.yaml \
-  aten/src/ATen/native/native_functions.yaml
+  -d build/aten/src/ATen

 # Copy some required files
-cp aten/src/ATen/common_with_cwrap.py tools/shared/cwrap_common.py
 cp torch/_utils_internal.py tools/shared

 # Generate PyTorch files
8 changes: 2 additions & 6 deletions .github/workflows/lint.yml
@@ -131,13 +131,9 @@ jobs:
 time python setup.py --cmake-only build
 # Generate ATen files.
-time python aten/src/ATen/gen.py \
+time python -m tools.codegen.gen \
   -s aten/src/ATen \
-  -d build/aten/src/ATen \
-  aten/src/ATen/Declarations.cwrap \
-  aten/src/THCUNN/generic/THCUNN.h \
-  aten/src/ATen/nn.yaml \
-  aten/src/ATen/native/native_functions.yaml
+  -d build/aten/src/ATen
 # Generate PyTorch files.
 time python tools/setup_helpers/generate_code.py \
3 changes: 0 additions & 3 deletions .gitignore
@@ -108,9 +108,6 @@ env
 # macOS dir files
 .DS_Store

-# Symbolic files
-tools/shared/cwrap_common.py
-
 # Ninja files
 .ninja_deps
 .ninja_log
2 changes: 2 additions & 0 deletions .jenkins/caffe2/build.sh
@@ -248,6 +248,8 @@ else
 export MAX_JOBS=`expr $(nproc) - 1`
 fi

+pip install --user dataclasses
+
 $PYTHON setup.py install --user

 report_compile_cache_stats
2 changes: 1 addition & 1 deletion .jenkins/pytorch/macos-common.sh
@@ -20,7 +20,7 @@ if [ ! -d "${WORKSPACE_DIR}/miniconda3" ]; then
 fi
 export PATH="${WORKSPACE_DIR}/miniconda3/bin:$PATH"
 source ${WORKSPACE_DIR}/miniconda3/bin/activate
-retry conda install -y mkl mkl-include numpy=1.18.5 pyyaml=5.3 setuptools=46.0.0 cmake cffi ninja typing_extensions
+retry conda install -y mkl mkl-include numpy=1.18.5 pyyaml=5.3 setuptools=46.0.0 cmake cffi ninja typing_extensions dataclasses

 # The torch.hub tests make requests to GitHub.
 #
4 changes: 2 additions & 2 deletions .jenkins/pytorch/win-test-helpers/build_pytorch.bat
@@ -21,8 +21,8 @@ call %INSTALLER_DIR%\install_sccache.bat
 call %INSTALLER_DIR%\install_miniconda3.bat


-:: Install ninja
-if "%REBUILD%"=="" ( pip install -q "ninja==1.9.0" )
+:: Install ninja and other deps
+if "%REBUILD%"=="" ( pip install -q "ninja==1.9.0" dataclasses )

 git submodule sync --recursive
 git submodule update --init --recursive
2 changes: 1 addition & 1 deletion .jenkins/pytorch/win-test-helpers/setup_pytorch_env.bat
@@ -22,7 +22,7 @@ call %CONDA_PARENT_DIR%\Miniconda3\Scripts\activate.bat %CONDA_PARENT_DIR%\Minic
 if NOT "%BUILD_ENVIRONMENT%"=="" (
 :: We have to pin Python version to 3.6.7, until mkl supports Python 3.7
 :: Numba is pinned to 0.44.0 to avoid https://github.com/numba/numba/issues/4352
-call conda install -y -q python=3.6.7 numpy mkl cffi pyyaml boto3 protobuf numba==0.44.0 scipy==1.5.0 typing_extensions
+call conda install -y -q python=3.6.7 numpy mkl cffi pyyaml boto3 protobuf numba==0.44.0 scipy==1.5.0 typing_extensions dataclasses
 if %errorlevel% neq 0 ( exit /b %errorlevel% )
 call conda install -y -q -c conda-forge cmake
 if %errorlevel% neq 0 ( exit /b %errorlevel% )
21 changes: 10 additions & 11 deletions BUILD.bazel
@@ -106,17 +106,19 @@ cc_test(
     ],
 )

+# TODO: refactor this into its own library (but how to make
+# a binary based off of a module in a library?)
 py_binary(
     name = "gen",
-    srcs = ["aten/src/ATen/gen.py"],
+    srcs = ["tools/setup_helpers/gen.py"],
+    deps = [
+        ":tools_codegen"
+    ],
 )

 genrule(
     name = "generated_cpp",
     srcs = [
-        "aten/src/ATen/Declarations.cwrap",
-        "aten/src/THCUNN/generic/THCUNN.h",
-        "aten/src/ATen/nn.yaml",
         "aten/src/ATen/native/native_functions.yaml",
     ] + glob(["aten/src/ATen/templates/**"]),
     outs = [
@@ -126,8 +128,6 @@ genrule(
         "aten/src/ATen/CPUType.cpp",
         "aten/src/ATen/Functions.h",
         "aten/src/ATen/Functions.cpp",
-        "aten/src/ATen/LegacyTHFunctionsCPU.h",
-        "aten/src/ATen/LegacyTHFunctionsCPU.cpp",
         "aten/src/ATen/NativeFunctions.h",
         "aten/src/ATen/MkldnnCPUType.h",
         "aten/src/ATen/MkldnnCPUType.cpp",
@@ -141,14 +141,13 @@ genrule(
         "aten/src/ATen/core/TensorMethods.cpp",
         "aten/src/ATen/core/ATenOpList.cpp",
     ],
-    cmd = "$(location :gen) --source-path aten/src/ATen --install_dir `dirname $(location aten/src/ATen/Declarations.yaml)` aten/src/ATen/Declarations.cwrap aten/src/THCUNN/generic/THCUNN.h aten/src/ATen/nn.yaml aten/src/ATen/native/native_functions.yaml",
+    cmd = "$(location :gen) --source-path aten/src/ATen --install_dir `dirname $(location aten/src/ATen/Declarations.yaml)`",
     tools = [":gen"],
 )

 py_library(
-    name = "code_template",
-    srcs = ["aten/src/ATen/code_template.py"],
-    imports = ["aten"],
+    name = "tools_codegen",
+    srcs = glob(["tools/codegen/**/*.py"]),
 )

 py_library(
@@ -158,7 +157,7 @@ py_library(
         "tools/autograd/*.yaml",
         "tools/autograd/templates/*",
     ]),
-    deps = [":code_template"],
+    deps = [":tools_codegen"],
 )

 py_library(
2 changes: 1 addition & 1 deletion README.md
@@ -169,7 +169,7 @@ If you are building for NVIDIA's Jetson platforms (Jetson Nano, TX1, TX2, AGX Xa

 Common
 ```bash
-conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi typing_extensions future six requests
+conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi typing_extensions future six requests dataclasses
 ```

 On Linux
2 changes: 2 additions & 0 deletions aten/src/ATen/BatchingRegistrations.cpp
@@ -426,6 +426,8 @@ TORCH_LIBRARY_IMPL(aten, Batched, m) {
   m.impl("pow.Tensor_Scalar", unary_pointwise_batching_rule<TensorScalarType, at::pow, Scalar>);
   m.impl("pow.Scalar", pow_scalar_Tensor_batching_rule);

+  m.impl("sigmoid_backward", binary_pointwise_batching_rule<TensorTensorType, at::sigmoid_backward>);
+
   // for at::result_type, call the native::result_type implementation.
   // We don't have to do anything special because native::result_type operates
   // on the logical shape of the tensors.
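For background on the binary_pointwise_batching_rule registration above: a binary pointwise op such as sigmoid_backward applies one scalar function independently at every (batch, element) position, so batching it requires no op-specific logic. Below is a rough, self-contained toy sketch of that idea; it is an illustration only, not PyTorch's actual template machinery or tensor types:

```cpp
#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

// Toy "batched tensor": an outer batch dimension over flat element vectors.
using Batch = std::vector<std::vector<double>>;

// Generic binary pointwise batching rule: apply the same scalar op at every
// (batch, element) position of two equally-shaped batched inputs.
Batch binary_pointwise(const Batch& a, const Batch& b,
                       const std::function<double(double, double)>& op) {
  Batch out(a.size());
  for (std::size_t i = 0; i < a.size(); ++i) {       // per-batch
    out[i].resize(a[i].size());
    for (std::size_t j = 0; j < a[i].size(); ++j) {  // per-element
      out[i][j] = op(a[i][j], b[i][j]);
    }
  }
  return out;
}

int main() {
  // sigmoid_backward(grad, out) = grad * out * (1 - out), applied pointwise.
  auto sigmoid_backward = [](double grad, double out) {
    return grad * out * (1.0 - out);
  };
  Batch grad = {{1.0, 1.0}, {0.5, 2.0}};
  Batch out = {{0.5, 0.8}, {0.1, 0.9}};
  for (const auto& row : binary_pointwise(grad, out, sigmoid_backward)) {
    for (double v : row) {
      std::cout << v << ' ';
    }
  }
  std::cout << '\n';  // prints: 0.25 0.16 0.045 0.18
  return 0;
}
```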
