Skip to content

Commit

Permalink
Update on "[ONNX] Add binary_cross_entropy_with_logits op to ONNX opset version 12 (#49675)"
Browse files Browse the repository at this point in the history

Fixes #47997
Exporting the operator binary_cross_entropy_with_logits to ONNX opset version 12.

[ghstack-poisoned]
  • Loading branch information
BowenBao committed Jan 21, 2021
2 parents b8aee3f + 6467593 commit f75572e
Show file tree
Hide file tree
Showing 292 changed files with 9,880 additions and 2,592 deletions.
38 changes: 19 additions & 19 deletions .circleci/scripts/binary_linux_test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -39,27 +39,27 @@ fi
# conda build scripts themselves. These should really be consolidated
pkg="/final_pkgs/\$(ls /final_pkgs)"
if [[ "$PACKAGE_TYPE" == conda ]]; then
conda install \${EXTRA_CONDA_FLAGS} -y "\$pkg" --offline
if [[ "$DESIRED_CUDA" == 'cpu' ]]; then
retry conda install \${EXTRA_CONDA_FLAGS} -y cpuonly -c pytorch
fi
retry conda install \${EXTRA_CONDA_FLAGS} -yq future numpy protobuf six
if [[ "$DESIRED_CUDA" != 'cpu' ]]; then
# DESIRED_CUDA is in format cu90 or cu102
if [[ "${#DESIRED_CUDA}" == 4 ]]; then
cu_ver="${DESIRED_CUDA:2:1}.${DESIRED_CUDA:3}"
else
cu_ver="${DESIRED_CUDA:2:2}.${DESIRED_CUDA:4}"
(
# For some reason conda likes to re-activate the conda environment when attempting this install
# which means that a deactivate is run and some variables might not exist when that happens,
# namely CONDA_MKL_INTERFACE_LAYER_BACKUP from libblas so let's just ignore unbound variables when
# it comes to the conda installation commands
set +u
conda install \${EXTRA_CONDA_FLAGS} -y "\$pkg" --offline
if [[ "$DESIRED_CUDA" == 'cpu' ]]; then
retry conda install \${EXTRA_CONDA_FLAGS} -y cpuonly -c pytorch
fi
(
# For some reason conda likes to re-activate the conda environment when attempting this install
# which means that a deactivate is run and some variables might not exist when that happens,
# namely CONDA_MKL_INTERFACE_LAYER_BACKUP from libblas so let's just ignore unbound variables when
# it comes to the conda installation commands
set +u
retry conda install \${EXTRA_CONDA_FLAGS} -yq future numpy protobuf six
if [[ "$DESIRED_CUDA" != 'cpu' ]]; then
# DESIRED_CUDA is in format cu90 or cu102
if [[ "${#DESIRED_CUDA}" == 4 ]]; then
cu_ver="${DESIRED_CUDA:2:1}.${DESIRED_CUDA:3}"
else
cu_ver="${DESIRED_CUDA:2:2}.${DESIRED_CUDA:4}"
fi
retry conda install \${EXTRA_CONDA_FLAGS} -yq -c nvidia -c pytorch "cudatoolkit=\${cu_ver}"
)
fi
fi
)
elif [[ "$PACKAGE_TYPE" != libtorch ]]; then
pip install "\$pkg"
retry pip install -q future numpy protobuf six
Expand Down
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ test/.coverage
test/.hypothesis/
test/cpp/api/mnist
test/custom_operator/model.pt
test/jit_hooks/*.pt
test/data/legacy_modules.t7
test/data/*.pt
test/backward_compatibility/nightly_schemas.txt
Expand All @@ -52,6 +53,8 @@ test/cpp_extensions/install/
test/test-reports/
third_party/build/
tools/shared/_utils_internal.py
tools/fast_nvcc/wrap_nvcc.sh
tools/fast_nvcc/tmp/
torch.egg-info/
torch/_C/__init__.pyi
torch/_C/_nn.pyi
Expand Down
15 changes: 14 additions & 1 deletion .jenkins/pytorch/build.sh
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,7 @@ if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
fi

python tools/amd_build/build_amd.py
python setup.py install --user
python setup.py install

# remove sccache wrappers post-build; runtime compilation of MIOpen kernels does not yet fully support them
sudo rm -f /opt/cache/bin/cc
Expand Down Expand Up @@ -223,6 +223,18 @@ else
popd
assert_git_not_dirty

# Build jit hook tests
JIT_HOOK_BUILD="$PWD/../jit-hook-build"
JIT_HOOK_TEST="$PWD/test/jit_hooks"
python --version
SITE_PACKAGES="$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"
mkdir "$JIT_HOOK_BUILD"
pushd "$JIT_HOOK_BUILD"
cmake "$JIT_HOOK_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" -DPYTHON_EXECUTABLE="$(which python)"
make VERBOSE=1
popd
assert_git_not_dirty

# Build custom backend tests.
CUSTOM_BACKEND_BUILD="$PWD/../custom-backend-build"
CUSTOM_BACKEND_TEST="$PWD/test/custom_backend"
Expand Down Expand Up @@ -261,6 +273,7 @@ if [[ "${BUILD_ENVIRONMENT}" == *xla* ]]; then
# TODO: Move this to Dockerfile.

pip_install lark-parser
pip_install cloud-tpu-client

sudo apt-get -qq update
sudo apt-get -qq install npm nodejs
Expand Down
21 changes: 21 additions & 0 deletions .jenkins/pytorch/macos-test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -134,18 +134,39 @@ test_custom_script_ops() {
assert_git_not_dirty
}

# Build and run the C++ JIT-hooks test: compiles test/jit_hooks with CMake
# against the installed torch package, exports script modules from Python,
# then loads and exercises them from the C++ binary.
test_jit_hooks() {
echo "Testing jit hooks in cpp"
pushd test/jit_hooks
# Build the custom operator library.
# Fresh build directory each run so stale CMake caches can't leak in.
rm -rf build && mkdir build
pushd build
# Locate the site-packages torch install so CMake can find TorchConfig.cmake.
SITE_PACKAGES="$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"
CMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" cmake ..
# VERBOSE=1 echoes full compiler command lines into the CI log for debugging.
make VERBOSE=1
popd

# Run tests Python-side and export a script module.
python model.py --export-script-module=model
# Run tests C++-side and load the exported script module.
build/test_jit_hooks ./model
popd
# Fail the job if the build/test steps left untracked or modified files.
assert_git_not_dirty
}


# Dispatch which test suites run on this macOS worker.
# An empty BUILD_ENVIRONMENT or a plain "*-test" suffix means a single
# worker runs everything; "*-test1"/"*-test2" split the suites across
# two shards (Python tests on shard 1, C++/libtorch suites on shard 2).
if [ -z "${BUILD_ENVIRONMENT}" ] || [[ "${BUILD_ENVIRONMENT}" == *-test ]]; then
test_python_all
test_libtorch
test_custom_script_ops
test_jit_hooks
test_custom_backend
else
if [[ "${BUILD_ENVIRONMENT}" == *-test1 ]]; then
test_python_all
elif [[ "${BUILD_ENVIRONMENT}" == *-test2 ]]; then
test_libtorch
test_custom_script_ops
test_jit_hooks
test_custom_backend
fi
fi
15 changes: 15 additions & 0 deletions .jenkins/pytorch/test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -242,6 +242,21 @@ test_custom_script_ops() {
fi
}

# Run the C++ JIT-hooks tests using artifacts prebuilt by build.sh
# (jit-hook-build); skipped on ROCm and ASan configurations.
test_jit_hooks() {
if [[ "$BUILD_ENVIRONMENT" != *rocm* ]] && [[ "$BUILD_ENVIRONMENT" != *asan* ]] ; then
echo "Testing jit hooks in cpp"
# build.sh placed the compiled test binary one level above the checkout;
# copy it into the expected ./build location rather than rebuilding here.
HOOK_BUILD="$PWD/../jit-hook-build"
pushd test/jit_hooks
cp -a "$HOOK_BUILD" build
# Run tests Python-side and export the script modules with hooks
python model.py --export-script-module=model
# Run tests C++-side and load the exported script modules
build/test_jit_hooks ./model
popd
# Fail the job if the test run left untracked or modified files.
assert_git_not_dirty
fi
}

test_torch_function_benchmark() {
echo "Testing __torch_function__ benchmarks"
pushd benchmarks/overrides_benchmark
Expand Down

This file was deleted.

Original file line number Diff line number Diff line change
Expand Up @@ -12,9 +12,7 @@ if ERRORLEVEL 1 exit /b 1
if ERRORLEVEL 1 exit /b 1

echo Run nn tests
python run_test.py --include test_nn --verbose --determine-from="%1"
python run_test.py --exclude-jit-executor --shard 1 2 --verbose --determine-from="%1"
if ERRORLEVEL 1 exit /b 1

popd


Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
call %SCRIPT_HELPERS_DIR%\setup_pytorch_env.bat
cd test && python run_test.py --exclude-jit-executor --shard 2 2 --verbose --determine-from="%1" && cd ..
if ERRORLEVEL 1 exit /b 1
8 changes: 4 additions & 4 deletions .jenkins/pytorch/win-test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -48,15 +48,15 @@ run_tests() {
$SCRIPT_HELPERS_DIR/test_custom_backend.bat
$SCRIPT_HELPERS_DIR/test_libtorch.bat
else
export PYTORCH_COLLECT_COVERAGE=1
if [[ "${JOB_BASE_NAME}" == *-test1 ]]; then
export PYTORCH_COLLECT_COVERAGE=1
$SCRIPT_HELPERS_DIR/test_python_nn.bat "$DETERMINE_FROM"
$SCRIPT_HELPERS_DIR/test_python_first_shard.bat "$DETERMINE_FROM"
$SCRIPT_HELPERS_DIR/test_libtorch.bat
if [[ "${USE_CUDA}" == "1" ]]; then
$SCRIPT_HELPERS_DIR/test_python_jit_legacy.bat "$DETERMINE_FROM"
fi
elif [[ "${JOB_BASE_NAME}" == *-test2 ]]; then
$SCRIPT_HELPERS_DIR/test_python_all_except_nn.bat "$DETERMINE_FROM"
$SCRIPT_HELPERS_DIR/test_python_second_shard.bat "$DETERMINE_FROM"
$SCRIPT_HELPERS_DIR/test_custom_backend.bat
$SCRIPT_HELPERS_DIR/test_custom_script_ops.bat
fi
Expand All @@ -67,7 +67,7 @@ run_tests
assert_git_not_dirty
echo "TEST PASSED"

if [[ "${BUILD_ENVIRONMENT}" == "pytorch-win-vs2019-cuda10-cudnn7-py3" ]] && [[ "${JOB_BASE_NAME}" == *-test1 ]]; then
if [[ "${BUILD_ENVIRONMENT}" == "pytorch-win-vs2019-cuda10-cudnn7-py3" ]]; then
pushd $TEST_DIR
python -mpip install coverage
echo "Generating XML coverage report"
Expand Down
13 changes: 11 additions & 2 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -30,8 +30,16 @@ endif()

set(CMAKE_INSTALL_MESSAGE NEVER)

# check and set CMAKE_CXX_STANDARD
string(FIND "${CMAKE_CXX_FLAGS}" "-std=c++" env_cxx_standard)
if(env_cxx_standard GREATER -1)
message(
WARNING "C++ standard version definition detected in environment variable."
"PyTorch requires -std=c++14. Please remove -std=c++ settings in your environment.")
endif()
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_C_STANDARD 11)

if(DEFINED GLIBCXX_USE_CXX11_ABI)
if(${GLIBCXX_USE_CXX11_ABI} EQUAL 1)
set(CXX_STANDARD_REQUIRED ON)
Expand Down Expand Up @@ -153,6 +161,7 @@ option(COLORIZE_OUTPUT "Colorize output during compilation" ON)
option(USE_ASAN "Use Address Sanitizer" OFF)
option(USE_TSAN "Use Thread Sanitizer" OFF)
option(USE_CUDA "Use CUDA" ON)
option(USE_FAST_NVCC "Use parallel NVCC build" OFF)
option(USE_ROCM "Use ROCm" ON)
option(CAFFE2_STATIC_LINK_CUDA "Statically link CUDA libraries" OFF)
cmake_dependent_option(
Expand Down Expand Up @@ -220,8 +229,8 @@ option(USE_ZSTD "Use ZSTD" OFF)
# Ensure that an MKLDNN build is the default for x86 CPUs
# but optional for AArch64 (dependent on -DUSE_MKLDNN).
cmake_dependent_option(
USE_MKLDNN "Use MKLDNN. Only available on x86, x86_64, and AArch64." ON
"CPU_INTEL OR CPU_AARCH64 AND USE_MKLDNN" OFF)
USE_MKLDNN "Use MKLDNN. Only available on x86, x86_64, and AArch64." "${CPU_INTEL}"
"CPU_INTEL OR CPU_AARCH64" OFF)
set(MKLDNN_ENABLE_CONCURRENT_EXEC ${USE_MKLDNN})
cmake_dependent_option(
USE_MKLDNN_CBLAS "Use CBLAS in MKLDNN" OFF
Expand Down
23 changes: 20 additions & 3 deletions CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,12 @@

- [Contributing to PyTorch](#contributing-to-pytorch)
- [Developing PyTorch](#developing-pytorch)
- [Nightly Checkout & Pull](#nightly-checkout--pull)
- [Tips and Debugging](#tips-and-debugging)
- [Nightly Checkout & Pull](#nightly-checkout--pull)
- [Codebase structure](#codebase-structure)
- [Unit testing](#unit-testing)
- [Better local unit tests with pytest](#better-local-unit-tests-with-pytest)
- [Running `mypy`](#running-mypy)
- [Writing documentation](#writing-documentation)
- [Building documentation](#building-documentation)
- [Tips](#tips)
Expand Down Expand Up @@ -291,6 +293,17 @@ pytest test/test_nn.py -k Loss -v
The above is an example of testing a change to Loss functions: this command runs tests such as
`TestNN.test_BCELoss` and `TestNN.test_MSELoss` and can be useful to save keystrokes.

### Running `mypy`

One of the test suites runs `mypy` on the codebase:
```bash
python test/test_type_hints.py
```
See [Guide for adding type annotations to
PyTorch](https://github.com/pytorch/pytorch/wiki/Guide-for-adding-type-annotations-to-PyTorch)
for more information on how to set up `mypy` and tackle type annotation
tasks, as well as other ways to run `mypy` besides running that test suite.

## Writing documentation

PyTorch uses [Google style](http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html)
Expand Down Expand Up @@ -903,14 +916,14 @@ You'll need to install an appropriately configured flake8; see
[Lint as you type](https://github.com/pytorch/pytorch/wiki/Lint-as-you-type)
for documentation on how to do this.

If you haven't set up the pre-commit hook and have already committed files and
If you haven't set up the pre-commit hook and have already committed files and
CI reports `flake8` errors, you can run the check locally in your PR branch with:
```bash
flake8 $(git diff --name-only $(git merge-base --fork-point master))
```
fix the code so that no errors are reported when you re-run the above check again,
fix the code so that no errors are reported when you re-run the above check again,
and then commit the fix.
## Building PyTorch with ASAN
Expand Down Expand Up @@ -1065,3 +1078,7 @@ following steps:
4. Now you can find the pytorch working directory, which could be
`~/workspace` or `~/project`, and run commands locally to debug
the failure.

For certain Windows failures, it may be useful to have a full [Remote
Desktop](https://docs.microsoft.com/en-us/windows-server/remote/remote-desktop-services/clients/remote-desktop-clients) connection. See detailed instructions [here](https://github.com/pytorch/pytorch/wiki/Debugging-Windows-with-Remote-Desktop-or-CDB-(CLI-windbg)-on-CircleCI)
for how to set that up after rerunning the job.
7 changes: 6 additions & 1 deletion aten/src/ATen/BatchingRegistrations.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -359,7 +359,12 @@ Tensor select_backward_batching_rule(const Tensor& grad, IntArrayRef input_sizes
return grad_physical.getPhysicalToLogicalMap().apply(grad_input);
}

Tensor slice_batching_rule(const Tensor& self, int64_t dim, int64_t start, int64_t end, int64_t step) {
Tensor slice_batching_rule(
const Tensor& self,
int64_t dim,
c10::optional<int64_t> start,
c10::optional<int64_t> end,
int64_t step) {
auto self_physical = MultiBatchVmapTransform::logicalToPhysical(self);
auto dim_physical = self_physical.getPhysicalDim(dim);
auto result = self_physical.tensor().slice(dim_physical, start, end, step);
Expand Down

0 comments on commit f75572e

Please sign in to comment.