Update on "[quant] Add quantized::sigmoid that takes output_scale/output_zero_point as input"

Summary:
Same changes as the stack for leaky_relu: #45702
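For illustration, a minimal sketch of how the new overload might be exercised from Python (assuming the op is reachable through `torch.ops.quantized.sigmoid`, as with other quantized ops; the qparams below are illustrative):

```python
import torch

# Quantize a float tensor, then run sigmoid with explicit output qparams.
x = torch.randn(4)
qx = torch.quantize_per_tensor(x, scale=0.1, zero_point=128, dtype=torch.quint8)

# sigmoid outputs lie in [0, 1], so scale = 1/256 with zero_point = 0
# covers the full quint8 range.
out = torch.ops.quantized.sigmoid(qx, 1.0 / 256.0, 0)
print(out.dequantize())
```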

Test Plan:

Reviewers:

Subscribers:

Tasks:

Tags:

Differential Revision: [D24129113](https://our.internmc.facebook.com/intern/diff/D24129113)

[ghstack-poisoned]
jerryzh168 committed Oct 6, 2020
2 parents c0b9d44 + f1f9f5f commit 7ce8417
Showing 99 changed files with 3,440 additions and 591 deletions.
8 changes: 4 additions & 4 deletions .circleci/cimodel/data/simple/ge_config_tests.py
@@ -61,25 +61,25 @@ def gen_tree(self):
MultiPartVersion([3, 6], "py"),
MultiPartVersion([5, 4], "gcc"),
None,
["ge_config_legacy", "test"],
["jit_legacy", "test"],
["pytorch_linux_xenial_py3_6_gcc5_4_build"]),
GeConfigTestJob(
MultiPartVersion([3, 6], "py"),
MultiPartVersion([5, 4], "gcc"),
None,
["ge_config_simple", "test"],
["jit_simple", "test"],
["pytorch_linux_xenial_py3_6_gcc5_4_build"],
),
GeConfigTestJob(
None,
None,
CudaVersion(10, 2),
["cudnn7", "py3", "ge_config_legacy", "test"],
["cudnn7", "py3", "jit_legacy", "test"],
["pytorch_linux_xenial_cuda10_2_cudnn7_py3_gcc7_build"],
use_cuda_docker=True,
# TODO Why does the build environment specify cuda10.1, while the
# job name is cuda10_2?
-build_env_override="pytorch-linux-xenial-cuda10.1-cudnn7-ge_config_legacy-test"),
+build_env_override="pytorch-linux-xenial-cuda10.1-cudnn7-jit_legacy-test"),
]


12 changes: 6 additions & 6 deletions .circleci/config.yml
@@ -7023,23 +7023,23 @@ workflows:
requires:
- docker-pytorch-linux-xenial-py3-clang5-android-ndk-r19c
- pytorch_linux_test:
-build_environment: pytorch-linux-xenial-py3.6-gcc5.4-ge_config_legacy-test
+build_environment: pytorch-linux-xenial-py3.6-gcc5.4-jit_legacy-test
docker_image: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4
-name: pytorch_linux_xenial_py3_6_gcc5_4_ge_config_legacy_test
+name: pytorch_linux_xenial_py3_6_gcc5_4_jit_legacy_test
requires:
- pytorch_linux_xenial_py3_6_gcc5_4_build
resource_class: large
- pytorch_linux_test:
-build_environment: pytorch-linux-xenial-py3.6-gcc5.4-ge_config_simple-test
+build_environment: pytorch-linux-xenial-py3.6-gcc5.4-jit_simple-test
docker_image: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4
-name: pytorch_linux_xenial_py3_6_gcc5_4_ge_config_simple_test
+name: pytorch_linux_xenial_py3_6_gcc5_4_jit_simple_test
requires:
- pytorch_linux_xenial_py3_6_gcc5_4_build
resource_class: large
- pytorch_linux_test:
-build_environment: pytorch-linux-xenial-cuda10.1-cudnn7-ge_config_legacy-test
+build_environment: pytorch-linux-xenial-cuda10.1-cudnn7-jit_legacy-test
docker_image: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7
-name: pytorch_linux_xenial_cuda10_2_cudnn7_py3_ge_config_legacy_test
+name: pytorch_linux_xenial_cuda10_2_cudnn7_py3_jit_legacy_test
requires:
- pytorch_linux_xenial_cuda10_2_cudnn7_py3_gcc7_build
resource_class: gpu.medium
2 changes: 1 addition & 1 deletion .jenkins/pytorch/macos-test.sh
@@ -63,7 +63,7 @@ test_python_all() {
# Increase default limit on open file handles from 256 to 1024
ulimit -n 1024

-python test/run_test.py --verbose --exclude test_jit_cuda_fuser_profiling test_jit_cuda_fuser_legacy test_jit_legacy test_jit_fuser_legacy --determine-from="$DETERMINE_FROM"
+python test/run_test.py --verbose --exclude-jit-executor --determine-from="$DETERMINE_FROM"

assert_git_not_dirty
}
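The long `--exclude` list above is folded into a single `--exclude-jit-executor` flag handled inside `test/run_test.py`. A minimal sketch of how such a flag could be wired up (names and the exact list are illustrative, not the actual run_test.py code):

```python
import argparse

# Tests tied to the legacy/profiling JIT executors; one flag now skips
# them all instead of each CI script repeating the list.
JIT_EXECUTOR_TESTS = [
    "test_jit_cuda_fuser_profiling",
    "test_jit_cuda_fuser_legacy",
    "test_jit_profiling",
    "test_jit_legacy",
    "test_jit_fuser_legacy",
    "test_jit_fuser_te",
    "test_tensorexpr",
]

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--exclude-jit-executor", action="store_true",
                        help="exclude tests that are JIT-executor specific")
    parser.add_argument("--exclude", nargs="*", default=[])
    return parser.parse_args()

def select_tests(all_tests, args):
    # Merge the explicit exclusions with the JIT-executor group.
    excluded = set(args.exclude)
    if args.exclude_jit_executor:
        excluded.update(JIT_EXECUTOR_TESTS)
    return [t for t in all_tests if t not in excluded]
```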
36 changes: 14 additions & 22 deletions .jenkins/pytorch/test.sh
@@ -126,23 +126,18 @@ if ([ -n "$CIRCLE_PULL_REQUEST" ] && [[ "$BUILD_ENVIRONMENT" != *coverage* ]]);
file_diff_from_base "$DETERMINE_FROM"
fi

-test_python_nn() {
-time python test/run_test.py --include test_nn --verbose --determine-from="$DETERMINE_FROM"
-assert_git_not_dirty
-}
-
-test_python_ge_config_profiling() {
-time python test/run_test.py --include test_jit_cuda_fuser_profiling test_jit_profiling test_jit_fuser_te test_tensorexpr --verbose --determine-from="$DETERMINE_FROM"
+test_python_legacy_jit() {
+time python test/run_test.py --include test_jit_cuda_fuser_legacy test_jit_legacy test_jit_fuser_legacy --verbose --determine-from="$DETERMINE_FROM"
assert_git_not_dirty
}

-test_python_ge_config_legacy() {
-time python test/run_test.py --include test_jit_cuda_fuser_legacy test_jit_legacy test_jit_fuser_legacy --verbose --determine-from="$DETERMINE_FROM"
+test_python_shard1() {
+time python test/run_test.py --exclude-jit-executor --shard 1 2 --verbose --determine-from="$DETERMINE_FROM"
assert_git_not_dirty
}

-test_python_all_except_nn_and_cpp_extensions() {
-time python test/run_test.py --exclude test_jit_cuda_fuser_profiling test_jit_cuda_fuser_legacy test_nn test_jit_profiling test_jit_legacy test_jit_fuser_legacy test_jit_fuser_te test_tensorexpr --verbose --determine-from="$DETERMINE_FROM"
+test_python_shard2() {
+time python test/run_test.py --exclude-jit-executor --shard 2 2 --verbose --determine-from="$DETERMINE_FROM"
assert_git_not_dirty
}
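`--shard 1 2` and `--shard 2 2` split the (non-JIT-executor) suite across two CI jobs. A simple round-robin split is one way to implement this; the real run_test.py may partition differently:

```python
def take_shard(tests, which, num_shards):
    """Return the subset of `tests` assigned to 1-based shard `which`.

    Round-robin assignment keeps shard sizes within one test of each
    other; run_test.py could also balance by historical runtimes.
    """
    assert 1 <= which <= num_shards
    return [t for i, t in enumerate(tests) if i % num_shards == which - 1]

# Together, shard 1/2 and shard 2/2 cover every test exactly once:
tests = ["test_nn", "test_torch", "test_autograd", "test_ops"]
print(take_shard(tests, 1, 2))  # ['test_nn', 'test_autograd']
print(take_shard(tests, 2, 2))  # ['test_torch', 'test_ops']
```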

@@ -304,7 +299,7 @@ test_xla() {
assert_git_not_dirty
}

-# Do NOT run this test before any other tests, like test_python_nn, etc.
+# Do NOT run this test before any other tests, like test_python_shard1, etc.
# Because this function uninstalls the torch built from branch, and install
# nightly version.
test_backward_compatibility() {
@@ -381,19 +376,17 @@ if [[ "${BUILD_ENVIRONMENT}" == *backward* ]]; then
elif [[ "${BUILD_ENVIRONMENT}" == *xla* || "${JOB_BASE_NAME}" == *xla* ]]; then
install_torchvision
test_xla
elif [[ "${BUILD_ENVIRONMENT}" == *ge_config_legacy* || "${JOB_BASE_NAME}" == *ge_config_legacy* ]]; then
test_python_ge_config_legacy
elif [[ "${BUILD_ENVIRONMENT}" == *ge_config_profiling* || "${JOB_BASE_NAME}" == *ge_config_profiling* ]]; then
test_python_ge_config_profiling
elif [[ "${BUILD_ENVIRONMENT}" == *legacy_jit* || "${JOB_BASE_NAME}" == *legacy_jit* ]]; then
test_python_legacy_jit
elif [[ "${BUILD_ENVIRONMENT}" == *libtorch* ]]; then
# TODO: run some C++ tests
echo "no-op at the moment"
elif [[ "${BUILD_ENVIRONMENT}" == *-test1 || "${JOB_BASE_NAME}" == *-test1 ]]; then
-test_python_nn
-test_cpp_extensions
+install_torchvision
+test_python_shard1
elif [[ "${BUILD_ENVIRONMENT}" == *-test2 || "${JOB_BASE_NAME}" == *-test2 ]]; then
install_torchvision
-test_python_all_except_nn_and_cpp_extensions
+test_python_shard2
test_aten
test_libtorch
test_custom_script_ops
Expand All @@ -409,9 +402,8 @@ elif [[ "${BUILD_ENVIRONMENT}" == pytorch-linux-xenial-cuda9.2-cudnn7-py3-gcc5.4
test_cpp_extensions
else
install_torchvision
-test_python_nn
-test_python_all_except_nn_and_cpp_extensions
-test_cpp_extensions
+test_python_shard1
+test_python_shard2
test_aten
test_vec256
test_libtorch
@@ -1,3 +1,3 @@
call %SCRIPT_HELPERS_DIR%\setup_pytorch_env.bat
cd test && python run_test.py --exclude test_jit_cuda_fuser_profiling test_jit_cuda_fuser_legacy test_jit_profiling test_jit_legacy test_jit_fuser_legacy test_jit_fuser_te test_tensorexpr --verbose --determine-from="%1" && cd ..
cd test && python run_test.py --exclude-jit-executor --verbose --determine-from="%1" && cd ..
if ERRORLEVEL 1 exit /b 1
28 changes: 27 additions & 1 deletion CONTRIBUTING.md
@@ -118,11 +118,37 @@ For example:
- modify your Python file `torch/__init__.py`
- test functionality

-You do not need to repeatedly install after modifying Python files.
+You do not need to repeatedly install after modifying Python files (`.py`). However, you would need to reinstall
+if you modify Python interface (`.pyi`, `.pyi.in`) or non-Python files (`.cpp`, `.cc`, `.cu`, `.h`, ...).

In case you want to reinstall, make sure that you uninstall PyTorch first by running `pip uninstall torch`
and `python setup.py clean`. Then you can install in `develop` mode again.

+### Tips and Debugging
+* A prerequisite to installing PyTorch is CMake. We recommend installing it with [Homebrew](https://brew.sh/)
+with `brew install cmake` if you are developing on MacOS or Linux system.
+* Our `setup.py` requires Python >= 3.6
+* If you run into errors when running `python setup.py develop`, here are some debugging steps:
+1. Run `printf '#include <stdio.h>\nint main() { printf("Hello World");}'|clang -x c -; ./a.out` to make sure
+your CMake works and can compile this simple Hello World program without errors.
+2. Nuke your `build` directory. The `setup.py` script compiles binaries into the `build` folder and caches many
+details along the way, which saves time the next time you build. If you're running into issues, you can always
+`rm -rf build` from the toplevel `pytorch` directory and start over.
+3. If you have made edits to the PyTorch repo, commit any change you'd like to keep and clean the repo with the
+following commands (note that clean _really_ removes all untracked files and changes.):
+```bash
+git submodule deinit -f .
+git clean -xdf
+python setup.py clean
+git submodule update --init --recursive # very important to sync the submodules
+python setup.py develop # then try running the command again
+```
+4. The main step within `python setup.py develop` is running `make` from the `build` directory. If you want to
+experiment with some environment variables, you can pass them into the command:
+```bash
+ENV_KEY1=ENV_VAL1[, ENV_KEY2=ENV_VAL2]* python setup.py develop
+```

## Nightly Checkout & Pull

The `tools/nightly.py` script is provided to ease pure Python development of
1 change: 1 addition & 0 deletions aten/src/ATen/CMakeLists.txt
@@ -51,6 +51,7 @@ file(GLOB cudnn_cpp "cudnn/*.cpp")

file(GLOB hip_h "hip/*.h" "hip/detail/*.h" "hip/*.cuh" "hip/detail/*.cuh" "hip/impl/*.h")
file(GLOB hip_cpp "hip/*.cpp" "hip/detail/*.cpp" "hip/impl/*.cpp")
+list(REMOVE_ITEM hip_cpp "${CMAKE_CURRENT_SOURCE_DIR}/hip/detail/LazyNVRTC.cpp")
file(GLOB hip_hip "hip/*.hip" "hip/detail/*.hip" "hip/impl/*.hip")
file(GLOB hip_nvrtc_stub_h "hip/nvrtc_stub/*.h")
file(GLOB hip_nvrtc_stub_cpp "hip/nvrtc_stub/*.cpp")
35 changes: 23 additions & 12 deletions aten/src/ATen/core/function_schema.h
@@ -156,18 +156,29 @@ struct FunctionSchema {
checkSchema();
}

-// check whether this schema is backward compatible with the old one.
-// the following conditions are considered as this schema is backward
-// compatible with old:
-// 1) two schemas are equal
-// 2) this schema has the same or more positional args than old,
-// and any positional arg in this schema is backward compatible
-// with the corresponding one in old schema, which could be an arg
-// or a kwarg, if it has, or it must provide a default value
-// 3) this schema has the same or more kwargs than old, and all the kwargs
-// in old schema can find the corresponding kwarg in this schema which
-// is backward compatible with the old kwarg, and the extra kwargs in
-// this schema must provide default values.
+// Checks whether this schema is backward compatible with the old one.
+// The following conditions must be true:
+// [Function structure] The new schema's name, overload-name, varargs, and
+// return arity are the same.
+// [Output Narrowing] The new schema's output type must be the same class
+// or inherit from the old schema's output type.
+// [Argument count] The new schema must have at least as many arguments as
+// the old schema (considering the list of positional and kwargs).
+// [Arg Compatibility] Every argument in the old schema has a corresponding
+// argument in the new schema that:
+// * is at the same position.
+// * has the same name.
+// * is either positional, or kwarg and the old argument was kwarg.
+// * has the same type, or the old argument's type inherits from the
+// new argument's type.
+// [Default Values] Every new argument must have a default value.
+// E.g.
+// OK f_new(a, b, c=1) => f_old(a, b)
+// NOK f_new(a, c=1, *, b) => f_old(a, *, b)
+// OK f_new(a, b, *, c) => f_old(a, *, b, c)
+// NOK f_new(a, *, b, c) -> f_old(a, b, *, c)
+// NOK f_new(a, *, c, b) => f_old(a, *, b, c)
+// OK f_new(a, *, b, c, d=1) => f_old(a, *, b, c)
bool isBackwardCompatibleWith(
const FunctionSchema& old,
std::ostream* why_not = nullptr) const;
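The OK/NOK examples map directly onto Python's calling convention, so the rules can be illustrated with `inspect` (a Python analogy of the check that ignores the type-covariance rule; not the C++ implementation):

```python
import inspect

def is_backward_compatible(new_fn, old_fn):
    # Same-position, same-name arguments; a positional argument may not
    # become keyword-only; every extra new argument needs a default.
    new = list(inspect.signature(new_fn).parameters.values())
    old = list(inspect.signature(old_fn).parameters.values())
    if len(new) < len(old):
        return False
    for n, o in zip(new, old):
        if n.name != o.name:
            return False
        if (o.kind is not inspect.Parameter.KEYWORD_ONLY
                and n.kind is inspect.Parameter.KEYWORD_ONLY):
            return False
    return all(p.default is not inspect.Parameter.empty
               for p in new[len(old):])

# Mirrors two of the examples above:
assert is_backward_compatible(lambda a, b, c=1: 0, lambda a, b: 0)            # OK
assert not is_backward_compatible(lambda a, c=1, *, b: 0, lambda a, *, b: 0)  # NOK
```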
73 changes: 19 additions & 54 deletions aten/src/ATen/core/function_schema_inl.h
@@ -111,69 +111,35 @@ inline bool FunctionSchema::isBackwardCompatibleWith(
return false;
}
for (size_t i = 0; i < returns().size(); ++i) {
-// functions are covariant in arguments but contravariant in returns
+// Backwards compatibility requires covariance on argument types
+// (i.e. more generic), and contravariance on return types (i.e.
+// more specific).
if (!old.returns().at(i).isBackwardCompatibleWith(
returns().at(i),
why_not)) {
return false;
}
}
std::vector<const Argument*> args, old_args;
std::map<std::string, const Argument*> kwargs, old_kwargs;
auto split_func = [](const std::vector<Argument>& arguments,
std::vector<const Argument*>* positionals,
std::map<std::string, const Argument*>* nameds) {
for (const Argument& arg : arguments) {
if (!arg.kwarg_only()) {
positionals->emplace_back(&arg);
}
nameds->emplace(arg.name(), &arg);
}
};
// we split args into positional and keyward parts,
split_func(arguments(), &args, &kwargs);
split_func(old.arguments(), &old_args, &old_kwargs);
if (old_args.size() > args.size()) {
return false;
}
// make sure that all the old positional args have their corresponding
// backward compatible positional args in this schema
for (size_t i = 0; i < old_args.size(); ++i) {
if (!args.at(i)->isBackwardCompatibleWith(
*old_args.at(i),
why_not)) {

+// Make sure that all the old arguments have their corresponding backward
+// compatible arguments in this schema.
+for (size_t i = 0; i < old.arguments().size(); ++i) {
+if (!arguments().at(i).isBackwardCompatibleWith(
+old.arguments().at(i), why_not)) {
return false;
}
}
-// check the extra positional args in this schema either has corresponding
-// backward compatible keyward args since positional args also can be used as
-// a keyward arg, or provided default values
-for (size_t i = old_args.size(); i < args.size(); ++i) {
-if (!args.at(i)->default_value()) {
-auto it = old_kwargs.find(args.at(i)->name());
-if (it == old_kwargs.end() ||
-!args.at(i)->isBackwardCompatibleWith(
-*it->second,
-why_not)) {
return false;

+// Validate that all new arguments provided a default value.
+for (size_t i = old.arguments().size(); i < arguments().size(); ++i) {
+if (!arguments().at(i).default_value()) {
+if (why_not) {
+*why_not
+<< "Function schema not backward compatible since the new argument '"
+<< arguments().at(i).name() << "' of type "
+<< arguments().at(i).type()->str()
+<< " did not provide a default value.";
}
}
}
-// make sure that all the keyword args in the old schema have their
-// corresponding backward compatible keyward args in this schema
-for (auto& kv : old_kwargs) {
-auto it = kwargs.find(kv.first);
-if (it == kwargs.end() ||
-!it->second->isBackwardCompatibleWith(
-*kv.second,
-why_not)) {
-return false;
-}
-kwargs.erase(it);
-}
-// check all the extra keyword args in this schema provide default values
-for (auto& kv : kwargs) {
-if (!kv.second->default_value()) {
-return false;
-}
-}
@@ -186,7 +152,6 @@ inline void FunctionSchema::checkArg(
const Argument& argument,
optional<size_t> pos) const {
if (!value.type()->isSubtypeOf(argument.type())) {
-std::string position = pos ? ::c10::str(" in position ", *pos) : "";
TORCH_CHECK(
false,
formatTypeMismatchMsg(
10 changes: 9 additions & 1 deletion aten/src/ATen/cuda/detail/CUDAHooks.cpp
@@ -28,6 +28,10 @@
#include <miopen/version.h>
#endif

+#ifndef USE_ROCM
+#include <ATen/cuda/detail/LazyNVRTC.h>
+#endif

#include <cuda.h>

#include <sstream>
@@ -116,10 +120,14 @@ bool CUDAHooks::hasCuDNN() const {
return AT_CUDNN_ENABLED();
}

-#ifdef USE_DIRECT_NVRTC
+#if defined(USE_DIRECT_NVRTC)
static std::pair<std::unique_ptr<at::DynamicLibrary>, at::cuda::NVRTC*> load_nvrtc() {
return std::make_pair(nullptr, at::cuda::load_nvrtc());
}
+#elif !defined(USE_ROCM)
+static std::pair<std::unique_ptr<at::DynamicLibrary>, at::cuda::NVRTC*> load_nvrtc() {
+return std::make_pair(nullptr, &at::cuda::detail::lazyNVRTC);
+}
#else
static std::pair<std::unique_ptr<at::DynamicLibrary>, at::cuda::NVRTC*> load_nvrtc() {
#if defined(_WIN32)
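The new `#elif !defined(USE_ROCM)` branch swaps the eagerly-loaded NVRTC for `lazyNVRTC`, a stub that resolves the real library on first use, so machines without the CUDA toolkit can still import torch. The same deferred-loading pattern, sketched in Python with `ctypes` (the library and symbol here are stand-ins, not NVRTC):

```python
import ctypes
import ctypes.util

class LazyLib:
    """Defer dlopen() until the first symbol lookup, the way LazyNVRTC
    defers loading libnvrtc until an NVRTC function is actually called."""

    def __init__(self, name):
        self._name = name
        self._lib = None

    def __getattr__(self, symbol):
        if self._lib is None:  # load on first use, not at import time
            path = ctypes.util.find_library(self._name)
            if path is None:
                raise OSError(f"could not locate lib{self._name}")
            self._lib = ctypes.CDLL(path)
        return getattr(self._lib, symbol)

libm = LazyLib("m")   # nothing loaded yet
sqrt = libm.sqrt      # first access triggers the actual dlopen
sqrt.restype = ctypes.c_double
sqrt.argtypes = [ctypes.c_double]
print(sqrt(2.0))      # 1.4142135623730951
```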
