Update on "[Gradient Compression] Allow PowerSGD to run vallina allre…
Browse files Browse the repository at this point in the history
…duce for the first K iterations"

This extends the original PowerSGD method into a hybrid approach: vanilla allreduce for the first K iterations, followed by PowerSGD compression. This can further improve accuracy, at the cost of a lower speedup.

Also add more comments on the fields in `PowerSGDState`.

Original PR issue: Investigate Applying PowerSGD to Communication Hook for Gradient Compression #47202

Differential Revision: [D26031478](https://our.internmc.facebook.com/intern/diff/D26031478/)

[ghstack-poisoned]
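
For reference, a minimal sketch of how the hybrid hook described above might be wired up with DDP. `register_comm_hook` and the `powerSGD_hook` module path follow the existing DDP comm-hook API; the `start_powerSGD_iter` field name and its value are assumptions based on this description, not a verbatim copy of the new code:

```python
import torch
import torch.distributed as dist
from torch.distributed.algorithms.ddp_comm_hooks import powerSGD_hook as powerSGD
from torch.nn.parallel import DistributedDataParallel as DDP

# Assumes the process group has already been initialized, e.g. via
# dist.init_process_group("nccl", ...) under a distributed launcher.
rank = dist.get_rank()
model = DDP(torch.nn.Linear(1024, 1024).cuda(rank), device_ids=[rank])

# Run vanilla allreduce for the first K iterations, then switch to PowerSGD
# compression for the rest of training.
state = powerSGD.PowerSGDState(
    process_group=None,           # None falls back to the default process group
    matrix_approximation_rank=1,  # rank of the low-rank gradient approximation
    start_powerSGD_iter=1000,     # K: uncompressed allreduce for the first K steps
)
model.register_comm_hook(state, powerSGD.powerSGD_hook)
```

A larger value for the first-K threshold trades more of the speedup away in exchange for accuracy closer to vanilla allreduce.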
wayi committed Jan 25, 2021
2 parents a999628 + 99d5bbe commit 61f32b6
Showing 155 changed files with 3,639 additions and 1,263 deletions.
2 changes: 1 addition & 1 deletion .gitignore
@@ -38,6 +38,7 @@ docs/cpp/source/html/
docs/cpp/source/latex/
docs/source/generated/
log
test-reports/
test/.coverage
test/.hypothesis/
test/cpp/api/mnist
@@ -50,7 +51,6 @@ dropout_model.pt
test/generated_type_hints_smoketest.py
test/htmlcov
test/cpp_extensions/install/
test/test-reports/
third_party/build/
tools/shared/_utils_internal.py
tools/fast_nvcc/wrap_nvcc.sh
2 changes: 2 additions & 0 deletions BUILD.bazel
@@ -134,6 +134,8 @@ genrule(
"aten/src/ATen/RegisterMeta.cpp",
"aten/src/ATen/RegisterDefaultBackend.cpp",
"aten/src/ATen/RegisterSchema.cpp",
"aten/src/ATen/CPUFunctions.h",
"aten/src/ATen/CUDAFunctions.h",
"aten/src/ATen/Functions.h",
"aten/src/ATen/Functions.cpp",
"aten/src/ATen/NativeFunctions.h",
1 change: 1 addition & 0 deletions CMakeLists.txt
@@ -249,6 +249,7 @@ cmake_dependent_option(
option(USE_TBB "Use TBB" OFF)
option(ONNX_ML "Enable traditional ONNX ML API." ON)
option(HAVE_SOVERSION "Whether to add SOVERSION to the shared objects" OFF)
option(USE_DEPLOY "Enable torch::deploy embedded python interpreter" OFF)

# Since TensorPipe does not support Windows, set it to OFF when WIN32 detected
# On Windows platform, if user does not install libuv in build conda env and
@@ -4,8 +4,6 @@

#include "jni.h"

#define clamp0255(x) x > 255 ? 255 : x < 0 ? 0 : x

namespace pytorch_vision_jni {

static void imageYUV420CenterCropToFloatBuffer(
@@ -112,9 +110,12 @@ static void imageYUV420CenterCropToFloatBuffer(
ri = (a0 + 1634 * vi) >> 10;
gi = (a0 - 833 * vi - 400 * ui) >> 10;
bi = (a0 + 2066 * ui) >> 10;
outData[wr++] = (clamp0255(ri) - normMeanRm255) / normStdRm255;
outData[wg++] = (clamp0255(gi) - normMeanGm255) / normStdGm255;
outData[wb++] = (clamp0255(bi) - normMeanBm255) / normStdBm255;
ri = ri > 255 ? 255 : ri < 0 ? 0 : ri;
gi = gi > 255 ? 255 : gi < 0 ? 0 : gi;
bi = bi > 255 ? 255 : bi < 0 ? 0 : bi;
outData[wr++] = (ri - normMeanRm255) / normStdRm255;
outData[wg++] = (gi - normMeanGm255) / normStdGm255;
outData[wb++] = (bi - normMeanBm255) / normStdBm255;
}
}
}
1 change: 1 addition & 0 deletions aten/src/ATen/CMakeLists.txt
@@ -325,6 +325,7 @@ if(USE_CUDA AND NOT USE_ROCM)
${CUDA_TOOLKIT_ROOT_DIR}/lib64/libcublas_static.a
${CUDA_TOOLKIT_ROOT_DIR}/lib64/libcufft_static_nocallback.a
${CUDA_TOOLKIT_ROOT_DIR}/lib64/libcusolver_static.a
${CUDA_TOOLKIT_ROOT_DIR}/lib64/liblapack_static.a # needed for libcusolver_static
)
else()
list(APPEND ATen_CUDA_DEPENDENCY_LIBS
3 changes: 2 additions & 1 deletion aten/src/ATen/CPUGeneratorImpl.cpp
@@ -2,6 +2,7 @@
#include <ATen/Utils.h>
#include <ATen/core/MT19937RNGEngine.h>
#include <c10/util/C++17.h>
#include <c10/util/MathConstants.h>
#include <algorithm>

namespace at {
@@ -153,7 +154,7 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
// intermediate values.
if (legacy_pod->normal_is_valid) {
auto r = legacy_pod->normal_rho;
auto theta = 2.0 * M_PI * legacy_pod->normal_x;
auto theta = 2.0 * c10::pi<double> * legacy_pod->normal_x;
// we return the sin version of the normal sample when in caching mode
double_normal_sample = c10::optional<double>(r * ::sin(theta));
}
22 changes: 11 additions & 11 deletions aten/src/ATen/Context.cpp
@@ -60,25 +60,25 @@ void Context::setDeterministicCuDNN(bool b) {
deterministic_cudnn = b;
}

bool Context::deterministic() const {
return _deterministic;
bool Context::deterministicAlgorithms() const {
return _deterministic_algorithms;
}

void Context::setDeterministic(bool b) {
void Context::setDeterministicAlgorithms(bool b) {
if (b) {
TORCH_WARN_ONCE("torch.set_deterministic is in beta, and its design and "
TORCH_WARN_ONCE("torch.use_deterministic_algorithms is in beta, and its design and"
" functionality may change in the future.");
}

_deterministic = b;
_deterministic_algorithms = b;
}

void Context::alertNotDeterministic(c10::string_view const& caller) {
if (globalContext().deterministic()) {
if (globalContext().deterministicAlgorithms()) {
TORCH_CHECK(false,
caller, " does not have a deterministic implementation, but you set "
"'torch.set_deterministic(True)'. You can turn off determinism just "
"for this operation if that's acceptable for your application. You "
"'torch.use_deterministic_algorithms(True)'. You can turn off determinism ",
"just for this operation if that's acceptable for your application. You "
"can also file an issue at https://github.com/pytorch/pytorch/issues "
"to help us prioritize adding deterministic support for this operation.");
}
@@ -111,9 +111,9 @@ bool Context::checkCuBLASConfigDeterministic() {

void Context::alertCuBLASConfigNotDeterministic() {
static bool cublas_config_deterministic = checkCuBLASConfigDeterministic();
TORCH_CHECK(!deterministic() || cublas_config_deterministic,
"Deterministic behavior was enabled with either `torch.set_deterministic(True)` or ",
"`at::Context::setDeterministic(true)`, but this operation is not deterministic because ",
TORCH_CHECK(!deterministicAlgorithms() || cublas_config_deterministic,
"Deterministic behavior was enabled with either `torch.use_deterministic_algorithms(True)` or ",
"`at::Context::setDeterministicAlgorithms(true)`, but this operation is not deterministic because ",
"it uses CuBLAS and you have CUDA >= 10.2. To enable deterministic behavior in this ",
"case, you must set an environment variable before running your PyTorch application: ",
cublas_config_var_name, "=", cublas_deterministic_configs[0], " or ",
34 changes: 18 additions & 16 deletions aten/src/ATen/Context.h
@@ -120,27 +120,27 @@ class TORCH_API Context {
//
// * Include this comment: "See Note [Enabling Deterministic Operations]"
//
// * Check the value of `at::globalContext().deterministic()` to toggle between
// nondeterministic and deterministic implementations.
// * Check the value of `at::globalContext().deterministicAlgorithms()` to toggle
// between nondeterministic and deterministic implementations.
//
// * Have an entry in the list of PyTorch operations that toggle between nondeterministic
// and deterministic implementations, in the docstring of `set_deterministic()`
// and deterministic implementations, in the docstring of `use_deterministic_algorithms()`
// in torch/__init__.py
//
// `example_func()` below shows an example of toggling between nondeterministic and
// deterministic implementations:
//
// void example_func() {
// // See Note [Enabling Deterministic Operations]
// if (at::globalContext().deterministic()) {
// if (at::globalContext().deterministicAlgorithms()) {
// example_func_deterministic();
// } else {
// example_func_nondeterministic();
// }
// }

bool deterministic() const;
void setDeterministic(bool);
bool deterministicAlgorithms() const;
void setDeterministicAlgorithms(bool);

// Note [Writing Nondeterministic Operations]
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -151,16 +151,18 @@ class TORCH_API Context {
//
// * Include a comment explaining why the operation is nondeterministic.
//
// * Throw an error when `Context::deterministic()` is true. Most of the time, this
// should be accomplished by calling `at::globalContext().alertNotDeterminstic()`.
// However, if the nondeterministic behavior is caused by the CuBLAS workspace
// * Throw an error when `Context::deterministicAlgorithms()` is true. Most
// of the time, this should be accomplished by calling
// `at::globalContext().alertNotDeterminstic()`. However, if the
// nondeterministic behavior is caused by the CuBLAS workspace
// configuration in CUDA >= 10.2,
// `at::globalContext().alertCuBLASConfigNotDeterministic()` should
// be called instead (in this case, a comment explaining why the operation is
// nondeterministic is not necessary). See below for details on these methods.
// `at::globalContext().alertCuBLASConfigNotDeterministic()` should be
// called instead (in this case, a comment explaining why the operation is
// nondeterministic is not necessary). See below for details on these
// methods.
//
// * Have an entry in the list of nondeterministic PyTorch operations in the
// docstring of `set_deterministic()` in torch/__init__.py
// docstring of `use_deterministic_algorithms()` in torch/__init__.py
//
// `example_func()` below shows an example of the comments and error-throwing code
// for a nondeterministic operation:
@@ -172,10 +174,10 @@ class TORCH_API Context {
// ...
// }

// Throws an error if `Context::deterministic()` is true
// Throws an error if `Context::deterministicAlgorithms()` is true
void alertNotDeterministic(c10::string_view const& caller);

// Throws an error if `Context::deterministic()` is true, CUDA >= 10.2, and
// Throws an error if `Context::deterministicAlgorithms()` is true, CUDA >= 10.2, and
// CUBLAS_WORKSPACE_CONFIG is not set to either ":16:8" or ":4096:8". For more details:
// https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
void alertCuBLASConfigNotDeterministic();
@@ -210,7 +212,7 @@ class TORCH_API Context {
std::once_flag thh_init;
bool enabled_cudnn = true;
bool deterministic_cudnn = false;
bool _deterministic = false;
bool _deterministic_algorithms = false;
bool benchmark_cudnn = false;
bool allow_tf32_cudnn = true;
bool allow_tf32_cublas = true;
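
The Context.cpp and Context.h changes above rename the C++ determinism flag from `deterministic()` / `setDeterministic()` to `deterministicAlgorithms()` / `setDeterministicAlgorithms()`, matching the Python-side rename to `torch.use_deterministic_algorithms`. A minimal sketch of the usage implied by the new error messages (the CuBLAS workspace setting only matters for CUDA >= 10.2, and the values come from the comment in Context.h):

```python
import os
import torch

# For CUDA >= 10.2, cuBLAS is only deterministic with one of these workspace
# configurations; the error message asks for it to be set before the
# application runs, so set it as early as possible (or in the launching shell).
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"  # or ":16:8"

# Replaces the older torch.set_deterministic(True); operations without a
# deterministic implementation will now raise an error instead of silently
# running nondeterministically.
torch.use_deterministic_algorithms(True)
```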
