FP16 allreduce on CPU (#563)
alsrgv committed Oct 16, 2018
1 parent d90d9e8 commit 6feb47a
Showing 5 changed files with 243 additions and 14 deletions.
29 changes: 27 additions & 2 deletions LICENSE
@@ -1,5 +1,5 @@
Horovod
Copyright 2017 Uber Technologies, Inc.
Copyright 2018 Uber Technologies, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -246,4 +246,29 @@
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

NVIDIA/cutlass
Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the NVIDIA CORPORATION nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
133 changes: 133 additions & 0 deletions horovod/common/half.h
@@ -0,0 +1,133 @@
/***************************************************************************************************
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
* Modifications copyright (C) 2018 Uber Technologies, Inc.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#ifndef HOROVOD_HALF_H
#define HOROVOD_HALF_H

namespace horovod {
namespace common {

inline void HalfBits2Float(unsigned short* src, float* res) {
unsigned h = *src;
int sign = ((h >> 15) & 1);
int exp = ((h >> 10) & 0x1f);
int mantissa = (h & 0x3ff);
unsigned f = 0;

if (exp > 0 && exp < 31) {
// normal
exp += 112;
f = (sign << 31) | (exp << 23) | (mantissa << 13);
} else if (exp == 0) {
if (mantissa) {
// subnormal
exp += 113;
while ((mantissa & (1 << 10)) == 0) {
mantissa <<= 1;
exp--;
}
mantissa &= 0x3ff;
f = (sign << 31) | (exp << 23) | (mantissa << 13);
} else {
// sign-preserving zero
f = (sign << 31);
}
} else if (exp == 31) {
if (mantissa) {
f = 0x7fffffff; // not a number
} else {
f = (0xff << 23) | (sign << 31); // inf
}
}

*res = *reinterpret_cast<float const*>(&f);
}

inline void Float2HalfBits(float* src, unsigned short* dest) {
// software implementation rounds toward nearest even
unsigned const& s = *reinterpret_cast<unsigned const*>(src);
uint16_t sign = uint16_t((s >> 16) & 0x8000);
int16_t exp = uint16_t(((s >> 23) & 0xff) - 127);
int mantissa = s & 0x7fffff;
uint16_t u = 0;

if ((s & 0x7fffffff) == 0) {
// sign-preserving zero
*dest = sign;
return;
}

if (exp > 15) {
if (exp == 128 && mantissa) {
// not a number
u = 0x7fff;
} else {
// overflow to infinity
u = sign | 0x7c00;
}
*dest = u;
return;
}

int sticky_bit = 0;

if (exp >= -14) {
// normal fp32 to normal fp16
exp = uint16_t(exp + uint16_t(15));
u = uint16_t(((exp & 0x1f) << 10));
u = uint16_t(u | (mantissa >> 13));
} else {
// normal single-precision to subnormal half-precision representation
int rshift = (-14 - exp);
if (rshift < 32) {
mantissa |= (1 << 23);

sticky_bit = ((mantissa & ((1 << rshift) - 1)) != 0);

mantissa = (mantissa >> rshift);
u = (uint16_t(mantissa >> 13) & 0x3ff);
} else {
mantissa = 0;
u = 0;
}
}

// round to nearest even
int round_bit = ((mantissa >> 12) & 1);
sticky_bit |= ((mantissa & ((1 << 12) - 1)) != 0);

if ((round_bit && sticky_bit) || (round_bit && (u & 1))) {
u = uint16_t(u + 1);
}

u |= sign;

*dest = u;
}

} // namespace common
} // namespace horovod

#endif // HOROVOD_HALF_H
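
For reference, a minimal round-trip sketch (not part of this commit) that exercises the two converters above, assuming half.h is on the include path:

#include <cstdio>

#include "half.h"

int main() {
  float original = 0.1f;    // has no exact binary16 representation
  unsigned short half_bits; // raw IEEE 754 binary16 bit pattern
  float recovered;

  horovod::common::Float2HalfBits(&original, &half_bits);
  horovod::common::HalfBits2Float(&half_bits, &recovered);

  // 0.1f rounds to bits 0x2e66 and comes back as 0.0999755859375,
  // which makes the precision lost by the narrowing visible.
  std::printf("bits=0x%04x recovered=%.10f\n", half_bits, recovered);
  return 0;
}
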
54 changes: 45 additions & 9 deletions horovod/common/operations.cc
@@ -36,6 +36,7 @@
#endif

#define OMPI_SKIP_MPICXX
#include "half.h"
#include "hashes.h"
#include "mpi.h"
#include "mpi_message.h"
@@ -184,6 +185,7 @@ struct HorovodGlobalState {

// MPI custom data type for float16.
MPI_Datatype mpi_float16_t;
MPI_Op mpi_float16_sum;

// Private MPI communicator for Horovod to ensure no collisions with other
// threads using MPI.
@@ -1136,7 +1138,10 @@ void PerformOperation(TensorTable& tensor_table, MPIResponse response) {
MPI_CHECK(entries, "MPI_Allreduce",
MPI_Allreduce(MPI_IN_PLACE, host_buffer,
(int)total_num_elements,
GetMPIDataType(first_entry.tensor), MPI_SUM,
GetMPIDataType(first_entry.tensor),
first_entry.tensor->dtype() == HOROVOD_FLOAT16
? horovod_global.mpi_float16_sum
: MPI_SUM,
horovod_global.cross_comm))
ACTIVITY_END_ALL(entries, timeline)

@@ -1268,7 +1273,10 @@ void PerformOperation(TensorTable& tensor_table, MPIResponse response) {
MPI_CHECK(entries, "MPI_Allreduce",
MPI_Allreduce(MPI_IN_PLACE, (void*)buffer_data,
(int)num_elements,
GetMPIDataType(first_entry.tensor), MPI_SUM,
GetMPIDataType(first_entry.tensor),
first_entry.tensor->dtype() == HOROVOD_FLOAT16
? horovod_global.mpi_float16_sum
: MPI_SUM,
horovod_global.mpi_comm))
ACTIVITY_END_ALL(entries, timeline)

@@ -1310,7 +1318,10 @@ void PerformOperation(TensorTable& tensor_table, MPIResponse response) {
MPI_CHECK(entries, "MPI_Allreduce",
MPI_Allreduce(sendbuf, (void*)e.output->data(),
(int)e.tensor->shape().num_elements(),
GetMPIDataType(e.tensor), MPI_SUM,
GetMPIDataType(e.tensor),
first_entry.tensor->dtype() == HOROVOD_FLOAT16
? horovod_global.mpi_float16_sum
: MPI_SUM,
horovod_global.mpi_comm))
ACTIVITY_END_ALL(entries, timeline)
}
@@ -1400,6 +1411,22 @@ void CheckForStalledTensors(HorovodGlobalState& state) {
}
}

// Summation op for the custom float16 MPI data type.
void float16_sum(void* invec, void* inoutvec, int* len,
MPI_Datatype* datatype) {
// Treat invec and inoutvec as arrays of raw float16 bit patterns.
auto* in = (unsigned short*)invec;
auto* inout = (unsigned short*)inoutvec;
for (int i = 0; i < *len; ++i) {
float in_float;
float inout_float;
HalfBits2Float(in + i, &in_float);
HalfBits2Float(inout + i, &inout_float);
inout_float += in_float;
Float2HalfBits(&inout_float, inout + i);
}
}
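
The op above plugs into MPI_Allreduce like a built-in reduction. Below is a standalone sketch (not part of this commit) of how the custom datatype and op combine, assuming float16_sum and half.h from this change are in scope:

#include <mpi.h>

#include <vector>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  // Two bytes per element stand in for IEEE binary16, mirroring the
  // mpi_float16_t created in BackgroundThreadLoop.
  MPI_Datatype mpi_float16_t;
  MPI_Type_contiguous(2, MPI_BYTE, &mpi_float16_t);
  MPI_Type_commit(&mpi_float16_t);

  // commute=1 declares the reduction commutative, so MPI is free to
  // reassociate partial sums across ranks.
  MPI_Op mpi_float16_sum;
  MPI_Op_create(&float16_sum, 1, &mpi_float16_sum);

  // 0x3c00 is 1.0 in fp16; after the allreduce every element holds the
  // world size, encoded as fp16 bits.
  std::vector<unsigned short> buf(16, 0x3c00);
  MPI_Allreduce(MPI_IN_PLACE, buf.data(), (int)buf.size(), mpi_float16_t,
                mpi_float16_sum, MPI_COMM_WORLD);

  MPI_Op_free(&mpi_float16_sum);
  MPI_Type_free(&mpi_float16_t);
  MPI_Finalize();
  return 0;
}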

// The MPI background thread loop coordinates all the MPI processes and the
// tensor reductions. The design of the communicator mechanism is limited by a
// few considerations:
@@ -1436,7 +1463,7 @@ void BackgroundThreadLoop(HorovodGlobalState& state) {
auto mpi_threads_disable = std::getenv(HOROVOD_MPI_THREADS_DISABLE);
int required = MPI_THREAD_MULTIPLE;
if (mpi_threads_disable != nullptr &&
std::strtol(mpi_threads_disable, nullptr, 10) > 0) {
required = MPI_THREAD_SINGLE;
}
int provided;
@@ -1525,6 +1552,10 @@ void BackgroundThreadLoop(HorovodGlobalState& state) {
MPI_Type_contiguous(2, MPI_BYTE, &mpi_float16_t);
MPI_Type_commit(&mpi_float16_t);

// Create custom MPI float16 summation op.
MPI_Op mpi_float16_sum;
MPI_Op_create(&float16_sum, 1, &mpi_float16_sum);

state.rank = rank;
state.local_rank = local_rank;
state.cross_rank = cross_rank;
@@ -1534,6 +1565,7 @@ void BackgroundThreadLoop(HorovodGlobalState& state) {
state.local_comm = local_comm;
state.cross_comm = cross_comm;
state.mpi_float16_t = mpi_float16_t;
state.mpi_float16_sum = mpi_float16_sum;
state.mpi_threads_supported = (provided == MPI_THREAD_MULTIPLE);
state.local_comm_ranks = local_comm_ranks;

@@ -1577,9 +1609,10 @@ void BackgroundThreadLoop(HorovodGlobalState& state) {

// Override Tensor Fusion threshold, if it's set.
auto horovod_fusion_threshold = std::getenv("HOROVOD_FUSION_THRESHOLD");
int64_t proposed_fusion_threshold = (horovod_fusion_threshold != nullptr) ?
std::strtol(horovod_fusion_threshold, nullptr, 10) :
state.tensor_fusion_threshold;
int64_t proposed_fusion_threshold =
(horovod_fusion_threshold != nullptr)
? std::strtol(horovod_fusion_threshold, nullptr, 10)
: state.tensor_fusion_threshold;

// If the cluster is homogeneous and hierarchical allreduce is enabled,
// adjust buffer size to make sure it is divisible by local_size to improve
@@ -1592,8 +1625,10 @@ void BackgroundThreadLoop(HorovodGlobalState& state) {
// FUSION_BUFFER_ATOMIC_UNIT for performance
int mpi_double_size;
MPI_Type_size(MPI_DOUBLE, &mpi_double_size);
int64_t div = state.local_size * mpi_double_size * FUSION_BUFFER_ATOMIC_UNIT;
state.tensor_fusion_threshold = ((proposed_fusion_threshold+div-1) / div) * div;
int64_t div =
state.local_size * mpi_double_size * FUSION_BUFFER_ATOMIC_UNIT;
state.tensor_fusion_threshold =
((proposed_fusion_threshold + div - 1) / div) * div;
} else {
state.tensor_fusion_threshold = proposed_fusion_threshold;
}
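
The ((x + div - 1) / div) * div expression above is the usual integer round-up-to-a-multiple idiom. A tiny self-checking sketch with illustrative values (the local size, double size, and atomic unit below are made up for the example, not Horovod defaults):

#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical numbers: 4 local ranks, 8-byte doubles, and a
  // 64-element atomic unit give div = 2048 bytes.
  int64_t div = 4 * 8 * 64;

  int64_t proposed = 1000000; // not a multiple of div
  int64_t rounded = ((proposed + div - 1) / div) * div;

  assert(rounded == 1001472); // next multiple of 2048 above 1000000
  assert(rounded % div == 0); // evenly divisible across local ranks
  return 0;
}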
@@ -1880,6 +1915,7 @@ bool RunLoopOnce(HorovodGlobalState& state, bool is_coordinator) {
}

if (should_shut_down) {
MPI_Op_free(&state.mpi_float16_sum);
}
return !should_shut_down;
}

// Start Horovod background thread. Ensure that this is
4 changes: 2 additions & 2 deletions test/test_tensorflow.py
@@ -58,7 +58,7 @@ def test_horovod_allreduce_cpu(self):
hvd.init()
size = hvd.size()
with self.test_session(config=self.config) as session:
dtypes = [tf.int32, tf.int64, tf.float32, tf.float64]
dtypes = [tf.int32, tf.int64, tf.float16, tf.float32, tf.float64]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
with tf.device("/cpu:0"):
@@ -90,7 +90,7 @@ def test_horovod_allreduce_cpu_fused(self):
hvd.init()
size = hvd.size()
with self.test_session(config=self.config) as session:
dtypes = [tf.int32, tf.int64, tf.float32, tf.float64]
dtypes = [tf.int32, tf.int64, tf.float16, tf.float32, tf.float64]
dims = [1, 2, 3]
tests = []
for dtype, dim in itertools.product(dtypes, dims):
