Initial support for TensorFlow 2.0 (#1193)

* Initial support for TensorFlow 2.0

* Support for TensorFlow 2.0 in `horovod.tensorflow`
* Replace RMSProp with Adam (RMSProp doesn't scale)
* Minor example bugfixes

Signed-off-by: Alex Sergeev <alsrgv@users.noreply.github.com>

* Bugfixes, enable subgraph caching

Signed-off-by: Alex Sergeev <alsrgv@users.noreply.github.com>

* Bugfixes & performance improvements

Signed-off-by: Alex Sergeev <alsrgv@users.noreply.github.com>

* Revert RMSprop changes in Keras tests

Signed-off-by: Alex Sergeev <alsrgv@users.noreply.github.com>

* Renamed examples tensorflow_20_* -> tensorflow2_*

Signed-off-by: Alex Sergeev <alsrgv@users.noreply.github.com>

* Support for TensorFlow 2.0 tests

Signed-off-by: Alex Sergeev <alsrgv@users.noreply.github.com>

* Bugfix Keras test exclusion logic

Signed-off-by: Alex Sergeev <alsrgv@users.noreply.github.com>

* Address review comments

Signed-off-by: Alex Sergeev <alsrgv@users.noreply.github.com>
alsrgv committed Jul 8, 2019
1 parent 107d740 commit 15692dab6465f16c14d8aa0992c0c06fa5345214
@@ -17,6 +17,9 @@ tests=( \
test-cpu-openmpi-py2_7-tf1_14_0-keras2_2_4-torch1_1_0-mxnet1_4_1-pyspark2_4_0 \
test-cpu-openmpi-py3_5-tf1_14_0-keras2_2_4-torch1_1_0-mxnet1_4_1-pyspark2_4_0 \
test-cpu-openmpi-py3_6-tf1_14_0-keras2_2_4-torch1_1_0-mxnet1_4_1-pyspark2_4_0 \
test-cpu-openmpi-py2_7-tf2_0_0-keras2_2_4-torch1_1_0-mxnet1_4_1-pyspark2_4_0 \
test-cpu-openmpi-py3_5-tf2_0_0-keras2_2_4-torch1_1_0-mxnet1_4_1-pyspark2_4_0 \
test-cpu-openmpi-py3_6-tf2_0_0-keras2_2_4-torch1_1_0-mxnet1_4_1-pyspark2_4_0 \
test-cpu-openmpi-py2_7-tfhead-kerashead-torchhead-mxnethead-pyspark2_4_0 \
test-cpu-openmpi-py3_6-tfhead-kerashead-torchhead-mxnethead-pyspark2_4_0 \
test-cpu-mpich-py3_6-tf1_14_0-keras2_2_4-torch1_1_0-mxnet1_4_1-pyspark2_4_0 \
@@ -25,6 +28,8 @@ tests=( \
test-gpu-openmpi-py3_5-tf1_6_0-keras2_1_2-torch0_4_1-mxnet1_4_1-pyspark2_3_2 \
test-gpu-openmpi-py2_7-tf1_14_0-keras2_2_4-torch1_1_0-mxnet1_4_1-pyspark2_4_0 \
test-gpu-openmpi-py3_6-tf1_14_0-keras2_2_4-torch1_1_0-mxnet1_4_1-pyspark2_4_0 \
test-gpu-openmpi-py2_7-tf2_0_0-keras2_2_4-torch1_1_0-mxnet1_4_1-pyspark2_4_0 \
test-gpu-openmpi-py3_6-tf2_0_0-keras2_2_4-torch1_1_0-mxnet1_4_1-pyspark2_4_0 \
test-gpu-openmpi-py2_7-tfhead-kerashead-torchhead-mxnethead-pyspark2_4_0 \
test-gpu-openmpi-py3_6-tfhead-kerashead-torchhead-mxnethead-pyspark2_4_0 \
test-gpu-mpich-py3_6-tf1_14_0-keras2_2_4-torch1_1_0-mxnet1_4_1-pyspark2_4_0 \
@@ -95,24 +100,33 @@ run_all() {
   local test=$1
   local queue=$2

+  local exclude_keras_if_needed=""
+  if [[ ${test} == *"tf2_"* ]]; then
+    # TODO: support for Keras + TF 2.0 and TF-Keras 2.0
+    exclude_keras_if_needed="| sed 's/[a-z_]*keras[a-z_.]*//g'"
+  fi
+
   run_test "${test}" "${queue}" \
     ":pytest: Run PyTests (${test})" \
-    "bash -c \"cd /horovod/test && (echo test_*.py | xargs -n 1 \\\$(cat /mpirun_command) pytest -v --capture=no)\""
+    "bash -c \"cd /horovod/test && (echo test_*.py ${exclude_keras_if_needed} | xargs -n 1 \\\$(cat /mpirun_command) pytest -v --capture=no)\""

-  run_test "${test}" "${queue}" \
-    ":muscle: Test TensorFlow MNIST (${test})" \
-    "bash -c \"\\\$(cat /mpirun_command) python /horovod/examples/tensorflow_mnist.py\""
+  # Legacy TensorFlow tests
+  if [[ ${test} != *"tf2_"* ]]; then
+    run_test "${test}" "${queue}" \
+      ":muscle: Test TensorFlow MNIST (${test})" \
+      "bash -c \"\\\$(cat /mpirun_command) python /horovod/examples/tensorflow_mnist.py\""

-  if [[ ${test} != *"tf1_1_0"* && ${test} != *"tf1_6_0"* ]]; then
-    run_test "${test}" "${queue}" \
-      ":muscle: Test TensorFlow Eager MNIST (${test})" \
-      "bash -c \"\\\$(cat /mpirun_command) python /horovod/examples/tensorflow_mnist_eager.py\""
-  fi
+    if [[ ${test} != *"tf1_1_0"* && ${test} != *"tf1_6_0"* ]]; then
+      run_test "${test}" "${queue}" \
+        ":muscle: Test TensorFlow Eager MNIST (${test})" \
+        "bash -c \"\\\$(cat /mpirun_command) python /horovod/examples/tensorflow_mnist_eager.py\""
+    fi

-  run_test "${test}" "${queue}" \
-    ":muscle: Test Keras MNIST (${test})" \
-    "bash -c \"\\\$(cat /mpirun_command) python /horovod/examples/keras_mnist_advanced.py\""
+    run_test "${test}" "${queue}" \
+      ":muscle: Test Keras MNIST (${test})" \
+      "bash -c \"\\\$(cat /mpirun_command) python /horovod/examples/keras_mnist_advanced.py\""
+  fi

   run_test "${test}" "${queue}" \
     ":muscle: Test PyTorch MNIST (${test})" \
     "bash -c \"\\\$(cat /mpirun_command) python /horovod/examples/pytorch_mnist.py\""
@@ -134,6 +148,13 @@ run_all() {
"horovodrun -np 2 -H localhost:2 python /horovod/examples/tensorflow_mnist.py"
fi
fi

# TensorFlow 2.0 tests
if [[ ${test} == *"tf2_"* ]]; then
run_test "${test}" "${queue}" \
":muscle: Test TensorFlow 2.0 MNIST (${test})" \
"bash -c \"\\\$(cat /mpirun_command) python /horovod/examples/tensorflow2_mnist.py\""
fi
}

build_docs() {
@@ -116,6 +116,43 @@ services:
        TORCHVISION_PACKAGE: https://download.pytorch.org/whl/cpu/torchvision-0.3.0-cp36-cp36m-linux_x86_64.whl
        MXNET_PACKAGE: mxnet==1.4.1
        PYSPARK_PACKAGE: pyspark==2.4.0
  test-cpu-openmpi-py2_7-tf2_0_0-keras2_2_4-torch1_1_0-mxnet1_4_1-pyspark2_4_0:
    extends: test-cpu-base
    build:
      args:
        MPI_KIND: OpenMPI
        PYTHON_VERSION: 2.7
        TENSORFLOW_PACKAGE: tensorflow==2.0.0b1
        KERAS_PACKAGE: keras==2.2.4
        PYTORCH_PACKAGE: torch==1.1.0
        TORCHVISION_PACKAGE: https://download.pytorch.org/whl/cpu/torchvision-0.3.0-cp27-cp27mu-linux_x86_64.whl
        MXNET_PACKAGE: mxnet==1.4.1
        PYSPARK_PACKAGE: pyspark==2.4.0
  test-cpu-openmpi-py3_5-tf2_0_0-keras2_2_4-torch1_1_0-mxnet1_4_1-pyspark2_4_0:
    extends: test-cpu-base
    build:
      args:
        MPI_KIND: OpenMPI
        PYTHON_VERSION: 3.5
        TENSORFLOW_PACKAGE: tensorflow==2.0.0b1
        KERAS_PACKAGE: keras==2.2.4
        PYTORCH_PACKAGE: torch==1.1.0
        TORCHVISION_PACKAGE: https://download.pytorch.org/whl/cpu/torchvision-0.3.0-cp35-cp35m-linux_x86_64.whl
        MXNET_PACKAGE: mxnet==1.4.1
        PYSPARK_PACKAGE: pyspark==2.4.0
  test-cpu-openmpi-py3_6-tf2_0_0-keras2_2_4-torch1_1_0-mxnet1_4_1-pyspark2_4_0:
    extends: test-cpu-base
    build:
      args:
        UBUNTU_VERSION: 18.04
        MPI_KIND: OpenMPI
        PYTHON_VERSION: 3.6
        TENSORFLOW_PACKAGE: tensorflow==2.0.0b1
        KERAS_PACKAGE: keras==2.2.4
        PYTORCH_PACKAGE: torch==1.1.0
        TORCHVISION_PACKAGE: https://download.pytorch.org/whl/cpu/torchvision-0.3.0-cp36-cp36m-linux_x86_64.whl
        MXNET_PACKAGE: mxnet==1.4.1
        PYSPARK_PACKAGE: pyspark==2.4.0
  test-cpu-openmpi-py2_7-tfhead-kerashead-torchhead-mxnethead-pyspark2_4_0:
    extends: test-cpu-base
    build:
@@ -233,6 +270,31 @@ services:
        TORCHVISION_PACKAGE: https://download.pytorch.org/whl/cu100/torchvision-0.3.0-cp36-cp36m-linux_x86_64.whl
        MXNET_PACKAGE: mxnet-cu100==1.4.1
        PYSPARK_PACKAGE: pyspark==2.4.0
  test-gpu-openmpi-py2_7-tf2_0_0-keras2_2_4-torch1_1_0-mxnet1_4_1-pyspark2_4_0:
    extends: test-gpu-base
    build:
      args:
        MPI_KIND: OpenMPI
        PYTHON_VERSION: 2.7
        TENSORFLOW_PACKAGE: tensorflow-gpu==2.0.0b1
        KERAS_PACKAGE: keras==2.2.4
        PYTORCH_PACKAGE: https://download.pytorch.org/whl/cu100/torch-1.1.0-cp27-cp27mu-linux_x86_64.whl
        TORCHVISION_PACKAGE: https://download.pytorch.org/whl/cu100/torchvision-0.3.0-cp27-cp27mu-linux_x86_64.whl
        MXNET_PACKAGE: mxnet-cu100==1.4.1
        PYSPARK_PACKAGE: pyspark==2.4.0
  test-gpu-openmpi-py3_6-tf2_0_0-keras2_2_4-torch1_1_0-mxnet1_4_1-pyspark2_4_0:
    extends: test-gpu-base
    build:
      args:
        CUDA_DOCKER_VERSION: 10.0-devel-ubuntu18.04
        MPI_KIND: OpenMPI
        PYTHON_VERSION: 3.6
        TENSORFLOW_PACKAGE: tensorflow-gpu==2.0.0b1
        KERAS_PACKAGE: keras==2.2.4
        PYTORCH_PACKAGE: https://download.pytorch.org/whl/cu100/torch-1.1.0-cp36-cp36m-linux_x86_64.whl
        TORCHVISION_PACKAGE: https://download.pytorch.org/whl/cu100/torchvision-0.3.0-cp36-cp36m-linux_x86_64.whl
        MXNET_PACKAGE: mxnet-cu100==1.4.1
        PYSPARK_PACKAGE: pyspark==2.4.0
  test-gpu-openmpi-py2_7-tfhead-kerashead-torchhead-mxnethead-pyspark2_4_0:
    extends: test-gpu-base
    build:
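Each new entry above is an ordinary Compose service, so a single TF 2.0 test image can be built and exercised locally. A sketch, assuming these services live in the repository's docker-compose.test.yml (the service name is copied verbatim from the CPU entry above):

# Build the TF 2.0 CPU test image defined above
$ docker-compose -f docker-compose.test.yml build test-cpu-openmpi-py3_6-tf2_0_0-keras2_2_4-torch1_1_0-mxnet1_4_1-pyspark2_4_0
# Run the service's default command inside the freshly built image
$ docker-compose -f docker-compose.test.yml run test-cpu-openmpi-py3_6-tf2_0_0-keras2_2_4-torch1_1_0-mxnet1_4_1-pyspark2_4_0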
@@ -64,7 +64,7 @@
     'special-members': '__init__',
     'imported-members': None,
     'undoc-members': None,
-    'exclude-members': 'contextmanager',
+    'exclude-members': 'contextmanager, LooseVersion, tf, keras, torch, mx, pyspark',
 }


@@ -45,10 +45,13 @@

 MOCK_TREE = {
     'tensorflow': {
-        '__version__': '1.12.0',
+        '__version__': '1.14.0',
         'train': {
             'Optimizer': MagicMock,
             'SessionRunMock': MagicMock,
             'SessionRunHook': MagicMock,
         },
+        'estimator': {
+            'SessionRunHook': MagicMock,
+        },
         'keras': {
             'callbacks': {
@@ -0,0 +1,88 @@
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import tensorflow as tf
import horovod.tensorflow as hvd

# Horovod: initialize Horovod.
hvd.init()

# Horovod: pin GPU to be used to process local rank (one GPU per process)
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
    tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')

mnist_model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(16, [3, 3], activation='relu'),
    tf.keras.layers.Conv2D(16, [3, 3], activation='relu'),
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(10)
])
loss = tf.losses.SparseCategoricalCrossentropy(from_logits=True)

# Horovod: adjust learning rate based on number of GPUs.
opt = tf.optimizers.Adam(0.001 * hvd.size())

(mnist_images, mnist_labels), _ = \
    tf.keras.datasets.mnist.load_data(path='mnist-%d.npz' % hvd.rank())

dataset = tf.data.Dataset.from_tensor_slices(
    (tf.cast(mnist_images[..., tf.newaxis] / 255.0, tf.float32),
     tf.cast(mnist_labels, tf.int64))
)
dataset = dataset.repeat().shuffle(1000).batch(32)

checkpoint_dir = './checkpoints'
checkpoint = tf.train.Checkpoint(model=mnist_model, optimizer=opt)


@tf.function
def training_step(images, labels, first_batch):
    with tf.GradientTape() as tape:
        logits = mnist_model(images, training=True)
        loss_value = loss(labels, logits)

    # Horovod: add Horovod Distributed GradientTape.
    tape = hvd.DistributedGradientTape(tape)

    grads = tape.gradient(loss_value, mnist_model.trainable_variables)
    opt.apply_gradients(zip(grads, mnist_model.trainable_variables))

    # Horovod: broadcast initial variable states from rank 0 to all other processes.
    # This is necessary to ensure consistent initialization of all workers when
    # training is started with random weights or restored from a checkpoint.
    #
    # Note: broadcast should be done after the first gradient step to ensure optimizer
    # initialization.
    if first_batch:
        hvd.broadcast_variables(mnist_model.variables, root_rank=0)
        hvd.broadcast_variables(opt.variables(), root_rank=0)

    return loss_value


# Horovod: adjust number of steps based on number of GPUs.
for batch, (images, labels) in enumerate(dataset.take(20000 // hvd.size())):
    loss_value = training_step(images, labels, batch == 0)

    if batch % 10 == 0 and hvd.local_rank() == 0:
        print('Step #%d\tLoss: %.6f' % (batch, loss_value))

# Horovod: save checkpoints only on worker 0 to prevent other workers from
# corrupting it.
if hvd.rank() == 0:
    checkpoint.save(checkpoint_dir)
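Outside CI, the new example can be launched the same way the single-machine integration test above does; a sketch, reusing the horovodrun invocation shown earlier for tensorflow_mnist.py (adjust -np and the script path to your checkout):

# Two Horovod worker processes on the local machine; assumes TensorFlow 2.0 and Horovod are installed.
$ horovodrun -np 2 -H localhost:2 python examples/tensorflow2_mnist.py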
