Skip to content

Commit

Permalink
Replace all uses of distutils.version.LooseVersion with packaging.version
Browse files Browse the repository at this point in the history
  • Loading branch information
njzjz committed Sep 14, 2022
1 parent 6024a38 commit 2bfa8e1
Show file tree
Hide file tree
Showing 39 changed files with 108 additions and 108 deletions.
2 changes: 1 addition & 1 deletion docs/conf.py
Expand Up @@ -62,7 +62,7 @@
'members': None,
'member-order': 'bysource',
'imported-members': None,
'exclude-members': 'contextmanager, LooseVersion, tf, keras, torch, mx, pyspark',
'exclude-members': 'contextmanager, version.parse, tf, keras, torch, mx, pyspark',
}


Expand Down
Expand Up @@ -16,7 +16,7 @@
import argparse
import tensorflow as tf
import horovod.tensorflow.keras as hvd
from distutils.version import LooseVersion
from packaging import version

parser = argparse.ArgumentParser(description='Tensorflow 2.0 Keras MNIST Example')

Expand All @@ -26,7 +26,7 @@
args = parser.parse_args()

if args.use_mixed_precision:
if LooseVersion(tf.__version__) >= LooseVersion('2.4.0'):
if version.parse(tf.__version__) >= version.parse('2.4.0'):
from tensorflow.keras import mixed_precision
mixed_precision.set_global_policy('mixed_float16')
else:
Expand Down
6 changes: 3 additions & 3 deletions examples/pytorch/pytorch_mnist.py
@@ -1,6 +1,6 @@
import argparse
import os
from distutils.version import LooseVersion
from packaging import version

import torch.multiprocessing as mp
import torch.nn as nn
Expand Down Expand Up @@ -161,8 +161,8 @@ def test():
if args.use_mixed_precision:
raise ValueError("Mixed precision is only supported with cuda enabled.")

if (args.use_mixed_precision and LooseVersion(torch.__version__)
< LooseVersion('1.6.0')):
if (args.use_mixed_precision and version.parse(torch.__version__)
< version.parse('1.6.0')):
raise ValueError("""Mixed precision is using torch.cuda.amp.autocast(),
which requires torch >= 1.6.0""")

Expand Down
6 changes: 3 additions & 3 deletions examples/spark/keras/keras_spark_mnist.py
Expand Up @@ -2,15 +2,15 @@
import os
import subprocess
import sys
from distutils.version import LooseVersion
from packaging import version

import numpy as np

import pyspark
import pyspark.sql.types as T
from pyspark import SparkConf
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
if LooseVersion(pyspark.__version__) < LooseVersion('3.0.0'):
if version.parse(pyspark.__version__) < version.parse('3.0.0'):
from pyspark.ml.feature import OneHotEncoderEstimator as OneHotEncoder
else:
from pyspark.ml.feature import OneHotEncoder
Expand Down Expand Up @@ -78,7 +78,7 @@
train_df, test_df = train_df.randomSplit([0.9, 0.1])

# Disable GPUs when building the model to prevent memory leaks
if LooseVersion(tf.__version__) >= LooseVersion('2.0.0'):
if version.parse(tf.__version__) >= version.parse('2.0.0'):
# See https://github.com/tensorflow/tensorflow/issues/33168
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
else:
Expand Down
4 changes: 2 additions & 2 deletions examples/spark/keras/keras_spark_rossmann_estimator.py
Expand Up @@ -18,7 +18,7 @@
import datetime
import os
import sys
from distutils.version import LooseVersion
from packaging import version

import pyspark.sql.types as T
import pyspark.sql.functions as F
Expand Down Expand Up @@ -338,7 +338,7 @@ def act_sigmoid_scaled(x):
'act_sigmoid_scaled': act_sigmoid_scaled}

# Disable GPUs when building the model to prevent memory leaks
if LooseVersion(tf.__version__) >= LooseVersion('2.0.0'):
if version.parse(tf.__version__) >= version.parse('2.0.0'):
# See https://github.com/tensorflow/tensorflow/issues/33168
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
else:
Expand Down
10 changes: 5 additions & 5 deletions examples/spark/pytorch/pytorch_lightning_spark_mnist.py
Expand Up @@ -2,15 +2,15 @@
import os
import subprocess
import sys
from distutils.version import LooseVersion
from packaging import version

import numpy as np

import pyspark
import pyspark.sql.types as T
from pyspark import SparkConf
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
if LooseVersion(pyspark.__version__) < LooseVersion('3.0.0'):
if version.parse(pyspark.__version__) < version.parse('3.0.0'):
from pyspark.ml.feature import OneHotEncoderEstimator as OneHotEncoder
else:
from pyspark.ml.feature import OneHotEncoder
Expand All @@ -21,8 +21,8 @@
try:
# tensorflow has to be imported BEFORE pytorch_lightning, otherwise we see the segfault right away
import tensorflow as tf
from distutils.version import LooseVersion
if LooseVersion('2.5.0') <= LooseVersion(tf.__version__) < LooseVersion('2.7.0'):
from packaging import version
if version.parse('2.5.0') <= version.parse(tf.__version__) < version.parse('2.7.0'):
print('Skipping test as Pytorch Lightning conflicts with present Tensorflow 2.6.x', file=sys.stderr)
sys.exit(0)
except ImportError:
Expand Down Expand Up @@ -61,7 +61,7 @@
def train_model(args):
# do not run this test for pytorch lightning below min supported verson
import pytorch_lightning as pl
if LooseVersion(pl.__version__) < LooseVersion(MIN_PL_VERSION):
if version.parse(pl.__version__) < version.parse(MIN_PL_VERSION):
print("Skip test for pytorch_ligthning=={}, min support version is {}".format(pl.__version__, MIN_PL_VERSION))
return

Expand Down
Expand Up @@ -2,15 +2,15 @@
import os
import subprocess
import sys
from distutils.version import LooseVersion
from packaging import version

import numpy as np

import pyspark
import pyspark.sql.types as T
from pyspark import SparkConf
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
if LooseVersion(pyspark.__version__) < LooseVersion('3.0.0'):
if version.parse(pyspark.__version__) < version.parse('3.0.0'):
from pyspark.ml.feature import OneHotEncoderEstimator as OneHotEncoder
else:
from pyspark.ml.feature import OneHotEncoder
Expand Down Expand Up @@ -45,7 +45,7 @@
def train_model(args):
# do not run this test for pytorch lightning below min supported verson
import pytorch_lightning as pl
if LooseVersion(pl.__version__) < LooseVersion(MIN_PL_VERSION):
if version.parse(pl.__version__) < version.parse(MIN_PL_VERSION):
print("Skip test for pytorch_ligthning=={}, min support version is {}".format(pl.__version__, MIN_PL_VERSION))
return

Expand Down
4 changes: 2 additions & 2 deletions examples/spark/pytorch/pytorch_spark_mnist.py
Expand Up @@ -2,15 +2,15 @@
import os
import subprocess
import sys
from distutils.version import LooseVersion
from packaging import version

import numpy as np

import pyspark
import pyspark.sql.types as T
from pyspark import SparkConf
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
if LooseVersion(pyspark.__version__) < LooseVersion('3.0.0'):
if version.parse(pyspark.__version__) < version.parse('3.0.0'):
from pyspark.ml.feature import OneHotEncoderEstimator as OneHotEncoder
else:
from pyspark.ml.feature import OneHotEncoder
Expand Down
4 changes: 2 additions & 2 deletions horovod/_keras/__init__.py
Expand Up @@ -13,7 +13,7 @@
# limitations under the License.
# ==============================================================================

from distutils.version import LooseVersion
from packaging import version

import horovod.tensorflow as hvd
import tensorflow as tf
Expand All @@ -22,7 +22,7 @@
from horovod.tensorflow.mpi_ops import rank


_PRE_TF_2_4_0 = LooseVersion(tf.__version__) < LooseVersion('2.4.0')
_PRE_TF_2_4_0 = version.parse(tf.__version__) < version.parse('2.4.0')


def create_distributed_optimizer(keras, optimizer, name, device_dense, device_sparse,
Expand Down
4 changes: 2 additions & 2 deletions horovod/_keras/callbacks.py
Expand Up @@ -13,7 +13,7 @@
# limitations under the License.
# ==============================================================================

from distutils.version import LooseVersion
from packaging import version
import warnings

import horovod.tensorflow as hvd
Expand Down Expand Up @@ -54,7 +54,7 @@ def __init__(self, backend, device='', *args):
self.allreduce_ops = {}
self.device = device

if LooseVersion("2.3") <= LooseVersion(tf.__version__) < LooseVersion("2.5"):
if version.parse("2.3") <= version.parse(tf.__version__) < version.parse("2.5"):
warnings.warn(
"Some callbacks may not have access to the averaged metrics, "
"see https://github.com/horovod/horovod/issues/2440")
Expand Down
4 changes: 2 additions & 2 deletions horovod/common/util.py
Expand Up @@ -269,7 +269,7 @@ def is_iterable(x):

@_cache
def is_version_greater_equal_than(ver, target):
from distutils.version import LooseVersion
from packaging import version
if any([not isinstance(_str, str) for _str in (ver, target)]):
raise ValueError("This function only accepts string arguments. \n"
"Received:\n"
Expand All @@ -285,4 +285,4 @@ def is_version_greater_equal_than(ver, target):
raise ValueError("We only accepts target version values in the form "
"of: major.minor.patch. Received: {}".format(target))

return LooseVersion(ver) >= LooseVersion(target)
return version.parse(ver) >= version.parse(target)
8 changes: 4 additions & 4 deletions horovod/spark/common/store.py
Expand Up @@ -22,7 +22,7 @@
import tempfile
import warnings

from distutils.version import LooseVersion
from packaging import version

import pyarrow as pa
import pyarrow.parquet as pq
Expand Down Expand Up @@ -202,7 +202,7 @@ def read_serialized_keras_model(self, ckpt_path, model, custom_objects):
from tensorflow import keras
from horovod.spark.keras.util import TFKerasUtil

if LooseVersion(tensorflow.__version__) < LooseVersion("2.0.0"):
if version.parse(tensorflow.__version__) < version.parse("2.0.0"):
model_bytes = self.read(ckpt_path)
return codec.dumps_base64(model_bytes)
else:
Expand Down Expand Up @@ -429,7 +429,7 @@ def __init__(self, prefix_path,
user=user,
kerb_ticket=kerb_ticket,
extra_conf=extra_conf)
if LooseVersion(pa.__version__) < LooseVersion('0.17.0'):
if version.parse(pa.__version__) < version.parse('0.17.0'):
self._hdfs_kwargs['driver'] = driver
self._hdfs = self._get_filesystem_fn()()

Expand Down Expand Up @@ -609,7 +609,7 @@ def read_serialized_keras_model(self, ckpt_path, model, custom_objects):
from tensorflow import keras
from horovod.spark.keras.util import TFKerasUtil

if LooseVersion(tensorflow.__version__) < LooseVersion("2.0.0"):
if version.parse(tensorflow.__version__) < version.parse("2.0.0"):
model.load_weights(ckpt_path)
else:
with keras.utils.custom_object_scope(custom_objects):
Expand Down
14 changes: 7 additions & 7 deletions horovod/spark/keras/remote.py
Expand Up @@ -22,7 +22,7 @@
import h5py
import tensorflow as tf

from distutils.version import LooseVersion
from packaging import version
from horovod.spark.common import constants
from horovod.spark.common.store import DBFSLocalStore
from horovod.spark.common.util import _get_assigned_gpu_or_default, _set_mp_start_method
Expand Down Expand Up @@ -126,7 +126,7 @@ def train(serialized_model, train_rows, val_rows, avg_row_size):
print("Skip pinning current process to the GPU.")

if random_seed is not None:
if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
if version.parse(tf.__version__) < version.parse('2.0.0'):
tf.random.set_random_seed(random_seed)
else:
tf.random.set_seed(random_seed)
Expand Down Expand Up @@ -176,7 +176,7 @@ def train(serialized_model, train_rows, val_rows, avg_row_size):
if _checkpoint_callback:
_checkpoint_callback.filepath = ckpt_file
else:
if is_dbfs and LooseVersion(tf.__version__) < LooseVersion("2.0.0"):
if is_dbfs and version.parse(tf.__version__) < version.parse("2.0.0"):
# Because DBFS local file APIs does not support random write which is
# required by h5 format, save_weights_only=True is needed for switching
# to the TensorFlow SavedModel format.
Expand Down Expand Up @@ -270,15 +270,15 @@ def train(serialized_model, train_rows, val_rows, avg_row_size):

if hvd.rank() == 0:
if is_dbfs:
if LooseVersion(tf.__version__) < LooseVersion("2.0.0"):
if version.parse(tf.__version__) < version.parse("2.0.0"):
model.load_weights(ckpt_file)
else:
# needs to be deserialized in the with scope
with k.utils.custom_object_scope(custom_objects):
model = k.models.load_model(ckpt_file)
serialized_model = keras_utils.serialize_model(model)
else:
if LooseVersion(tf.__version__) >= LooseVersion("2.0.0"):
if version.parse(tf.__version__) >= version.parse("2.0.0"):
with k.utils.custom_object_scope(custom_objects):
model = k.models.load_model(ckpt_file)
serialized_model = keras_utils.serialize_model(model)
Expand All @@ -302,7 +302,7 @@ def deserialize_keras_model(model_bytes, load_model_fn):

def _pin_gpu_fn():
    """Return the GPU-pinning setup function matching the installed TF major version.

    Horovod: pin GPU to be used to process local rank (one GPU per process).
    """
    tf2_or_newer = version.parse(tf.__version__) >= version.parse('2.0.0')
    if tf2_or_newer:
        return _pin_gpu_tensorflow2_fn()
    return _pin_gpu_tensorflow1_fn()


Expand All @@ -328,7 +328,7 @@ def fn(hvd, tf, keras):


def _pin_cpu_fn():
    """Return the CPU-pinning setup function matching the installed TF major version."""
    if version.parse(tf.__version__) < version.parse('2.0.0'):
        return _pin_cpu_tensorflow1_fn()
    return _pin_cpu_tensorflow2_fn()


Expand Down
2 changes: 1 addition & 1 deletion horovod/spark/keras/util.py
Expand Up @@ -16,7 +16,7 @@

import io

from distutils.version import LooseVersion
from packaging import version

import h5py
import numpy as np
Expand Down
4 changes: 2 additions & 2 deletions horovod/spark/lightning/estimator.py
Expand Up @@ -21,7 +21,7 @@
import os
import time
import warnings
from distutils.version import LooseVersion
from packaging import version

from pyspark import keyword_only
from pyspark.ml.param.shared import Param, Params, TypeConverters
Expand Down Expand Up @@ -298,7 +298,7 @@ def __init__(self,
kwargs = self._input_kwargs

# pl version check
if LooseVersion(pl.__version__) < LooseVersion(MIN_PL_VERSION):
if version.parse(pl.__version__) < version.parse(MIN_PL_VERSION):
raise RuntimeError("Only support pytorch_lightning version > {}, found version {}".format(MIN_PL_VERSION, pl.__version__))

if EstimatorParams.loss.name in kwargs and TorchEstimator.loss_constructors.name in kwargs:
Expand Down
2 changes: 1 addition & 1 deletion horovod/spark/lightning/remote.py
Expand Up @@ -19,7 +19,7 @@
import tempfile
import math
import warnings
from distutils.version import LooseVersion
from packaging import version

import torch
import pytorch_lightning as pl
Expand Down
4 changes: 2 additions & 2 deletions horovod/spark/task/task_service.py
Expand Up @@ -13,7 +13,7 @@
# limitations under the License.
# ==============================================================================

from distutils.version import LooseVersion
from packaging import version

import os
import pyspark
Expand Down Expand Up @@ -96,7 +96,7 @@ def _handle(self, req, client_address):
return super(SparkTaskService, self)._handle(req, client_address)

def _get_resources(self):
if LooseVersion(pyspark.__version__) >= LooseVersion('3.0.0'):
if version.parse(pyspark.__version__) >= version.parse('3.0.0'):
task_context = pyspark.TaskContext.get()
if task_context:
return task_context.resources()
Expand Down
4 changes: 2 additions & 2 deletions horovod/tensorflow/__init__.py
Expand Up @@ -16,7 +16,7 @@
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation

from distutils.version import LooseVersion
from packaging import version
import os
import warnings

Expand Down Expand Up @@ -45,7 +45,7 @@
from horovod.tensorflow.gradient_aggregation import LocalGradientAggregationHelper

import tensorflow as tf
_IS_TF2 = LooseVersion(tf.__version__) >= LooseVersion('2.0.0')
_IS_TF2 = version.parse(tf.__version__) >= version.parse('2.0.0')

# @DEKHTIARJonathan: Do not remove, this fixes issues:
# - https://github.com/tensorflow/tensorflow/issues/38516
Expand Down

0 comments on commit 2bfa8e1

Please sign in to comment.