torch.distributed group initialization for torch_neuron disabled when `optimum-neuron` is installed (#22728)

* Make the process group initialization not happen if optimum_neuron is installed

* Add warning

* Remove list and add warning
michaelbenayoun committed Apr 12, 2023
1 parent 1306b7d commit 10fab90
Showing 2 changed files with 24 additions and 9 deletions.
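
In short, the change gates the XLA process-group setup that training_args.py previously ran unconditionally whenever TORCHELASTIC_RUN_ID is set: with optimum-neuron installed, Transformers only logs a reminder to use the TrainiumTrainer and skips the torch.distributed initialization; without it, it warns and falls back to the previous behaviour. Below is a minimal standalone sketch of that control flow, outside Transformers; the helper name maybe_init_xla_process_group and its boolean return value are invented for illustration, and the real branch imports torch_xla and calls torch.distributed.init_process_group(backend="xla") as shown in the diff.

import importlib.util
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def is_optimum_neuron_available():
    # Same one-line check as the helper added to import_utils.py in this commit
    # (see the note after that diff for the case where "optimum" itself is absent).
    return importlib.util.find_spec("optimum.neuron") is not None


def maybe_init_xla_process_group():
    # Illustrative stand-in for the new branch in training_args.py: defer to
    # optimum[neuron]'s trainer when it is installed, otherwise warn and
    # initialize the process group (reduced here to a boolean).
    if is_optimum_neuron_available():
        logger.info("optimum[neuron] detected: leaving process group setup to the TrainiumTrainer.")
        return False
    logger.warning("optimum[neuron] not found: the XLA process group would be initialized here.")
    return True


if __name__ == "__main__":
    print("would initialize torch.distributed:", maybe_init_xla_process_group())
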
src/transformers/training_args.py (20 additions, 9 deletions)
@@ -54,8 +54,13 @@
     logging,
     requires_backends,
 )
+from .utils.import_utils import is_optimum_neuron_available
 
 
+logger = logging.get_logger(__name__)
+log_levels = logging.get_log_levels_dict().copy()
+trainer_log_levels = dict(**log_levels, passive=-1)
+
 if is_torch_available():
     import torch
     import torch.distributed as dist
@@ -67,12 +72,23 @@
         # torchrun support
         # https://github.com/pytorch/xla/pull/3609
         if os.environ.get("TORCHELASTIC_RUN_ID"):
-            import torch_xla.distributed.xla_backend as xbn
+            if is_optimum_neuron_available():
+                logger.info(
+                    "Make sure that you are performing the training with the TrainiumTrainer from optimum[neuron], this "
+                    "will fail otherwise."
+                )
+            else:
+                logger.warning(
+                    "Please use the TrainiumTrainer from optimum[neuron] instead of the Transformers library to perform "
+                    "training on AWS Trainium instances. More information here: "
+                    "https://github.com/huggingface/optimum-neuron"
+                )
+                import torch_xla.distributed.xla_backend as xbn
 
-            if not isinstance(torch.distributed.group.WORLD, xbn.ProcessGroupXla):
-                torch.distributed.init_process_group(backend="xla")
                 if not isinstance(torch.distributed.group.WORLD, xbn.ProcessGroupXla):
-                    raise AssertionError("Failed to initialize torch.distributed process group using XLA backend.")
+                    torch.distributed.init_process_group(backend="xla")
+                    if not isinstance(torch.distributed.group.WORLD, xbn.ProcessGroupXla):
+                        raise AssertionError("Failed to initialize torch.distributed process group using XLA backend.")
 
 
 if is_sagemaker_mp_enabled():
@@ -81,11 +97,6 @@
     smp.init()
 
 
-logger = logging.get_logger(__name__)
-log_levels = logging.get_log_levels_dict().copy()
-trainer_log_levels = dict(**log_levels, passive=-1)
-
-
 def default_logdir() -> str:
     """
     Same default as PyTorch
src/transformers/utils/import_utils.py (4 additions, 0 deletions)
@@ -583,6 +583,10 @@ def is_optimum_available():
     return importlib.util.find_spec("optimum") is not None
 
 
+def is_optimum_neuron_available():
+    return importlib.util.find_spec("optimum.neuron") is not None
+
+
 def is_safetensors_available():
     if is_torch_available():
         if version.parse(_torch_version) >= version.parse("1.10"):
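
A note on the check itself: importlib.util.find_spec returns a module spec when the package can be located and None otherwise, but for a dotted name such as "optimum.neuron" it imports the parent package ("optimum") to resolve the submodule, so a missing parent raises ModuleNotFoundError instead of returning None. Below is a small standalone probe illustrating this; the try/except guard is an addition for the sketch, not part of the committed helper.

import importlib.util


def probe(name):
    # True if the package or submodule can be located, False otherwise,
    # including the dotted-name case where the parent package is absent.
    try:
        return importlib.util.find_spec(name) is not None
    except ModuleNotFoundError:
        return False


print("optimum installed:        ", probe("optimum"))
print("optimum.neuron installed: ", probe("optimum.neuron"))
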
