diff --git a/.isort.cfg b/.isort.cfg
index dae7524a502d..cb3272070ed5 100644
--- a/.isort.cfg
+++ b/.isort.cfg
@@ -19,7 +19,7 @@ filter_files=True
# python/ray/setup-dev.py
# For the rest we will gradually remove them from the blacklist as we
# reformat the code to follow the style guide.
-skip_glob=doc/*,python/ray/__init__.py,python/ray/setup-dev.py,python/build/*,python/ray/cloudpickle/*,python/ray/thirdparty_files/*,python/ray/_private/thirdparty/*,python/ray/_private/runtime_env/agent/thirdparty_files/*,python/ray/dag/*.py,ci/*,python/ray/_private/*,python/ray/air/*,dashboard/*,python/ray/includes/*,python/ray/internal/*,python/ray/ray_operator/*,python/ray/scripts/*,python/ray/serve/generated/serve_pb2.py,python/ray/sgd/*,python/ray/streaming/*,python/ray/tests/*,python/ray/tests/*,python/ray/train/*,python/ray/tune/*,python/ray/util/*,python/ray/workers/*,python/ray/workflow/*,rllib/*,release/*,
+skip_glob=doc/*,python/ray/__init__.py,python/ray/setup-dev.py,python/build/*,python/ray/cloudpickle/*,python/ray/thirdparty_files/*,python/ray/_private/thirdparty/*,python/ray/_private/runtime_env/agent/thirdparty_files/*,python/ray/dag/*.py,ci/*,python/ray/_private/*,python/ray/air/*,dashboard/*,python/ray/includes/*,python/ray/internal/*,python/ray/ray_operator/*,python/ray/scripts/*,python/ray/serve/generated/serve_pb2.py,python/ray/sgd/*,python/ray/streaming/*,python/ray/tests/*,python/ray/tune/*,python/ray/util/*,python/ray/workers/*,python/ray/workflow/*,rllib/*,release/*,
known_local_folder=ray
known_afterray=psutil,setproctitle
diff --git a/python/ray/train/__init__.py b/python/ray/train/__init__.py
index d1a3c0df3813..744d5d5bbef9 100644
--- a/python/ray/train/__init__.py
+++ b/python/ray/train/__init__.py
@@ -1,17 +1,21 @@
# Try import ray[train] core requirements (defined in setup.py)
+# isort: off
try:
+ import fsspec # noqa: F401
import pandas # noqa: F401
- import requests # noqa: F401
import pyarrow # noqa: F401
- import fsspec # noqa: F401
+ import requests # noqa: F401
except ImportError as exc:
raise ImportError(
"Can't import ray.train as some dependencies are missing. "
'Run `pip install "ray[train]"` to fix.'
) from exc
+# isort: on
from ray._private.usage import usage_lib
+from ray.air.config import CheckpointConfig, FailureConfig, RunConfig, ScalingConfig
+from ray.air.result import Result
# Import this first so it can be used in other modules
from ray.train._checkpoint import Checkpoint
@@ -23,9 +27,6 @@
from ray.train.context import get_context
from ray.train.trainer import TrainingIterator
-from ray.air.config import CheckpointConfig, FailureConfig, RunConfig, ScalingConfig
-from ray.air.result import Result
-
usage_lib.record_library_usage("train")
Checkpoint.__module__ = "ray.train"
diff --git a/python/ray/train/_checkpoint.py b/python/ray/train/_checkpoint.py
index 125227ef0195..aeb021cf8672 100644
--- a/python/ray/train/_checkpoint.py
+++ b/python/ray/train/_checkpoint.py
@@ -7,8 +7,8 @@
import shutil
import tempfile
import traceback
-from typing import Any, Dict, Iterator, List, Optional, Union
import uuid
+from typing import Any, Dict, Iterator, List, Optional, Union
import pyarrow.fs
diff --git a/python/ray/train/_internal/backend_executor.py b/python/ray/train/_internal/backend_executor.py
index c6b97c7915f0..444e46db51f7 100644
--- a/python/ray/train/_internal/backend_executor.py
+++ b/python/ray/train/_internal/backend_executor.py
@@ -2,17 +2,17 @@
import os
from collections import defaultdict
from dataclasses import dataclass
-from typing import Callable, Dict, List, Optional, Tuple, Type, TypeVar, Any
+from typing import Any, Callable, Dict, List, Optional, Tuple, Type, TypeVar
import ray
import ray._private.ray_constants as ray_constants
-from ray.data import Dataset
from ray._private.ray_constants import env_integer
+from ray.data import Dataset
from ray.exceptions import RayActorError
from ray.train import Checkpoint, DataConfig
from ray.train._internal.session import (
- _TrainingResult,
TrialInfo,
+ _TrainingResult,
get_session,
init_session,
shutdown_session,
@@ -24,9 +24,9 @@
from ray.train.constants import (
ENABLE_DETAILED_AUTOFILLED_METRICS_ENV,
ENABLE_SHARE_CUDA_VISIBLE_DEVICES_ENV,
+ ENABLE_SHARE_NEURON_CORES_ACCELERATOR_ENV,
TRAIN_ENABLE_WORKER_SPREAD_ENV,
TRAIN_PLACEMENT_GROUP_TIMEOUT_S_ENV,
- ENABLE_SHARE_NEURON_CORES_ACCELERATOR_ENV,
)
from ray.util.placement_group import get_current_placement_group, remove_placement_group
diff --git a/python/ray/train/_internal/checkpoint_manager.py b/python/ray/train/_internal/checkpoint_manager.py
index a2bbf345b9d7..6a0eb5527524 100644
--- a/python/ray/train/_internal/checkpoint_manager.py
+++ b/python/ray/train/_internal/checkpoint_manager.py
@@ -3,12 +3,11 @@
from typing import Any, Callable, List, Optional, Tuple
from ray._private.dict import flatten_dict
-from ray.air.config import MAX
from ray.air._internal.util import is_nan
+from ray.air.config import MAX
from ray.train import CheckpointConfig
-from ray.train._internal.storage import _delete_fs_path
from ray.train._internal.session import _TrainingResult
-
+from ray.train._internal.storage import _delete_fs_path
logger = logging.getLogger(__name__)
diff --git a/python/ray/train/_internal/data_config.py b/python/ray/train/_internal/data_config.py
index 4503d7b33a09..4d36386e684f 100644
--- a/python/ray/train/_internal/data_config.py
+++ b/python/ray/train/_internal/data_config.py
@@ -1,20 +1,14 @@
-from typing import Optional, Union, Dict, List
+import sys
+from typing import Dict, List, Optional, Union
import ray
from ray.actor import ActorHandle
+from ray.data import DataIterator, Dataset, ExecutionOptions, NodeIdStr
+from ray.data.preprocessor import Preprocessor
# TODO(justinvyu): Fix the circular import error
from ray.train.constants import TRAIN_DATASET_KEY # noqa
-from ray.util.annotations import PublicAPI, DeveloperAPI
-from ray.data import (
- Dataset,
- DataIterator,
- ExecutionOptions,
- NodeIdStr,
-)
-from ray.data.preprocessor import Preprocessor
-
-import sys
+from ray.util.annotations import DeveloperAPI, PublicAPI
if sys.version_info >= (3, 8):
from typing import Literal
diff --git a/python/ray/train/_internal/dataset_spec.py b/python/ray/train/_internal/dataset_spec.py
index 8009046e652c..719b6a5e6ce0 100644
--- a/python/ray/train/_internal/dataset_spec.py
+++ b/python/ray/train/_internal/dataset_spec.py
@@ -2,7 +2,6 @@
from typing import Callable, Dict, List, Optional, Union
from ray.actor import ActorHandle
-
from ray.data import Dataset, DatasetPipeline
RayDataset = Union["Dataset", "DatasetPipeline"]
diff --git a/python/ray/train/_internal/session.py b/python/ray/train/_internal/session.py
index 690e58fdb19d..f76ea2aff7fe 100644
--- a/python/ray/train/_internal/session.py
+++ b/python/ray/train/_internal/session.py
@@ -1,38 +1,38 @@
-import os
+import functools
import logging
+import os
import platform
import queue
import sys
import threading
import time
+import warnings
from dataclasses import dataclass
from datetime import datetime
-import functools
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Set, Type, Union
-import warnings
import ray
from ray.air._internal.session import _get_session
-from ray.air._internal.util import StartTraceback, RunnerThread
+from ray.air._internal.util import RunnerThread, StartTraceback
from ray.air.constants import (
- _RESULT_FETCH_TIMEOUT,
_ERROR_FETCH_TIMEOUT,
+ _RESULT_FETCH_TIMEOUT,
SESSION_MISUSE_LOG_ONCE_KEY,
- TIMESTAMP,
TIME_THIS_ITER_S,
+ TIMESTAMP,
)
from ray.data import Dataset, DatasetPipeline
from ray.train import Checkpoint
from ray.train._internal.accelerator import Accelerator
-from ray.train._internal.storage import _use_storage_context, StorageContext
+from ray.train._internal.storage import StorageContext, _use_storage_context
from ray.train.constants import (
+ CHECKPOINT_DIR_NAME,
DETAILED_AUTOFILLED_KEYS,
+ RAY_CHDIR_TO_TRIAL_DIR,
+ TIME_TOTAL_S,
WORKER_HOSTNAME,
WORKER_NODE_IP,
WORKER_PID,
- TIME_TOTAL_S,
- RAY_CHDIR_TO_TRIAL_DIR,
- CHECKPOINT_DIR_NAME,
)
from ray.train.error import SessionMisuseError
from ray.util.annotations import DeveloperAPI, PublicAPI
@@ -43,7 +43,6 @@
SchedulingStrategyT,
)
-
if TYPE_CHECKING:
from ray.data import DataIterator
from ray.tune.execution.placement_groups import PlacementGroupFactory
diff --git a/python/ray/train/_internal/storage.py b/python/ray/train/_internal/storage.py
index 2543b912bf5b..21b23986eb3c 100644
--- a/python/ray/train/_internal/storage.py
+++ b/python/ray/train/_internal/storage.py
@@ -1,11 +1,5 @@
-import dataclasses
-import fnmatch
-import logging
-import os
-from pathlib import Path
-import shutil
-from typing import Callable, Dict, List, Optional, Tuple, Type, Union, TYPE_CHECKING
-
+# Try import ray[train] core requirements (defined in setup.py)
+# isort: off
try:
import fsspec # noqa
from fsspec.implementations.local import LocalFileSystem
@@ -25,11 +19,19 @@
"pyarrow is a required dependency of Ray Train and Ray Tune. "
"Please install with: `pip install pyarrow`"
) from e
+# isort: on
+import dataclasses
+import fnmatch
+import logging
+import os
+import shutil
+from pathlib import Path
+from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Tuple, Type, Union
from ray._private.storage import _get_storage_uri
from ray.air._internal.filelock import TempFileLock
-from ray.train._internal.syncer import Syncer, SyncConfig, _BackgroundSyncer
+from ray.train._internal.syncer import SyncConfig, Syncer, _BackgroundSyncer
from ray.train.constants import _get_defaults_results_dir
if TYPE_CHECKING:
diff --git a/python/ray/train/_internal/syncer.py b/python/ray/train/_internal/syncer.py
index cac5e86de8a0..eae650d89477 100644
--- a/python/ray/train/_internal/syncer.py
+++ b/python/ray/train/_internal/syncer.py
@@ -1,36 +1,25 @@
import abc
+import logging
import threading
+import time
import traceback
-from typing import (
- Any,
- Callable,
- Dict,
- List,
- Union,
- Optional,
- Tuple,
-)
import warnings
-
-import logging
-import time
from dataclasses import dataclass
-
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from ray._private.thirdparty.tabulate.tabulate import tabulate
from ray.air._internal.remote_storage import (
- fs_hint,
- upload_to_uri,
- download_from_uri,
delete_at_uri,
+ download_from_uri,
+ fs_hint,
is_non_local_path_uri,
+ upload_to_uri,
)
from ray.train.constants import _DEPRECATED_VALUE
from ray.util import log_once
-from ray.util.annotations import PublicAPI, DeveloperAPI
+from ray.util.annotations import DeveloperAPI, PublicAPI
from ray.widgets import Template
-
logger = logging.getLogger(__name__)
# Syncing period for syncing checkpoints between nodes or to cloud.
diff --git a/python/ray/train/_internal/utils.py b/python/ray/train/_internal/utils.py
index 7cc27afa3451..37fcec6ead2e 100644
--- a/python/ray/train/_internal/utils.py
+++ b/python/ray/train/_internal/utils.py
@@ -1,28 +1,17 @@
import abc
import functools
import inspect
-import os
import logging
+import os
from pathlib import Path
-
-from typing import (
- Tuple,
- Dict,
- List,
- Any,
- Union,
- Callable,
- TypeVar,
- Optional,
-)
+from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union
import ray
-from ray.air._internal.util import find_free_port, StartTraceback
from ray.actor import ActorHandle
+from ray.air._internal.util import StartTraceback, find_free_port
from ray.exceptions import RayActorError
from ray.types import ObjectRef
-
T = TypeVar("T")
logger = logging.getLogger(__name__)
diff --git a/python/ray/train/_internal/worker_group.py b/python/ray/train/_internal/worker_group.py
index 229f5a474daa..1ded484abc63 100644
--- a/python/ray/train/_internal/worker_group.py
+++ b/python/ray/train/_internal/worker_group.py
@@ -3,11 +3,11 @@
import socket
from collections import defaultdict
from dataclasses import dataclass
-from typing import Callable, List, TypeVar, Optional, Dict, Type, Tuple, Union
+from typing import Callable, Dict, List, Optional, Tuple, Type, TypeVar, Union
import ray
from ray.actor import ActorHandle
-from ray.air._internal.util import skip_exceptions, exception_cause
+from ray.air._internal.util import exception_cause, skip_exceptions
from ray.types import ObjectRef
from ray.util.placement_group import PlacementGroup
diff --git a/python/ray/train/base_trainer.py b/python/ray/train/base_trainer.py
index 494da2783fde..567fb0932f82 100644
--- a/python/ray/train/base_trainer.py
+++ b/python/ray/train/base_trainer.py
@@ -4,33 +4,30 @@
import json
import logging
import os
+import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type, Union
-import warnings
import pyarrow.fs
import ray
import ray.cloudpickle as pickle
-from ray.air._internal.config import ensure_only_allowed_dataclass_keys_updated
+from ray._private.dict import merge_dicts
from ray.air._internal import usage as air_usage
+from ray.air._internal.config import ensure_only_allowed_dataclass_keys_updated
from ray.air._internal.usage import AirEntrypoint
from ray.air.config import RunConfig, ScalingConfig
from ray.air.result import Result
+from ray.train import Checkpoint
from ray.train._internal.session import _get_session
from ray.train._internal.storage import _exists_at_fs_path, get_fs_and_path
-
-from ray.train import Checkpoint
from ray.train.constants import TRAIN_DATASET_KEY
from ray.util import PublicAPI
from ray.util.annotations import DeveloperAPI
-from ray._private.dict import merge_dicts
-
if TYPE_CHECKING:
from ray.data import Dataset
from ray.data.preprocessor import Preprocessor
-
from ray.tune import Trainable
_TRAINER_PKL = "trainer.pkl"
@@ -578,8 +575,8 @@ def fit(self) -> Result:
TrainingFailedError: If any failures during the execution of
``self.as_trainable()``, or during the Tune execution loop.
"""
- from ray.tune.tuner import Tuner, TunerInternal
from ray.tune import TuneError
+ from ray.tune.tuner import Tuner, TunerInternal
trainable = self.as_trainable()
param_space = self._extract_fields_for_tuner_param_space()
diff --git a/python/ray/train/constants.py b/python/ray/train/constants.py
index 80b68271f564..e333055a6999 100644
--- a/python/ray/train/constants.py
+++ b/python/ray/train/constants.py
@@ -2,11 +2,11 @@
from pathlib import Path
from ray.air.constants import ( # noqa: F401
+ COPY_DIRECTORY_CHECKPOINTS_INSTEAD_OF_MOVING_ENV,
EVALUATION_DATASET_KEY,
MODEL_KEY,
PREPROCESSOR_KEY,
TRAIN_DATASET_KEY,
- COPY_DIRECTORY_CHECKPOINTS_INSTEAD_OF_MOVING_ENV,
)
diff --git a/python/ray/train/context.py b/python/ray/train/context.py
index 183711b1c0fb..c39da0dd7c2b 100644
--- a/python/ray/train/context.py
+++ b/python/ray/train/context.py
@@ -1,11 +1,10 @@
import threading
-from typing import TYPE_CHECKING, Optional, Dict, Any
+from typing import TYPE_CHECKING, Any, Dict, Optional
from ray.train._internal import session
from ray.train._internal.storage import StorageContext
from ray.util.annotations import DeveloperAPI, PublicAPI
-
if TYPE_CHECKING:
from ray.tune.execution.placement_groups import PlacementGroupFactory
diff --git a/python/ray/train/data_parallel_trainer.py b/python/ray/train/data_parallel_trainer.py
index 1ee45d466de9..52f673630e3e 100644
--- a/python/ray/train/data_parallel_trainer.py
+++ b/python/ray/train/data_parallel_trainer.py
@@ -1,15 +1,15 @@
import inspect
import logging
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Type, Union
-from ray._private.thirdparty.tabulate.tabulate import tabulate
import ray
+from ray._private.thirdparty.tabulate.tabulate import tabulate
from ray.air.config import RunConfig, ScalingConfig
from ray.train import BackendConfig, Checkpoint, TrainingIterator
from ray.train._internal import session
-from ray.train._internal.session import _TrainingResult, get_session
from ray.train._internal.backend_executor import BackendExecutor, TrialInfo
from ray.train._internal.data_config import DataConfig
+from ray.train._internal.session import _TrainingResult, get_session
from ray.train._internal.utils import construct_train_func
from ray.train.trainer import BaseTrainer, GenDataset
from ray.util.annotations import DeveloperAPI, PublicAPI
@@ -485,7 +485,7 @@ def _repr_mimebundle_(self, **kwargs):
Returns:
A mimebundle containing an ipywidget repr and a simple text repr.
"""
- from ipywidgets import HTML, VBox, Tab, Layout
+ from ipywidgets import HTML, Layout, Tab, VBox
    title = HTML(f"<h2>{self.__class__.__name__}</h2>")
@@ -557,7 +557,7 @@ def _data_config_repr_html_(self) -> str:
return Template("rendered_html_common.html.j2").render(content=content)
def _datasets_repr_(self) -> str:
- from ipywidgets import HTML, VBox, Layout
+ from ipywidgets import HTML, Layout, VBox
content = []
if self.datasets:
diff --git a/python/ray/train/examples/accelerate/accelerate_torch_trainer.py b/python/ray/train/examples/accelerate/accelerate_torch_trainer.py
index f303e7be1308..1d2e34d14652 100644
--- a/python/ray/train/examples/accelerate/accelerate_torch_trainer.py
+++ b/python/ray/train/examples/accelerate/accelerate_torch_trainer.py
@@ -6,11 +6,12 @@
Fine-tune a BERT model with Hugging Face Accelerate and Ray Train and Ray Data
"""
+from tempfile import TemporaryDirectory
+
import evaluate
import torch
from accelerate import Accelerator
from datasets import load_dataset
-from tempfile import TemporaryDirectory
from torch.optim import AdamW
from transformers import (
AutoModelForSequenceClassification,
@@ -21,7 +22,7 @@
import ray
import ray.train
-from ray.train import DataConfig, ScalingConfig, Checkpoint
+from ray.train import Checkpoint, DataConfig, ScalingConfig
from ray.train.torch import TorchTrainer
diff --git a/python/ray/train/examples/accelerate/accelerate_torch_trainer_no_raydata.py b/python/ray/train/examples/accelerate/accelerate_torch_trainer_no_raydata.py
index c992cbaebc3f..e364e04c458a 100644
--- a/python/ray/train/examples/accelerate/accelerate_torch_trainer_no_raydata.py
+++ b/python/ray/train/examples/accelerate/accelerate_torch_trainer_no_raydata.py
@@ -6,11 +6,12 @@
Fine-tune a BERT model with Hugging Face Accelerate and Ray Train
"""
+from tempfile import TemporaryDirectory
+
import evaluate
import torch
from accelerate import Accelerator
from datasets import load_dataset
-from tempfile import TemporaryDirectory
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import (
@@ -21,7 +22,7 @@
)
import ray.train
-from ray.train import ScalingConfig, Checkpoint
+from ray.train import Checkpoint, ScalingConfig
from ray.train.torch import TorchTrainer
diff --git a/python/ray/train/examples/deepspeed/deepspeed_torch_trainer.py b/python/ray/train/examples/deepspeed/deepspeed_torch_trainer.py
index f3c2eae2dd83..c72e11994f5c 100644
--- a/python/ray/train/examples/deepspeed/deepspeed_torch_trainer.py
+++ b/python/ray/train/examples/deepspeed/deepspeed_torch_trainer.py
@@ -6,18 +6,14 @@
Fine-tune a BERT model with DeepSpeed ZeRO-3 and Ray Train and Ray Data
"""
+from tempfile import TemporaryDirectory
+
import deepspeed
import torch
-
from datasets import load_dataset
from deepspeed.accelerator import get_accelerator
-from tempfile import TemporaryDirectory
from torchmetrics.classification import BinaryAccuracy, BinaryF1Score
-from transformers import (
- AutoModelForSequenceClassification,
- AutoTokenizer,
- set_seed,
-)
+from transformers import AutoModelForSequenceClassification, AutoTokenizer, set_seed
import ray
import ray.train
diff --git a/python/ray/train/examples/deepspeed/deepspeed_torch_trainer_no_raydata.py b/python/ray/train/examples/deepspeed/deepspeed_torch_trainer_no_raydata.py
index eb19ac686705..330fe16da2a6 100644
--- a/python/ray/train/examples/deepspeed/deepspeed_torch_trainer_no_raydata.py
+++ b/python/ray/train/examples/deepspeed/deepspeed_torch_trainer_no_raydata.py
@@ -6,24 +6,19 @@
Fine-tune a BERT model with DeepSpeed ZeRO-3 and Ray Train
"""
+from tempfile import TemporaryDirectory
+
import deepspeed
import torch
-
from datasets import load_dataset
from deepspeed.accelerator import get_accelerator
-from tempfile import TemporaryDirectory
from torch.utils.data import DataLoader
from torchmetrics.classification import BinaryAccuracy, BinaryF1Score
-from transformers import (
- AutoModelForSequenceClassification,
- AutoTokenizer,
- set_seed,
-)
+from transformers import AutoModelForSequenceClassification, AutoTokenizer, set_seed
import ray
import ray.train
-
-from ray.train import ScalingConfig, Checkpoint
+from ray.train import Checkpoint, ScalingConfig
from ray.train.torch import TorchTrainer
diff --git a/python/ray/train/examples/experiment_tracking/lightning_exp_tracking_comet.py b/python/ray/train/examples/experiment_tracking/lightning_exp_tracking_comet.py
index c1bd34a15670..c1f2edf067b4 100644
--- a/python/ray/train/examples/experiment_tracking/lightning_exp_tracking_comet.py
+++ b/python/ray/train/examples/experiment_tracking/lightning_exp_tracking_comet.py
@@ -1,3 +1,4 @@
+# isort: skip_file
from lightning_exp_tracking_model_dl import DummyModel, dataloader
# __lightning_experiment_tracking_comet_start__
diff --git a/python/ray/train/examples/experiment_tracking/lightning_exp_tracking_model_dl.py b/python/ray/train/examples/experiment_tracking/lightning_exp_tracking_model_dl.py
index 340cf5bb56db..f09ec351fd44 100644
--- a/python/ray/train/examples/experiment_tracking/lightning_exp_tracking_model_dl.py
+++ b/python/ray/train/examples/experiment_tracking/lightning_exp_tracking_model_dl.py
@@ -1,11 +1,13 @@
# flake8: noqa
# fmt: off
+# # isort: skip_file
+
# __model_dl_start__
+import pytorch_lightning as pl
import torch
import torch.nn.functional as F
-import pytorch_lightning as pl
-from torch.utils.data import TensorDataset, DataLoader
+from torch.utils.data import DataLoader, TensorDataset
# Create dummy data
X = torch.randn(128, 3) # 128 samples, 3 features
diff --git a/python/ray/train/examples/experiment_tracking/lightning_exp_tracking_wandb.py b/python/ray/train/examples/experiment_tracking/lightning_exp_tracking_wandb.py
index 3fef95c4680e..07f1277d176a 100644
--- a/python/ray/train/examples/experiment_tracking/lightning_exp_tracking_wandb.py
+++ b/python/ray/train/examples/experiment_tracking/lightning_exp_tracking_wandb.py
@@ -1,16 +1,17 @@
# flake8: noqa
# fmt: off
+# # isort: skip_file
from lightning_exp_tracking_model_dl import DummyModel, dataloader
# __lightning_experiment_tracking_wandb_start__
import os
import pytorch_lightning as pl
+import wandb
from pytorch_lightning.loggers.wandb import WandbLogger
import ray
from ray.train import ScalingConfig
from ray.train.torch import TorchTrainer
-import wandb
def train_func(config):
diff --git a/python/ray/train/examples/experiment_tracking/torch_exp_tracking_wandb.py b/python/ray/train/examples/experiment_tracking/torch_exp_tracking_wandb.py
index 338717822bbe..a64b023fdcce 100644
--- a/python/ray/train/examples/experiment_tracking/torch_exp_tracking_wandb.py
+++ b/python/ray/train/examples/experiment_tracking/torch_exp_tracking_wandb.py
@@ -4,14 +4,16 @@
# __start__
# Run the following script with the WANDB_API_KEY env var set.
import os
-import ray
-from ray.train import ScalingConfig
-from ray.train.torch import TorchTrainer
+
import torch
+import wandb
+from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from torchvision.models import resnet18
-from torch.utils.data import DataLoader
-import wandb
+
+import ray
+from ray.train import ScalingConfig
+from ray.train.torch import TorchTrainer
assert os.environ.get("WANDB_API_KEY", None), "Please set WANDB_API_KEY env var."
diff --git a/python/ray/train/examples/horovod/horovod_cifar_pbt_example.py b/python/ray/train/examples/horovod/horovod_cifar_pbt_example.py
index c2806ffce994..ea6ffe36fc31 100755
--- a/python/ray/train/examples/horovod/horovod_cifar_pbt_example.py
+++ b/python/ray/train/examples/horovod/horovod_cifar_pbt_example.py
@@ -4,13 +4,15 @@
import numpy as np
import torch
import torch.nn as nn
-from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as transforms
+from torch.utils.data import DataLoader
from torchvision.models import resnet18
import ray
import ray.cloudpickle as cpickle
+import ray.train.torch
+from ray import train, tune
from ray.train import (
Checkpoint,
CheckpointConfig,
@@ -18,9 +20,7 @@
RunConfig,
ScalingConfig,
)
-import ray.train.torch
from ray.train.horovod import HorovodTrainer
-from ray import train, tune
from ray.tune.schedulers import create_scheduler
from ray.tune.tune_config import TuneConfig
from ray.tune.tuner import Tuner
diff --git a/python/ray/train/examples/horovod/horovod_pytorch_example.py b/python/ray/train/examples/horovod/horovod_pytorch_example.py
index 29cf5edce3f5..d20ed51e9ef7 100644
--- a/python/ray/train/examples/horovod/horovod_pytorch_example.py
+++ b/python/ray/train/examples/horovod/horovod_pytorch_example.py
@@ -1,18 +1,19 @@
import argparse
-from filelock import FileLock
-import horovod.torch as hvd
import os
+import tempfile
+
+import horovod.torch as hvd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data.distributed
+from filelock import FileLock
from torchvision import datasets, transforms
-import tempfile
+import ray.train.torch
from ray import train
from ray.train import Checkpoint, ScalingConfig
from ray.train.horovod import HorovodTrainer
-import ray.train.torch
def metric_average(val, name):
diff --git a/python/ray/train/examples/horovod/horovod_tune_example.py b/python/ray/train/examples/horovod/horovod_tune_example.py
index 05cb5b220414..9433d50635ad 100644
--- a/python/ray/train/examples/horovod/horovod_tune_example.py
+++ b/python/ray/train/examples/horovod/horovod_tune_example.py
@@ -1,12 +1,13 @@
-import numpy as np
import time
+
+import numpy as np
import torch
import ray
-from ray import train, tune
import ray.train.torch
-from ray.train.horovod import HorovodTrainer
+from ray import train, tune
from ray.train import ScalingConfig
+from ray.train.horovod import HorovodTrainer
from ray.tune.tune_config import TuneConfig
from ray.tune.tuner import Tuner
@@ -48,8 +49,8 @@ def forward(self, x):
def train_loop_per_worker(config):
- import torch
import horovod.torch as hvd
+ import torch
hvd.init()
device = ray.train.torch.get_device()
diff --git a/python/ray/train/examples/huggingface/huggingface_basic_language_modeling_example.py b/python/ray/train/examples/huggingface/huggingface_basic_language_modeling_example.py
index 8497183812a3..b213238a4027 100644
--- a/python/ray/train/examples/huggingface/huggingface_basic_language_modeling_example.py
+++ b/python/ray/train/examples/huggingface/huggingface_basic_language_modeling_example.py
@@ -18,8 +18,8 @@
import ray
import ray.data
-from ray.train.huggingface import TransformersTrainer
from ray.train import ScalingConfig
+from ray.train.huggingface import TransformersTrainer
def main(
diff --git a/python/ray/train/examples/mlflow_simple_example.py b/python/ray/train/examples/mlflow_simple_example.py
index 6c748f41a2c5..5e1a49f83bb2 100644
--- a/python/ray/train/examples/mlflow_simple_example.py
+++ b/python/ray/train/examples/mlflow_simple_example.py
@@ -1,9 +1,10 @@
from pathlib import Path
+
from ray import train
-from ray.train import ScalingConfig, RunConfig
+from ray.train import RunConfig, ScalingConfig
from ray.train.torch import TorchTrainer
-from ray.tune.logger.mlflow import MLflowLoggerCallback
from ray.tune.logger import TBXLoggerCallback
+from ray.tune.logger.mlflow import MLflowLoggerCallback
def train_func():
diff --git a/python/ray/train/examples/mosaic_cifar10_example.py b/python/ray/train/examples/mosaic_cifar10_example.py
index a8ef5bf081a7..cbb6e5bf6a44 100644
--- a/python/ray/train/examples/mosaic_cifar10_example.py
+++ b/python/ray/train/examples/mosaic_cifar10_example.py
@@ -1,11 +1,11 @@
import argparse
-from filelock import FileLock
import os
import torch
import torch.utils.data
import torchvision
-from torchvision import transforms, datasets
+from filelock import FileLock
+from torchvision import datasets, transforms
import ray
from ray import train
@@ -13,9 +13,9 @@
def trainer_init_per_worker(config):
+ import composer.optim
from composer.core.evaluator import Evaluator
from composer.models.tasks import ComposerClassifier
- import composer.optim
from torchmetrics.classification.accuracy import Accuracy
BATCH_SIZE = 64
@@ -80,6 +80,7 @@ def trainer_init_per_worker(config):
def train_mosaic_cifar10(num_workers=2, use_gpu=False, max_duration="5ep"):
from composer.algorithms import LabelSmoothing
+
from ray.train.mosaic import MosaicTrainer
trainer_init_config = {
diff --git a/python/ray/train/examples/pytorch/torch_data_prefetch_benchmark/auto_pipeline_for_host_to_device_data_transfer.py b/python/ray/train/examples/pytorch/torch_data_prefetch_benchmark/auto_pipeline_for_host_to_device_data_transfer.py
index ba56b4c4d666..2d759723861b 100644
--- a/python/ray/train/examples/pytorch/torch_data_prefetch_benchmark/auto_pipeline_for_host_to_device_data_transfer.py
+++ b/python/ray/train/examples/pytorch/torch_data_prefetch_benchmark/auto_pipeline_for_host_to_device_data_transfer.py
@@ -7,8 +7,8 @@
import torch.nn as nn
import ray.train as train
-from ray.train.torch import TorchTrainer
from ray.train import ScalingConfig
+from ray.train.torch import TorchTrainer
class Net(nn.Module):
diff --git a/python/ray/train/examples/pytorch/torch_fashion_mnist_example.py b/python/ray/train/examples/pytorch/torch_fashion_mnist_example.py
index d5aba832806f..00f2f273d1bb 100644
--- a/python/ray/train/examples/pytorch/torch_fashion_mnist_example.py
+++ b/python/ray/train/examples/pytorch/torch_fashion_mnist_example.py
@@ -1,12 +1,12 @@
import os
-from filelock import FileLock
from typing import Dict
import torch
+from filelock import FileLock
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
-from torchvision.transforms import ToTensor, Normalize
+from torchvision.transforms import Normalize, ToTensor
from tqdm import tqdm
import ray.train
diff --git a/python/ray/train/examples/pytorch/torch_linear_example.py b/python/ray/train/examples/pytorch/torch_linear_example.py
index 50a3fd514ec4..551b509ca65f 100644
--- a/python/ray/train/examples/pytorch/torch_linear_example.py
+++ b/python/ray/train/examples/pytorch/torch_linear_example.py
@@ -5,6 +5,7 @@
import numpy as np
import torch
import torch.nn as nn
+
import ray.train as train
from ray.train import Checkpoint, RunConfig, ScalingConfig
from ray.train.torch import TorchTrainer
diff --git a/python/ray/train/examples/pytorch/torch_regression_example.py b/python/ray/train/examples/pytorch/torch_regression_example.py
index 4b9acfe3b039..8bd54fbcb7ab 100644
--- a/python/ray/train/examples/pytorch/torch_regression_example.py
+++ b/python/ray/train/examples/pytorch/torch_regression_example.py
@@ -4,14 +4,13 @@
from typing import Tuple
import pandas as pd
-
import torch
import torch.nn as nn
import ray
import ray.train as train
-from ray.train import Checkpoint, ScalingConfig, DataConfig
from ray.data import Dataset
+from ray.train import Checkpoint, DataConfig, ScalingConfig
from ray.train.torch import TorchTrainer
diff --git a/python/ray/train/examples/pytorch/tune_torch_regression_example.py b/python/ray/train/examples/pytorch/tune_torch_regression_example.py
index b195ef2eb09d..f1a80398820e 100644
--- a/python/ray/train/examples/pytorch/tune_torch_regression_example.py
+++ b/python/ray/train/examples/pytorch/tune_torch_regression_example.py
@@ -1,14 +1,14 @@
import argparse
+from torch_regression_example import get_datasets, train_func
+
import ray
from ray import tune
+from ray.train import DataConfig, ScalingConfig
from ray.train.torch import TorchTrainer
-from ray.train import ScalingConfig, DataConfig
from ray.tune.tune_config import TuneConfig
from ray.tune.tuner import Tuner
-from torch_regression_example import train_func, get_datasets
-
def tune_linear(num_workers, num_samples, use_gpu):
train_dataset, val_dataset = get_datasets()
diff --git a/python/ray/train/examples/pytorch_geometric/distributed_sage_example.py b/python/ray/train/examples/pytorch_geometric/distributed_sage_example.py
index 06f0de80ff32..a1cc1d49c532 100644
--- a/python/ray/train/examples/pytorch_geometric/distributed_sage_example.py
+++ b/python/ray/train/examples/pytorch_geometric/distributed_sage_example.py
@@ -1,21 +1,20 @@
# Adapted from https://github.com/pyg-team/pytorch_geometric/blob/2.1.0
# /examples/multi_gpu/distributed_sampling.py
-import os
import argparse
-from filelock import FileLock
+import os
import torch
import torch.nn.functional as F
-
-from torch_geometric.datasets import Reddit, FakeDataset
+from filelock import FileLock
+from torch_geometric.datasets import FakeDataset, Reddit
from torch_geometric.loader import NeighborSampler
from torch_geometric.nn import SAGEConv
+from torch_geometric.transforms import RandomNodeSplit
from ray import train
from ray.train import ScalingConfig
from ray.train.torch import TorchTrainer
-from torch_geometric.transforms import RandomNodeSplit
class SAGE(torch.nn.Module):
diff --git a/python/ray/train/examples/tf/tensorflow_autoencoder_example.py b/python/ray/train/examples/tf/tensorflow_autoencoder_example.py
index b4dcd5afc974..e77bd9f0da23 100644
--- a/python/ray/train/examples/tf/tensorflow_autoencoder_example.py
+++ b/python/ray/train/examples/tf/tensorflow_autoencoder_example.py
@@ -3,20 +3,19 @@
# https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras
# https://blog.keras.io/building-autoencoders-in-keras.html
import argparse
+
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_datasets as tfds
-from ray import train
-from ray.data.datasource import SimpleTensorFlowDatasource
-from ray.train import Result
-from ray.train.tensorflow import TensorflowTrainer
-from ray.train.tensorflow import prepare_dataset_shard
-from ray.air.integrations.keras import ReportCheckpointCallback
import ray
-
+from ray import train
+from ray.air.integrations.keras import ReportCheckpointCallback
+from ray.data.datasource import SimpleTensorFlowDatasource
from ray.data.extensions import TensorArray
+from ray.train import Result
+from ray.train.tensorflow import TensorflowTrainer, prepare_dataset_shard
def get_dataset(split_type="train"):
diff --git a/python/ray/train/examples/tf/tensorflow_mnist_example.py b/python/ray/train/examples/tf/tensorflow_mnist_example.py
index d69e2e2d8d5f..3fd5d7c759df 100644
--- a/python/ray/train/examples/tf/tensorflow_mnist_example.py
+++ b/python/ray/train/examples/tf/tensorflow_mnist_example.py
@@ -2,16 +2,16 @@
# Original code:
# https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras
import argparse
-from filelock import FileLock
import json
import os
import numpy as np
import tensorflow as tf
+from filelock import FileLock
-from ray.train.tensorflow import TensorflowTrainer
from ray.air.integrations.keras import ReportCheckpointCallback
from ray.train import Result, RunConfig, ScalingConfig
+from ray.train.tensorflow import TensorflowTrainer
def mnist_dataset(batch_size: int) -> tf.data.Dataset:
diff --git a/python/ray/train/examples/tf/tensorflow_regression_example.py b/python/ray/train/examples/tf/tensorflow_regression_example.py
index 307f4dfdd135..8ada83596f26 100644
--- a/python/ray/train/examples/tf/tensorflow_regression_example.py
+++ b/python/ray/train/examples/tf/tensorflow_regression_example.py
@@ -4,9 +4,9 @@
import ray
from ray import train
-from ray.train import Result, ScalingConfig
from ray.air.integrations.keras import ReportCheckpointCallback
from ray.data.preprocessors import Concatenator
+from ray.train import Result, ScalingConfig
from ray.train.tensorflow import TensorflowTrainer
diff --git a/python/ray/train/examples/tf/tune_tensorflow_autoencoder_example.py b/python/ray/train/examples/tf/tune_tensorflow_autoencoder_example.py
index bfcdd63bdeba..f2d4c0c3ec89 100644
--- a/python/ray/train/examples/tf/tune_tensorflow_autoencoder_example.py
+++ b/python/ray/train/examples/tf/tune_tensorflow_autoencoder_example.py
@@ -2,9 +2,8 @@
import ray
from ray import tune
-from ray.train.tensorflow import TensorflowTrainer
-
from ray.train.examples.tf.tensorflow_mnist_example import train_func
+from ray.train.tensorflow import TensorflowTrainer
from ray.tune.tune_config import TuneConfig
from ray.tune.tuner import Tuner
diff --git a/python/ray/train/examples/tf/tune_tensorflow_mnist_example.py b/python/ray/train/examples/tf/tune_tensorflow_mnist_example.py
index c61bb7aeeee2..a20db160533c 100644
--- a/python/ray/train/examples/tf/tune_tensorflow_mnist_example.py
+++ b/python/ray/train/examples/tf/tune_tensorflow_mnist_example.py
@@ -2,10 +2,9 @@
import ray
from ray import tune
-from ray.train.tensorflow import TensorflowTrainer
from ray.train import ScalingConfig
-
from ray.train.examples.tf.tensorflow_mnist_example import train_func
+from ray.train.tensorflow import TensorflowTrainer
from ray.tune.tune_config import TuneConfig
from ray.tune.tuner import Tuner
diff --git a/python/ray/train/examples/transformers/transformers_torch_trainer_basic.py b/python/ray/train/examples/transformers/transformers_torch_trainer_basic.py
index 630177424f28..1c3159164853 100644
--- a/python/ray/train/examples/transformers/transformers_torch_trainer_basic.py
+++ b/python/ray/train/examples/transformers/transformers_torch_trainer_basic.py
@@ -1,16 +1,17 @@
-# Minimal Example adapted from https://huggingface.co/docs/transformers/training
-from datasets import load_dataset
-from transformers import AutoTokenizer
-from transformers import AutoModelForSequenceClassification
-from transformers import TrainingArguments, Trainer
-import numpy as np
import evaluate
+import numpy as np
-from ray.train.huggingface.transformers import (
- prepare_trainer,
- RayTrainReportCallback,
+# Minimal Example adapted from https://huggingface.co/docs/transformers/training
+from datasets import load_dataset
+from transformers import (
+ AutoModelForSequenceClassification,
+ AutoTokenizer,
+ Trainer,
+ TrainingArguments,
)
+
from ray.train import ScalingConfig
+from ray.train.huggingface.transformers import RayTrainReportCallback, prepare_trainer
from ray.train.torch import TorchTrainer
diff --git a/python/ray/train/gbdt_trainer.py b/python/ray/train/gbdt_trainer.py
index 22bbd0671582..60bad0c25839 100644
--- a/python/ray/train/gbdt_trainer.py
+++ b/python/ray/train/gbdt_trainer.py
@@ -1,18 +1,18 @@
-import os
import logging
+import os
import tempfile
import warnings
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, Optional, Type
from ray import train, tune
+from ray._private.dict import flatten_dict
from ray.train import Checkpoint, RunConfig, ScalingConfig
from ray.train.constants import MODEL_KEY, TRAIN_DATASET_KEY
from ray.train.trainer import BaseTrainer, GenDataset
from ray.tune import Trainable
from ray.tune.execution.placement_groups import PlacementGroupFactory
from ray.util.annotations import DeveloperAPI
-from ray._private.dict import flatten_dict
if TYPE_CHECKING:
import xgboost_ray
diff --git a/python/ray/train/horovod/__init__.py b/python/ray/train/horovod/__init__.py
index 476da7f2d0b4..60342b1d0fe5 100644
--- a/python/ray/train/horovod/__init__.py
+++ b/python/ray/train/horovod/__init__.py
@@ -1,3 +1,4 @@
+# isort: off
try:
import horovod # noqa: F401
except ModuleNotFoundError:
@@ -6,8 +7,9 @@
"install 'horovod[pytorch]''. To install Horovod with TensorFlow support, "
"run 'pip install 'horovod[tensorflow]''."
)
+# isort: on
-from ray.train.horovod.horovod_trainer import HorovodTrainer
from ray.train.horovod.config import HorovodConfig
+from ray.train.horovod.horovod_trainer import HorovodTrainer
__all__ = ["HorovodConfig", "HorovodTrainer"]
diff --git a/python/ray/train/horovod/config.py b/python/ray/train/horovod/config.py
index 215d471527f1..acd56091d3a4 100644
--- a/python/ray/train/horovod/config.py
+++ b/python/ray/train/horovod/config.py
@@ -1,17 +1,15 @@
-from typing import Optional, Set
-
import os
from dataclasses import dataclass
-
-import ray
-from ray.train.backend import BackendConfig, Backend
-from ray.train._internal.utils import update_env_vars
-from ray.train._internal.worker_group import WorkerGroup, Worker
+from typing import Optional, Set
from horovod.ray.runner import Coordinator
from horovod.ray.utils import detect_nics, nics_to_env_var
from horovod.runner.common.util import secret, timeout
+import ray
+from ray.train._internal.utils import update_env_vars
+from ray.train._internal.worker_group import Worker, WorkerGroup
+from ray.train.backend import Backend, BackendConfig
from ray.util import PublicAPI
diff --git a/python/ray/train/horovod/horovod_trainer.py b/python/ray/train/horovod/horovod_trainer.py
index 0bc11cef0102..0a72008e6997 100644
--- a/python/ray/train/horovod/horovod_trainer.py
+++ b/python/ray/train/horovod/horovod_trainer.py
@@ -1,12 +1,10 @@
-from typing import Any, Dict, Callable, Optional, Union, TYPE_CHECKING
+from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union
-from ray.air.config import ScalingConfig, RunConfig
+from ray.air.config import RunConfig, ScalingConfig
from ray.train import Checkpoint, DataConfig
-from ray.train.trainer import GenDataset
-
-
from ray.train.data_parallel_trainer import DataParallelTrainer
from ray.train.horovod.config import HorovodConfig
+from ray.train.trainer import GenDataset
from ray.util.annotations import PublicAPI
if TYPE_CHECKING:
diff --git a/python/ray/train/huggingface/__init__.py b/python/ray/train/huggingface/__init__.py
index 2b5534fce990..ca55341a2044 100644
--- a/python/ray/train/huggingface/__init__.py
+++ b/python/ray/train/huggingface/__init__.py
@@ -1,15 +1,7 @@
-from ray.train.huggingface.huggingface_checkpoint import (
- HuggingFaceCheckpoint,
-)
-from ray.train.huggingface.huggingface_predictor import (
- HuggingFacePredictor,
-)
-from ray.train.huggingface.huggingface_trainer import (
- HuggingFaceTrainer,
-)
-
from ray.train.huggingface.accelerate import AccelerateTrainer
-
+from ray.train.huggingface.huggingface_checkpoint import HuggingFaceCheckpoint
+from ray.train.huggingface.huggingface_predictor import HuggingFacePredictor
+from ray.train.huggingface.huggingface_trainer import HuggingFaceTrainer
from ray.train.huggingface.transformers import (
TransformersCheckpoint,
TransformersPredictor,
diff --git a/python/ray/train/huggingface/accelerate/_accelerate_utils.py b/python/ray/train/huggingface/accelerate/_accelerate_utils.py
index 497878df2d78..e5ec8c084e88 100644
--- a/python/ray/train/huggingface/accelerate/_accelerate_utils.py
+++ b/python/ray/train/huggingface/accelerate/_accelerate_utils.py
@@ -16,21 +16,15 @@
import logging
import os
-from argparse import Namespace
-from typing import Optional, Tuple, Union
import tempfile
-from pathlib import Path
-from packaging.version import Version
-
+from argparse import Namespace
from contextlib import nullcontext
+from pathlib import Path
+from typing import Optional, Tuple, Union
import accelerate
-
-if Version(accelerate.__version__) < Version("0.17.0.dev0"):
- raise ImportError(
- f"AccelerateTrainer requires accelerate>=0.17.0, got {accelerate.__version__}"
- )
-
+from accelerate.commands.config import default_config_file, load_config_from_file
+from accelerate.commands.config.config_args import ClusterConfig
from accelerate.commands.launch import (
ComputeEnvironment,
_validate_launch_command,
@@ -40,8 +34,13 @@
prepare_simple_launcher_cmd_env,
)
from accelerate.utils import is_deepspeed_available
-from accelerate.commands.config import default_config_file, load_config_from_file
-from accelerate.commands.config.config_args import ClusterConfig
+from packaging.version import Version
+
+if Version(accelerate.__version__) < Version("0.17.0.dev0"):
+ raise ImportError(
+ f"AccelerateTrainer requires accelerate>=0.17.0, got {accelerate.__version__}"
+ )
+
logger = logging.getLogger(__name__)
diff --git a/python/ray/train/huggingface/accelerate/accelerate_trainer.py b/python/ray/train/huggingface/accelerate/accelerate_trainer.py
index 06d15dc331b8..708581efc4b4 100644
--- a/python/ray/train/huggingface/accelerate/accelerate_trainer.py
+++ b/python/ray/train/huggingface/accelerate/accelerate_trainer.py
@@ -2,25 +2,22 @@
import os
import tempfile
from pathlib import Path
-from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Type, Tuple, Union
+from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Type, Union
from ray import train
-from ray.util.annotations import Deprecated
-from ray.train import Checkpoint, RunConfig, ScalingConfig
-from ray.train import DataConfig
-from ray.train.torch import TorchConfig
-from ray.train.trainer import GenDataset
-
-from ray.train.torch import TorchTrainer, get_device
+from ray.train import Checkpoint, DataConfig, RunConfig, ScalingConfig
+from ray.train.torch import TorchConfig, TorchTrainer, get_device
from ray.train.torch.config import _set_torch_distributed_env_vars
+from ray.train.trainer import GenDataset
+from ray.util.annotations import Deprecated
ACCELERATE_IMPORT_ERROR: Optional[ImportError] = None
try:
from ray.train.huggingface.accelerate._accelerate_utils import (
- launch_command,
- AccelerateDefaultNamespace,
AccelerateConfigWrapper,
+ AccelerateDefaultNamespace,
+ launch_command,
load_accelerate_config,
)
except ImportError as e:
diff --git a/python/ray/train/huggingface/huggingface_checkpoint.py b/python/ray/train/huggingface/huggingface_checkpoint.py
index 5b8d85303eea..a4f878c13c84 100644
--- a/python/ray/train/huggingface/huggingface_checkpoint.py
+++ b/python/ray/train/huggingface/huggingface_checkpoint.py
@@ -1,6 +1,5 @@
-from ray.util.annotations import Deprecated
-
from ._deprecation_msg import deprecation_msg
+from ray.util.annotations import Deprecated
# TODO(ml-team): [code_removal]
diff --git a/python/ray/train/huggingface/huggingface_predictor.py b/python/ray/train/huggingface/huggingface_predictor.py
index 3f259d02a423..eb8ffd7714ea 100644
--- a/python/ray/train/huggingface/huggingface_predictor.py
+++ b/python/ray/train/huggingface/huggingface_predictor.py
@@ -1,11 +1,10 @@
import warnings
-from ray.util.annotations import Deprecated
+from ._deprecation_msg import deprecation_msg
from ray.train.huggingface.transformers.transformers_predictor import (
TransformersPredictor,
)
-
-from ._deprecation_msg import deprecation_msg
+from ray.util.annotations import Deprecated
@Deprecated(message=deprecation_msg)
diff --git a/python/ray/train/huggingface/huggingface_trainer.py b/python/ray/train/huggingface/huggingface_trainer.py
index 2375598e5b2f..b1c96cb19db7 100644
--- a/python/ray/train/huggingface/huggingface_trainer.py
+++ b/python/ray/train/huggingface/huggingface_trainer.py
@@ -1,11 +1,8 @@
import warnings
-from ray.util.annotations import Deprecated
-
-from ray.train.huggingface.transformers.transformers_trainer import (
- TransformersTrainer,
-)
from ._deprecation_msg import deprecation_msg
+from ray.train.huggingface.transformers.transformers_trainer import TransformersTrainer
+from ray.util.annotations import Deprecated
@Deprecated(message=deprecation_msg)
diff --git a/python/ray/train/huggingface/transformers/__init__.py b/python/ray/train/huggingface/transformers/__init__.py
index e6337099d387..39a8bff93795 100644
--- a/python/ray/train/huggingface/transformers/__init__.py
+++ b/python/ray/train/huggingface/transformers/__init__.py
@@ -1,16 +1,14 @@
+from ray.train.huggingface.transformers._transformers_utils import (
+ RayTrainReportCallback,
+ prepare_trainer,
+)
from ray.train.huggingface.transformers.transformers_checkpoint import (
TransformersCheckpoint,
)
from ray.train.huggingface.transformers.transformers_predictor import (
TransformersPredictor,
)
-from ray.train.huggingface.transformers.transformers_trainer import (
- TransformersTrainer,
-)
-from ray.train.huggingface.transformers._transformers_utils import (
- prepare_trainer,
- RayTrainReportCallback,
-)
+from ray.train.huggingface.transformers.transformers_trainer import TransformersTrainer
__all__ = [
"TransformersCheckpoint",
diff --git a/python/ray/train/huggingface/transformers/_transformers_utils.py b/python/ray/train/huggingface/transformers/_transformers_utils.py
index 7b64d7a122f0..953b64f58d87 100644
--- a/python/ray/train/huggingface/transformers/_transformers_utils.py
+++ b/python/ray/train/huggingface/transformers/_transformers_utils.py
@@ -1,24 +1,24 @@
-import os
import logging
+import os
import shutil
from pathlib import Path
-from typing import Any, Iterator, Optional, Tuple, Type
from tempfile import TemporaryDirectory
+from typing import Any, Iterator, Optional, Tuple, Type
from torch.utils.data import DataLoader, Dataset, IterableDataset
import ray
from ray import train
+from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag
from ray.data import DataIterator
-from ray.data.iterator import _IterableFromIterator
-from ray.data.dataset import MaterializedDataset
from ray.data._internal.iterator.stream_split_iterator import StreamSplitDataIterator
+from ray.data.dataset import MaterializedDataset
+from ray.data.iterator import _IterableFromIterator
from ray.train import Checkpoint
from ray.train.huggingface.transformers.transformers_checkpoint import (
TransformersCheckpoint,
)
from ray.util import PublicAPI
-from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag
logger = logging.getLogger(__name__)
diff --git a/python/ray/train/huggingface/transformers/transformers_checkpoint.py b/python/ray/train/huggingface/transformers/transformers_checkpoint.py
index fefe4471e298..391d5b89fde5 100644
--- a/python/ray/train/huggingface/transformers/transformers_checkpoint.py
+++ b/python/ray/train/huggingface/transformers/transformers_checkpoint.py
@@ -1,7 +1,6 @@
import os
import tempfile
-from typing import TYPE_CHECKING, Type, Optional, Union
-
+from typing import TYPE_CHECKING, Optional, Type, Union
import torch
diff --git a/python/ray/train/huggingface/transformers/transformers_predictor.py b/python/ray/train/huggingface/transformers/transformers_predictor.py
index 2f2a0c4a9b75..c5f51bc146ce 100644
--- a/python/ray/train/huggingface/transformers/transformers_predictor.py
+++ b/python/ray/train/huggingface/transformers/transformers_predictor.py
@@ -43,10 +43,11 @@ def tf_get_gpus():
if TYPE_CHECKING:
+ from transformers.modeling_tf_utils import TFPreTrainedModel
+ from transformers.modeling_utils import PreTrainedModel
+
from ray.data.preprocessor import Preprocessor
from ray.train.huggingface import TransformersCheckpoint
- from transformers.modeling_utils import PreTrainedModel
- from transformers.modeling_tf_utils import TFPreTrainedModel
logger = logging.getLogger(__name__)
diff --git a/python/ray/train/huggingface/transformers/transformers_trainer.py b/python/ray/train/huggingface/transformers/transformers_trainer.py
index b0be9c5706d8..8646a93b3d33 100644
--- a/python/ray/train/huggingface/transformers/transformers_trainer.py
+++ b/python/ray/train/huggingface/transformers/transformers_trainer.py
@@ -3,24 +3,20 @@
import os
import sys
import warnings
-from packaging.version import Version
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Type
+from packaging.version import Version
from torch.utils.data import Dataset as TorchDataset
import ray.train
from ray.air.config import RunConfig, ScalingConfig
-from ray.train.constants import (
- EVALUATION_DATASET_KEY,
- TRAIN_DATASET_KEY,
-)
from ray.train import Checkpoint, DataConfig
+from ray.train.constants import EVALUATION_DATASET_KEY, TRAIN_DATASET_KEY
from ray.train.data_parallel_trainer import DataParallelTrainer
from ray.train.torch import TorchConfig, TorchTrainer
from ray.train.trainer import GenDataset
from ray.util.annotations import Deprecated
-
TRANSFORMERS_IMPORT_ERROR: Optional[ImportError] = None
try:
diff --git a/python/ray/train/lightgbm/lightgbm_trainer.py b/python/ray/train/lightgbm/lightgbm_trainer.py
index d04f4eb50e1a..7a32db5e6b23 100644
--- a/python/ray/train/lightgbm/lightgbm_trainer.py
+++ b/python/ray/train/lightgbm/lightgbm_trainer.py
@@ -1,20 +1,20 @@
import os
-from typing import Dict, Any, Union
+from typing import Any, Dict, Union
-try:
- from packaging.version import Version
-except ImportError:
- from distutils.version import LooseVersion as Version
+import lightgbm
+import lightgbm_ray
+import xgboost_ray
+from lightgbm_ray.tune import TuneReportCheckpointCallback
from ray.train import Checkpoint
from ray.train.gbdt_trainer import GBDTTrainer
from ray.train.lightgbm import LightGBMCheckpoint
from ray.util.annotations import PublicAPI
-import lightgbm
-import lightgbm_ray
-import xgboost_ray
-from lightgbm_ray.tune import TuneReportCheckpointCallback
+try:
+ from packaging.version import Version
+except ImportError:
+ from distutils.version import LooseVersion as Version
@PublicAPI(stability="beta")
diff --git a/python/ray/train/lightning/__init__.py b/python/ray/train/lightning/__init__.py
index 9a923aea8aaa..7441f7b35715 100644
--- a/python/ray/train/lightning/__init__.py
+++ b/python/ray/train/lightning/__init__.py
@@ -8,19 +8,19 @@
)
# isort: on
-from ray.train.lightning.lightning_trainer import (
- LightningConfigBuilder,
- LightningTrainer,
-)
-from ray.train.lightning.lightning_checkpoint import LightningCheckpoint
-from ray.train.lightning.lightning_predictor import LightningPredictor
from ray.train.lightning._lightning_utils import (
- prepare_trainer,
RayDDPStrategy,
- RayFSDPStrategy,
RayDeepSpeedStrategy,
+ RayFSDPStrategy,
RayLightningEnvironment,
RayTrainReportCallback,
+ prepare_trainer,
+)
+from ray.train.lightning.lightning_checkpoint import LightningCheckpoint
+from ray.train.lightning.lightning_predictor import LightningPredictor
+from ray.train.lightning.lightning_trainer import (
+ LightningConfigBuilder,
+ LightningTrainer,
)
__all__ = [
diff --git a/python/ray/train/lightning/_lightning_utils.py b/python/ray/train/lightning/_lightning_utils.py
index 1439cc68783c..f2958107320c 100644
--- a/python/ray/train/lightning/_lightning_utils.py
+++ b/python/ray/train/lightning/_lightning_utils.py
@@ -1,25 +1,25 @@
-import os
-import ray
-from ray import train
-from ray.air.constants import MODEL_KEY
-from ray.data.dataset import DataIterator
-from ray.util import PublicAPI
-from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag
-
import logging
+import os
import shutil
-import torch
import tempfile
-from ray.train import Checkpoint
-from ray.train.lightning.lightning_checkpoint import LightningCheckpoint
-from packaging.version import Version
from typing import Any, Dict, Optional
-from torch.utils.data import IterableDataset, DataLoader
import pytorch_lightning as pl
-from pytorch_lightning.callbacks import ModelCheckpoint, Callback
+import torch
+from packaging.version import Version
+from pytorch_lightning.callbacks import Callback, ModelCheckpoint
from pytorch_lightning.plugins.environments import LightningEnvironment
from pytorch_lightning.strategies import DDPStrategy, DeepSpeedStrategy
+from torch.utils.data import DataLoader, IterableDataset
+
+import ray
+from ray import train
+from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag
+from ray.air.constants import MODEL_KEY
+from ray.data.dataset import DataIterator
+from ray.train import Checkpoint
+from ray.train.lightning.lightning_checkpoint import LightningCheckpoint
+from ray.util import PublicAPI
_LIGHTNING_GREATER_EQUAL_2_0 = Version(pl.__version__) >= Version("2.0.0")
_TORCH_GREATER_EQUAL_1_12 = Version(torch.__version__) >= Version("1.12.0")
diff --git a/python/ray/train/lightning/lightning_checkpoint.py b/python/ray/train/lightning/lightning_checkpoint.py
index d0fcdee90ae9..b01205a525ad 100644
--- a/python/ray/train/lightning/lightning_checkpoint.py
+++ b/python/ray/train/lightning/lightning_checkpoint.py
@@ -1,11 +1,11 @@
-import os
import logging
-import pytorch_lightning as pl
-import tempfile
+import os
import shutil
-
+import tempfile
from inspect import isclass
-from typing import Optional, Type, Dict, Any
+from typing import Any, Dict, Optional, Type
+
+import pytorch_lightning as pl
from ray.air.constants import MODEL_KEY
from ray.data import Preprocessor
diff --git a/python/ray/train/lightning/lightning_predictor.py b/python/ray/train/lightning/lightning_predictor.py
index 2674efdb1773..de2b1b07bcbc 100644
--- a/python/ray/train/lightning/lightning_predictor.py
+++ b/python/ray/train/lightning/lightning_predictor.py
@@ -1,11 +1,12 @@
import logging
from typing import Optional, Type
+import pytorch_lightning as pl
+
from ray.data.preprocessor import Preprocessor
from ray.train.lightning.lightning_checkpoint import LightningCheckpoint
from ray.train.torch.torch_predictor import TorchPredictor
from ray.util.annotations import PublicAPI
-import pytorch_lightning as pl
logger = logging.getLogger(__name__)
diff --git a/python/ray/train/lightning/lightning_trainer.py b/python/ray/train/lightning/lightning_trainer.py
index fb1a04a3acb6..4b81f48fb6f4 100644
--- a/python/ray/train/lightning/lightning_trainer.py
+++ b/python/ray/train/lightning/lightning_trainer.py
@@ -1,30 +1,28 @@
+import logging
import os
-import pytorch_lightning as pl
-
from copy import copy
from inspect import isclass
from typing import Any, Dict, Optional, Type
+import pytorch_lightning as pl
+
from ray.air import session
from ray.air.constants import MODEL_KEY
from ray.data.preprocessor import Preprocessor
-from ray.train import Checkpoint, DataConfig, CheckpointConfig, RunConfig, ScalingConfig
-from ray.train.trainer import GenDataset
-from ray.train.torch import TorchTrainer
-from ray.train.torch.config import TorchConfig
-from ray.util.annotations import Deprecated
+from ray.train import Checkpoint, CheckpointConfig, DataConfig, RunConfig, ScalingConfig
from ray.train.lightning._lightning_utils import (
+ RayDataModule,
RayDDPStrategy,
- RayFSDPStrategy,
RayDeepSpeedStrategy,
+ RayFSDPStrategy,
RayLightningEnvironment,
- RayDataModule,
RayModelCheckpoint,
prepare_trainer,
)
-
-
-import logging
+from ray.train.torch import TorchTrainer
+from ray.train.torch.config import TorchConfig
+from ray.train.trainer import GenDataset
+from ray.util.annotations import Deprecated
logger = logging.getLogger(__name__)
diff --git a/python/ray/train/mosaic/_mosaic_utils.py b/python/ray/train/mosaic/_mosaic_utils.py
index ce916ed5bd2f..1bc37c85cb84 100644
--- a/python/ray/train/mosaic/_mosaic_utils.py
+++ b/python/ray/train/mosaic/_mosaic_utils.py
@@ -1,9 +1,9 @@
-from typing import Any, Dict, Optional, List
-import torch
+from typing import Any, Dict, List, Optional
+import torch
+from composer.core.state import State
from composer.loggers import Logger
from composer.loggers.logger_destination import LoggerDestination
-from composer.core.state import State
import ray.train
diff --git a/python/ray/train/mosaic/mosaic_trainer.py b/python/ray/train/mosaic/mosaic_trainer.py
index a0d78b8c654d..f6e37132fc83 100644
--- a/python/ray/train/mosaic/mosaic_trainer.py
+++ b/python/ray/train/mosaic/mosaic_trainer.py
@@ -1,9 +1,9 @@
import inspect
-from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Type
import warnings
+from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Type
-from composer.trainer import Trainer
from composer.loggers.logger_destination import LoggerDestination
+from composer.trainer import Trainer
from ray.train import Checkpoint, DataConfig, RunConfig, ScalingConfig
from ray.train.mosaic._mosaic_utils import RayLogger
diff --git a/python/ray/train/predictor.py b/python/ray/train/predictor.py
index 5b8686c58da2..7e25bdaa8022 100644
--- a/python/ray/train/predictor.py
+++ b/python/ray/train/predictor.py
@@ -1,17 +1,17 @@
import abc
-from typing import Dict, Type, Optional, Union, Callable
+from typing import Callable, Dict, Optional, Type, Union
import numpy as np
import pandas as pd
-from ray.train import Checkpoint
from ray.air.data_batch_type import DataBatchType
from ray.air.util.data_batch_conversion import (
BatchFormat,
- _convert_batch_type_to_pandas,
_convert_batch_type_to_numpy,
+ _convert_batch_type_to_pandas,
)
from ray.data import Preprocessor
+from ray.train import Checkpoint
from ray.util.annotations import DeveloperAPI, PublicAPI
try:
diff --git a/python/ray/train/sklearn/sklearn_checkpoint.py b/python/ray/train/sklearn/sklearn_checkpoint.py
index 8a96b5809758..429af91ab458 100644
--- a/python/ray/train/sklearn/sklearn_checkpoint.py
+++ b/python/ray/train/sklearn/sklearn_checkpoint.py
@@ -3,8 +3,9 @@
from typing import TYPE_CHECKING, Optional, Union
from sklearn.base import BaseEstimator
-from ray.train._internal.framework_checkpoint import FrameworkCheckpoint
+
import ray.cloudpickle as cpickle
+from ray.train._internal.framework_checkpoint import FrameworkCheckpoint
from ray.util.annotations import PublicAPI
if TYPE_CHECKING:
diff --git a/python/ray/train/sklearn/sklearn_predictor.py b/python/ray/train/sklearn/sklearn_predictor.py
index e836eec099da..1fdc263d721f 100644
--- a/python/ray/train/sklearn/sklearn_predictor.py
+++ b/python/ray/train/sklearn/sklearn_predictor.py
@@ -8,10 +8,10 @@
from ray.air.data_batch_type import DataBatchType
from ray.air.util.data_batch_conversion import _unwrap_ndarray_object_type_if_needed
from ray.train.predictor import Predictor
-from ray.train.sklearn._sklearn_utils import _set_cpu_params
from ray.train.sklearn import SklearnCheckpoint
-from ray.util.joblib import register_ray
+from ray.train.sklearn._sklearn_utils import _set_cpu_params
from ray.util.annotations import PublicAPI
+from ray.util.joblib import register_ray
if TYPE_CHECKING:
from ray.data.preprocessor import Preprocessor
diff --git a/python/ray/train/sklearn/sklearn_trainer.py b/python/ray/train/sklearn/sklearn_trainer.py
index 5f872bbc4d69..79921a96944e 100644
--- a/python/ray/train/sklearn/sklearn_trainer.py
+++ b/python/ray/train/sklearn/sklearn_trainer.py
@@ -1,11 +1,11 @@
import logging
import os
+import tempfile
import warnings
from collections import defaultdict
from time import time
-import tempfile
from traceback import format_exc
-from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, Optional, Union, Tuple
+from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, Optional, Tuple, Union
import numpy as np
import pandas as pd
diff --git a/python/ray/train/tensorflow/config.py b/python/ray/train/tensorflow/config.py
index 52a827ce321e..ae3baedb2a6f 100644
--- a/python/ray/train/tensorflow/config.py
+++ b/python/ray/train/tensorflow/config.py
@@ -5,12 +5,11 @@
from typing import List
import ray
-from ray.train.backend import BackendConfig, Backend
from ray.train._internal.utils import get_address_and_port
from ray.train._internal.worker_group import WorkerGroup
+from ray.train.backend import Backend, BackendConfig
from ray.util import PublicAPI
-
logger = logging.getLogger(__name__)
diff --git a/python/ray/train/tensorflow/tensorflow_checkpoint.py b/python/ray/train/tensorflow/tensorflow_checkpoint.py
index 90e786c09b76..de8233ba7878 100644
--- a/python/ray/train/tensorflow/tensorflow_checkpoint.py
+++ b/python/ray/train/tensorflow/tensorflow_checkpoint.py
@@ -1,8 +1,8 @@
import os
import shutil
+import tempfile
from typing import TYPE_CHECKING, Optional
-import tempfile
import tensorflow as tf
from tensorflow import keras
diff --git a/python/ray/train/tensorflow/tensorflow_predictor.py b/python/ray/train/tensorflow/tensorflow_predictor.py
index a9eb2f54cfa6..ab353c333727 100644
--- a/python/ray/train/tensorflow/tensorflow_predictor.py
+++ b/python/ray/train/tensorflow/tensorflow_predictor.py
@@ -4,11 +4,11 @@
import numpy as np
import tensorflow as tf
-from ray.util import log_once
-from ray.train.predictor import DataBatchType
from ray.air._internal.tensorflow_utils import convert_ndarray_batch_to_tf_tensor_batch
from ray.train._internal.dl_predictor import DLPredictor
+from ray.train.predictor import DataBatchType
from ray.train.tensorflow import TensorflowCheckpoint
+from ray.util import log_once
from ray.util.annotations import DeveloperAPI, PublicAPI
if TYPE_CHECKING:
diff --git a/python/ray/train/tensorflow/tensorflow_trainer.py b/python/ray/train/tensorflow/tensorflow_trainer.py
index 6333707e106a..ffcf7ae84b77 100644
--- a/python/ray/train/tensorflow/tensorflow_trainer.py
+++ b/python/ray/train/tensorflow/tensorflow_trainer.py
@@ -1,10 +1,9 @@
-from typing import Any, Callable, Optional, Dict, Union, TYPE_CHECKING
+from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union
-from ray.train import DataConfig
+from ray.train import Checkpoint, DataConfig, RunConfig, ScalingConfig
+from ray.train.data_parallel_trainer import DataParallelTrainer
from ray.train.tensorflow.config import TensorflowConfig
from ray.train.trainer import GenDataset
-from ray.train.data_parallel_trainer import DataParallelTrainer
-from ray.train import Checkpoint, ScalingConfig, RunConfig
from ray.util import PublicAPI
if TYPE_CHECKING:
diff --git a/python/ray/train/tensorflow/train_loop_utils.py b/python/ray/train/tensorflow/train_loop_utils.py
index 088b42a8e8d2..b7862d6a365f 100644
--- a/python/ray/train/tensorflow/train_loop_utils.py
+++ b/python/ray/train/tensorflow/train_loop_utils.py
@@ -1,7 +1,7 @@
-from ray.util.annotations import PublicAPI
-
import tensorflow as tf
+from ray.util.annotations import PublicAPI
+
@PublicAPI(stability="beta")
def prepare_dataset_shard(tf_dataset_shard: tf.data.Dataset):
diff --git a/python/ray/train/tests/conftest.py b/python/ray/train/tests/conftest.py
index 302306f4fc6d..4f4af4fa3501 100644
--- a/python/ray/train/tests/conftest.py
+++ b/python/ray/train/tests/conftest.py
@@ -1,15 +1,15 @@
-# Trigger pytest hook to automatically zip test cluster logs to archive dir on failure
-from ray.tests.conftest import pytest_runtest_makereport # noqa
-from ray.tests.conftest import propagate_logs # noqa
-
-import boto3
import logging
+import boto3
import pytest
import ray
-from ray.cluster_utils import Cluster
from ray._private.test_utils import simulate_storage
+from ray.cluster_utils import Cluster
+
+# Trigger pytest hook to automatically zip test cluster logs to archive dir on failure
+from ray.tests.conftest import propagate_logs # noqa
+from ray.tests.conftest import pytest_runtest_makereport # noqa
@pytest.fixture
diff --git a/python/ray/train/tests/lightning_test_utils.py b/python/ray/train/tests/lightning_test_utils.py
index cd2d6718fe65..fab7c716ec0a 100644
--- a/python/ray/train/tests/lightning_test_utils.py
+++ b/python/ray/train/tests/lightning_test_utils.py
@@ -1,8 +1,7 @@
+import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
-import pytorch_lightning as pl
-
from torch.utils.data import DataLoader
from torchmetrics import Accuracy
diff --git a/python/ray/train/tests/test_accelerate_trainer_gpu.py b/python/ray/train/tests/test_accelerate_trainer_gpu.py
index 1fbefef5a5bb..02ec2d962b70 100644
--- a/python/ray/train/tests/test_accelerate_trainer_gpu.py
+++ b/python/ray/train/tests/test_accelerate_trainer_gpu.py
@@ -1,16 +1,16 @@
import os
+from tempfile import TemporaryDirectory
+
import pytest
import torch
-from tempfile import TemporaryDirectory
+import torch.nn as nn
+from accelerate import Accelerator
import ray
-import torch.nn as nn
-from ray.train.examples.pytorch.torch_linear_example import LinearDataset
-from ray.train import ScalingConfig
import ray.train as train
-from ray.train import Checkpoint
+from ray.train import Checkpoint, ScalingConfig
+from ray.train.examples.pytorch.torch_linear_example import LinearDataset
from ray.train.huggingface import AccelerateTrainer
-from accelerate import Accelerator
ACCELERATE_CONFIG_CPU = """compute_environment: LOCAL_MACHINE
deepspeed_config: {}
diff --git a/python/ray/train/tests/test_backend.py b/python/ray/train/tests/test_backend.py
index eafbccab3469..cdbbff60471a 100644
--- a/python/ray/train/tests/test_backend.py
+++ b/python/ray/train/tests/test_backend.py
@@ -1,10 +1,10 @@
import math
import os
import tempfile
+import time
from unittest.mock import patch
import pytest
-import time
import ray
import ray._private.ray_constants as ray_constants
@@ -13,20 +13,20 @@
# Trigger pytest hook to automatically zip test cluster logs to archive dir on failure
from ray.tests.conftest import pytest_runtest_makereport # noqa
+from ray.train import DataConfig
from ray.train._internal.backend_executor import (
BackendExecutor,
InactiveWorkerGroupError,
TrainBackendError,
TrainingWorkerError,
)
-from ray.train import DataConfig
-from ray.train._internal.worker_group import WorkerGroup, WorkerMetadata
from ray.train._internal.storage import StorageContext
+from ray.train._internal.worker_group import WorkerGroup, WorkerMetadata
from ray.train.backend import Backend, BackendConfig
from ray.train.constants import (
ENABLE_SHARE_CUDA_VISIBLE_DEVICES_ENV,
- TRAIN_ENABLE_WORKER_SPREAD_ENV,
ENABLE_SHARE_NEURON_CORES_ACCELERATOR_ENV,
+ TRAIN_ENABLE_WORKER_SPREAD_ENV,
)
from ray.train.tensorflow import TensorflowConfig
from ray.train.torch import TorchConfig
diff --git a/python/ray/train/tests/test_base_trainer.py b/python/ray/train/tests/test_base_trainer.py
index ff4a35922b4d..0b2bb0dc0950 100644
--- a/python/ray/train/tests/test_base_trainer.py
+++ b/python/ray/train/tests/test_base_trainer.py
@@ -5,8 +5,8 @@
import ray
from ray import train, tune
-from ray.train import Checkpoint, ScalingConfig
from ray.air.constants import MAX_REPR_LENGTH
+from ray.train import Checkpoint, ScalingConfig
from ray.train.gbdt_trainer import GBDTTrainer
from ray.train.trainer import BaseTrainer
from ray.util.placement_group import get_current_placement_group
diff --git a/python/ray/train/tests/test_checkpoint.py b/python/ray/train/tests/test_checkpoint.py
index 74ba55bcaef9..ced0a2674342 100644
--- a/python/ray/train/tests/test_checkpoint.py
+++ b/python/ray/train/tests/test_checkpoint.py
@@ -5,18 +5,16 @@
import pytest
import ray
-from ray.train._internal.storage import _exists_at_fs_path, _upload_to_fs_path
from ray.train._checkpoint import (
_CHECKPOINT_TEMP_DIR_PREFIX,
_METADATA_FILE_NAME,
+ Checkpoint,
_get_del_lock_path,
_list_existing_del_locks,
- Checkpoint,
)
-
+from ray.train._internal.storage import _exists_at_fs_path, _upload_to_fs_path
from ray.train.tests.test_new_persistence import _create_mock_custom_fs
-
_CHECKPOINT_CONTENT_FILE = "dummy.txt"
diff --git a/python/ray/train/tests/test_checkpoint_manager.py b/python/ray/train/tests/test_checkpoint_manager.py
index 87f0cbc414c9..2675db74ac57 100644
--- a/python/ray/train/tests/test_checkpoint_manager.py
+++ b/python/ray/train/tests/test_checkpoint_manager.py
@@ -1,14 +1,11 @@
-from pathlib import Path
import random
+from pathlib import Path
from typing import List
import pytest
from ray.train import Checkpoint, CheckpointConfig
-from ray.train._internal.checkpoint_manager import (
- _CheckpointManager,
- _TrainingResult,
-)
+from ray.train._internal.checkpoint_manager import _CheckpointManager, _TrainingResult
@pytest.fixture
diff --git a/python/ray/train/tests/test_data_parallel_trainer.py b/python/ray/train/tests/test_data_parallel_trainer.py
index 6437abff58af..710eae888faf 100644
--- a/python/ray/train/tests/test_data_parallel_trainer.py
+++ b/python/ray/train/tests/test_data_parallel_trainer.py
@@ -1,20 +1,20 @@
import os
import time
from unittest.mock import patch
+
import pytest
import ray
from ray import train, tune
-from ray.train import ScalingConfig, RunConfig
+from ray.train import RunConfig, ScalingConfig
from ray.train._internal.backend_executor import BackendExecutor
from ray.train._internal.worker_group import WorkerGroup
from ray.train.backend import Backend, BackendConfig
from ray.train.data_parallel_trainer import DataParallelTrainer
+from ray.train.tests.util import create_dict_checkpoint, load_dict_checkpoint
+from ray.tune.callback import Callback
from ray.tune.tune_config import TuneConfig
from ray.tune.tuner import Tuner
-from ray.tune.callback import Callback
-
-from ray.train.tests.util import create_dict_checkpoint, load_dict_checkpoint
@pytest.fixture
diff --git a/python/ray/train/tests/test_data_parallel_trainer_checkpointing.py b/python/ray/train/tests/test_data_parallel_trainer_checkpointing.py
index 7117cca64bb5..212275979adf 100644
--- a/python/ray/train/tests/test_data_parallel_trainer_checkpointing.py
+++ b/python/ray/train/tests/test_data_parallel_trainer_checkpointing.py
@@ -4,7 +4,6 @@
from ray import train
from ray.train import CheckpointConfig, RunConfig, ScalingConfig
from ray.train.data_parallel_trainer import DataParallelTrainer
-
from ray.train.tests.util import create_dict_checkpoint, load_dict_checkpoint
diff --git a/python/ray/train/tests/test_e2e_wandb_integration.py b/python/ray/train/tests/test_e2e_wandb_integration.py
index 3d666c1f41ed..e85e8e8df6a6 100644
--- a/python/ray/train/tests/test_e2e_wandb_integration.py
+++ b/python/ray/train/tests/test_e2e_wandb_integration.py
@@ -8,9 +8,9 @@
import pytest
import ray
-from ray.train import RunConfig, ScalingConfig
from ray.air.integrations.wandb import WANDB_ENV_VAR
from ray.air.tests.mocked_wandb_integration import WandbTestExperimentLogger
+from ray.train import RunConfig, ScalingConfig
from ray.train.examples.pytorch.torch_linear_example import (
train_func as linear_train_func,
)
diff --git a/python/ray/train/tests/test_examples.py b/python/ray/train/tests/test_examples.py
index 86c076271896..ed758280afb8 100644
--- a/python/ray/train/tests/test_examples.py
+++ b/python/ray/train/tests/test_examples.py
@@ -1,25 +1,24 @@
import pytest
-from ray.train import ScalingConfig
from ray.air.constants import TRAINING_ITERATION
-
+from ray.train import ScalingConfig
from ray.train.examples.horovod.horovod_example import (
train_func as horovod_torch_train_func,
)
-from ray.train.examples.tf.tensorflow_mnist_example import (
- train_func as tensorflow_mnist_train_func,
+from ray.train.examples.pytorch.torch_fashion_mnist_example import (
+ train_func_per_worker as fashion_mnist_train_func,
)
-from ray.train.examples.tf.tensorflow_quick_start import (
- train_func as tf_quick_start_train_func,
+from ray.train.examples.pytorch.torch_linear_example import (
+ train_func as linear_train_func,
)
from ray.train.examples.pytorch.torch_quick_start import (
train_func as torch_quick_start_train_func,
)
-from ray.train.examples.pytorch.torch_fashion_mnist_example import (
- train_func_per_worker as fashion_mnist_train_func,
+from ray.train.examples.tf.tensorflow_mnist_example import (
+ train_func as tensorflow_mnist_train_func,
)
-from ray.train.examples.pytorch.torch_linear_example import (
- train_func as linear_train_func,
+from ray.train.examples.tf.tensorflow_quick_start import (
+ train_func as tf_quick_start_train_func,
)
from ray.train.horovod.horovod_trainer import HorovodTrainer
from ray.train.tensorflow.tensorflow_trainer import TensorflowTrainer
diff --git a/python/ray/train/tests/test_gpu.py b/python/ray/train/tests/test_gpu.py
index 688b4709cb92..6fee72b088bd 100644
--- a/python/ray/train/tests/test_gpu.py
+++ b/python/ray/train/tests/test_gpu.py
@@ -1,10 +1,10 @@
import json
import os
-from pathlib import Path
import time
-from typing import Union, List, Dict
-
+from pathlib import Path
+from typing import Dict, List, Union
from unittest.mock import patch
+
import pytest
import torch
import torchvision
@@ -13,16 +13,15 @@
import ray
import ray.data
-from ray.exceptions import RayTaskError
from ray import train
-
+from ray.exceptions import RayTaskError
from ray.train import ScalingConfig
+from ray.train._internal.worker_group import WorkerGroup
from ray.train.constants import DEFAULT_NCCL_SOCKET_IFNAME
from ray.train.examples.pytorch.torch_linear_example import LinearDataset
from ray.train.torch.config import TorchConfig, _TorchBackend
from ray.train.torch.torch_trainer import TorchTrainer
from ray.train.trainer import TrainingFailedError
-from ray.train._internal.worker_group import WorkerGroup
class LinearDatasetDict(LinearDataset):
diff --git a/python/ray/train/tests/test_gpu_2.py b/python/ray/train/tests/test_gpu_2.py
index 1889a7d3349e..3981eb222a23 100644
--- a/python/ray/train/tests/test_gpu_2.py
+++ b/python/ray/train/tests/test_gpu_2.py
@@ -1,12 +1,11 @@
-import pytest
import numpy as np
+import pytest
import torch
import ray
import ray.data
-from ray import tune
-
import ray.train as train
+from ray import tune
from ray.air.config import ScalingConfig
from ray.train.examples.pytorch.torch_linear_example import LinearDataset
from ray.train.torch.torch_trainer import TorchTrainer
diff --git a/python/ray/train/tests/test_gpu_amp.py b/python/ray/train/tests/test_gpu_amp.py
index 82971bb7f6ab..c87e1a477488 100644
--- a/python/ray/train/tests/test_gpu_amp.py
+++ b/python/ray/train/tests/test_gpu_amp.py
@@ -1,9 +1,9 @@
+import os
+from tempfile import TemporaryDirectory
from timeit import default_timer as timer
-import os
import torch
import torchvision
-from tempfile import TemporaryDirectory
import ray.train as train
from ray.train import Checkpoint, ScalingConfig
diff --git a/python/ray/train/tests/test_gpu_auto_transfer.py b/python/ray/train/tests/test_gpu_auto_transfer.py
index a2def350386e..f90e763d7964 100644
--- a/python/ray/train/tests/test_gpu_auto_transfer.py
+++ b/python/ray/train/tests/test_gpu_auto_transfer.py
@@ -1,15 +1,15 @@
import os
-from unittest.mock import patch
from tempfile import TemporaryDirectory
-import pytest
+from unittest.mock import patch
+import pytest
import torch
import ray
+import ray.train.torch.train_loop_utils
from ray import train
-from ray.train import ScalingConfig, Checkpoint
+from ray.train import Checkpoint, ScalingConfig
from ray.train.torch import TorchTrainer
-import ray.train.torch.train_loop_utils
@pytest.mark.parametrize(
diff --git a/python/ray/train/tests/test_gpu_examples.py b/python/ray/train/tests/test_gpu_examples.py
index 2b53becd229a..eceb10436881 100644
--- a/python/ray/train/tests/test_gpu_examples.py
+++ b/python/ray/train/tests/test_gpu_examples.py
@@ -1,26 +1,24 @@
import os
+from tempfile import TemporaryDirectory
+
import pytest
import torch
-from tempfile import TemporaryDirectory
from ray import train
-from ray.train import Checkpoint, ScalingConfig
from ray.air.constants import TRAINING_ITERATION
+from ray.train import Checkpoint, ScalingConfig
from ray.train.examples.horovod.horovod_example import (
train_func as horovod_torch_train_func,
)
-from ray.train.examples.tf.tensorflow_mnist_example import (
- train_func as tensorflow_mnist_train_func,
-)
from ray.train.examples.pytorch.torch_fashion_mnist_example import (
train_func_per_worker as fashion_mnist_train_func,
)
-from ray.train.horovod.horovod_trainer import HorovodTrainer
-from ray.train.tests.test_tune import (
- torch_fashion_mnist,
- tune_tensorflow_mnist,
+from ray.train.examples.tf.tensorflow_mnist_example import (
+ train_func as tensorflow_mnist_train_func,
)
+from ray.train.horovod.horovod_trainer import HorovodTrainer
from ray.train.tensorflow.tensorflow_trainer import TensorflowTrainer
+from ray.train.tests.test_tune import torch_fashion_mnist, tune_tensorflow_mnist
from ray.train.torch.torch_trainer import TorchTrainer
diff --git a/python/ray/train/tests/test_horovod_trainer.py b/python/ray/train/tests/test_horovod_trainer.py
index 110fb687c6c7..76a13baef4d5 100644
--- a/python/ray/train/tests/test_horovod_trainer.py
+++ b/python/ray/train/tests/test_horovod_trainer.py
@@ -1,4 +1,5 @@
import os
+
import pytest
import torch
import torch.nn
@@ -7,11 +8,11 @@
from torchvision.transforms import transforms
import ray
+from ray.train import ScalingConfig
from ray.train.examples.horovod.horovod_pytorch_example import Net
from ray.train.examples.horovod.horovod_pytorch_example import (
train_func as hvd_train_func,
)
-from ray.train import ScalingConfig
from ray.train.horovod import HorovodTrainer
from ray.train.torch import TorchPredictor
diff --git a/python/ray/train/tests/test_huggingface.py b/python/ray/train/tests/test_huggingface.py
index b8b30946753e..f85af14c5655 100644
--- a/python/ray/train/tests/test_huggingface.py
+++ b/python/ray/train/tests/test_huggingface.py
@@ -1,5 +1,7 @@
-import pytest
from unittest.mock import patch
+
+import pytest
+
import ray
diff --git a/python/ray/train/tests/test_lightgbm_predictor.py b/python/ray/train/tests/test_lightgbm_predictor.py
index c4a00a1fef9c..edd5b4f5cbba 100644
--- a/python/ray/train/tests/test_lightgbm_predictor.py
+++ b/python/ray/train/tests/test_lightgbm_predictor.py
@@ -7,15 +7,10 @@
from ray.air.constants import MAX_REPR_LENGTH
from ray.air.util.data_batch_conversion import _convert_pandas_to_batch_type
-from ray.train.lightgbm import (
- LightGBMCheckpoint,
- LightGBMPredictor,
-)
+from ray.train.lightgbm import LightGBMCheckpoint, LightGBMPredictor
from ray.train.predictor import TYPE_TO_ENUM
-
from ray.train.tests.dummy_preprocessor import DummyPreprocessor
-
dummy_data = np.array([[1, 2], [3, 4], [5, 6]])
dummy_target = np.array([0, 1, 0])
model = lgbm.LGBMClassifier(n_estimators=10).fit(dummy_data, dummy_target).booster_
diff --git a/python/ray/train/tests/test_lightgbm_trainer.py b/python/ray/train/tests/test_lightgbm_trainer.py
index 1a5a8dcfefdb..c7d63a0d269d 100644
--- a/python/ray/train/tests/test_lightgbm_trainer.py
+++ b/python/ray/train/tests/test_lightgbm_trainer.py
@@ -1,18 +1,16 @@
import math
-import pytest
-import pandas as pd
import lightgbm as lgbm
+import pandas as pd
+import pytest
+from sklearn.datasets import load_breast_cancer
+from sklearn.model_selection import train_test_split
import ray
from ray import tune
+from ray.train import ScalingConfig
from ray.train.constants import TRAIN_DATASET_KEY
-
from ray.train.lightgbm import LightGBMTrainer
-from ray.train import ScalingConfig
-
-from sklearn.datasets import load_breast_cancer
-from sklearn.model_selection import train_test_split
@pytest.fixture
@@ -207,7 +205,8 @@ def test_lightgbm_trainer_resources():
if __name__ == "__main__":
- import pytest
import sys
+ import pytest
+
sys.exit(pytest.main(["-v", "-x", __file__]))
diff --git a/python/ray/train/tests/test_lightning_checkpoint.py b/python/ray/train/tests/test_lightning_checkpoint.py
index 5976668483ab..1e950b69c6cb 100644
--- a/python/ray/train/tests/test_lightning_checkpoint.py
+++ b/python/ray/train/tests/test_lightning_checkpoint.py
@@ -1,17 +1,18 @@
+import tempfile
+
import pytorch_lightning as pl
import torch
import torch.nn as nn
-import tempfile
+from torch.utils.data import DataLoader
import ray
from ray.air.constants import MODEL_KEY
-from torch.utils.data import DataLoader
-from ray.train.tests.lightning_test_utils import LinearModule, DummyDataModule
from ray.train.lightning import (
LightningCheckpoint,
LightningConfigBuilder,
LightningTrainer,
)
+from ray.train.tests.lightning_test_utils import DummyDataModule, LinearModule
class Net(pl.LightningModule):
diff --git a/python/ray/train/tests/test_lightning_deepspeed.py b/python/ray/train/tests/test_lightning_deepspeed.py
index ea2d7916203f..0ece51f9a715 100644
--- a/python/ray/train/tests/test_lightning_deepspeed.py
+++ b/python/ray/train/tests/test_lightning_deepspeed.py
@@ -1,14 +1,12 @@
import os
+
import pytest
import ray
-from ray.train import CheckpointConfig, RunConfig
from ray.air.constants import MODEL_KEY
+from ray.train import CheckpointConfig, RunConfig
from ray.train.lightning import LightningConfigBuilder, LightningTrainer
-from ray.train.tests.lightning_test_utils import (
- LinearModule,
- DummyDataModule,
-)
+from ray.train.tests.lightning_test_utils import DummyDataModule, LinearModule
@pytest.fixture
diff --git a/python/ray/train/tests/test_lightning_predictor.py b/python/ray/train/tests/test_lightning_predictor.py
index 4bd96cdd7030..7a2dacc2492e 100644
--- a/python/ray/train/tests/test_lightning_predictor.py
+++ b/python/ray/train/tests/test_lightning_predictor.py
@@ -1,14 +1,14 @@
import re
-import pytest
-import torch
import numpy as np
+import pytest
import pytorch_lightning as pl
+import torch
from torch.utils.data import DataLoader
from ray.air.constants import MAX_REPR_LENGTH, MODEL_KEY
-from ray.train.tests.conftest import * # noqa
from ray.train.lightning import LightningCheckpoint, LightningPredictor
+from ray.train.tests.conftest import * # noqa
from ray.train.tests.dummy_preprocessor import DummyPreprocessor
from ray.train.tests.lightning_test_utils import LightningMNISTClassifier
diff --git a/python/ray/train/tests/test_lightning_trainer.py b/python/ray/train/tests/test_lightning_trainer.py
index 457cc8b784a1..cbea061cc123 100644
--- a/python/ray/train/tests/test_lightning_trainer.py
+++ b/python/ray/train/tests/test_lightning_trainer.py
@@ -1,13 +1,13 @@
-import pytest
import numpy as np
+import pytest
import ray
-from ray.train.lightning import LightningConfigBuilder, LightningTrainer
from ray.air.util.data_batch_conversion import _convert_batch_type_to_pandas
+from ray.train.lightning import LightningConfigBuilder, LightningTrainer
from ray.train.tests.lightning_test_utils import (
- LinearModule,
DoubleLinearModule,
DummyDataModule,
+ LinearModule,
)
diff --git a/python/ray/train/tests/test_lightning_trainer_restore.py b/python/ray/train/tests/test_lightning_trainer_restore.py
index 54f0686f468b..9c2d92226dd7 100644
--- a/python/ray/train/tests/test_lightning_trainer_restore.py
+++ b/python/ray/train/tests/test_lightning_trainer_restore.py
@@ -1,21 +1,22 @@
import os
-import numpy as np
from pathlib import Path
+
+import numpy as np
import pytest
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
import ray
-from ray.train import RunConfig, CheckpointConfig
from ray.air.util.data_batch_conversion import _convert_batch_type_to_pandas
+from ray.train import CheckpointConfig, RunConfig
from ray.train.constants import MODEL_KEY
-from ray.train.trainer import TrainingFailedError
from ray.train.lightning import LightningConfigBuilder, LightningTrainer
from ray.train.tests.lightning_test_utils import (
DoubleLinearModule,
DummyDataModule,
LinearModule,
)
+from ray.train.trainer import TrainingFailedError
from ray.tune import Callback
diff --git a/python/ray/train/tests/test_mosaic_trainer.py b/python/ray/train/tests/test_mosaic_trainer.py
index 71eace1aabe2..6878948e426f 100644
--- a/python/ray/train/tests/test_mosaic_trainer.py
+++ b/python/ray/train/tests/test_mosaic_trainer.py
@@ -1,27 +1,24 @@
-from filelock import FileLock
import os
import pytest
-
import torch
import torch.utils.data
-
import torchvision
-from torchvision import transforms, datasets
+from filelock import FileLock
+from torchvision import datasets, transforms
-from ray.train import ScalingConfig
import ray.train as train
+from ray.train import ScalingConfig
from ray.train.trainer import TrainingFailedError
-
scaling_config = ScalingConfig(num_workers=2, use_gpu=False)
def trainer_init_per_worker(config):
- from torchmetrics.classification.accuracy import Accuracy
+ import composer.optim
from composer.core.evaluator import Evaluator
from composer.models.tasks import ComposerClassifier
- import composer.optim
+ from torchmetrics.classification.accuracy import Accuracy
BATCH_SIZE = 32
model = ComposerClassifier(
@@ -130,12 +127,12 @@ def bad_trainer_init_per_worker_1(a, b):
def test_loggers(ray_start_4_cpus):
- from ray.train.mosaic import MosaicTrainer
-
- from composer.loggers.logger_destination import LoggerDestination
+ from composer.core.callback import Callback
from composer.core.state import State
from composer.loggers import Logger
- from composer.core.callback import Callback
+ from composer.loggers.logger_destination import LoggerDestination
+
+ from ray.train.mosaic import MosaicTrainer
class _CallbackExistsError(ValueError):
pass
@@ -247,10 +244,10 @@ def test_metrics_key(ray_start_4_cpus):
def test_monitor_callbacks(ray_start_4_cpus):
- from ray.train.mosaic import MosaicTrainer
-
# Test Callbacks involving logging (SpeedMonitor, LRMonitor)
- from composer.callbacks import SpeedMonitor, LRMonitor
+ from composer.callbacks import LRMonitor, SpeedMonitor
+
+ from ray.train.mosaic import MosaicTrainer
trainer_init_config = {
"max_duration": "1ep",
diff --git a/python/ray/train/tests/test_new_persistence.py b/python/ray/train/tests/test_new_persistence.py
index a0110c4e33cd..74d918466ca9 100644
--- a/python/ray/train/tests/test_new_persistence.py
+++ b/python/ray/train/tests/test_new_persistence.py
@@ -1,28 +1,28 @@
-from contextlib import contextmanager
import logging
import os
-from pathlib import Path
import pickle
-import pytest
import re
import shutil
import tempfile
import time
+from contextlib import contextmanager
+from pathlib import Path
from typing import List, Optional, Tuple
import pyarrow.fs
+import pytest
import ray
from ray import train, tune
from ray._private.test_utils import simulate_storage
from ray.air._internal.uri_utils import URI
from ray.air.constants import EXPR_RESULT_FILE
+from ray.train._checkpoint import Checkpoint
from ray.train._internal.storage import (
+ StorageContext,
_delete_fs_path,
_download_from_fs_path,
- StorageContext,
)
-from ray.train._checkpoint import Checkpoint
from ray.train.base_trainer import TrainingFailedError
from ray.train.constants import RAY_AIR_NEW_PERSISTENCE_MODE
from ray.train.data_parallel_trainer import DataParallelTrainer
diff --git a/python/ray/train/tests/test_predictor.py b/python/ray/train/tests/test_predictor.py
index 93c0c59d3127..445458223304 100644
--- a/python/ray/train/tests/test_predictor.py
+++ b/python/ray/train/tests/test_predictor.py
@@ -1,18 +1,17 @@
-from typing import Optional, Dict, Union
import uuid
+from typing import Dict, Optional, Union
from unittest import mock
-import pytest
-import pandas as pd
import numpy as np
+import pandas as pd
+import pytest
import ray
-from ray.train._internal.framework_checkpoint import FrameworkCheckpoint
from ray.air.constants import TENSOR_COLUMN_NAME
from ray.air.util.data_batch_conversion import BatchFormat
from ray.data import Preprocessor
+from ray.train._internal.framework_checkpoint import FrameworkCheckpoint
from ray.train.predictor import Predictor, PredictorNotSerializableException
-
from ray.train.tests.util import create_dict_checkpoint, load_dict_checkpoint
diff --git a/python/ray/train/tests/test_session.py b/python/ray/train/tests/test_session.py
index b94850484479..00ccaa4ce589 100644
--- a/python/ray/train/tests/test_session.py
+++ b/python/ray/train/tests/test_session.py
@@ -6,29 +6,27 @@
import ray
from ray.air._internal.util import StartTraceback
-from ray.train._internal.accelerator import Accelerator
-from ray.train._internal.storage import StorageContext
from ray.air.constants import SESSION_MISUSE_LOG_ONCE_KEY
-from ray.train._internal.session import (
- init_session,
- shutdown_session,
- get_session,
- get_accelerator,
- set_accelerator,
-)
from ray.air.session import (
get_checkpoint,
- get_world_rank,
- get_local_rank,
- report,
get_dataset_shard,
+ get_local_rank,
+ get_world_rank,
get_world_size,
+ report,
)
+from ray.train._internal.accelerator import Accelerator
+from ray.train._internal.session import (
+ get_accelerator,
+ get_session,
+ init_session,
+ set_accelerator,
+ shutdown_session,
+)
+from ray.train._internal.storage import StorageContext
from ray.train.error import SessionMisuseError
-
from ray.train.tests.util import create_dict_checkpoint, load_dict_checkpoint
-
storage = StorageContext(
storage_path=tempfile.mkdtemp(),
experiment_dir_name="exp_name",
@@ -384,7 +382,8 @@ def f():
if __name__ == "__main__":
- import pytest
import sys
+ import pytest
+
sys.exit(pytest.main(["-v", "-x", __file__]))
diff --git a/python/ray/train/tests/test_sklearn_predictor.py b/python/ray/train/tests/test_sklearn_predictor.py
index 14c9a2cc1b27..864b503fed81 100644
--- a/python/ray/train/tests/test_sklearn_predictor.py
+++ b/python/ray/train/tests/test_sklearn_predictor.py
@@ -3,16 +3,14 @@
import numpy as np
import pandas as pd
import pytest
-from ray.air.util.data_batch_conversion import _convert_pandas_to_batch_type
-from ray.train.predictor import TYPE_TO_ENUM
from sklearn.ensemble import RandomForestClassifier
from ray.air.constants import MAX_REPR_LENGTH
+from ray.air.util.data_batch_conversion import _convert_pandas_to_batch_type
+from ray.train.predictor import TYPE_TO_ENUM
from ray.train.sklearn import SklearnCheckpoint, SklearnPredictor
-
from ray.train.tests.dummy_preprocessor import DummyPreprocessor
-
dummy_data = np.array([[1, 2], [3, 4], [5, 6]])
dummy_target = np.array([0, 1, 0])
model = RandomForestClassifier(n_estimators=10, random_state=0).fit(
diff --git a/python/ray/train/tests/test_sklearn_trainer.py b/python/ray/train/tests/test_sklearn_trainer.py
index 58fe4ab95951..28dbc9e7dc1b 100644
--- a/python/ray/train/tests/test_sklearn_trainer.py
+++ b/python/ray/train/tests/test_sklearn_trainer.py
@@ -1,16 +1,14 @@
-import pytest
import pandas as pd
+import pytest
+from sklearn.datasets import load_breast_cancer
+from sklearn.ensemble import RandomForestClassifier
+from sklearn.model_selection import train_test_split
import ray
from ray import tune
+from ray.train import ScalingConfig
from ray.train.constants import TRAIN_DATASET_KEY
-
from ray.train.sklearn import SklearnTrainer
-from ray.train import ScalingConfig
-
-from sklearn.datasets import load_breast_cancer
-from sklearn.model_selection import train_test_split
-from sklearn.ensemble import RandomForestClassifier
@pytest.fixture
@@ -141,7 +139,8 @@ def test_validation(ray_start_4_cpus):
if __name__ == "__main__":
- import pytest
import sys
+ import pytest
+
sys.exit(pytest.main(["-v", "-x", __file__]))
diff --git a/python/ray/train/tests/test_storage.py b/python/ray/train/tests/test_storage.py
index 36dcfd5ee166..b5c2c6edc2a8 100644
--- a/python/ray/train/tests/test_storage.py
+++ b/python/ray/train/tests/test_storage.py
@@ -1,8 +1,8 @@
import os
from pathlib import Path
-import pytest
import pyarrow.fs
+import pytest
import ray.cloudpickle as ray_pickle
from ray.train import Checkpoint, SyncConfig
@@ -11,7 +11,6 @@
StorageContext,
_list_at_fs_path,
)
-
from ray.train.tests.test_new_persistence import _resolve_storage_type
diff --git a/python/ray/train/tests/test_tensorflow_checkpoint.py b/python/ray/train/tests/test_tensorflow_checkpoint.py
index 368be8e3900f..5598420612e1 100644
--- a/python/ray/train/tests/test_tensorflow_checkpoint.py
+++ b/python/ray/train/tests/test_tensorflow_checkpoint.py
@@ -1,18 +1,16 @@
-from numpy import ndarray
import os.path
-import pytest
import tempfile
-import tensorflow as tf
-from typing import List
import unittest
+from typing import List
+
+import pytest
+import tensorflow as tf
+from numpy import ndarray
-from ray.train.tensorflow import (
- TensorflowCheckpoint,
- TensorflowTrainer,
-)
from ray import train
-from ray.train import ScalingConfig
from ray.data import Preprocessor
+from ray.train import ScalingConfig
+from ray.train.tensorflow import TensorflowCheckpoint, TensorflowTrainer
class DummyPreprocessor(Preprocessor):
diff --git a/python/ray/train/tests/test_tensorflow_predictor.py b/python/ray/train/tests/test_tensorflow_predictor.py
index 9074dcbc0374..069c60d53cc6 100644
--- a/python/ray/train/tests/test_tensorflow_predictor.py
+++ b/python/ray/train/tests/test_tensorflow_predictor.py
@@ -1,4 +1,6 @@
import re
+from typing import Tuple
+
import numpy as np
import pandas as pd
import pytest
@@ -6,14 +8,12 @@
from ray.air.constants import MAX_REPR_LENGTH
from ray.air.util.data_batch_conversion import (
- _convert_pandas_to_batch_type,
_convert_batch_type_to_pandas,
+ _convert_pandas_to_batch_type,
)
from ray.data.preprocessor import Preprocessor
from ray.train.predictor import TYPE_TO_ENUM
from ray.train.tensorflow import TensorflowCheckpoint, TensorflowPredictor
-from typing import Tuple
-
from ray.train.tests.dummy_preprocessor import DummyPreprocessor
diff --git a/python/ray/train/tests/test_tensorflow_trainer.py b/python/ray/train/tests/test_tensorflow_trainer.py
index 7ec21d7abb1a..54dbbfb16ab0 100644
--- a/python/ray/train/tests/test_tensorflow_trainer.py
+++ b/python/ray/train/tests/test_tensorflow_trainer.py
@@ -5,13 +5,13 @@
import ray
from ray import train
+from ray.data.preprocessors import Concatenator
from ray.train import ScalingConfig
+from ray.train.constants import TRAIN_DATASET_KEY
from ray.train.examples.tf.tensorflow_regression_example import (
train_func as tensorflow_linear_train_func,
)
-from ray.data.preprocessors import Concatenator
-from ray.train.constants import TRAIN_DATASET_KEY
-from ray.train.tensorflow import TensorflowTrainer, TensorflowCheckpoint
+from ray.train.tensorflow import TensorflowCheckpoint, TensorflowTrainer
@pytest.fixture
diff --git a/python/ray/train/tests/test_torch_fsdp.py b/python/ray/train/tests/test_torch_fsdp.py
index 7fdfad0635c4..b6cfba2b6615 100644
--- a/python/ray/train/tests/test_torch_fsdp.py
+++ b/python/ray/train/tests/test_torch_fsdp.py
@@ -3,10 +3,9 @@
from torch.distributed.fsdp import FullyShardedDataParallel
import ray
-
from ray import train
-from ray.train.torch import TorchTrainer
from ray.train import ScalingConfig
+from ray.train.torch import TorchTrainer
@pytest.fixture
diff --git a/python/ray/train/tests/test_torch_lightning_train.py b/python/ray/train/tests/test_torch_lightning_train.py
index 5d006cd5e368..69b62fb9c0f7 100644
--- a/python/ray/train/tests/test_torch_lightning_train.py
+++ b/python/ray/train/tests/test_torch_lightning_train.py
@@ -1,23 +1,20 @@
-import pytest
-import numpy as np
import os
+import numpy as np
+import pytest
+import pytorch_lightning as pl
+
import ray
-from ray.train.torch import TorchTrainer
+from ray.train import ScalingConfig
from ray.train.lightning import (
- RayDeepSpeedStrategy,
RayDDPStrategy,
+ RayDeepSpeedStrategy,
RayFSDPStrategy,
RayLightningEnvironment,
RayTrainReportCallback,
)
-
-from ray.train import ScalingConfig
-from ray.train.tests.lightning_test_utils import (
- LinearModule,
- DummyDataModule,
-)
-import pytorch_lightning as pl
+from ray.train.tests.lightning_test_utils import DummyDataModule, LinearModule
+from ray.train.torch import TorchTrainer
@pytest.fixture
diff --git a/python/ray/train/tests/test_torch_predictor.py b/python/ray/train/tests/test_torch_predictor.py
index 7ebe080d50d2..2be59b1ef4b8 100644
--- a/python/ray/train/tests/test_torch_predictor.py
+++ b/python/ray/train/tests/test_torch_predictor.py
@@ -7,12 +7,12 @@
from ray.air.constants import MAX_REPR_LENGTH
from ray.air.util.data_batch_conversion import (
- _convert_pandas_to_batch_type,
_convert_batch_type_to_pandas,
+ _convert_pandas_to_batch_type,
)
from ray.train.predictor import TYPE_TO_ENUM
-from ray.train.torch import TorchCheckpoint, TorchPredictor
from ray.train.tests.dummy_preprocessor import DummyPreprocessor
+from ray.train.torch import TorchCheckpoint, TorchPredictor
class DummyModelSingleTensor(torch.nn.Module):
diff --git a/python/ray/train/tests/test_torch_trainer.py b/python/ray/train/tests/test_torch_trainer.py
index adb49578ba50..b153b0e3b4ab 100644
--- a/python/ray/train/tests/test_torch_trainer.py
+++ b/python/ray/train/tests/test_torch_trainer.py
@@ -1,23 +1,22 @@
import contextlib
+import os
+import tempfile
+import time
import uuid
+from unittest.mock import patch
import pytest
-import time
import torch
-import os
-import tempfile
import ray
+import ray.train as train
+from ray.cluster_utils import Cluster
+from ray.train import Checkpoint, RunConfig, ScalingConfig
from ray.train.examples.pytorch.torch_linear_example import (
train_func as linear_train_func,
)
-from ray.train.torch import TorchPredictor, TorchTrainer
-from ray.train import Checkpoint, RunConfig, ScalingConfig
-from ray.train.torch import TorchConfig, TorchCheckpoint
+from ray.train.torch import TorchCheckpoint, TorchConfig, TorchPredictor, TorchTrainer
from ray.train.trainer import TrainingFailedError
-import ray.train as train
-from unittest.mock import patch
-from ray.cluster_utils import Cluster
@pytest.fixture
diff --git a/python/ray/train/tests/test_torch_transformers_train.py b/python/ray/train/tests/test_torch_transformers_train.py
index b76d1a9def64..811eea1399b6 100644
--- a/python/ray/train/tests/test_torch_transformers_train.py
+++ b/python/ray/train/tests/test_torch_transformers_train.py
@@ -1,20 +1,14 @@
import pandas as pd
import pytest
from datasets import Dataset
-from transformers import (
- AutoConfig,
- AutoModelForCausalLM,
- Trainer,
- TrainingArguments,
-)
+from transformers import AutoConfig, AutoModelForCausalLM, Trainer, TrainingArguments
import ray.data
-from ray.train import ScalingConfig, Checkpoint
-from ray.train.torch import TorchTrainer
+from ray import tune
+from ray.train import Checkpoint, ScalingConfig
from ray.train.huggingface.transformers import RayTrainReportCallback, prepare_trainer
from ray.train.tests._huggingface_data import train_data, validation_data
-
-from ray import tune
+from ray.train.torch import TorchTrainer
from ray.tune import Tuner
from ray.tune.schedulers.async_hyperband import ASHAScheduler
from ray.tune.schedulers.resource_changing_scheduler import (
diff --git a/python/ray/train/tests/test_torch_utils.py b/python/ray/train/tests/test_torch_utils.py
index fc3ba4dee40e..fe9473c7c3b4 100644
--- a/python/ray/train/tests/test_torch_utils.py
+++ b/python/ray/train/tests/test_torch_utils.py
@@ -4,9 +4,9 @@
import torch
from ray.air._internal.torch_utils import (
+ contains_tensor,
convert_pandas_to_torch_tensor,
load_torch_model,
- contains_tensor,
)
from ray.util.debug import _test_some_code_for_memory_leaks
diff --git a/python/ray/train/tests/test_train_usage.py b/python/ray/train/tests/test_train_usage.py
index 80dcf39b0d9b..0b1e08d1c968 100644
--- a/python/ray/train/tests/test_train_usage.py
+++ b/python/ray/train/tests/test_train_usage.py
@@ -14,7 +14,8 @@ def shutdown_only():
def run_torch():
from torch.utils.data import DataLoader, TensorDataset
- from ray.train.torch import get_device, prepare_model, prepare_data_loader
+
+ from ray.train.torch import get_device, prepare_data_loader, prepare_model
def train_func():
# Create dummy model and data loader
@@ -35,12 +36,13 @@ def train_func():
def run_lightning():
import pytorch_lightning as pl
+
from ray.train.lightning import (
- RayTrainReportCallback,
RayDDPStrategy,
- RayFSDPStrategy,
RayDeepSpeedStrategy,
+ RayFSDPStrategy,
RayLightningEnvironment,
+ RayTrainReportCallback,
prepare_trainer,
)
@@ -71,9 +73,10 @@ def train_func():
def run_transformers():
from datasets import Dataset
from transformers import Trainer, TrainingArguments
+
from ray.train.huggingface.transformers import (
- prepare_trainer,
RayTrainReportCallback,
+ prepare_trainer,
)
def train_func():
diff --git a/python/ray/train/tests/test_trainer_restore.py b/python/ray/train/tests/test_trainer_restore.py
index 00a6a9fc2ba4..450e426b4ccb 100644
--- a/python/ray/train/tests/test_trainer_restore.py
+++ b/python/ray/train/tests/test_trainer_restore.py
@@ -1,28 +1,22 @@
+import warnings
from pathlib import Path
from typing import Dict, List
+
import pytest
-import warnings
import ray
from ray import train
-from ray.train import (
- CheckpointConfig,
- RunConfig,
- ScalingConfig,
-)
from ray.air._internal.uri_utils import URI
+from ray.train import CheckpointConfig, RunConfig, ScalingConfig
from ray.train.base_trainer import BaseTrainer
-from ray.train.trainer import TrainingFailedError
-from ray.train.data_parallel_trainer import (
- DataParallelTrainer,
-)
+from ray.train.data_parallel_trainer import DataParallelTrainer
+from ray.train.huggingface import TransformersTrainer
+from ray.train.lightgbm import LightGBMTrainer
+from ray.train.tests.util import create_dict_checkpoint, load_dict_checkpoint
from ray.train.torch import TorchTrainer
+from ray.train.trainer import TrainingFailedError
from ray.train.xgboost import XGBoostTrainer
-from ray.train.lightgbm import LightGBMTrainer
-from ray.train.huggingface import TransformersTrainer
from ray.tune import Callback
-
-from ray.train.tests.util import create_dict_checkpoint, load_dict_checkpoint
from ray.tune.experiment import Trial
@@ -205,10 +199,8 @@ def test_trainer_with_init_fn_restore(
exp_name = f"{trainer_cls.__name__}_restore_test"
if trainer_cls == TransformersTrainer:
- from ray.train.tests.test_transformers_trainer import (
- train_function as hf_init,
- train_df,
- )
+ from ray.train.tests.test_transformers_trainer import train_df
+ from ray.train.tests.test_transformers_trainer import train_function as hf_init
trainer_init_fn = hf_init
trainer_init_config = {"epochs": 5, "save_strategy": "epoch"}
diff --git a/python/ray/train/tests/test_training_iterator.py b/python/ray/train/tests/test_training_iterator.py
index 52376a69efe6..0c625c5f0c3f 100644
--- a/python/ray/train/tests/test_training_iterator.py
+++ b/python/ray/train/tests/test_training_iterator.py
@@ -1,26 +1,26 @@
import functools
import time
from unittest.mock import patch
+
import pytest
-from ray.train._internal.worker_group import WorkerGroup
-from ray.train.trainer import TrainingIterator
import ray
from ray import train
-from ray.train import DataConfig
from ray.air._internal.util import StartTraceback
-from ray.train.backend import BackendConfig
-from ray.train._internal.session import init_session, get_session
+from ray.train import DataConfig
from ray.train._internal.backend_executor import BackendExecutor
+from ray.train._internal.session import get_session, init_session
from ray.train._internal.utils import construct_train_func
-from ray.train.examples.tf.tensorflow_mnist_example import (
- train_func as tensorflow_mnist_train_func,
-)
+from ray.train._internal.worker_group import WorkerGroup
+from ray.train.backend import BackendConfig
from ray.train.examples.pytorch.torch_linear_example import (
train_func as linear_train_func,
)
-
+from ray.train.examples.tf.tensorflow_mnist_example import (
+ train_func as tensorflow_mnist_train_func,
+)
from ray.train.tests.util import mock_storage_context
+from ray.train.trainer import TrainingIterator
MAX_RETRIES = 3
diff --git a/python/ray/train/tests/test_transformers_checkpoint.py b/python/ray/train/tests/test_transformers_checkpoint.py
index c35dd0e73366..b938cdacdba8 100644
--- a/python/ray/train/tests/test_transformers_checkpoint.py
+++ b/python/ray/train/tests/test_transformers_checkpoint.py
@@ -1,15 +1,13 @@
+from test_transformers_predictor import (
+ model_checkpoint,
+ test_strings,
+ tokenizer_checkpoint,
+)
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
from ray.train.huggingface import TransformersCheckpoint
-
-
from ray.train.tests.dummy_preprocessor import DummyPreprocessor
from ray.train.tests.test_torch_checkpoint import assert_equal_torch_models
-from test_transformers_predictor import (
- model_checkpoint,
- tokenizer_checkpoint,
- test_strings,
-)
def test_transformers_checkpoint(tmp_path):
@@ -42,6 +40,7 @@ def test_transformers_checkpoint(tmp_path):
if __name__ == "__main__":
import sys
+
import pytest
sys.exit(pytest.main(["-v", "-x", __file__]))
diff --git a/python/ray/train/tests/test_transformers_predictor.py b/python/ray/train/tests/test_transformers_predictor.py
index 695ded6a54bd..b0dc8220c359 100644
--- a/python/ray/train/tests/test_transformers_predictor.py
+++ b/python/ray/train/tests/test_transformers_predictor.py
@@ -4,23 +4,18 @@
import numpy as np
import pandas as pd
import pytest
-from ray.air.constants import MAX_REPR_LENGTH
-from ray.air.util.data_batch_conversion import _convert_pandas_to_batch_type
-from ray.train.predictor import TYPE_TO_ENUM
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
GPT2LMHeadModel,
)
-from transformers.pipelines import pipeline, Pipeline
-
-
-from ray.train.huggingface import (
- TransformersCheckpoint,
- TransformersPredictor,
-)
+from transformers.pipelines import Pipeline, pipeline
+from ray.air.constants import MAX_REPR_LENGTH
+from ray.air.util.data_batch_conversion import _convert_pandas_to_batch_type
+from ray.train.huggingface import TransformersCheckpoint, TransformersPredictor
+from ray.train.predictor import TYPE_TO_ENUM
from ray.train.tests.dummy_preprocessor import DummyPreprocessor
test_strings = ["Complete me", "And me", "Please complete"]
diff --git a/python/ray/train/tests/test_transformers_trainer.py b/python/ray/train/tests/test_transformers_trainer.py
index 11a7fce174e8..823ca49a9c01 100644
--- a/python/ray/train/tests/test_transformers_trainer.py
+++ b/python/ray/train/tests/test_transformers_trainer.py
@@ -1,19 +1,14 @@
import pandas as pd
import pytest
from datasets import Dataset
-from transformers import (
- AutoConfig,
- AutoModelForCausalLM,
- Trainer,
- TrainingArguments,
-)
+from transformers import AutoConfig, AutoModelForCausalLM, Trainer, TrainingArguments
import ray.data
-from ray.train.huggingface import TransformersTrainer
-from ray.train.trainer import TrainingFailedError
+from ray import tune
from ray.train import ScalingConfig
+from ray.train.huggingface import TransformersTrainer
from ray.train.tests._huggingface_data import train_data, validation_data
-from ray import tune
+from ray.train.trainer import TrainingFailedError
from ray.tune import Tuner
from ray.tune.schedulers.async_hyperband import ASHAScheduler
from ray.tune.schedulers.resource_changing_scheduler import (
diff --git a/python/ray/train/tests/test_transformers_trainer_steps.py b/python/ray/train/tests/test_transformers_trainer_steps.py
index 83f52966913f..e6c97db1c9a5 100644
--- a/python/ray/train/tests/test_transformers_trainer_steps.py
+++ b/python/ray/train/tests/test_transformers_trainer_steps.py
@@ -2,20 +2,13 @@
import pandas as pd
import pytest
-from transformers import (
- AutoConfig,
- AutoModelForCausalLM,
- Trainer,
- TrainingArguments,
-)
+from transformers import AutoConfig, AutoModelForCausalLM, Trainer, TrainingArguments
import ray.data
-from ray.train.huggingface import (
- TransformersTrainer,
-)
-from ray.train.trainer import TrainingFailedError
from ray.train import ScalingConfig
+from ray.train.huggingface import TransformersTrainer
from ray.train.tests._huggingface_data import train_data, validation_data
+from ray.train.trainer import TrainingFailedError
# 16 first rows of tokenized wikitext-2-raw-v1 training & validation
train_df = pd.read_json(train_data)
diff --git a/python/ray/train/tests/test_tune.py b/python/ray/train/tests/test_tune.py
index 089b4f0ae577..6f9b743e5c62 100644
--- a/python/ray/train/tests/test_tune.py
+++ b/python/ray/train/tests/test_tune.py
@@ -9,19 +9,18 @@
from ray.train._internal.worker_group import WorkerGroup
from ray.train.backend import Backend, BackendConfig
from ray.train.data_parallel_trainer import DataParallelTrainer
-from ray.train.examples.tf.tensorflow_mnist_example import (
- train_func as tensorflow_mnist_train_func,
-)
from ray.train.examples.pytorch.torch_fashion_mnist_example import (
train_func_per_worker as fashion_mnist_train_func,
)
+from ray.train.examples.tf.tensorflow_mnist_example import (
+ train_func as tensorflow_mnist_train_func,
+)
from ray.train.tensorflow.tensorflow_trainer import TensorflowTrainer
+from ray.train.tests.util import create_dict_checkpoint, load_dict_checkpoint
from ray.train.torch.torch_trainer import TorchTrainer
from ray.tune.tune_config import TuneConfig
from ray.tune.tuner import Tuner
-from ray.train.tests.util import create_dict_checkpoint, load_dict_checkpoint
-
@pytest.fixture(scope="module")
def ray_start_4_cpus():
diff --git a/python/ray/train/tests/test_utils.py b/python/ray/train/tests/test_utils.py
index 9c06a998094a..0974b00bb13f 100644
--- a/python/ray/train/tests/test_utils.py
+++ b/python/ray/train/tests/test_utils.py
@@ -18,7 +18,8 @@ def test_construct_path():
if __name__ == "__main__":
- import pytest
import sys
+ import pytest
+
sys.exit(pytest.main(["-v", "-x", __file__]))
diff --git a/python/ray/train/tests/test_windows.py b/python/ray/train/tests/test_windows.py
index f3c922cecddd..1c675c60cc05 100644
--- a/python/ray/train/tests/test_windows.py
+++ b/python/ray/train/tests/test_windows.py
@@ -7,7 +7,6 @@
import ray
from ray import train, tune
from ray.train.data_parallel_trainer import DataParallelTrainer
-
from ray.train.tests.util import create_dict_checkpoint
diff --git a/python/ray/train/tests/test_worker_group.py b/python/ray/train/tests/test_worker_group.py
index 1c5445977953..20b7a4989469 100644
--- a/python/ray/train/tests/test_worker_group.py
+++ b/python/ray/train/tests/test_worker_group.py
@@ -3,8 +3,8 @@
import pytest
import ray
-from ray.train._internal.worker_group import WorkerGroup, Worker, WorkerMetadata
import ray._private.ray_constants as ray_constants
+from ray.train._internal.worker_group import Worker, WorkerGroup, WorkerMetadata
@pytest.fixture
diff --git a/python/ray/train/tests/test_xgboost_predictor.py b/python/ray/train/tests/test_xgboost_predictor.py
index 064f50c6a8aa..34abf41ea09b 100644
--- a/python/ray/train/tests/test_xgboost_predictor.py
+++ b/python/ray/train/tests/test_xgboost_predictor.py
@@ -7,12 +7,8 @@
from ray.air.util.data_batch_conversion import _convert_pandas_to_batch_type
from ray.train.predictor import TYPE_TO_ENUM
-from ray.train.xgboost import (
- XGBoostCheckpoint,
- XGBoostPredictor,
-)
-
from ray.train.tests.dummy_preprocessor import DummyPreprocessor
+from ray.train.xgboost import XGBoostCheckpoint, XGBoostPredictor
dummy_data = np.array([[1, 2], [3, 4], [5, 6]])
dummy_target = np.array([0, 1, 0])
diff --git a/python/ray/train/tests/test_xgboost_trainer.py b/python/ray/train/tests/test_xgboost_trainer.py
index 60534cb1099c..e82cd2a6203e 100644
--- a/python/ray/train/tests/test_xgboost_trainer.py
+++ b/python/ray/train/tests/test_xgboost_trainer.py
@@ -1,19 +1,17 @@
-import pytest
import json
-import pandas as pd
+import pandas as pd
+import pytest
import xgboost as xgb
+from sklearn.datasets import load_breast_cancer
+from sklearn.model_selection import train_test_split
import ray
from ray import train, tune
from ray.train import ScalingConfig
from ray.train.constants import TRAIN_DATASET_KEY
-
from ray.train.xgboost import XGBoostTrainer
-from sklearn.datasets import load_breast_cancer
-from sklearn.model_selection import train_test_split
-
@pytest.fixture
def ray_start_4_cpus():
@@ -222,7 +220,8 @@ def test_xgboost_trainer_resources():
if __name__ == "__main__":
- import pytest
import sys
+ import pytest
+
sys.exit(pytest.main(["-v", "-x", __file__]))
diff --git a/python/ray/train/torch/config.py b/python/ray/train/torch/config.py
index 688e102179b2..c65b66bd0534 100644
--- a/python/ray/train/torch/config.py
+++ b/python/ray/train/torch/config.py
@@ -1,19 +1,18 @@
-from dataclasses import dataclass
import logging
import os
+from dataclasses import dataclass
from datetime import timedelta
from typing import Optional
-import ray
-from ray.train.backend import BackendConfig, Backend
-from ray.train.constants import DEFAULT_NCCL_SOCKET_IFNAME
-from ray.train._internal.worker_group import WorkerGroup
-from ray.train._internal.utils import get_address_and_port
-from ray.util import PublicAPI
-
import torch
import torch.distributed as dist
+import ray
+from ray.train._internal.utils import get_address_and_port
+from ray.train._internal.worker_group import WorkerGroup
+from ray.train.backend import Backend, BackendConfig
+from ray.train.constants import DEFAULT_NCCL_SOCKET_IFNAME
+from ray.util import PublicAPI
logger = logging.getLogger(__name__)
diff --git a/python/ray/train/torch/torch_checkpoint.py b/python/ray/train/torch/torch_checkpoint.py
index 7f64e313066e..65e33be91c56 100644
--- a/python/ray/train/torch/torch_checkpoint.py
+++ b/python/ray/train/torch/torch_checkpoint.py
@@ -1,14 +1,15 @@
-from typing import TYPE_CHECKING, Any, Dict, Optional
import os
import tempfile
-import torch
import warnings
+from typing import TYPE_CHECKING, Any, Dict, Optional
+
+import torch
-from ray.train._internal.framework_checkpoint import FrameworkCheckpoint
from ray.air._internal.torch_utils import (
- load_torch_model,
consume_prefix_in_state_dict_if_present_not_in_place,
+ load_torch_model,
)
+from ray.train._internal.framework_checkpoint import FrameworkCheckpoint
from ray.util.annotations import PublicAPI
if TYPE_CHECKING:
diff --git a/python/ray/train/torch/train_loop_utils.py b/python/ray/train/torch/train_loop_utils.py
index 34a94266b177..e44e8b0cdf9d 100644
--- a/python/ray/train/torch/train_loop_utils.py
+++ b/python/ray/train/torch/train_loop_utils.py
@@ -1,34 +1,34 @@
+import collections
import logging
import os
import random
import types
-import collections
+from typing import Any, Callable, Dict, List, Optional, Union
+
import numpy as np
+import torch
from packaging.version import Version
-from typing import Any, Dict, List, Optional, Callable, Union
+from torch.cuda.amp import GradScaler, autocast
+from torch.nn.parallel import DistributedDataParallel
+from torch.optim import Optimizer
+from torch.utils.data import (
+ DataLoader,
+ DistributedSampler,
+ IterableDataset,
+ RandomSampler,
+ SequentialSampler,
+)
+from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag
from ray.train._internal import session
from ray.train._internal.accelerator import Accelerator
from ray.train._internal.session import get_accelerator, set_accelerator
-from ray.util.annotations import PublicAPI, Deprecated
-from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag
-
-import torch
-from torch.cuda.amp import autocast, GradScaler
-from torch.nn.parallel import DistributedDataParallel
-from torch.optim import Optimizer
+from ray.util.annotations import Deprecated, PublicAPI
if Version(torch.__version__) < Version("1.11.0"):
FullyShardedDataParallel = None
else:
from torch.distributed.fsdp import FullyShardedDataParallel
-from torch.utils.data import (
- DistributedSampler,
- DataLoader,
- IterableDataset,
- SequentialSampler,
- RandomSampler,
-)
try:
from torch.profiler import profile
diff --git a/python/ray/train/trainer.py b/python/ray/train/trainer.py
index c6f219eb031c..2e16355c0e91 100644
--- a/python/ray/train/trainer.py
+++ b/python/ray/train/trainer.py
@@ -4,20 +4,14 @@
from ray.air._internal.util import StartTraceback
from ray.data import Dataset
-from ray.train import DataConfig
+from ray.train import Checkpoint, DataConfig
from ray.train._internal.backend_executor import (
BackendExecutor,
InactiveWorkerGroupError,
TrainBackendError,
TrainingWorkerError,
)
-from ray.train._internal.session import (
- _TrainingResult,
- _TrainSession,
- get_session,
-)
-from ray.train import Checkpoint
-
+from ray.train._internal.session import _TrainingResult, _TrainSession, get_session
from ray.train._internal.utils import ActorWrapper
from ray.train.backend import BackendConfig
from ray.train.base_trainer import ( # noqa: F401
@@ -27,7 +21,6 @@
)
from ray.util.annotations import DeveloperAPI
-
T = TypeVar("T")
S = TypeVar("S")
diff --git a/python/ray/train/xgboost/xgboost_trainer.py b/python/ray/train/xgboost/xgboost_trainer.py
index f61cd5919ed7..bf89c06cb536 100644
--- a/python/ray/train/xgboost/xgboost_trainer.py
+++ b/python/ray/train/xgboost/xgboost_trainer.py
@@ -1,19 +1,19 @@
import os
from typing import Any, Dict
-try:
- from packaging.version import Version
-except ImportError:
- from distutils.version import LooseVersion as Version
+import xgboost
+import xgboost_ray
+from xgboost_ray.tune import TuneReportCheckpointCallback
from ray.train import Checkpoint
from ray.train.gbdt_trainer import GBDTTrainer
from ray.train.xgboost import XGBoostCheckpoint
from ray.util.annotations import PublicAPI
-import xgboost
-import xgboost_ray
-from xgboost_ray.tune import TuneReportCheckpointCallback
+try:
+ from packaging.version import Version
+except ImportError:
+ from distutils.version import LooseVersion as Version
@PublicAPI(stability="beta")