diff --git a/doc/source/rllib/rllib-saving-and-loading-algos-and-policies.rst b/doc/source/rllib/rllib-saving-and-loading-algos-and-policies.rst index d49cb94585803..4ffe3a4e00c8d 100644 --- a/doc/source/rllib/rllib-saving-and-loading-algos-and-policies.rst +++ b/doc/source/rllib/rllib-saving-and-loading-algos-and-policies.rst @@ -8,7 +8,7 @@ Saving and Loading your RL Algorithms and Policies ################################################## -You can use :py:class:`~ray.air.checkpoint.Checkpoint` objects to store +You can use :py:class:`~ray.train.Checkpoint` objects to store and load the current state of your :py:class:`~ray.rllib.algorithms.algorithm.Algorithm` or :py:class:`~ray.rllib.policy.policy.Policy` and the neural networks (weights) within these structures. In the following, we will cover how you can create these @@ -26,7 +26,7 @@ or a single :py:class:`~ray.rllib.policy.policy.Policy` instance. The Algorithm- or Policy instances that were used to create the checkpoint in the first place may or may not have been trained prior to this. -RLlib uses the :py:class:`~ray.air.checkpoint.Checkpoint` class to create checkpoints and +RLlib uses the :py:class:`~ray.train.Checkpoint` class to create checkpoints and restore objects from them. The main file in a checkpoint directory, containing the state information, is currently @@ -50,7 +50,7 @@ How do I create an Algorithm checkpoint? ---------------------------------------- The :py:class:`~ray.rllib.algorithms.algorithm.Algorithm` ``save()`` method creates a new checkpoint -(directory with files in it) and returns the path to that directory. +(directory with files in it). Let's take a look at a simple example of how to create such an Algorithm checkpoint: @@ -69,8 +69,6 @@ like this: $ ls -la . ..
- .is_checkpoint - .tune_metadata policies/ algorithm_state.pkl rllib_checkpoint.json diff --git a/doc/source/train/examples/pytorch/convert_existing_pytorch_code_to_ray_train.ipynb b/doc/source/train/examples/pytorch/convert_existing_pytorch_code_to_ray_train.ipynb index 1f8c0eefa90c6..813d3771f197e 100644 --- a/doc/source/train/examples/pytorch/convert_existing_pytorch_code_to_ray_train.ipynb +++ b/doc/source/train/examples/pytorch/convert_existing_pytorch_code_to_ray_train.ipynb @@ -821,12 +821,18 @@ "Enabling checkpointing is pretty easy - we just need to pass a `Checkpoint` object with the model state to the `ray.train.report()` API.\n", "\n", "```python\n", + " from ray import train\n", " from ray.train import Checkpoint\n", "\n", " with TemporaryDirectory() as tmpdir:\n", - " torch.save(model.state_dict(), os.path.join(tmpdir, \"checkpoint.pt\"))\n", - " train.report(dict(loss=test_loss), \n", - " checkpoint=Checkpoint.from_directory(tmpdir))\n", + " torch.save(\n", + " {\n", + " \"epoch\": epoch,\n", + " \"model\": model.module.state_dict()\n", + " },\n", + " os.path.join(tmpdir, \"checkpoint.pt\")\n", + " )\n", + " train.report(dict(loss=test_loss), checkpoint=Checkpoint.from_directory(tmpdir))\n", "```\n", "\n", "### Move the data loader to the training function\n", @@ -888,11 +894,17 @@ " loss_fn = nn.CrossEntropyLoss()\n", " optimizer = torch.optim.SGD(model.parameters(), lr=lr)\n", " \n", - " for t in range(epochs):\n", + " for epoch in range(epochs):\n", " train_epoch(train_dataloader, model, loss_fn, optimizer)\n", " test_loss = test_epoch(test_dataloader, model, loss_fn)\n", " with TemporaryDirectory() as tmpdir:\n", - " torch.save(model.state_dict(), os.path.join(tmpdir, \"checkpoint.pt\"))\n", + " torch.save(\n", + " {\n", + " \"epoch\": epoch,\n", + " \"model\": model.module.state_dict()\n", + " },\n", + " os.path.join(tmpdir, \"checkpoint.pt\")\n", + " )\n", " train.report(dict(loss=test_loss), checkpoint=Checkpoint.from_directory(tmpdir))\n", "\n", " print(\"Done!\")" diff --git a/doc/source/train/examples/pytorch/pytorch_resnet_finetune.ipynb b/doc/source/train/examples/pytorch/pytorch_resnet_finetune.ipynb index 4f731cba59c43..8e23947e6a209 100644 --- a/doc/source/train/examples/pytorch/pytorch_resnet_finetune.ipynb +++ b/doc/source/train/examples/pytorch/pytorch_resnet_finetune.ipynb @@ -356,7 +356,7 @@ "metadata": {}, "outputs": [], "source": [ - "from ray.train.torch import TorchTrainer, LegacyTorchCheckpoint\n", + "from ray.train.torch import TorchTrainer\n", "from ray.train import ScalingConfig, RunConfig, CheckpointConfig\n", "\n", "# Scale out model training across 4 GPUs.\n", diff --git a/doc/source/tune/tutorials/tune-storage.rst b/doc/source/tune/tutorials/tune-storage.rst index f4fda12ac1048..90cb26853e91b 100644 --- a/doc/source/tune/tutorials/tune-storage.rst +++ b/doc/source/tune/tutorials/tune-storage.rst @@ -166,7 +166,7 @@ that implements saving and loading checkpoints. import os import ray - from ray import air, tune + from ray import train, tune from your_module import my_trainable # Look for the existing cluster and connect to it @@ -179,14 +179,14 @@ that implements saving and loading checkpoints. tuner = tune.Tuner( my_trainable, - run_config=air.RunConfig( + run_config=train.RunConfig( # Name of your experiment name="my-tune-exp", # Configure how experiment data and checkpoints are persisted. # We recommend cloud storage checkpointing as it survives the cluster when # instances are terminated and has better performance. 
storage_path="s3://my-checkpoints-bucket/path/", - checkpoint_config=air.CheckpointConfig( + checkpoint_config=train.CheckpointConfig( # We'll keep the best five checkpoints at all times # (with the highest AUC scores, a metric reported by the trainable) checkpoint_score_attribute="max-auc", diff --git a/python/ray/air/checkpoint.py b/python/ray/air/checkpoint.py index 417981b2000de..fc2398226d793 100644 --- a/python/ray/air/checkpoint.py +++ b/python/ray/air/checkpoint.py @@ -28,7 +28,7 @@ ) from ray.air._internal.util import _copy_dir_ignore_conflicts from ray.air.constants import PREPROCESSOR_KEY, CHECKPOINT_ID_ATTR -from ray.util.annotations import DeveloperAPI, PublicAPI +from ray.util.annotations import Deprecated, DeveloperAPI if TYPE_CHECKING: from ray.data.preprocessor import Preprocessor @@ -63,9 +63,9 @@ class _CheckpointMetadata: checkpoint_state: Dict[str, Any] -@PublicAPI(stability="beta") +@Deprecated class Checkpoint: - """Ray AIR Checkpoint. + """[Deprecated] Ray AIR Checkpoint. An AIR Checkpoint is a common interface for accessing models across different AIR components and libraries. A Checkpoint can have its data @@ -166,6 +166,16 @@ def __init__( data_dict: Optional[dict] = None, uri: Optional[str] = None, ): + from ray.train._internal.storage import _use_storage_context + + if _use_storage_context(): + raise DeprecationWarning( + "`ray.air.Checkpoint` is deprecated. " + "Please use `ray.train.Checkpoint` instead. " + "See the `Checkpoint: New API` section in " + "https://github.com/ray-project/ray/issues/37868 for a migration guide." + ) + # First, resolve file:// URIs to local paths if uri: local_path = _get_local_path(uri) @@ -269,14 +279,6 @@ def path(self) -> Optional[str]: In all other cases, this will return None. - Example: - - >>> from ray.air import Checkpoint - >>> checkpoint = Checkpoint.from_uri("s3://some-bucket/some-location") - >>> assert checkpoint.path == "s3://some-bucket/some-location" - >>> checkpoint = Checkpoint.from_dict({"data": 1}) - >>> assert checkpoint.path == None - Returns: Checkpoint path if this checkpoint is reachable from the current node (e.g. cloud storage or locally available directory). @@ -302,14 +304,6 @@ def uri(self) -> Optional[str]: persist to cloud with :meth:`Checkpoint.to_uri()`. - Example: - - >>> from ray.air import Checkpoint - >>> checkpoint = Checkpoint.from_uri("s3://some-bucket/some-location") - >>> assert checkpoint.uri == "s3://some-bucket/some-location" - >>> checkpoint = Checkpoint.from_dict({"data": 1}) - >>> assert checkpoint.uri == None - Returns: Checkpoint URI if this URI is reachable from the current node (e.g. cloud storage or locally available file URI).
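The deprecation above moves users from the dict-based `ray.air.Checkpoint` to the directory-based `ray.train.Checkpoint`. A minimal migration sketch, assuming state previously stored via `Checkpoint.from_dict()`; the file name `state.pkl` and the payload are illustrative placeholders, not part of this patch:

```python
import os
import pickle
import tempfile

from ray.train import Checkpoint

# Before: ckpt = ray.air.Checkpoint.from_dict({"step": 10})
# After: write the same state to a file and wrap the directory.
tmpdir = tempfile.mkdtemp()
with open(os.path.join(tmpdir, "state.pkl"), "wb") as f:
    pickle.dump({"step": 10}, f)
ckpt = Checkpoint.from_directory(tmpdir)

# Read the state back out. as_directory() also works for checkpoints on
# remote storage, materializing them to a local directory first.
with ckpt.as_directory() as ckpt_dir:
    with open(os.path.join(ckpt_dir, "state.pkl"), "rb") as f:
        state = pickle.load(f)
assert state["step"] == 10
```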
diff --git a/python/ray/air/integrations/comet.py b/python/ray/air/integrations/comet.py index 159ddde4755d2..724c199f2762a 100644 --- a/python/ray/air/integrations/comet.py +++ b/python/ray/air/integrations/comet.py @@ -3,7 +3,7 @@ import pyarrow.fs -from ray.train import _use_storage_context +from ray.train._internal.storage import _use_storage_context from ray.tune.logger import LoggerCallback from ray.tune.experiment import Trial from ray.tune.utils import flatten_dict diff --git a/python/ray/air/integrations/wandb.py b/python/ray/air/integrations/wandb.py index 755cd5a715b0e..f7a92bcfc9535 100644 --- a/python/ray/air/integrations/wandb.py +++ b/python/ray/air/integrations/wandb.py @@ -18,7 +18,7 @@ from ray.air._internal import usage as air_usage from ray.air.util.node import _force_on_current_node -from ray.train import _use_storage_context +from ray.train._internal.storage import _use_storage_context from ray.tune.logger import LoggerCallback from ray.tune.utils import flatten_dict from ray.tune.experiment import Trial diff --git a/python/ray/air/tests/test_keras_callback.py b/python/ray/air/tests/test_keras_callback.py index e3a933a572bab..395b99106b3a0 100644 --- a/python/ray/air/tests/test_keras_callback.py +++ b/python/ray/air/tests/test_keras_callback.py @@ -138,7 +138,7 @@ def test_report_and_checkpoint_on_different_events(self, mock_report, model): assert second_metric == {"loss": 1} assert second_checkpoint is not None - def parse_call(self, call) -> Tuple[Dict, ray.air.Checkpoint]: + def parse_call(self, call) -> Tuple[Dict, train.Checkpoint]: (metrics,), kwargs = call checkpoint = kwargs["checkpoint"] return metrics, checkpoint diff --git a/python/ray/train/__init__.py b/python/ray/train/__init__.py index bef2f190a02eb..d1a3c0df3813f 100644 --- a/python/ray/train/__init__.py +++ b/python/ray/train/__init__.py @@ -13,14 +13,8 @@ from ray._private.usage import usage_lib -from ray.train._internal.storage import _use_storage_context - # Import this first so it can be used in other modules -if _use_storage_context(): - from ray.train._checkpoint import Checkpoint -else: - from ray.air import Checkpoint - +from ray.train._checkpoint import Checkpoint from ray.train._internal.data_config import DataConfig from ray.train._internal.session import get_checkpoint, get_dataset_shard, report from ray.train._internal.syncer import SyncConfig @@ -34,6 +28,7 @@ usage_lib.record_library_usage("train") +Checkpoint.__module__ = "ray.train" __all__ = [ "get_checkpoint", diff --git a/python/ray/train/_internal/session.py b/python/ray/train/_internal/session.py index 20b27d82d5332..000f0b2f19474 100644 --- a/python/ray/train/_internal/session.py +++ b/python/ray/train/_internal/session.py @@ -17,7 +17,6 @@ import ray from ray.air._internal.session import _get_session from ray.air._internal.util import StartTraceback, RunnerThread -from ray.air.checkpoint import Checkpoint from ray.air.constants import ( _RESULT_FETCH_TIMEOUT, _ERROR_FETCH_TIMEOUT, @@ -26,7 +25,7 @@ TIME_THIS_ITER_S, ) from ray.data import Dataset, DatasetPipeline -from ray.train._checkpoint import Checkpoint as NewCheckpoint +from ray.train import Checkpoint from ray.train._internal.accelerator import Accelerator from ray.train._internal.storage import _use_storage_context, StorageContext from ray.train.constants import ( @@ -79,6 +78,7 @@ class TrialInfo: experiment_name: Optional[str] = None +# TODO(justinvyu): [code_removal] @dataclass class TrainingResult: type: TrainingResultType @@ -576,20 +576,13 @@ def 
_report_training_result(self, training_result: _TrainingResult) -> None: sys.exit(0) def new_report( - self, metrics: Dict, checkpoint: Optional[NewCheckpoint] = None + self, metrics: Dict, checkpoint: Optional[Checkpoint] = None ) -> None: if self.ignore_report: return persisted_checkpoint = None if checkpoint: - # TODO(justinvyu): [code_removal] - if not isinstance(checkpoint, NewCheckpoint): - raise ValueError( - "You must pass a `ray.train.Checkpoint` " - "object to `train.report`. `ray.air.Checkpoint` is deprecated." - ) - # Persist the reported checkpoint files to storage. persisted_checkpoint = self.storage.persist_current_checkpoint(checkpoint) diff --git a/python/ray/train/lightning/lightning_checkpoint.py b/python/ray/train/lightning/lightning_checkpoint.py index 968ee79250587..1841ec5a13ea9 100644 --- a/python/ray/train/lightning/lightning_checkpoint.py +++ b/python/ray/train/lightning/lightning_checkpoint.py @@ -216,48 +216,6 @@ def get_model( ) -> pl.LightningModule: """Retrieve the model stored in this checkpoint. - Example: - .. testcode:: - - import pytorch_lightning as pl - from ray.train.lightning import LightningCheckpoint, LightningPredictor - - class MyLightningModule(pl.LightningModule): - def __init__(self, input_dim, output_dim) -> None: - super().__init__() - self.linear = nn.Linear(input_dim, output_dim) - self.save_hyperparameters() - - # ... - - # After the training is finished, LightningTrainer saves AIR - # checkpoints in the result directory, for example: - # ckpt_dir = "{storage_path}/LightningTrainer_.*/checkpoint_000000" - - # You can load model checkpoint with model init arguments - def load_checkpoint(ckpt_dir): - ckpt = LightningCheckpoint.from_directory(ckpt_dir) - - # `get_model()` takes the argument list of - # `LightningModule.load_from_checkpoint()` as additional kwargs. - # Please refer to PyTorch Lightning API for more details. - - return checkpoint.get_model( - model_class=MyLightningModule, - input_dim=32, - output_dim=10, - ) - - # You can also load checkpoint with a hyperparameter file - def load_checkpoint_with_hparams( - ckpt_dir, hparam_file="./hparams.yaml" - ): - ckpt = LightningCheckpoint.from_directory(ckpt_dir) - return ckpt.get_model( - model_class=MyLightningModule, - hparams_file=hparam_file - ) - Args: model_class: A subclass of ``pytorch_lightning.LightningModule`` that defines your model and training logic. 
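Since the inline example above is dropped from the `get_model()` docstring, here is a hedged sketch of the usage it demonstrated. `MyLightningModule` and the checkpoint path are illustrative placeholders; extra kwargs are forwarded to `LightningModule.load_from_checkpoint()` as the docstring describes:

```python
import pytorch_lightning as pl
import torch.nn as nn

from ray.train.lightning import LightningCheckpoint


class MyLightningModule(pl.LightningModule):
    def __init__(self, input_dim: int, output_dim: int) -> None:
        super().__init__()
        self.linear = nn.Linear(input_dim, output_dim)
        # Record init args so they can be restored from the checkpoint.
        self.save_hyperparameters()


# Placeholder path, e.g. "{storage_path}/LightningTrainer_.../checkpoint_000000".
ckpt = LightningCheckpoint.from_directory("path/to/checkpoint_000000")

# Kwargs beyond model_class are passed through to load_from_checkpoint().
model = ckpt.get_model(
    model_class=MyLightningModule,
    input_dim=32,
    output_dim=10,
)
```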
diff --git a/python/ray/train/predictor.py b/python/ray/train/predictor.py index d503f469ecd6f..5b8686c58da2c 100644 --- a/python/ray/train/predictor.py +++ b/python/ray/train/predictor.py @@ -4,7 +4,7 @@ import numpy as np import pandas as pd -from ray.air.checkpoint import Checkpoint +from ray.train import Checkpoint from ray.air.data_batch_type import DataBatchType from ray.air.util.data_batch_conversion import ( BatchFormat, @@ -108,13 +108,13 @@ def from_pandas_udf( class PandasUDFPredictor(Predictor): @classmethod - def from_checkpoint(cls, checkpoint: Checkpoint, **kwargs): + def from_checkpoint(cls, checkpoint: Checkpoint, **kwargs) -> "Predictor": return PandasUDFPredictor() def _predict_pandas(self, df, **kwargs) -> "pd.DataFrame": return pandas_udf(df, **kwargs) - return PandasUDFPredictor.from_checkpoint(Checkpoint.from_dict({"dummy": 1})) + return PandasUDFPredictor() def get_preprocessor(self) -> Optional[Preprocessor]: """Get the preprocessor to use prior to executing predictions.""" diff --git a/python/ray/train/tensorflow/tensorflow_checkpoint.py b/python/ray/train/tensorflow/tensorflow_checkpoint.py index b5ae62110294f..f93a745dce1bd 100644 --- a/python/ray/train/tensorflow/tensorflow_checkpoint.py +++ b/python/ray/train/tensorflow/tensorflow_checkpoint.py @@ -207,23 +207,6 @@ def from_model( Returns: A :py:class:`TensorflowCheckpoint` containing the specified model. - - Examples: - - .. testcode:: - - from ray.train.tensorflow import TensorflowCheckpoint - import tensorflow as tf - - model = tf.keras.applications.resnet.ResNet101() - checkpoint = TensorflowCheckpoint.from_model(model) - - .. testoutput:: - :options: +MOCK - :hide: - - ... # Model may or may not be downloaded - """ checkpoint = cls.from_dict( {PREPROCESSOR_KEY: preprocessor, MODEL_KEY: model.get_weights()} diff --git a/python/ray/train/tests/test_transformers_trainer.py b/python/ray/train/tests/test_transformers_trainer.py index c6c4daf867c33..11a7fce174e8f 100644 --- a/python/ray/train/tests/test_transformers_trainer.py +++ b/python/ray/train/tests/test_transformers_trainer.py @@ -9,11 +9,7 @@ ) import ray.data -from ray.train.huggingface import ( - TransformersPredictor, - TransformersTrainer, - LegacyTransformersCheckpoint, -) +from ray.train.huggingface import TransformersTrainer from ray.train.trainer import TrainingFailedError from ray.train import ScalingConfig from ray.train.tests._huggingface_data import train_data, validation_data @@ -90,33 +86,6 @@ def train_function_local_dataset(train_dataset, eval_dataset=None, **config): return train_function(train_dataset, eval_dataset, **config) -def test_deprecations(ray_start_4_cpus): - """Tests that soft deprecations warn but still can be used""" - from ray.train.huggingface import ( - HuggingFaceCheckpoint, - HuggingFacePredictor, - HuggingFaceTrainer, - ) - - ray_train = ray.data.from_pandas(train_df) - ray_validation = ray.data.from_pandas(validation_df) - - with pytest.warns(DeprecationWarning): - obj = HuggingFaceCheckpoint.from_dict({"foo": "bar"}) - assert isinstance(obj, LegacyTransformersCheckpoint) - - with pytest.warns(DeprecationWarning): - obj = HuggingFacePredictor() - assert isinstance(obj, TransformersPredictor) - - with pytest.warns(DeprecationWarning): - obj = HuggingFaceTrainer( - train_function, - datasets={"train": ray_train, "evaluation": ray_validation}, - ) - assert isinstance(obj, TransformersTrainer) - - @pytest.mark.parametrize("save_strategy", ["no", "epoch"]) def test_e2e(ray_start_4_cpus, save_strategy): ray_train = 
ray.data.from_pandas(train_df) diff --git a/python/ray/train/torch/torch_checkpoint.py b/python/ray/train/torch/torch_checkpoint.py index 690872f78b964..865c3bd65ac5a 100644 --- a/python/ray/train/torch/torch_checkpoint.py +++ b/python/ray/train/torch/torch_checkpoint.py @@ -287,41 +287,6 @@ def from_state_dict( Returns: A :class:`TorchCheckpoint` containing the specified state dictionary. - - Examples: - - .. testcode:: - - import torch - import torch.nn as nn - from ray.train.torch import TorchCheckpoint - - # Set manual seed - torch.manual_seed(42) - - # Function to create a NN model - def create_model() -> nn.Module: - model = nn.Sequential(nn.Linear(1, 10), - nn.ReLU(), - nn.Linear(10,1)) - return model - - # Create a TorchCheckpoint from our model's state_dict - model = create_model() - checkpoint = TorchCheckpoint.from_state_dict(model.state_dict()) - - # Now load the model from the TorchCheckpoint by providing the - # model architecture - model_from_chkpt = checkpoint.get_model(create_model()) - - # Assert they have the same state dict - assert str(model.state_dict()) == str(model_from_chkpt.state_dict()) - print("worked") - - .. testoutput:: - :hide: - - ... """ return cls.from_dict({PREPROCESSOR_KEY: preprocessor, MODEL_KEY: state_dict}) @@ -348,42 +313,6 @@ def from_model( Returns: A :class:`TorchCheckpoint` containing the specified model. - - Examples: - - .. testcode:: - - from ray.train.torch import LegacyTorchCheckpoint - from ray.train.torch import TorchPredictor - import torch - - # Set manual seed - torch.manual_seed(42) - - # Create model identity and send a random tensor to it - model = torch.nn.Identity() - input = torch.randn(2, 2) - output = model(input) - - # Create a checkpoint - checkpoint = LegacyTorchCheckpoint.from_model(model) - - # You can use a class TorchCheckpoint to create an - # a class ray.train.torch.TorchPredictor and perform inference. - predictor = TorchPredictor.from_checkpoint(checkpoint) - pred = predictor.predict(input.numpy()) - - # Convert prediction dictionary value into a tensor - pred = torch.tensor(pred['predictions']) - - # Assert the output from the original and checkoint model are the same - assert torch.equal(output, pred) - print("worked") - - .. testoutput:: - :hide: - - ... """ return cls.from_dict({PREPROCESSOR_KEY: preprocessor, MODEL_KEY: model}) diff --git a/python/ray/tune/analysis/experiment_analysis.py b/python/ray/tune/analysis/experiment_analysis.py index a713c92e1fee7..f1c7a08468cc4 100644 --- a/python/ray/tune/analysis/experiment_analysis.py +++ b/python/ray/tune/analysis/experiment_analysis.py @@ -54,7 +54,7 @@ from ray.tune.trainable.util import TrainableUtil from ray.tune.utils.util import unflattened_lookup -from ray.util.annotations import PublicAPI +from ray.util.annotations import Deprecated, PublicAPI logger = logging.getLogger(__name__) @@ -62,7 +62,7 @@ @PublicAPI(stability="beta") -class NewExperimentAnalysis: +class ExperimentAnalysis: """Analyze results from a Ray Train/Tune experiment. To use this class, the run must store the history of reported metrics @@ -116,7 +116,7 @@ def __init__( self._experiment_fs_path = experiment_checkpoint_path experiment_json_filename = ( - NewExperimentAnalysis._find_newest_experiment_checkpoint( + ExperimentAnalysis._find_newest_experiment_checkpoint( self._fs, self._experiment_fs_path ) ) @@ -742,9 +742,15 @@ def get_trial_checkpoints_paths( "`ResultGrid` and use `Result.best_checkpoints` instead." 
) + def fetch_trial_dataframes(self) -> Dict[str, DataFrame]: + raise DeprecationWarning( + "`fetch_trial_dataframes` is deprecated. " + "Access the `trial_dataframes` property instead." + ) -@PublicAPI(stability="beta") -class ExperimentAnalysis: + +@Deprecated +class LegacyExperimentAnalysis: """Analyze results from a Tune experiment. To use this class, the experiment must be executed with the JsonLogger. @@ -768,7 +774,7 @@ class ExperimentAnalysis: >>> from ray import tune >>> tune.run( # doctest: +SKIP ... my_trainable, name="my_exp", local_dir="~/tune_results") - >>> analysis = ExperimentAnalysis( # doctest: +SKIP + >>> analysis = LegacyExperimentAnalysis( # doctest: +SKIP ... experiment_checkpoint_path="~/tune_results/my_exp/state.json") """ diff --git a/python/ray/tune/impl/tuner_internal.py b/python/ray/tune/impl/tuner_internal.py index 09cd2e3dccb80..ceb6cea26fd90 100644 --- a/python/ray/tune/impl/tuner_internal.py +++ b/python/ray/tune/impl/tuner_internal.py @@ -32,7 +32,7 @@ get_fs_and_path, ) from ray.tune import Experiment, TuneError, ExperimentAnalysis -from ray.tune.analysis.experiment_analysis import NewExperimentAnalysis +from ray.tune.analysis.experiment_analysis import LegacyExperimentAnalysis from ray.tune.execution.experiment_state import _ResumeConfig from ray.tune.tune import _Config from ray.tune.registry import is_function_trainable @@ -404,13 +404,13 @@ def _restore_from_path_or_uri( # Load the experiment results at the point where it left off. try: if _use_storage_context(): - self._experiment_analysis = NewExperimentAnalysis( + self._experiment_analysis = ExperimentAnalysis( experiment_checkpoint_path=path_or_uri, default_metric=self._tune_config.metric, default_mode=self._tune_config.mode, ) else: - self._experiment_analysis = ExperimentAnalysis( + self._experiment_analysis = LegacyExperimentAnalysis( experiment_checkpoint_path=path_or_uri, default_metric=self._tune_config.metric, default_mode=self._tune_config.mode, diff --git a/python/ray/tune/result_grid.py b/python/ray/tune/result_grid.py index 654af2df6f90b..864f9e88dac2d 100644 --- a/python/ray/tune/result_grid.py +++ b/python/ray/tune/result_grid.py @@ -9,7 +9,6 @@ from ray.cloudpickle import cloudpickle from ray.exceptions import RayTaskError from ray.tune.analysis import ExperimentAnalysis -from ray.tune.analysis.experiment_analysis import NewExperimentAnalysis from ray.tune.error import TuneError from ray.tune.experiment import Trial from ray.tune.trainable.util import TrainableUtil @@ -318,9 +317,11 @@ def _trial_to_result(self, trial: Trial) -> Result: error=self._populate_exception(trial), _local_path=trial.local_path, _remote_path=trial.remote_path, - _storage_filesystem=self._experiment_analysis._fs - if isinstance(self._experiment_analysis, NewExperimentAnalysis) - else None, + _storage_filesystem=( + self._experiment_analysis._fs + if isinstance(self._experiment_analysis, ExperimentAnalysis) + else None + ), metrics_dataframe=metrics_df, best_checkpoints=best_checkpoints, ) diff --git a/python/ray/tune/search/searcher.py b/python/ray/tune/search/searcher.py index d27dd91190176..b4950316ed894 100644 --- a/python/ray/tune/search/searcher.py +++ b/python/ray/tune/search/searcher.py @@ -229,15 +229,17 @@ def add_evaluated_trials( # lazy imports to avoid circular dependencies from ray.tune.experiment import Trial from ray.tune.analysis import ExperimentAnalysis - from ray.tune.analysis.experiment_analysis import NewExperimentAnalysis from ray.tune.result import DONE + # TODO(justinvyu): 
[code_removal] + from ray.tune.analysis.experiment_analysis import LegacyExperimentAnalysis + if isinstance(trials_or_analysis, (list, tuple)): trials = trials_or_analysis elif isinstance(trials_or_analysis, Trial): trials = [trials_or_analysis] elif isinstance( - trials_or_analysis, (ExperimentAnalysis, NewExperimentAnalysis) + trials_or_analysis, (ExperimentAnalysis, LegacyExperimentAnalysis) ): trials = trials_or_analysis.trials else: diff --git a/python/ray/tune/tests/test_experiment_analysis.py b/python/ray/tune/tests/test_experiment_analysis.py index b95729e5b8ea6..1bead58c10762 100644 --- a/python/ray/tune/tests/test_experiment_analysis.py +++ b/python/ray/tune/tests/test_experiment_analysis.py @@ -13,7 +13,7 @@ from ray.air._internal.uri_utils import URI from ray.air.constants import EXPR_PROGRESS_FILE, EXPR_RESULT_FILE from ray.train._internal.storage import _delete_fs_path -from ray.tune.analysis.experiment_analysis import NewExperimentAnalysis +from ray.tune.analysis.experiment_analysis import ExperimentAnalysis from ray.tune.experiment import Trial from ray.tune.utils import flatten_dict @@ -100,7 +100,7 @@ def experiment_analysis(request): if load_from in ["dir", "cloud"]: # Test init without passing in in-memory trials. # Load them from an experiment directory instead. - yield NewExperimentAnalysis( + yield ExperimentAnalysis( str(URI(storage_path) / "test_experiment_analysis"), default_metric="ascending", default_mode="max", diff --git a/python/ray/tune/tune.py b/python/ray/tune/tune.py index 16460f9a1456c..36b14127b7031 100644 --- a/python/ray/tune/tune.py +++ b/python/ray/tune/tune.py @@ -32,7 +32,7 @@ from ray.train.constants import RAY_CHDIR_TO_TRIAL_DIR, _DEPRECATED_VALUE from ray.train._internal.storage import _use_storage_context from ray.tune.analysis import ExperimentAnalysis -from ray.tune.analysis.experiment_analysis import NewExperimentAnalysis +from ray.tune.analysis.experiment_analysis import LegacyExperimentAnalysis from ray.tune.callback import Callback from ray.tune.error import TuneError from ray.tune.execution.tune_controller import TuneController @@ -1161,7 +1161,7 @@ class and registered trainables. ) if _use_storage_context(): - return NewExperimentAnalysis( + return ExperimentAnalysis( experiment_checkpoint_path=runner.experiment_path, default_metric=metric, default_mode=mode, @@ -1169,7 +1169,7 @@ class and registered trainables. storage_filesystem=experiments[0].storage.storage_filesystem, ) else: - return ExperimentAnalysis( + return LegacyExperimentAnalysis( runner.experiment_state_path, trials=all_trials, default_metric=metric, diff --git a/rllib/algorithms/a2c/a2c.py b/rllib/algorithms/a2c/a2c.py index f6f2a8e750dea..ed3972549440c 100644 --- a/rllib/algorithms/a2c/a2c.py +++ b/rllib/algorithms/a2c/a2c.py @@ -43,8 +43,7 @@ class A2CConfig(A3CConfig): >>> algo.train() # doctest: +SKIP Example: - >>> import ray.air as air - >>> from ray import tune + >>> from ray import train, tune >>> from ray.rllib.algorithms.a2c import A2CConfig >>> config = A2CConfig() >>> # Print out some default values. 
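After the rename above, the `tune.py` and `tuner_internal.py` hunks select between the two analysis classes with the storage-context flag. A condensed sketch of that pattern; the paths and metric names are placeholders:

```python
from ray.train._internal.storage import _use_storage_context
from ray.tune.analysis import ExperimentAnalysis
from ray.tune.analysis.experiment_analysis import LegacyExperimentAnalysis

if _use_storage_context():
    # New storage context: the renamed, filesystem-aware ExperimentAnalysis.
    analysis = ExperimentAnalysis(
        experiment_checkpoint_path="s3://my-bucket/my-exp",
        default_metric="loss",
        default_mode="min",
    )
else:
    # Legacy path: the old class, loaded from a local experiment state file.
    analysis = LegacyExperimentAnalysis(
        experiment_checkpoint_path="~/ray_results/my-exp/experiment_state.json",
        default_metric="loss",
        default_mode="min",
    )
```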
diff --git a/rllib/examples/learner/train_w_bc_finetune_w_ppo.py b/rllib/examples/learner/train_w_bc_finetune_w_ppo.py index 1e2058c42a48e..b044b884baefe 100644 --- a/rllib/examples/learner/train_w_bc_finetune_w_ppo.py +++ b/rllib/examples/learner/train_w_bc_finetune_w_ppo.py @@ -11,7 +11,7 @@ import ray from ray import tune -from ray.air import RunConfig, FailureConfig +from ray.train import RunConfig, FailureConfig from ray.rllib.algorithms.ppo import PPOConfig from ray.rllib.algorithms.ppo.torch.ppo_torch_rl_module import PPOTorchRLModule from ray.rllib.algorithms.ppo.ppo_catalog import PPOCatalog diff --git a/rllib/policy/policy.py b/rllib/policy/policy.py index fb5837eb83552..1ab0653ead855 100644 --- a/rllib/policy/policy.py +++ b/rllib/policy/policy.py @@ -26,7 +26,7 @@ import ray import ray.cloudpickle as pickle from ray.actor import ActorHandle -from ray.air.checkpoint import Checkpoint +from ray.train import Checkpoint from ray.rllib.core.models.base import STATE_IN, STATE_OUT from ray.rllib.models.action_dist import ActionDistribution from ray.rllib.models.catalog import ModelCatalog
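The RLlib changes complete the same namespace move: `RunConfig` and `FailureConfig` now come from `ray.train` instead of `ray.air`, and `Policy` checkpoints use `ray.train.Checkpoint`. A minimal sketch of the updated imports in a Tune launch; the trainable name and config values are illustrative, not part of this patch:

```python
from ray import train, tune

# Config objects now live in ray.train rather than ray.air.
tuner = tune.Tuner(
    "PPO",  # Placeholder: any registered trainable.
    run_config=train.RunConfig(
        name="my-exp",
        failure_config=train.FailureConfig(max_failures=2),
        checkpoint_config=train.CheckpointConfig(num_to_keep=5),
    ),
)
results = tuner.fit()
```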