diff --git a/README.md b/README.md index fc2d1e371da..1f031e09c80 100644 --- a/README.md +++ b/README.md @@ -102,29 +102,21 @@ For a more in-depth read, check out [SparseML documentation](https://docs.neural The PyTorch optimization libraries are located under the `sparseml.pytorch.optim` package. Inside are APIs designed to make model optimization as easy as possible by integrating seamlessly into PyTorch training pipelines. -The integration is done using the `ScheduledModifierManager` class. It wraps your current optimizer's step function to apply SparseML `Modifier` optimizations at each step. -The `ScheduledModifierManager` class can be created from a recipe file or `SparseZoo` optimized model stub. - -With this setup, the training process can then be modified as desired to optimize the model using SparseML recipes. +The integration is done using the `ScheduledOptimizer` class. It is intended to wrap your current optimizer and its step function. The step function then calls into the `ScheduledModifierManager` class which can be created from a recipe file. With this setup, the training process can then be modified as desired to optimize the model. To enable all of this, the integration code you'll need to write is only a handful of lines: ```python -from sparseml.pytorch.optim import ScheduledModifierManager +from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer -# setup model = None # your model definition optimizer = None # your optimizer definition num_train_batches = len(train_data) / batch_size # your number of batches per training epoch -# integration manager = ScheduledModifierManager.from_yaml("/PATH/TO/recipe.yaml") -manager.initialize(model, optimizer, steps_per_epoch=num_train_batches) +optimizer = ScheduledOptimizer(optimizer, model, manager, steps_per_epoch=num_train_batches) # PyTorch training code... - -# finalize cleans up any added parameters or hooks -manager.finalize(model, optimizer) ``` ### Keras Optimization diff --git a/docs/source/quicktour.md b/docs/source/quicktour.md index d665a9b2f38..380fc0cf803 100644 --- a/docs/source/quicktour.md +++ b/docs/source/quicktour.md @@ -61,29 +61,21 @@ For a more in-depth read, check out [SparseML documentation](https://docs.neural The PyTorch optimization libraries are located under the `sparseml.pytorch.optim` package. Inside are APIs designed to make model optimization as easy as possible by integrating seamlessly into PyTorch training pipelines. -The integration is done using the `ScheduledModifierManager` class. It wraps your current optimizer's step function to apply SparseML `Modifier` optimizations at each step. -The `ScheduledModifierManager` class can be created from a recipe file or `SparseZoo` optimized model stub. - -With this setup, the training process can then be modified as desired to optimize the model using SparseML recipes. +The integration is done using the `ScheduledOptimizer` class. It is intended to wrap your current optimizer and its step function. The step function then calls into the `ScheduledModifierManager` class which can be created from a recipe file. With this setup, the training process can then be modified as desired to optimize the model. 
To enable all of this, the integration code you'll need to write is only a handful of lines: ```python -from sparseml.pytorch.optim import ScheduledModifierManager +from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer -# setup model = None # your model definition optimizer = None # your optimizer definition num_train_batches = len(train_data) / batch_size # your number of batches per training epoch -# integration manager = ScheduledModifierManager.from_yaml("/PATH/TO/recipe.yaml") -manager.initialize(model, optimizer, steps_per_epoch=num_train_batches) +optimizer = ScheduledOptimizer(optimizer, model, manager, steps_per_epoch=num_train_batches) # PyTorch training code... - -# finalize cleans up any added parameters or hooks -manager.finalize(model, optimizer) ``` ### Keras Optimization diff --git a/examples/pytorch-torchvision/main.py b/examples/pytorch-torchvision/main.py index 1e5a7f636be..4a90a8c602f 100644 --- a/examples/pytorch-torchvision/main.py +++ b/examples/pytorch-torchvision/main.py @@ -95,7 +95,7 @@ from torchvision import models from sparseml.pytorch.datasets.classification import ImageFolderDataset -from sparseml.pytorch.optim import ScheduledModifierManager +from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer from sparseml.pytorch.utils import ModuleExporter, PythonLogger, load_model from sparseml.utils import create_dirs @@ -408,9 +408,10 @@ def main(args): # add sparseml modifiers # ########################## manager = ScheduledModifierManager.from_yaml(args.recipe_path) - manager.initialize( - model, + optimizer = ScheduledOptimizer( optimizer, + model, + manager, steps_per_epoch=len(train_loader), loggers=[PythonLogger()], ) @@ -431,7 +432,6 @@ def main(args): ######################## # export trained model # ######################## - manager.finalize(model, optimizer) exporter = ModuleExporter(model, save_dir) sample_input = torch.randn(image_shape).unsqueeze(0) # sample batch for ONNX export exporter.export_onnx(sample_input) diff --git a/examples/pytorch-torchvision/pruning.ipynb b/examples/pytorch-torchvision/pruning.ipynb index 7688ceddf6a..984720c6a39 100644 --- a/examples/pytorch-torchvision/pruning.ipynb +++ b/examples/pytorch-torchvision/pruning.ipynb @@ -272,9 +272,18 @@ "source": [ "from sparsezoo import Zoo\n", "\n", - "recipe_path = Zoo.download_recipe_from_stub(\n", - " \"zoo:cv/classification/resnet_v1-50/pytorch/torchvision/imagenette/pruned-conservative\"\n", - ")\n", + "recipe = Zoo.search_recipes(\n", + " domain=\"cv\",\n", + " sub_domain=\"classification\",\n", + " architecture=\"resnet_v1\",\n", + " sub_architecture=\"50\",\n", + " framework=\"pytorch\",\n", + " repo=\"torchvision\",\n", + " dataset=\"imagenette\",\n", + " optim_name=\"pruned\",\n", + ")[0] # unwrap search result\n", + "recipe.download()\n", + "recipe_path = recipe.downloaded_path()\n", "print(f\"Recipe downloaded to: {recipe_path}\")" ] }, @@ -284,15 +293,15 @@ "metadata": {}, "outputs": [], "source": [ - "from sparseml.pytorch.optim import ScheduledModifierManager\n", + "from sparseml.pytorch.optim import (\n", + " ScheduledModifierManager,\n", + " ScheduledOptimizer,\n", + ")\n", "\n", "# create ScheduledModifierManager and Optimizer wrapper\n", "manager = ScheduledModifierManager.from_yaml(recipe_path)\n", - "manager.initialize(\n", - " model,\n", - " optimizer,\n", - " steps_per_epoch=len(train_loader),\n", - " loggers=[],\n", + "optimizer = ScheduledOptimizer(\n", + " optimizer, model, manager, 
steps_per_epoch=len(train_loader), loggers=[],\n", ")\n", "\n", "train_model(\n", @@ -303,9 +312,7 @@ " device,\n", " num_epochs=manager.max_epochs,\n", " is_inception=False,\n", - ")\n", - "\n", - "manager.finalize(model, optimizer)" + ")" ] }, { diff --git a/notebooks/pytorch_classification.ipynb b/notebooks/pytorch_classification.ipynb index 5f0ea5425d3..0c7a215c6b0 100644 --- a/notebooks/pytorch_classification.ipynb +++ b/notebooks/pytorch_classification.ipynb @@ -221,9 +221,18 @@ "source": [ "from sparsezoo import Zoo\n", "\n", - "recipe_path = Zoo.download_recipe_from_stub(\n", - " \"zoo:cv/classification/resnet_v1-50/tensorflow_v1/sparseml/imagenette/pruned-moderate\"\n", - ")\n", + "recipe = Zoo.search_recipes(\n", + " domain=\"cv\",\n", + " sub_domain=\"classification\",\n", + " architecture=\"resnet_v1\",\n", + " sub_architecture=\"50\",\n", + " framework=\"pytorch\",\n", + " repo=\"sparseml\",\n", + " dataset=\"imagenette\",\n", + " optim_name=\"pruned\",\n", + ")[0] # unwrap search result\n", + "recipe.download()\n", + "recipe_path = recipe.downloaded_path()\n", "print(f\"Recipe downloaded to: {recipe_path}\")" ] }, @@ -233,16 +242,15 @@ "metadata": {}, "outputs": [], "source": [ - "from sparseml.pytorch.optim import ScheduledModifierManager\n", - "\n", + "from sparseml.pytorch.optim import (\n", + " ScheduledModifierManager,\n", + " ScheduledOptimizer,\n", + ")\n", "\n", "# create ScheduledModifierManager and Optimizer wrapper\n", "manager = ScheduledModifierManager.from_yaml(recipe_path)\n", - "manager.initialize(\n", - " model,\n", - " optimizer,\n", - " steps_per_epoch=len(train_loader),\n", - " loggers=[],\n", + "optimizer = ScheduledOptimizer(\n", + " optimizer, model, manager, steps_per_epoch=len(train_loader), loggers=[],\n", ")\n", "\n", "\n", @@ -260,19 +268,19 @@ " epoch_name, train_loss, train_acc\n", " )\n", " )\n", - "\n", + " \n", " # run validation loop\n", " print(\"Running Validation Epoch {}\".format(epoch_name))\n", - " val_loss, val_acc = run_model_one_epoch(model, train_loader, criterion, device)\n", + " val_loss, val_acc = run_model_one_epoch(\n", + " model, train_loader, criterion, device\n", + " )\n", " print(\n", " \"Validation Epoch: {}\\nVal Loss: {}\\nTop 1 Acc: {}\\n\".format(\n", " epoch_name, val_loss, val_acc\n", " )\n", " )\n", - "\n", - " epoch += 1\n", - "\n", - "manager.finalize()" + " \n", + " epoch += 1" ] }, { diff --git a/notebooks/pytorch_detection.ipynb b/notebooks/pytorch_detection.ipynb index 1fc8e77f184..35627277586 100644 --- a/notebooks/pytorch_detection.ipynb +++ b/notebooks/pytorch_detection.ipynb @@ -240,9 +240,18 @@ "source": [ "from sparsezoo import Zoo\n", "\n", - "recipe_path = Zoo.download_recipe_from_stub(\n", - " \"zoo:cv/detection/ssd-resnet18_300/pytorch/sparseml/voc/pruned-moderate\"\n", - ")\n", + "recipe = Zoo.search_recipes(\n", + " domain=\"cv\",\n", + " sub_domain=\"detection\",\n", + " architecture=\"ssd\",\n", + " sub_architecture=\"resnet18_300\",\n", + " framework=\"pytorch\",\n", + " repo=\"sparseml\",\n", + " dataset=\"voc\",\n", + " optim_name=\"pruned\",\n", + ")[0] # unwrap search result\n", + "recipe.download()\n", + "recipe_path = recipe.downloaded_path()\n", "print(f\"Recipe downloaded to: {recipe_path}\")" ] }, @@ -252,13 +261,17 @@ "metadata": {}, "outputs": [], "source": [ - "from sparseml.pytorch.optim import ScheduledModifierManager\n", + "from sparseml.pytorch.optim import (\n", + " ScheduledModifierManager,\n", + " ScheduledOptimizer,\n", + ")\n", "\n", "# create ScheduledModifierManager and 
Optimizer wrapper\n", "manager = ScheduledModifierManager.from_yaml(recipe_path)\n", - "manager.initialize(\n", - " model,\n", + "optimizer = ScheduledOptimizer(\n", " optimizer,\n", + " model,\n", + " manager,\n", " steps_per_epoch=len(train_loader),\n", " loggers=[],\n", ")\n", @@ -278,9 +291,7 @@ " val_loss = run_model_one_epoch(model, train_loader, criterion, device)\n", " print(\"Validation Epoch: {}\\nVal Loss: {}\\n\".format(epoch_name, val_loss))\n", "\n", - " epoch += 1\n", - " \n", - "manager.finalize()" + " epoch += 1" ] }, { diff --git a/scripts/pytorch_vision.py b/scripts/pytorch_vision.py index 44b3bbf0541..d38fac15c66 100644 --- a/scripts/pytorch_vision.py +++ b/scripts/pytorch_vision.py @@ -414,7 +414,7 @@ import torch from torch.nn import Module from torch.nn import functional as torch_functional -from torch.optim import SGD, Adam, Optimizer +from torch.optim import SGD, Adam from torch.optim.optimizer import Optimizer from torch.utils.data import DataLoader from tqdm.auto import tqdm @@ -432,6 +432,7 @@ from sparseml.pytorch.optim import ( ConstantPruningModifier, ScheduledModifierManager, + ScheduledOptimizer, default_exponential_check_lrs, lr_loss_sensitivity, pruning_loss_sens_magnitude, @@ -968,7 +969,7 @@ def _create_scheduled_optimizer( model: Module, train_loader: DataLoader, loggers: List[Any], -) -> Tuple[int, Optimizer, ScheduledModifierManager]: +) -> Tuple[int, ScheduledOptimizer, ScheduledModifierManager]: # optimizer setup if args.optim == "SGD": optim_const = SGD @@ -1011,8 +1012,13 @@ def _create_scheduled_optimizer( manager = ScheduledModifierManager.from_yaml( file_path=args.recipe_path, add_modifiers=add_mods ) - manager.initialize(model, optim, steps_per_epoch=len(train_loader), loggers=loggers) - + optim = ScheduledOptimizer( + optim, + model, + manager, + steps_per_epoch=len(train_loader), + loggers=loggers, + ) LOGGER.info("created manager: {}".format(manager)) return epoch, optim, manager @@ -1108,8 +1114,8 @@ def train(args, model, train_loader, val_loader, input_shape, save_dir, loggers) LOGGER.info("starting training from epoch {}".format(epoch)) if epoch > 0: - LOGGER.info("adjusting Manager to restore point") - ScheduledModifierManager.adjust_optimizer_step(optim, epoch, 0) + LOGGER.info("adjusting ScheduledOptimizer to restore point") + optim.adjust_current_step(epoch, 0) best_loss = None val_res = None @@ -1118,7 +1124,7 @@ def train(args, model, train_loader, val_loader, input_shape, save_dir, loggers) if args.debug_steps > 0: # correct since all optimizer steps are not # taken in the epochs for debug mode - ScheduledModifierManager.adjust_optimizer_step(optim, epoch, 0) + optim.adjust_current_step(epoch, 0) if args.rank != -1: # sync DDP dataloaders train_loader.sampler.set_epoch(epoch) @@ -1167,8 +1173,6 @@ def train(args, model, train_loader, val_loader, input_shape, save_dir, loggers) # export the final model LOGGER.info("completed...") - # finalize manager - manager.finalize(model, optim) if args.is_main_process: _save_model_training( model, optim, input_shape, "model", save_dir, epoch, val_res diff --git a/src/sparseml/pytorch/optim/__init__.py b/src/sparseml/pytorch/optim/__init__.py index 9fc307e45f4..5885a85fb1c 100644 --- a/src/sparseml/pytorch/optim/__init__.py +++ b/src/sparseml/pytorch/optim/__init__.py @@ -33,6 +33,7 @@ from .modifier_pruning import * from .modifier_quantization import * from .modifier_regularizer import * +from .optimizer import * from .sensitivity_as import * from .sensitivity_lr import * from 
.sensitivity_pruning import * diff --git a/src/sparseml/pytorch/optim/manager.py b/src/sparseml/pytorch/optim/manager.py index bd613bc4e36..ac6929d6ec4 100644 --- a/src/sparseml/pytorch/optim/manager.py +++ b/src/sparseml/pytorch/optim/manager.py @@ -18,8 +18,6 @@ Also handles loading modifiers from yaml files """ -import weakref -from functools import wraps from typing import Dict, List, Union import torch @@ -87,123 +85,22 @@ def from_yaml( return manager - @staticmethod - def adjust_optimizer_step(optimizer: Optimizer, epoch: int, step: int): - """ - Adjust the current step for the optimizer's managed schedule to the given - epoch and step. - - :param optimizer: the manager initialized optimizer to adjust the step for - :param epoch: the epoch to set the current global step to match - :param step: the step (batch) within the epoch to set the - current global step to match - """ - if not getattr(optimizer.step, "_with_modifiers", False): - raise RuntimeError( - "Optimizer not initialized with ScheduledModifierManager.initialize" - ) - optimizer._steps = epoch * optimizer._steps_per_epoch + step - _set_scheduled_epoch(optimizer) - def __init__(self, modifiers: List[ScheduledModifier]): super().__init__(modifiers=modifiers) - def initialize( - self, - module: Module, - optimizer: Optimizer, - steps_per_epoch: int, - loggers: Union[List[PyTorchLogger], None] = None, - ): + def initialize(self, module: Module, optimizer: Optimizer): """ Handles initializing and setting up the contained modifiers Called once on construction of the scheduled optimizer - :param optimizer: optimizer to modify :param module: module to modify - :param steps_per_epoch: the number of steps or batches in each epoch, - used to calculate decimals within the epoch - :param loggers: loggers to log important info to within the modifiers; - ex tensorboard or to the console + :param optimizer: optimizer to modify """ - if steps_per_epoch <= 0: - raise ValueError("steps_per_epoch must be >= 0") - super().initialize(module, optimizer) for mod in self._modifiers: mod.initialize(module, optimizer) - self._modify_optimizer_step(module, optimizer, steps_per_epoch) - - self.initialize_loggers(loggers) - - def _modify_optimizer_step( - self, module: Module, optimizer: Optimizer, steps_per_epoch: int - ): - def _step_with_modifiers(step_method): - if getattr(step_method, "_with_modifiers", False): - # `optimizer.step()` has already been replaced, return. 
- return step_method - - # Prevent cyclic references by keeping a weak reference - # to optimizer class and original unbound step method - optim_ref = weakref.ref(step_method.__self__) - original_step_func = step_method.__func__ - optim_cls = optim_ref().__class__ - del step_method - - recipe_manager = self - - @wraps(original_step_func) - def modifier_step_wrapper(*args, **kwargs): - optim_instance = optim_ref() - - # set current epoch - _set_scheduled_epoch(optim_instance) - - # run modifiers - recipe_manager.update( - module, - optim_instance, - optim_instance._epoch, - optim_instance._steps_per_epoch, - ) - recipe_manager.optimizer_pre_step( - module, - optim_instance, - optim_instance._epoch, - optim_instance._steps_per_epoch, - ) - - # optimizer step - optim_outputs = original_step_func.__get__(optim_instance, optim_cls)( - *args, **kwargs - ) - - # post step hooks - recipe_manager.optimizer_post_step( - module, - optim_instance, - optim_instance._epoch, - optim_instance._steps_per_epoch, - ) - optim_instance._steps += 1 - - return optim_outputs - - # Note that the returned function here is no longer a bound method, - # so attributes like `__func__` and `__self__` no longer exist. - modifier_step_wrapper._with_modifiers = True - modifier_step_wrapper._original_step_func = original_step_func - return modifier_step_wrapper - - # wrap optimizer step method - optimizer.step = _step_with_modifiers(optimizer.step) - optimizer._epoch = 0 - optimizer._steps = 0 - optimizer._steps_per_epoch = steps_per_epoch - def state_dict(self) -> Dict[str, Dict]: """ :return: Dictionary to store any state variables from this Manager's Modifiers. @@ -356,33 +253,6 @@ def optimizer_post_step( mod.optimizer_post_step(module, optimizer, epoch, steps_per_epoch) - def finalize(self, module: Module, optimizer: Optimizer): - """ - Remove extra information and hooks added to the module and optimizer - by the Modifier. 
- - :param module: module to finalize - :param optimizer: optimizer to finalize - """ - super().finalize(module, optimizer) - for mod in self._modifiers: - mod.finalize(module, optimizer) - - # revert optimizer to use original step, do not invoke manager - original_step_func = getattr(optimizer.step, "_original_step_func", None) - if original_step_func: - # delete wrapped step function and added variables - del optimizer.step # delete wrapped step function - del optimizer._epoch - del optimizer._steps - del optimizer._steps_per_epoch - - # bind unbound original step function back to optimizer instance and reset - bound_original_step_func = original_step_func.__get__( - optimizer, optimizer.__class__ - ) - setattr(optimizer, "step", bound_original_step_func) - def load_manager( path: str, manager: ScheduledModifierManager, map_location: Union[None, str] = "cpu" @@ -398,11 +268,3 @@ def load_manager( if "manager" in state_dict: state_dict = state_dict["manager"] manager.load_state_dict(state_dict) - - -def _set_scheduled_epoch(optimizer: Optimizer): - epoch_num = optimizer._steps // optimizer._steps_per_epoch - epoch_steps = optimizer._steps % optimizer._steps_per_epoch - optimizer._epoch = float(epoch_num) + float(epoch_steps) / float( - optimizer._steps_per_epoch - ) diff --git a/src/sparseml/pytorch/optim/modifier.py b/src/sparseml/pytorch/optim/modifier.py index a887fa13fbe..a5b5bb14e54 100644 --- a/src/sparseml/pytorch/optim/modifier.py +++ b/src/sparseml/pytorch/optim/modifier.py @@ -250,16 +250,6 @@ def optimizer_post_step( if not self._enabled: raise RuntimeError("modifier must be enabled") - def finalize(self, module: Module, optimizer: Optimizer): - """ - Remove extra information and hooks added to the module and optimizer - by the Modifier. - - :param module: module to finalize - :param optimizer: optimizer to finalize - """ - self.enabled = False - class ScheduledModifier(Modifier, BaseScheduled): """ diff --git a/src/sparseml/pytorch/optim/modifier_as.py b/src/sparseml/pytorch/optim/modifier_as.py index 79e36da9fa7..8746af52957 100644 --- a/src/sparseml/pytorch/optim/modifier_as.py +++ b/src/sparseml/pytorch/optim/modifier_as.py @@ -348,15 +348,3 @@ def _regularize_tracked(self, tens: Union[Tuple[Tensor, ...], Tensor]): reduced = reduced / len(tens) return reduced - - def finalize(self, module: Module, optimizer: Optimizer): - """ - Remove extra information and hooks added to the module and optimizer - by the Modifier. - - :param module: module to finalize - :param optimizer: optimizer to finalize - """ - super().finalize(module, optimizer) - for tracker in self._trackers: - tracker.disable() diff --git a/src/sparseml/pytorch/optim/modifier_pruning.py b/src/sparseml/pytorch/optim/modifier_pruning.py index dc89bbfc44b..abad881c245 100644 --- a/src/sparseml/pytorch/optim/modifier_pruning.py +++ b/src/sparseml/pytorch/optim/modifier_pruning.py @@ -291,19 +291,7 @@ def optimizer_post_step( # be sure to apply mask again after optimizer update because # weights may have changed (optimizer with momentum, not masking gradient) for mask in self._module_masks: - mask.enabled = False - - def finalize(self, module: Module, optimizer: Optimizer): - """ - Remove extra information and hooks added to the module and optimizer - by the Modifier. 
- - :param module: module to finalize - :param optimizer: optimizer to finalize - """ - super().finalize(module, optimizer) - for mask in self._module_masks: - mask.enabled = False + mask.apply() @PyTorchModifierYAML() @@ -664,18 +652,6 @@ def optimizer_post_step( for mask in self._module_masks: mask.apply() - def finalize(self, module: Module, optimizer: Optimizer): - """ - Remove extra information and hooks added to the module and optimizer - by the Modifier. - - :param module: module to finalize - :param optimizer: optimizer to finalize - """ - super().finalize(module, optimizer) - for mask in self._module_masks: - mask.enabled = False - def validate(self): """ Validate the values of the params for the current instance are valid diff --git a/src/sparseml/pytorch/optim/optimizer.py b/src/sparseml/pytorch/optim/optimizer.py new file mode 100644 index 00000000000..6a99c0247fa --- /dev/null +++ b/src/sparseml/pytorch/optim/optimizer.py @@ -0,0 +1,236 @@ +# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Optimizer wrapper for enforcing Modifiers on the training process of a Module. +""" + +from typing import List, Union + +from torch import Tensor +from torch.nn import Module +from torch.optim.optimizer import Optimizer + +from sparseml.pytorch.optim.manager import ScheduledModifierManager +from sparseml.pytorch.utils import ( + PyTorchLogger, + get_optim_learning_rate, + set_optim_learning_rate, +) + + +__all__ = ["ScheduledOptimizer"] + + +class ScheduledOptimizer(Optimizer): + """ + An optimizer wrapper to handle applying modifiers according to their schedule + to both the passed in optimizer and the module. + + Overrides the step() function so that this method can call before and after on the + modifiers to apply appropriate modifications to both the optimizer and the module. + + The epoch_start and epoch_end are based on how many steps have been taken + along with the steps_per_epoch. + + | Lifecycle: + | - training cycle + | - zero_grad + | - loss_update + | - modifiers.loss_update + | - step + | - modifiers.update + | - modifiers.optimizer_pre_step + | - optimizer.step + | - modifiers.optimizers_post_step + + :param module: module to modify + :param optimizer: optimizer to modify + :param manager: the manager or list of managers used to apply modifications + :param steps_per_epoch: the number of steps or batches in each epoch, + not strictly required and can be set to -1. 
+        used to calculate decimals within the epoch,
+        when not using can result in irregularities
+    :param loggers: loggers to log important info to within the modifiers;
+        ex tensorboard or to the console
+
+    """
+
+    def __init__(
+        self,
+        optimizer: Optimizer,
+        module: Module,
+        manager: ScheduledModifierManager,
+        steps_per_epoch: int,
+        loggers: Union[List[PyTorchLogger], None] = None,
+    ):
+        # do not call into super since this instance is not passing all calls to
+        # the nested optimizer
+
+        if steps_per_epoch <= 0:
+            raise ValueError("steps_per_epoch must be > 0")
+
+        self._optimizer = optimizer
+        self._module = module
+        self._manager = manager
+        self._steps_per_epoch = steps_per_epoch
+        self._steps = 0
+
+        self._epoch = 0.0
+        self._manager.initialize(self._module, self._optimizer)
+        self._manager.initialize_loggers(loggers)
+
+    def __del__(self):
+        del self._manager
+
+    def __getstate__(self):
+        return self._optimizer.__getstate__()
+
+    def __setstate__(self, state):
+        self._optimizer.__setstate__(state)
+
+    def __repr__(self):
+        return self._optimizer.__repr__()
+
+    def __getattr__(self, item):
+        return getattr(self._optimizer, item)
+
+    def __setattr__(self, key, value):
+        if key in [
+            "_optimizer",
+            "_module",
+            "_manager",
+            "_steps_per_epoch",
+            "_steps",
+            "_epoch",
+            "learning_rate",
+            "param_groups",
+            "step",
+        ]:
+            super().__setattr__(key, value)
+        else:
+            setattr(self._optimizer, key, value)
+
+    @property
+    def learning_rate(self) -> float:
+        """
+        :return: convenience function to get the first learning rate for any of
+            the param groups in the optimizer
+        """
+        return get_optim_learning_rate(self._optimizer)
+
+    @learning_rate.setter
+    def learning_rate(self, value: float):
+        """
+        :param value: the learning rate to set for the optimizer,
+            will set all param groups in the optim to this value
+        """
+        set_optim_learning_rate(self._optimizer, value)
+
+    @property
+    def manager(self) -> ScheduledModifierManager:
+        """
+        :return: The ScheduledModifierManager for this optimizer
+        """
+        return self._manager
+
+    @property
+    def param_groups(self):
+        return self._optimizer.param_groups
+
+    @param_groups.setter
+    def param_groups(self, value):
+        self._optimizer.param_groups = value
+
+    def state_dict(self):
+        return self._optimizer.state_dict()
+
+    def load_state_dict(self, state_dict):
+        return self._optimizer.load_state_dict(state_dict)
+
+    def manager_state_dict(self):
+        return self._manager.state_dict()
+
+    def load_manager_state_dict(self, state_dict):
+        self._manager.load_state_dict(state_dict)
+
+    def add_param_group(self, param_group):
+        self._optimizer.add_param_group(param_group)
+
+    def zero_grad(self):
+        self._optimizer.zero_grad()
+
+    def step(self, closure=None):
+        """
+        Called to perform a step on the wrapped optimizer as normal.
+        Updates the current epoch based on the step count.
+        Calls into modifiers before the step happens.
+        Calls into modifiers after the step happens.
+
+        :param closure: optional closure passed into the contained optimizer
+            for the step
+        """
+        self._set_epoch()
+
+        self._manager.update(
+            self._module, self._optimizer, self._epoch, self._steps_per_epoch
+        )
+        self._manager.optimizer_pre_step(
+            self._module, self._optimizer, self._epoch, self._steps_per_epoch
+        )
+        self._optimizer.step(closure)
+        self._manager.optimizer_post_step(
+            self._module, self._optimizer, self._epoch, self._steps_per_epoch
+        )
+        self._steps += 1
+
+    def loss_update(self, loss: Tensor) -> Tensor:
+        """
+        Optional call to update modifiers based on the calculated loss.
+ Not needed unless one or more of the modifier is using the loss + to make a modification or is modifying the loss itself. + + :param loss: the calculated loss after running a forward pass and loss_fn + :return: the modified loss tensor + """ + loss = self._manager.loss_update( + loss, self._module, self._optimizer, self._epoch, self._steps_per_epoch + ) + + return loss + + def adjust_current_step(self, epoch: int, step: int): + """ + Adjust the current step for the manager's schedule to the given epoch and step. + + :param epoch: the epoch to set the current global step to match + :param step: the step (batch) within the epoch to set the + current global step to match + """ + self._steps = epoch * self._steps_per_epoch + step + self._set_epoch() + self._manager.update( + self._module, + self._optimizer, + self._epoch, + self._steps_per_epoch, + log_updates=False, + ) + + def _set_epoch(self): + epoch_num = self._steps // self._steps_per_epoch + epoch_steps = self._steps % self._steps_per_epoch + self._epoch = float(epoch_num) + float(epoch_steps) / float( + self._steps_per_epoch + ) diff --git a/src/sparseml/pytorch/utils/exporter.py b/src/sparseml/pytorch/utils/exporter.py index e8cb532ad4a..8a2ce55e65d 100644 --- a/src/sparseml/pytorch/utils/exporter.py +++ b/src/sparseml/pytorch/utils/exporter.py @@ -18,7 +18,7 @@ import os from copy import deepcopy -from typing import Any, Dict, Iterable, List +from typing import Any, Iterable, List import numpy import onnx @@ -160,7 +160,7 @@ def export_pytorch( epoch: int = None, name: str = "model.pth", use_zipfile_serialization_if_available: bool = True, - extras: Dict[str, Any] = None, + include_modifiers: bool = False, ): """ Export the pytorch state dicts into pth file within a @@ -171,9 +171,9 @@ def export_pytorch( :param name: name of the pytorch file to save :param use_zipfile_serialization_if_available: for torch >= 1.6.0 only exports the Module's state dict using the new zipfile serialization - :param extras: dictionary of additional names to objects to serialize in the saved - state dict. All values must have a callable `state_dict()` property. - i.e. {'manager': scheduled_modifier_manager}. Default is None + :param include_modifiers: if True, and a ScheduledOptimizer is provided + as the optimizer, the associated ScheduledModifierManager and its + Modifiers will be exported under the 'manager' key. Default is False """ pytorch_path = os.path.join(self._output_dir, "pytorch") pth_path = os.path.join(pytorch_path, name) @@ -186,7 +186,7 @@ def export_pytorch( use_zipfile_serialization_if_available=( use_zipfile_serialization_if_available ), - extras=extras, + include_modifiers=include_modifiers, ) def export_samples( diff --git a/src/sparseml/pytorch/utils/model.py b/src/sparseml/pytorch/utils/model.py index 138ed4ecf22..d3b87bcf396 100644 --- a/src/sparseml/pytorch/utils/model.py +++ b/src/sparseml/pytorch/utils/model.py @@ -17,7 +17,7 @@ """ from collections import OrderedDict -from typing import Any, Dict, List, Tuple, Union +from typing import List, Tuple, Union import torch from torch.nn import DataParallel, Module @@ -135,7 +135,7 @@ def save_model( optimizer: Optimizer = None, epoch: Union[int, None] = None, use_zipfile_serialization_if_available: bool = True, - extras: Dict[str, Any] = None, + include_modifiers: bool = False, ): """ Save a model's state dict into a file at the given path. 
@@ -147,9 +147,9 @@ def save_model( :param epoch: the epoch to save :param use_zipfile_serialization_if_available: for torch >= 1.6.0 only exports the model's state dict using the new zipfile serialization - :param extras: dictionary of additional names to objects to serialize in the saved - state dict. All values must have a callable `state_dict()` property. - i.e. {'manager': scheduled_modifier_manager}. Default is None + :param include_modifiers: if True, and a ScheduledOptimizer is provided + as the optimizer, the associated ScheduledModifierManager and its + Modifiers will be exported under the 'manager' key. Default is False """ create_parent_dirs(path) @@ -170,9 +170,8 @@ def save_model( if epoch: save_dict["epoch"] = epoch - if extras: - for name, obj in extras.items(): - save_dict["name"] = obj.state_dict() + if include_modifiers and optimizer and hasattr(optimizer, "manager_state_dict"): + save_dict["manager"] = optimizer.manager_state_dict() if torch.__version__ < "1.6": torch.save(save_dict, path) diff --git a/tests/sparseml/pytorch/optim/test_manager.py b/tests/sparseml/pytorch/optim/test_manager.py index be3750d7a2c..957881353f9 100644 --- a/tests/sparseml/pytorch/optim/test_manager.py +++ b/tests/sparseml/pytorch/optim/test_manager.py @@ -52,23 +52,6 @@ "optim_lambda", [create_optim_sgd, create_optim_adam], scope="function" ) class TestManagerImpl(ModifierTest): - def test_initialize( - self, - modifier_lambda: Callable[[], Modifier], - model_lambda: Callable[[], Module], - optim_lambda: Callable[[Module], Optimizer], - test_steps_per_epoch: float, # noqa: F811 - ): - modifier = modifier_lambda() - model = model_lambda() - optimizer = optim_lambda(model) - - self.initialize_helper( - modifier, model, optimizer, steps_per_epoch=test_steps_per_epoch - ) - assert modifier.initialized - assert optimizer.step._with_modifiers # assert modifier steps added - def test_yaml( self, modifier_lambda: Callable[[], Modifier], @@ -80,38 +63,6 @@ def test_yaml( # no yaml tests for manager return - def test_lifecycle( - self, - modifier_lambda, - model_lambda, - optim_lambda, - test_steps_per_epoch, # noqa: F811 - ): - manager = modifier_lambda() - model = model_lambda() - optimizer = optim_lambda(model) - - original_step_func = optimizer.step.__func__ - - self.initialize_helper( - manager, model, optimizer, steps_per_epoch=test_steps_per_epoch - ) - assert manager.initialized - assert optimizer.step._with_modifiers # assert modifier steps added - assert optimizer.step._original_step_func is original_step_func - assert optimizer.step != original_step_func - - assert optimizer._steps == 0 - assert optimizer._epoch == 0.0 - for i in range(1, test_steps_per_epoch + 2): - optimizer.step() - assert optimizer._steps == i - assert optimizer._epoch >= 1.0 - - # test original step func is restored - manager.finalize(model, optimizer) - assert optimizer.step.__func__ is original_step_func - @pytest.mark.skipif( os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False), diff --git a/tests/sparseml/pytorch/optim/test_modifier.py b/tests/sparseml/pytorch/optim/test_modifier.py index 09fe81bb5d3..948d590e6eb 100644 --- a/tests/sparseml/pytorch/optim/test_modifier.py +++ b/tests/sparseml/pytorch/optim/test_modifier.py @@ -26,7 +26,6 @@ Modifier, PyTorchModifierYAML, ScheduledModifier, - ScheduledModifierManager, ScheduledUpdateModifier, ) from sparseml.pytorch.utils import PythonLogger, TensorBoardLogger @@ -68,12 +67,8 @@ def initialize_helper( model: Module = None, optimizer: Optimizer = None, log_initialize: bool = 
True, - steps_per_epoch: float = None, ): - if isinstance(modifier, ScheduledModifierManager): - modifier.initialize(model, optimizer, steps_per_epoch) - else: - modifier.initialize(model, optimizer) + modifier.initialize(model, optimizer) if log_initialize: modifier.initialize_loggers([PythonLogger()]) @@ -133,15 +128,10 @@ def test_props( ): model = model_lambda() optimizer = optim_lambda(model) - - initialize_kwargs = {"model": model, "optimizer": optimizer} - if isinstance(modifier_lambda(), ScheduledModifierManager): - initialize_kwargs["steps_per_epoch"] = test_steps_per_epoch - super().test_props( modifier_lambda, framework=PYTORCH_FRAMEWORK, - initialize_kwargs=initialize_kwargs, + initialize_kwargs={"model": model, "optimizer": optimizer}, ) def test_initialize( @@ -207,9 +197,7 @@ def test_update( with pytest.raises(RuntimeError): modifier.update(model, optimizer, test_epoch, test_steps_per_epoch) - self.initialize_helper( - modifier, model, optimizer, steps_per_epoch=test_steps_per_epoch - ) + self.initialize_helper(modifier, model, optimizer) modifier.enabled = False with pytest.raises(RuntimeError): @@ -233,25 +221,12 @@ def test_log_update( with pytest.raises(RuntimeError): modifier.log_update(model, optimizer, test_epoch, test_steps_per_epoch) - self.initialize_helper( - modifier, - model, - optimizer, - log_initialize=False, - steps_per_epoch=test_steps_per_epoch, - ) + self.initialize_helper(modifier, model, optimizer, log_initialize=False) - if not isinstance(modifier, ScheduledModifierManager): - with pytest.raises(RuntimeError): - modifier.log_update(model, optimizer, test_epoch, test_steps_per_epoch) - - self.initialize_helper( - modifier, - model, - optimizer, - log_initialize=True, - steps_per_epoch=test_steps_per_epoch, - ) + with pytest.raises(RuntimeError): + modifier.log_update(model, optimizer, test_epoch, test_steps_per_epoch) + + self.initialize_helper(modifier, model, optimizer, log_initialize=True) modifier.enabled = False with pytest.raises(RuntimeError): @@ -278,9 +253,7 @@ def test_loss_update( test_loss, model, optimizer, test_epoch, test_steps_per_epoch ) - self.initialize_helper( - modifier, model, optimizer, steps_per_epoch=test_steps_per_epoch - ) + self.initialize_helper(modifier, model, optimizer) new_loss = modifier.loss_update( test_loss, model, optimizer, test_epoch, test_steps_per_epoch ) @@ -304,9 +277,7 @@ def test_optimizer_pre_step( model, optimizer, test_epoch, test_steps_per_epoch ) - self.initialize_helper( - modifier, model, optimizer, steps_per_epoch=test_steps_per_epoch - ) + self.initialize_helper(modifier, model, optimizer) modifier.enabled = False with pytest.raises(RuntimeError): @@ -334,9 +305,7 @@ def test_optimizer_post_step( model, optimizer, test_epoch, test_steps_per_epoch ) - self.initialize_helper( - modifier, model, optimizer, steps_per_epoch=test_steps_per_epoch - ) + self.initialize_helper(modifier, model, optimizer) modifier.enabled = False with pytest.raises(RuntimeError): @@ -393,9 +362,7 @@ def test_start_pending( with pytest.raises(RuntimeError): modifier.start_pending(0.0, test_steps_per_epoch) - self.initialize_helper( - modifier, model, optimizer, steps_per_epoch=test_steps_per_epoch - ) + self.initialize_helper(modifier, model, optimizer) modifier.enabled = False assert not modifier.start_pending(modifier.start_epoch, test_steps_per_epoch) modifier.enabled = True @@ -421,9 +388,7 @@ def test_end_pending( with pytest.raises(RuntimeError): modifier.end_pending(0.0, test_steps_per_epoch) - self.initialize_helper( 
- modifier, model, optimizer, steps_per_epoch=test_steps_per_epoch - ) + self.initialize_helper(modifier, model, optimizer) self.start_helper(modifier, model, optimizer) modifier.enabled = False assert not modifier.end_pending(modifier.start_epoch, test_steps_per_epoch) @@ -451,9 +416,7 @@ def test_update_ready( with pytest.raises(RuntimeError): modifier.update_ready(0.0, test_steps_per_epoch) - self.initialize_helper( - modifier, model, optimizer, steps_per_epoch=test_steps_per_epoch - ) + self.initialize_helper(modifier, model, optimizer) modifier.enabled = False assert not modifier.update_ready(modifier.start_epoch, test_steps_per_epoch) modifier.enabled = True @@ -483,9 +446,7 @@ def test_scheduled_update( with pytest.raises(RuntimeError): modifier.scheduled_update(model, optimizer, 0.0, test_steps_per_epoch) - self.initialize_helper( - modifier, model, optimizer, steps_per_epoch=test_steps_per_epoch - ) + self.initialize_helper(modifier, model, optimizer) if modifier.start_epoch <= 0.0: modifier.scheduled_update(model, optimizer, 0.0, test_steps_per_epoch) @@ -541,24 +502,12 @@ def test_scheduled_log_update( with pytest.raises(RuntimeError): modifier.scheduled_log_update(model, optimizer, 0.0, test_steps_per_epoch) - self.initialize_helper( - modifier, - model, - optimizer, - log_initialize=False, - steps_per_epoch=test_steps_per_epoch, - ) + self.initialize_helper(modifier, model, optimizer, log_initialize=False) with pytest.raises(RuntimeError): modifier.scheduled_log_update(model, optimizer, 0.0, test_steps_per_epoch) - self.initialize_helper( - modifier, - model, - optimizer, - log_initialize=True, - steps_per_epoch=test_steps_per_epoch, - ) + self.initialize_helper(modifier, model, optimizer, log_initialize=True) for epoch in range( int(modifier.start_epoch) if modifier.start_epoch >= 0.0 else 0, @@ -626,9 +575,7 @@ def test_update_ready( modifier = modifier_lambda() model = model_lambda() optimizer = optim_lambda(model) - self.initialize_helper( - modifier, model, optimizer, steps_per_epoch=test_steps_per_epoch - ) + self.initialize_helper(modifier, model, optimizer) self.start_helper(modifier, model, optimizer) min_update_freq = 1.0 / float(test_steps_per_epoch) @@ -662,9 +609,7 @@ def test_scheduled_update( modifier = modifier_lambda() model = model_lambda() optimizer = optim_lambda(model) - self.initialize_helper( - modifier, model, optimizer, steps_per_epoch=test_steps_per_epoch - ) + self.initialize_helper(modifier, model, optimizer) self.start_helper(modifier, model, optimizer) min_update_freq = 1.0 / float(test_steps_per_epoch) diff --git a/tests/sparseml/pytorch/optim/test_modifier_regularizer.py b/tests/sparseml/pytorch/optim/test_modifier_optimizer.py similarity index 100% rename from tests/sparseml/pytorch/optim/test_modifier_regularizer.py rename to tests/sparseml/pytorch/optim/test_modifier_optimizer.py diff --git a/tests/sparseml/pytorch/optim/test_optimizer.py b/tests/sparseml/pytorch/optim/test_optimizer.py new file mode 100644 index 00000000000..f577f4324ae --- /dev/null +++ b/tests/sparseml/pytorch/optim/test_optimizer.py @@ -0,0 +1,71 @@ +# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +from typing import Callable, Optional + +import pytest +import torch +from torch.nn import Module +from torch.optim import SGD +from torch.optim.optimizer import Optimizer + +from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer +from tests.sparseml.pytorch.helpers import MLPNet + + +class FakeOptim(SGD): + def zero_grad(self) -> None: + return + + def step(self, closure: Optional[Callable[[], float]] = ...) -> None: + return + + +class FakeManager(ScheduledModifierManager): + def __init__(self): + super().__init__([]) + self.last_called_epoch = -1 + + def update( + self, module: Module, optimizer: Optimizer, epoch: float, steps_per_epoch: int + ): + super().update(module, optimizer, epoch, steps_per_epoch) + self.last_called_epoch = epoch + + +@pytest.mark.skipif( + os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False), + reason="Skipping pytorch tests", +) +def test_optim(): + model = MLPNet() + optim = FakeOptim(model.parameters(), 0.1) + steps_per_epoch = 100 + manager = FakeManager() + + with pytest.raises(ValueError): + ScheduledOptimizer(optim, model, manager, steps_per_epoch=-1) + + optim = ScheduledOptimizer(optim, model, manager, steps_per_epoch) + + for epoch in range(10): + for batch in range(steps_per_epoch): + optim.loss_update(torch.tensor(0.0)) + optim.step() + expected_epoch = float(epoch) + float(batch) / float(steps_per_epoch) + assert ( + abs(expected_epoch - manager.last_called_epoch) < sys.float_info.epsilon + )
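Putting the pieces above together, a minimal end-to-end training loop using the new `ScheduledOptimizer` wrapper looks roughly like the following sketch. The model, data, loss function, and recipe path are placeholders; only the `ScheduledModifierManager` / `ScheduledOptimizer` calls and `manager.max_epochs` mirror the code in this diff.

```python
import torch
from torch.nn import CrossEntropyLoss, Linear
from torch.optim import SGD
from torch.utils.data import DataLoader, TensorDataset

from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer

# placeholder model and data; substitute your own
model = Linear(128, 10)
train_loader = DataLoader(
    TensorDataset(torch.randn(512, 128), torch.randint(0, 10, (512,))),
    batch_size=32,
)
criterion = CrossEntropyLoss()
optimizer = SGD(model.parameters(), lr=0.1)

# wrap the optimizer so each step() drives the recipe's modifiers
manager = ScheduledModifierManager.from_yaml("/PATH/TO/recipe.yaml")
optimizer = ScheduledOptimizer(
    optimizer, model, manager, steps_per_epoch=len(train_loader)
)

for epoch in range(int(manager.max_epochs)):
    for inputs, labels in train_loader:
        optimizer.zero_grad()
        loss = criterion(model(inputs), labels)
        loss = optimizer.loss_update(loss)  # optional: lets loss-based modifiers run
        loss.backward()
        optimizer.step()  # modifiers.update / pre_step / wrapped step / post_step
```

When checkpointing, `save_model(..., include_modifiers=True)` from `sparseml.pytorch.utils` will additionally serialize the wrapped manager's state under the `"manager"` key, per the `model.py` change above.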