diff --git a/src/sparseml/pytorch/optim/modifier.py b/src/sparseml/pytorch/optim/modifier.py
index 907867c583d..55ac01bace4 100644
--- a/src/sparseml/pytorch/optim/modifier.py
+++ b/src/sparseml/pytorch/optim/modifier.py
@@ -196,9 +196,7 @@ def initialize(
             for individual modifiers.
         """
         self._initialized = True
-
-        if loggers:
-            self.initialize_loggers(loggers)
+        self.initialize_loggers(loggers)
 
     def initialize_loggers(self, loggers: Union[None, List[BaseLogger]]):
         """
diff --git a/src/sparseml/pytorch/optim/optimizer.py b/src/sparseml/pytorch/optim/optimizer.py
index 02a2def1eb5..cdb9788e7de 100644
--- a/src/sparseml/pytorch/optim/optimizer.py
+++ b/src/sparseml/pytorch/optim/optimizer.py
@@ -81,11 +81,11 @@ def __init__(
     ):
         # do not call into super since this instance is not passing all calls to
         # the nested optimizer
-        warnings.warn(
-            "ScheduledOptimizer is deprecated and will be deleted in the future. "
-            "Please replace with manager.modify",
-            UserWarning,
-        )
+        # warnings.warn(
+        #     "ScheduledOptimizer is deprecated and will be deleted in the future. "
+        #     "Please replace with manager.modify",
+        #     UserWarning,
+        # ) TODO: uncomment in next release once docs are ready
         manager.initialize(module, epoch=0.0, loggers=loggers)
         self._wrapper = RecipeManagerStepWrapper(
@@ -107,7 +107,7 @@ def __getattr__(self, item):
         if item in self.__dict__:
             return getattr(self, item)
 
-        return getattr(self._wrapped, item)
+        return getattr(self._wrapper.wrapped_optimizer, item)
 
     def __setattr__(self, key, value):
         if key in [
@@ -118,7 +118,7 @@ def __setattr__(self, key, value):
         ]:
             super().__setattr__(key, value)
         else:
-            setattr(self._optimizer, key, value)
+            setattr(self._wrapper.wrapped_optimizer, key, value)
 
     @property
     def learning_rate(self) -> float:
@@ -126,7 +126,7 @@ def learning_rate(self) -> float:
         :return: convenience function to get the first learning rate for any of
             the param groups in the optimizer
         """
-        return get_optim_learning_rate(self._optimizer)
+        return get_optim_learning_rate(self._wrapper.wrapped_optimizer)
 
     @learning_rate.setter
     def learning_rate(self, value: float):
@@ -134,7 +134,7 @@ def learning_rate(self, value: float):
         :param value: the learning rate to set for the optimizer, will set all
             param groups in the optim to this value
         """
-        set_optim_learning_rate(self._optimizer, value)
+        set_optim_learning_rate(self._wrapper.wrapped_optimizer, value)
 
     @property
     def manager(self) -> ScheduledModifierManager:
@@ -144,10 +144,10 @@ def manager(self) -> ScheduledModifierManager:
         return self._wrapper.wrapped_manager
 
     def manager_state_dict(self):
-        return self._manager.state_dict()
+        return self._wrapper.wrapped_manager.state_dict()
 
     def load_manager_state_dict(self, state_dict):
-        self._manager.load_state_dict(state_dict)
+        self._wrapper.wrapped_manager.load_state_dict(state_dict)
 
     def step(self, closure=None):
         """
diff --git a/tests/sparseml/pytorch/optim/test_modifier.py b/tests/sparseml/pytorch/optim/test_modifier.py
index c0b05ca10fc..2ceb55c7dd0 100644
--- a/tests/sparseml/pytorch/optim/test_modifier.py
+++ b/tests/sparseml/pytorch/optim/test_modifier.py
@@ -216,11 +216,6 @@ def test_log_update(
         model = model_lambda()
         optimizer = optim_lambda(model)
 
-        with pytest.raises(RuntimeError):
-            modifier.log_update(model, optimizer, test_epoch, test_steps_per_epoch)
-
-        self.initialize_helper(modifier, model, log_initialize=False)
-
         with pytest.raises(RuntimeError):
             modifier.log_update(model, optimizer, test_epoch, test_steps_per_epoch)
 
@@ -496,11 +491,6 @@ def test_scheduled_log_update(
         model = model_lambda()
         optimizer = optim_lambda(model)
 
-        with pytest.raises(RuntimeError):
-            modifier.scheduled_log_update(model, optimizer, 0.0, test_steps_per_epoch)
-
-        self.initialize_helper(modifier, model, log_initialize=False)
-
         with pytest.raises(RuntimeError):
             modifier.scheduled_log_update(model, optimizer, 0.0, test_steps_per_epoch)
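
The optimizer.py hunks all route attribute access through self._wrapper instead of attributes the class never defines (self._wrapped, self._optimizer, self._manager); with the old names, every delegated lookup re-enters __getattr__ and recurses until a RecursionError. Below is a minimal, self-contained sketch of that delegation pattern under stated assumptions, not the SparseML implementation: Wrapper, OptimizerProxy, and DummyOptimizer are hypothetical stand-ins for RecipeManagerStepWrapper, ScheduledOptimizer, and a torch.optim optimizer.

class Wrapper:
    """Holds the real optimizer, analogous to RecipeManagerStepWrapper."""

    def __init__(self, optimizer):
        self.wrapped_optimizer = optimizer


class OptimizerProxy:
    """Forwards unknown attribute reads/writes to the wrapped optimizer."""

    def __init__(self, optimizer):
        # routed through __setattr__ below, which stores _wrapper on the proxy itself
        self._wrapper = Wrapper(optimizer)

    def __getattr__(self, item):
        # only called when normal lookup fails; delegate to the real optimizer.
        # Reading a nonexistent attribute such as self._wrapped here (the pre-fix
        # code) would re-enter __getattr__ and recurse instead of delegating.
        return getattr(self._wrapper.wrapped_optimizer, item)

    def __setattr__(self, key, value):
        # keep the wrapper itself local; everything else goes to the optimizer
        if key == "_wrapper":
            super().__setattr__(key, value)
        else:
            setattr(self._wrapper.wrapped_optimizer, key, value)


class DummyOptimizer:
    """Hypothetical stand-in for a torch.optim optimizer."""

    def __init__(self):
        self.lr = 0.1

    def step(self):
        return "stepped"


if __name__ == "__main__":
    proxy = OptimizerProxy(DummyOptimizer())
    print(proxy.step())  # "stepped", resolved via __getattr__ delegation
    proxy.lr = 0.01      # forwarded to DummyOptimizer by __setattr__
    print(proxy.lr)      # 0.01, read back through __getattr__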