From 11d5c9ee6bfebb4026f93f4d9a0a3e2d908e5811 Mon Sep 17 00:00:00 2001
From: Tuan Nguyen
Date: Fri, 3 Sep 2021 11:18:43 -0400
Subject: [PATCH] Default behavior for sparseml w/o recipes

---
 src/transformers/sparse.py | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/src/transformers/sparse.py b/src/transformers/sparse.py
index 60f120e44cbb..a2a6c2497d87 100644
--- a/src/transformers/sparse.py
+++ b/src/transformers/sparse.py
@@ -111,17 +111,15 @@ def create_scheduler(self, num_training_steps: int):
             # scheduler already set
             return
 
-        if self.manager.learning_rate_modifiers:
+        if self.manager is not None and self.manager.learning_rate_modifiers:
             # allow SparseML to manage LR and set a dummy scheduler
-            self.lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
-                self.optimizer, lambda _: 1.0, -1
-            )
+            self.lr_scheduler = torch.optim.lr_scheduler.LambdaLR(self.optimizer, lambda _: 1.0, -1)
         else:
             # default scheduler
             super().create_scheduler(num_training_steps)
 
     def qat_active(self, epoch: int):
-        if not self.manager.quantization_modifiers:
+        if self.manager is None or not self.manager.quantization_modifiers:
             return False
 
         qat_start = min([mod.start_epoch for mod in self.manager.quantization_modifiers])
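
Note: for context on the pattern this patch introduces, below is a minimal runnable
sketch of the guarded behavior: when no SparseML recipe is supplied, self.manager is
None, so create_scheduler falls back to a default scheduler and qat_active reports
False. Only the two "if" guards and the dummy LambdaLR call come from the diff itself;
the SimpleTrainer class, its constructor, the linear-decay fallback lambda, and the
final "epoch >= qat_start" comparison are illustrative assumptions, not the real
transformers/SparseML API.

import torch


class SimpleTrainer:
    def __init__(self, optimizer, manager=None):
        self.optimizer = optimizer
        self.manager = manager  # None when no SparseML recipe was provided
        self.lr_scheduler = None

    def create_scheduler(self, num_training_steps: int):
        if self.lr_scheduler is not None:
            # scheduler already set
            return

        if self.manager is not None and self.manager.learning_rate_modifiers:
            # let the SparseML recipe drive the LR; install a constant dummy scheduler
            self.lr_scheduler = torch.optim.lr_scheduler.LambdaLR(self.optimizer, lambda _: 1.0, -1)
        else:
            # no recipe (or no LR modifiers): use a plain default scheduler
            # (hypothetical linear decay standing in for super().create_scheduler)
            self.lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
                self.optimizer, lambda step: max(0.0, 1.0 - step / num_training_steps), -1
            )

    def qat_active(self, epoch: int) -> bool:
        # without a recipe there are no quantization modifiers, so QAT is inactive
        if self.manager is None or not self.manager.quantization_modifiers:
            return False

        qat_start = min(mod.start_epoch for mod in self.manager.quantization_modifiers)
        # assumed continuation: QAT is active once the earliest modifier has started
        return epoch >= qat_start

With this guard in place, constructing the trainer without a manager (the
"w/o recipes" case in the subject line) exercises the default branches instead of
raising AttributeError on self.manager.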