diff --git a/lightning_examples/text-transformers/.meta.yml b/lightning_examples/text-transformers/.meta.yml
index 394d05be4..163cbf7f6 100644
--- a/lightning_examples/text-transformers/.meta.yml
+++ b/lightning_examples/text-transformers/.meta.yml
@@ -1,9 +1,9 @@
 title: Finetune Transformers Models with PyTorch Lightning
 author: PL team
 created: 2021-01-31
-updated: 2021-12-03
+updated: 2022-02-08
 license: CC BY-SA
-build: 2
+build: 0
 tags:
   - Text
 description: |
@@ -17,5 +17,4 @@ requirements:
   - scikit-learn
   - torchtext>=0.9
 accelerator:
-  - CPU
   - GPU
diff --git a/lightning_examples/text-transformers/text-transformers.py b/lightning_examples/text-transformers/text-transformers.py
index 57786f010..6f34a9b2e 100644
--- a/lightning_examples/text-transformers/text-transformers.py
+++ b/lightning_examples/text-transformers/text-transformers.py
@@ -224,8 +224,8 @@ def setup(self, stage=None) -> None:
 
         # Calculate total steps
         tb_size = self.hparams.train_batch_size * max(1, self.trainer.gpus)
-        ab_size = self.trainer.accumulate_grad_batches * float(self.trainer.max_epochs)
-        self.total_steps = (len(train_loader.dataset) // tb_size) // ab_size
+        ab_size = tb_size * self.trainer.accumulate_grad_batches
+        self.total_steps = int((len(train_loader.dataset) / ab_size) * float(self.trainer.max_epochs))
 
     def configure_optimizers(self):
         """Prepare optimizer and schedule (linear warmup and decay)"""
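
Note on the text-transformers.py change: the old code divided the per-epoch optimizer-step count by accumulate_grad_batches * float(max_epochs), so total_steps shrank as the epoch count grew (and came out as a float), ending the linear warmup/decay schedule far too early. The new expression folds gradient accumulation into the effective batch size and multiplies by the epoch count. Below is a minimal standalone sketch of the corrected arithmetic; the dataset size, batch size, GPU count, and epoch count are hypothetical values for illustration, not taken from the diff:

    # Standalone check of the corrected total_steps arithmetic (illustrative only;
    # all concrete values here are assumptions).
    dataset_len = 67349            # assumed train-set size
    train_batch_size = 32          # assumed per-device batch size
    gpus = 1
    accumulate_grad_batches = 1
    max_epochs = 3

    tb_size = train_batch_size * max(1, gpus)    # samples consumed per forward pass
    ab_size = tb_size * accumulate_grad_batches  # samples consumed per optimizer step
    total_steps = int((dataset_len / ab_size) * float(max_epochs))

    print(total_steps)  # 6313: roughly (67349 / 32) steps per epoch, times 3 epochs
    # For contrast, the replaced formula, (dataset_len // tb_size) // ab_size with
    # ab_size = accumulate_grad_batches * float(max_epochs), yields 701.0 here.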