Fix EarlyStopping logic when min_epochs not met #6705

Merged
merged 8 commits on Apr 6, 2021
Changes from 5 commits
3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -214,6 +214,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- Fixed a bug where gradients were disabled after calling `Trainer.predict` ([#6657](https://github.com/PyTorchLightning/pytorch-lightning/pull/6657))


- Fixed `EarlyStopping` logic when `min_epochs` or `min_steps` requirement is not met ([#6705](https://github.com/PyTorchLightning/pytorch-lightning/pull/6705))


## [1.2.4] - 2021-03-16

### Changed
1 change: 1 addition & 0 deletions pytorch_lightning/trainer/trainer.py
@@ -614,6 +614,7 @@ def run_train(self) -> None:
f' ({self.min_epochs}) or minimum steps ({self.min_steps}) has'
' not been met. Training will continue...'
)
self.should_stop = False

# hook
self.train_loop.on_train_end()
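
For context, a minimal sketch of where this one-line fix sits inside `Trainer.run_train` (a paraphrase under simplifying assumptions, not the verbatim Lightning source; the loop shape and the `met_min_epochs`/`met_min_steps` names are illustrative):

def run_train(self) -> None:
    # ... setup elided ...
    for _ in range(self.max_epochs):
        # callbacks such as EarlyStopping may set `self.should_stop` here
        self.train_loop.run_training_epoch()
        if self.should_stop:
            met_min_epochs = self.current_epoch >= self.min_epochs - 1 if self.min_epochs else True
            met_min_steps = self.global_step >= self.min_steps if self.min_steps else True
            if met_min_epochs and met_min_steps:
                break
            log.info(
                'Trainer was signaled to stop but required minimum epochs'
                f' ({self.min_epochs}) or minimum steps ({self.min_steps}) has'
                ' not been met. Training will continue...'
            )
            # the fix: clear the flag so the pending stop signal does not
            # end the very next epoch before the minimums are reached
            self.should_stop = False

    # hook
    self.train_loop.on_train_end()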
35 changes: 35 additions & 0 deletions tests/trainer/test_trainer.py
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import os
import pickle
@@ -546,6 +547,40 @@ def test_trainer_min_steps_and_epochs(tmpdir):
assert trainer.global_step >= math.floor(num_train_samples * 1.5), "Model did not train for at least min_steps"


def test_trainer_min_steps_and_min_epochs_not_reached(tmpdir, caplog):
""" Test that min_epochs/min_steps in Trainer are enforced even if EarlyStopping is triggered. """

class TestModel(BoringModel):
training_step_invoked = 0

def training_step(self, batch, batch_idx):
output = super().training_step(batch, batch_idx)
output["loss"] = output["loss"] * 0.0 # force minimal loss to trigger early stopping
self.log("loss", output["loss"])
self.training_step_invoked += 1
assert not self.trainer.should_stop  # the fix resets the flag until the minimums are met
return output

model = TestModel()
early_stop = EarlyStopping(monitor="loss", patience=0)
min_epochs = 5
trainer = Trainer(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
min_epochs=min_epochs,
limit_val_batches=0,
limit_train_batches=2,
callbacks=[early_stop]
)
with caplog.at_level(logging.INFO, logger="pytorch_lightning.trainer.trainer"):
trainer.fit(model)

message = f"minimum epochs ({min_epochs}) or minimum steps (None) has not been met. Training will continue"
num_messages = len([record.message for record in caplog.records if message in record.message])
assert num_messages == min_epochs - 2
assert model.training_step_invoked == min_epochs * 2
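
With the fix, a configuration like the following (a hypothetical usage sketch, not part of this diff) trains for at least `min_epochs` even if `EarlyStopping` fires earlier, then honors the stop signal at the first opportunity:

from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping

# `model` is any LightningModule that logs the monitored "loss" metric
trainer = Trainer(
    min_epochs=5,  # EarlyStopping cannot end training before 5 epochs
    max_epochs=100,
    callbacks=[EarlyStopping(monitor="loss", patience=0)],
)
trainer.fit(model)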


def test_trainer_max_steps_accumulate_batches(tmpdir):
"""Verify model trains according to specified max steps with grad accumulated batches"""
model = BoringModel()
Expand Down