drop unused test with result api (Lightning-AI#5058)
Co-authored-by: chaton <thomas@grid.ai>
Co-authored-by: Rohit Gupta <rohitgr1998@gmail.com>
3 people committed Dec 12, 2020
1 parent b50ad9e commit a49291d
Showing 6 changed files with 1 addition and 381 deletions.
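
For orientation: the deleted tests below exercised the TrainResult/EvalResult step-result objects, which Lightning replaced with logging directly on the module via self.log around the 1.0 release. A minimal sketch of the replacement pattern, not taken from this commit (module and tensor names are illustrative):

import torch
import torch.nn.functional as F
from pytorch_lightning import LightningModule


class LogSketch(LightningModule):
    """Illustrative only: the self.log API that superseded TrainResult/EvalResult."""

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(28 * 28, 10)

    def forward(self, x):
        return self.layer(x)

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x.view(x.size(0), -1))
        loss = F.cross_entropy(y_hat, y)
        # self.log replaces TrainResult(...).log(...); the prog_bar/logger/
        # on_step/on_epoch flags keep the same meaning as in the deleted tests
        self.log("train_loss", loss, prog_bar=True)
        # returning the loss replaces TrainResult(minimize=loss)
        return loss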
230 changes: 0 additions & 230 deletions tests/base/deterministic_model.py
@@ -15,7 +15,6 @@
from torch import nn
from torch.utils.data import Dataset, DataLoader

from pytorch_lightning.core.step_result import TrainResult, EvalResult
from pytorch_lightning.core.lightning import LightningModule


@@ -111,235 +110,6 @@ def training_epoch_end_scalar(self, outputs):
assert batch_out.grad_fn is None
assert isinstance(batch_out, torch.Tensor)

def training_step_no_default_callbacks_for_train_loop(self, batch, batch_idx):
"""
No early-stop key by default; 'checkpoint_on' is present automatically
"""
acc = self.step(batch, batch_idx)
result = TrainResult(minimize=acc)
assert 'early_step_on' not in result
assert 'checkpoint_on' in result
return result

def training_step_no_callbacks_result_obj(self, batch, batch_idx):
"""
With checkpoint_on=False, neither early-stop nor checkpoint keys are present
"""
acc = self.step(batch, batch_idx)
result = TrainResult(minimize=acc, checkpoint_on=False)
assert 'early_step_on' not in result
assert 'checkpoint_on' not in result
return result

def training_step_result_log_epoch_and_step_for_callbacks(self, batch, batch_idx):
"""
Early stop and checkpoint only on these values
"""
acc = self.step(batch, batch_idx)

self.assert_backward = False
losses = [20, 19, 18, 10, 15, 14, 9, 11, 11, 20]
idx = self.current_epoch
loss = acc + losses[idx]
result = TrainResult(minimize=loss, early_stop_on=loss, checkpoint_on=loss)
return result

def training_step_result_log_step_only(self, batch, batch_idx):
acc = self.step(batch, batch_idx)
result = TrainResult(minimize=acc)

# step only metrics
result.log(f'step_log_and_pbar_acc1_b{batch_idx}', torch.tensor(11).type_as(acc), prog_bar=True)
result.log(f'step_log_acc2_b{batch_idx}', torch.tensor(12).type_as(acc))
result.log(f'step_pbar_acc3_b{batch_idx}', torch.tensor(13).type_as(acc), logger=False, prog_bar=True)

self.training_step_called = True
return result

def training_step_result_log_epoch_only(self, batch, batch_idx):
acc = self.step(batch, batch_idx)
result = TrainResult(minimize=acc)

result.log(f'epoch_log_and_pbar_acc1_e{self.current_epoch}', torch.tensor(14).type_as(acc),
on_epoch=True, prog_bar=True, on_step=False)
result.log(f'epoch_log_acc2_e{self.current_epoch}', torch.tensor(15).type_as(acc),
on_epoch=True, on_step=False)
result.log(f'epoch_pbar_acc3_e{self.current_epoch}', torch.tensor(16).type_as(acc),
on_epoch=True, logger=False, prog_bar=True, on_step=False)

self.training_step_called = True
return result

def training_step_result_log_epoch_and_step(self, batch, batch_idx):
acc = self.step(batch, batch_idx)
result = TrainResult(minimize=acc)

val_1 = (5 + batch_idx) * (self.current_epoch + 1)
val_2 = (6 + batch_idx) * (self.current_epoch + 1)
val_3 = (7 + batch_idx) * (self.current_epoch + 1)
result.log('step_epoch_log_and_pbar_acc1', torch.tensor(val_1).type_as(acc),
on_epoch=True, prog_bar=True)
result.log('step_epoch_log_acc2', torch.tensor(val_2).type_as(acc),
on_epoch=True)
result.log('step_epoch_pbar_acc3', torch.tensor(val_3).type_as(acc),
on_epoch=True, logger=False, prog_bar=True)

self.training_step_called = True
return result

def training_epoch_end_return_for_log_epoch_and_step(self, result):
"""
The result should contain four scalar tensors, each equal to 171, with no graph attached
"""
self.training_epoch_end_called = True

if self.use_dp or self.use_ddp2:
pass
else:
# only saw 4 batches
assert isinstance(result, TrainResult)

result.step_epoch_log_acc2 = result.step_epoch_log_acc2_step.prod()
result.step_epoch_pbar_acc3 = result.step_epoch_pbar_acc3_step.prod()
result.step_epoch_log_and_pbar_acc1 = result.step_epoch_log_and_pbar_acc1_step.prod()
result.minimize = result.minimize.mean()
result.checkpoint_on = result.checkpoint_on.mean()

result.step_epoch_log_and_pbar_acc1_step = result.step_epoch_log_and_pbar_acc1_step.prod()
result.step_epoch_log_and_pbar_acc1_epoch = result.step_epoch_log_and_pbar_acc1_epoch.prod()
result.step_epoch_log_acc2_step = result.step_epoch_log_acc2_step.prod()
result.step_epoch_log_acc2_epoch = result.step_epoch_log_acc2_epoch.prod()
result.step_epoch_pbar_acc3_step = result.step_epoch_pbar_acc3_step.prod()
result.step_epoch_pbar_acc3_epoch = result.step_epoch_pbar_acc3_epoch.prod()
result.log('epoch_end_log_acc', torch.tensor(1212).type_as(result.step_epoch_log_acc2_epoch),
logger=True, on_epoch=True)
result.log('epoch_end_pbar_acc', torch.tensor(1213).type_as(result.step_epoch_log_acc2_epoch),
logger=False, prog_bar=True, on_epoch=True)
result.log('epoch_end_log_pbar_acc', torch.tensor(1214).type_as(result.step_epoch_log_acc2_epoch),
logger=True, prog_bar=True, on_epoch=True)
return result

# --------------------------
# EvalResults
# --------------------------
def validation_step_result_callbacks(self, batch, batch_idx):
acc = self.step(batch, batch_idx)

self.assert_backward = False
losses = [20, 19, 20, 21, 22, 23]
idx = self.current_epoch
loss = acc + losses[idx]
result = EvalResult(early_stop_on=loss, checkpoint_on=loss)

self.validation_step_called = True
return result

def validation_step_result_no_callbacks(self, batch, batch_idx):
acc = self.step(batch, batch_idx)

self.assert_backward = False
losses = [20, 19, 20, 21, 22, 23, 50, 50, 50, 50, 50, 50]
idx = self.current_epoch
loss = acc + losses[idx]

result = EvalResult(checkpoint_on=loss)

self.validation_step_called = True
return result

def validation_step_result_only_epoch_metrics(self, batch, batch_idx):
"""
Only track epoch level metrics
"""
acc = self.step(batch, batch_idx)
result = EvalResult(checkpoint_on=acc, early_stop_on=acc)

# epoch-only metrics (EvalResult.log defaults to on_epoch=True)
result.log('no_val_no_pbar', torch.tensor(11 + batch_idx).type_as(acc), prog_bar=False, logger=False)
result.log('val_step_log_acc', torch.tensor(11 + batch_idx).type_as(acc), prog_bar=False, logger=True)
result.log('val_step_log_pbar_acc', torch.tensor(12 + batch_idx).type_as(acc), prog_bar=True, logger=True)
result.log('val_step_pbar_acc', torch.tensor(13 + batch_idx).type_as(acc), prog_bar=True, logger=False)

self.validation_step_called = True
return result

def validation_step_result_only_step_metrics(self, batch, batch_idx):
"""
Only track step-level metrics
"""
acc = self.step(batch, batch_idx)
result = EvalResult(checkpoint_on=acc, early_stop_on=acc)

# step only metrics
result.log('no_val_no_pbar', torch.tensor(11 + batch_idx).type_as(acc),
prog_bar=False, logger=False, on_epoch=False, on_step=True)
result.log('val_step_log_acc', torch.tensor(11 + batch_idx).type_as(acc),
prog_bar=False, logger=True, on_epoch=False, on_step=True)
result.log('val_step_log_pbar_acc', torch.tensor(12 + batch_idx).type_as(acc),
prog_bar=True, logger=True, on_epoch=False, on_step=True)
result.log('val_step_pbar_acc', torch.tensor(13 + batch_idx).type_as(acc),
prog_bar=True, logger=False, on_epoch=False, on_step=True)
result.log('val_step_batch_idx', torch.tensor(batch_idx).type_as(acc),
prog_bar=True, logger=True, on_epoch=False, on_step=True)

self.validation_step_called = True
return result

def validation_step_result_epoch_step_metrics(self, batch, batch_idx):
"""
Track metrics at both step and epoch level
"""
acc = self.step(batch, batch_idx)
result = EvalResult(checkpoint_on=acc, early_stop_on=acc)

# step and epoch metrics
result.log('no_val_no_pbar', torch.tensor(11 + batch_idx).type_as(acc),
prog_bar=False, logger=False, on_epoch=True, on_step=True)
result.log('val_step_log_acc', torch.tensor(11 + batch_idx).type_as(acc),
prog_bar=False, logger=True, on_epoch=True, on_step=True)
result.log('val_step_log_pbar_acc', torch.tensor(12 + batch_idx).type_as(acc),
prog_bar=True, logger=True, on_epoch=True, on_step=True)
result.log('val_step_pbar_acc', torch.tensor(13 + batch_idx).type_as(acc),
prog_bar=True, logger=False, on_epoch=True, on_step=True)
result.log('val_step_batch_idx', torch.tensor(batch_idx).type_as(acc),
prog_bar=True, logger=True, on_epoch=True, on_step=True)

self.validation_step_called = True
return result

def validation_step_for_epoch_end_result(self, batch, batch_idx):
"""
EvalResult flows to epoch end (without step_end)
"""
acc = self.step(batch, batch_idx)
result = EvalResult(checkpoint_on=acc, early_stop_on=acc)

# epoch-only metrics (reduced in validation_epoch_end)
result.log('val_step_metric', torch.tensor(batch_idx).type_as(acc),
prog_bar=True, logger=True, on_epoch=True, on_step=False)
result.log('batch_idx', torch.tensor(batch_idx).type_as(acc),
prog_bar=True, logger=True, on_epoch=True, on_step=False)

self.validation_step_called = True
return result

def validation_epoch_end_result(self, result):
self.validation_epoch_end_called = True

if self.trainer.running_sanity_check:
assert len(result.batch_idx) == 2
else:
assert len(result.batch_idx) == self.trainer.limit_val_batches

expected_val = result.val_step_metric.sum() / len(result.batch_idx)
result.val_step_metric = result.val_step_metric.mean()
result.batch_idx = result.batch_idx.mean()
assert result.val_step_metric == expected_val

result.log('val_epoch_end_metric', torch.tensor(189).type_as(result.val_step_metric), prog_bar=True)

return result

# --------------------------
# dictionary returns
# --------------------------
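
The training_epoch_end_return_for_log_epoch_and_step hook above reduced the auto-generated _step/_epoch keys by hand. Under the self.log API the step/epoch split is requested per call and the epoch reduction happens automatically; a minimal sketch, assuming Lightning >= 1.0 defaults (mean reduction) and the model's own step() helper:

def training_step(self, batch, batch_idx):
    acc = self.step(batch, batch_idx)
    # with on_step and on_epoch both True, Lightning logs "acc_step" per batch
    # and "acc_epoch" as the automatic epoch-level mean, replacing the manual
    # .prod()/.mean() reductions in the deleted epoch-end hook
    self.log("acc", acc, on_step=True, on_epoch=True, prog_bar=True)
    return acc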
32 changes: 0 additions & 32 deletions tests/base/model_test_steps.py
@@ -59,38 +59,6 @@ def test_step(self, batch, batch_idx, *args, **kwargs):
'test_dic': {'test_loss_a': loss_test}})
return output

def test_step_result_obj(self, batch, batch_idx, *args, **kwargs):
"""
Baseline test_step returning an EvalResult object
"""
x, y = batch
x = x.view(x.size(0), -1)
y_hat = self(x)

loss_test = self.loss(y, y_hat)

# acc
labels_hat = torch.argmax(y_hat, dim=1)
test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
test_acc = torch.tensor(test_acc)

test_acc = test_acc.type_as(x)

result = EvalResult()
# alternate possible outputs to test
if batch_idx % 1 == 0:
result.log_dict({'test_loss': loss_test, 'test_acc': test_acc})
return result
if batch_idx % 2 == 0:
return test_acc

if batch_idx % 3 == 0:
result.log_dict({'test_loss': loss_test, 'test_acc': test_acc})
result.test_dic = {'test_loss_a': loss_test}
return result

def test_step__multiple_dataloaders(self, batch, batch_idx, dataloader_idx, **kwargs):
"""
Default, baseline test_step
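
The deleted test_step_result_obj logged loss and accuracy through EvalResult.log_dict. The equivalent under the current API is self.log_dict on the module itself; a minimal sketch, not from this commit (tensor shapes as in the deleted code):

def test_step(self, batch, batch_idx):
    x, y = batch
    y_hat = self(x.view(x.size(0), -1))
    loss_test = self.loss(y, y_hat)
    test_acc = (torch.argmax(y_hat, dim=1) == y).float().mean()
    # self.log_dict replaces EvalResult().log_dict(...); logged test metrics
    # aggregate at epoch level by default
    self.log_dict({"test_loss": loss_test, "test_acc": test_acc})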
66 changes: 0 additions & 66 deletions tests/base/model_train_steps.py
@@ -53,25 +53,6 @@ def training_step(self, batch, batch_idx, optimizer_idx=None):
)
return output

def training_step_result_obj(self, batch, batch_idx, optimizer_idx=None):
# forward pass
x, y = batch
x = x.view(x.size(0), -1)
y_hat = self(x)

# calculate loss
loss_val = self.loss(y, y_hat)
log_val = loss_val

# alternate between tensors and scalars for "log" and "progress_bar"
if batch_idx % 2 == 0:
log_val = log_val.item()

result = TrainResult(loss_val)
result.log('some_val', log_val * log_val, prog_bar=True, logger=False)
result.log('train_some_val', log_val * log_val)
return result

def training_step__inf_loss(self, batch, batch_idx, optimizer_idx=None):
output = self.training_step(batch, batch_idx, optimizer_idx)
if batch_idx == self.test_step_inf_loss:
@@ -81,19 +62,6 @@ def training_step__inf_loss(self, batch, batch_idx, optimizer_idx=None):
output /= 0
return output

def training_step_full_loop_result_obj_dp(self, batch, batch_idx, optimizer_idx=None):
"""
Full loop flow train step (result obj + dp)
"""
x, y = batch
x = x.view(x.size(0), -1)
y_hat = self(x.to(self.device))
loss_val = y_hat.sum()
result = TrainResult(minimize=loss_val)
result.log('train_step_metric', loss_val + 1)
self.training_step_called = True
return result

def training_step_result_obj_dp(self, batch, batch_idx, optimizer_idx=None):
# forward pass
x, y = batch
@@ -136,23 +104,6 @@ def training_epoch_end_full_loop_result_obj_dp(self, result):

return result

def eval_step_full_loop_result_obj_dp(self, batch, batch_idx, optimizer_idx=None):
"""
Full loop flow eval step (result obj + dp)
"""
x, y = batch
x = x.view(x.size(0), -1)
y_hat = self(x.to(self.device))
loss_val = y_hat.sum()
result = EvalResult(checkpoint_on=loss_val, early_stop_on=loss_val)

eval_name = 'validation' if not self.trainer.testing else 'test'
result.log(f'{eval_name}_step_metric', loss_val + 1, on_step=True)

setattr(self, f'{eval_name}_step_called', True)

return result

def eval_step_end_full_loop_result_obj_dp(self, result):
"""
Full loop flow eval step_end (result obj + dp)
@@ -198,20 +149,3 @@ def eval_epoch_end_full_loop_result_obj_dp(self, result):
setattr(result, f'{eval_name}_step_metric', reduced)

return result

def training_step__using_metrics(self, batch, batch_idx, optimizer_idx=None):
"""Lightning calls this inside the training loop"""
# forward pass
x, y = batch
x = x.view(x.size(0), -1)
y_hat = self(x)

# calculate loss
loss_val = self.loss(y, y_hat)

# call metric
val = self.metric(x, y)

result = TrainResult(minimize=loss_val)
result.log('metric_val', val)
return result
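
The deleted training_step__using_metrics attached a metric value to a TrainResult. With self.log the metric is logged directly; a minimal sketch, assuming the same self.metric and self.loss attributes the deleted code used:

def training_step(self, batch, batch_idx, optimizer_idx=None):
    x, y = batch
    x = x.view(x.size(0), -1)
    y_hat = self(x)
    loss_val = self.loss(y, y_hat)
    # replaces TrainResult(minimize=loss_val) followed by result.log(...)
    self.log("metric_val", self.metric(x, y))
    return loss_val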
19 changes: 0 additions & 19 deletions tests/base/model_valid_steps.py
@@ -71,25 +71,6 @@ def validation_step_no_monitor(self, batch, batch_idx, *args, **kwargs):
})
return output

def validation_step_result_obj(self, batch, batch_idx, *args, **kwargs):
x, y = batch
x = x.view(x.size(0), -1)
y_hat = self(x)

loss_val = self.loss(y, y_hat)

# acc
labels_hat = torch.argmax(y_hat, dim=1)
val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
val_acc = torch.tensor(val_acc).type_as(x)

result = EvalResult(checkpoint_on=loss_val, early_stop_on=loss_val)
result.log_dict({
'val_loss': loss_val,
'val_acc': val_acc,
})
return result

def validation_step_result_obj_dp(self, batch, batch_idx, *args, **kwargs):
x, y = batch
x = x.view(x.size(0), -1)
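
Across these files, EvalResult(checkpoint_on=..., early_stop_on=...) wired validation values to checkpointing and early stopping. The replacement is to log the value and point the callbacks at the logged key; a minimal sketch, assuming Lightning >= 1.0 (the key name "val_loss" is illustrative):

from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint

# in validation_step, self.log("val_loss", loss_val) replaces
# EvalResult(checkpoint_on=loss_val, early_stop_on=loss_val);
# the callbacks then monitor the logged key
trainer = Trainer(
    callbacks=[
        EarlyStopping(monitor="val_loss"),
        ModelCheckpoint(monitor="val_loss"),
    ]
)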