feat: logging of train and eval better aligned (#105)
maximilianwerk committed Oct 11, 2021
1 parent 562c65f commit c971f82
Showing 4 changed files with 68 additions and 41 deletions.
24 changes: 12 additions & 12 deletions finetuner/tuner/keras/__init__.py
@@ -12,6 +12,7 @@
 from ..base import BaseTuner
 from ...helper import DocumentArrayLike
 from ..dataset.helper import get_dataset
+from ..logger import LogGenerator


 class KerasTuner(BaseTuner):
@@ -62,11 +63,12 @@ def _train(self, data, optimizer, description: str):
         losses = []
         metrics = []

-        get_desc_str = (
-            lambda: f'Loss={np.mean(losses):.2f} Accuracy={np.mean(metrics):.2f}'
-        )
+        log_generator = LogGenerator('T', losses, metrics)

-        with ProgressBar(description, message_on_done=get_desc_str) as p:
+        with ProgressBar(
+            description, message_on_done=log_generator, final_line_feed=False
+        ) as p:
             for inputs, label in data:
                 with tf.GradientTape() as tape:
                     outputs = self.wrapped_model(inputs, training=True)
@@ -81,21 +83,19 @@ def _train(self, data, optimizer, description: str):
                 losses.append(loss.numpy())
                 metrics.append(metric.numpy())

-                p.update(message=get_desc_str())
+                p.update(message=log_generator())

         return losses, metrics

-    def _eval(self, data, description: str = 'Evaluating'):
+    def _eval(self, data, description: str = 'Evaluating', train_log: str = ''):
         head_layer = self.head_layer()

         losses = []
         metrics = []

-        get_desc_str = (
-            lambda: f'Loss={np.mean(losses):.2f} Accuracy={np.mean(metrics):.2f}'
-        )
+        log_generator = LogGenerator('E', losses, metrics, train_log)

-        with ProgressBar(description, message_on_done=get_desc_str) as p:
+        with ProgressBar(description, message_on_done=log_generator) as p:
             for inputs, label in data:
                 outputs = self.wrapped_model(inputs, training=False)
                 loss = head_layer.loss_fn(pred_val=outputs, target_val=label)
@@ -104,7 +104,7 @@ def _eval(self, data, description: str = 'Evaluating'):
                 losses.append(loss.numpy())
                 metrics.append(metric.numpy())

-                p.update(message=get_desc_str())
+                p.update(message=log_generator())

         return losses, metrics

@@ -143,7 +143,7 @@ def fit(
             metrics_train.extend(mt)

             if eval_data:
-                le, me = self._eval(_eval_data)
+                le, me = self._eval(_eval_data, train_log=LogGenerator('T', lt, mt)())
                 losses_eval.extend(le)
                 metrics_eval.extend(me)
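To make the intent of the keras changes concrete, here is a small self-contained sketch of the new train/eval logging hand-off. The helper functions and all numbers below are invented for illustration; only LogGenerator, added in finetuner/tuner/logger.py below, comes from this commit.

# Self-contained sketch of the new hand-off (invented data; _train/_eval here are
# local stand-ins for the tuner methods, not the real implementations).
from finetuner.tuner.logger import LogGenerator


def _train():
    # pretend per-batch losses and metrics collected during one training epoch
    return [0.51, 0.49], [0.90, 0.92]


def _eval(train_log: str = ''):
    # pretend per-batch losses and metrics collected during evaluation
    losses, metrics = [0.62, 0.58], [0.87, 0.89]
    log_generator = LogGenerator('E', losses, metrics, train_log)
    # this is the message the evaluation progress bar shows when it is done
    print(log_generator())  # -> "T: L=    0.50 A=0.91 | E: L=    0.60 A=0.88"
    return losses, metrics


lt, mt = _train()
# render the training statistics once and pass them on, exactly as fit() now does
le, me = _eval(train_log=LogGenerator('T', lt, mt)())

The training ProgressBar is also created with final_line_feed=False, presumably so that the evaluation message, which now carries both statistics, ends up on the same console line as the training bar.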
33 changes: 33 additions & 0 deletions finetuner/tuner/logger.py
@@ -0,0 +1,33 @@
+import numpy as np
+
+
+class LogGenerator:
+    def __init__(self, name, losses, metrics, prefix: str = ''):
+        self._losses = losses
+        self._metrics = metrics
+        self._prefix = prefix
+        self._name = name
+
+    def __call__(self):
+        if self._prefix:
+            prefix = f'{self._prefix} | '
+        else:
+            prefix = ''
+        return f'{prefix}{self._name}: {self.get_statistic()}'
+
+    def get_statistic(self):
+        return f'L={self.mean_loss():>8} A={self.mean_metric():>4}'
+
+    def mean_loss(self):
+        return LogGenerator.get_log_value(self._losses)
+
+    def mean_metric(self):
+        return LogGenerator.get_log_value(self._metrics)
+
+    @staticmethod
+    def get_log_value(data):
+        mean = np.mean(data)
+        if mean < 1e5:
+            return f'{mean:.2f}'
+        else:
+            return f'{mean:.2e}'
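A short illustration of the formatting this class produces, with invented numbers and behaviour read directly from the code above:

# Formatting behaviour of LogGenerator (numbers invented for the example).
from finetuner.tuner.logger import LogGenerator

log = LogGenerator('T', [0.51, 0.49], [0.90, 0.92])
print(log())                                    # -> "T: L=    0.50 A=0.91" (loss right-aligned to width 8)

# Means of 1e5 or more switch to scientific notation via get_log_value:
print(LogGenerator.get_log_value([2e6, 4e6]))   # -> "3.00e+06"
print(LogGenerator.get_log_value([0.1234]))     # -> "0.12"

The fixed-width loss field keeps the "T: ..." and "E: ..." statistics visually lined up across epochs, which appears to be the alignment the commit title refers to.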
27 changes: 12 additions & 15 deletions finetuner/tuner/paddle/__init__.py
@@ -10,6 +10,7 @@
 from ..base import BaseTuner, BaseHead, BaseArityModel
 from ...helper import DocumentArrayLike
 from ..dataset.helper import get_dataset
+from ..logger import LogGenerator


 class _ArityModel(BaseArityModel, nn.Layer):
@@ -39,17 +40,15 @@ def _get_data_loader(self, inputs, batch_size: int, shuffle: bool):
             shuffle=shuffle,
         )

-    def _eval(self, data, description: str = 'Evaluating'):
+    def _eval(self, data, description: str = 'Evaluating', train_log: str = ''):
         self.wrapped_model.eval()

         losses = []
         metrics = []

-        get_desc_str = (
-            lambda: f'Loss={np.mean(losses):.2f} Accuracy={np.mean(metrics):.2f}'
-        )
+        log_generator = LogGenerator('E', losses, metrics, train_log)

-        with ProgressBar(description, message_on_done=get_desc_str) as p:
+        with ProgressBar(description, message_on_done=log_generator) as p:
             for inputs, label in data:
                 outputs = self.wrapped_model(*inputs)
                 loss = self.wrapped_model.loss_fn(outputs, label)
@@ -58,7 +57,7 @@ def _eval(self, data, description: str = 'Evaluating'):
                 losses.append(loss.item())
                 metrics.append(metric.numpy())

-                p.update(message=get_desc_str())
+                p.update(message=log_generator())

         return losses, metrics

@@ -69,11 +68,11 @@ def _train(self, data, optimizer: Optimizer, description: str):
         losses = []
         metrics = []

-        get_desc_str = (
-            lambda: f'Loss={np.mean(losses):.2f} Accuracy={float(np.mean(metrics)):.2f}'
-        )
+        log_generator = LogGenerator('T', losses, metrics)

-        with ProgressBar(description, message_on_done=get_desc_str) as p:
+        with ProgressBar(
+            description, message_on_done=log_generator, final_line_feed=False
+        ) as p:
             for inputs, label in data:
                 # forward step
                 outputs = self.wrapped_model(*inputs)
@@ -88,7 +87,7 @@ def _train(self, data, optimizer: Optimizer, description: str):
                 losses.append(loss.item())
                 metrics.append(metric.numpy())

-                p.update(message=get_desc_str())
+                p.update(message=log_generator())
         return losses, metrics

     def fit(
@@ -99,10 +98,8 @@ def fit(
         batch_size: int = 256,
         **kwargs,
     ):
-        model = self.wrapped_model
-
         optimizer = paddle.optimizer.RMSProp(
-            learning_rate=0.01, parameters=model.parameters()
+            learning_rate=0.01, parameters=self.wrapped_model.parameters()
         )

         losses_train = []
@@ -127,7 +124,7 @@ def fit(
                     inputs=eval_data, batch_size=batch_size, shuffle=False
                 )

-                le, me = self._eval(_data)
+                le, me = self._eval(_data, train_log=LogGenerator('T', lt, mt)())
                 losses_eval.extend(le)
                 metrics_eval.extend(me)
25 changes: 11 additions & 14 deletions finetuner/tuner/pytorch/__init__.py
@@ -1,6 +1,5 @@
 from typing import Optional

-import numpy as np
 import torch
 import torch.nn as nn
 from jina.helper import cached_property
@@ -12,6 +11,7 @@
 from ..base import BaseTuner, BaseHead, BaseArityModel
 from ...helper import DocumentArrayLike
 from ..dataset.helper import get_dataset
+from ..logger import LogGenerator


 class _ArityModel(BaseArityModel, nn.Module):
@@ -41,17 +41,14 @@ def _get_data_loader(self, inputs, batch_size: int, shuffle: bool):
             shuffle=shuffle,
         )

-    def _eval(self, data, description: str = 'Evaluating'):
+    def _eval(self, data, description: str = 'Evaluating', train_log: str = ''):
         self.wrapped_model.eval()

         losses = []
         metrics = []
+        log_generator = LogGenerator('E', losses, metrics, train_log)

-        get_desc_str = (
-            lambda: f'Loss={np.mean(losses):.2f} Accuracy={np.mean(metrics):.2f}'
-        )
-
-        with ProgressBar(description, message_on_done=get_desc_str) as p:
+        with ProgressBar(description, message_on_done=log_generator) as p:
             for inputs, label in data:
                 with torch.inference_mode():
                     outputs = self.wrapped_model(*inputs)
@@ -61,7 +58,7 @@ def _eval(self, data, description: str = 'Evaluating'):
                 losses.append(loss.item())
                 metrics.append(metric.numpy())

-                p.update(message=get_desc_str())
+                p.update(message=log_generator())

         return losses, metrics

@@ -72,11 +69,11 @@ def _train(self, data, optimizer: Optimizer, description: str):
         losses = []
         metrics = []

-        get_desc_str = (
-            lambda: f'Loss={np.mean(losses):.2f} Accuracy={np.mean(metrics):.2f}'
-        )
+        log_generator = LogGenerator('T', losses, metrics)

-        with ProgressBar(description, message_on_done=get_desc_str) as p:
+        with ProgressBar(
+            description, message_on_done=log_generator, final_line_feed=False
+        ) as p:
             for inputs, label in data:
                 # forward step
                 outputs = self.wrapped_model(*inputs)
@@ -91,7 +88,7 @@ def _train(self, data, optimizer: Optimizer, description: str):
                 losses.append(loss.item())
                 metrics.append(metric.numpy())

-                p.update(message=get_desc_str())
+                p.update(message=log_generator())
         return losses, metrics

     def fit(
@@ -128,7 +125,7 @@ def fit(
                     inputs=eval_data, batch_size=batch_size, shuffle=False
                 )

-                le, me = self._eval(_data)
+                le, me = self._eval(_data, train_log=LogGenerator('T', lt, mt)())
                 losses_eval.extend(le)
                 metrics_eval.extend(me)
