35 changes: 13 additions & 22 deletions avalanche/evaluation/metric_definitions.py
@@ -75,10 +75,6 @@ class PluginMetric(Metric[TResult], StrategyCallbacks['MetricResult'], ABC):
An instance of this class usually leverages a `Metric` instance to update,
reset and emit metric results at appropriate times
(during specific callbacks).

This class also provides a utility method, `_next_x_position`, which can
be used to progressively label each metric value with its appropriate "x"
position in the plot.
"""
def __init__(self):
"""
@@ -88,7 +84,11 @@ def __init__(self):
experience.
"""
super().__init__()
self._metric_x_counters: Dict[str, int] = dict()

self.global_it_counter = 0
"""
Counter that can be used by each metric to get increasing x values.
"""

@abstractmethod
def result(self) -> Optional[TResult]:
@@ -98,6 +98,12 @@ def result(self) -> Optional[TResult]:
def reset(self) -> None:
pass

def get_global_counter(self):
"""
:return: the global counter incremented after each minibatch.
"""
return self.global_it_counter

def before_training(self, strategy: 'BaseStrategy') -> 'MetricResult':
pass

@@ -135,7 +141,7 @@ def after_backward(self, strategy: 'BaseStrategy') -> 'MetricResult':

def after_training_iteration(self, strategy: 'BaseStrategy') \
-> 'MetricResult':
pass
self.global_it_counter += 1

def before_update(self, strategy: 'BaseStrategy') -> 'MetricResult':
pass
@@ -188,22 +194,7 @@ def after_eval_forward(self, strategy: 'BaseStrategy') \

def after_eval_iteration(self, strategy: 'BaseStrategy') \
-> 'MetricResult':
pass

def _next_x_position(self, metric_name: str, initial_x: int = 0) -> int:
"""
Utility method that can be used to get the next "x" position of a
metric value (given its name).

:param metric_name: The metric value name.
:param initial_x: The initial "x" value. Defaults to 0.
:return: The next "x" value to use.
"""
if metric_name not in self._metric_x_counters:
self._metric_x_counters[metric_name] = initial_x
x_result = self._metric_x_counters[metric_name]
self._metric_x_counters[metric_name] += 1
return x_result
self.global_it_counter += 1


__all__ = ['Metric', 'PluginMetric']
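
This changeset replaces the per-metric-name `_next_x_position` bookkeeping with a single `global_it_counter` that the base class advances after every training and evaluation iteration. Below is a minimal sketch, not taken from this PR, of how a concrete `PluginMetric` subclass would consume the new counter when packaging a value; the class name, import paths, and the `_package_result` helper are assumptions for illustration.

```python
# Minimal sketch; import paths and the example class are assumptions and
# not part of this PR. MetricValue/get_metric_name are used as in the hunks above.
from avalanche.evaluation import PluginMetric                    # assumed path
from avalanche.evaluation.metric_results import MetricValue      # assumed path
from avalanche.evaluation.metric_utils import get_metric_name    # assumed path


class DummyMinibatchMetric(PluginMetric[float]):
    """Hypothetical metric that emits one value after every training iteration."""

    def __init__(self):
        super().__init__()
        self._value = 0.0

    def result(self) -> float:
        return self._value

    def reset(self) -> None:
        self._value = 0.0

    def after_training_iteration(self, strategy):
        # The base class increments global_it_counter in this callback
        # (see the hunk above), so invoke it before reading the counter.
        super().after_training_iteration(strategy)
        return self._package_result(strategy)

    def _package_result(self, strategy):
        metric_name = get_metric_name(self, strategy)
        # One shared, monotonically increasing x value for all metrics,
        # replacing the removed per-name _next_x_position counters.
        plot_x_position = self.get_global_counter()
        return [MetricValue(self, metric_name, self.result(), plot_x_position)]
```
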
10 changes: 5 additions & 5 deletions avalanche/evaluation/metrics/accuracy.py
@@ -146,7 +146,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
metric_value = self.result()

metric_name = get_metric_name(self, strategy)
plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, metric_value, plot_x_position)]

@@ -193,7 +193,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
metric_value = self.result()

metric_name = get_metric_name(self, strategy)
plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, metric_value, plot_x_position)]

@@ -233,7 +233,7 @@ def _package_result(self, strategy: 'BaseStrategy'):
metric_value = self.result()

metric_name = get_metric_name(self, strategy)
plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, metric_value, plot_x_position)]

@@ -279,7 +279,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> \

metric_name = get_metric_name(self, strategy, add_experience=True)

plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, metric_value, plot_x_position)]

@@ -329,7 +329,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> \
.format(str(self),
phase_name,
stream)
plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, metric_value, plot_x_position)]

2 changes: 1 addition & 1 deletion avalanche/evaluation/metrics/confusion_matrix.py
@@ -270,7 +270,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
.format(str(self),
phase_name,
stream)
plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

if self._save_image:
cm_image = self._image_creator(exp_cm)
10 changes: 5 additions & 5 deletions avalanche/evaluation/metrics/cpu_usage.py
@@ -158,7 +158,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:

metric_name = get_metric_name(self, strategy)

plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, metric_value, plot_x_position)]

@@ -201,7 +201,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
cpu_usage = self.result()

metric_name = get_metric_name(self, strategy)
plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, cpu_usage, plot_x_position)]

@@ -253,7 +253,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:

metric_name = get_metric_name(self, strategy)

plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(
self, metric_name, cpu_usage, plot_x_position)]
@@ -297,7 +297,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
exp_cpu = self.result()

metric_name = get_metric_name(self, strategy, add_experience=True)
plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, exp_cpu, plot_x_position)]

@@ -345,7 +345,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
.format(str(self),
phase_name,
stream)
plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, exp_cpu, plot_x_position)]

8 changes: 4 additions & 4 deletions avalanche/evaluation/metrics/disk_usage.py
@@ -140,7 +140,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:

metric_name = get_metric_name(self, strategy)

plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, metric_value, plot_x_position)]

@@ -183,7 +183,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
disk_usage = self.result()

metric_name = get_metric_name(self, strategy)
plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, disk_usage, plot_x_position)]

@@ -225,7 +225,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
exp_disk = self.result()

metric_name = get_metric_name(self, strategy, add_experience=True)
plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, exp_disk, plot_x_position)]

@@ -272,7 +272,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
.format(str(self),
phase_name,
stream)
plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, exp_disk, plot_x_position)]

30 changes: 15 additions & 15 deletions avalanche/evaluation/metrics/forgetting.py
@@ -128,7 +128,7 @@ def __init__(self):
The general metric to compute forgetting
"""

self._last_accuracy = Accuracy()
self._current_accuracy = Accuracy()
"""
The average accuracy over the current evaluation experience
"""
@@ -192,26 +192,26 @@ def before_eval(self, strategy) -> None:
self.reset_last_accuracy()

def before_eval_exp(self, strategy: 'BaseStrategy') -> None:
self._last_accuracy.reset()
self._current_accuracy.reset()

def after_eval_iteration(self, strategy: 'BaseStrategy') -> None:
self.eval_exp_id = strategy.experience.current_experience
self._last_accuracy.update(strategy.mb_y,
strategy.logits)
self._current_accuracy.update(strategy.mb_y,
strategy.logits)

def after_eval_exp(self, strategy: 'BaseStrategy') \
-> MetricResult:
# update experience on which training just ended
if self.train_exp_id == self.eval_exp_id:
self.update(self.eval_exp_id,
self._last_accuracy.result(),
self._current_accuracy.result(),
initial=True)
else:
# update other experiences
# if experience has not been encountered in training
# its value will not be considered in forgetting
self.update(self.eval_exp_id,
self._last_accuracy.result())
self._current_accuracy.result())

# this checks if the evaluation experience has been
# already encountered at training time
@@ -225,7 +225,7 @@ def _package_result(self, strategy: 'BaseStrategy') \

forgetting = self.result(k=self.eval_exp_id)
metric_name = get_metric_name(self, strategy, add_experience=True)
plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

metric_values = [MetricValue(
self, metric_name, forgetting, plot_x_position)]
@@ -265,7 +265,7 @@ def __init__(self):
The general metric to compute forgetting
"""

self._last_accuracy = Accuracy()
self._current_accuracy = Accuracy()
"""
The average accuracy over the current evaluation experience
"""
@@ -336,29 +336,29 @@ def before_training_exp(self, strategy: 'BaseStrategy') -> None:
self.train_exp_id = strategy.experience.current_experience

def before_eval(self, strategy) -> None:
self.reset_last_accuracy()
self.reset_current_accuracy()
self.stream_forgetting.reset()

def before_eval_exp(self, strategy: 'BaseStrategy') -> None:
self._last_accuracy.reset()
self._current_accuracy.reset()

def after_eval_iteration(self, strategy: 'BaseStrategy') -> None:
self.eval_exp_id = strategy.experience.current_experience
self._last_accuracy.update(strategy.mb_y,
strategy.logits)
self._current_accuracy.update(strategy.mb_y,
strategy.logits)

def after_eval_exp(self, strategy: 'BaseStrategy') -> None:
# update experience on which training just ended
if self.train_exp_id == self.eval_exp_id:
self.exp_update(self.eval_exp_id,
self._last_accuracy.result(),
self._current_accuracy.result(),
initial=True)
else:
# update other experiences
# if experience has not been encountered in training
# its value will not be considered in forgetting
self.exp_update(self.eval_exp_id,
self._last_accuracy.result())
self._current_accuracy.result())

# this checks if the evaluation experience has been
# already encountered at training time
@@ -382,7 +382,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> \
.format(str(self),
phase_name,
stream)
plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, metric_value, plot_x_position)]

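
The rename from `_last_accuracy` to `_current_accuracy` clarifies that this accumulator holds the accuracy of the experience currently being evaluated. It feeds the `Forgetting` accumulator with `initial=True` only for the experience that training just ended on, so forgetting can later be reported as the gap between that initial accuracy and the most recent one. A simplified sketch of that accumulator's role, with internals assumed rather than taken from this diff:

```python
# Simplified sketch of the accumulator the renamed _current_accuracy feeds.
# The real Forgetting class lives in avalanche.evaluation.metrics; the
# implementation below is an assumption for illustration only.
from typing import Dict, Optional


class ForgettingSketch:
    def __init__(self):
        self.initial: Dict[int, float] = {}  # accuracy right after training on exp. k
        self.last: Dict[int, float] = {}     # most recent eval accuracy on exp. k

    def update(self, k: int, accuracy: float, initial: bool = False) -> None:
        if initial:
            self.initial[k] = accuracy
        else:
            self.last[k] = accuracy

    def result(self, k: int) -> Optional[float]:
        # Forgetting on experience k: drop from the accuracy measured just
        # after training on k to the accuracy measured at the current eval.
        if k not in self.initial or k not in self.last:
            return None
        return self.initial[k] - self.last[k]
```
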
8 changes: 4 additions & 4 deletions avalanche/evaluation/metrics/gpu_usage.py
@@ -169,7 +169,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
gpu_usage = self.result()

metric_name = get_metric_name(self, strategy)
plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, gpu_usage, plot_x_position)]

@@ -220,7 +220,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
gpu_usage = self.result()

metric_name = get_metric_name(self, strategy)
plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, gpu_usage, plot_x_position)]

@@ -271,7 +271,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
gpu_usage = self.result()

metric_name = get_metric_name(self, strategy, add_experience=True)
plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, gpu_usage, plot_x_position)]

@@ -323,7 +323,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
.format(str(self),
phase_name,
stream)
plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, gpu_usage, plot_x_position)]

10 changes: 5 additions & 5 deletions avalanche/evaluation/metrics/loss.py
@@ -119,7 +119,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
metric_value = self.result()

metric_name = get_metric_name(self, strategy)
plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, metric_value, plot_x_position)]

@@ -166,7 +166,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
metric_value = self.result()

metric_name = get_metric_name(self, strategy)
plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, metric_value, plot_x_position)]

@@ -206,7 +206,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> MetricResult:
metric_value = self.result()

metric_name = get_metric_name(self, strategy)
plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, metric_value, plot_x_position)]

@@ -251,7 +251,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> \

metric_name = get_metric_name(self, strategy, add_experience=True)

plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, metric_value, plot_x_position)]

@@ -301,7 +301,7 @@ def _package_result(self, strategy: 'BaseStrategy') -> \
phase_name,
stream)

plot_x_position = self._next_x_position(metric_name)
plot_x_position = self.get_global_counter()

return [MetricValue(self, metric_name, metric_value, plot_x_position)]
