-
Notifications
You must be signed in to change notification settings - Fork 0
/
metric.py
96 lines (76 loc) · 2.88 KB
/
metric.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
import numpy as np
import torch
import torch.distributed as dist
def is_dist_avail_and_initialized():
    """Return True iff torch.distributed is usable: the backend is
    available AND the default process group has been initialized."""
    return dist.is_available() and dist.is_initialized()
class AverageMeter(object):
    """Tracks a weighted running average of a scalar metric.

    Attributes:
        val:   most recent value passed to ``update``
        sum:   weighted sum of all recorded values
        count: total weight recorded so far
        avg:   ``sum / count`` (0 until the first ``update``)
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record ``val`` with weight ``n`` and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    def synchronize_between_processes(self):
        """All-reduce (count, sum, val) across distributed workers and
        recompute the average. No-op outside a distributed run.

        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        # NOTE(review): hard-codes device='cuda'; assumes a GPU backend (e.g.
        # NCCL) — confirm if CPU-only distributed runs are ever expected.
        stats = torch.tensor([self.count, self.sum, self.val],
                             dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(stats)
        count, total, last = stats.tolist()
        self.count = int(count)
        self.sum = total
        self.val = last
        self.avg = self.sum / self.count
class ContinaulMetric(object):
    """Aggregates a scalar metric per task for continual-learning evaluation.

    NOTE(review): class name spelling ("Continaul") is kept as-is because
    callers elsewhere reference it.
    """

    def __init__(self, args, per_task_evaluation=False):
        # args.split is the number of tasks in the continual sequence.
        self.total_task = args.split
        self.per_task_evaluation = per_task_evaluation
        self.reset()

    def reset(self):
        """Clear all summaries and rebuild the metric matrix and meters."""
        self.task_average = 0
        self.sample_average = 0
        self.backward_transfer = 0
        self.task_learning_average = 0
        # With per-task evaluation the matrix is square (model task x data
        # task); otherwise everything collapses into a single column.
        n_cols = self.total_task if self.per_task_evaluation else 1
        self.task_matrix = np.zeros((self.total_task, n_cols))
        # task_metrics[model_task][data_task] accumulates the metric of the
        # model trained through `model_task`, evaluated on `data_task` data.
        self.task_metrics = {
            row: {col: AverageMeter() for col in range(n_cols)}
            for row in range(self.total_task)
        }

    def update(self, data_task, model_task, val, n=1):
        """Accumulate `val` (with weight `n`) for a (model, data) task pair."""
        col = data_task if self.per_task_evaluation else 0
        self.task_metrics[model_task][col].update(val, n)

    def summarize(self, task):
        """Fill the matrix from the meters and derive continual-learning
        summaries after finishing task index `task`."""
        n_rows, n_cols = self.task_matrix.shape
        for row in range(n_rows):
            for col in range(n_cols):
                self.task_matrix[row][col] = self.task_metrics[row][col].avg
        if not self.per_task_evaluation:
            self.task_average = self.task_matrix[task][0]
            return
        # Mean metric over all tasks seen so far, for each checkpoint t.
        accuracy_per_checkpoint = [np.mean(self.task_matrix[t][:t + 1])
                                   for t in range(n_rows)]
        self.task_average = accuracy_per_checkpoint[task]
        # Diagonal = metric measured right after learning each task.
        self.task_learning_average = np.diag(self.task_matrix)[:task + 1].mean()
        if task > 0:
            # Worst drop of each earlier checkpoint relative to the current
            # one, averaged over past tasks (forgetting-style measure).
            self.backward_transfer = np.mean(
                np.max(self.task_matrix[:task] - self.task_matrix[task],
                       axis=1)[:task])