This repository has been archived by the owner on Dec 18, 2023. It is now read-only.
/
fairlearn.py
62 lines (54 loc) · 1.74 KB
/
fairlearn.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
from fairlearn.metrics import MetricFrame
from credoai.utils import ValidationError, global_logger
########### General functions shared across evaluators ###########
def create_metric_frame(metrics, y_pred, y_true, sensitive_features):
    """Build a fairlearn MetricFrame from a dictionary of name -> Metric.

    Each Metric object exposes its underlying callable as `.fun`; those
    callables are what MetricFrame actually evaluates per group.

    Parameters
    ----------
    metrics : dict
        Mapping of metric name to a Metric object with a `.fun` attribute.
    y_pred : array-like
        Predictions (hard labels or probabilities, per the metrics used).
    y_true : array-like
        Ground-truth labels.
    sensitive_features : array-like
        Group membership used to disaggregate the metrics.

    Returns
    -------
    fairlearn.metrics.MetricFrame
    """
    # Unwrap Metric objects into their raw callables before construction.
    metric_fns = {}
    for name, metric in metrics.items():
        metric_fns[name] = metric.fun
    return MetricFrame(
        metrics=metric_fns,
        y_true=y_true,
        y_pred=y_pred,
        sensitive_features=sensitive_features,
    )
def setup_metric_frames(
    performance_metrics,
    prob_metrics,
    thresh_metrics,
    y_pred,
    y_prob,
    y_true,
    sensitive_features,
):
    """Create MetricFrames for each metric category with usable predictions.

    Parameters
    ----------
    performance_metrics : dict
        name -> Metric evaluated on hard predictions (`y_pred`).
    prob_metrics : dict
        name -> Metric evaluated on probabilities (`y_prob`).
    thresh_metrics : dict
        name -> threshold-varying Metric evaluated on probabilities (`y_prob`).
    y_pred : array-like or None
        Hard predictions; the "pred" frame is skipped silently when None.
    y_prob : array-like or None
        Predicted probabilities; prob/thresh frames are skipped with a
        warning when None but their metrics were requested.
    y_true : array-like
        Ground-truth labels.
    sensitive_features : array-like
        Group membership used to disaggregate the metrics.

    Returns
    -------
    dict
        Subset of {"pred", "prob", "thresh"} -> MetricFrame; only the
        categories whose metrics and predictions were both supplied.
    """
    metric_frames = {}
    if y_pred is not None and performance_metrics:
        metric_frames["pred"] = create_metric_frame(
            performance_metrics,
            y_pred,
            y_true,
            sensitive_features=sensitive_features,
        )
    # "prob" and "thresh" share identical handling: both need y_prob and
    # warn when it is missing, so drive both from one loop instead of the
    # previous copy-pasted branches.
    for frame_key, requested_metrics in (
        ("prob", prob_metrics),
        ("thresh", thresh_metrics),
    ):
        if not requested_metrics:
            continue
        if y_prob is not None:
            metric_frames[frame_key] = create_metric_frame(
                requested_metrics,
                y_prob,
                y_true,
                sensitive_features=sensitive_features,
            )
        else:
            # Logger.warn is a deprecated alias of Logger.warning; use the
            # supported method.
            global_logger.warning(
                f"Metrics ({list(requested_metrics.keys())}) requested, but no y_prob available"
            )
    return metric_frames