From c21a1b7f6b4566f4bea8cc2d566836174738b667 Mon Sep 17 00:00:00 2001
From: Samuel Hoffman
Date: Tue, 7 May 2019 10:43:28 -0400
Subject: [PATCH] Fix loop indentation typo

See #80
---
 .../postprocessing/reject_option_classification.py | 68 +++++++++----------
 1 file changed, 34 insertions(+), 34 deletions(-)

diff --git a/aif360/algorithms/postprocessing/reject_option_classification.py b/aif360/algorithms/postprocessing/reject_option_classification.py
index 897119fe..e5ae8017 100644
--- a/aif360/algorithms/postprocessing/reject_option_classification.py
+++ b/aif360/algorithms/postprocessing/reject_option_classification.py
@@ -116,40 +116,40 @@ def fit(self, dataset_true, dataset_pred):
                 low_ROC_margin = 0.0
                 high_ROC_margin = (1.0-class_thresh)
 
-                # Iterate through ROC margins
-                for ROC_margin in np.linspace(
-                                    low_ROC_margin,
-                                    high_ROC_margin,
-                                    self.num_ROC_margin):
-                    self.ROC_margin = ROC_margin
-
-                    # Predict using the current threshold and margin
-                    dataset_transf_pred = self.predict(dataset_pred)
-
-                    dataset_transf_metric_pred = BinaryLabelDatasetMetric(
-                                                 dataset_transf_pred,
-                                                 unprivileged_groups=self.unprivileged_groups,
-                                                 privileged_groups=self.privileged_groups)
-                    classified_transf_metric = ClassificationMetric(
-                                                 dataset_true,
-                                                 dataset_transf_pred,
-                                                 unprivileged_groups=self.unprivileged_groups,
-                                                 privileged_groups=self.privileged_groups)
-
-                    ROC_margin_arr[cnt] = self.ROC_margin
-                    class_thresh_arr[cnt] = self.classification_threshold
-
-                    # Balanced accuracy and fairness metric computations
-                    balanced_acc_arr[cnt] = 0.5*(classified_transf_metric.true_positive_rate()\
-                                           +classified_transf_metric.true_negative_rate())
-                    if self.metric_name == "Statistical parity difference":
-                        fair_metric_arr[cnt] = dataset_transf_metric_pred.mean_difference()
-                    elif self.metric_name == "Average odds difference":
-                        fair_metric_arr[cnt] = classified_transf_metric.average_odds_difference()
-                    elif self.metric_name == "Equal opportunity difference":
-                        fair_metric_arr[cnt] = classified_transf_metric.equal_opportunity_difference()
-
-                    cnt += 1
+            # Iterate through ROC margins
+            for ROC_margin in np.linspace(
+                                low_ROC_margin,
+                                high_ROC_margin,
+                                self.num_ROC_margin):
+                self.ROC_margin = ROC_margin
+
+                # Predict using the current threshold and margin
+                dataset_transf_pred = self.predict(dataset_pred)
+
+                dataset_transf_metric_pred = BinaryLabelDatasetMetric(
+                                             dataset_transf_pred,
+                                             unprivileged_groups=self.unprivileged_groups,
+                                             privileged_groups=self.privileged_groups)
+                classified_transf_metric = ClassificationMetric(
+                                             dataset_true,
+                                             dataset_transf_pred,
+                                             unprivileged_groups=self.unprivileged_groups,
+                                             privileged_groups=self.privileged_groups)
+
+                ROC_margin_arr[cnt] = self.ROC_margin
+                class_thresh_arr[cnt] = self.classification_threshold
+
+                # Balanced accuracy and fairness metric computations
+                balanced_acc_arr[cnt] = 0.5*(classified_transf_metric.true_positive_rate()\
+                                       +classified_transf_metric.true_negative_rate())
+                if self.metric_name == "Statistical parity difference":
+                    fair_metric_arr[cnt] = dataset_transf_metric_pred.mean_difference()
+                elif self.metric_name == "Average odds difference":
+                    fair_metric_arr[cnt] = classified_transf_metric.average_odds_difference()
+                elif self.metric_name == "Equal opportunity difference":
+                    fair_metric_arr[cnt] = classified_transf_metric.equal_opportunity_difference()
+
+                cnt += 1
 
         rel_inds = np.logical_and(fair_metric_arr >= self.metric_lb,
                                   fair_metric_arr <= self.metric_ub)