From 1e8a9d2d0fd1fd20119530566330e5613f1eab62 Mon Sep 17 00:00:00 2001 From: govarsha Date: Fri, 12 Jan 2024 19:53:10 +0530 Subject: [PATCH 1/2] added validation data metrics, updated automlx --- ads/opctl/operator/lowcode/anomaly/const.py | 35 +++- .../lowcode/anomaly/model/anomaly_dataset.py | 28 ++- .../operator/lowcode/anomaly/model/automlx.py | 32 ++- .../operator/lowcode/anomaly/model/autots.py | 9 +- .../lowcode/anomaly/model/base_model.py | 191 +++++++++++++++--- .../operator/lowcode/anomaly/model/tods.py | 17 +- .../lowcode/anomaly/operator_config.py | 13 +- .../operator/lowcode/anomaly/schema.yaml | 51 ++++- ads/opctl/operator/lowcode/anomaly/utils.py | 21 ++ 9 files changed, 336 insertions(+), 61 deletions(-) diff --git a/ads/opctl/operator/lowcode/anomaly/const.py b/ads/opctl/operator/lowcode/anomaly/const.py index cd87e636e..41e758861 100644 --- a/ads/opctl/operator/lowcode/anomaly/const.py +++ b/ads/opctl/operator/lowcode/anomaly/const.py @@ -48,7 +48,40 @@ class SupportedMetrics(str, metaclass=ExtendedEnumMeta): UNSUPERVISED_UNIFY95 = "unsupervised_unify95" UNSUPERVISED_UNIFY95_LOG_LOSS = "unsupervised_unify95_log_loss" UNSUPERVISED_N1_EXPERTS = "unsupervised_n-1_experts" - + RECALL = "Recall" + PRECISION = "Precision" + ACCURACY = "Accuracy" + F1_SCORE = "f1_score" + FP = "False Positive" + FN = "False Negative" + TP = "True Positive" + TN = "True Negative" + ROC_AUC = "ROC_AUC" + PRC_AUC = "PRC_AUC" + MCC = "MCC" + MEAN_RECALL = "Mean Recall" + MEAN_PRECISION = "Mean Precision" + MEAN_ACCURACY = "Mean Accuracy" + MEAN_F1_SCORE = "Mean f1_score" + MEAN_FP = "Mean False Positive" + MEAN_FN = "Mean False Negative" + MEAN_TP = "Mean True Positive" + MEAN_TN = "Mean True Negative" + MEAN_ROC_AUC = "Mean ROC_AUC" + MEAN_PRC_AUC = "Mean PRC_AUC" + MEAN_MCC = "Mean MCC" + MEDIAN_RECALL = "Median Recall" + MEDIAN_PRECISION = "Median Precision" + MEDIAN_ACCURACY = "Median Accuracy" + MEDIAN_F1_SCORE = "Median f1_score" + MEDIAN_FP = "Median False Positive" + MEDIAN_FN = "Median False Negative" + MEDIAN_TP = "Median True Positive" + MEDIAN_TN = "Median True Negative" + MEDIAN_ROC_AUC = "Median ROC_AUC" + MEDIAN_PRC_AUC = "Median PRC_AUC" + MEDIAN_MCC = "Median MCC" + ELAPSED_TIME = "Elapsed Time" class OutputColumns(str, metaclass=ExtendedEnumMeta): ANOMALY_COL = "anomaly" diff --git a/ads/opctl/operator/lowcode/anomaly/model/anomaly_dataset.py b/ads/opctl/operator/lowcode/anomaly/model/anomaly_dataset.py index d340dc5f2..09c727262 100644 --- a/ads/opctl/operator/lowcode/anomaly/model/anomaly_dataset.py +++ b/ads/opctl/operator/lowcode/anomaly/model/anomaly_dataset.py @@ -69,8 +69,9 @@ def _load_data(self, spec): class AnomalyOutput: - def __init__(self): + def __init__(self, date_column): self.category_map = dict() + self.date_column = date_column def add_output(self, category: str, anomalies: pd.DataFrame, scores: pd.DataFrame): self.category_map[category] = (anomalies, scores) @@ -83,15 +84,29 @@ def get_scores_by_cat(self, category: str): def get_inliers_by_cat(self, category: str, data: pd.DataFrame): anomaly = self.get_anomalies_by_cat(category) + scores = self.get_scores_by_cat(category) inlier_indices = anomaly.index[anomaly[OutputColumns.ANOMALY_COL] == 0] - - return data.iloc[inlier_indices] + inliers = data.iloc[inlier_indices] + if scores is not None and not scores.empty: + inliers = pd.merge( + inliers, + scores, + on=self.date_column, + how='inner') + return inliers def get_outliers_by_cat(self, category: str, data: pd.DataFrame): anomaly = 
self.get_anomalies_by_cat(category)
+        scores = self.get_scores_by_cat(category)
         outliers_indices = anomaly.index[anomaly[OutputColumns.ANOMALY_COL] == 1]
-
-        return data.iloc[outliers_indices]
+        outliers = data.iloc[outliers_indices]
+        if scores is not None and not scores.empty:
+            outliers = pd.merge(
+                outliers,
+                scores,
+                on=self.date_column,
+                how='inner')
+        return outliers
 
     def get_inliers(self, full_data_dict):
         inliers = pd.DataFrame()
@@ -128,3 +143,6 @@ def get_scores(self, target_category_columns):
             score[target_category_columns[0]] = category
             scores = pd.concat([scores, score], axis=0, ignore_index=True)
         return scores
+
+    def get_num_anomalies_by_cat(self, category: str):
+        return (self.category_map[category][0][OutputColumns.ANOMALY_COL] == 1).sum()
diff --git a/ads/opctl/operator/lowcode/anomaly/model/automlx.py b/ads/opctl/operator/lowcode/anomaly/model/automlx.py
index 95e0bde98..0c8084d82 100644
--- a/ads/opctl/operator/lowcode/anomaly/model/automlx.py
+++ b/ads/opctl/operator/lowcode/anomaly/model/automlx.py
@@ -7,6 +7,7 @@
 import pandas as pd
 
 from ads.common.decorator.runtime_dependency import runtime_dependency
+from .anomaly_dataset import AnomalyOutput
 from .base_model import AnomalyOperatorBaseModel
 from ads.opctl.operator.lowcode.anomaly.const import OutputColumns
 
@@ -22,11 +23,31 @@ class AutoMLXOperatorModel(AnomalyOperatorBaseModel):
         ),
     )
-    def _build_model(self) -> pd.DataFrame:
-        est = automl.Pipeline(task='anomaly_detection')
+    def _build_model(self) -> AnomalyOutput:
+        date_column = self.spec.datetime_column.name
         dataset = self.datasets
-        est.fit(dataset.data, y=None)
-        y_pred = est.predict(dataset.data)
-        dataset.data[OutputColumns.ANOMALY_COL] = y_pred
+
+        full_data_dict = dataset.full_data_dict
+
+        anomaly_output = AnomalyOutput(date_column=date_column)
+
+        # Fit and score one pipeline per target series
+        for target, df in full_data_dict.items():
+            est = automl.Pipeline(task='anomaly_detection')
+            est.fit(df, y=None)
+            y_pred = est.predict(df)
+            scores = est.predict_proba(df)
+
+            anomaly = pd.DataFrame({
+                date_column: df[date_column],
+                OutputColumns.ANOMALY_COL: y_pred
+            })
+            score = pd.DataFrame({
+                date_column: df[date_column],
+                OutputColumns.SCORE_COL: [item[1] for item in scores]
+            })
+            anomaly_output.add_output(target, anomaly, score)
+
+        return anomaly_output
 
     def _generate_report(self):
         import datapane as dp
diff --git a/ads/opctl/operator/lowcode/anomaly/model/autots.py b/ads/opctl/operator/lowcode/anomaly/model/autots.py
index 888094aa3..13a271daa 100644
--- a/ads/opctl/operator/lowcode/anomaly/model/autots.py
+++ b/ads/opctl/operator/lowcode/anomaly/model/autots.py
@@ -48,13 +48,7 @@ def _build_model(self) -> AnomalyOutput:
 
         full_data_dict = dataset.full_data_dict
 
-        target_category_column = (
-            self.spec.target_category_columns[0]
-            if self.spec.target_category_columns is not None
-            else None
-        )
-
-        anomaly_output = AnomalyOutput()
+        anomaly_output = AnomalyOutput(date_column=date_column)
 
         # Iterate over the full_data_dict items
         for target, df in full_data_dict.items():
@@ -70,6 +64,7 @@ def _build_model(self) -> AnomalyOutput:
                 columns={score.columns.values[0]: OutputColumns.SCORE_COL},
                 inplace=True,
             )
+            score = 1 - score  # align with the higher-score-is-more-anomalous convention
             score = score.reset_index(drop=False)
 
             col = anomaly.columns.values[0]
diff --git a/ads/opctl/operator/lowcode/anomaly/model/base_model.py b/ads/opctl/operator/lowcode/anomaly/model/base_model.py
index b1f1871c2..e956973e0 100644
--- a/ads/opctl/operator/lowcode/anomaly/model/base_model.py
+++ b/ads/opctl/operator/lowcode/anomaly/model/base_model.py
@@ -12,14 +12,15 @@
 import fsspec
 import pandas as pd
+import numpy as np
 
 from ads.common.auth import default_signer
 from ads.opctl import logger
 
 from .. import utils
 from ..operator_config import AnomalyOperatorConfig, AnomalyOperatorSpec
-from .anomaly_dataset import AnomalyDatasets
-from ..const import OutputColumns
+from .anomaly_dataset import AnomalyDatasets, AnomalyOutput
+from ..const import OutputColumns, SupportedMetrics
 from ..const import SupportedModels
 from ads.opctl.operator.common.utils import human_time_friendly
 from ads.common.object_storage_details import ObjectStorageDetails
 
@@ -47,8 +48,16 @@ def generate_report(self):
         import matplotlib.pyplot as plt
         start_time = time.time()
-
         anomaly_output = self._build_model()
+        elapsed_time = time.time() - start_time
+
+        summary_metrics = None
+        total_metrics = None
+        validation_data = None
+
+        if self.spec.validation_data:
+            total_metrics, summary_metrics, validation_data = \
+                self._validation_data_evaluate_metrics(anomaly_output, self.spec.validation_data.url, elapsed_time)
 
         table_blocks = [
             dp.DataTable(df, label=col)
             for col, df in self.datasets.full_data_dict.items()
         ]
@@ -83,7 +92,6 @@ def generate_report(self):
             blocks.append(dp.Group(blocks=figure_blocks, label=target))
 
         plots = dp.Select(blocks=blocks) if len(blocks) > 1 else blocks[0]
-        elapsed_time = time.time() - start_time
 
         report_sections = []
         title_text = dp.Text("# Anomaly Detection Report")
@@ -105,21 +113,137 @@ def generate_report(self):
                 )
             ]
         )
 
+        sec_text = dp.Text("## Train Evaluation Metrics")
+        sec = dp.DataTable(self._evaluation_metrics(anomaly_output))
+        evaluation_metrics_sec = [sec_text, sec]
+
+        validation_metrics_sections = []
+        if total_metrics is not None and not total_metrics.empty:
+            sec_text = dp.Text("## Validation Data Evaluation Metrics")
+            sec = dp.DataTable(total_metrics)
+            validation_metrics_sections += [sec_text, sec]
+
+        if summary_metrics is not None and not summary_metrics.empty:
+            sec_text = dp.Text("## Validation Data Summary Metrics")
+            sec = dp.DataTable(summary_metrics)
+            validation_metrics_sections += [sec_text, sec]
+
         report_sections = (
-            [summary]
-            + [plots]
-            + [data_table]
-            + [title_text]
-            + [yaml_appendix_title, yaml_appendix]
+            [title_text, summary]
+            + [plots]
+            + [data_table]
+            + evaluation_metrics_sec
+            + validation_metrics_sections
+            + [yaml_appendix_title, yaml_appendix]
         )
 
+        # save the report and result CSV
         self._save_report(
             report_sections=report_sections,
-            inliers=anomaly_output.get_inliers(self.datasets.full_data_dict),
-            outliers=anomaly_output.get_outliers(self.datasets.full_data_dict),
-            scores=anomaly_output.get_scores(self.spec.target_category_columns),
+            anomaly_output=anomaly_output,
+            validation_metrics=total_metrics
         )
+
+    def _evaluation_metrics(self, anomaly_output):
+        total_metrics = pd.DataFrame()
+        for cat in anomaly_output.category_map:
+            num_anomalies = anomaly_output.get_num_anomalies_by_cat(cat)
+            metrics_df = pd.DataFrame.from_dict(
+                {
+                    'Num of Anomalies': num_anomalies
+                },
+                orient="index",
+                columns=[cat])
+            total_metrics = pd.concat([total_metrics, metrics_df], axis=1)
+        return total_metrics
+
+    def _validation_data_evaluate_metrics(self, anomaly_output, filename, elapsed_time):
+        total_metrics = pd.DataFrame()
+        summary_metrics = pd.DataFrame()
+        data = None
+        try:
+            storage_options = (
+                default_signer()
+                if ObjectStorageDetails.is_oci_path(filename)
+                else {}
+            )
+            data = utils._load_data(
+                filename=filename,
+                format=self.spec.validation_data.format,
+                storage_options=storage_options,
+                columns=self.spec.validation_data.columns,
+            )
+        except pd.errors.EmptyDataError:
+            logger.warn("Empty validation data file")
+            return total_metrics, summary_metrics, None
+
+        if data.empty:
+            return total_metrics, summary_metrics, None
+
+        for cat in anomaly_output.category_map:
+            output = anomaly_output.category_map[cat][0]
+            date_col = self.spec.datetime_column.name
+
+            val_data = data[data[self.spec.target_category_columns[0]] == cat].copy()
+            val_data[date_col] = pd.to_datetime(val_data[date_col])
+
+            dates = output[output[date_col].isin(val_data[date_col])][date_col]
+
+            metrics_df = utils._build_metrics_df(
+                val_data[val_data[date_col].isin(dates)][OutputColumns.ANOMALY_COL].values,
+                output[output[date_col].isin(dates)][OutputColumns.ANOMALY_COL].values,
+                cat
+            )
+            total_metrics = pd.concat([total_metrics, metrics_df], axis=1)
+
+        if total_metrics.empty:
+            return total_metrics, summary_metrics, data
+
+        summary_metrics = pd.DataFrame(
+            {
+                SupportedMetrics.MEAN_RECALL: np.mean(
+                    total_metrics.loc[SupportedMetrics.RECALL]
+                ),
+                SupportedMetrics.MEDIAN_RECALL: np.median(
+                    total_metrics.loc[SupportedMetrics.RECALL]
+                ),
+                SupportedMetrics.MEAN_PRECISION: np.mean(
+                    total_metrics.loc[SupportedMetrics.PRECISION]
+                ),
+                SupportedMetrics.MEDIAN_PRECISION: np.median(
+                    total_metrics.loc[SupportedMetrics.PRECISION]
+                ),
+                SupportedMetrics.MEAN_ACCURACY: np.mean(
+                    total_metrics.loc[SupportedMetrics.ACCURACY]
+                ),
+                SupportedMetrics.MEDIAN_ACCURACY: np.median(
+                    total_metrics.loc[SupportedMetrics.ACCURACY]
+                ),
+                SupportedMetrics.MEAN_F1_SCORE: np.mean(
+                    total_metrics.loc[SupportedMetrics.F1_SCORE]
+                ),
+                SupportedMetrics.MEDIAN_F1_SCORE: np.median(
+                    total_metrics.loc[SupportedMetrics.F1_SCORE]
+                ),
+                SupportedMetrics.MEAN_ROC_AUC: np.mean(
+                    total_metrics.loc[SupportedMetrics.ROC_AUC]
+                ),
+                SupportedMetrics.MEDIAN_ROC_AUC: np.median(
+                    total_metrics.loc[SupportedMetrics.ROC_AUC]
+                ),
+                SupportedMetrics.MEAN_PRC_AUC: np.mean(
+                    total_metrics.loc[SupportedMetrics.PRC_AUC]
+                ),
+                SupportedMetrics.MEDIAN_PRC_AUC: np.median(
+                    total_metrics.loc[SupportedMetrics.PRC_AUC]
+                ),
+                SupportedMetrics.ELAPSED_TIME: elapsed_time,
+            },
+            index=["All Targets"],
+        )
+
+        return total_metrics, summary_metrics, data
 
     def _load_data(self):
         """Loads input data."""
@@ -131,11 +255,10 @@ def _load_data(self):
         )
 
     def _save_report(
-        self,
-        report_sections: Tuple,
-        inliers: pd.DataFrame,
-        outliers: pd.DataFrame,
-        scores: pd.DataFrame,
+            self,
+            report_sections: Tuple,
+            anomaly_output: AnomalyOutput,
+            validation_metrics: pd.DataFrame
     ):
         """Saves resulting reports to the given folder."""
         import datapane as dp
@@ -161,19 +284,22 @@ def _save_report(
             dp.save_report(report_sections, report_local_path)
         with open(report_local_path) as f1:
             with fsspec.open(
-                os.path.join(output_dir, self.spec.report_file_name),
-                "w",
-                **default_signer(),
+                    os.path.join(output_dir, self.spec.report_file_name),
+                    "w",
+                    **default_signer(),
             ) as f2:
                 f2.write(f1.read())
 
-        utils._write_data(
-            data=inliers,
-            filename=os.path.join(output_dir, self.spec.inliers_filename),
-            format="csv",
-            storage_options=storage_options,
-        )
+        if self.spec.generate_inliers:
+            inliers = anomaly_output.get_inliers(self.datasets.full_data_dict)
+            utils._write_data(
+                data=inliers,
+                filename=os.path.join(output_dir, self.spec.inliers_filename),
+                format="csv",
+                storage_options=storage_options,
+            )
 
+        outliers = anomaly_output.get_outliers(self.datasets.full_data_dict)
         utils._write_data(
             data=outliers,
filename=os.path.join(output_dir, self.spec.outliers_filename), @@ -181,12 +307,13 @@ def _save_report( storage_options=storage_options, ) - utils._write_data( - data=scores, - filename=os.path.join(output_dir, self.spec.scores_filename), - format="csv", - storage_options=storage_options, - ) + if validation_metrics is not None and not validation_metrics.empty: + utils._write_data( + data=validation_metrics.rename_axis("metrics").reset_index(), + filename=os.path.join(output_dir, self.spec.validation_metrics_filename), + format="csv", + storage_options=storage_options, + ) logger.warn( f"The report has been successfully " diff --git a/ads/opctl/operator/lowcode/anomaly/model/tods.py b/ads/opctl/operator/lowcode/anomaly/model/tods.py index 10a7c77f8..eaa120faa 100644 --- a/ads/opctl/operator/lowcode/anomaly/model/tods.py +++ b/ads/opctl/operator/lowcode/anomaly/model/tods.py @@ -59,7 +59,8 @@ def _build_model(self) -> pd.DataFrame: predictions_test = {} prediction_score_test = {} dataset = self.datasets - anomaly_output = AnomalyOutput() + date_column = self.spec.datetime_column.name + anomaly_output = AnomalyOutput(date_column=date_column) # Iterate over the full_data_dict items for target, df in self.datasets.full_data_dict.items(): @@ -91,12 +92,14 @@ def _build_model(self) -> pd.DataFrame: OutputColumns.ANOMALY_COL ] = predictions_train[target] - score = pd.DataFrame( - data=prediction_score_train[target], columns=[OutputColumns.SCORE_COL] - ) - anomaly = pd.DataFrame( - data=predictions_train[target], columns=[OutputColumns.ANOMALY_COL] - ) + anomaly = pd.DataFrame({ + date_column: df[date_column], + OutputColumns.ANOMALY_COL: predictions_train[target] + }) + score = pd.DataFrame({ + date_column: df[date_column], + OutputColumns.SCORE_COL: prediction_score_train[target] + }) anomaly_output.add_output(target, anomaly, score) return anomaly_output diff --git a/ads/opctl/operator/lowcode/anomaly/operator_config.py b/ads/opctl/operator/lowcode/anomaly/operator_config.py index 23d8dd23a..2b9a38f75 100644 --- a/ads/opctl/operator/lowcode/anomaly/operator_config.py +++ b/ads/opctl/operator/lowcode/anomaly/operator_config.py @@ -63,15 +63,15 @@ class AnomalyOperatorSpec(DataClassSerializable): input_data: InputData = field(default_factory=InputData) datetime_column: DateTimeColumn = field(default_factory=DateTimeColumn) test_data: TestData = field(default_factory=TestData) + validation_data: TestData = field(default_factory=TestData) output_directory: OutputDirectory = field(default_factory=OutputDirectory) report_file_name: str = None report_title: str = None report_theme: str = None metrics_filename: str = None - test_metrics_filename: str = None + validation_metrics_filename: str = None inliers_filename: str = None outliers_filename: str = None - scores_filename: str = None global_explanation_filename: str = None local_explanation_filename: str = None target_column: str = None @@ -80,6 +80,7 @@ class AnomalyOperatorSpec(DataClassSerializable): generate_report: bool = None generate_metrics: bool = None generate_explanations: bool = None + generate_inliers: bool = None model: str = None model_kwargs: Dict = field(default_factory=dict) metric: str = None @@ -90,7 +91,13 @@ def __post_init__(self): self.report_theme = self.report_theme or "light" self.inliers_filename = self.inliers_filename or "inliers.csv" self.outliers_filename = self.outliers_filename or "outliers.csv" - self.scores_filename = self.scores_filename or "scores.csv" + self.validation_metrics_filename = 
self.validation_metrics_filename or "validation_metrics.csv"
+
+        self.generate_inliers = (
+            self.generate_inliers
+            if self.generate_inliers is not None
+            else False
+        )
 
         self.model_kwargs = self.model_kwargs or dict()
 
diff --git a/ads/opctl/operator/lowcode/anomaly/schema.yaml b/ads/opctl/operator/lowcode/anomaly/schema.yaml
index 516478862..16eb76b4e 100644
--- a/ads/opctl/operator/lowcode/anomaly/schema.yaml
+++ b/ads/opctl/operator/lowcode/anomaly/schema.yaml
@@ -113,6 +113,45 @@ spec:
         type: dict
       type: dict
 
+  validation_data:
+    required: false
+    meta:
+      description: "Optional. Provide labeled data here if evaluation metrics are needed."
+    schema:
+      connect_args:
+        nullable: true
+        required: false
+        type: dict
+      format:
+        required: false
+        type: string
+        allowed:
+          - csv
+          - json
+          - clipboard
+          - excel
+          - hdf
+          - sql
+      columns:
+        required: false
+        type: list
+        schema:
+          type: string
+      url:
+        required: true
+        type: string
+        default: test.csv
+        meta:
+          description: "The url can be local, or remote. For example: `oci://<bucket>@<namespace>/data.csv`"
+      name:
+        required: false
+        type: string
+      options:
+        nullable: true
+        required: false
+        type: dict
+    type: dict
+
   output_directory:
     required: false
     schema:
@@ -167,10 +206,10 @@ spec:
       default: metrics.csv
       meta:
        description: "Placed into output_directory location. Defaults to metrics.csv"
-    test_metrics_filename:
+    validation_metrics_filename:
       required: false
       type: string
-      default: test_metrics.csv
+      default: validation_metrics.csv
       meta:
-        description: "Placed into output_directory location. Defaults to test_metrics.csv"
+        description: "Placed into output_directory location. Defaults to validation_metrics.csv"
     global_explanation_filename:
@@ -207,6 +246,14 @@ spec:
       meta:
         description: "Metrics files generation can be enabled using this flag. Defaults to true."
 
+    generate_inliers:
+      type: boolean
+      required: false
+      default: false
+      meta:
+        description: "If true, detected inliers are saved as inliers.csv in the output_directory. Defaults to false."
+
+
     target_column:
       type: string
       required: true
diff --git a/ads/opctl/operator/lowcode/anomaly/utils.py b/ads/opctl/operator/lowcode/anomaly/utils.py
index 067a8a142..5b44270b3 100644
--- a/ads/opctl/operator/lowcode/anomaly/utils.py
+++ b/ads/opctl/operator/lowcode/anomaly/utils.py
@@ -8,6 +8,27 @@
 import pandas as pd
 import fsspec
 from .operator_config import AnomalyOperatorSpec
+from .const import SupportedMetrics
+
+
+def _build_metrics_df(y_true, y_pred, column_name):
+    from sklearn.metrics import recall_score, precision_score, accuracy_score, f1_score, confusion_matrix, \
+        roc_auc_score, precision_recall_curve, auc, matthews_corrcoef
+    metrics = dict()
+    metrics[SupportedMetrics.RECALL] = recall_score(y_true, y_pred)
+    metrics[SupportedMetrics.PRECISION] = precision_score(y_true, y_pred)
+    metrics[SupportedMetrics.ACCURACY] = accuracy_score(y_true, y_pred)
+    metrics[SupportedMetrics.F1_SCORE] = f1_score(y_true, y_pred)
+    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
+    metrics[SupportedMetrics.FP] = fp
+    metrics[SupportedMetrics.FN] = fn
+    metrics[SupportedMetrics.TP] = tp
+    metrics[SupportedMetrics.TN] = tn
+    metrics[SupportedMetrics.ROC_AUC] = roc_auc_score(y_true, y_pred)
+    precision, recall, _ = precision_recall_curve(y_true, y_pred)
+    metrics[SupportedMetrics.PRC_AUC] = auc(recall, precision)
+    metrics[SupportedMetrics.MCC] = matthews_corrcoef(y_true, y_pred)
+    return pd.DataFrame.from_dict(metrics, orient="index", columns=[column_name])
 
 
 def _call_pandas_fsspec(pd_fn, filename, storage_options, **kwargs):

From a1f3a568db479fd8acc2bc69ac35d4dcfaaeb83d Mon Sep 17 00:00:00 2001
From: govarsha
Date: Fri, 12 Jan 2024 19:56:21 +0530
Subject: [PATCH 2/2] removed unused mean/median confusion-matrix metrics from
 const.py

---
ads/opctl/operator/lowcode/anomaly/const.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/ads/opctl/operator/lowcode/anomaly/const.py b/ads/opctl/operator/lowcode/anomaly/const.py index 41e758861..b8614c921 100644 --- a/ads/opctl/operator/lowcode/anomaly/const.py +++ b/ads/opctl/operator/lowcode/anomaly/const.py @@ -63,10 +63,6 @@ class SupportedMetrics(str, metaclass=ExtendedEnumMeta): MEAN_PRECISION = "Mean Precision" MEAN_ACCURACY = "Mean Accuracy" MEAN_F1_SCORE = "Mean f1_score" - MEAN_FP = "Mean False Positive" - MEAN_FN = "Mean False Negative" - MEAN_TP = "Mean True Positive" - MEAN_TN = "Mean True Negative" MEAN_ROC_AUC = "Mean ROC_AUC" MEAN_PRC_AUC = "Mean PRC_AUC" MEAN_MCC = "Mean MCC" @@ -74,10 +70,6 @@ class SupportedMetrics(str, metaclass=ExtendedEnumMeta): MEDIAN_PRECISION = "Median Precision" MEDIAN_ACCURACY = "Median Accuracy" MEDIAN_F1_SCORE = "Median f1_score" - MEDIAN_FP = "Median False Positive" - MEDIAN_FN = "Median False Negative" - MEDIAN_TP = "Median True Positive" - MEDIAN_TN = "Median True Negative" MEDIAN_ROC_AUC = "Median ROC_AUC" MEDIAN_PRC_AUC = "Median PRC_AUC" MEDIAN_MCC = "Median MCC"
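
For reviewers who want to exercise the new options end to end, a minimal operator spec might look like the sketch below. The top-level kind/type/version keys follow the usual ADS operator layout, and every file name and column name here is an illustrative assumption rather than part of this patch; only validation_data, generate_inliers, and validation_metrics_filename come from the schema changes above.

    # Hypothetical anomaly.yaml exercising the options added in this patch.
    kind: operator
    type: anomaly
    version: v1
    spec:
      input_data:
        url: data.csv                 # training data with a datetime column and a target column
      validation_data:
        url: validation.csv           # labeled data; needs the datetime column plus an "anomaly" label column
      datetime_column:
        name: timestamp
      target_column: value
      generate_inliers: true          # also write inliers.csv next to outliers.csv
      validation_metrics_filename: validation_metrics.csv

With validation_data set, the report gains the two validation-metrics sections and validation_metrics.csv is written to the output directory; without it, only the training-side anomaly counts are reported.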
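The per-target metric computation that the new _build_metrics_df helper performs can also be sanity-checked in isolation. The sketch below mirrors its logic with made-up labels; note that because the hard 0/1 predictions (not continuous scores) are passed to roc_auc_score and precision_recall_curve, the two AUC values are single-threshold approximations.

    # Standalone sketch of the _build_metrics_df logic; y_true/y_pred are fabricated examples.
    import pandas as pd
    from sklearn.metrics import (
        accuracy_score, auc, confusion_matrix, f1_score, matthews_corrcoef,
        precision_recall_curve, precision_score, recall_score, roc_auc_score,
    )

    y_true = [0, 0, 1, 0, 1, 0, 0, 1]  # ground-truth labels from the validation file
    y_pred = [0, 1, 1, 0, 1, 0, 0, 0]  # model output, 1 = anomaly

    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    precision, recall, _ = precision_recall_curve(y_true, y_pred)

    metrics = {
        "Recall": recall_score(y_true, y_pred),
        "Precision": precision_score(y_true, y_pred),
        "Accuracy": accuracy_score(y_true, y_pred),
        "f1_score": f1_score(y_true, y_pred),
        "True Positive": tp,
        "True Negative": tn,
        "False Positive": fp,
        "False Negative": fn,
        "ROC_AUC": roc_auc_score(y_true, y_pred),
        "PRC_AUC": auc(recall, precision),  # area under the precision-recall curve
        "MCC": matthews_corrcoef(y_true, y_pred),
    }

    # One column per target, matching the layout _build_metrics_df returns.
    print(pd.DataFrame.from_dict(metrics, orient="index", columns=["target_1"]))

These per-target columns are what _validation_data_evaluate_metrics concatenates into total_metrics before taking the row-wise means and medians for the summary table.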