1140 bug cv add label map to conditions #1228

Merged · 3 commits · Apr 10, 2022
19 changes: 9 additions & 10 deletions deepchecks/vision/checks/performance/class_performance.py
@@ -163,7 +163,7 @@ def condition(check_result: pd.DataFrame):
             not_passed_test = check_result.loc[check_result['Dataset'] == 'Test']
             if len(not_passed):
                 details = f'Found metrics with scores below threshold:\n' \
-                          f'{not_passed_test[["Class", "Metric", "Value"]].to_dict("records")}'
+                          f'{not_passed_test[["Class Name", "Metric", "Value"]].to_dict("records")}'
                 return ConditionResult(ConditionCategory.FAIL, details)
             return ConditionResult(ConditionCategory.PASS)
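For reference, a minimal sketch of how the renamed 'Class Name' column flows into the failure details. This is plain pandas, not the deepchecks API; the DataFrame contents, the threshold, and the definition of not_passed (which sits above the hunk) are assumptions for illustration only:

import pandas as pd

# Hypothetical check result using the renamed 'Class Name' column.
check_result = pd.DataFrame([
    {'Dataset': 'Test', 'Class Name': 'cat', 'Metric': 'Precision', 'Value': 0.42},
    {'Dataset': 'Test', 'Class Name': 'dog', 'Metric': 'Precision', 'Value': 0.91},
    {'Dataset': 'Train', 'Class Name': 'cat', 'Metric': 'Precision', 'Value': 0.88},
])

threshold = 0.5  # assumed threshold, for illustration
not_passed = check_result.loc[check_result['Value'] < threshold]
not_passed_test = not_passed.loc[not_passed['Dataset'] == 'Test']
if len(not_passed):
    details = (f'Found metrics with scores below threshold:\n'
               f'{not_passed_test[["Class Name", "Metric", "Value"]].to_dict("records")}')
    print(details)
    # -> Found metrics with scores below threshold:
    #    [{'Class Name': 'cat', 'Metric': 'Precision', 'Value': 0.42}]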

@@ -188,15 +188,15 @@ def condition(check_result: pd.DataFrame) -> ConditionResult:
             test_scores = check_result.loc[check_result['Dataset'] == 'Test']
             train_scores = check_result.loc[check_result['Dataset'] == 'Train']
 
-            if check_result.get('Class') is not None:
-                classes = check_result['Class'].unique()
+            if check_result.get('Class Name') is not None:
+                classes = check_result['Class Name'].unique()
             else:
                 classes = None
             explained_failures = []
             if classes is not None:
                 for class_name in classes:
-                    test_scores_class = test_scores.loc[test_scores['Class'] == class_name]
-                    train_scores_class = train_scores.loc[train_scores['Class'] == class_name]
+                    test_scores_class = test_scores.loc[test_scores['Class Name'] == class_name]
+                    train_scores_class = train_scores.loc[train_scores['Class Name'] == class_name]
                     test_scores_dict = dict(zip(test_scores_class['Metric'], test_scores_class['Value']))
                     train_scores_dict = dict(zip(train_scores_class['Metric'], train_scores_class['Value']))
                     # Calculate percentage of change from train to test
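The hunk is cut off before the degradation calculation, so here is a standalone sketch of the per-class train-to-test comparison using the renamed column. Plain pandas only; the sample data, the max_degradation threshold, and the degradation formula are assumptions for illustration, not the library's exact logic:

import pandas as pd

# Hypothetical per-class scores following the renamed schema.
check_result = pd.DataFrame([
    {'Dataset': 'Train', 'Class Name': 'cat', 'Metric': 'Precision', 'Value': 0.90},
    {'Dataset': 'Test',  'Class Name': 'cat', 'Metric': 'Precision', 'Value': 0.60},
    {'Dataset': 'Train', 'Class Name': 'dog', 'Metric': 'Precision', 'Value': 0.85},
    {'Dataset': 'Test',  'Class Name': 'dog', 'Metric': 'Precision', 'Value': 0.84},
])

max_degradation = 0.1  # assumed threshold, for illustration
test_scores = check_result.loc[check_result['Dataset'] == 'Test']
train_scores = check_result.loc[check_result['Dataset'] == 'Train']

explained_failures = []
for class_name in check_result['Class Name'].unique():
    test_scores_class = test_scores.loc[test_scores['Class Name'] == class_name]
    train_scores_class = train_scores.loc[train_scores['Class Name'] == class_name]
    test_scores_dict = dict(zip(test_scores_class['Metric'], test_scores_class['Value']))
    train_scores_dict = dict(zip(train_scores_class['Metric'], train_scores_class['Value']))
    for metric, train_value in train_scores_dict.items():
        # Relative drop from train to test for this class/metric pair (assumed formula).
        degradation = (train_value - test_scores_dict[metric]) / train_value
        if degradation > max_degradation:
            explained_failures.append(f'{metric} for class {class_name} '
                                      f'(train={train_value:.2f}, test={test_scores_dict[metric]:.2f})')

print(explained_failures)  # -> ['Precision for class cat (train=0.90, test=0.60)']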
@@ -256,9 +256,8 @@ def add_condition_class_performance_imbalance_ratio_not_greater_than(
         DeepchecksValueError
             if unknown score function name were passed;
         """
-        # TODO: Redefine default scorers when making the condition work
-        # if score is None:
-        #     score = next(iter(MULTICLASS_SCORERS_NON_AVERAGE))
+        if score is None:
+            raise DeepchecksValueError('Must define "score" parameter')
 
         def condition(check_result: pd.DataFrame) -> ConditionResult:
             if score not in set(check_result['Metric']):
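Behaviorally, the commented-out TODO fallback is gone: callers must now pass score explicitly, otherwise the condition raises DeepchecksValueError up front. A rough sketch of the guard in isolation; the exception class is stubbed here for illustration (in deepchecks it comes from the library's errors module):

class DeepchecksValueError(ValueError):
    """Stand-in for the deepchecks exception, for illustration only."""

def _validate_score(score):
    # Mirrors the new guard: no silent fallback to a default scorer.
    if score is None:
        raise DeepchecksValueError('Must define "score" parameter')
    return score

_validate_score('Precision')   # fine
# _validate_score(None)        # would raise DeepchecksValueError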
@@ -270,12 +269,12 @@ def condition(check_result: pd.DataFrame) -> ConditionResult:
 
             min_value_index = data['Value'].idxmin()
             min_row = data.loc[min_value_index]
-            min_class_name = min_row['Class']
+            min_class_name = min_row['Class Name']
             min_value = min_row['Value']
 
             max_value_index = data['Value'].idxmax()
             max_row = data.loc[max_value_index]
-            max_class_name = max_row['Class']
+            max_class_name = max_row['Class Name']
             max_value = max_row['Value']
 
             relative_difference = abs((min_value - max_value) / max_value)
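To close, a standalone sketch of the imbalance-ratio computation the last hunk touches, again reading the renamed 'Class Name' column. Plain pandas, not the deepchecks API; the metric name and sample values are assumptions for illustration:

import pandas as pd

score = 'Precision'  # assumed metric name, for illustration
check_result = pd.DataFrame([
    {'Dataset': 'Test', 'Class Name': 'cat',  'Metric': 'Precision', 'Value': 0.55},
    {'Dataset': 'Test', 'Class Name': 'dog',  'Metric': 'Precision', 'Value': 0.95},
    {'Dataset': 'Test', 'Class Name': 'bird', 'Metric': 'Precision', 'Value': 0.80},
])

data = check_result.loc[check_result['Metric'] == score]

min_row = data.loc[data['Value'].idxmin()]
min_class_name = min_row['Class Name']
min_value = min_row['Value']

max_row = data.loc[data['Value'].idxmax()]
max_class_name = max_row['Class Name']
max_value = max_row['Value']

# Relative gap between the best and worst performing class.
relative_difference = abs((min_value - max_value) / max_value)
print(f'{score} imbalance between {max_class_name} and {min_class_name}: '
      f'{relative_difference:.2%}')  # -> Precision imbalance between dog and cat: 42.11%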