diff --git a/tests/expectations/metrics/test_core.py b/tests/expectations/metrics/test_core.py
index 61fc377e0bb5..bf87921e6bc0 100644
--- a/tests/expectations/metrics/test_core.py
+++ b/tests/expectations/metrics/test_core.py
@@ -522,7 +522,7 @@ def test_map_column_value_lengths_between_pd():
 
 
 def test_map_unique_pd_column_exists():
-    engine = build_pandas_engine(pd.DataFrame({"a": [1, 2, 3, 3, None]}))
+    engine = build_pandas_engine(pd.DataFrame({"a": [1, 2, 3, 3, 4, None]}))
 
     metrics: dict = {}
 
@@ -532,7 +532,7 @@ def test_map_unique_pd_column_exists():
     table_columns_metric, results = get_table_columns_metric(engine=engine)
     metrics.update(results)
 
-    desired_metric = MetricConfiguration(
+    condition_metric = MetricConfiguration(
         metric_name="column_values.unique.condition",
         metric_domain_kwargs={"column": "a"},
         metric_value_kwargs=dict(),
@@ -541,11 +541,27 @@ def test_map_unique_pd_column_exists():
         },
     )
     results = engine.resolve_metrics(
-        metrics_to_resolve=(desired_metric,),
+        metrics_to_resolve=(condition_metric,),
         metrics=metrics,
     )
+    metrics.update(results)
+
+    unexpected_count_metric = MetricConfiguration(
+        metric_name="column_values.unique.unexpected_count",
+        metric_domain_kwargs={"column": "a"},
+        metric_value_kwargs=dict(),
+        metric_dependencies={
+            "unexpected_condition": condition_metric,
+            "table.columns": table_columns_metric,
+        },
+    )
+    results = engine.resolve_metrics(
+        metrics_to_resolve=(unexpected_count_metric,), metrics=metrics
+    )
+    metrics.update(results)
 
-    assert list(results[desired_metric.id][0]) == [False, False, True, True]
+    assert list(metrics[condition_metric.id][0]) == [False, False, True, True, False]
+    assert metrics[unexpected_count_metric.id] == 2
 
 
 def test_map_unique_pd_column_does_not_exist():
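
For reviewers, the new expected values follow from the map-metric semantics this test exercises: nulls are dropped from the domain column, `column_values.unique.condition` marks each remaining value as unexpected when it occurs more than once, and `unexpected_count` counts the `True` entries. A minimal pandas sketch of those assumed semantics (my own approximation, not the engine's actual code path) reproduces the asserted values:

```python
import pandas as pd

# Sketch of the assumed semantics behind the asserted values (not the
# engine's real implementation): drop nulls from the domain, then flag
# every value that occurs more than once as unexpected.
series = pd.Series([1, 2, 3, 3, 4, None]).dropna()

# duplicated(keep=False) is True for *all* members of a duplicate group,
# mirroring the unexpected-condition the test asserts on.
condition = series.duplicated(keep=False)

assert list(condition) == [False, False, True, True, False]
assert int(condition.sum()) == 2  # the unexpected_count
```

Adding `4` to the fixture also gives the condition a `False` entry after the duplicate pair, so the assertion distinguishes a correct per-row condition from one that merely truncates at the duplicates.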