diff --git a/src/sentry/constants.py b/src/sentry/constants.py
index 3e5492f470f1f6..53376c468aa804 100644
--- a/src/sentry/constants.py
+++ b/src/sentry/constants.py
@@ -46,7 +46,6 @@ def get_all_languages() -> List[str]:
     "date": _("Last Seen"),
     "new": _("First Seen"),
     "freq": _("Frequency"),
-    "better_priority": _("Better Priority"),
 }
 
 SEARCH_SORT_OPTIONS = {
diff --git a/src/sentry/models/savedsearch.py b/src/sentry/models/savedsearch.py
index b9208e10936a56..b6d90aeb4639f5 100644
--- a/src/sentry/models/savedsearch.py
+++ b/src/sentry/models/savedsearch.py
@@ -18,7 +18,6 @@ class SortOptions:
     FREQ = "freq"
     USER = "user"
     INBOX = "inbox"
-    BETTER_PRIORITY = "betterPriority"
 
     @classmethod
     def as_choices(cls):
@@ -29,7 +28,6 @@ def as_choices(cls):
             (cls.FREQ, _("Events")),
             (cls.USER, _("Users")),
             (cls.INBOX, _("Date Added")),
-            (cls.BETTER_PRIORITY, _("Better Priority")),
         )
 
 
diff --git a/src/sentry/search/snuba/executors.py b/src/sentry/search/snuba/executors.py
index 78bac15f0cf12e..3319dfbba42631 100644
--- a/src/sentry/search/snuba/executors.py
+++ b/src/sentry/search/snuba/executors.py
@@ -76,7 +76,7 @@ class PrioritySortWeights(TypedDict):
 
 
 @dataclass
-class BetterPriorityParams:
+class PriorityParams:
     # (event or issue age_hours) / (event or issue halflife hours)
     # any event or issue age that is greater than max_pow times the half-life hours will get clipped
     max_pow: int
@@ -238,7 +238,7 @@ def _prepare_aggregations(
         end: datetime,
         having: Sequence[Sequence[Any]],
         aggregate_kwargs: Optional[PrioritySortWeights] = None,
-        replace_better_priority_aggregation: Optional[bool] = False,
+        replace_priority_aggregation: Optional[bool] = False,
     ) -> list[Any]:
         extra_aggregations = self.dependency_aggregations.get(sort_field, [])
         required_aggregations = set([sort_field, "total"] + extra_aggregations)
@@ -249,8 +249,8 @@ def _prepare_aggregations(
         aggregations = []
         for alias in required_aggregations:
             aggregation = self.aggregation_defs[alias]
-            if replace_better_priority_aggregation and alias in ["priority", "better_priority"]:
-                aggregation = self.aggregation_defs["better_priority_issue_platform"]
+            if replace_priority_aggregation and alias == "priority":
+                aggregation = self.aggregation_defs["priority_issue_platform"]
             if callable(aggregation):
                 if aggregate_kwargs:
                     aggregation = aggregation(start, end, aggregate_kwargs.get(alias, {}))
@@ -302,10 +302,7 @@ def _prepare_params_for_category(
                 else:
                     conditions.append(converted_filter)
 
-        if (
-            sort_field in ["priority", "better_priority"]
-            and group_category is not GroupCategory.ERROR.value
-        ):
+        if sort_field == "priority" and group_category is not GroupCategory.ERROR.value:
             aggregations = self._prepare_aggregations(
                 sort_field, start, end, having, aggregate_kwargs, True
             )
@@ -503,13 +500,13 @@ def has_sort_strategy(self, sort_by: str) -> bool:
         return sort_by in self.sort_strategies.keys()
 
 
-def better_priority_aggregation(
+def priority_aggregation(
     start: datetime,
     end: datetime,
     aggregate_kwargs: PrioritySortWeights,
 ) -> Sequence[str]:
-    return better_priority_aggregation_impl(
-        BetterPriorityParams(
+    return priority_aggregation_impl(
+        PriorityParams(
             max_pow=16,
             min_score=0.01,
             event_age_weight=1,
@@ -529,13 +526,13 @@ def better_priority_aggregation(
     )
 
 
-def better_priority_issue_platform_aggregation(
+def priority_issue_platform_aggregation(
     start: datetime,
     end: datetime,
     aggregate_kwargs: PrioritySortWeights,
 ) -> Sequence[str]:
-    return better_priority_aggregation_impl(
-        BetterPriorityParams(
+    return priority_aggregation_impl(
+        PriorityParams(
             max_pow=16,
             min_score=0.01,
             event_age_weight=1,
@@ -555,8 +552,8 @@ def better_priority_issue_platform_aggregation(
     )
 
 
-def better_priority_aggregation_impl(
-    params: BetterPriorityParams,
+def priority_aggregation_impl(
+    params: PriorityParams,
     timestamp_column: str,
     use_stacktrace: bool,
     start: datetime,
@@ -695,24 +692,22 @@ class PostgresSnubaQueryExecutor(AbstractQueryExecutor):
         "date": "last_seen",
         "freq": "times_seen",
         "new": "first_seen",
-        "priority": "better_priority",
+        "priority": "priority",
         "user": "user_count",
         # We don't need a corresponding snuba field here, since this sort only happens
         # in Postgres
         "inbox": "",
-        "betterPriority": "better_priority",
     }
 
     aggregation_defs = {
         "times_seen": ["count()", ""],
         "first_seen": ["multiply(toUInt64(min(timestamp)), 1000)", ""],
         "last_seen": ["multiply(toUInt64(max(timestamp)), 1000)", ""],
-        "priority": better_priority_aggregation,
+        "priority": priority_aggregation,
         # Only makes sense with WITH TOTALS, returns 1 for an individual group.
         "total": ["uniq", ISSUE_FIELD_NAME],
         "user_count": ["uniq", "tags[sentry:user]"],
-        "better_priority": better_priority_aggregation,
-        "better_priority_issue_platform": better_priority_issue_platform_aggregation,
+        "priority_issue_platform": priority_issue_platform_aggregation,
     }
 
     @property
diff --git a/tests/snuba/api/endpoints/test_organization_group_index.py b/tests/snuba/api/endpoints/test_organization_group_index.py
index 5dce9d0763f586..9e6cc816d91a50 100644
--- a/tests/snuba/api/endpoints/test_organization_group_index.py
+++ b/tests/snuba/api/endpoints/test_organization_group_index.py
@@ -114,7 +114,7 @@ def test_query_for_archived(self):
         assert len(response.data) == 1
         assert response.data[0]["id"] == str(group.id)
 
-    def test_sort_by_better_priority(self):
+    def test_sort_by_priority(self):
         group = self.store_event(
             data={
                 "timestamp": iso_format(before_now(seconds=10)),
@@ -164,7 +164,7 @@ def test_sort_by_better_priority(self):
         }
 
         response = self.get_success_response(
-            sort="betterPriority",
+            sort="priority",
             query="is:unresolved",
             limit=25,
             start=iso_format(before_now(days=1)),
diff --git a/tests/snuba/search/test_backend.py b/tests/snuba/search/test_backend.py
index 8a3777aab62926..b12e379cb98339 100644
--- a/tests/snuba/search/test_backend.py
+++ b/tests/snuba/search/test_backend.py
@@ -79,7 +79,7 @@ def make_query(
         if limit is not None:
             kwargs["limit"] = limit
         if aggregate_kwargs:
-            kwargs["aggregate_kwargs"] = {"better_priority": {**aggregate_kwargs}}
+            kwargs["aggregate_kwargs"] = {"priority": {**aggregate_kwargs}}
 
         return self.backend.query(
             projects,
@@ -364,7 +364,7 @@ def test_sort(self):
         results = self.make_query(sort_by="user")
         assert list(results) == [self.group1, self.group2]
 
-    def test_better_priority_sort(self):
+    def test_priority_sort(self):
         weights: PrioritySortWeights = {
             "log_level": 5,
             "has_stacktrace": 5,
@@ -375,7 +375,7 @@ def test_better_priority_sort(self):
             "norm": False,
         }
         results = self.make_query(
-            sort_by="betterPriority",
+            sort_by="priority",
             aggregate_kwargs=weights,
         )
         assert list(results) == [self.group2, self.group1]
@@ -2597,12 +2597,12 @@ def test_error_main_thread_no_results(self):
         assert len(results) == 0
 
 
-class EventsBetterPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
+class EventsPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
     @property
     def backend(self):
         return EventsDatasetSnubaSearchBackend()
 
-    def test_better_priority_sort_old_and_new_events(self):
+    def test_priority_sort_old_and_new_events(self):
         """Test that an issue with only one old event is ranked lower than an issue with only one new event"""
         new_project = self.create_project(organization=self.project.organization)
         base_datetime = (datetime.utcnow() - timedelta(days=3)).replace(tzinfo=pytz.utc)
@@ -2644,7 +2644,7 @@ def test_better_priority_sort_old_and_new_events(self):
             "norm": False,
         }
         results = self.make_query(
-            sort_by="betterPriority",
+            sort_by="priority",
             projects=[new_project],
             aggregate_kwargs=weights,
         )
@@ -2652,7 +2652,7 @@ def test_better_priority_sort_old_and_new_events(self):
         old_group = Group.objects.get(id=old_event.group.id)
         assert list(results) == [recent_group, old_group]
 
-    def test_better_priority_sort_v2(self):
+    def test_priority_sort_v2(self):
         """Test that the v2 formula works."""
         new_project = self.create_project(organization=self.project.organization)
         base_datetime = (datetime.utcnow() - timedelta(days=3)).replace(tzinfo=pytz.utc)
@@ -2694,7 +2694,7 @@ def test_better_priority_sort_v2(self):
             "norm": False,
         }
         results = self.make_query(
-            sort_by="betterPriority",
+            sort_by="priority",
             projects=[new_project],
             aggregate_kwargs=weights,
         )
@@ -2702,7 +2702,7 @@ def test_better_priority_sort_v2(self):
         old_group = Group.objects.get(id=old_event.group.id)
         assert list(results) == [recent_group, old_group]
 
-    def test_better_priority_log_level_results(self):
+    def test_priority_log_level_results(self):
         """Test that the scoring results change when we pass in different log level weights"""
         base_datetime = (datetime.utcnow() - timedelta(hours=1)).replace(tzinfo=pytz.utc)
         event1 = self.store_event(
@@ -2733,7 +2733,7 @@ def test_better_priority_log_level_results(self):
         group2 = Group.objects.get(id=event2.group.id)
 
         agg_kwargs = {
-            "better_priority": {
+            "priority": {
                 "log_level": 0,
                 "has_stacktrace": 0,
                 "relative_volume": 1,
@@ -2749,7 +2749,7 @@ def test_better_priority_log_level_results(self):
             end=None,
             project_ids=[self.project.id],
            environment_ids=[],
-            sort_field="better_priority",
+            sort_field="priority",
             organization=self.organization,
             group_ids=[group1.id, group2.id],
             limit=150,
@@ -2760,14 +2760,14 @@ def test_better_priority_log_level_results(self):
         # initially group 2's score is higher since it has a more recent event
         assert group2_score_before > group1_score_before
 
-        agg_kwargs["better_priority"].update({"log_level": 5})
+        agg_kwargs["priority"].update({"log_level": 5})
 
         results2 = query_executor.snuba_search(
             start=None,
             end=None,
             project_ids=[self.project.id],
             environment_ids=[],
-            sort_field="better_priority",
+            sort_field="priority",
             organization=self.organization,
             group_ids=[group1.id, group2.id],
             limit=150,
@@ -2778,11 +2778,11 @@ def test_better_priority_log_level_results(self):
         # ensure fatal has a higher score than error
         assert group1_score_after > group2_score_after
 
-    def test_better_priority_has_stacktrace_results(self):
+    def test_priority_has_stacktrace_results(self):
         """Test that the scoring results change when we pass in different has_stacktrace weights"""
         base_datetime = (datetime.utcnow() - timedelta(hours=1)).replace(tzinfo=pytz.utc)
         agg_kwargs = {
-            "better_priority": {
+            "priority": {
                 "log_level": 0,
                 "has_stacktrace": 0,
                 "relative_volume": 1,
@@ -2833,7 +2833,7 @@ def test_better_priority_has_stacktrace_results(self):
             end=None,
             project_ids=[self.project.id],
             environment_ids=[],
-            sort_field="better_priority",
+            sort_field="priority",
             organization=self.organization,
             group_ids=[group1.id, group2.id],
             limit=150,
@@ -2843,13 +2843,13 @@ def test_better_priority_has_stacktrace_results(self):
         group2_score = results[1][1]
         assert group1_score == group2_score
 
-        agg_kwargs["better_priority"].update({"has_stacktrace": 3})
+        agg_kwargs["priority"].update({"has_stacktrace": 3})
         results = query_executor.snuba_search(
             start=None,
             end=None,
             project_ids=[self.project.id],
             environment_ids=[],
-            sort_field="better_priority",
+            sort_field="priority",
             organization=self.organization,
             group_ids=[group1.id, group2.id],
             limit=150,
@@ -2860,7 +2860,7 @@ def test_better_priority_has_stacktrace_results(self):
         # check that a group with an event with a stacktrace has a higher weight than one without
         assert group1_score < group2_score
 
-    def test_better_priority_event_halflife_results(self):
+    def test_priority_event_halflife_results(self):
         """Test that the scoring results change when we pass in different event halflife weights"""
         base_datetime = (datetime.utcnow() - timedelta(hours=1)).replace(tzinfo=pytz.utc)
         event1 = self.store_event(
@@ -2891,7 +2891,7 @@ def test_better_priority_event_halflife_results(self):
         group2 = Group.objects.get(id=event2.group.id)
 
         agg_kwargs = {
-            "better_priority": {
+            "priority": {
                 "log_level": 0,
                 "has_stacktrace": 0,
                 "relative_volume": 1,
@@ -2907,7 +2907,7 @@ def test_better_priority_event_halflife_results(self):
             end=None,
             project_ids=[self.project.id],
             environment_ids=[],
-            sort_field="better_priority",
+            sort_field="priority",
             organization=self.organization,
             group_ids=[group1.id, group2.id],
             limit=150,
@@ -2918,13 +2918,13 @@ def test_better_priority_event_halflife_results(self):
         # initially group 2's score is higher since it has a more recent event
         assert group2_score_before > group1_score_before
 
-        agg_kwargs["better_priority"].update({"event_halflife_hours": 2})
+        agg_kwargs["priority"].update({"event_halflife_hours": 2})
         results = query_executor.snuba_search(
             start=None,
             end=None,
             project_ids=[self.project.id],
             environment_ids=[],
-            sort_field="better_priority",
+            sort_field="priority",
             organization=self.organization,
             group_ids=[group1.id, group2.id],
             limit=150,
@@ -2934,7 +2934,7 @@ def test_better_priority_event_halflife_results(self):
         group2_score_after = results[1][1]
         assert group1_score_after < group2_score_after
 
-    def test_better_priority_mixed_group_types(self):
+    def test_priority_mixed_group_types(self):
         base_datetime = (datetime.utcnow() - timedelta(hours=1)).replace(tzinfo=pytz.utc)
 
         error_event = self.store_event(
@@ -2967,7 +2967,7 @@ def test_better_priority_mixed_group_types(self):
         profile_group_1 = group_info.group
 
         agg_kwargs = {
-            "better_priority": {
+            "priority": {
                 "log_level": 0,
                 "has_stacktrace": 0,
                 "relative_volume": 1,
@@ -2989,7 +2989,7 @@ def test_better_priority_mixed_group_types(self):
             end=None,
             project_ids=[self.project.id],
             environment_ids=[],
-            sort_field="better_priority",
+            sort_field="priority",
             organization=self.organization,
             group_ids=[profile_group_1.id, error_group.id],
             limit=150,