From a27dca78d27a9bb9248ef61402b04e0194264ae8 Mon Sep 17 00:00:00 2001 From: Colleen O'Rourke Date: Mon, 22 Apr 2024 11:09:51 -0700 Subject: [PATCH 1/6] ref(rules): Refactor get_rate to handle batch --- .../rules/conditions/event_frequency.py | 59 ++++++-- .../rules/conditions/test_event_frequency.py | 128 ++++++++++++++---- 2 files changed, 150 insertions(+), 37 deletions(-) diff --git a/src/sentry/rules/conditions/event_frequency.py b/src/sentry/rules/conditions/event_frequency.py index 0f322bda690fe9..f6a2a25842cfeb 100644 --- a/src/sentry/rules/conditions/event_frequency.py +++ b/src/sentry/rules/conditions/event_frequency.py @@ -140,13 +140,15 @@ def passes(self, event: GroupEvent, state: EventState) -> bool: if state.is_new and value > 1: return False - # TODO(mgaeta): Bug: Rule is optional. + comparison_type = self.get_option("comparisonType", ComparisonType.COUNT) + comparison_interval = COMPARISON_INTERVALS[self.get_option("comparisonInterval")][1] try: - current_value = self.get_rate(event, interval, self.rule.environment_id) # type: ignore[arg-type, union-attr] + current_value = self.get_rate(event, interval, self.rule.environment_id, comparison_type, comparison_interval) # type: ignore[arg-type, union-attr] # XXX(CEO): once inc-666 work is concluded, rm try/except except RateLimitExceeded: metrics.incr("rule.event_frequency.snuba_query_limit") return False + logging.info("event_frequency_rule current: %s, threshold: %s", current_value, value) return current_value > value @@ -214,7 +216,16 @@ def batch_query_hook( """ raise NotImplementedError - def get_rate(self, event: GroupEvent, interval: str, environment_id: int) -> int: + def get_rate( + self, + event: GroupEvent, + interval: str, + environment_id: int, + comparison_type: str, + comparison_interval: timedelta | None = None, + batch: bool = False, + group_ids: list[int] | None = None, + ) -> int: _, duration = self.intervals[interval] end = timezone.now() # For conditions with interval >= 1 hour we don't need to worry about read your writes @@ -223,18 +234,40 @@ def get_rate(self, event: GroupEvent, interval: str, environment_id: int) -> int if duration >= timedelta(hours=1): option_override_cm = options_override({"consistent": False}) with option_override_cm: - result: int = self.query(event, end - duration, end, environment_id=environment_id) - comparison_type = self.get_option("comparisonType", ComparisonType.COUNT) + if batch: + start = end - duration + result = self.batch_query( + group_ids=group_ids, + start=start, + end=end, + environment_id=environment_id, + ) + else: + result: int = self.query(event, end - duration, end, environment_id=environment_id) if comparison_type == ComparisonType.PERCENT: - comparison_interval = COMPARISON_INTERVALS[self.get_option("comparisonInterval")][1] comparison_end = end - comparison_interval - # TODO: Figure out if there's a way we can do this less frequently. All queries are - # automatically cached for 10s. We could consider trying to cache this and the main - # query for 20s to reduce the load. 
- comparison_result = self.query( - event, comparison_end - duration, comparison_end, environment_id=environment_id - ) - result = percent_increase(result, comparison_result) + if batch: + comparison_result = self.batch_query( + group_ids=group_ids, + start=start, + end=comparison_end, + environment_id=environment_id, + ) + result = { + group_id: percent_increase(result[group_id], comparison_result[group_id]) + for group_id in group_ids + } + else: + # TODO: Figure out if there's a way we can do this less frequently. All queries are + # automatically cached for 10s. We could consider trying to cache this and the main + # query for 20s to reduce the load. + comparison_result = self.query( + event, + comparison_end - duration, + comparison_end, + environment_id=environment_id, + ) + result = percent_increase(result, comparison_result) return result diff --git a/tests/snuba/rules/conditions/test_event_frequency.py b/tests/snuba/rules/conditions/test_event_frequency.py index 209e1e153d3301..66176e558c45ff 100644 --- a/tests/snuba/rules/conditions/test_event_frequency.py +++ b/tests/snuba/rules/conditions/test_event_frequency.py @@ -290,43 +290,63 @@ def _run_test(self, minutes, data, passes, add_events=False): self.assertDoesNotPass(environment_rule, event, is_new=False) def test_one_minute_with_events(self): - data = {"interval": "1m", "value": 6} + data = {"interval": "1m", "value": 6, "comparisonType": "count", "comparisonInterval": "5m"} self._run_test(data=data, minutes=1, passes=True, add_events=True) - data = {"interval": "1m", "value": 16} + data = { + "interval": "1m", + "value": 16, + "comparisonType": "count", + "comparisonInterval": "5m", + } self._run_test(data=data, minutes=1, passes=False) def test_one_hour_with_events(self): - data = {"interval": "1h", "value": 6} + data = {"interval": "1h", "value": 6, "comparisonType": "count", "comparisonInterval": "5m"} self._run_test(data=data, minutes=60, passes=True, add_events=True) - data = {"interval": "1h", "value": 16} + data = { + "interval": "1h", + "value": 16, + "comparisonType": "count", + "comparisonInterval": "5m", + } self._run_test(data=data, minutes=60, passes=False) def test_one_day_with_events(self): - data = {"interval": "1d", "value": 6} + data = {"interval": "1d", "value": 6, "comparisonType": "count", "comparisonInterval": "5m"} self._run_test(data=data, minutes=1440, passes=True, add_events=True) - data = {"interval": "1d", "value": 16} + data = { + "interval": "1d", + "value": 16, + "comparisonType": "count", + "comparisonInterval": "5m", + } self._run_test(data=data, minutes=1440, passes=False) def test_one_week_with_events(self): - data = {"interval": "1w", "value": 6} + data = {"interval": "1w", "value": 6, "comparisonType": "count", "comparisonInterval": "5m"} self._run_test(data=data, minutes=10080, passes=True, add_events=True) - data = {"interval": "1w", "value": 16} + data = { + "interval": "1w", + "value": 16, + "comparisonType": "count", + "comparisonInterval": "5m", + } self._run_test(data=data, minutes=10080, passes=False) def test_one_minute_no_events(self): - data = {"interval": "1m", "value": 6} + data = {"interval": "1m", "value": 6, "comparisonType": "count", "comparisonInterval": "5m"} self._run_test(data=data, minutes=1, passes=False) def test_one_hour_no_events(self): - data = {"interval": "1h", "value": 6} + data = {"interval": "1h", "value": 6, "comparisonType": "count", "comparisonInterval": "5m"} self._run_test(data=data, minutes=60, passes=False) def test_one_day_no_events(self): - data = 
{"interval": "1d", "value": 6} + data = {"interval": "1d", "value": 6, "comparisonType": "count", "comparisonInterval": "5m"} self._run_test(data=data, minutes=1440, passes=False) def test_one_week_no_events(self): - data = {"interval": "1w", "value": 6} + data = {"interval": "1w", "value": 6, "comparisonType": "count", "comparisonInterval": "5m"} self._run_test(data=data, minutes=10080, passes=False) def test_comparison(self): @@ -516,57 +536,117 @@ def increment(self, event, count, environment=None, timestamp=None): @patch("sentry.rules.conditions.event_frequency.MIN_SESSIONS_TO_FIRE", 1) def test_five_minutes_with_events(self): self._make_sessions(60) - data = {"interval": "5m", "value": 39} + data = { + "interval": "5m", + "value": 39, + "comparisonType": "count", + "comparisonInterval": "5m", + } self._run_test(data=data, minutes=5, passes=True, add_events=True) - data = {"interval": "5m", "value": 41} + data = { + "interval": "5m", + "value": 41, + "comparisonType": "count", + "comparisonInterval": "5m", + } self._run_test(data=data, minutes=5, passes=False) @patch("sentry.rules.conditions.event_frequency.MIN_SESSIONS_TO_FIRE", 1) def test_ten_minutes_with_events(self): self._make_sessions(60) - data = {"interval": "10m", "value": 49} + data = { + "interval": "10m", + "value": 49, + "comparisonType": "count", + "comparisonInterval": "5m", + } self._run_test(data=data, minutes=10, passes=True, add_events=True) - data = {"interval": "10m", "value": 51} + data = { + "interval": "10m", + "value": 51, + "comparisonType": "count", + "comparisonInterval": "5m", + } self._run_test(data=data, minutes=10, passes=False) @patch("sentry.rules.conditions.event_frequency.MIN_SESSIONS_TO_FIRE", 1) def test_thirty_minutes_with_events(self): self._make_sessions(60) - data = {"interval": "30m", "value": 49} + data = { + "interval": "30m", + "value": 49, + "comparisonType": "count", + "comparisonInterval": "5m", + } self._run_test(data=data, minutes=30, passes=True, add_events=True) - data = {"interval": "30m", "value": 51} + data = { + "interval": "30m", + "value": 51, + "comparisonType": "count", + "comparisonInterval": "5m", + } self._run_test(data=data, minutes=30, passes=False) @patch("sentry.rules.conditions.event_frequency.MIN_SESSIONS_TO_FIRE", 1) def test_one_hour_with_events(self): self._make_sessions(60) - data = {"interval": "1h", "value": 49} + data = { + "interval": "1h", + "value": 49, + "comparisonType": "count", + "comparisonInterval": "5m", + } self._run_test(data=data, minutes=60, add_events=True, passes=True) - data = {"interval": "1h", "value": 51} + data = { + "interval": "1h", + "value": 51, + "comparisonType": "count", + "comparisonInterval": "5m", + } self._run_test(data=data, minutes=60, passes=False) @patch("sentry.rules.conditions.event_frequency.MIN_SESSIONS_TO_FIRE", 1) def test_five_minutes_no_events(self): self._make_sessions(60) - data = {"interval": "5m", "value": 39} + data = { + "interval": "5m", + "value": 39, + "comparisonType": "count", + "comparisonInterval": "5m", + } self._run_test(data=data, minutes=5, passes=True, add_events=True) @patch("sentry.rules.conditions.event_frequency.MIN_SESSIONS_TO_FIRE", 1) def test_ten_minutes_no_events(self): self._make_sessions(60) - data = {"interval": "10m", "value": 49} + data = { + "interval": "10m", + "value": 49, + "comparisonType": "count", + "comparisonInterval": "5m", + } self._run_test(data=data, minutes=10, passes=True, add_events=True) @patch("sentry.rules.conditions.event_frequency.MIN_SESSIONS_TO_FIRE", 1) def 
test_thirty_minutes_no_events(self): self._make_sessions(60) - data = {"interval": "30m", "value": 49} + data = { + "interval": "30m", + "value": 49, + "comparisonType": "count", + "comparisonInterval": "5m", + } self._run_test(data=data, minutes=30, passes=True, add_events=True) @patch("sentry.rules.conditions.event_frequency.MIN_SESSIONS_TO_FIRE", 1) def test_one_hour_no_events(self): self._make_sessions(60) - data = {"interval": "1h", "value": 49} + data = { + "interval": "1h", + "value": 49, + "comparisonType": "count", + "comparisonInterval": "5m", + } self._run_test(data=data, minutes=60, passes=False) @patch("sentry.rules.conditions.event_frequency.MIN_SESSIONS_TO_FIRE", 1) From e07165671c356ccc7db384aa31361e73dd916aa5 Mon Sep 17 00:00:00 2001 From: Colleen O'Rourke Date: Mon, 22 Apr 2024 12:44:14 -0700 Subject: [PATCH 2/6] break into get rate single and bulk --- .../rules/conditions/event_frequency.py | 77 +++++++++++-------- 1 file changed, 45 insertions(+), 32 deletions(-) diff --git a/src/sentry/rules/conditions/event_frequency.py b/src/sentry/rules/conditions/event_frequency.py index f6a2a25842cfeb..7ed4f065be33fa 100644 --- a/src/sentry/rules/conditions/event_frequency.py +++ b/src/sentry/rules/conditions/event_frequency.py @@ -143,7 +143,7 @@ def passes(self, event: GroupEvent, state: EventState) -> bool: comparison_type = self.get_option("comparisonType", ComparisonType.COUNT) comparison_interval = COMPARISON_INTERVALS[self.get_option("comparisonInterval")][1] try: - current_value = self.get_rate(event, interval, self.rule.environment_id, comparison_type, comparison_interval) # type: ignore[arg-type, union-attr] + current_value = self.get_rate(duration, comparison_interval, event, self.rule.environment_id, comparison_type) # type: ignore[arg-type, union-attr] # XXX(CEO): once inc-666 work is concluded, rm try/except except RateLimitExceeded: metrics.incr("rule.event_frequency.snuba_query_limit") @@ -218,14 +218,13 @@ def batch_query_hook( def get_rate( self, - event: GroupEvent, interval: str, environment_id: int, comparison_type: str, comparison_interval: timedelta | None = None, - batch: bool = False, - group_ids: list[int] | None = None, - ) -> int: + event: GroupEvent | None = None, + group_ids: set[int] | None = None, + ) -> int | dict[int, int]: _, duration = self.intervals[interval] end = timezone.now() # For conditions with interval >= 1 hour we don't need to worry about read your writes @@ -234,43 +233,57 @@ def get_rate( if duration >= timedelta(hours=1): option_override_cm = options_override({"consistent": False}) with option_override_cm: - if batch: - start = end - duration - result = self.batch_query( - group_ids=group_ids, - start=start, - end=end, - environment_id=environment_id, - ) - else: - result: int = self.query(event, end - duration, end, environment_id=environment_id) + start = end - duration + if event: + result = self.get_rate_single(event, start, end, environment_id) + elif group_ids: + result = self.get_rate_bulk(group_ids, start, end, environment_id) if comparison_type == ComparisonType.PERCENT: comparison_end = end - comparison_interval - if batch: - comparison_result = self.batch_query( - group_ids=group_ids, - start=start, - end=comparison_end, - environment_id=environment_id, + # TODO: Figure out if there's a way we can do this less frequently. All queries are + # automatically cached for 10s. We could consider trying to cache this and the main + # query for 20s to reduce the load. 
+ start = comparison_end - duration + comparison_end = end - comparison_interval + if event: + comparison_result = self.get_rate_single( + event, start, comparison_end, environment_id + ) + result = percent_increase(result, comparison_result) + elif group_ids: + comparison_result = self.get_rate_bulk( + group_ids, start, comparison_end, environment_id ) result = { group_id: percent_increase(result[group_id], comparison_result[group_id]) for group_id in group_ids } - else: - # TODO: Figure out if there's a way we can do this less frequently. All queries are - # automatically cached for 10s. We could consider trying to cache this and the main - # query for 20s to reduce the load. - comparison_result = self.query( - event, - comparison_end - duration, - comparison_end, - environment_id=environment_id, - ) - result = percent_increase(result, comparison_result) return result + def get_rate_single( + self, + event: GroupEvent, + start: datetime, + end: datetime, + environment_id: int, + ) -> int: + return self.query(event, start, end, environment_id=environment_id) + + def get_rate_bulk( + self, + group_ids: set[int], + start: datetime, + end: datetime, + environment_id: int, + ) -> int: + return self.batch_query( + group_ids=group_ids, + start=start, + end=end, + environment_id=environment_id, + ) + def get_snuba_query_result( self, tsdb_function: Callable[..., Any], From 56120eed9540bc1c1cadbbac308c0dc3a6194a2d Mon Sep 17 00:00:00 2001 From: Colleen O'Rourke Date: Mon, 22 Apr 2024 17:51:02 -0700 Subject: [PATCH 3/6] nuke get_start_end_from_duration, add a default value for comparison interval --- .../rules/conditions/event_frequency.py | 53 ++++++++++++++++++- 1 file changed, 52 insertions(+), 1 deletion(-) diff --git a/src/sentry/rules/conditions/event_frequency.py b/src/sentry/rules/conditions/event_frequency.py index 7ed4f065be33fa..56f19224b429a5 100644 --- a/src/sentry/rules/conditions/event_frequency.py +++ b/src/sentry/rules/conditions/event_frequency.py @@ -141,7 +141,9 @@ def passes(self, event: GroupEvent, state: EventState) -> bool: return False comparison_type = self.get_option("comparisonType", ComparisonType.COUNT) - comparison_interval = COMPARISON_INTERVALS[self.get_option("comparisonInterval")][1] + comparison_interval_option = self.get_option("comparisonInterval", "5m") + comparison_interval = COMPARISON_INTERVALS[comparison_interval_option][1] + _, duration = self.intervals[interval] try: current_value = self.get_rate(duration, comparison_interval, event, self.rule.environment_id, comparison_type) # type: ignore[arg-type, union-attr] # XXX(CEO): once inc-666 work is concluded, rm try/except @@ -232,6 +234,28 @@ def get_rate( option_override_cm: contextlib.AbstractContextManager[object] = contextlib.nullcontext() if duration >= timedelta(hours=1): option_override_cm = options_override({"consistent": False}) +<<<<<<< HEAD +======= + return option_override_cm + + def get_comparison_start_end( + self, interval: timedelta, duration: timedelta + ) -> tuple[datetime, datetime]: + end = timezone.now() - interval + start = end - duration + return (start, end) + + def get_rate( + self, + duration: timedelta, + interval: timedelta, + event: GroupEvent, + environment_id: int, + comparison_type: str, + ) -> int: + start, end = self.get_comparison_start_end(timedelta(), duration) + option_override_cm = self.get_option_override(duration) +>>>>>>> b3304fb0b21 (nuke get_start_end_from_duration, add a default value for comparison interval) with option_override_cm: start = end - duration if 
event: @@ -276,6 +300,7 @@ def get_rate_bulk( start: datetime, end: datetime, environment_id: int, +<<<<<<< HEAD ) -> int: return self.batch_query( group_ids=group_ids, @@ -283,6 +308,32 @@ def get_rate_bulk( end=end, environment_id=environment_id, ) +======= + comparison_type: str, + ) -> dict[int, int]: + start, end = self.get_comparison_start_end(timedelta(), duration) + option_override_cm = self.get_option_override(duration) + with option_override_cm: + result = self.batch_query( + group_ids=group_ids, + start=start, + end=end, + environment_id=environment_id, + ) + if comparison_type == ComparisonType.PERCENT: + start, comparison_end = self.get_comparison_start_end(interval, duration) + comparison_result = self.batch_query( + group_ids=group_ids, + start=start, + end=comparison_end, + environment_id=environment_id, + ) + result = { + group_id: percent_increase(result[group_id], comparison_result[group_id]) + for group_id in group_ids + } + return result +>>>>>>> b3304fb0b21 (nuke get_start_end_from_duration, add a default value for comparison interval) def get_snuba_query_result( self, From 1505c311a915bd5acb7909618e2317b2eecef975 Mon Sep 17 00:00:00 2001 From: Colleen O'Rourke Date: Tue, 23 Apr 2024 10:58:13 -0700 Subject: [PATCH 4/6] fixed after bad rebase --- .../rules/conditions/event_frequency.py | 75 +++---------------- 1 file changed, 12 insertions(+), 63 deletions(-) diff --git a/src/sentry/rules/conditions/event_frequency.py b/src/sentry/rules/conditions/event_frequency.py index 56f19224b429a5..35c99fb6d4ba7a 100644 --- a/src/sentry/rules/conditions/event_frequency.py +++ b/src/sentry/rules/conditions/event_frequency.py @@ -145,7 +145,7 @@ def passes(self, event: GroupEvent, state: EventState) -> bool: comparison_interval = COMPARISON_INTERVALS[comparison_interval_option][1] _, duration = self.intervals[interval] try: - current_value = self.get_rate(duration, comparison_interval, event, self.rule.environment_id, comparison_type) # type: ignore[arg-type, union-attr] + current_value = self.get_rate(duration=duration, comparison_interval=comparison_interval, event=event, environment_id=self.rule.environment_id, comparison_type=comparison_type) # type: ignore[arg-type, union-attr] # XXX(CEO): once inc-666 work is concluded, rm try/except except RateLimitExceeded: metrics.incr("rule.event_frequency.snuba_query_limit") @@ -218,24 +218,12 @@ def batch_query_hook( """ raise NotImplementedError - def get_rate( - self, - interval: str, - environment_id: int, - comparison_type: str, - comparison_interval: timedelta | None = None, - event: GroupEvent | None = None, - group_ids: set[int] | None = None, - ) -> int | dict[int, int]: - _, duration = self.intervals[interval] - end = timezone.now() + def get_option_override(self, duration: timedelta) -> contextlib.AbstractContextManager[object]: # For conditions with interval >= 1 hour we don't need to worry about read your writes # consistency. Disable it so that we can scale to more nodes. 
option_override_cm: contextlib.AbstractContextManager[object] = contextlib.nullcontext() if duration >= timedelta(hours=1): option_override_cm = options_override({"consistent": False}) -<<<<<<< HEAD -======= return option_override_cm def get_comparison_start_end( @@ -248,72 +236,34 @@ def get_comparison_start_end( def get_rate( self, duration: timedelta, - interval: timedelta, + comparison_interval: timedelta, event: GroupEvent, environment_id: int, comparison_type: str, ) -> int: start, end = self.get_comparison_start_end(timedelta(), duration) - option_override_cm = self.get_option_override(duration) ->>>>>>> b3304fb0b21 (nuke get_start_end_from_duration, add a default value for comparison interval) - with option_override_cm: - start = end - duration - if event: - result = self.get_rate_single(event, start, end, environment_id) - elif group_ids: - result = self.get_rate_bulk(group_ids, start, end, environment_id) + with self.get_option_override(duration): + result = self.query(event, start, end, environment_id=environment_id) if comparison_type == ComparisonType.PERCENT: - comparison_end = end - comparison_interval # TODO: Figure out if there's a way we can do this less frequently. All queries are # automatically cached for 10s. We could consider trying to cache this and the main # query for 20s to reduce the load. - start = comparison_end - duration - comparison_end = end - comparison_interval - if event: - comparison_result = self.get_rate_single( - event, start, comparison_end, environment_id - ) - result = percent_increase(result, comparison_result) - elif group_ids: - comparison_result = self.get_rate_bulk( - group_ids, start, comparison_end, environment_id - ) - result = { - group_id: percent_increase(result[group_id], comparison_result[group_id]) - for group_id in group_ids - } + start, end = self.get_comparison_start_end(comparison_interval, duration) + comparison_result = self.get_rate_single(event, start, end, environment_id) + result = percent_increase(result, comparison_result) return result - def get_rate_single( - self, - event: GroupEvent, - start: datetime, - end: datetime, - environment_id: int, - ) -> int: - return self.query(event, start, end, environment_id=environment_id) - def get_rate_bulk( self, + duration: timedelta, + comparison_interval: timedelta, group_ids: set[int], - start: datetime, - end: datetime, environment_id: int, -<<<<<<< HEAD - ) -> int: - return self.batch_query( - group_ids=group_ids, - start=start, - end=end, - environment_id=environment_id, - ) -======= comparison_type: str, ) -> dict[int, int]: start, end = self.get_comparison_start_end(timedelta(), duration) - option_override_cm = self.get_option_override(duration) - with option_override_cm: + with self.get_option_override(duration): result = self.batch_query( group_ids=group_ids, start=start, @@ -321,7 +271,7 @@ def get_rate_bulk( environment_id=environment_id, ) if comparison_type == ComparisonType.PERCENT: - start, comparison_end = self.get_comparison_start_end(interval, duration) + start, comparison_end = self.get_comparison_start_end(comparison_interval, duration) comparison_result = self.batch_query( group_ids=group_ids, start=start, @@ -333,7 +283,6 @@ def get_rate_bulk( for group_id in group_ids } return result ->>>>>>> b3304fb0b21 (nuke get_start_end_from_duration, add a default value for comparison interval) def get_snuba_query_result( self, From 1a19685ecd6803ec4fa315a5021960021a2e95b6 Mon Sep 17 00:00:00 2001 From: Colleen O'Rourke Date: Tue, 23 Apr 2024 11:33:06 -0700 Subject: 
[PATCH 5/6] fix function call, add docstring --- src/sentry/rules/conditions/event_frequency.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/sentry/rules/conditions/event_frequency.py b/src/sentry/rules/conditions/event_frequency.py index 35c99fb6d4ba7a..01ecc670027be5 100644 --- a/src/sentry/rules/conditions/event_frequency.py +++ b/src/sentry/rules/conditions/event_frequency.py @@ -229,6 +229,12 @@ def get_option_override(self, duration: timedelta) -> contextlib.AbstractContext def get_comparison_start_end( self, interval: timedelta, duration: timedelta ) -> tuple[datetime, datetime]: + """ + Calculate the start and end times for the query. `interval` is only used for EventFrequencyPercentCondition + as the '5 minutes' in The issue affects more than 100 percent of sessions in 5 minutes, otherwise it's the current time. + `duration` is the time frame in which the condition is measuring counts, e.g. the '10 minutes' in + "The issue is seen more than 100 times in 10 minutes" + """ end = timezone.now() - interval start = end - duration return (start, end) @@ -249,7 +255,7 @@ def get_rate( # automatically cached for 10s. We could consider trying to cache this and the main # query for 20s to reduce the load. start, end = self.get_comparison_start_end(comparison_interval, duration) - comparison_result = self.get_rate_single(event, start, end, environment_id) + comparison_result = self.query(event, start, end, environment_id=environment_id) result = percent_increase(result, comparison_result) return result From 983095f9809c2d6764ed7020707a321d2ace6595 Mon Sep 17 00:00:00 2001 From: Colleen O'Rourke Date: Tue, 23 Apr 2024 11:35:15 -0700 Subject: [PATCH 6/6] pull out get_rate_bulk --- .../rules/conditions/event_frequency.py | 30 ------------------- 1 file changed, 30 deletions(-) diff --git a/src/sentry/rules/conditions/event_frequency.py b/src/sentry/rules/conditions/event_frequency.py index 01ecc670027be5..7c2c6bcdf8f3a5 100644 --- a/src/sentry/rules/conditions/event_frequency.py +++ b/src/sentry/rules/conditions/event_frequency.py @@ -260,36 +260,6 @@ def get_rate( return result - def get_rate_bulk( - self, - duration: timedelta, - comparison_interval: timedelta, - group_ids: set[int], - environment_id: int, - comparison_type: str, - ) -> dict[int, int]: - start, end = self.get_comparison_start_end(timedelta(), duration) - with self.get_option_override(duration): - result = self.batch_query( - group_ids=group_ids, - start=start, - end=end, - environment_id=environment_id, - ) - if comparison_type == ComparisonType.PERCENT: - start, comparison_end = self.get_comparison_start_end(comparison_interval, duration) - comparison_result = self.batch_query( - group_ids=group_ids, - start=start, - end=comparison_end, - environment_id=environment_id, - ) - result = { - group_id: percent_increase(result[group_id], comparison_result[group_id]) - for group_id in group_ids - } - return result - def get_snuba_query_result( self, tsdb_function: Callable[..., Any],
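
---

Patches 1 and 3 also change how `passes()` resolves its options before calling `get_rate`: `comparisonType` falls back to a count comparison and `comparisonInterval` now defaults to `"5m"` before being looked up in `COMPARISON_INTERVALS`, which is why every test payload in the series now carries both keys. Below is a minimal sketch of that resolution only, with small stand-in dictionaries in place of Sentry's full `COMPARISON_INTERVALS` / `self.intervals` tables and a plain dict in place of `self.get_option`; the real tables have more entries and live in the condition classes.

```python
from datetime import timedelta

# Stand-ins for the lookup tables in sentry.rules.conditions.event_frequency;
# the real modules define more intervals, each paired with a display label.
COMPARISON_INTERVALS = {
    "5m": ("5 minutes", timedelta(minutes=5)),
    "1h": ("one hour", timedelta(hours=1)),
}
INTERVALS = {
    "1m": ("one minute", timedelta(minutes=1)),
    "1h": ("one hour", timedelta(hours=1)),
}


def resolve_rate_args(options: dict) -> tuple[timedelta, timedelta, str]:
    """Sketch of the option handling in passes() after patch 3."""
    comparison_type = options.get("comparisonType", "count")           # ComparisonType.COUNT
    comparison_interval_key = options.get("comparisonInterval", "5m")  # new default from patch 3
    comparison_interval = COMPARISON_INTERVALS[comparison_interval_key][1]
    _, duration = INTERVALS[options["interval"]]
    return duration, comparison_interval, comparison_type


if __name__ == "__main__":
    # A payload like the updated tests use; comparisonInterval is omitted on
    # purpose to show the "5m" fallback.
    print(resolve_rate_args({"interval": "1h", "value": 16, "comparisonType": "count"}))
```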
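
To see the shape the series converges on, here is a condensed, runnable sketch of the window math and the single versus bulk rate paths. It is deliberately not the Sentry implementation: the `event` / `environment_id` parameters and the `options_override({"consistent": False})` context manager are dropped, `query` / `batch_query` are injected as plain callables, the `ComparisonType` enum is replaced by plain strings, and `percent_increase` is a stand-in whose exact formula may differ from Sentry's helper. `get_rate_bulk` mirrors the version that patch 6 removes from this module again.

```python
from __future__ import annotations

from datetime import datetime, timedelta, timezone
from typing import Callable


def percent_increase(current: int, previous: int) -> int:
    # Stand-in for Sentry's percent_increase helper; only here so the sketch
    # runs end to end, the real rounding/edge cases may differ.
    if previous == 0:
        return 0
    return int((current / previous) * 100)


def get_comparison_start_end(interval: timedelta, duration: timedelta) -> tuple[datetime, datetime]:
    # `interval` shifts the whole window into the past (zero for the current
    # window, the rule's comparisonInterval for the percent-comparison window);
    # `duration` is the width of the window the condition counts events over.
    end = datetime.now(timezone.utc) - interval
    return end - duration, end


def get_rate(
    duration: timedelta,
    comparison_interval: timedelta,
    comparison_type: str,
    query: Callable[[datetime, datetime], int],
) -> int:
    # Single-group path: count the current window, and for percent rules compare
    # it against the same-width window `comparison_interval` ago.
    start, end = get_comparison_start_end(timedelta(), duration)
    result = query(start, end)
    if comparison_type == "percent":
        start, end = get_comparison_start_end(comparison_interval, duration)
        result = percent_increase(result, query(start, end))
    return result


def get_rate_bulk(
    duration: timedelta,
    comparison_interval: timedelta,
    group_ids: set[int],
    comparison_type: str,
    batch_query: Callable[[set[int], datetime, datetime], dict[int, int]],
) -> dict[int, int]:
    # Bulk path: identical window math, but one batched query per window and a
    # per-group percent_increase when the rule is a percent comparison.
    start, end = get_comparison_start_end(timedelta(), duration)
    result = batch_query(group_ids, start, end)
    if comparison_type == "percent":
        start, end = get_comparison_start_end(comparison_interval, duration)
        comparison = batch_query(group_ids, start, end)
        result = {gid: percent_increase(result[gid], comparison[gid]) for gid in group_ids}
    return result


if __name__ == "__main__":
    # "Seen N% more in the last 10 minutes than in the same 10 minutes one hour earlier."
    def fake_batch_query(group_ids: set[int], start: datetime, end: datetime) -> dict[int, int]:
        recent = end > datetime.now(timezone.utc) - timedelta(minutes=30)
        return {gid: (40 if recent else 20) for gid in group_ids}

    print(get_rate_bulk(timedelta(minutes=10), timedelta(hours=1), {1, 2}, "percent", fake_batch_query))
```

The design point the sketch tries to make visible is that `get_comparison_start_end` keeps the window arithmetic in one shared place, so the bulk path can be pulled out of the module in patch 6 without duplicating the start/end calculations used by the single-event path.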