diff --git a/src/sentry/workflow_engine/tasks/workflows.py b/src/sentry/workflow_engine/tasks/workflows.py index 5d43b78b54d2c7..5f60296a577e9a 100644 --- a/src/sentry/workflow_engine/tasks/workflows.py +++ b/src/sentry/workflow_engine/tasks/workflows.py @@ -25,6 +25,7 @@ ) from sentry.workflow_engine.types import WorkflowEventData from sentry.workflow_engine.utils import log_context, scopedstats +from sentry.workflow_engine.utils.sentry_level_utils import quiet_redis_noise logger = log_context.get_logger(__name__) @@ -75,8 +76,8 @@ def process_workflow_activity(activity_id: int, group_id: int, detector_id: int) event=activity, group=group, ) - - process_workflows(event_data, event_start_time=activity.datetime, detector=detector) + with quiet_redis_noise(): + process_workflows(event_data, event_start_time=activity.datetime, detector=detector) metrics.incr( "workflow_engine.tasks.process_workflows.activity_update.executed", tags={"activity_type": activity.type, "detector_type": detector.type}, @@ -136,7 +137,8 @@ def process_workflows_event( if start_timestamp_seconds else datetime.now(tz=UTC) ) - process_workflows(event_data, event_start_time=event_start_time) + with quiet_redis_noise(): + process_workflows(event_data, event_start_time=event_start_time) duration = time.time() - start_time is_slow = duration > 1.0 # We want full coverage for particularly slow cases, plus a random sampling. @@ -180,6 +182,7 @@ def schedule_delayed_workflows(**kwargs: Any) -> None: "Configured to use process_pending_batch for delayed_workflow; exiting." 
import logging
from collections.abc import Generator
from contextlib import contextmanager
from typing import Any, Literal

import sentry_sdk

logger = logging.getLogger(__name__)


# sentry_sdk doesn't export these types, so alias them locally.
_Event = Any
_ExcInfo = Any

SentryLevel = Literal["error", "warning", "info"]


@contextmanager
def set_sentry_exception_levels(
    exception_levels: dict[type[BaseException], SentryLevel],
) -> Generator[None]:
    """
    Install a scoped Sentry error processor that rewrites the event level
    for selected exception types.

    Args:
        exception_levels: Map of exception types to the Sentry level each
            should be reported at. Matching is by exact type (equality),
            not isinstance, so subclasses are unaffected.
    """

    def _override_level(event: _Event, exc_info: _ExcInfo) -> _Event | None:
        # exc_info is (type, value, traceback); match on the concrete type
        # of the raised value.
        desired = exception_levels.get(type(exc_info[1]))
        if desired is not None:
            event["level"] = desired
        return event

    # new_scope keeps the processor local to this block: it is discarded
    # when the context exits, so callers outside it are unaffected.
    with sentry_sdk.new_scope() as scope:
        scope.add_error_processor(_override_level)
        yield
+ """ + from redis.exceptions import TimeoutError + from rediscluster.exceptions import MovedError # type: ignore[attr-defined] + + with set_sentry_exception_levels({TimeoutError: "info", MovedError: "info"}): + yield diff --git a/tests/sentry/workflow_engine/utils/test_sentry_level_utils.py b/tests/sentry/workflow_engine/utils/test_sentry_level_utils.py new file mode 100644 index 00000000000000..0f774351edf5bd --- /dev/null +++ b/tests/sentry/workflow_engine/utils/test_sentry_level_utils.py @@ -0,0 +1,36 @@ +from collections.abc import Callable +from typing import Any +from unittest.mock import Mock, patch + +from sentry.workflow_engine.utils.sentry_level_utils import set_sentry_exception_levels + + +class TestSetSentryExceptionLevels: + def test_basic_functionality_minimal_mocking(self) -> None: + with patch("sentry_sdk.new_scope") as mock_scope: + mock_scope_instance = Mock() + mock_scope.return_value.__enter__ = Mock(return_value=mock_scope_instance) + mock_scope.return_value.__exit__ = Mock(return_value=None) + + # Use a single-element list to capture the processor + captured_processors: list[Callable[[Any, Any], Any]] = [] + mock_scope_instance.add_error_processor = captured_processors.append + + # Use the context manager with exception type as key + with set_sentry_exception_levels({ValueError: "warning"}): + pass + + # Basic validation that processor was captured + assert len(captured_processors) == 1 + processor = captured_processors[0] + + # Test that it correctly processes a ValueError + event = {"level": "error", "other_data": "preserved"} + exc = ValueError("test error") + exc_info = (ValueError, exc, None) + + result = processor(event, exc_info) + + # Verify the level was changed and other data preserved + assert result["level"] == "warning" + assert result["other_data"] == "preserved"