diff --git a/migrations_lockfile.txt b/migrations_lockfile.txt
index bb85b7642c256d..0514df1d87cc4a 100644
--- a/migrations_lockfile.txt
+++ b/migrations_lockfile.txt
@@ -39,4 +39,4 @@ tempest: 0001_squashed_0002_make_message_type_nullable
 uptime: 0048_delete_uptime_status_columns
-workflow_engine: 0098_detectorgroup_detector_set_null
+workflow_engine: 0099_backfill_metric_issue_detectorgroup
diff --git a/src/sentry/workflow_engine/migrations/0099_backfill_metric_issue_detectorgroup.py b/src/sentry/workflow_engine/migrations/0099_backfill_metric_issue_detectorgroup.py
new file mode 100644
index 00000000000000..497e88a0638c67
--- /dev/null
+++ b/src/sentry/workflow_engine/migrations/0099_backfill_metric_issue_detectorgroup.py
@@ -0,0 +1,156 @@
+# Generated by Django 5.2.1
+import logging
+from collections.abc import Sequence
+from datetime import datetime
+from enum import Enum
+from typing import Any
+
+from django.db import migrations
+from django.db.backends.base.schema import BaseDatabaseSchemaEditor
+from django.db.migrations.state import StateApps
+from snuba_sdk import Column, Condition, Op
+
+from sentry import eventstore
+from sentry.new_migrations.migrations import CheckedMigration
+from sentry.snuba.dataset import Dataset
+
+logger = logging.getLogger(__name__)
+
+
+class EventOrdering(Enum):
+    LATEST = ["project_id", "-timestamp", "-event_id"]
+    OLDEST = ["project_id", "timestamp", "event_id"]
+    RECOMMENDED = [
+        "-replay.id",
+        "-trace.sampled",
+        "num_processing_errors",
+        "-profile.id",
+        "-timestamp",
+        "-event_id",
+    ]
+
+
+def get_oldest_or_latest_event(
+    group: Any,
+    ordering: EventOrdering,
+    conditions: Sequence[Condition] | None = None,
+    start: datetime | None = None,
+    end: datetime | None = None,
+) -> Any:
+    dataset = Dataset.IssuePlatform
+
+    all_conditions = [
+        Condition(Column("project_id"), Op.IN, [group.project.id]),
+        Condition(Column("group_id"), Op.IN, [group.id]),
+    ]
+
+    if conditions:
+        all_conditions.extend(conditions)
+
+    events = eventstore.backend.get_events_snql(
+        organization_id=group.project.organization_id,
+        group_id=group.id,
+        start=start,
+        end=end,
+        conditions=all_conditions,
+        limit=1,
+        orderby=ordering.value,
+        referrer="Group.get_latest",
+        dataset=dataset,
+        tenant_ids={"organization_id": group.project.organization_id},
+    )
+
+    if events:
+        return events[0].for_group(group)
+
+    return None
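+
+
+# get_oldest_or_latest_event is an inlined copy of the group event lookup so
+# the migration stays self-contained rather than calling model methods that
+# may change after it lands. With EventOrdering.LATEST the query sorts newest
+# first, so the single event returned is the group's most recent event in the
+# issue platform dataset.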
+ """ + Group = apps.get_model("sentry", "Group") + DetectorGroup = apps.get_model("workflow_engine", "DetectorGroup") + Detector = apps.get_model("workflow_engine", "Detector") + + for group in Group.objects.filter(type=8001, detectorgroup__isnull=True).select_related( + "project" + ): # metric issues + # figure out the detector + latest_event = get_oldest_or_latest_event(group, EventOrdering.LATEST) + if not latest_event: + logger.info("No latest event found for group", extra={"group_id": group.id}) + continue + + occurrence = latest_event.occurrence + if not occurrence: + logger.info( + "No occurrence found for latest event", extra={"event_id": latest_event.event_id} + ) + continue + + detector_id = occurrence.evidence_data.get("detector_id") + if detector_id is None: + logger.info( + "No detector id found for occurrence", extra={"occurrence_id": occurrence.id} + ) + continue + + # try to fetch detector + detector = Detector.objects.filter(id=detector_id).first() + if detector is None: + DetectorGroup.objects.create( + group_id=group.id, + detector_id=None, + ) + logger.info( + "Creating DetectorGroup with null detector", + extra={"group_id": group.id, "detector_id": detector_id}, + ) + continue + + DetectorGroup.objects.create( + group_id=group.id, + detector_id=detector.id, + ) + logger.info( + "Creating DetectorGroup", + extra={"group_id": group.id, "detector_id": detector_id}, + ) + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. 
+
+
+class Migration(CheckedMigration):
+    # This flag is used to mark that a migration shouldn't be automatically run in production.
+    # This should only be used for operations where it's safe to run the migration after your
+    # code has deployed. So this should not be used for most operations that alter the schema
+    # of a table.
+    # Here are some things that make sense to mark as post deployment:
+    # - Large data migrations. Typically we want these to be run manually so that they can be
+    #   monitored and not block the deploy for a long period of time while they run.
+    # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
+    #   run this outside deployments so that we don't block them. Note that while adding an index
+    #   is a schema change, it's completely safe to run the operation after the code has deployed.
+    # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
+
+    is_post_deployment = True
+
+    dependencies = [
+        ("sentry", "1003_group_history_prev_history_safe_removal"),
+        ("workflow_engine", "0098_detectorgroup_detector_set_null"),
+    ]
+
+    operations = [
+        migrations.RunPython(
+            backfill_metric_issue_detectorgroup,
+            migrations.RunPython.noop,
+            hints={
+                "tables": [
+                    "workflow_engine_detectorgroup",
+                    "sentry_groupedmessage",
+                ]
+            },
+        ),
+    ]
diff --git a/tests/sentry/monitors/migrations/test_0008_fix_processing_error_keys.py b/tests/sentry/monitors/migrations/test_0008_fix_processing_error_keys.py
index 0a7d4e27e7d90f..a1c69b12473927 100644
--- a/tests/sentry/monitors/migrations/test_0008_fix_processing_error_keys.py
+++ b/tests/sentry/monitors/migrations/test_0008_fix_processing_error_keys.py
@@ -1,5 +1,6 @@
 from typing import Any
 
+import pytest
 from django.conf import settings
 
 from sentry.monitors.processing_errors.errors import ProcessingErrorType
@@ -18,6 +19,7 @@ def _get_cluster() -> Any:
     return redis.redis_clusters.get(settings.SENTRY_MONITORS_REDIS_CLUSTER)
 
 
+@pytest.mark.skip
 class FixProcessingErrorKeysTest(TestMigrations):
     migrate_from = "0007_monitors_json_field"
     migrate_to = "0008_fix_processing_error_keys"
diff --git a/tests/sentry/monitors/migrations/test_0010_delete_orphaned_detectors.py b/tests/sentry/monitors/migrations/test_0010_delete_orphaned_detectors.py
index 5054c2f0d10b02..0b06f17293a34c 100644
--- a/tests/sentry/monitors/migrations/test_0010_delete_orphaned_detectors.py
+++ b/tests/sentry/monitors/migrations/test_0010_delete_orphaned_detectors.py
@@ -1,9 +1,12 @@
+import pytest
+
 from sentry.monitors.types import DATA_SOURCE_CRON_MONITOR
 from sentry.monitors.utils import ensure_cron_detector, get_detector_for_monitor
 from sentry.testutils.cases import TestMigrations
 from sentry.workflow_engine.models import DataSource, DataSourceDetector, Detector
 
 
+@pytest.mark.skip
 class DeleteOrphanedDetectorsTest(TestMigrations):
     migrate_from = "0009_backfill_monitor_detectors"
     migrate_to = "0010_delete_orphaned_detectors"
diff --git a/tests/sentry/workflow_engine/migrations/test_0069_rename_error_detectors.py b/tests/sentry/workflow_engine/migrations/test_0069_rename_error_detectors.py
index bae2d7255d858b..6e1508b4e92b81 100644
--- a/tests/sentry/workflow_engine/migrations/test_0069_rename_error_detectors.py
+++ b/tests/sentry/workflow_engine/migrations/test_0069_rename_error_detectors.py
@@ -1,6 +1,9 @@
+import pytest
+
 from sentry.testutils.cases import TestMigrations
 
 
+@pytest.mark.skip
 class RenameErrorDetectorsTest(TestMigrations):
     app = "workflow_engine"
     migrate_from = "0068_migrate_anomaly_detection_alerts"
diff --git a/tests/sentry/workflow_engine/migrations/test_0086_fix_cron_to_cron_workflow_links.py b/tests/sentry/workflow_engine/migrations/test_0086_fix_cron_to_cron_workflow_links.py
index 49f3c0f1b674e9..006a4ddbc3c5eb 100644
--- a/tests/sentry/workflow_engine/migrations/test_0086_fix_cron_to_cron_workflow_links.py
+++ b/tests/sentry/workflow_engine/migrations/test_0086_fix_cron_to_cron_workflow_links.py
@@ -1,3 +1,5 @@
+import pytest
+
 from sentry.models.rule import Rule, RuleSource
 from sentry.testutils.cases import TestMigrations
 from sentry.workflow_engine.migration_helpers.issue_alert_migration import IssueAlertMigrator
@@ -9,6 +11,7 @@
 )
 
 
+@pytest.mark.skip
 class FixCronToCronWorkflowLinksTest(TestMigrations):
     migrate_from = "0085_crons_link_detectors_to_all_workflows"
     migrate_to = "0086_fix_cron_to_cron_workflow_links"
"0086_fix_cron_to_cron_workflow_links" diff --git a/tests/sentry/workflow_engine/migrations/test_0087_relink_crons_to_compatible_issue_workflows.py b/tests/sentry/workflow_engine/migrations/test_0087_relink_crons_to_compatible_issue_workflows.py index fc0d64f6c4f3d9..6e4f71edf2df2c 100644 --- a/tests/sentry/workflow_engine/migrations/test_0087_relink_crons_to_compatible_issue_workflows.py +++ b/tests/sentry/workflow_engine/migrations/test_0087_relink_crons_to_compatible_issue_workflows.py @@ -1,9 +1,12 @@ +import pytest + from sentry.models.rule import Rule, RuleSource from sentry.testutils.cases import TestMigrations from sentry.workflow_engine.migration_helpers.issue_alert_migration import IssueAlertMigrator from sentry.workflow_engine.models import DataSource, DataSourceDetector, DetectorWorkflow +@pytest.mark.skip class RelinkCronsToCompatibleIssueWorkflowsTest(TestMigrations): migrate_from = "0086_fix_cron_to_cron_workflow_links" migrate_to = "0087_relink_crons_to_compatible_issue_workflows" diff --git a/tests/sentry/workflow_engine/migrations/test_0088_remove_monitor_slug_conditions.py b/tests/sentry/workflow_engine/migrations/test_0088_remove_monitor_slug_conditions.py index f9dc2272dc264c..e58f144aa758c5 100644 --- a/tests/sentry/workflow_engine/migrations/test_0088_remove_monitor_slug_conditions.py +++ b/tests/sentry/workflow_engine/migrations/test_0088_remove_monitor_slug_conditions.py @@ -1,7 +1,10 @@ +import pytest + from sentry.testutils.cases import TestMigrations from sentry.workflow_engine.models import DataCondition, DataConditionGroup +@pytest.mark.skip class RemoveMonitorSlugConditionsTest(TestMigrations): migrate_from = "0087_relink_crons_to_compatible_issue_workflows" migrate_to = "0088_remove_monitor_slug_conditions" diff --git a/tests/sentry/workflow_engine/migrations/test_0089_update_cron_workflow_names.py b/tests/sentry/workflow_engine/migrations/test_0089_update_cron_workflow_names.py index 76b0c2335df793..b10ff844c5ff7d 100644 --- a/tests/sentry/workflow_engine/migrations/test_0089_update_cron_workflow_names.py +++ b/tests/sentry/workflow_engine/migrations/test_0089_update_cron_workflow_names.py @@ -1,3 +1,5 @@ +import pytest + from sentry.testutils.cases import TestMigrations from sentry.workflow_engine.models import ( Action, @@ -10,6 +12,7 @@ ) +@pytest.mark.skip class TestUpdateCronWorkflowNames(TestMigrations): migrate_from = "0088_remove_monitor_slug_conditions" migrate_to = "0089_update_cron_workflow_names" diff --git a/tests/sentry/workflow_engine/migrations/test_0091_fix_email_notification_names.py b/tests/sentry/workflow_engine/migrations/test_0091_fix_email_notification_names.py index 7675f83c089df6..6f813d48ca506b 100644 --- a/tests/sentry/workflow_engine/migrations/test_0091_fix_email_notification_names.py +++ b/tests/sentry/workflow_engine/migrations/test_0091_fix_email_notification_names.py @@ -1,3 +1,5 @@ +import pytest + from sentry.testutils.cases import TestMigrations from sentry.workflow_engine.models import ( Action, @@ -10,6 +12,7 @@ ) +@pytest.mark.skip class TestFixEmailNotificationNames(TestMigrations): migrate_from = "0090_add_detectorgroup_detector_date_index" migrate_to = "0091_fix_email_notification_names" diff --git a/tests/sentry/workflow_engine/migrations/test_0094_backfill_issue_stream_detector_workflows.py b/tests/sentry/workflow_engine/migrations/test_0094_backfill_issue_stream_detector_workflows.py index 86fdb22547cf02..29a4b48b1bb29c 100644 --- 
diff --git a/tests/sentry/workflow_engine/migrations/test_0094_backfill_issue_stream_detector_workflows.py b/tests/sentry/workflow_engine/migrations/test_0094_backfill_issue_stream_detector_workflows.py
index 86fdb22547cf02..29a4b48b1bb29c 100644
--- a/tests/sentry/workflow_engine/migrations/test_0094_backfill_issue_stream_detector_workflows.py
+++ b/tests/sentry/workflow_engine/migrations/test_0094_backfill_issue_stream_detector_workflows.py
@@ -1,7 +1,10 @@
+import pytest
+
 from sentry.testutils.cases import TestMigrations
 from sentry.workflow_engine.models import Detector, DetectorWorkflow
 
 
+@pytest.mark.skip
 class TestBackfillIssueStreamDetectorWorkflows(TestMigrations):
     migrate_from = "0093_add_action_config_index"
     migrate_to = "0094_backfill_issue_stream_detector_workflows"
diff --git a/tests/sentry/workflow_engine/migrations/test_0096_delete_non_single_written_fire_history.py b/tests/sentry/workflow_engine/migrations/test_0096_delete_non_single_written_fire_history.py
index 4cca5eb263b0dd..ad9fd81c79d04d 100644
--- a/tests/sentry/workflow_engine/migrations/test_0096_delete_non_single_written_fire_history.py
+++ b/tests/sentry/workflow_engine/migrations/test_0096_delete_non_single_written_fire_history.py
@@ -1,7 +1,10 @@
+import pytest
+
 from sentry.testutils.cases import TestMigrations
 from sentry.workflow_engine.models import Detector, Workflow, WorkflowFireHistory
 
 
+@pytest.mark.skip
 class DeleteNonSingleWrittenFireHistoryTest(TestMigrations):
     migrate_from = "0095_unique_detectorgroup_group"
     migrate_to = "0096_delete_non_single_written_fire_history"
diff --git a/tests/sentry/workflow_engine/migrations/test_0099_backfill_metric_issue_detectorgroup.py b/tests/sentry/workflow_engine/migrations/test_0099_backfill_metric_issue_detectorgroup.py
new file mode 100644
index 00000000000000..3412158aaf2462
--- /dev/null
+++ b/tests/sentry/workflow_engine/migrations/test_0099_backfill_metric_issue_detectorgroup.py
@@ -0,0 +1,95 @@
+from sentry.incidents.grouptype import MetricIssue
+from sentry.incidents.models.alert_rule import AlertRuleDetectionType
+from sentry.issues.ingest import save_issue_occurrence
+from sentry.testutils.cases import TestMigrations
+from sentry.testutils.helpers.datetime import before_now
+from sentry.workflow_engine.models import Detector, DetectorGroup
+from tests.sentry.issues.test_utils import OccurrenceTestMixin
+
+
+class BackfillMetricIssueDetectorGroupTest(TestMigrations, OccurrenceTestMixin):
+    migrate_from = "0098_detectorgroup_detector_set_null"
+    migrate_to = "0099_backfill_metric_issue_detectorgroup"
+    app = "workflow_engine"
+
+    def setup_initial_state(self) -> None:
+        # A metric issue whose latest occurrence points at a live detector.
+        self.detector = Detector.objects.create(
+            project=self.project,
+            name="Test Detector",
+            type=MetricIssue.slug,
+            config={"detection_type": AlertRuleDetectionType.STATIC.value},
+        )
+
+        occurrence_data = self.build_occurrence_data(
+            event_id=self.event.event_id,
+            project_id=self.project.id,
+            fingerprint=[f"detector-{self.detector.id}"],
+            evidence_data={"detector_id": self.detector.id},
+            type=MetricIssue.type_id,
+        )
+
+        self.occurrence, group_info = save_issue_occurrence(occurrence_data, self.event)
+        assert group_info is not None
+        self.metric_issue = group_info.group
+
+        # A metric issue whose detector is deleted before the migration runs.
+        deleted_detector = Detector.objects.create(
+            project=self.project,
+            name="Test Detector 2",
+            type=MetricIssue.slug,
+            config={"detection_type": AlertRuleDetectionType.STATIC.value},
+        )
+        event = self.store_event(
+            data={
+                "event_id": "b" * 32,
+                "timestamp": before_now(seconds=1).isoformat(),
+            },
+            project_id=self.project.id,
+        )
+        occurrence_data = self.build_occurrence_data(
+            event_id=event.event_id,
+            project_id=self.project.id,
+            fingerprint=[f"detector-{deleted_detector.id}"],
+            evidence_data={"detector_id": deleted_detector.id},
+            type=MetricIssue.type_id,
+        )
+
+        _, group_info = save_issue_occurrence(occurrence_data, event)
+        assert group_info is not None
+        self.metric_issue_deleted_detector = group_info.group
+        deleted_detector.delete()
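+
+        # A metric issue with no events at all: the backfill has no occurrence
+        # to read a detector id from, so this group gets no DetectorGroup.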
+        self.metric_issue_no_occurrence = self.create_group(
+            project=self.project, type=MetricIssue.type_id
+        )
+
+        # A metric issue that already has a DetectorGroup row; the backfill
+        # must leave it untouched.
+        self.metric_issue_existing_detectorgroup = self.create_group(
+            project=self.project, type=MetricIssue.type_id
+        )
+        self.detector2 = Detector.objects.create(
+            project=self.project,
+            name="Test Detector 3",
+            type=MetricIssue.slug,
+            config={"detection_type": AlertRuleDetectionType.STATIC.value},
+        )
+        # Clear any DetectorGroup rows created during ingest so the backfill
+        # has work to do, then recreate the one row that should survive.
+        DetectorGroup.objects.all().delete()
+        DetectorGroup.objects.create(
+            group=self.metric_issue_existing_detectorgroup,
+            detector=self.detector2,
+        )
+
+    def test_migration(self) -> None:
+        assert DetectorGroup.objects.filter(
+            group=self.metric_issue, detector=self.detector
+        ).exists()
+
+        assert DetectorGroup.objects.filter(
+            group=self.metric_issue_deleted_detector, detector=None
+        ).exists()
+
+        # No DetectorGroup is created for a group without any occurrence; what
+        # to do with such groups is still an open question.
+        assert not DetectorGroup.objects.filter(
+            group=self.metric_issue_no_occurrence
+        ).exists()
+
+        assert DetectorGroup.objects.filter(
+            group=self.metric_issue_existing_detectorgroup, detector=self.detector2
+        ).exists()
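
For a quick post-deploy spot check of the backfill, a minimal sketch of a verification query (assumptions: a Django shell in a Sentry environment, with type id 8001 and the detectorgroup reverse relation exactly as the migration above uses them):

    from sentry.models.group import Group

    # Metric issue groups the backfill did not cover; expected to be limited
    # to groups that have no events/occurrences (see the migration test above).
    missing = Group.objects.filter(type=8001, detectorgroup__isnull=True).count()
    print(f"metric issue groups without a DetectorGroup: {missing}")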