3 changes: 3 additions & 0 deletions src/sentry/deletions/__init__.py
@@ -106,6 +106,9 @@ def load_defaults(manager: DeletionTaskManager) -> None:
     manager.register(integrations.RepositoryProjectPathConfig, defaults.RepositoryProjectPathConfigDeletionTask)
     manager.register(monitors.Monitor, defaults.MonitorDeletionTask)
     manager.register(monitors.MonitorEnvironment, defaults.MonitorEnvironmentDeletionTask)
+    manager.register(monitors.MonitorCheckIn, BulkModelDeletionTask)
+    manager.register(monitors.MonitorIncident, defaults.MonitorIncidentDeletionTask)
+    manager.register(monitors.MonitorEnvBrokenDetection, BulkModelDeletionTask)
     manager.register(sentry_apps.PlatformExternalIssue, defaults.PlatformExternalIssueDeletionTask)
     manager.register(sentry_apps.SentryApp, defaults.SentryAppDeletionTask)
     manager.register(sentry_apps.SentryAppInstallation, defaults.SentryAppInstallationDeletionTask)
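How these registrations get exercised: scheduling a parent record is enough, and the deletion runner resolves each child model to its registered task. A minimal sketch following the pattern used by the tests added in this PR; the RegionScheduledDeletion import path is an assumption (the tests reach it through self.ScheduledDeletion), and an existing `project` is assumed.

from sentry.deletions.tasks.scheduled import run_scheduled_deletions
from sentry.models.scheduledeletion import RegionScheduledDeletion  # assumed import path
from sentry.monitors.models import Monitor, ScheduleType

monitor = Monitor.objects.create(
    organization_id=project.organization_id,
    project_id=project.id,
    config={"schedule": "* * * * *", "schedule_type": ScheduleType.CRONTAB},
)

# Scheduling only the Monitor should cascade through MonitorDeletionTask to
# incidents, check-ins, environments, and broken-env detections.
# In the tests this runs inside `with self.tasks():`.
RegionScheduledDeletion.schedule(instance=monitor, days=0)
run_scheduled_deletions()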
1 change: 1 addition & 0 deletions src/sentry/deletions/defaults/__init__.py
@@ -14,6 +14,7 @@
 from .incident import * # noqa: F401,F403
 from .monitor import * # noqa: F401,F403
 from .monitor_environment import * # noqa: F401,F403
+from .monitor_incident import * # noqa: F401,F403
 from .organization import * # noqa: F401,F403
 from .organizationintegration import * # noqa: F401,F403
 from .organizationmember import * # noqa: F401,F403
12 changes: 3 additions & 9 deletions src/sentry/deletions/defaults/monitor.py
@@ -1,9 +1,4 @@
-from sentry.deletions.base import (
-    BaseRelation,
-    BulkModelDeletionTask,
-    ModelDeletionTask,
-    ModelRelation,
-)
+from sentry.deletions.base import BaseRelation, ModelDeletionTask, ModelRelation
 from sentry.monitors.models import Monitor
 
 
@@ -12,8 +7,7 @@ def get_child_relations(self, instance: Monitor) -> list[BaseRelation]:
         from sentry.monitors import models
 
         return [
-            ModelRelation(
-                models.MonitorCheckIn, {"monitor_id": instance.id}, BulkModelDeletionTask
-            ),
+            ModelRelation(models.MonitorIncident, {"monitor_id": instance.id}),
+            ModelRelation(models.MonitorCheckIn, {"monitor_id": instance.id}),
             ModelRelation(models.MonitorEnvironment, {"monitor_id": instance.id}),
         ]
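A hedged note on the ordering above: MonitorIncident rows reference check-ins through their starting_checkin and resolving_checkin foreign keys, which is presumably why the incident relation is listed before MonitorCheckIn. A small sketch of that dependency using the same fields the new tests exercise; `monitor`, `monitor_env`, `failed_checkin`, and `ok_checkin` are assumed to exist.

from sentry.monitors.models import MonitorIncident

incident = MonitorIncident.objects.create(
    monitor=monitor,
    monitor_environment=monitor_env,
    starting_checkin=failed_checkin,   # FK to MonitorCheckIn
    resolving_checkin=ok_checkin,      # FK to MonitorCheckIn
)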
12 changes: 5 additions & 7 deletions src/sentry/deletions/defaults/monitor_environment.py
@@ -1,9 +1,4 @@
-from sentry.deletions.base import (
-    BaseRelation,
-    BulkModelDeletionTask,
-    ModelDeletionTask,
-    ModelRelation,
-)
+from sentry.deletions.base import BaseRelation, ModelDeletionTask, ModelRelation
 from sentry.monitors.models import MonitorEnvironment
 
 
@@ -12,9 +7,12 @@ def get_child_relations(self, instance: MonitorEnvironment) -> list[BaseRelation]:
         from sentry.monitors import models
 
         return [
+            ModelRelation(
+                models.MonitorIncident,
+                {"monitor_environment_id": instance.id},
+            ),
             ModelRelation(
                 models.MonitorCheckIn,
                 {"monitor_environment_id": instance.id},
-                BulkModelDeletionTask,
             ),
         ]
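Dropping the explicit BulkModelDeletionTask argument here appears to lean on the new per-model registration in src/sentry/deletions/__init__.py: a bare ModelRelation presumably resolves to whatever task is registered for the model. A sketch of the equivalent explicit form under that assumption; `instance` is the MonitorEnvironment being deleted.

from sentry.deletions.base import BulkModelDeletionTask, ModelRelation
from sentry.monitors import models

ModelRelation(
    models.MonitorCheckIn,
    {"monitor_environment_id": instance.id},
    BulkModelDeletionTask,  # now implied by manager.register(monitors.MonitorCheckIn, BulkModelDeletionTask)
)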
14 changes: 14 additions & 0 deletions src/sentry/deletions/defaults/monitor_incident.py
@@ -0,0 +1,14 @@
from sentry.deletions.base import BaseRelation, ModelDeletionTask, ModelRelation
from sentry.monitors.models import MonitorIncident


class MonitorIncidentDeletionTask(ModelDeletionTask[MonitorIncident]):
    def get_child_relations(self, instance: MonitorIncident) -> list[BaseRelation]:
        from sentry.monitors import models

        return [
            ModelRelation(
                models.MonitorEnvBrokenDetection,
                {"monitor_incident_id": instance.id},
            ),
        ]
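In effect, the new task ensures broken-environment detections disappear with their incident. Roughly, the relation above corresponds to the following queryset being cleaned up before the incident rows themselves are removed; this is a sketch of the semantics, not of the deletion machinery, and `incident` is assumed to exist.

from sentry.monitors.models import MonitorEnvBrokenDetection

MonitorEnvBrokenDetection.objects.filter(monitor_incident_id=incident.id).delete()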
268 changes: 268 additions & 0 deletions tests/sentry/deletions/test_monitor_checkin.py
@@ -0,0 +1,268 @@
from sentry.deletions.tasks.scheduled import run_scheduled_deletions
from sentry.models.environment import Environment
from sentry.monitors.models import (
CheckInStatus,
Monitor,
MonitorCheckIn,
MonitorEnvBrokenDetection,
MonitorEnvironment,
MonitorIncident,
ScheduleType,
)
from sentry.testutils.cases import APITestCase, TransactionTestCase
from sentry.testutils.hybrid_cloud import HybridCloudTestMixin


class DeleteMonitorCheckInTest(APITestCase, TransactionTestCase, HybridCloudTestMixin):
def test_delete_monitor_checkin_with_incidents_and_detections(self) -> None:
"""
Test that deleting MonitorCheckIns properly cascades to:
- MonitorIncidents (via starting_checkin_id and resolving_checkin_id)
- MonitorEnvBrokenDetection (via MonitorIncident)

This tests the get_child_relations_bulk() implementation which should:
1. Use __in queries for MonitorIncidents pointing to multiple check-ins
2. Bulk delete MonitorEnvBrokenDetection via BulkModelDeletionTask
"""
project = self.create_project(name="test")
env = Environment.objects.create(organization_id=project.organization_id, name="prod")

monitor = Monitor.objects.create(
organization_id=project.organization.id,
project_id=project.id,
config={"schedule": "* * * * *", "schedule_type": ScheduleType.CRONTAB},
)
monitor_env = MonitorEnvironment.objects.create(
monitor=monitor,
environment_id=env.id,
)

# Create multiple check-ins to test bulk deletion
checkin1 = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_env,
project_id=project.id,
status=CheckInStatus.ERROR,
)
checkin2 = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_env,
project_id=project.id,
status=CheckInStatus.ERROR,
)
checkin3 = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_env,
project_id=project.id,
status=CheckInStatus.OK,
)

# Create incidents - one starting with checkin1, resolving with checkin3
incident1 = MonitorIncident.objects.create(
monitor=monitor,
monitor_environment=monitor_env,
starting_checkin=checkin1,
resolving_checkin=checkin3,
)

# Create another incident - starting with checkin2, not yet resolved
incident2 = MonitorIncident.objects.create(
monitor=monitor,
monitor_environment=monitor_env,
starting_checkin=checkin2,
resolving_checkin=None,
)

# Create MonitorEnvBrokenDetection pointing to incidents
detection1 = MonitorEnvBrokenDetection.objects.create(
monitor_incident=incident1,
)
detection2 = MonitorEnvBrokenDetection.objects.create(
monitor_incident=incident2,
)

# Verify initial state
assert MonitorCheckIn.objects.filter(monitor=monitor).count() == 3
assert MonitorIncident.objects.filter(monitor=monitor).count() == 2
assert MonitorEnvBrokenDetection.objects.count() == 2

# Schedule monitor for deletion (which should cascade to check-ins)
self.ScheduledDeletion.schedule(instance=monitor, days=0)

with self.tasks():
run_scheduled_deletions()

# Verify everything is deleted
assert not Monitor.objects.filter(id=monitor.id).exists()
assert not MonitorEnvironment.objects.filter(id=monitor_env.id).exists()
assert not MonitorCheckIn.objects.filter(
id__in=[checkin1.id, checkin2.id, checkin3.id]
).exists()
assert not MonitorIncident.objects.filter(id__in=[incident1.id, incident2.id]).exists()
assert not MonitorEnvBrokenDetection.objects.filter(
id__in=[detection1.id, detection2.id]
).exists()

# Shared objects should continue to exist
assert Environment.objects.filter(id=env.id).exists()
assert project.__class__.objects.filter(id=project.id).exists()

def test_delete_multiple_checkins_with_shared_incident(self) -> None:
"""
Test edge case where one incident references multiple check-ins
(starting_checkin != resolving_checkin from the same batch).
"""
project = self.create_project(name="test")
env = Environment.objects.create(organization_id=project.organization_id, name="prod")

monitor = Monitor.objects.create(
organization_id=project.organization.id,
project_id=project.id,
config={"schedule": "* * * * *", "schedule_type": ScheduleType.CRONTAB},
)
monitor_env = MonitorEnvironment.objects.create(
monitor=monitor,
environment_id=env.id,
)

# Create check-ins
failed_checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_env,
project_id=project.id,
status=CheckInStatus.ERROR,
)
ok_checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_env,
project_id=project.id,
status=CheckInStatus.OK,
)

# Create incident that references BOTH check-ins
incident = MonitorIncident.objects.create(
monitor=monitor,
monitor_environment=monitor_env,
starting_checkin=failed_checkin,
resolving_checkin=ok_checkin,
)

detection = MonitorEnvBrokenDetection.objects.create(
monitor_incident=incident,
)

# Schedule monitor for deletion
self.ScheduledDeletion.schedule(instance=monitor, days=0)

with self.tasks():
run_scheduled_deletions()

# Verify complete deletion
assert not MonitorCheckIn.objects.filter(id__in=[failed_checkin.id, ok_checkin.id]).exists()
assert not MonitorIncident.objects.filter(id=incident.id).exists()
assert not MonitorEnvBrokenDetection.objects.filter(id=detection.id).exists()

def test_delete_monitor_only_affects_its_own_checkins(self) -> None:
"""
Test that deleting one monitor's check-ins doesn't affect another monitor's data.
This verifies that the __in queries are properly scoped.
"""
project = self.create_project(name="test")
env = Environment.objects.create(organization_id=project.organization_id, name="prod")

# Create first monitor with check-ins and incidents
monitor1 = Monitor.objects.create(
organization_id=project.organization.id,
project_id=project.id,
name="Monitor 1",
config={"schedule": "* * * * *", "schedule_type": ScheduleType.CRONTAB},
)
monitor_env1 = MonitorEnvironment.objects.create(
monitor=monitor1,
environment_id=env.id,
)
checkin1_m1 = MonitorCheckIn.objects.create(
monitor=monitor1,
monitor_environment=monitor_env1,
project_id=project.id,
status=CheckInStatus.ERROR,
)
checkin2_m1 = MonitorCheckIn.objects.create(
monitor=monitor1,
monitor_environment=monitor_env1,
project_id=project.id,
status=CheckInStatus.OK,
)
incident1_m1 = MonitorIncident.objects.create(
monitor=monitor1,
monitor_environment=monitor_env1,
starting_checkin=checkin1_m1,
resolving_checkin=checkin2_m1,
)
detection1_m1 = MonitorEnvBrokenDetection.objects.create(
monitor_incident=incident1_m1,
)

# Create second monitor with check-ins and incidents
monitor2 = Monitor.objects.create(
organization_id=project.organization.id,
project_id=project.id,
name="Monitor 2",
config={"schedule": "0 * * * *", "schedule_type": ScheduleType.CRONTAB},
)
monitor_env2 = MonitorEnvironment.objects.create(
monitor=monitor2,
environment_id=env.id,
)
checkin1_m2 = MonitorCheckIn.objects.create(
monitor=monitor2,
monitor_environment=monitor_env2,
project_id=project.id,
status=CheckInStatus.ERROR,
)
checkin2_m2 = MonitorCheckIn.objects.create(
monitor=monitor2,
monitor_environment=monitor_env2,
project_id=project.id,
status=CheckInStatus.OK,
)
incident1_m2 = MonitorIncident.objects.create(
monitor=monitor2,
monitor_environment=monitor_env2,
starting_checkin=checkin1_m2,
resolving_checkin=checkin2_m2,
)
detection1_m2 = MonitorEnvBrokenDetection.objects.create(
monitor_incident=incident1_m2,
)

# Verify initial state - both monitors exist with their data
assert MonitorCheckIn.objects.count() == 4
assert MonitorIncident.objects.count() == 2
assert MonitorEnvBrokenDetection.objects.count() == 2

# Delete only monitor1
self.ScheduledDeletion.schedule(instance=monitor1, days=0)

with self.tasks():
run_scheduled_deletions()

# Verify monitor1 and its data are deleted
assert not Monitor.objects.filter(id=monitor1.id).exists()
assert not MonitorEnvironment.objects.filter(id=monitor_env1.id).exists()
assert not MonitorCheckIn.objects.filter(id__in=[checkin1_m1.id, checkin2_m1.id]).exists()
assert not MonitorIncident.objects.filter(id=incident1_m1.id).exists()
assert not MonitorEnvBrokenDetection.objects.filter(id=detection1_m1.id).exists()

# Verify monitor2 and ALL its data still exist (unaffected)
assert Monitor.objects.filter(id=monitor2.id).exists()
assert MonitorEnvironment.objects.filter(id=monitor_env2.id).exists()
assert MonitorCheckIn.objects.filter(id=checkin1_m2.id).exists()
assert MonitorCheckIn.objects.filter(id=checkin2_m2.id).exists()
assert MonitorIncident.objects.filter(id=incident1_m2.id).exists()
assert MonitorEnvBrokenDetection.objects.filter(id=detection1_m2.id).exists()

# Verify counts - should only have monitor2's data remaining
assert MonitorCheckIn.objects.count() == 2
assert MonitorIncident.objects.count() == 1
assert MonitorEnvBrokenDetection.objects.count() == 1