-
Notifications
You must be signed in to change notification settings - Fork 514
rename session recording to session replay #1207
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1 +1,22 @@ | ||
| ALTER TABLE "SessionRecordingChunk" RENAME COLUMN "tabId" TO "sessionReplaySegmentId"; | ||
|
|
||
| ALTER TABLE "SessionRecording" RENAME TO "SessionReplay"; | ||
| ALTER TABLE "SessionRecordingChunk" RENAME TO "SessionReplayChunk"; | ||
| ALTER TABLE "SessionReplayChunk" RENAME COLUMN "sessionRecordingId" TO "sessionReplayId"; | ||
|
|
||
| -- Rename primary key constraints | ||
| ALTER TABLE "SessionReplay" RENAME CONSTRAINT "SessionRecording_pkey" TO "SessionReplay_pkey"; | ||
| ALTER TABLE "SessionReplayChunk" RENAME CONSTRAINT "SessionRecordingChunk_pkey" TO "SessionReplayChunk_pkey"; | ||
|
|
||
| -- Rename foreign key constraints | ||
| ALTER TABLE "SessionReplay" RENAME CONSTRAINT "SessionRecording_tenancyId_fkey" TO "SessionReplay_tenancyId_fkey"; | ||
| ALTER TABLE "SessionReplay" RENAME CONSTRAINT "SessionRecording_tenancyId_projectUserId_fkey" TO "SessionReplay_tenancyId_projectUserId_fkey"; | ||
| ALTER TABLE "SessionReplayChunk" RENAME CONSTRAINT "SessionRecordingChunk_tenancyId_fkey" TO "SessionReplayChunk_tenancyId_fkey"; | ||
| ALTER TABLE "SessionReplayChunk" RENAME CONSTRAINT "SessionRecordingChunk_tenancyId_sessionRecordingId_fkey" TO "SessionReplayChunk_tenancyId_sessionReplayId_fkey"; | ||
|
|
||
| -- Rename indexes | ||
| ALTER INDEX "SessionRecording_tenancyId_lastEventAt_idx" RENAME TO "SessionReplay_tenancyId_lastEventAt_idx"; | ||
| ALTER INDEX "SessionRecording_tenancyId_projectUserId_startedAt_idx" RENAME TO "SessionReplay_tenancyId_projectUserId_startedAt_idx"; | ||
| ALTER INDEX "SessionRecording_tenancyId_refreshTokenId_updatedAt_idx" RENAME TO "SessionReplay_tenancyId_refreshTokenId_updatedAt_idx"; | ||
| ALTER INDEX "SessionRecordingChunk_tenancyId_sessionRecordingId_batchId_key" RENAME TO "SessionReplayChunk_tenancyId_sessionReplayId_batchId_key"; | ||
| ALTER INDEX "SessionRecordingChunk_tenancyId_sessionRecordingId_createdA_idx" RENAME TO "SessionReplayChunk_tenancyId_sessionReplayId_createdAt_idx"; | ||
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
|
|
@@ -60,8 +60,8 @@ model Tenancy { | |||||
| organizationId String? @db.Uuid | ||||||
| hasNoOrganization BooleanTrue? | ||||||
| emailOutboxes EmailOutbox[] | ||||||
| sessionRecordings SessionRecording[] | ||||||
| sessionRecordingChunks SessionRecordingChunk[] | ||||||
| sessionReplays SessionReplay[] | ||||||
| sessionReplayChunks SessionReplayChunk[] | ||||||
|
|
||||||
| @@unique([projectId, branchId, organizationId]) | ||||||
| @@unique([projectId, branchId, hasNoOrganization]) | ||||||
|
|
@@ -236,7 +236,7 @@ model ProjectUser { | |||||
| Project Project? @relation(fields: [projectId], references: [id]) | ||||||
| projectId String? | ||||||
| userNotificationPreference UserNotificationPreference[] | ||||||
| sessionRecordings SessionRecording[] | ||||||
| sessionReplays SessionReplay[] | ||||||
|
|
||||||
| @@id([tenancyId, projectUserId]) | ||||||
| @@unique([mirroredProjectId, mirroredBranchId, projectUserId]) | ||||||
|
|
@@ -280,7 +280,7 @@ model ProjectUserOAuthAccount { | |||||
| @@index([tenancyId, projectUserId]) | ||||||
| } | ||||||
|
|
||||||
| model SessionRecording { | ||||||
| model SessionReplay { | ||||||
| id String @db.Uuid | ||||||
|
|
||||||
| tenancyId String @db.Uuid | ||||||
|
|
@@ -296,20 +296,21 @@ model SessionRecording { | |||||
| projectUser ProjectUser @relation(fields: [tenancyId, projectUserId], references: [tenancyId, projectUserId], onDelete: Cascade) | ||||||
| tenancy Tenancy @relation(fields: [tenancyId], references: [id], onDelete: Cascade) | ||||||
|
|
||||||
| chunks SessionRecordingChunk[] | ||||||
| chunks SessionReplayChunk[] | ||||||
|
|
||||||
| @@id([tenancyId, id]) | ||||||
| @@map("SessionReplay") | ||||||
| @@index([tenancyId, projectUserId, startedAt]) | ||||||
| @@index([tenancyId, lastEventAt]) | ||||||
| // index by updatedAt instead of lastEventAt because event timing can be spoofed | ||||||
| @@index([tenancyId, refreshTokenId, updatedAt]) | ||||||
| } | ||||||
|
|
||||||
| model SessionRecordingChunk { | ||||||
| model SessionReplayChunk { | ||||||
| id String @id @default(uuid()) @db.Uuid | ||||||
|
|
||||||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Redundant `@map`: the `@map("sessionReplayId")` attribute maps the field to a column with the identical name, so it has no effect and can be removed.
Suggested change
Note: If this suggestion doesn't match your team's coding style, reply to this and let me know. I'll remember it for next time! |
||||||
| tenancyId String @db.Uuid | ||||||
| sessionRecordingId String @db.Uuid | ||||||
| tenancyId String @db.Uuid | ||||||
| sessionReplayId String @db.Uuid @map("sessionReplayId") | ||||||
|
|
||||||
| // Unique per uploaded batch for a given session id. | ||||||
| batchId String @db.Uuid | ||||||
|
|
@@ -329,11 +330,12 @@ model SessionRecordingChunk { | |||||
|
|
||||||
| createdAt DateTime @default(now()) | ||||||
|
|
||||||
| sessionRecording SessionRecording @relation(fields: [tenancyId, sessionRecordingId], references: [tenancyId, id], onDelete: Cascade) | ||||||
| tenancy Tenancy @relation(fields: [tenancyId], references: [id], onDelete: Cascade) | ||||||
| sessionReplay SessionReplay @relation(fields: [tenancyId, sessionReplayId], references: [tenancyId, id], onDelete: Cascade) | ||||||
| tenancy Tenancy @relation(fields: [tenancyId], references: [id], onDelete: Cascade) | ||||||
|
|
||||||
| @@unique([tenancyId, sessionRecordingId, batchId]) | ||||||
| @@index([tenancyId, sessionRecordingId, createdAt]) | ||||||
| @@unique([tenancyId, sessionReplayId, batchId]) | ||||||
| @@map("SessionReplayChunk") | ||||||
| @@index([tenancyId, sessionReplayId, createdAt]) | ||||||
| } | ||||||
|
|
||||||
| enum ContactChannelType { | ||||||
|
|
||||||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -19,7 +19,7 @@ export const GET = createSmartRouteHandler({ | |
| tenancy: adaptSchema.defined(), | ||
| }).defined(), | ||
| params: yupObject({ | ||
| session_recording_id: yupString().defined(), | ||
| session_replay_id: yupString().defined(), | ||
| }).defined(), | ||
| query: yupObject({ | ||
| offset: yupString().optional(), | ||
|
|
@@ -49,19 +49,19 @@ export const GET = createSmartRouteHandler({ | |
| async handler({ auth, params, query }) { | ||
| const prisma = await getPrismaClientForTenancy(auth.tenancy); | ||
|
|
||
| const sessionRecordingId = params.session_recording_id; | ||
| const exists = await prisma.sessionRecording.findUnique({ | ||
| where: { tenancyId_id: { tenancyId: auth.tenancy.id, id: sessionRecordingId } }, | ||
| const sessionReplayId = params.session_replay_id; | ||
| const exists = await prisma.sessionReplay.findUnique({ | ||
| where: { tenancyId_id: { tenancyId: auth.tenancy.id, id: sessionReplayId } }, | ||
| select: { id: true }, | ||
| }); | ||
| if (!exists) { | ||
| throw new KnownErrors.ItemNotFound(sessionRecordingId); | ||
| throw new KnownErrors.ItemNotFound(sessionReplayId); | ||
| } | ||
|
|
||
| const chunks = await prisma.sessionRecordingChunk.findMany({ | ||
| const chunks = await prisma.sessionReplayChunk.findMany({ | ||
| where: { | ||
| tenancyId: auth.tenancy.id, | ||
| sessionRecordingId, | ||
| sessionReplayId, | ||
| }, | ||
| orderBy: [{ firstEventAt: "asc" }, { id: "asc" }], | ||
| select: { | ||
|
|
@@ -110,20 +110,20 @@ export const GET = createSmartRouteHandler({ | |
| try { | ||
| parsed = JSON.parse(new TextDecoder().decode(unzipped)); | ||
| } catch (e) { | ||
| throw new StackAssertionError("Failed to decode session recording chunk JSON", { cause: e }); | ||
| throw new StackAssertionError("Failed to decode session replay chunk JSON", { cause: e }); | ||
| } | ||
|
|
||
| if (typeof parsed !== "object" || parsed === null) { | ||
| throw new StackAssertionError("Decoded session recording chunk is not an object"); | ||
| throw new StackAssertionError("Decoded session replay chunk is not an object"); | ||
| } | ||
| if (parsed.session_recording_id !== sessionRecordingId) { | ||
| throw new StackAssertionError("Decoded session recording chunk session_recording_id mismatch", { | ||
| expected: sessionRecordingId, | ||
| actual: parsed.session_recording_id, | ||
| if (parsed.session_replay_id !== sessionReplayId) { | ||
| throw new StackAssertionError("Decoded session replay chunk session_replay_id mismatch", { | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. |
||
| expected: sessionReplayId, | ||
| actual: parsed.session_replay_id, | ||
| }); | ||
| } | ||
| if (!Array.isArray(parsed.events)) { | ||
| throw new StackAssertionError("Decoded session recording chunk events is not an array"); | ||
| throw new StackAssertionError("Decoded session replay chunk events is not an array"); | ||
| } | ||
|
|
||
| chunkEvents[idx] = { chunk_id: chunk.id, events: parsed.events as any[] }; | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Modifying an already-committed migration
This migration file previously contained only one statement (renaming the
`tabId` column), which was already committed on the base branch `rename-replay-tab-id`. This PR appends three additional DDL statements to the same migration file. Prisma tracks which migrations have been applied by storing the SQL content hash in
`_prisma_migrations`. Any environment that already ran the original single-line migration will have a hash mismatch against this updated file. When `prisma migrate deploy` is next run on such an environment, Prisma will report a "drift" error and refuse to apply, because the applied migration no longer matches the file on disk. Since the base branch is a pre-merge feature branch (not yet applied to
`dev` or production), the risk here is scoped to developer local environments and CI runs on the base branch. However, if any developer or CI runner has already applied the one-line migration against a real database, they will need to manually resolve the drift (e.g. `prisma migrate resolve --applied`). The safer practice is to add a new, separate migration file for the table renames (e.g.
`20260217000000_rename_session_recording_tables_to_session_replay.sql`). This keeps each migration atomic, avoids hash conflicts on any already-migrated database, and also fixes the misleading migration name (which currently only references the `tabId` rename, not the table renames).