diff --git a/.changeset/large-jars-kiss.md b/.changeset/large-jars-kiss.md new file mode 100644 index 0000000000..11fecc9a39 --- /dev/null +++ b/.changeset/large-jars-kiss.md @@ -0,0 +1,6 @@ +--- +"@core/electric": minor +"electric-sql": minor +--- + +feat: server-driven unsubscribes to allow clients to unsubscribe from shapes diff --git a/clients/typescript/src/_generated/protocol/satellite.ts b/clients/typescript/src/_generated/protocol/satellite.ts index d729b2b5d9..60559d0c34 100644 --- a/clients/typescript/src/_generated/protocol/satellite.ts +++ b/clients/typescript/src/_generated/protocol/satellite.ts @@ -1,9 +1,3 @@ -// Code generated by protoc-gen-ts_proto. DO NOT EDIT. -// versions: -// protoc-gen-ts_proto v1.175.0 -// protoc v5.26.1 -// source: protocol/satellite.proto - /* eslint-disable */ import Long from "long"; import _m0 from "protobufjs/minimal.js"; @@ -132,7 +126,11 @@ export interface SatInStartReplicationReq { * The SQL dialect used by the client * Defaults to SQLite if not specified */ - sqlDialect?: SatInStartReplicationReq_Dialect | undefined; + sqlDialect?: + | SatInStartReplicationReq_Dialect + | undefined; + /** List of subscription IDs for which the client observed a GONE batch after unsubscribing */ + observedGoneBatch: string[]; } export enum SatInStartReplicationReq_Option { @@ -261,6 +259,8 @@ export interface SatOpLogAck { subscriptionIds: string[]; /** Transaction IDs for which additional data was received immediately after this transaction */ additionalDataSourceIds: Long[]; + /** Subscription IDs for GONE batches received at this LSN */ + goneSubscriptionIds: string[]; } /** @@ -696,6 +696,20 @@ export interface SatSubsDataEnd { $type: "Electric.Satellite.SatSubsDataEnd"; } +/** Begin delimiter for the incoming unsubscription data */ +export interface SatUnsubsDataBegin { + $type: "Electric.Satellite.SatUnsubsDataBegin"; + /** Identifiers of the subscriptions that were unsubscribed */ + subscriptionIds: string[]; + /** LSN at which this data is being sent. May be a duplicate of a transaction that was sent immediately before. */ + lsn: Uint8Array; +} + +/** End delimiter for the incoming unsubscription data */ +export interface SatUnsubsDataEnd { + $type: "Electric.Satellite.SatUnsubsDataEnd"; +} + /** Begin delimiter for the initial shape data */ export interface SatShapeDataBegin { $type: "Electric.Satellite.SatShapeDataBegin"; @@ -711,7 +725,7 @@ export interface SatShapeDataEnd { } function createBaseSatRpcRequest(): SatRpcRequest { - return { $type: "Electric.Satellite.SatRpcRequest", method: "", requestId: 0, message: new Uint8Array(0) }; + return { $type: "Electric.Satellite.SatRpcRequest", method: "", requestId: 0, message: new Uint8Array() }; } export const SatRpcRequest = { @@ -768,13 +782,14 @@ export const SatRpcRequest = { }, create, I>>(base?: I): SatRpcRequest { - return SatRpcRequest.fromPartial(base ?? ({} as any)); + return SatRpcRequest.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatRpcRequest { const message = createBaseSatRpcRequest(); message.method = object.method ?? ""; message.requestId = object.requestId ?? 0; - message.message = object.message ?? new Uint8Array(0); + message.message = object.message ?? new Uint8Array(); return message; }, }; @@ -849,8 +864,9 @@ export const SatRpcResponse = { }, create, I>>(base?: I): SatRpcResponse { - return SatRpcResponse.fromPartial(base ?? ({} as any)); + return SatRpcResponse.fromPartial(base ??
{}); }, + fromPartial, I>>(object: I): SatRpcResponse { const message = createBaseSatRpcResponse(); message.method = object.method ?? ""; @@ -913,8 +929,9 @@ export const SatAuthHeaderPair = { }, create, I>>(base?: I): SatAuthHeaderPair { - return SatAuthHeaderPair.fromPartial(base ?? ({} as any)); + return SatAuthHeaderPair.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatAuthHeaderPair { const message = createBaseSatAuthHeaderPair(); message.key = object.key ?? 0; @@ -983,8 +1000,9 @@ export const SatAuthReq = { }, create, I>>(base?: I): SatAuthReq { - return SatAuthReq.fromPartial(base ?? ({} as any)); + return SatAuthReq.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatAuthReq { const message = createBaseSatAuthReq(); message.id = object.id ?? ""; @@ -1044,8 +1062,9 @@ export const SatAuthResp = { }, create, I>>(base?: I): SatAuthResp { - return SatAuthResp.fromPartial(base ?? ({} as any)); + return SatAuthResp.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatAuthResp { const message = createBaseSatAuthResp(); message.id = object.id ?? ""; @@ -1114,8 +1133,9 @@ export const SatErrorResp = { }, create, I>>(base?: I): SatErrorResp { - return SatErrorResp.fromPartial(base ?? ({} as any)); + return SatErrorResp.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatErrorResp { const message = createBaseSatErrorResp(); message.errorType = object.errorType ?? 0; @@ -1130,12 +1150,13 @@ messageTypeRegistry.set(SatErrorResp.$type, SatErrorResp); function createBaseSatInStartReplicationReq(): SatInStartReplicationReq { return { $type: "Electric.Satellite.SatInStartReplicationReq", - lsn: new Uint8Array(0), + lsn: new Uint8Array(), options: [], subscriptionIds: [], schemaVersion: undefined, observedTransactionData: [], sqlDialect: undefined, + observedGoneBatch: [], }; } @@ -1165,6 +1186,9 @@ export const SatInStartReplicationReq = { if (message.sqlDialect !== undefined) { writer.uint32(56).int32(message.sqlDialect); } + for (const v of message.observedGoneBatch) { + writer.uint32(66).string(v!); + } return writer; }, @@ -1237,6 +1261,13 @@ export const SatInStartReplicationReq = { message.sqlDialect = reader.int32() as any; continue; + case 8: + if (tag !== 66) { + break; + } + + message.observedGoneBatch.push(reader.string()); + continue; } if ((tag & 7) === 4 || tag === 0) { break; @@ -1247,16 +1278,18 @@ export const SatInStartReplicationReq = { }, create, I>>(base?: I): SatInStartReplicationReq { - return SatInStartReplicationReq.fromPartial(base ?? ({} as any)); + return SatInStartReplicationReq.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatInStartReplicationReq { const message = createBaseSatInStartReplicationReq(); - message.lsn = object.lsn ?? new Uint8Array(0); + message.lsn = object.lsn ?? new Uint8Array(); message.options = object.options?.map((e) => e) || []; message.subscriptionIds = object.subscriptionIds?.map((e) => e) || []; message.schemaVersion = object.schemaVersion ?? undefined; message.observedTransactionData = object.observedTransactionData?.map((e) => Long.fromValue(e)) || []; message.sqlDialect = object.sqlDialect ?? undefined; + message.observedGoneBatch = object.observedGoneBatch?.map((e) => e) || []; return message; }, }; @@ -1311,8 +1344,9 @@ export const SatInStartReplicationResp = { }, create, I>>(base?: I): SatInStartReplicationResp { - return SatInStartReplicationResp.fromPartial(base ?? ({} as any)); + return SatInStartReplicationResp.fromPartial(base ?? 
{}); }, + fromPartial, I>>(object: I): SatInStartReplicationResp { const message = createBaseSatInStartReplicationResp(); message.err = (object.err !== undefined && object.err !== null) @@ -1375,8 +1409,9 @@ export const SatInStartReplicationResp_ReplicationError = { create, I>>( base?: I, ): SatInStartReplicationResp_ReplicationError { - return SatInStartReplicationResp_ReplicationError.fromPartial(base ?? ({} as any)); + return SatInStartReplicationResp_ReplicationError.fromPartial(base ?? {}); }, + fromPartial, I>>( object: I, ): SatInStartReplicationResp_ReplicationError { @@ -1417,8 +1452,9 @@ export const SatInStopReplicationReq = { }, create, I>>(base?: I): SatInStopReplicationReq { - return SatInStopReplicationReq.fromPartial(base ?? ({} as any)); + return SatInStopReplicationReq.fromPartial(base ?? {}); }, + fromPartial, I>>(_: I): SatInStopReplicationReq { const message = createBaseSatInStopReplicationReq(); return message; @@ -1455,8 +1491,9 @@ export const SatInStopReplicationResp = { }, create, I>>(base?: I): SatInStopReplicationResp { - return SatInStopReplicationResp.fromPartial(base ?? ({} as any)); + return SatInStopReplicationResp.fromPartial(base ?? {}); }, + fromPartial, I>>(_: I): SatInStopReplicationResp { const message = createBaseSatInStopReplicationResp(); return message; @@ -1479,10 +1516,10 @@ export const SatRelationColumn = { if (message.type !== "") { writer.uint32(18).string(message.type); } - if (message.primaryKey !== false) { + if (message.primaryKey === true) { writer.uint32(24).bool(message.primaryKey); } - if (message.isNullable !== false) { + if (message.isNullable === true) { writer.uint32(32).bool(message.isNullable); } return writer; @@ -1533,8 +1570,9 @@ export const SatRelationColumn = { }, create, I>>(base?: I): SatRelationColumn { - return SatRelationColumn.fromPartial(base ?? ({} as any)); + return SatRelationColumn.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatRelationColumn { const message = createBaseSatRelationColumn(); message.name = object.name ?? ""; @@ -1632,8 +1670,9 @@ export const SatRelation = { }, create, I>>(base?: I): SatRelation { - return SatRelation.fromPartial(base ?? ({} as any)); + return SatRelation.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatRelation { const message = createBaseSatRelation(); message.schemaName = object.schemaName ?? ""; @@ -1685,8 +1724,9 @@ export const SatOpLog = { }, create, I>>(base?: I): SatOpLog { - return SatOpLog.fromPartial(base ?? ({} as any)); + return SatOpLog.fromPartial(base ?? 
{}); }, + fromPartial, I>>(object: I): SatOpLog { const message = createBaseSatOpLog(); message.ops = object.ops?.map((e) => SatTransOp.fromPartial(e)) || []; @@ -1700,10 +1740,11 @@ function createBaseSatOpLogAck(): SatOpLogAck { return { $type: "Electric.Satellite.SatOpLogAck", ackTimestamp: Long.UZERO, - lsn: new Uint8Array(0), + lsn: new Uint8Array(), transactionId: Long.UZERO, subscriptionIds: [], additionalDataSourceIds: [], + goneSubscriptionIds: [], }; } @@ -1711,13 +1752,13 @@ export const SatOpLogAck = { $type: "Electric.Satellite.SatOpLogAck" as const, encode(message: SatOpLogAck, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (!message.ackTimestamp.equals(Long.UZERO)) { + if (!message.ackTimestamp.isZero()) { writer.uint32(8).uint64(message.ackTimestamp); } if (message.lsn.length !== 0) { writer.uint32(18).bytes(message.lsn); } - if (!message.transactionId.equals(Long.UZERO)) { + if (!message.transactionId.isZero()) { writer.uint32(24).uint64(message.transactionId); } for (const v of message.subscriptionIds) { @@ -1728,6 +1769,9 @@ export const SatOpLogAck = { writer.uint64(v); } writer.ldelim(); + for (const v of message.goneSubscriptionIds) { + writer.uint32(50).string(v!); + } return writer; }, @@ -1783,6 +1827,13 @@ export const SatOpLogAck = { } break; + case 6: + if (tag !== 50) { + break; + } + + message.goneSubscriptionIds.push(reader.string()); + continue; } if ((tag & 7) === 4 || tag === 0) { break; @@ -1793,19 +1844,21 @@ export const SatOpLogAck = { }, create, I>>(base?: I): SatOpLogAck { - return SatOpLogAck.fromPartial(base ?? ({} as any)); + return SatOpLogAck.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatOpLogAck { const message = createBaseSatOpLogAck(); message.ackTimestamp = (object.ackTimestamp !== undefined && object.ackTimestamp !== null) ? Long.fromValue(object.ackTimestamp) : Long.UZERO; - message.lsn = object.lsn ?? new Uint8Array(0); + message.lsn = object.lsn ?? new Uint8Array(); message.transactionId = (object.transactionId !== undefined && object.transactionId !== null) ? Long.fromValue(object.transactionId) : Long.UZERO; message.subscriptionIds = object.subscriptionIds?.map((e) => e) || []; message.additionalDataSourceIds = object.additionalDataSourceIds?.map((e) => Long.fromValue(e)) || []; + message.goneSubscriptionIds = object.goneSubscriptionIds?.map((e) => e) || []; return message; }, }; @@ -1952,8 +2005,9 @@ export const SatTransOp = { }, create, I>>(base?: I): SatTransOp { - return SatTransOp.fromPartial(base ?? ({} as any)); + return SatTransOp.fromPartial(base ?? 
{}); }, + fromPartial, I>>(object: I): SatTransOp { const message = createBaseSatTransOp(); message.begin = (object.begin !== undefined && object.begin !== null) @@ -1994,7 +2048,7 @@ function createBaseSatOpBegin(): SatOpBegin { return { $type: "Electric.Satellite.SatOpBegin", commitTimestamp: Long.UZERO, - lsn: new Uint8Array(0), + lsn: new Uint8Array(), origin: undefined, isMigration: false, additionalDataRef: Long.UZERO, @@ -2006,7 +2060,7 @@ export const SatOpBegin = { $type: "Electric.Satellite.SatOpBegin" as const, encode(message: SatOpBegin, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (!message.commitTimestamp.equals(Long.UZERO)) { + if (!message.commitTimestamp.isZero()) { writer.uint32(8).uint64(message.commitTimestamp); } if (message.lsn.length !== 0) { @@ -2015,10 +2069,10 @@ export const SatOpBegin = { if (message.origin !== undefined) { writer.uint32(34).string(message.origin); } - if (message.isMigration !== false) { + if (message.isMigration === true) { writer.uint32(40).bool(message.isMigration); } - if (!message.additionalDataRef.equals(Long.UZERO)) { + if (!message.additionalDataRef.isZero()) { writer.uint32(48).uint64(message.additionalDataRef); } if (message.transactionId !== undefined) { @@ -2086,14 +2140,15 @@ export const SatOpBegin = { }, create, I>>(base?: I): SatOpBegin { - return SatOpBegin.fromPartial(base ?? ({} as any)); + return SatOpBegin.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatOpBegin { const message = createBaseSatOpBegin(); message.commitTimestamp = (object.commitTimestamp !== undefined && object.commitTimestamp !== null) ? Long.fromValue(object.commitTimestamp) : Long.UZERO; - message.lsn = object.lsn ?? new Uint8Array(0); + message.lsn = object.lsn ?? new Uint8Array(); message.origin = object.origin ?? undefined; message.isMigration = object.isMigration ?? false; message.additionalDataRef = (object.additionalDataRef !== undefined && object.additionalDataRef !== null) @@ -2116,7 +2171,7 @@ export const SatOpAdditionalBegin = { $type: "Electric.Satellite.SatOpAdditionalBegin" as const, encode(message: SatOpAdditionalBegin, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (!message.ref.equals(Long.UZERO)) { + if (!message.ref.isZero()) { writer.uint32(8).uint64(message.ref); } return writer; @@ -2146,8 +2201,9 @@ export const SatOpAdditionalBegin = { }, create, I>>(base?: I): SatOpAdditionalBegin { - return SatOpAdditionalBegin.fromPartial(base ?? ({} as any)); + return SatOpAdditionalBegin.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatOpAdditionalBegin { const message = createBaseSatOpAdditionalBegin(); message.ref = (object.ref !== undefined && object.ref !== null) ? 
Long.fromValue(object.ref) : Long.UZERO; @@ -2161,7 +2217,7 @@ function createBaseSatOpCommit(): SatOpCommit { return { $type: "Electric.Satellite.SatOpCommit", commitTimestamp: Long.UZERO, - lsn: new Uint8Array(0), + lsn: new Uint8Array(), additionalDataRef: Long.UZERO, transactionId: undefined, }; @@ -2171,13 +2227,13 @@ export const SatOpCommit = { $type: "Electric.Satellite.SatOpCommit" as const, encode(message: SatOpCommit, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (!message.commitTimestamp.equals(Long.UZERO)) { + if (!message.commitTimestamp.isZero()) { writer.uint32(8).uint64(message.commitTimestamp); } if (message.lsn.length !== 0) { writer.uint32(26).bytes(message.lsn); } - if (!message.additionalDataRef.equals(Long.UZERO)) { + if (!message.additionalDataRef.isZero()) { writer.uint32(32).uint64(message.additionalDataRef); } if (message.transactionId !== undefined) { @@ -2231,14 +2287,15 @@ export const SatOpCommit = { }, create, I>>(base?: I): SatOpCommit { - return SatOpCommit.fromPartial(base ?? ({} as any)); + return SatOpCommit.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatOpCommit { const message = createBaseSatOpCommit(); message.commitTimestamp = (object.commitTimestamp !== undefined && object.commitTimestamp !== null) ? Long.fromValue(object.commitTimestamp) : Long.UZERO; - message.lsn = object.lsn ?? new Uint8Array(0); + message.lsn = object.lsn ?? new Uint8Array(); message.additionalDataRef = (object.additionalDataRef !== undefined && object.additionalDataRef !== null) ? Long.fromValue(object.additionalDataRef) : Long.UZERO; @@ -2259,7 +2316,7 @@ export const SatOpAdditionalCommit = { $type: "Electric.Satellite.SatOpAdditionalCommit" as const, encode(message: SatOpAdditionalCommit, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (!message.ref.equals(Long.UZERO)) { + if (!message.ref.isZero()) { writer.uint32(8).uint64(message.ref); } return writer; @@ -2289,8 +2346,9 @@ export const SatOpAdditionalCommit = { }, create, I>>(base?: I): SatOpAdditionalCommit { - return SatOpAdditionalCommit.fromPartial(base ?? ({} as any)); + return SatOpAdditionalCommit.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatOpAdditionalCommit { const message = createBaseSatOpAdditionalCommit(); message.ref = (object.ref !== undefined && object.ref !== null) ? Long.fromValue(object.ref) : Long.UZERO; @@ -2358,8 +2416,9 @@ export const SatOpInsert = { }, create, I>>(base?: I): SatOpInsert { - return SatOpInsert.fromPartial(base ?? ({} as any)); + return SatOpInsert.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatOpInsert { const message = createBaseSatOpInsert(); message.relationId = object.relationId ?? 0; @@ -2447,8 +2506,9 @@ export const SatOpUpdate = { }, create, I>>(base?: I): SatOpUpdate { - return SatOpUpdate.fromPartial(base ?? ({} as any)); + return SatOpUpdate.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatOpUpdate { const message = createBaseSatOpUpdate(); message.relationId = object.relationId ?? 0; @@ -2523,8 +2583,9 @@ export const SatOpDelete = { }, create, I>>(base?: I): SatOpDelete { - return SatOpDelete.fromPartial(base ?? ({} as any)); + return SatOpDelete.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatOpDelete { const message = createBaseSatOpDelete(); message.relationId = object.relationId ?? 0; @@ -2596,8 +2657,9 @@ export const SatOpCompensation = { }, create, I>>(base?: I): SatOpCompensation { - return SatOpCompensation.fromPartial(base ?? 
({} as any)); + return SatOpCompensation.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatOpCompensation { const message = createBaseSatOpCompensation(); message.relationId = object.relationId ?? 0; @@ -2659,8 +2721,9 @@ export const SatOpGone = { }, create, I>>(base?: I): SatOpGone { - return SatOpGone.fromPartial(base ?? ({} as any)); + return SatOpGone.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatOpGone { const message = createBaseSatOpGone(); message.relationId = object.relationId ?? 0; @@ -2674,7 +2737,7 @@ export const SatOpGone = { messageTypeRegistry.set(SatOpGone.$type, SatOpGone); function createBaseSatOpRow(): SatOpRow { - return { $type: "Electric.Satellite.SatOpRow", nullsBitmask: new Uint8Array(0), values: [] }; + return { $type: "Electric.Satellite.SatOpRow", nullsBitmask: new Uint8Array(), values: [] }; } export const SatOpRow = { @@ -2721,11 +2784,12 @@ export const SatOpRow = { }, create, I>>(base?: I): SatOpRow { - return SatOpRow.fromPartial(base ?? ({} as any)); + return SatOpRow.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatOpRow { const message = createBaseSatOpRow(); - message.nullsBitmask = object.nullsBitmask ?? new Uint8Array(0); + message.nullsBitmask = object.nullsBitmask ?? new Uint8Array(); message.values = object.values?.map((e) => e) || []; return message; }, @@ -2801,8 +2865,9 @@ export const SatOpMigrate = { }, create, I>>(base?: I): SatOpMigrate { - return SatOpMigrate.fromPartial(base ?? ({} as any)); + return SatOpMigrate.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatOpMigrate { const message = createBaseSatOpMigrate(); message.version = object.version ?? ""; @@ -2867,8 +2932,9 @@ export const SatOpMigrate_Stmt = { }, create, I>>(base?: I): SatOpMigrate_Stmt { - return SatOpMigrate_Stmt.fromPartial(base ?? ({} as any)); + return SatOpMigrate_Stmt.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatOpMigrate_Stmt { const message = createBaseSatOpMigrate_Stmt(); message.type = object.type ?? 0; @@ -2961,8 +3027,9 @@ export const SatOpMigrate_PgColumnType = { }, create, I>>(base?: I): SatOpMigrate_PgColumnType { - return SatOpMigrate_PgColumnType.fromPartial(base ?? ({} as any)); + return SatOpMigrate_PgColumnType.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatOpMigrate_PgColumnType { const message = createBaseSatOpMigrate_PgColumnType(); message.name = object.name ?? ""; @@ -3032,8 +3099,9 @@ export const SatOpMigrate_Column = { }, create, I>>(base?: I): SatOpMigrate_Column { - return SatOpMigrate_Column.fromPartial(base ?? ({} as any)); + return SatOpMigrate_Column.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatOpMigrate_Column { const message = createBaseSatOpMigrate_Column(); message.name = object.name ?? ""; @@ -3105,8 +3173,9 @@ export const SatOpMigrate_ForeignKey = { }, create, I>>(base?: I): SatOpMigrate_ForeignKey { - return SatOpMigrate_ForeignKey.fromPartial(base ?? ({} as any)); + return SatOpMigrate_ForeignKey.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatOpMigrate_ForeignKey { const message = createBaseSatOpMigrate_ForeignKey(); message.fkCols = object.fkCols?.map((e) => e) || []; @@ -3186,8 +3255,9 @@ export const SatOpMigrate_Table = { }, create, I>>(base?: I): SatOpMigrate_Table { - return SatOpMigrate_Table.fromPartial(base ?? ({} as any)); + return SatOpMigrate_Table.fromPartial(base ?? 
{}); }, + fromPartial, I>>(object: I): SatOpMigrate_Table { const message = createBaseSatOpMigrate_Table(); message.name = object.name ?? ""; @@ -3248,8 +3318,9 @@ export const SatOpMigrate_EnumType = { }, create, I>>(base?: I): SatOpMigrate_EnumType { - return SatOpMigrate_EnumType.fromPartial(base ?? ({} as any)); + return SatOpMigrate_EnumType.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatOpMigrate_EnumType { const message = createBaseSatOpMigrate_EnumType(); message.name = object.name ?? ""; @@ -3308,8 +3379,9 @@ export const SatSubsReq = { }, create, I>>(base?: I): SatSubsReq { - return SatSubsReq.fromPartial(base ?? ({} as any)); + return SatSubsReq.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatSubsReq { const message = createBaseSatSubsReq(); message.subscriptionId = object.subscriptionId ?? ""; @@ -3368,8 +3440,9 @@ export const SatSubsResp = { }, create, I>>(base?: I): SatSubsResp { - return SatSubsResp.fromPartial(base ?? ({} as any)); + return SatSubsResp.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatSubsResp { const message = createBaseSatSubsResp(); message.subscriptionId = object.subscriptionId ?? ""; @@ -3440,8 +3513,9 @@ export const SatSubsResp_SatSubsError = { }, create, I>>(base?: I): SatSubsResp_SatSubsError { - return SatSubsResp_SatSubsError.fromPartial(base ?? ({} as any)); + return SatSubsResp_SatSubsError.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatSubsResp_SatSubsError { const message = createBaseSatSubsResp_SatSubsError(); message.code = object.code ?? 0; @@ -3514,8 +3588,9 @@ export const SatSubsResp_SatSubsError_ShapeReqError = { create, I>>( base?: I, ): SatSubsResp_SatSubsError_ShapeReqError { - return SatSubsResp_SatSubsError_ShapeReqError.fromPartial(base ?? ({} as any)); + return SatSubsResp_SatSubsError_ShapeReqError.fromPartial(base ?? {}); }, + fromPartial, I>>( object: I, ): SatSubsResp_SatSubsError_ShapeReqError { @@ -3567,8 +3642,9 @@ export const SatUnsubsReq = { }, create, I>>(base?: I): SatUnsubsReq { - return SatUnsubsReq.fromPartial(base ?? ({} as any)); + return SatUnsubsReq.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatUnsubsReq { const message = createBaseSatUnsubsReq(); message.subscriptionIds = object.subscriptionIds?.map((e) => e) || []; @@ -3606,8 +3682,9 @@ export const SatUnsubsResp = { }, create, I>>(base?: I): SatUnsubsResp { - return SatUnsubsResp.fromPartial(base ?? ({} as any)); + return SatUnsubsResp.fromPartial(base ?? {}); }, + fromPartial, I>>(_: I): SatUnsubsResp { const message = createBaseSatUnsubsResp(); return message; @@ -3664,8 +3741,9 @@ export const SatShapeReq = { }, create, I>>(base?: I): SatShapeReq { - return SatShapeReq.fromPartial(base ?? ({} as any)); + return SatShapeReq.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatShapeReq { const message = createBaseSatShapeReq(); message.requestId = object.requestId ?? ""; @@ -3716,8 +3794,9 @@ export const SatShapeDef = { }, create, I>>(base?: I): SatShapeDef { - return SatShapeDef.fromPartial(base ?? ({} as any)); + return SatShapeDef.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatShapeDef { const message = createBaseSatShapeDef(); message.selects = object.selects?.map((e) => SatShapeDef_Select.fromPartial(e)) || []; @@ -3775,8 +3854,9 @@ export const SatShapeDef_Relation = { }, create, I>>(base?: I): SatShapeDef_Relation { - return SatShapeDef_Relation.fromPartial(base ?? ({} as any)); + return SatShapeDef_Relation.fromPartial(base ?? 
{}); }, + fromPartial, I>>(object: I): SatShapeDef_Relation { const message = createBaseSatShapeDef_Relation(); message.foreignKey = object.foreignKey?.map((e) => e) || []; @@ -3847,8 +3927,9 @@ export const SatShapeDef_Select = { }, create, I>>(base?: I): SatShapeDef_Select { - return SatShapeDef_Select.fromPartial(base ?? ({} as any)); + return SatShapeDef_Select.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatShapeDef_Select { const message = createBaseSatShapeDef_Select(); message.tablename = object.tablename ?? ""; @@ -3934,8 +4015,9 @@ export const SatSubsDataError = { }, create, I>>(base?: I): SatSubsDataError { - return SatSubsDataError.fromPartial(base ?? ({} as any)); + return SatSubsDataError.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatSubsDataError { const message = createBaseSatSubsDataError(); message.code = object.code ?? 0; @@ -4007,8 +4089,9 @@ export const SatSubsDataError_ShapeReqError = { }, create, I>>(base?: I): SatSubsDataError_ShapeReqError { - return SatSubsDataError_ShapeReqError.fromPartial(base ?? ({} as any)); + return SatSubsDataError_ShapeReqError.fromPartial(base ?? {}); }, + fromPartial, I>>( object: I, ): SatSubsDataError_ShapeReqError { @@ -4023,7 +4106,7 @@ export const SatSubsDataError_ShapeReqError = { messageTypeRegistry.set(SatSubsDataError_ShapeReqError.$type, SatSubsDataError_ShapeReqError); function createBaseSatSubsDataBegin(): SatSubsDataBegin { - return { $type: "Electric.Satellite.SatSubsDataBegin", subscriptionId: "", lsn: new Uint8Array(0) }; + return { $type: "Electric.Satellite.SatSubsDataBegin", subscriptionId: "", lsn: new Uint8Array() }; } export const SatSubsDataBegin = { @@ -4070,12 +4153,13 @@ export const SatSubsDataBegin = { }, create, I>>(base?: I): SatSubsDataBegin { - return SatSubsDataBegin.fromPartial(base ?? ({} as any)); + return SatSubsDataBegin.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatSubsDataBegin { const message = createBaseSatSubsDataBegin(); message.subscriptionId = object.subscriptionId ?? ""; - message.lsn = object.lsn ?? new Uint8Array(0); + message.lsn = object.lsn ?? new Uint8Array(); return message; }, }; @@ -4110,8 +4194,9 @@ export const SatSubsDataEnd = { }, create, I>>(base?: I): SatSubsDataEnd { - return SatSubsDataEnd.fromPartial(base ?? ({} as any)); + return SatSubsDataEnd.fromPartial(base ?? {}); }, + fromPartial, I>>(_: I): SatSubsDataEnd { const message = createBaseSatSubsDataEnd(); return message; @@ -4120,6 +4205,106 @@ export const SatSubsDataEnd = { messageTypeRegistry.set(SatSubsDataEnd.$type, SatSubsDataEnd); +function createBaseSatUnsubsDataBegin(): SatUnsubsDataBegin { + return { $type: "Electric.Satellite.SatUnsubsDataBegin", subscriptionIds: [], lsn: new Uint8Array() }; +} + +export const SatUnsubsDataBegin = { + $type: "Electric.Satellite.SatUnsubsDataBegin" as const, + + encode(message: SatUnsubsDataBegin, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + for (const v of message.subscriptionIds) { + writer.uint32(10).string(v!); + } + if (message.lsn.length !== 0) { + writer.uint32(18).bytes(message.lsn); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): SatUnsubsDataBegin { + const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseSatUnsubsDataBegin(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.subscriptionIds.push(reader.string()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.lsn = reader.bytes(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + create, I>>(base?: I): SatUnsubsDataBegin { + return SatUnsubsDataBegin.fromPartial(base ?? {}); + }, + + fromPartial, I>>(object: I): SatUnsubsDataBegin { + const message = createBaseSatUnsubsDataBegin(); + message.subscriptionIds = object.subscriptionIds?.map((e) => e) || []; + message.lsn = object.lsn ?? new Uint8Array(); + return message; + }, +}; + +messageTypeRegistry.set(SatUnsubsDataBegin.$type, SatUnsubsDataBegin); + +function createBaseSatUnsubsDataEnd(): SatUnsubsDataEnd { + return { $type: "Electric.Satellite.SatUnsubsDataEnd" }; +} + +export const SatUnsubsDataEnd = { + $type: "Electric.Satellite.SatUnsubsDataEnd" as const, + + encode(_: SatUnsubsDataEnd, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): SatUnsubsDataEnd { + const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseSatUnsubsDataEnd(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + create, I>>(base?: I): SatUnsubsDataEnd { + return SatUnsubsDataEnd.fromPartial(base ?? {}); + }, + + fromPartial, I>>(_: I): SatUnsubsDataEnd { + const message = createBaseSatUnsubsDataEnd(); + return message; + }, +}; + +messageTypeRegistry.set(SatUnsubsDataEnd.$type, SatUnsubsDataEnd); + function createBaseSatShapeDataBegin(): SatShapeDataBegin { return { $type: "Electric.Satellite.SatShapeDataBegin", requestId: "", uuid: "" }; } @@ -4168,8 +4353,9 @@ export const SatShapeDataBegin = { }, create, I>>(base?: I): SatShapeDataBegin { - return SatShapeDataBegin.fromPartial(base ?? ({} as any)); + return SatShapeDataBegin.fromPartial(base ?? {}); }, + fromPartial, I>>(object: I): SatShapeDataBegin { const message = createBaseSatShapeDataBegin(); message.requestId = object.requestId ?? ""; @@ -4208,8 +4394,9 @@ export const SatShapeDataEnd = { }, create, I>>(base?: I): SatShapeDataEnd { - return SatShapeDataEnd.fromPartial(base ?? ({} as any)); + return SatShapeDataEnd.fromPartial(base ?? 
{}); }, + fromPartial, I>>(_: I): SatShapeDataEnd { const message = createBaseSatShapeDataEnd(); return message; @@ -4227,12 +4414,11 @@ export interface Root { unsubscribe(request: SatUnsubsReq): Promise; } -export const RootServiceName = "Electric.Satellite.Root"; export class RootClientImpl implements Root { private readonly rpc: Rpc; private readonly service: string; constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || RootServiceName; + this.service = opts?.service || "Electric.Satellite.Root"; this.rpc = rpc; this.authenticate = this.authenticate.bind(this); this.startReplication = this.startReplication.bind(this); @@ -4277,12 +4463,11 @@ export interface ClientRoot { stopReplication(request: SatInStopReplicationReq): Promise; } -export const ClientRootServiceName = "Electric.Satellite.ClientRoot"; export class ClientRootClientImpl implements ClientRoot { private readonly rpc: Rpc; private readonly service: string; constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || ClientRootServiceName; + this.service = opts?.service || "Electric.Satellite.ClientRoot"; this.rpc = rpc; this.startReplication = this.startReplication.bind(this); this.stopReplication = this.stopReplication.bind(this); @@ -4307,7 +4492,7 @@ interface Rpc { type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; export type DeepPartial = T extends Builtin ? T - : T extends Long ? string | number | Long : T extends globalThis.Array ? globalThis.Array> + : T extends Long ? string | number | Long : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> : T extends {} ? { [K in Exclude]?: DeepPartial } : Partial; diff --git a/clients/typescript/src/_generated/typeRegistry.ts b/clients/typescript/src/_generated/typeRegistry.ts index 5c02f5ba98..7f840bd7c5 100644 --- a/clients/typescript/src/_generated/typeRegistry.ts +++ b/clients/typescript/src/_generated/typeRegistry.ts @@ -1,8 +1,3 @@ -// Code generated by protoc-gen-ts_proto. DO NOT EDIT. -// versions: -// protoc-gen-ts_proto v1.175.0 -// protoc v5.26.1 - /* eslint-disable */ import Long from "long"; import _m0 from "protobufjs/minimal.js"; @@ -20,7 +15,7 @@ export const messageTypeRegistry = new Map(); type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined; export type DeepPartial = T extends Builtin ? T - : T extends Long ? string | number | Long : T extends globalThis.Array ? globalThis.Array> + : T extends Long ? string | number | Long : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> : T extends {} ? 
{ [K in Exclude]?: DeepPartial } : Partial; diff --git a/clients/typescript/src/client/model/shapes.ts b/clients/typescript/src/client/model/shapes.ts index 33c25adcbf..6f110e7afc 100644 --- a/clients/typescript/src/client/model/shapes.ts +++ b/clients/typescript/src/client/model/shapes.ts @@ -39,6 +39,7 @@ export class ShapeManager extends BaseShapeManager { }) return { + id: sub.id, synced: dataReceivedProm, } } @@ -60,6 +61,7 @@ export class ShapeManagerMock extends BaseShapeManager { ) return { + id: 'unknown', synced: Promise.resolve(), } } diff --git a/clients/typescript/src/migrators/query-builder/builder.ts b/clients/typescript/src/migrators/query-builder/builder.ts index 60d28985a6..ea3931d324 100644 --- a/clients/typescript/src/migrators/query-builder/builder.ts +++ b/clients/typescript/src/migrators/query-builder/builder.ts @@ -316,16 +316,15 @@ export abstract class QueryBuilder { let positionalParam = 1 const pos = (i: number) => `${this.makePositionalParam(i)}` const makeInsertPattern = () => { - return ` (${Array.from( - { length: columnCount }, - () => `${pos(positionalParam++)}` - ).join(', ')})` + const insertRow = Array.from({ length: columnCount }, () => + pos(positionalParam++) + ) + + return ` (${insertRow.join(', ')})` } - // Largest number below maxSqlParamers that evenly divides by column count, - // divided by columnCount, giving the amount of rows we can insert at once - const batchMaxSize = - (maxParameters - (maxParameters % columnCount)) / columnCount + // Amount of rows we can insert at once + const batchMaxSize = Math.floor(maxParameters / columnCount) while (processed < recordCount) { positionalParam = 1 // start counting parameters from 1 again const currentInsertCount = Math.min(recordCount - processed, batchMaxSize) @@ -346,4 +345,69 @@ export abstract class QueryBuilder { } return stmts } + + /** + * Prepare multiple batched DELETE statements for an array of records. + * + * Since SQLite only supports a limited amount of positional `?` parameters, + * we generate multiple delete statements with each one being filled as much + * as possible from the given data. 
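+ * For example (illustrative values only): with a Postgres-style parameter builder, + * a single PK column `id`, three records and `maxParameters = 2`, this produces + * `DELETE FROM t WHERE ("id" = $1) OR ("id" = $2)` followed by + * `DELETE FROM t WHERE ("id" = $1)`.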
This function only supports column equality checks. + * + * @param baseSql base SQL string to which the WHERE clauses should be appended + * @param columns columns that describe records + * @param records records to be deleted + * @param maxParameters max parameters this SQLite can accept - determines batching factor + * @param suffixSql optional SQL string to append to each delete statement + * @returns array of statements ready to be executed by the adapter + */ + public prepareDeleteBatchedStatements( + baseSql: string, + columns: Array, + records: T[], + maxParameters: number, + suffixSql = '' + ): Statement[] { + const stmts: Statement[] = [] + const columnCount = columns.length + const recordCount = records.length + let processed = 0 + let positionalParam = 1 + const pos = (i: number) => this.makePositionalParam(i) + const makeWherePattern = () => { + const columnComparisons = Array.from( + { length: columnCount }, + (_, i) => `"${columns[i] as string}" = ${pos(positionalParam++)}` + ) + + return ` (${columnComparisons.join(' AND ')})` + } + + // Amount of rows we can delete at once + const batchMaxSize = Math.floor(maxParameters / columnCount) + while (processed < recordCount) { + positionalParam = 1 // start counting parameters from 1 again + const currentDeleteCount = Math.min(recordCount - processed, batchMaxSize) + let sql = + baseSql + + Array.from({ length: currentDeleteCount }, makeWherePattern).join( + ' OR ' + ) + + if (suffixSql !== '') { + sql += ' ' + suffixSql + } + + const args = records + .slice(processed, processed + currentDeleteCount) + .flatMap((record) => columns.map((col) => record[col] as SqlValue)) + + processed += currentDeleteCount + stmts.push({ sql, args }) + } + return stmts + } + + public makeQT(tablename: string): QualifiedTablename { + return new QualifiedTablename(this.defaultNamespace, tablename) + } } diff --git a/clients/typescript/src/satellite/client.ts b/clients/typescript/src/satellite/client.ts index 494238692c..5ae132ed8d 100644 --- a/clients/typescript/src/satellite/client.ts +++ b/clients/typescript/src/satellite/client.ts @@ -23,6 +23,8 @@ import { SatShapeDataEnd, SatUnsubsReq, SatUnsubsResp, + SatUnsubsDataBegin, + SatUnsubsDataEnd, Root, RootClientImpl, SatRpcRequest, @@ -72,6 +74,8 @@ import { DataChange, isDataChange, ReplicatedRowTransformer, + DataGone, + GoneBatchCallback, } from '../util/types' import { base64, @@ -95,6 +99,7 @@ import { SubscribeResponse, SubscriptionDeliveredCallback, SubscriptionErrorCallback, + SubscriptionId, UnsubscribeResponse, } from './shapes/types' import { SubscriptionsDataCache } from './shapes/cache' @@ -131,6 +136,12 @@ type Events = { outbound_started: () => void [SUBSCRIPTION_DELIVERED]: SubscriptionDeliveredCallback [SUBSCRIPTION_ERROR]: SubscriptionErrorCallback + goneBatch: ( + lsn: LSN, + subscriptionIds: SubscriptionId[], + changes: DataGone[], + ack: () => void + ) => Promise } type EventEmitter = AsyncEventEmitter @@ -181,6 +192,8 @@ export class SatelliteClient implements Client { SatRpcResponse: (msg) => this.rpcClient.handleResponse(msg), SatRpcRequest: (msg) => this.handleRpcRequest(msg), SatOpLogAck: (msg) => void msg, // Server doesn't send that + SatUnsubsDataBegin: (msg) => this.handleUnsubsDataBegin(msg), + SatUnsubsDataEnd: (msg) => this.handleUnsubsDataEnd(msg), } satisfies HandlerMapping).map((e) => [getFullTypeName(e[0]), e[1]]) ) @@ -261,10 +274,13 @@ export class SatelliteClient implements Client { DEFAULT_ACK_PERIOD ), additionalData: [], + goneBatch: [], + receivingUnsubsBatch: false,
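+ // `receivingUnsubsBatch` holds the subscription IDs of an in-flight server-driven + // unsubscribe (`false` when none is in progress); `goneBatch` accumulates the + // incoming GONE changes until `SatUnsubsDataEnd` arrives.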
unseenAdditionalDataRefs: new Set(), seenAdditionalDataSinceLastTx: { dataRefs: [], subscriptions: [], + gone: [], }, } } @@ -473,6 +489,16 @@ export class SatelliteClient implements Client { this.emitter.removeListener('relation', callback) } + subscribeToGoneBatch(callback: GoneBatchCallback) { + this.emitter.on('goneBatch', async (lsn, ids, changes, ack) => { + await callback(lsn, ids, changes) + ack() + }) + } + unsubscribeToGoneBatch(_callback: GoneBatchCallback) { + // TODO: real removeListener implementation, because the old one for txns doesn't work + } + enqueueTransaction(transaction: DataTransaction): void { if (this.outbound.isReplicating !== ReplicationStatus.ACTIVE) { throw new SatelliteError( @@ -911,6 +937,8 @@ export class SatelliteClient implements Client { private handleTransaction(message: SatOpLog) { - if (!this.subscriptionsDataCache.isDelivering()) { + if (this.inbound.receivingUnsubsBatch) { + this.processUnsubsDataMessage(message) + } else if (!this.subscriptionsDataCache.isDelivering()) { this.processOpLogMessage(message) } else { try { this.subscriptionsDataCache.transaction(message.ops) @@ -963,6 +991,39 @@ export class SatelliteClient implements Client { return {} } + private handleUnsubsDataBegin(msg: SatUnsubsDataBegin): void { + this.inbound.receivingUnsubsBatch = msg.subscriptionIds + this.inbound.last_lsn = msg.lsn + } + + private handleUnsubsDataEnd(_msg: SatUnsubsDataEnd): void { + if (!this.inbound.receivingUnsubsBatch) + throw new SatelliteError( + SatelliteErrorCode.PROTOCOL_VIOLATION, + 'Received a `SatUnsubsDataEnd` message but not the begin message' + ) + + // We copy the value here so that the callback built below closes over the + // copied array instead of over `this`, and thus uses the current value rather + // than whatever `this.inbound.receivingUnsubsBatch` holds by the time it runs.
+ const subscriptionIds = [...this.inbound.receivingUnsubsBatch] + + this.emitter.enqueueEmit( + 'goneBatch', + this.inbound.last_lsn!, + subscriptionIds, + this.inbound.goneBatch, + () => { + this.inbound.seenAdditionalDataSinceLastTx.gone.push(...subscriptionIds) + this.maybeSendAck('additionalData') + } + ) + + this.inbound.receivingUnsubsBatch = false + this.inbound.goneBatch = [] + } + private delayIncomingMessages( fn: () => Promise, opts: { allowedRpcResponses: Array } @@ -1020,6 +1081,29 @@ return rel } + private processUnsubsDataMessage(msg: SatOpLog): void { + msg.ops.forEach((op) => { + if (!op.gone) + throw new SatelliteError( + SatelliteErrorCode.PROTOCOL_VIOLATION, + 'Expected to see only GONE messages in unsubscription data' + ) + + const rel = this.getRelation(op.gone) + this.inbound.goneBatch.push({ + relation: rel, + type: DataChangeType.GONE, + oldRecord: deserializeRow( + op.gone.pkData!, + rel, + this.dbDescription, + this.decoder + ), + tags: [], + }) + }) + } + private processOpLogMessage(opLogMessage: SatOpLog): void { const replication = this.inbound opLogMessage.ops.map((op) => { @@ -1076,6 +1160,7 @@ this.inbound.seenAdditionalDataSinceLastTx = { dataRefs: [], subscriptions: [], + gone: [], } this.maybeSendAck() }) @@ -1277,6 +1362,7 @@ this.inbound.seenAdditionalDataSinceLastTx.subscriptions, additionalDataSourceIds: this.inbound.seenAdditionalDataSinceLastTx.dataRefs, + goneSubscriptionIds: this.inbound.seenAdditionalDataSinceLastTx.gone, } this.sendMessage(msg) diff --git a/clients/typescript/src/satellite/index.ts b/clients/typescript/src/satellite/index.ts index c2bba05969..b5e72ee0aa 100644 --- a/clients/typescript/src/satellite/index.ts +++ b/clients/typescript/src/satellite/index.ts @@ -22,6 +22,7 @@ import { AdditionalDataCallback, DbRecord, ReplicatedRowTransformer, + GoneBatchCallback, } from '../util/types' import { Shape, @@ -81,7 +82,7 @@ export interface Satellite { clientDisconnect(): void authenticate(token: string): Promise subscribe(shapeDefinitions: Shape[]): Promise - unsubscribe(shapeUuid: string): Promise + unsubscribe(shapeUuids: string[]): Promise setReplicationTransform( tableName: QualifiedTablename, @@ -110,6 +111,8 @@ export interface Client { unsubscribeToTransactions(callback: TransactionCallback): void subscribeToAdditionalData(callback: AdditionalDataCallback): void unsubscribeToAdditionalData(callback: AdditionalDataCallback): void + subscribeToGoneBatch(callback: GoneBatchCallback): void + unsubscribeToGoneBatch(callback: GoneBatchCallback): void enqueueTransaction(transaction: DataTransaction): void getLastSentLsn(): LSN subscribeToOutboundStarted(callback: OutboundStartedCallback): void diff --git a/clients/typescript/src/satellite/mock.ts b/clients/typescript/src/satellite/mock.ts index b2637dd929..8a6a508441 100644 --- a/clients/typescript/src/satellite/mock.ts +++ b/clients/typescript/src/satellite/mock.ts @@ -22,6 +22,9 @@ import { AdditionalDataCallback, ConnectivityState, ReplicatedRowTransformer, + GoneBatchCallback, + DataGone, + DataChangeType, } from '../util/types' import { ElectricConfig } from '../config/index' @@ -92,11 +95,12 @@ export class MockSatelliteProcess implements Satellite { subscribe(_shapeDefinitions: Shape[]): Promise { return Promise.resolve({ + id: 'test', synced: Promise.resolve(), }) } - unsubscribe(_shapeUuid: string): Promise { +
unsubscribe(_shapeUuid: string[]): Promise { throw new Error('Method not implemented.') } @@ -187,6 +191,7 @@ type Events = { [SUBSCRIPTION_ERROR]: (error: SatelliteError, subscriptionId: string) => void outbound_started: OutboundStartedCallback error: (error: SatelliteError) => void + goneBatch: GoneBatchCallback } export class MockSatelliteClient extends AsyncEventEmitter @@ -211,6 +216,7 @@ export class MockSatelliteClient outboundStartedCallback?: OutboundStartedCallback relationData: Record = {} + goneBatches: Record = {} deliverFirst = false @@ -236,6 +242,18 @@ export class MockSatelliteClient data.push(record) } + setGoneBatch( + subscriptionId: string, + batch: { tablename: string; record: DataGone['oldRecord'] }[] + ): void { + this.goneBatches[subscriptionId] = batch.map((x) => ({ + type: DataChangeType.GONE, + tags: [], + relation: this.relations[x.tablename], + oldRecord: x.record, + })) + } + enableDeliverFirst() { this.deliverFirst = true } @@ -308,7 +326,24 @@ export class MockSatelliteClient }) } - unsubscribe(_subIds: string[]): Promise { + unsubscribe(subIds: string[]): Promise { + const gone: DataGone[] = [] + + for (const id of subIds) { + gone.push(...(this.goneBatches[id] ?? [])) + delete this.goneBatches[id] + } + + setTimeout( + () => + this.enqueueEmit( + 'goneBatch', + base64.toBytes(base64.encode('124')), + subIds, + gone + ), + 1 + ) return Promise.resolve({}) } @@ -328,6 +363,14 @@ export class MockSatelliteClient this.removeListener(SUBSCRIPTION_ERROR, errorCallback) } + subscribeToGoneBatch(callback: GoneBatchCallback): void { + this.on('goneBatch', callback) + } + + unsubscribeToGoneBatch(callback: GoneBatchCallback): void { + this.off('goneBatch', callback) + } + subscribeToError(cb: (error: SatelliteError) => void): void { this.on('error', cb) } diff --git a/clients/typescript/src/satellite/oplog.ts b/clients/typescript/src/satellite/oplog.ts index f9abfd7f6b..974bd0225e 100644 --- a/clients/typescript/src/satellite/oplog.ts +++ b/clients/typescript/src/satellite/oplog.ts @@ -389,28 +389,31 @@ function deserialiseRow(str: string, rel: Pick): Rec { }) } +export function extractPK(c: DataChange) { + const columnValues = c.record ? c.record : c.oldRecord! + + return primaryKeyToStr( + Object.fromEntries( + c.relation.columns + .filter((c) => c.primaryKey) + .map((col) => [col.name, columnValues[col.name]!]) + ) + ) +} + export const fromTransaction = ( transaction: DataTransaction, - relations: RelationsCache, + _relations: RelationsCache, namespace: string ): OplogEntry[] => { return transaction.changes.map((t) => { - const columnValues = t.record ? t.record : t.oldRecord! 
- const pk = primaryKeyToStr( - Object.fromEntries( - relations[`${t.relation.table}`].columns - .filter((c) => c.primaryKey) - .map((col) => [col.name, columnValues[col.name]!]) - ) - ) - return { namespace, tablename: t.relation.table, optype: stringToOpType(t.type), newRow: serialiseRow(t.record), oldRow: serialiseRow(t.oldRecord), - primaryKey: pk, + primaryKey: extractPK(t), rowid: -1, // not required timestamp: new Date( transaction.commit_timestamp.toNumber() diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index 3dd732c831..857d055974 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -39,6 +39,7 @@ import { Uuid, DbRecord as DataRecord, ReplicatedRowTransformer, + DataGone, ServerTransaction, } from '../util/types' import { SatelliteOpts } from './config' @@ -49,6 +50,7 @@ import { ShadowEntry, ShadowEntryChanges, encodeTags, + extractPK, fromTransaction, generateTag, getShadowPrimaryKey, @@ -78,12 +80,14 @@ import { decodeUserIdFromToken } from '../auth/secure' import { InvalidArgumentError } from '../client/validation/errors/invalidArgumentError' import Long from 'long' import { QueryBuilder } from '../migrators/query-builder' +import groupBy from 'lodash.groupby' type ChangeAccumulator = { [key: string]: Change } export type ShapeSubscription = { + id: string synced: Promise } @@ -98,6 +102,7 @@ type MetaEntries = { lsn: string | null subscriptions: string seenAdditionalData: string + seenGoneBatch: string } type ConnectRetryHandler = (error: Error, attempt: number) => boolean @@ -344,6 +349,9 @@ export class SatelliteProcess implements Satellite { this._handleClientOutboundStarted.bind(this) this.client.subscribeToOutboundStarted(clientOutboundStartedCallback) + const clientGoneBatchCallback = this._applyGoneBatch.bind(this) + this.client.subscribeToGoneBatch(clientGoneBatchCallback) + const clientSubscriptionDataCallback = this._handleSubscriptionData.bind(this) const clientSubscriptionErrorCallback = @@ -365,6 +373,7 @@ clientSubscriptionDataCallback, clientSubscriptionErrorCallback ) + this.client.unsubscribeToGoneBatch(clientGoneBatchCallback) } } @@ -426,6 +435,7 @@ this.subscriptions.getDuplicatingSubscription(shapeDefinitions) if (existingSubscription !== null && 'inFlight' in existingSubscription) { return { + id: existingSubscription['inFlight'], synced: this.subscriptionNotifiers[existingSubscription.inFlight].promise, } @@ -433,7 +443,10 @@ existingSubscription !== null && 'fulfilled' in existingSubscription ) { - return { synced: Promise.resolve() } + return { + id: existingSubscription['fulfilled'], + synced: Promise.resolve(), + } } // If no exact match found, we try to establish the subscription @@ -483,6 +496,7 @@ } return { + id: subId, synced: subProm, } } catch (error: any) { @@ -490,12 +504,14 @@ } } - async unsubscribe(_subscriptionId: string): Promise { - throw new SatelliteError( - SatelliteErrorCode.INTERNAL, - 'unsubscribe shape not supported' + async unsubscribe(subscriptionIds: string[]): Promise { + await this.client.unsubscribe(subscriptionIds) + + // If the server didn't send an error, we persist the fact that the subscriptions were deleted.
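+ // The actual rows are deleted later, when the server streams the GONE batch + // for these subscriptions and `_applyGoneBatch` applies it.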
+ this.subscriptions.unsubscribe(subscriptionIds) + await this.adapter.run( + this._setMetaStatement('subscriptions', this.subscriptions.serialize()) ) - // return this.subscriptions.unsubscribe(subscriptionId) } async _handleSubscriptionData(subsData: SubscriptionData): Promise { @@ -689,7 +705,7 @@ // TODO: this is obviously too conservative // we should also work on updating subscriptions // atomically on unsubscribe() - await this.subscriptions.unsubscribeAll() + await this.subscriptions.unsubscribeAllAndGC() await this.adapter.runInTransaction( this._setMetaStatement('lsn', null), @@ -1366,7 +1382,7 @@ // update lsn. stmts.push(this.updateLsnStmt(lsn)) - stmts.push(this._resetSeenAdditionalDataStmt()) + stmts.push(this._resetAllSeenStmt()) const processDML = async (changes: DataChange[]) => { const tx = { @@ -1482,6 +1498,67 @@ ]) } + async _applyGoneBatch( + lsn: LSN, + _subscriptionIds: string[], + allGone: DataGone[] + ) { + const fakeOplogEntries: OplogEntry[] = allGone.map( + (x): OplogEntry => ({ + namespace: this.builder.defaultNamespace, + tablename: x.relation.table, + primaryKey: extractPK(x), + optype: 'GONE', + // Fields below don't matter here. + rowid: -1, + timestamp: '', + clearTags: '', + }) + ) + + // Batch-delete shadow entries + const stmts = this.builder.prepareDeleteBatchedStatements( + `DELETE FROM ${this.opts.shadowTable} WHERE `, + ['namespace', 'tablename', 'primaryKey'], + fakeOplogEntries, + this.maxSqlParameters + ) + + const groupedChanges = groupBy(allGone, (x) => x.relation.table) + const affectedTables = Object.keys(groupedChanges).map((x) => + this.builder.makeQT(x) + ) + + // Batch-delete affected rows per table + for (const [table, gone] of Object.entries(groupedChanges)) { + if (gone.length === 0) continue + + const fqtn = this.builder.makeQT(table) + const pkCols = gone[0].relation.columns + .filter((x) => x.primaryKey) + .map((x) => x.name) + + stmts.push( + ...this.builder.prepareDeleteBatchedStatements( + `DELETE FROM ${fqtn} WHERE`, + pkCols, + gone.map((x) => x.oldRecord) as Record[], + this.maxSqlParameters + ) + ) + } + + await this.adapter.runInTransaction( + this.updateLsnStmt(lsn), + { sql: this.builder.deferOrDisableFKsForTx }, + ...this._disableTriggers(affectedTables), + ...stmts, + ...this._enableTriggers(affectedTables) + ) + + await this._notifyChanges(fakeOplogEntries, 'remote') + } + private async maybeGarbageCollect( origin: string, commitTimestamp: Date @@ -1532,6 +1609,17 @@ ] } + _addSeenGoneBatchStmt(subscriptionIds: string[]): Statement { + const meta = `${this.opts.metaTable}` + + return { + sql: `INSERT INTO ${meta} VALUES ('seenGoneBatch', ${this.builder.makePositionalParam( + 1 + )}) ON CONFLICT (key) DO UPDATE SET value = ${meta}.value || ',' || excluded.value`, + args: [subscriptionIds.join(',')], + } + } + _addSeenAdditionalDataStmt(ref: string): Statement { const meta = `${this.opts.metaTable}` const sql = ` INSERT INTO ${meta} VALUES ('seenAdditionalData', ${this.builder.makePositionalParam( 1 )}) ON CONFLICT (key) DO UPDATE SET value = ${meta}.value || ',' || excluded.value ` const args = [ref] return { sql, args } } - _resetSeenAdditionalDataStmt(): Statement { - return this._setMetaStatement('seenAdditionalData', '') + _resetAllSeenStmt( + keys: (keyof MetaEntries)[] = ['seenAdditionalData', 'seenGoneBatch'] + ): Statement { + const whereClause = keys + .map((_, i) => `key = ${this.builder.makePositionalParam(i + 1)}`)
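+ // e.g. `key = $1 OR key = $2` for the two default keys; the exact parameter + // syntax comes from the dialect's `makePositionalParam`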
+ .join(' OR ') + const sql = `UPDATE ${this.opts.metaTable} SET value = '' WHERE ${whereClause}` + + return { sql, args: keys } } _setMetaStatement( diff --git a/clients/typescript/src/satellite/shapes/index.ts index c587f97560..7396351a94 100644 --- a/clients/typescript/src/satellite/shapes/index.ts +++ b/clients/typescript/src/satellite/shapes/index.ts @@ -59,17 +59,20 @@ export interface SubscriptionsManager { * Deletes the subscription(s) from the manager. * @param subId the identifier of the subscription or an array of subscription identifiers */ - unsubscribe( - subId: SubscriptionId | SubscriptionId[] - ): Promise + unsubscribe(subId: SubscriptionId[]): void + + /** + * Deletes the subscriptions from the manager and calls the configured GC function + */ + unsubscribeAndGC(subIds: SubscriptionId[]): Promise /** * Deletes all subscriptions from the manager. Useful to - * reset the state of the manager. + * reset the state of the manager. Calls the configured GC. * Returns the subscription identifiers of all subscriptions * that were deleted. */ - unsubscribeAll(): Promise + unsubscribeAllAndGC(): Promise /** * Converts the state of the manager to a string format that diff --git a/clients/typescript/src/satellite/shapes/manager.ts index 494207a798..6ecc48b188 100644 --- a/clients/typescript/src/satellite/shapes/manager.ts +++ b/clients/typescript/src/satellite/shapes/manager.ts @@ -117,15 +117,13 @@ export class InMemorySubscriptionsManager subs.forEach((sub: SubscriptionId) => this._gcSubscription(sub)) } - /** - * Unsubscribes from one or more subscriptions. - * @param subId A subscription ID or an array of subscription IDs. - */ - async unsubscribe( - subIds: SubscriptionId | SubscriptionId[] - ): Promise { - const ids = Array.isArray(subIds) ? subIds : [subIds] - const shapes: ShapeDefinition[] = ids.flatMap( + unsubscribe(subIds: SubscriptionId[]): void { + // remove all subscriptions from memory + this._gcSubscriptions(subIds) + } + + async unsubscribeAndGC(subIds: SubscriptionId[]): Promise { + const shapes: ShapeDefinition[] = subIds.flatMap( (id) => this.shapesForActiveSubscription(id) ??
diff --git a/clients/typescript/src/satellite/shapes/manager.ts b/clients/typescript/src/satellite/shapes/manager.ts
index 494207a798..6ecc48b188 100644
--- a/clients/typescript/src/satellite/shapes/manager.ts
+++ b/clients/typescript/src/satellite/shapes/manager.ts
@@ -117,15 +117,13 @@ export class InMemorySubscriptionsManager
     subs.forEach((sub: SubscriptionId) => this._gcSubscription(sub))
   }

-  /**
-   * Unsubscribes from one or more subscriptions.
-   * @param subId A subscription ID or an array of subscription IDs.
-   */
-  async unsubscribe(
-    subIds: SubscriptionId | SubscriptionId[]
-  ): Promise<SubscriptionId[]> {
-    const ids = Array.isArray(subIds) ? subIds : [subIds]
-    const shapes: ShapeDefinition[] = ids.flatMap(
+  unsubscribe(subIds: SubscriptionId[]): void {
+    // remove all subscriptions from memory
+    this._gcSubscriptions(subIds)
+  }
+
+  async unsubscribeAndGC(subIds: SubscriptionId[]): Promise<void> {
+    const shapes: ShapeDefinition[] = subIds.flatMap(
       (id) => this.shapesForActiveSubscription(id) ?? []
     )
@@ -134,13 +132,13 @@ export class InMemorySubscriptionsManager
       await this.gcHandler(shapes)
     }
     // also remove all subscriptions from memory
-    this._gcSubscriptions(ids)
-    return ids
+    this.unsubscribe(subIds)
   }

-  unsubscribeAll(): Promise<SubscriptionId[]> {
+  async unsubscribeAllAndGC(): Promise<SubscriptionId[]> {
     const ids = Object.keys(this.fulfilledSubscriptions)
-    return this.unsubscribe(ids)
+    await this.unsubscribeAndGC(ids)
+    return ids
   }

   serialize(): string {
diff --git a/clients/typescript/src/util/proto.ts b/clients/typescript/src/util/proto.ts
index 777a78ddf6..2ca29339c9 100644
--- a/clients/typescript/src/util/proto.ts
+++ b/clients/typescript/src/util/proto.ts
@@ -127,6 +127,8 @@ const msgtypetuples: MappingTuples = {
   SatRpcRequest: [21, Pb.SatRpcRequest],
   SatRpcResponse: [22, Pb.SatRpcResponse],
   SatOpLogAck: [23, Pb.SatOpLogAck],
+  SatUnsubsDataBegin: [24, Pb.SatUnsubsDataBegin],
+  SatUnsubsDataEnd: [25, Pb.SatUnsubsDataEnd],
 }

 const msgtypemapping = Object.fromEntries(
@@ -178,6 +180,8 @@ export type SatPbMsg =
   | Pb.SatShapeDataBegin
   | Pb.SatShapeDataEnd
   | Pb.SatOpLogAck
+  | Pb.SatUnsubsDataBegin
+  | Pb.SatUnsubsDataEnd

 export type SatPbMsgObj<
   Msg extends { $type: string },
@@ -414,6 +418,12 @@ export function msgToString(message: MessageOfInterest): string {
       return `#SatOpLogAck{lsn: ${base64.fromBytes(message.lsn)}, txid: ${
         message.transactionId
       }}`
+    case 'Electric.Satellite.SatUnsubsDataBegin':
+      return `#SatUnsubsDataBegin{lsn: ${base64.fromBytes(message.lsn)}, ${
+        message.subscriptionIds
+      }}`
+    case 'Electric.Satellite.SatUnsubsDataEnd':
+      return `#SatUnsubsDataEnd{}`
   }
 }
diff --git a/clients/typescript/src/util/types.ts b/clients/typescript/src/util/types.ts
index ad0c3b5da8..c2c7b86963 100644
--- a/clients/typescript/src/util/types.ts
+++ b/clients/typescript/src/util/types.ts
@@ -154,6 +154,13 @@ export type DataChange = {
   tags: Tag[]
 }

+export type DataGone = {
+  relation: Relation
+  type: DataChangeType.GONE
+  oldRecord: DbRecord
+  tags: []
+}
+
 export type DataInsert = {
   relation: Relation
   type: DataChangeType.INSERT
@@ -202,9 +209,12 @@ export interface InboundReplication extends Replication {
   additionalData: AdditionalData[]
   unseenAdditionalDataRefs: Set<string>
   incomplete?: 'transaction' | 'additionalData'
+  goneBatch: DataGone[]
+  receivingUnsubsBatch: false | string[]
   seenAdditionalDataSinceLastTx: {
     subscriptions: string[]
     dataRefs: Long[]
+    gone: string[]
   }
 }

@@ -250,6 +260,11 @@ export type IncomingTransactionCallback = (
   AckCb: () => void
 ) => void
 export type OutboundStartedCallback = () => void
+export type GoneBatchCallback = (
+  lsn: LSN,
+  subscriptionIds: string[],
+  changes: DataGone[]
+) => void | Promise<void>

 export type ConnectivityStatus = 'connected' | 'disconnected'
 export type ConnectivityState = {
diff --git a/clients/typescript/test/satellite/process.ts b/clients/typescript/test/satellite/process.ts
index 22179b1e5c..92713a5965 100644
--- a/clients/typescript/test/satellite/process.ts
+++ b/clients/typescript/test/satellite/process.ts
@@ -49,7 +49,10 @@ import { Shape, SubscriptionData } from '../../src/satellite/shapes/types'
 import { mergeEntries } from '../../src/satellite/merge'
 import { MockSubscriptionsManager } from '../../src/satellite/shapes/manager'
 import { AuthState, insecureAuthToken } from '../../src/auth'
-import { ConnectivityStateChangeNotification } from '../../src/notifiers'
+import {
+  ChangeCallback,
+  ConnectivityStateChangeNotification,
+} from '../../src/notifiers'
 import { QueryBuilder } from '../../src/migrators/query-builder'
 import {
SatelliteOpts } from '../../src/satellite/config' @@ -1929,6 +1932,47 @@ export const processTests = (test: TestFn) => { t.deepEqual(results, []) }) + test('GONE batch is applied as DELETEs', async (t) => { + const { client, satellite, adapter } = t.context + const { runMigrations, authState, token } = t.context + await runMigrations() + const tablename = 'parent' + + // relations must be present at subscription delivery + client.setRelations(relations) + client.setRelationData(tablename, parentRecord) + client.setRelationData(tablename, { ...parentRecord, id: 2 }) + + await startSatellite(satellite, authState, token) + + satellite!.relations = relations + const { synced, id } = await satellite.subscribe([{ tablename }]) + await synced + await satellite._performSnapshot() + + const promise = new Promise((r: ChangeCallback) => { + satellite.notifier.subscribeToDataChanges(r) + }) + client.setGoneBatch(id, [ + { tablename, record: { id: 1 } }, + { tablename, record: { id: 2 } }, + ]) + // Send additional data + await satellite.unsubscribe([id]) + + const change = await promise + t.is(change.changes.length, 1) + t.deepEqual(change.changes[0].recordChanges, [ + { primaryKey: { id: 1 }, type: 'GONE' }, + { primaryKey: { id: 2 }, type: 'GONE' }, + ]) + + const results = await adapter.query({ + sql: 'SELECT * FROM parent', + }) + t.deepEqual(results, []) + }) + test('a subscription that failed to apply because of FK constraint triggers GC', async (t) => { const { client, @@ -2270,7 +2314,7 @@ export const processTests = (test: TestFn) => { throw e } - await subsManager.unsubscribeAll() + await subsManager.unsubscribeAllAndGC() // if we reach here, the FKs were not violated // Check that everything was deleted diff --git a/clients/typescript/test/util/subscriptions.test.ts b/clients/typescript/test/util/subscriptions.test.ts index 84f1a8231e..0b9c1dd59d 100644 --- a/clients/typescript/test/util/subscriptions.test.ts +++ b/clients/typescript/test/util/subscriptions.test.ts @@ -78,7 +78,7 @@ test('some tests', (t) => { manager.subscriptionDelivered(subscriptionData) // not active after unsubscribe - manager.unsubscribe(subscriptionId) + manager.unsubscribe([subscriptionId]) t.is(manager.shapesForActiveSubscription(subscriptionId), undefined) // able to subscribe again after unsubscribe diff --git a/components/electric/lib/electric/postgres/extension.ex b/components/electric/lib/electric/postgres/extension.ex index 5c6774bd2d..5a5fad22fa 100644 --- a/components/electric/lib/electric/postgres/extension.ex +++ b/components/electric/lib/electric/postgres/extension.ex @@ -31,6 +31,7 @@ defmodule Electric.Postgres.Extension do @client_shape_subscriptions_relation "client_shape_subscriptions" @client_checkpoints_relation "client_checkpoints" @client_additional_data_relation "client_additional_data" + @client_unsub_points_relation "client_unsub_points" @grants_relation "grants" @roles_relation "roles" @@ -49,6 +50,7 @@ defmodule Electric.Postgres.Extension do @client_shape_subscriptions_table electric.(@client_shape_subscriptions_relation) @client_checkpoints_table electric.(@client_checkpoints_relation) @client_additional_data_table electric.(@client_additional_data_relation) + @client_unsub_points_table electric.(@client_unsub_points_relation) @grants_table electric.(@grants_relation) @roles_table electric.(@roles_relation) @@ -123,6 +125,7 @@ defmodule Electric.Postgres.Extension do def client_shape_subscriptions_table, do: @client_shape_subscriptions_table def client_checkpoints_table, do: 
@client_checkpoints_table def client_additional_data_table, do: @client_additional_data_table + def client_unsub_points_table, do: @client_unsub_points_table def grants_table, do: @grants_table def roles_table, do: @roles_table @@ -379,7 +382,8 @@ defmodule Electric.Postgres.Extension do Migrations.Migration_20240110110200_DropUnusedFunctions, Migrations.Migration_20240205141200_ReinstallTriggerFunctionWriteCorrectMaxTag, Migrations.Migration_20240213160300_DropGenerateElectrifiedSqlFunction, - Migrations.Migration_20240417131000_ClientReconnectionInfoTables + Migrations.Migration_20240417131000_ClientReconnectionInfoTables, + Migrations.Migration_20240501000000_UnsubPoints ] end diff --git a/components/electric/lib/electric/postgres/extension/migrations/20240501000000_client_reconnection_unsub_points.ex b/components/electric/lib/electric/postgres/extension/migrations/20240501000000_client_reconnection_unsub_points.ex new file mode 100644 index 0000000000..2bcd57c7f2 --- /dev/null +++ b/components/electric/lib/electric/postgres/extension/migrations/20240501000000_client_reconnection_unsub_points.ex @@ -0,0 +1,25 @@ +defmodule Electric.Postgres.Extension.Migrations.Migration_20240501000000_UnsubPoints do + alias Electric.Postgres.Extension + + @behaviour Extension.Migration + + @impl true + def version, do: 2024_05_01_00_00_00 + + @impl true + def up(_schema) do + [ + """ + CREATE TABLE #{Extension.client_unsub_points_table()} ( + client_id VARCHAR(64) NOT NULL, + subscription_id UUID NOT NULL, + wal_pos BIGINT NOT NULL, + PRIMARY KEY (client_id, subscription_id) + ) + """ + ] + end + + @impl true + def down(_), do: [] +end diff --git a/components/electric/lib/electric/replication/initial_sync.ex b/components/electric/lib/electric/replication/initial_sync.ex index d5779d4ad2..d9a59a1fa8 100644 --- a/components/electric/lib/electric/replication/initial_sync.ex +++ b/components/electric/lib/electric/replication/initial_sync.ex @@ -139,8 +139,9 @@ defmodule Electric.Replication.InitialSync do {:ok, schema_version} = Extension.SchemaCache.load(origin) run_in_readonly_txn_with_checkpoint(opts, {ref, parent}, marker, fn conn, xmin -> - Enum.reduce_while(subquery_map, {Graph.new(), %{}}, fn {layer, changes}, - {acc_graph, results} -> + Enum.reduce_while(subquery_map, {MapSet.new(), Graph.new(), %{}}, fn {layer, changes}, + {req_ids, acc_graph, + results} -> case Shapes.ShapeRequest.query_moved_in_layer_data( conn, layer, @@ -151,7 +152,7 @@ defmodule Electric.Replication.InitialSync do ) do {:ok, _, data, graph} -> {:cont, - {Utils.merge_graph_edges(acc_graph, graph), + {MapSet.put(req_ids, layer.request_id), Utils.merge_graph_edges(acc_graph, graph), Map.merge(results, data, fn _, {change, v1}, {_, v2} -> {change, v1 ++ v2} end)}} {:error, reason} -> diff --git a/components/electric/lib/electric/replication/shapes/querying.ex b/components/electric/lib/electric/replication/shapes/querying.ex index 1eeb90d43a..413366fb7e 100644 --- a/components/electric/lib/electric/replication/shapes/querying.ex +++ b/components/electric/lib/electric/replication/shapes/querying.ex @@ -12,10 +12,13 @@ defmodule Electric.Replication.Shapes.Querying do alias Electric.Replication.Eval alias Electric.Replication.Shapes.ChangeProcessing alias Electric.Replication.Shapes.ShapeRequest.Layer + alias Electric.Replication.Shapes.SentRowsGraph alias Electric.Utils - @type results :: %{Layer.graph_key() => {Changes.change(), [String.t(), ...]}} + @type results :: %{ + SentRowsGraph.row_id() => {Changes.change(), request_ids :: 
[String.t(), ...]} + } @doc """ Query PostgreSQL for data which corresponds to this layer. @@ -271,7 +274,7 @@ defmodule Electric.Replication.Shapes.Querying do do: Enum.any?(key, &is_nil(Map.fetch!(record, &1))) @spec rows_to_changes_with_tags([tuple()], [String.t(), ...], Layer.t(), String.t()) :: - [{Layer.graph_key(), Changes.NewRecord.t()}] + [{SentRowsGraph.row_id(), Changes.NewRecord.t()}] defp rows_to_changes_with_tags( rows, col_names, diff --git a/components/electric/lib/electric/replication/shapes/sent_rows_graph.ex b/components/electric/lib/electric/replication/shapes/sent_rows_graph.ex new file mode 100644 index 0000000000..766125b839 --- /dev/null +++ b/components/electric/lib/electric/replication/shapes/sent_rows_graph.ex @@ -0,0 +1,183 @@ +defmodule Electric.Replication.Shapes.SentRowsGraph do + @moduledoc """ + Module responsible for operations over the sent rows graph. + + We're keeping track of sent rows in a graph that has all references between rows, and + why exactly they were sent. + + Graph nodes are row identifiers: pairs of a row relation (schema name and table name) + and row primary key (a list in the order of the PK columns serialized as strings). + + Graph edges are more interesting: they are following the directions of shape requests + over both one-to-many and many-to-one foreign keys on rows. When a row is added to the + graph, it's linked to the "parent" that causes this row to be present in one of the shapes + (or a special `:root` node for first-layer rows), and the edge between the "parent" and + the new row is labeled with the unique key of the layer that allowed this to happen. A row + can be linked to the same parent with multiple edges, each labeled with a different key + if they are parts of multiple shapes. + + Graph edge key must be a 2-tuple where the first element is a "request id" - a unique ID + of a requested shape within a connection and the second element is a deterministic value + identifying the layer that caused this edge to exist. + """ + alias Electric.Replication.Shapes.ShapeRequest + + @type row_id :: {ShapeRequest.relation(), [String.t(), ...]} + + @doc """ + Remove all edges from the sent rows graph that were added because of a given request id, + returning deleted vertices that have no more incoming edges. 
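+
+  An edge is removed when the request-id half of its label is in the given set.
+  A vertex is popped only once every one of its incoming edges matches; if some
+  edges came from other requests, only the matching edges are deleted and the
+  vertex stays reachable for the remaining requests.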
+
+  ## Examples
+
+      iex> {popped_vertices, new_graph} =
+      ...>   Graph.new()
+      ...>   |> Graph.add_edge(:root, :v1, label: {"r1", "l1"})
+      ...>   |> Graph.add_edge(:root, :v1, label: {"r2", "l1"})
+      ...>   |> Graph.add_edge(:v1, :v2, label: {"r1", "l2"})
+      ...>   |> pop_by_request_ids("r1")
+      iex> popped_vertices
+      [:v2]
+      iex> new_graph
+      #Graph<type: directed, vertices: [:root, :v1], edges: [:root -> :v1]>
+
+      iex> {popped_vertices, new_graph} =
+      ...>   Graph.new()
+      ...>   |> Graph.add_edge(:root, :v1, label: {"r1", "l1"})
+      ...>   |> Graph.add_edge(:root, :v1, label: {"r2", "l1"})
+      ...>   |> Graph.add_edge(:v1, :v2, label: {"r1", "l2"})
+      ...>   |> pop_by_request_ids(["r1", "r2"])
+      iex> popped_vertices
+      [:v2, :v1]
+      iex> new_graph
+      #Graph<type: directed, vertices: [:root], edges: []>
+
+      iex> {popped_vertices, new_graph} =
+      ...>   Graph.new()
+      ...>   |> pop_by_request_ids(["r1", "r2"])
+      iex> popped_vertices
+      []
+      iex> new_graph
+      #Graph<type: directed, vertices: [], edges: []>
+  """
+  @spec pop_by_request_ids(Graph.t(), String.t() | [String.t()] | MapSet.t(String.t()), keyword()) ::
+          {[row_id()], Graph.t()}
+  def pop_by_request_ids(graph, request_id_or_ids, opts \\ [])
+
+  def pop_by_request_ids(graph, [], _), do: {[], graph}
+
+  def pop_by_request_ids(graph, id_or_ids, opts) when is_binary(id_or_ids) or is_list(id_or_ids),
+    do: pop_by_request_ids(graph, MapSet.new(List.wrap(id_or_ids)), opts)
+
+  def pop_by_request_ids(graph, %MapSet{} = request_ids, opts) do
+    root_vertex = Keyword.get(opts, :root_vertex, :root)
+
+    if Graph.has_vertex?(graph, root_vertex),
+      do: do_pop_by_request_id(graph, request_ids, root_vertex),
+      else: {[], graph}
+  end
+
+  defp do_pop_by_request_id(%Graph{} = graph, %MapSet{} = request_ids, root_vertex) do
+    predicate = fn {id, _} -> MapSet.member?(request_ids, id) end
+
+    {edges, vertices} =
+      dfs_traverse(
+        [Graph.Utils.vertex_id(root_vertex)],
+        graph,
+        {[], []},
+        fn
+          ^root_vertex, _, acc ->
+            {:next, acc}
+
+          v, incoming_edges, {edges, vertices} ->
+            incoming_edges
+            |> Enum.flat_map(fn {source_v, meta} ->
+              Enum.map(Map.keys(meta), &{source_v, v, &1})
+            end)
+            |> Enum.split_with(&predicate.(elem(&1, 2)))
+            |> case do
+              {_all_edges, []} ->
+                # If all incoming edges match the request ID, we'll pop the vertex
+                {:next, {edges, [v | vertices]}}
+
+              {new_edges, _rest} ->
+                # If some incoming edges are unaffected, we'll pop the edges explicitly
+                {:next, {new_edges ++ edges, vertices}}
+            end
+        end,
+        fn meta -> any_key_matches_predicate?(meta, predicate) end
+      )
+
+    graph =
+      edges
+      |> Enum.reduce(graph, fn {v1, v2, label}, acc -> Graph.delete_edge(acc, v1, v2, label) end)
+      |> Graph.delete_vertices(vertices)
+
+    {vertices, graph}
+  end
+
+  defp any_key_matches_predicate?(map, predicate) when is_map(map),
+    do: any_key_matches_predicate?(:maps.iterator(map), predicate)
+
+  defp any_key_matches_predicate?(iter, predicate) do
+    case :maps.next(iter) do
+      {k, _, iter} ->
+        if predicate.(k), do: true, else: any_key_matches_predicate?(iter, predicate)
+
+      :none ->
+        false
+    end
+  end
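+  # A rough usage sketch (hypothetical ids), assuming edges were labeled with
+  # {request_id, layer_key} tuples as described in the moduledoc:
+  #
+  #     {gone_rows, graph} = SentRowsGraph.pop_by_request_ids(graph, ["req-1"])
+  #     # gone_rows :: [row_id()] -- rows no longer justified by any remaining request
+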
+  @doc false
+  def dfs_traverse(
+        vertices,
+        graph,
+        acc,
+        fun,
+        edge_predicate_fun,
+        visited \\ MapSet.new()
+      )
+
+  def dfs_traverse(
+        [v_id | rest],
+        %Graph{out_edges: oe, in_edges: ie, vertices: vs, edges: e} = g,
+        acc,
+        fun,
+        edge_predicate_fun,
+        visited
+      )
+      when is_function(fun, 3) and is_function(edge_predicate_fun, 1) do
+    if MapSet.member?(visited, v_id) do
+      dfs_traverse(rest, g, acc, fun, edge_predicate_fun, visited)
+    else
+      v = Map.get(vs, v_id)
+      in_edges = Enum.map(Map.get(ie, v_id, []), &{Map.fetch!(vs, &1), Map.fetch!(e, {&1, v_id})})
+
+      case fun.(v, in_edges, acc) do
+        {:next, acc2} ->
+          visited = MapSet.put(visited, v_id)
+
+          out =
+            oe
+            |> Map.get(v_id, MapSet.new())
+            |> Enum.filter(&edge_predicate_fun.(Map.fetch!(e, {v_id, &1})))
+            |> Enum.sort_by(fn id -> Graph.Utils.edge_weight(g, v_id, id) end)
+
+          dfs_traverse(out ++ rest, g, acc2, fun, edge_predicate_fun, visited)
+
+        {:skip, acc2} ->
+          # Skip this vertex and its out-neighbors
+          visited = MapSet.put(visited, v_id)
+          dfs_traverse(rest, g, acc2, fun, edge_predicate_fun, visited)
+
+        {:halt, acc2} ->
+          acc2
+      end
+    end
+  end
+
+  def dfs_traverse([], _g, acc, _, _, _) do
+    acc
+  end
+end
diff --git a/components/electric/lib/electric/replication/shapes/shape_request.ex b/components/electric/lib/electric/replication/shapes/shape_request.ex
index 4d1ac1ea6e..2932188769 100644
--- a/components/electric/lib/electric/replication/shapes/shape_request.ex
+++ b/components/electric/lib/electric/replication/shapes/shape_request.ex
@@ -143,9 +143,14 @@ defmodule Electric.Replication.Shapes.ShapeRequest do
         origin,
         context
       ) do
-    # We're converting these records to a list of keys to query next layers on
-    curr_records =
-      Enum.map(moved_in_records, fn {id, record} -> {id, %Changes.NewRecord{record: record}} end)
+    # We're converting these records to a list of keys to query next layers on and building a fake-rooted graph.
+    # We're "rooting" the top layer of records so that `SentRowsGraph` functions know where to start traversal.
+    # It's important to get rid of that fake root later.
+    {curr_records, graph} =
+      Enum.map_reduce(moved_in_records, Graph.new(), fn {id, record}, acc ->
+        {{id, %Changes.NewRecord{record: record}},
+         Graph.add_edge(acc, :fake_root, id, label: layer.key)}
+      end)

     # We only need to follow one-to-many relations here from the already-fetched rows
     filtered_layer = %Layer{
@@ -159,7 +164,8 @@ defmodule Electric.Replication.Shapes.ShapeRequest do
       schema_version,
       origin,
       context,
-      curr_records
+      curr_records,
+      graph
     )
   end
 end
diff --git a/components/electric/lib/electric/satellite/client_reconnection_info.ex b/components/electric/lib/electric/satellite/client_reconnection_info.ex
index 043716c396..275c530247 100644
--- a/components/electric/lib/electric/satellite/client_reconnection_info.ex
+++ b/components/electric/lib/electric/satellite/client_reconnection_info.ex
@@ -154,13 +154,15 @@ defmodule Electric.Satellite.ClientReconnectionInfo do
       client_checkpoints_table: 0,
       client_shape_subscriptions_table: 0,
       client_additional_data_table: 0,
-      client_actions_table: 0
+      client_actions_table: 0,
+      client_unsub_points_table: 0
     ]

   alias Electric.Postgres.CachedWal
   alias Electric.Postgres.Repo.Client
   alias Electric.Replication.Connectors
   alias Electric.Replication.Changes.Transaction
+  alias Electric.Replication.Shapes.SentRowsGraph
   alias Electric.Replication.Shapes
   alias Electric.Replication.Shapes.ShapeRequest
   alias Electric.Utils
@@ -198,6 +200,10 @@ defmodule Electric.Satellite.ClientReconnectionInfo do
           source_txns :: [non_neg_integer(), ...]}
   @type additional_data_row :: additional_data_sub_row() | additional_data_txn_row()

+  @unsubscribe_points_ets :unsubscribe_points
+  @type unsubscribe_points_row ::
+          {{client_id(), sub_id :: String.t(), wal_pos :: non_neg_integer()}}
+
   def start_link(connector_config) do
     origin = Connectors.origin(connector_config)
     GenServer.start_link(__MODULE__, connector_config, name: name(origin))
@@ -225,7 +231,8 @@ defmodule Electric.Satellite.ClientReconnectionInfo do
         client_shape_subscriptions_table(),
         client_checkpoints_table(),
         client_actions_table(),
-        client_additional_data_table()
+        client_additional_data_table(),
+ client_unsub_points_table() ], &Client.query!("DELETE FROM #{&1} WHERE client_id = $1", [client_id]) ) @@ -234,58 +241,44 @@ defmodule Electric.Satellite.ClientReconnectionInfo do :ok end - defp clear_all_ets_data(client_id) do + @doc false + def clear_all_ets_data(client_id) do :ets.match_delete(@actions_ets, {{client_id, :_}, :_}) :ets.match_delete(@subscriptions_ets, {{client_id, :_}, :_, :_, :_}) :ets.match_delete(@additional_data_ets, {{client_id, :_, :_, :_, :_}, :_, :_}) + :ets.match_delete(@unsubscribe_points_ets, {{client_id, :_, :_}}) :ets.delete(@checkpoint_ets, client_id) end - def fetch_subscriptions(client_id, subscription_ids) - when is_list(subscription_ids) and subscription_ids != [] do - :ets.select(@subscriptions_ets, [ - {{{client_id, :"$1"}, :_, :"$2", :_}, [ids_ms_guard(subscription_ids)], [{{:"$1", :"$2"}}]} - ]) - end + @doc """ + List all subscriptions that can be continued by a client. + """ + def fetch_subscriptions(client_id, ids, lsn, unsub_ids) when is_list(ids) and ids != [] do + subscription_ets_ms = Enum.map(ids, &{{{client_id, &1}, :_, :"$1", :_}, [], [{{&1, :"$1"}}]}) + results = :ets.select(@subscriptions_ets, subscription_ets_ms) - def delete_subscriptions(origin, client_id, subscription_ids) - when is_list(subscription_ids) and subscription_ids != [] do - ms_guard = ids_ms_guard(subscription_ids) + seen_unsub_batches = + MapSet.new(observed_unsub_points(client_id, lsn, unsub_ids), &elem(&1, 0)) - :ets.select_delete(@subscriptions_ets, [ - {{{client_id, :"$1"}, :_, :_, :_}, [ms_guard], [true]} - ]) + Enum.reject(results, fn {id, _} -> MapSet.member?(seen_unsub_batches, id) end) + end - :ets.select_delete(@additional_data_ets, [ - {{{client_id, :_, :_, :subscription, :"$1"}, :_, :_}, [ms_guard], [true]} - ]) + defp delete_subscriptions(client_id, ids) when is_list(ids) and ids != [] do + subscription_ets_ms = Enum.map(ids, &{{{client_id, &1}, :_, :_, :_}, [], [true]}) + :ets.select_delete(@subscriptions_ets, subscription_ets_ms) - Client.pooled_transaction(origin, fn -> - subs_uuids = Enum.map(subscription_ids, &encode_uuid/1) + additional_data_ms = + Enum.map(ids, &{{{client_id, :_, :_, :subscription, &1}, :_, :_}, [], [true]}) - Enum.each( - [client_shape_subscriptions_table(), client_additional_data_table()], - &Client.query!("DELETE FROM #{&1} WHERE client_id = $1 AND subscription_id = ANY($2)", [ - client_id, - subs_uuids - ]) - ) - end) + :ets.select_delete(@additional_data_ets, additional_data_ms) - :ok - rescue - exception -> - # Clear in-memory cache for the client to force its reloading from the database when the - # client reconnects. 
- clear_all_ets_data(client_id) - reraise exception, __STACKTRACE__ - end - - defp ids_ms_guard([id]), do: {:"=:=", :"$1", id} + unsubscribe_points_ms = Enum.map(ids, &{{{client_id, &1, :_}}, [], [true]}) + :ets.select_delete(@unsubscribe_points_ets, unsubscribe_points_ms) - defp ids_ms_guard([_id1, _id2 | _] = ids) do - id_matches = Enum.map(ids, &{:"=:=", :"$1", &1}) - List.to_tuple([:or | id_matches]) + %{} + |> merge_discarded({@subscriptions_ets, {client_id, {:sub_ids, ids}}}) + |> merge_discarded({@additional_data_ets, {client_id, {:sub_ids, ids}}}) + |> merge_discarded({@unsubscribe_points_ets, {client_id, {:sub_ids, ids}}}) end @doc """ @@ -350,6 +343,7 @@ defmodule Electric.Satellite.ClientReconnectionInfo do new_wal_pos = Keyword.fetch!(opts, :ack_point) txids = Keyword.fetch!(opts, :including_data) subscription_ids = Keyword.fetch!(opts, :including_subscriptions) + unsub_id_list = Keyword.fetch!(opts, :including_unsubscribes) cached_wal_impl = Keyword.get(opts, :cached_wal_impl, CachedWal.EtsBacked) origin = Keyword.fetch!(opts, :origin) advance_graph_fn = Keyword.fetch!(opts, :advance_graph_using) @@ -368,7 +362,14 @@ defmodule Electric.Satellite.ClientReconnectionInfo do ) {new_graph, pending_actions, count, discarded_acc} = - advance_up_to_new_wal_pos(graph, advance_graph_fn, client_id, txn_stream) + advance_up_to_new_wal_pos( + graph, + advance_graph_fn, + client_id, + new_wal_pos, + unsub_id_list, + txn_stream + ) Logger.debug( "Advancing graph for #{inspect(client_id)} from #{inspect(acked_wal_pos)} to #{inspect(new_wal_pos)} by #{count} txns" @@ -387,7 +388,7 @@ defmodule Electric.Satellite.ClientReconnectionInfo do store_client_actions(pending_actions) if opts[:purge_additional_data] do - purge_additional_data_for_client(client_id) + purge_additional_data_for_client(client_id, subscription_ids) end end) @@ -424,8 +425,9 @@ defmodule Electric.Satellite.ClientReconnectionInfo do has the same effect as if PG is overloaded and just took a lot of time to process our readonly querying. """ - @spec advance_on_reconnection!(any(), any()) :: - {:ok, Graph.t(), Shapes.action_context()} | {:error, term()} + @spec advance_on_reconnection!(binary(), Keyword.t()) :: + {:ok, Graph.t(), Shapes.action_context(), {[String.t()], [term()]} | nil} + | {:error, term()} def advance_on_reconnection!(client_id, opts) do # We need to remove all additional data "in the future", but # execute actions that were seen but not fulfilled that way. @@ -444,23 +446,92 @@ defmodule Electric.Satellite.ClientReconnectionInfo do Shapes.merge_actions_for_tx(acc, actions, txid) end) - {:ok, new_graph, actions} + # If there are any not-continued unacknowledged unsubscriptions left, remove rows from the graph + # and prepare a GONE batch. + case :ets.select(@unsubscribe_points_ets, [{{{client_id, :"$1", :_}}, [], [:"$1"]}]) do + [] -> + {:ok, new_graph, actions, nil} + + unsent_unsub_ids -> + request_ids = request_ids_for_subscriptions(client_id, unsent_unsub_ids) + + {gone_nodes, new_graph} = SentRowsGraph.pop_by_request_ids(new_graph, request_ids) + + {:ok, new_graph, actions, {unsent_unsub_ids, gone_nodes}} + end + end + end + + @spec observed_unsub_points(client_id(), non_neg_integer(), [String.t()]) :: [ + {String.t(), non_neg_integer()} + ] + defp observed_unsub_points(client_id, wal_pos, observed_unsub_batches) do + # Return all points before acked LSN, and all at acked LSN that have been explicitly observed. 
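+    # The first match spec selects every {sub_id, wal_pos} pair strictly below
+    # `wal_pos`; the per-id specs additionally pick up points sitting exactly at
+    # `wal_pos` that the client reported as observed.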
+ matches = + [ + {{{client_id, :"$1", :"$2"}}, [{:<, :"$2", wal_pos}], [{{:"$1", :"$2"}}]} + | Enum.map(observed_unsub_batches, &{{{client_id, &1, wal_pos}}, [], [{{&1, wal_pos}}]}) + ] + + :ets.select(@unsubscribe_points_ets, matches) + end + + defp pop_valid_unsub_points(client_id, new_wal_pos, observed_unsub_batches) do + points = observed_unsub_points(client_id, new_wal_pos, observed_unsub_batches) + + :ets.select_delete( + @unsubscribe_points_ets, + Enum.map(points, fn {id, wal_pos} -> {{{client_id, id, wal_pos}}, [], [true]} end) + ) + + Enum.map(points, &elem(&1, 0)) + end + + defp prune_active_subs(client_id, new_wal_pos, observed_unsub_batches) do + relevant_unsubs = pop_valid_unsub_points(client_id, new_wal_pos, observed_unsub_batches) + + client_id + |> list_subscriptions() + |> Enum.split_with(fn {_, sub_id, _} -> sub_id in relevant_unsubs end) + |> case do + {[], subs} -> + {subs, [], %{}} + + {unsubs, subs} -> + removed_request_ids = + Enum.flat_map(unsubs, fn {_, _, requests} -> Enum.map(requests, & &1.id) end) + + discarded = delete_subscriptions(client_id, Enum.map(unsubs, &elem(&1, 1))) + + {subs, removed_request_ids, discarded} end end # We're essentially advancing the graph until next fully acknowledged transaction + # subscription data - defp advance_up_to_new_wal_pos(graph, advance_graph_fn, client_id, txn_stream) do - subs = list_subscriptions(client_id) - - txn_stream - |> Enum.reduce({graph, %{}, 0, %{}}, fn %Transaction{} = txn, - {graph, pending_actions, count, discarded_acc} -> + defp advance_up_to_new_wal_pos( + graph, + advance_graph_fn, + client_id, + new_wal_pos, + unsub_batches, + txn_stream + ) do + {subs, removed_request_ids, discarded_acc} = + prune_active_subs(client_id, new_wal_pos, unsub_batches) + + {_, graph} = SentRowsGraph.pop_by_request_ids(graph, removed_request_ids) + + state = {graph, %{}, 0, discarded_acc} + + Enum.reduce(txn_stream, state, fn %Transaction{} = txn, + {graph, pending_actions, count, discarded_acc} -> {graph, pending_actions, discarded_acc} = client_id |> pop_additional_data_before(txn.xid) |> Enum.reduce({graph, pending_actions, discarded_acc}, fn {:transaction, diff, included_txns}, {graph, pending_actions, discarded_acc} -> + {_, diff} = SentRowsGraph.pop_by_request_ids(diff, removed_request_ids) graph = merge_in_graph_diff(graph, diff) popped_actions = clear_stored_actions(client_id, included_txns) @@ -470,6 +541,9 @@ defmodule Electric.Satellite.ClientReconnectionInfo do {graph, pending_actions, merge_discarded(discarded_acc, popped_actions)} {:subscription, diff, []}, {graph, pending_actions, discarded_acc} -> + # This cannot be a diff for an unsubbed subscription, because they were + # deleted in `prune_active_subs/3`. Each sub diff carries only data for + # subscription itself, so we don't need to pop rows when other subs are gone. 
graph = merge_in_graph_diff(graph, diff) {graph, pending_actions, discarded_acc} end) @@ -524,8 +598,8 @@ defmodule Electric.Satellite.ClientReconnectionInfo do defp get_active_shapes_for_txid(all_subs, txid) do all_subs - |> Enum.take_while(fn {xmin, _} -> xmin < txid end) - |> Enum.flat_map(&elem(&1, 1)) + |> Enum.take_while(fn {xmin, _, _} -> xmin < txid end) + |> Enum.flat_map(&elem(&1, 2)) end defp merge_in_graph_diff(graph, diff), do: Utils.merge_graph_edges(graph, diff) @@ -595,10 +669,24 @@ defmodule Electric.Satellite.ClientReconnectionInfo do DELETE FROM #{client_additional_data_table()} WHERE client_id = $1 """ - defp purge_additional_data_for_client(client_id) do - :ets.match_delete(@additional_data_ets, {{client_id, :_, :_, :_, :_}, :_, :_}) + @delete_unsubscribe_points_query """ + DELETE FROM #{client_unsub_points_table()} WHERE client_id = $1 AND subscription_id = ANY($2) + """ + defp purge_additional_data_for_client(client_id, continued_subscriptions) do + :ets.match_delete(@additional_data_ets, {{client_id, :_, :_, :_, :_}, :_, :_}) Client.query!(@delete_additional_data_for_client_query, [client_id]) + + # If the client continued any subscriptions without acknowledging the GONE batch, + # remove the unsubscribe points for those. + continued_subscriptions + |> Enum.map(&{{{client_id, &1, :_}}, [], [true]}) + |> then(&:ets.select_delete(@unsubscribe_points_ets, &1)) + + Client.query!(@delete_unsubscribe_points_query, [ + client_id, + Enum.map(continued_subscriptions, &encode_uuid/1) + ]) end @insert_subscription_query """ @@ -640,10 +728,17 @@ defmodule Electric.Satellite.ClientReconnectionInfo do defp list_subscriptions(client_id) do :ets.select(@subscriptions_ets, [ - {{{client_id, :_}, :"$1", :"$2", :_}, [], [{{:"$1", :"$2"}}]} + {{{client_id, :"$3"}, :"$1", :"$2", :_}, [], [{{:"$1", :"$3", :"$2"}}]} ]) end + defp request_ids_for_subscriptions(client_id, subscription_ids) do + subscription_ids + |> Enum.map(&{{{client_id, &1}, :_, :"$1", :_}, [], [:"$1"]}) + |> then(&:ets.select(@subscriptions_ets, &1)) + |> Enum.flat_map(fn x -> Enum.map(x, & &1.id) end) + end + @insert_subscription_data_query """ INSERT INTO #{client_additional_data_table()}( @@ -729,6 +824,33 @@ defmodule Electric.Satellite.ClientReconnectionInfo do reraise exception, __STACKTRACE__ end + @insert_unsub_points_query """ + INSERT INTO #{client_unsub_points_table()} + (client_id, wal_pos, subscription_id) + SELECT $1, $2, unnest($3::uuid[]) + """ + + @doc """ + Store client-requested unsubscribe until the client acknowledges it. + """ + def unsubscribe(origin, client_id, ids, wal_pos) when is_list(ids) and ids != [] do + :ets.insert(@unsubscribe_points_ets, Enum.map(ids, &{{client_id, &1, wal_pos}})) + + Client.pooled_query!(origin, @insert_unsub_points_query, [ + client_id, + wal_pos, + Enum.map(ids, &encode_uuid/1) + ]) + + :ok + rescue + exception -> + # Clear in-memory cache for the client to force its reloading from the database when the + # client reconnects. + clear_all_ets_data(client_id) + reraise exception, __STACKTRACE__ + end + @doc """ Restore client reconnection info cache from the database. 
@@ -783,9 +905,9 @@ defmodule Electric.Satellite.ClientReconnectionInfo do Map.update( acc, @additional_data_ets, - {client_id, [xmin], [order], nil}, - fn {^client_id, xmins, orders, txid} -> - {client_id, [xmin | xmins], [order | orders], txid} + {client_id, [xmin], [order], nil, []}, + fn {^client_id, xmins, orders, txid, sub_ids} -> + {client_id, [xmin | xmins], [order | orders], txid, sub_ids} end ) end @@ -794,13 +916,45 @@ defmodule Electric.Satellite.ClientReconnectionInfo do Map.update( acc, @additional_data_ets, - {client_id, [], [], new_txid}, - fn {^client_id, xmins, orders, txid} -> - {client_id, xmins, orders, max(new_txid, txid)} + {client_id, [], [], new_txid, []}, + fn {^client_id, xmins, orders, txid, sub_ids} -> + {client_id, xmins, orders, max(new_txid, txid), sub_ids} + end + ) + end + + defp merge_discarded(acc, {@additional_data_ets, {client_id, {:sub_ids, new_sub_ids}}}) + when is_list(new_sub_ids) do + Map.update( + acc, + @additional_data_ets, + {client_id, [], [], nil, new_sub_ids}, + fn {^client_id, xmins, orders, txid, sub_ids} -> + {client_id, xmins, orders, txid, sub_ids ++ new_sub_ids} end ) end + defp merge_discarded(acc, {@subscriptions_ets, {client_id, {:sub_ids, new_sub_ids}}}) + when is_list(new_sub_ids) do + Map.update( + acc, + @subscriptions_ets, + {client_id, new_sub_ids}, + fn {^client_id, sub_ids} -> {client_id, sub_ids ++ new_sub_ids} end + ) + end + + defp merge_discarded(acc, {@unsubscribe_points_ets, {client_id, {:sub_ids, new_sub_ids}}}) + when is_list(new_sub_ids) do + Map.update( + acc, + @unsubscribe_points_ets, + {client_id, new_sub_ids}, + fn {^client_id, sub_ids} -> {client_id, sub_ids ++ new_sub_ids} end + ) + end + @delete_actions_for_xids_query """ DELETE FROM #{client_actions_table()} @@ -815,9 +969,20 @@ defmodule Electric.Satellite.ClientReconnectionInfo do client_id = $1 AND ( (min_txid, ord) = ANY(SELECT * FROM unnest($2::xid8[], $3::bigint[])) OR coalesce(min_txid <= $4, false) + OR subscription_id = ANY($5::text[]::uuid[]) ) """ + @delete_subscriptions_query """ + DELETE FROM #{client_shape_subscriptions_table()} + WHERE client_id = $1 AND subscription_id = ANY($2::text[]::uuid[]) + """ + + @delete_unsubscribe_points_query """ + DELETE FROM #{client_unsub_points_table()} + WHERE client_id = $1 AND subscription_id = ANY($2::text[]::uuid[]) + """ + # Given the accumulator of discarded ETS entries, issue one DELETE statement per table to # remove all discarded entries from the database. 
  #
@@ -827,8 +992,14 @@ defmodule Electric.Satellite.ClientReconnectionInfo do
      {@actions_ets, {client_id, txids}} ->
        Client.query!(@delete_actions_for_xids_query, [client_id, txids])

-      {@additional_data_ets, {client_id, xmins, orders, txid}} ->
-        Client.query!(@delete_additional_data_query, [client_id, xmins, orders, txid])
+      {@additional_data_ets, {client_id, xmins, orders, txid, sub_ids}} ->
+        Client.query!(@delete_additional_data_query, [client_id, xmins, orders, txid, sub_ids])
+
+      {@subscriptions_ets, {client_id, sub_ids}} ->
+        Client.query!(@delete_subscriptions_query, [client_id, sub_ids])
+
+      {@unsubscribe_points_ets, {client_id, sub_ids}} ->
+        Client.query!(@delete_unsubscribe_points_query, [client_id, sub_ids])
    end)
  end

@@ -840,13 +1011,15 @@ defmodule Electric.Satellite.ClientReconnectionInfo do
     subscriptions_table = :ets.new(@subscriptions_ets, [:named_table, :public, :ordered_set])
     additional_data_table = :ets.new(@additional_data_ets, [:named_table, :public, :ordered_set])
     actions_table = :ets.new(@actions_ets, [:named_table, :public, :set])
+    unsub_points_table = :ets.new(@unsubscribe_points_ets, [:named_table, :public, :ordered_set])

     state = %{
       origin: Connectors.origin(connector_config),
       checkpoint_table: checkpoint_table,
       subscriptions_table: subscriptions_table,
       additional_data_table: additional_data_table,
-      actions_table: actions_table
+      actions_table: actions_table,
+      unsub_points_table: unsub_points_table
     }

     # Restore cached info for all clients at initialisation time.
@@ -870,16 +1043,19 @@ defmodule Electric.Satellite.ClientReconnectionInfo do
     with {:ok, checkpoints} <- load_checkpoints(client_ids),
          {:ok, subscriptions} <- load_subscriptions(client_ids),
          {:ok, additional_data} <- load_additional_data(client_ids),
-         {:ok, actions} <- load_actions(client_ids) do
+         {:ok, actions} <- load_actions(client_ids),
+         {:ok, unsub_points} <- load_unsub_points(client_ids) do
       :ets.insert(state.checkpoint_table, checkpoints)
       :ets.insert(state.subscriptions_table, subscriptions)
       :ets.insert(state.additional_data_table, additional_data)
       :ets.insert(state.actions_table, actions)
+      :ets.insert(state.unsub_points_table, unsub_points)

       Logger.debug("Restored #{length(checkpoints)} cached client_checkpoints")
       Logger.debug("Restored #{length(subscriptions)} cached client_shape_subscriptions")
       Logger.debug("Restored #{length(additional_data)} cached client_additional_data records")
       Logger.debug("Restored #{length(actions)} cached client_actions")
+      Logger.debug("Restored #{length(unsub_points)} cached client_unsub_points")

       :ok
     end
@@ -937,6 +1113,18 @@ defmodule Electric.Satellite.ClientReconnectionInfo do
     end
   end

+  @load_unsub_points_query "SELECT * FROM #{client_unsub_points_table()}"
+  defp load_unsub_points(client_ids) do
+    with {:ok, {_cols, rows}} <- scoped_query(@load_unsub_points_query, client_ids) do
+      tuples =
+        Enum.map(rows, fn [client_id, subs_id, wal_pos] ->
+          {{client_id, decode_uuid(subs_id), wal_pos}}
+        end)
+
+      {:ok, tuples}
+    end
+  end
+
   # Perform a query that can be scoped by `client_ids`.
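+  # (Passing `nil` below runs the query unscoped, i.e. for all clients at once.)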
defp scoped_query(query, nil) do Client.query(query, []) diff --git a/components/electric/lib/electric/satellite/protobuf.ex b/components/electric/lib/electric/satellite/protobuf.ex index e3256e56e7..ef56d517f4 100644 --- a/components/electric/lib/electric/satellite/protobuf.ex +++ b/components/electric/lib/electric/satellite/protobuf.ex @@ -12,7 +12,9 @@ defmodule Electric.Satellite.Protobuf do SatShapeDataEnd, SatRpcRequest, SatRpcResponse, - SatOpLogAck + SatOpLogAck, + SatUnsubsDataBegin, + SatUnsubsDataEnd } require Logger @@ -32,7 +34,9 @@ defmodule Electric.Satellite.Protobuf do SatShapeDataEnd => 18, SatRpcRequest => 21, SatRpcResponse => 22, - SatOpLogAck => 23 + SatOpLogAck => 23, + SatUnsubsDataBegin => 24, + SatUnsubsDataEnd => 25 } if Enum.any?(Map.values(@mapping), &(&1 in @reserved)) do @@ -71,6 +75,8 @@ defmodule Electric.Satellite.Protobuf do | %SatShapeDataEnd{} | %SatRpcRequest{} | %SatRpcResponse{} + | %SatUnsubsDataBegin{} + | %SatUnsubsDataEnd{} @type rpc_req() :: %Satellite.SatAuthReq{} @@ -126,7 +132,9 @@ defmodule Electric.Satellite.Protobuf do SatUnsubsResp, SatOpAdditionalBegin, SatOpAdditionalCommit, - SatOpLogAck + SatOpLogAck, + SatUnsubsDataBegin, + SatUnsubsDataEnd } end end diff --git a/components/electric/lib/electric/satellite/protobuf_messages.ex b/components/electric/lib/electric/satellite/protobuf_messages.ex index 75fb1e1949..288d01ee3e 100644 --- a/components/electric/lib/electric/satellite/protobuf_messages.ex +++ b/components/electric/lib/electric/satellite/protobuf_messages.ex @@ -3335,7 +3335,8 @@ subscription_ids: [], schema_version: nil, observed_transaction_data: [], - sql_dialect: nil + sql_dialect: nil, + observed_gone_batch: [] ( ( @@ -3357,6 +3358,7 @@ |> encode_options(msg) |> encode_subscription_ids(msg) |> encode_observed_transaction_data(msg) + |> encode_observed_gone_batch(msg) end ) @@ -3486,6 +3488,26 @@ reraise Protox.EncodingError.new(:sql_dialect, "invalid field value"), __STACKTRACE__ end + end, + defp encode_observed_gone_batch(acc, msg) do + try do + case msg.observed_gone_batch do + [] -> + acc + + values -> + [ + acc, + Enum.reduce(values, [], fn value, acc -> + [acc, "B", Protox.Encode.encode_string(value)] + end) + ] + end + rescue + ArgumentError -> + reraise Protox.EncodingError.new(:observed_gone_batch, "invalid field value"), + __STACKTRACE__ + end end ] @@ -3589,6 +3611,15 @@ {[sql_dialect: value], rest} + {8, _, bytes} -> + {len, bytes} = Protox.Varint.decode(bytes) + {delimited, rest} = Protox.Decode.parse_delimited(bytes, len) + + {[ + observed_gone_batch: + msg.observed_gone_batch ++ [Protox.Decode.validate_string(delimited)] + ], rest} + {tag, wire_type, rest} -> {_, rest} = Protox.Decode.parse_unknown(tag, wire_type, rest) {[], rest} @@ -3653,7 +3684,8 @@ 6 => {:observed_transaction_data, :packed, :uint64}, 7 => {:sql_dialect, {:oneof, :_sql_dialect}, - {:enum, Electric.Satellite.SatInStartReplicationReq.Dialect}} + {:enum, Electric.Satellite.SatInStartReplicationReq.Dialect}}, + 8 => {:observed_gone_batch, :unpacked, :string} } end @@ -3664,6 +3696,7 @@ def defs_by_name() do %{ lsn: {1, {:scalar, ""}, :bytes}, + observed_gone_batch: {8, :unpacked, :string}, observed_transaction_data: {6, :packed, :uint64}, options: {2, :packed, {:enum, Electric.Satellite.SatInStartReplicationReq.Option}}, schema_version: {5, {:oneof, :_schema_version}, :string}, @@ -3732,6 +3765,15 @@ name: :sql_dialect, tag: 7, type: {:enum, Electric.Satellite.SatInStartReplicationReq.Dialect} + }, + %{ + __struct__: Protox.Field, + json_name: 
"observedGoneBatch", + kind: :unpacked, + label: :repeated, + name: :observed_gone_batch, + tag: 8, + type: :string } ] end @@ -3956,6 +3998,46 @@ }} end ), + ( + def field_def(:observed_gone_batch) do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "observedGoneBatch", + kind: :unpacked, + label: :repeated, + name: :observed_gone_batch, + tag: 8, + type: :string + }} + end + + def field_def("observedGoneBatch") do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "observedGoneBatch", + kind: :unpacked, + label: :repeated, + name: :observed_gone_batch, + tag: 8, + type: :string + }} + end + + def field_def("observed_gone_batch") do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "observedGoneBatch", + kind: :unpacked, + label: :repeated, + name: :observed_gone_batch, + tag: 8, + type: :string + }} + end + ), def field_def(_) do {:error, :no_such_field} end @@ -3998,6 +4080,9 @@ def default(:sql_dialect) do {:error, :no_default_value} end, + def default(:observed_gone_batch) do + {:error, :no_default_value} + end, def default(_) do {:error, :no_such_field} end @@ -6348,6 +6433,172 @@ end ) end, + defmodule Electric.Satellite.SatUnsubsDataEnd do + @moduledoc false + defstruct [] + + ( + ( + @spec encode(struct) :: {:ok, iodata} + def encode(msg) do + {:ok, encode!(msg)} + end + + @spec encode!(struct) :: iodata + def encode!(_msg) do + [] + end + ) + + [] + [] + [] + ) + + ( + ( + @spec decode(binary) :: {:ok, struct} | {:error, any} + def decode(bytes) do + try do + {:ok, decode!(bytes)} + rescue + e in [Protox.DecodingError, Protox.IllegalTagError, Protox.RequiredFieldsError] -> + {:error, e} + end + end + + ( + @spec decode!(binary) :: struct | no_return + def decode!(bytes) do + parse_key_value(bytes, struct(Electric.Satellite.SatUnsubsDataEnd)) + end + ) + ) + + ( + @spec parse_key_value(binary, struct) :: struct + defp parse_key_value(<<>>, msg) do + msg + end + + defp parse_key_value(bytes, msg) do + {field, rest} = + case Protox.Decode.parse_key(bytes) do + {0, _, _} -> + raise %Protox.IllegalTagError{} + + {tag, wire_type, rest} -> + {_, rest} = Protox.Decode.parse_unknown(tag, wire_type, rest) + {[], rest} + end + + msg_updated = struct(msg, field) + parse_key_value(rest, msg_updated) + end + ) + + [] + ) + + ( + @spec json_decode(iodata(), keyword()) :: {:ok, struct()} | {:error, any()} + def json_decode(input, opts \\ []) do + try do + {:ok, json_decode!(input, opts)} + rescue + e in Protox.JsonDecodingError -> {:error, e} + end + end + + @spec json_decode!(iodata(), keyword()) :: struct() | no_return() + def json_decode!(input, opts \\ []) do + {json_library_wrapper, json_library} = Protox.JsonLibrary.get_library(opts, :decode) + + Protox.JsonDecode.decode!( + input, + Electric.Satellite.SatUnsubsDataEnd, + &json_library_wrapper.decode!(json_library, &1) + ) + end + + @spec json_encode(struct(), keyword()) :: {:ok, iodata()} | {:error, any()} + def json_encode(msg, opts \\ []) do + try do + {:ok, json_encode!(msg, opts)} + rescue + e in Protox.JsonEncodingError -> {:error, e} + end + end + + @spec json_encode!(struct(), keyword()) :: iodata() | no_return() + def json_encode!(msg, opts \\ []) do + {json_library_wrapper, json_library} = Protox.JsonLibrary.get_library(opts, :encode) + Protox.JsonEncode.encode!(msg, &json_library_wrapper.encode!(json_library, &1)) + end + ) + + ( + @deprecated "Use fields_defs()/0 instead" + @spec defs() :: %{ + required(non_neg_integer) => {atom, Protox.Types.kind(), Protox.Types.type()} + } + def defs() do + %{} + end + + 
@deprecated "Use fields_defs()/0 instead" + @spec defs_by_name() :: %{ + required(atom) => {non_neg_integer, Protox.Types.kind(), Protox.Types.type()} + } + def defs_by_name() do + %{} + end + ) + + ( + @spec fields_defs() :: list(Protox.Field.t()) + def fields_defs() do + [] + end + + [ + @spec(field_def(atom) :: {:ok, Protox.Field.t()} | {:error, :no_such_field}), + def field_def(_) do + {:error, :no_such_field} + end + ] + ) + + [] + + ( + @spec required_fields() :: [] + def required_fields() do + [] + end + ) + + ( + @spec syntax() :: atom() + def syntax() do + :proto3 + end + ) + + [ + @spec(default(atom) :: {:ok, boolean | integer | String.t() | float} | {:error, atom}), + def default(_) do + {:error, :no_such_field} + end + ] + + ( + @spec file_options() :: nil + def file_options() do + nil + end + ) + end, defmodule Electric.Satellite.SatOpUpdate do @moduledoc false defstruct relation_id: 0, row_data: nil, old_row_data: nil, tags: [] @@ -12040,7 +12291,8 @@ lsn: "", transaction_id: 0, subscription_ids: [], - additional_data_source_ids: [] + additional_data_source_ids: [], + gone_subscription_ids: [] ( ( @@ -12061,6 +12313,7 @@ |> encode_transaction_id(msg) |> encode_subscription_ids(msg) |> encode_additional_data_source_ids(msg) + |> encode_gone_subscription_ids(msg) end ) @@ -12154,6 +12407,26 @@ ), __STACKTRACE__ end + end, + defp encode_gone_subscription_ids(acc, msg) do + try do + case msg.gone_subscription_ids do + [] -> + acc + + values -> + [ + acc, + Enum.reduce(values, [], fn value, acc -> + [acc, "2", Protox.Encode.encode_string(value)] + end) + ] + end + rescue + ArgumentError -> + reraise Protox.EncodingError.new(:gone_subscription_ids, "invalid field value"), + __STACKTRACE__ + end end ] @@ -12228,6 +12501,15 @@ {value, rest} = Protox.Decode.parse_uint64(bytes) {[additional_data_source_ids: msg.additional_data_source_ids ++ [value]], rest} + {6, _, bytes} -> + {len, bytes} = Protox.Varint.decode(bytes) + {delimited, rest} = Protox.Decode.parse_delimited(bytes, len) + + {[ + gone_subscription_ids: + msg.gone_subscription_ids ++ [Protox.Decode.validate_string(delimited)] + ], rest} + {tag, wire_type, rest} -> {_, rest} = Protox.Decode.parse_unknown(tag, wire_type, rest) {[], rest} @@ -12289,7 +12571,8 @@ 2 => {:lsn, {:scalar, ""}, :bytes}, 3 => {:transaction_id, {:scalar, 0}, :uint64}, 4 => {:subscription_ids, :unpacked, :string}, - 5 => {:additional_data_source_ids, :packed, :uint64} + 5 => {:additional_data_source_ids, :packed, :uint64}, + 6 => {:gone_subscription_ids, :unpacked, :string} } end @@ -12301,6 +12584,7 @@ %{ ack_timestamp: {1, {:scalar, 0}, :uint64}, additional_data_source_ids: {5, :packed, :uint64}, + gone_subscription_ids: {6, :unpacked, :string}, lsn: {2, {:scalar, ""}, :bytes}, subscription_ids: {4, :unpacked, :string}, transaction_id: {3, {:scalar, 0}, :uint64} @@ -12356,12 +12640,21 @@ name: :additional_data_source_ids, tag: 5, type: :uint64 - } - ] - end - - [ - @spec(field_def(atom) :: {:ok, Protox.Field.t()} | {:error, :no_such_field}), + }, + %{ + __struct__: Protox.Field, + json_name: "goneSubscriptionIds", + kind: :unpacked, + label: :repeated, + name: :gone_subscription_ids, + tag: 6, + type: :string + } + ] + end + + [ + @spec(field_def(atom) :: {:ok, Protox.Field.t()} | {:error, :no_such_field}), ( def field_def(:ack_timestamp) do {:ok, @@ -12551,6 +12844,46 @@ }} end ), + ( + def field_def(:gone_subscription_ids) do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "goneSubscriptionIds", + kind: :unpacked, + label: :repeated, + name: 
:gone_subscription_ids, + tag: 6, + type: :string + }} + end + + def field_def("goneSubscriptionIds") do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "goneSubscriptionIds", + kind: :unpacked, + label: :repeated, + name: :gone_subscription_ids, + tag: 6, + type: :string + }} + end + + def field_def("gone_subscription_ids") do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "goneSubscriptionIds", + kind: :unpacked, + label: :repeated, + name: :gone_subscription_ids, + tag: 6, + type: :string + }} + end + ), def field_def(_) do {:error, :no_such_field} end @@ -12590,6 +12923,9 @@ def default(:additional_data_source_ids) do {:error, :no_default_value} end, + def default(:gone_subscription_ids) do + {:error, :no_default_value} + end, def default(_) do {:error, :no_such_field} end @@ -15831,6 +16167,319 @@ end ) end, + defmodule Electric.Satellite.SatUnsubsDataBegin do + @moduledoc false + defstruct subscription_ids: [], lsn: "" + + ( + ( + @spec encode(struct) :: {:ok, iodata} | {:error, any} + def encode(msg) do + try do + {:ok, encode!(msg)} + rescue + e in [Protox.EncodingError, Protox.RequiredFieldsError] -> {:error, e} + end + end + + @spec encode!(struct) :: iodata | no_return + def encode!(msg) do + [] |> encode_subscription_ids(msg) |> encode_lsn(msg) + end + ) + + [] + + [ + defp encode_subscription_ids(acc, msg) do + try do + case msg.subscription_ids do + [] -> + acc + + values -> + [ + acc, + Enum.reduce(values, [], fn value, acc -> + [acc, "\n", Protox.Encode.encode_string(value)] + end) + ] + end + rescue + ArgumentError -> + reraise Protox.EncodingError.new(:subscription_ids, "invalid field value"), + __STACKTRACE__ + end + end, + defp encode_lsn(acc, msg) do + try do + if msg.lsn == "" do + acc + else + [acc, "\x12", Protox.Encode.encode_bytes(msg.lsn)] + end + rescue + ArgumentError -> + reraise Protox.EncodingError.new(:lsn, "invalid field value"), __STACKTRACE__ + end + end + ] + + [] + ) + + ( + ( + @spec decode(binary) :: {:ok, struct} | {:error, any} + def decode(bytes) do + try do + {:ok, decode!(bytes)} + rescue + e in [Protox.DecodingError, Protox.IllegalTagError, Protox.RequiredFieldsError] -> + {:error, e} + end + end + + ( + @spec decode!(binary) :: struct | no_return + def decode!(bytes) do + parse_key_value(bytes, struct(Electric.Satellite.SatUnsubsDataBegin)) + end + ) + ) + + ( + @spec parse_key_value(binary, struct) :: struct + defp parse_key_value(<<>>, msg) do + msg + end + + defp parse_key_value(bytes, msg) do + {field, rest} = + case Protox.Decode.parse_key(bytes) do + {0, _, _} -> + raise %Protox.IllegalTagError{} + + {1, _, bytes} -> + {len, bytes} = Protox.Varint.decode(bytes) + {delimited, rest} = Protox.Decode.parse_delimited(bytes, len) + + {[ + subscription_ids: + msg.subscription_ids ++ [Protox.Decode.validate_string(delimited)] + ], rest} + + {2, _, bytes} -> + {len, bytes} = Protox.Varint.decode(bytes) + {delimited, rest} = Protox.Decode.parse_delimited(bytes, len) + {[lsn: delimited], rest} + + {tag, wire_type, rest} -> + {_, rest} = Protox.Decode.parse_unknown(tag, wire_type, rest) + {[], rest} + end + + msg_updated = struct(msg, field) + parse_key_value(rest, msg_updated) + end + ) + + [] + ) + + ( + @spec json_decode(iodata(), keyword()) :: {:ok, struct()} | {:error, any()} + def json_decode(input, opts \\ []) do + try do + {:ok, json_decode!(input, opts)} + rescue + e in Protox.JsonDecodingError -> {:error, e} + end + end + + @spec json_decode!(iodata(), keyword()) :: struct() | no_return() + def json_decode!(input, opts \\ []) 
do + {json_library_wrapper, json_library} = Protox.JsonLibrary.get_library(opts, :decode) + + Protox.JsonDecode.decode!( + input, + Electric.Satellite.SatUnsubsDataBegin, + &json_library_wrapper.decode!(json_library, &1) + ) + end + + @spec json_encode(struct(), keyword()) :: {:ok, iodata()} | {:error, any()} + def json_encode(msg, opts \\ []) do + try do + {:ok, json_encode!(msg, opts)} + rescue + e in Protox.JsonEncodingError -> {:error, e} + end + end + + @spec json_encode!(struct(), keyword()) :: iodata() | no_return() + def json_encode!(msg, opts \\ []) do + {json_library_wrapper, json_library} = Protox.JsonLibrary.get_library(opts, :encode) + Protox.JsonEncode.encode!(msg, &json_library_wrapper.encode!(json_library, &1)) + end + ) + + ( + @deprecated "Use fields_defs()/0 instead" + @spec defs() :: %{ + required(non_neg_integer) => {atom, Protox.Types.kind(), Protox.Types.type()} + } + def defs() do + %{1 => {:subscription_ids, :unpacked, :string}, 2 => {:lsn, {:scalar, ""}, :bytes}} + end + + @deprecated "Use fields_defs()/0 instead" + @spec defs_by_name() :: %{ + required(atom) => {non_neg_integer, Protox.Types.kind(), Protox.Types.type()} + } + def defs_by_name() do + %{lsn: {2, {:scalar, ""}, :bytes}, subscription_ids: {1, :unpacked, :string}} + end + ) + + ( + @spec fields_defs() :: list(Protox.Field.t()) + def fields_defs() do + [ + %{ + __struct__: Protox.Field, + json_name: "subscriptionIds", + kind: :unpacked, + label: :repeated, + name: :subscription_ids, + tag: 1, + type: :string + }, + %{ + __struct__: Protox.Field, + json_name: "lsn", + kind: {:scalar, ""}, + label: :optional, + name: :lsn, + tag: 2, + type: :bytes + } + ] + end + + [ + @spec(field_def(atom) :: {:ok, Protox.Field.t()} | {:error, :no_such_field}), + ( + def field_def(:subscription_ids) do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "subscriptionIds", + kind: :unpacked, + label: :repeated, + name: :subscription_ids, + tag: 1, + type: :string + }} + end + + def field_def("subscriptionIds") do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "subscriptionIds", + kind: :unpacked, + label: :repeated, + name: :subscription_ids, + tag: 1, + type: :string + }} + end + + def field_def("subscription_ids") do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "subscriptionIds", + kind: :unpacked, + label: :repeated, + name: :subscription_ids, + tag: 1, + type: :string + }} + end + ), + ( + def field_def(:lsn) do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "lsn", + kind: {:scalar, ""}, + label: :optional, + name: :lsn, + tag: 2, + type: :bytes + }} + end + + def field_def("lsn") do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "lsn", + kind: {:scalar, ""}, + label: :optional, + name: :lsn, + tag: 2, + type: :bytes + }} + end + + [] + ), + def field_def(_) do + {:error, :no_such_field} + end + ] + ) + + [] + + ( + @spec required_fields() :: [] + def required_fields() do + [] + end + ) + + ( + @spec syntax() :: atom() + def syntax() do + :proto3 + end + ) + + [ + @spec(default(atom) :: {:ok, boolean | integer | String.t() | float} | {:error, atom}), + def default(:subscription_ids) do + {:error, :no_default_value} + end, + def default(:lsn) do + {:ok, ""} + end, + def default(_) do + {:error, :no_such_field} + end + ] + + ( + @spec file_options() :: nil + def file_options() do + nil + end + ) + end, defmodule Electric.Satellite.SatOpDelete do @moduledoc false defstruct relation_id: 0, old_row_data: nil, tags: [] diff --git a/components/electric/lib/electric/satellite/protocol.ex 
b/components/electric/lib/electric/satellite/protocol.ex index 381028c4e6..f6c291d88f 100644 --- a/components/electric/lib/electric/satellite/protocol.ex +++ b/components/electric/lib/electric/satellite/protocol.ex @@ -2,6 +2,7 @@ defmodule Electric.Satellite.Protocol do @moduledoc """ Protocol for communication with Satellite """ + alias Electric.Replication.Shapes.SentRowsGraph use Electric.Satellite.Protobuf use Pathex import Electric.Satellite.Protobuf, only: [is_allowed_rpc_method: 1] @@ -34,12 +35,16 @@ defmodule Electric.Satellite.Protocol do @type outgoing() :: {deep_msg_list(), State.t()} | {:error, deep_msg_list(), State.t()} @type txn_processing() :: {deep_msg_list(), actions(), State.t()} - @producer_demand 5 - - @spec handle_rpc_request(PB.rpc_req(), State.t()) :: - {:error, %SatErrorResp{} | PB.rpc_resp()} + @type handle_rpc_result :: + {:error, PB.rpc_resp() | %SatErrorResp{}} | {:reply, PB.rpc_resp(), State.t()} + | {:reply, PB.rpc_resp(), followups :: deep_msg_list(), State.t()} | {:force_unpause, PB.rpc_resp(), State.t()} + | {:force_unpause, PB.rpc_resp(), followups :: deep_msg_list(), State.t()} + + @producer_demand 5 + + @spec handle_rpc_request(PB.rpc_req(), State.t()) :: handle_rpc_result() def handle_rpc_request(%SatAuthReq{id: client_id, token: token}, state) when not auth_passed?(state) and client_id != "" and token != "" do Logger.metadata(client_id: client_id) @@ -250,25 +255,74 @@ defmodule Electric.Satellite.Protocol do needs_unpausing? = is_out_rep_paused(state) and Enum.any?(ids, &is_next_pending_subscription(state, &1)) + removed_subs = Map.take(state.subscriptions, ids) + removed_ids = Map.keys(removed_subs) + + request_ids = + removed_subs + # We don't need to send GONEs for unsent data + |> Enum.reject(fn {id, _} -> OutRep.subscription_pending?(id, state.out_rep) end) + |> Enum.flat_map(fn {_, requests} -> Enum.map(requests, & &1.id) end) + out_rep = - ids + removed_ids |> Enum.reduce(state.out_rep, &OutRep.remove_pause_point(&2, :subscription, &1)) - |> Map.update!(:subscription_data_to_send, &Map.drop(&1, ids)) + |> Map.update!(:subscription_data_to_send, &Map.drop(&1, removed_ids)) - state = - state - |> Map.put(:out_rep, out_rep) - |> Map.update!(:subscriptions, &Map.drop(&1, ids)) + ClientReconnectionInfo.unsubscribe( + state.origin, + state.client_id, + removed_ids, + state.out_rep.last_seen_wal_pos + ) + + {gone_messages, out_rep} = + if request_ids != [] do + {gone, graph} = + Shapes.SentRowsGraph.pop_by_request_ids(state.out_rep.sent_rows_graph, request_ids) - ClientReconnectionInfo.delete_subscriptions(state.origin, state.client_id, ids) + # We're sending back all subscription IDs to not confuse the client + {msgs, out_rep} = prepare_unsubs_data(ids, gone, out_rep) + + {msgs, %OutRep{out_rep | sent_rows_graph: graph}} + else + {[], out_rep} + end + + state = %State{ + state + | out_rep: out_rep, + subscriptions: Map.drop(state.subscriptions, ids) + } if needs_unpausing? 
do - {:force_unpause, %SatUnsubsResp{}, state} + {:force_unpause, %SatUnsubsResp{}, gone_messages, state} else - {:reply, %SatUnsubsResp{}, state} + {:reply, %SatUnsubsResp{}, gone_messages, state} end end + @spec prepare_unsubs_data([String.t()], [tuple()], OutRep.t()) :: + {[deep_msg_list()], OutRep.t()} + defp prepare_unsubs_data(subscription_ids, gone_nodes, %OutRep{} = out_rep) do + {serialized_log, unknown_relations, known_relations} = + gone_nodes + |> Enum.map(fn {relation, pk} -> %Changes.Gone{pk: pk, relation: relation} end) + |> Serialization.serialize_shape_data_as_tx(out_rep.relations) + + msgs = [ + serialize_unknown_relations(unknown_relations), + %SatUnsubsDataBegin{ + lsn: CachedWal.Api.serialize_wal_position(out_rep.last_seen_wal_pos), + subscription_ids: subscription_ids + }, + serialized_log, + %SatUnsubsDataEnd{} + ] + + {msgs, %OutRep{out_rep | relations: known_relations}} + end + @spec process_message(PB.sq_pb_msg(), State.t()) :: {nil | :stop | deep_msg_list(), State.t()} | {:force_unpause, deep_msg_list(), State.t()} @@ -286,8 +340,11 @@ defmodule Electric.Satellite.Protocol do {:reply, result, state} -> {%{resp | result: {:message, rpc_encode(result)}}, state} - {:force_unpause, result, state} -> - {:force_unpause, %{resp | result: {:message, rpc_encode(result)}}, state} + {:reply, result, followups, state} -> + {[%{resp | result: {:message, rpc_encode(result)}}, followups], state} + + {:force_unpause, result, followups, state} -> + {:force_unpause, [%{resp | result: {:message, rpc_encode(result)}}, followups], state} {:error, %SatErrorResp{} = error} -> {:error, %{resp | result: {:error, error}}} @@ -437,7 +494,7 @@ defmodule Electric.Satellite.Protocol do def process_message(%SatOpLogAck{} = msg, %State{} = state) do case OutRep.ack_transactions(state.out_rep, msg.transaction_id) do :error -> - %SatErrorResp{error_type: :INVALID_REQUEST, message: "Acknowledged unknown txn"} + {:error, %SatErrorResp{error_type: :INVALID_REQUEST, message: "Acknowledged unknown txn"}} {:ok, out_rep} -> # continue if suspended; GC/checkpoint @@ -453,6 +510,7 @@ defmodule Electric.Satellite.Protocol do ack_point: sent_pos, including_data: msg.additional_data_source_ids, including_subscriptions: msg.subscription_ids, + including_unsubscribes: msg.gone_subscription_ids, cached_wal_impl: CachedWal.EtsBacked, origin: state.origin, advance_graph_using: {&advance_graph_by_tx/4, [state.auth.user_id]} @@ -496,9 +554,7 @@ defmodule Electric.Satellite.Protocol do %SatInStartReplicationReq{}, binary() | :initial_sync, State.t() - ) :: - {:error, %SatErrorResp{} | PB.rpc_resp()} - | {:reply, PB.rpc_resp(), State.t()} + ) :: handle_rpc_result() defp handle_start_replication_request( %{subscription_ids: []} = msg, :initial_sync, @@ -528,10 +584,10 @@ defmodule Electric.Satellite.Protocol do )} end - defp handle_start_replication_request(msg, lsn, state) do + defp handle_start_replication_request(%SatInStartReplicationReq{} = msg, lsn, state) do if CachedWal.Api.lsn_in_cached_window?(state.origin, lsn) do - case restore_client_state(msg.subscription_ids, msg.observed_transaction_data, lsn, state) do - {:ok, state} -> + case restore_client_state(msg, lsn, state) do + {:ok, state, immediate_msgs} -> Logger.debug("Continuing sync for client #{state.client_id} from lsn #{lsn}") state = @@ -541,7 +597,7 @@ defmodule Electric.Satellite.Protocol do {:reply, %SatInStartReplicationResp{unacked_window_size: state.out_rep.allowed_unacked_txs}, - state} + immediate_msgs, state} error -> error @@ -749,17 
+805,33 @@ defmodule Electric.Satellite.Protocol do @spec move_in_data_received( non_neg_integer(), + MapSet.t(String.t()), Graph.t(), Shapes.Querying.results(), non_neg_integer(), [non_neg_integer(), ...], State.t() ) :: outgoing() - def move_in_data_received(ref, graph_diff, changes, xmin, included_txns, state) do + def move_in_data_received( + ref, + %MapSet{} = request_ids, + %Graph{} = graph_diff, + changes, + xmin, + included_txns, + %State{} = state + ) do # It's a trade-off where to filter out already-sent changes. Current implementation # prefers copying more data into itself and filtering here. Maybe sending a MapSet # of already-sent IDs to the Task process that does the querying is more optimal, # but more testing is required. + active_request_ids = MapSet.new(current_shapes(state), & &1.id) + + # These request IDs were unsubbed while the data was moving in. We need to prune the graph & changes for that. + gone_request_ids = MapSet.difference(request_ids, active_request_ids) + + {_, graph_diff} = + SentRowsGraph.pop_by_request_ids(graph_diff, gone_request_ids, root_vertex: :fake_root) # Store this data in case of disconnect until acknowledged ClientReconnectionInfo.store_additional_txn_data!( @@ -768,7 +840,7 @@ defmodule Electric.Satellite.Protocol do xmin, ref, included_txns, - graph_diff + Graph.delete_vertex(graph_diff, :fake_root) ) if is_paused_on_move_in(state, ref) do @@ -944,10 +1016,16 @@ defmodule Electric.Satellite.Protocol do end defp handle_move_in_data(ref, changes, %State{} = state) do + active_req_ids = MapSet.new(current_shapes(state), & &1.id) + # No actions are possible from changes formatted as NewRecords. {graph, changes, _actions} = changes - |> Stream.reject(fn {id, _} -> State.row_sent?(state, id) end) + |> Stream.reject(fn {id, {_, req_ids}} -> + # We don't want sent rows, or changes that were relevant for an unsubbed request. + State.row_sent?(state, id) or + not Enum.all?(req_ids, &MapSet.member?(active_req_ids, &1)) + end) |> Stream.map(fn {_id, {change, _req_ids}} -> change end) |> Shapes.process_additional_changes(state.out_rep.sent_rows_graph, current_shapes(state)) @@ -1081,6 +1159,9 @@ defmodule Electric.Satellite.Protocol do end @spec query_move_in_data(actions(), State.t()) :: {:ok, State.t()} | {:error, deep_msg_list()} + # Empty case, no actions required. 
+ defp query_move_in_data({_, []}, %State{} = state), do: {:ok, state} + defp query_move_in_data(actions, %State{} = state) do ref = make_ref() parent = self() @@ -1138,23 +1219,35 @@ defmodule Electric.Satellite.Protocol do ) end - defp restore_client_state(subscription_ids, observed_txn_data, lsn, %State{} = state) do + defp restore_client_state(%SatInStartReplicationReq{} = msg, lsn, %State{} = state) do :ok = ClientReconnectionInfo.restore_cache_for_client(state.origin, state.client_id) Logger.debug("Successfully loaded client reconnection info") - with {:ok, state} <- restore_subscriptions(subscription_ids, state) do - restore_graph(lsn, observed_txn_data, state) + with {:ok, state} <- restore_subscriptions(msg, lsn, state) do + restore_graph(msg, lsn, state) end end - defp restore_subscriptions([], %State{} = state), do: {:ok, state} + @spec restore_subscriptions(%SatInStartReplicationReq{}, CachedWal.Api.wal_pos(), State.t()) :: + {:ok, State.t()} | {:error, term()} + defp restore_subscriptions( + %SatInStartReplicationReq{subscription_ids: []}, + _, + %State{} = state + ), + do: {:ok, state} - defp restore_subscriptions(subscription_ids, %State{} = state) do + defp restore_subscriptions(%SatInStartReplicationReq{} = msg, lsn, %State{} = state) do subscription_data = - ClientReconnectionInfo.fetch_subscriptions(state.client_id, subscription_ids) + ClientReconnectionInfo.fetch_subscriptions( + state.client_id, + msg.subscription_ids, + lsn, + msg.observed_gone_batch + ) |> Map.new() - case Enum.find(subscription_ids, &(not Map.has_key?(subscription_data, &1))) do + case Enum.find(msg.subscription_ids, &(not Map.has_key?(subscription_data, &1))) do nil -> state = Map.update!(state, :subscriptions, &Map.merge(&1, subscription_data)) {:ok, state} @@ -1162,29 +1255,34 @@ defmodule Electric.Satellite.Protocol do id -> id = if String.length(id) > 128, do: String.slice(id, 0..125) <> "...", else: id - {:halt, - {:error, start_replication_error(:SUBSCRIPTION_NOT_FOUND, "Unknown subscription: #{id}")}} + {:error, start_replication_error(:SUBSCRIPTION_NOT_FOUND, "Unknown subscription: #{id}")} end end - defp restore_graph(lsn, observed_txn_data, %State{} = state) do + defp restore_graph(%SatInStartReplicationReq{} = msg, lsn, %State{} = state) do ClientReconnectionInfo.advance_on_reconnection!(state.client_id, ack_point: lsn, - including_data: observed_txn_data, + including_data: msg.observed_transaction_data, including_subscriptions: Map.keys(state.subscriptions), + including_unsubscribes: msg.observed_gone_batch, cached_wal_impl: CachedWal.EtsBacked, origin: state.origin, advance_graph_using: {&advance_graph_by_tx/4, [state.auth.user_id]} ) |> case do # If no actions are "missing" after catch-up, then we don't need to do anything here. 
- {:ok, graph, {_, []}} -> - {:ok, %{state | out_rep: %{state.out_rep | sent_rows_graph: graph}}} + {:ok, graph, actions, gone_batch} -> + out_rep = %OutRep{state.out_rep | sent_rows_graph: graph} + + {msgs, out_rep} = + case gone_batch do + nil -> {[], out_rep} + {ids, gones} -> prepare_unsubs_data(ids, gones, out_rep) + end - {:ok, graph, actions} -> - state = %{state | out_rep: %{state.out_rep | sent_rows_graph: graph}} - # This returns an ok-tuple or a full replication error - query_move_in_data(actions, state) + with {:ok, state} <- query_move_in_data(actions, %{state | out_rep: out_rep}) do + {:ok, state, msgs} + end _ -> Logger.info( diff --git a/components/electric/lib/electric/satellite/protocol/out_rep.ex b/components/electric/lib/electric/satellite/protocol/out_rep.ex index 0f60a4c2bf..d6cf71464b 100644 --- a/components/electric/lib/electric/satellite/protocol/out_rep.ex +++ b/components/electric/lib/electric/satellite/protocol/out_rep.ex @@ -170,6 +170,10 @@ defmodule Electric.Satellite.Protocol.OutRep do end @spec ack_transactions(t(), non_neg_integer()) :: :error | {:ok, t()} + # Special-case txid = 0 for clients that need to send an ACK message having not seen any transactions + # Use-case: a client has established a subscription, but no additional txns touch their shape. + def ack_transactions(%__MODULE__{} = out, 0), do: {:ok, out} + def ack_transactions( %__MODULE__{unacked_transaction_count: count, unacked_transactions: txn_ids} = out, txn_id diff --git a/components/electric/lib/electric/satellite/serialization.ex b/components/electric/lib/electric/satellite/serialization.ex index 6304910158..e7c9e34830 100644 --- a/components/electric/lib/electric/satellite/serialization.ex +++ b/components/electric/lib/electric/satellite/serialization.ex @@ -379,7 +379,7 @@ defmodule Electric.Satellite.Serialization do """ @spec deserialize_trans(String.t(), %SatOpLog{}, %Transaction{} | nil, cached_relations()) :: { - incomplete :: %Transaction{} | nil, + incomplete :: %Transaction{} | additional_data() | nil, # Complete transactions are send in reverse order complete :: [%Transaction{} | additional_data()] } @@ -392,13 +392,23 @@ defmodule Electric.Satellite.Serialization do deserialize_op_log(origin, op_log, {trans, []}, relations) end + def deserialize_trans( + origin, + %SatOpLog{} = op_log, + {:additional_data, _, _} = data, + relations + ) + when origin !== "" do + deserialize_op_log(origin, op_log, {data, []}, relations) + end + defp deserialize_op_log(origin, %SatOpLog{} = msg, incomplete, relations) do Enum.reduce(msg.ops, incomplete, fn - %SatTransOp{op: {:additional_begin, %SatOpAdditionalBegin{}}}, {nil, complete} -> - {{:additional_data, []}, complete} + %SatTransOp{op: {:additional_begin, %SatOpAdditionalBegin{ref: ref}}}, {nil, complete} -> + {{:additional_data, ref, []}, complete} %SatTransOp{op: {:additional_commit, %SatOpAdditionalCommit{ref: ref}}}, - {{:additional_data, changes}, complete} -> + {{:additional_data, ref, changes}, complete} -> {nil, [{:additional_data, ref, Enum.reverse(changes)} | complete]} %SatTransOp{op: {:begin, %SatOpBegin{} = op}}, {nil, complete} -> @@ -437,8 +447,8 @@ defmodule Electric.Satellite.Serialization do %Transaction{} = trans -> {%Transaction{trans | changes: [change | trans.changes]}, complete} - {:additional_data, changes} -> - {{:additional_data, [change | changes]}, complete} + {:additional_data, ref, changes} -> + {{:additional_data, ref, [change | changes]}, complete} end end) end diff --git 
a/components/electric/lib/electric/satellite/ws_server.ex b/components/electric/lib/electric/satellite/ws_server.ex index 699624060f..396a56888d 100644 --- a/components/electric/lib/electric/satellite/ws_server.ex +++ b/components/electric/lib/electric/satellite/ws_server.ex @@ -261,10 +261,18 @@ defmodule Electric.Satellite.WebsocketServer do end def handle_info( - {:move_in_query_data, ref, xmin, {graph_updates, changes}, included_txns}, + {:move_in_query_data, ref, xmin, {request_ids, graph_updates, changes}, included_txns}, state ) do - Protocol.move_in_data_received(ref, graph_updates, changes, xmin, included_txns, state) + Protocol.move_in_data_received( + ref, + request_ids, + graph_updates, + changes, + xmin, + included_txns, + state + ) |> push() end diff --git a/components/electric/lib/satellite/protocol_helpers.ex b/components/electric/lib/satellite/protocol_helpers.ex index a8b4b6e992..7178bff8cd 100644 --- a/components/electric/lib/satellite/protocol_helpers.ex +++ b/components/electric/lib/satellite/protocol_helpers.ex @@ -29,7 +29,9 @@ defmodule Satellite.ProtocolHelpers do ) end - def simple_sub_request(tables) when is_list(tables) do + @spec simple_sub_request(atom() | binary() | keyword()) :: + {sub_id :: binary(), req_id :: binary(), %SatSubsReq{}} + def simple_sub_request(tables) do subscription_id = Electric.Utils.uuid4() request_id = Electric.Utils.uuid4() @@ -40,14 +42,15 @@ defmodule Satellite.ProtocolHelpers do %SatShapeReq{ request_id: request_id, shape_definition: %SatShapeDef{ - selects: Enum.map(tables, &unwrap_tables/1) + selects: Enum.map(List.wrap(tables), &unwrap_tables/1) } } ] }} end - defp unwrap_tables(table) when is_binary(table), do: unwrap_tables({table, []}) + defp unwrap_tables(table) when is_binary(table) or is_atom(table), + do: unwrap_tables({table, []}) defp unwrap_tables({table, kw_list}) do base_select = diff --git a/components/electric/test/electric/replication/shapes/sent_rows_graph_test.exs b/components/electric/test/electric/replication/shapes/sent_rows_graph_test.exs new file mode 100644 index 0000000000..104ab925e7 --- /dev/null +++ b/components/electric/test/electric/replication/shapes/sent_rows_graph_test.exs @@ -0,0 +1,5 @@ +defmodule Electric.Replication.Shapes.SentRowsGraphTest do + use ExUnit.Case, async: true + + doctest Electric.Replication.Shapes.SentRowsGraph, import: true +end diff --git a/components/electric/test/electric/satellite/subscriptions_test.exs b/components/electric/test/electric/satellite/subscriptions_test.exs index 9801cda68b..afd3c53d8a 100644 --- a/components/electric/test/electric/satellite/subscriptions_test.exs +++ b/components/electric/test/electric/satellite/subscriptions_test.exs @@ -1,6 +1,7 @@ defmodule Electric.Satellite.SubscriptionsTest do use ExUnit.Case, async: false + alias Electric.Satellite.ClientReconnectionInfo alias Electric.Replication.Changes.UpdatedRecord alias Satellite.ProtocolHelpers alias Electric.Replication.Postgres.Client @@ -447,7 +448,7 @@ defmodule Electric.Satellite.SubscriptionsTest do test "client can subscribe, then unsubscribe to stop streaming any further data", %{conn: pg_conn} = ctx do MockClient.with_connect([auth: ctx, id: ctx.client_id, port: ctx.port], fn conn -> - start_replication_and_assert_response(conn, ctx.electrified_count) + rel_map = start_replication_and_assert_response(conn, ctx.electrified_count) request_id = uuid4() sub_id = uuid4() @@ -469,9 +470,11 @@ defmodule Electric.Satellite.SubscriptionsTest do assert {[request_id], []} == 
receive_subscription_data(conn, sub_id) + uuid = uuid4() + {:ok, 1} = :epgsql.equery(pg_conn, "INSERT INTO public.users (id, name) VALUES ($1, $2)", [ - uuid4(), + uuid, "Garry" ]) @@ -491,6 +494,8 @@ defmodule Electric.Satellite.SubscriptionsTest do subscription_ids: [sub_id] }) + assert {[^sub_id], _, [%Gone{pk: [^uuid]}]} = receive_unsub_gone_batch(conn, rel_map) + {:ok, 1} = :epgsql.equery(pg_conn, "INSERT INTO public.users (id, name) VALUES ($1, $2)", [ uuid4(), @@ -1349,6 +1354,384 @@ defmodule Electric.Satellite.SubscriptionsTest do refute_received {^conn, %SatOpLog{ops: [%{op: {:additional_begin, _}} | _]}} end) end + + @tag with_sql: """ + INSERT INTO public.users (id, name) VALUES ('#{@john_doe_id}', 'John Doe'); + INSERT INTO public.authored_entries (id, author_id, content) VALUES ('#{@entry_id}', '#{@john_doe_id}', 'Hello world'); + INSERT INTO public.comments (id, entry_id, content, author_id) VALUES ('#{uuid4()}', '#{@entry_id}', 'Comment 1', '#{@john_doe_id}'); + """ + test "The client can unsubscribe and get a `GONE` message list", + %{conn: pg_conn} = ctx do + MockClient.with_connect([auth: ctx, id: ctx.client_id, port: ctx.port], fn conn -> + rel_map = start_replication_and_assert_response(conn, ctx.electrified_count) + + {sub_id, request_id, request} = + ProtocolHelpers.simple_sub_request( + users: [ + include: [ + authored_entries: [over: "author_id"] + ] + ] + ) + + assert {:ok, %SatSubsResp{err: nil}} = + MockClient.make_rpc_call(conn, "subscribe", request) + + assert {[^request_id], data} = receive_subscription_data(conn, sub_id) + + assert [ + %SatOpInsert{row_data: %{values: [@john_doe_id, "John Doe"]}}, + %SatOpInsert{row_data: %{values: [@entry_id, "Hello world", @john_doe_id]}} + ] = data + + {:ok, 1} = + :epgsql.equery( + pg_conn, + "INSERT INTO public.authored_entries (id, author_id, content) VALUES ($1, $2, $3)", + [ + @other_entry_id, + @john_doe_id, + "Second item" + ] + ) + + assert [%NewRecord{record: %{"content" => "Second item"}}] = + receive_txn_changes(conn, rel_map) + + assert {:ok, %SatUnsubsResp{}} = + MockClient.make_rpc_call(conn, "unsubscribe", %SatUnsubsReq{ + subscription_ids: [sub_id] + }) + + assert {[^sub_id], _, data} = receive_unsub_gone_batch(conn, rel_map) + + assert MapSet.new([ + %Gone{relation: {"public", "users"}, pk: [@john_doe_id]}, + %Gone{relation: {"public", "authored_entries"}, pk: [@entry_id]}, + %Gone{relation: {"public", "authored_entries"}, pk: [@other_entry_id]} + ]) == MapSet.new(data) + end) + end + + @tag with_sql: """ + INSERT INTO public.users (id, name) VALUES ('#{@john_doe_id}', 'John Doe'); + INSERT INTO public.authored_entries (id, author_id, content) VALUES ('#{@entry_id}', '#{@john_doe_id}', 'Hello world'); + INSERT INTO public.comments (id, entry_id, content, author_id) VALUES ('#{uuid4()}', '#{@entry_id}', 'Comment 1', '#{@john_doe_id}'); + """ + test "Unsubscribe from one shape keeps rows that are in others", %{conn: pg_conn} = ctx do + MockClient.with_connect([auth: ctx, id: ctx.client_id, port: ctx.port], fn conn -> + rel_map = start_replication_and_assert_response(conn, ctx.electrified_count) + + {sub_id, request_id, request} = + ProtocolHelpers.simple_sub_request( + users: [ + include: [ + authored_entries: [over: "author_id"] + ] + ] + ) + + assert {:ok, %SatSubsResp{err: nil}} = + MockClient.make_rpc_call(conn, "subscribe", request) + + assert {[^request_id], data} = receive_subscription_data(conn, sub_id) + + assert [ + %SatOpInsert{row_data: %{values: [@john_doe_id, "John Doe"]}}, + 
%SatOpInsert{row_data: %{values: [@entry_id, "Hello world", @john_doe_id]}} + ] = data + + {sub_id2, request_id2, request} = ProtocolHelpers.simple_sub_request("users") + + assert {:ok, %SatSubsResp{err: nil}} = + MockClient.make_rpc_call(conn, "subscribe", request) + + assert {[^request_id2], []} = receive_subscription_data(conn, sub_id2) + + assert {:ok, %SatUnsubsResp{}} = + MockClient.make_rpc_call(conn, "unsubscribe", %SatUnsubsReq{ + subscription_ids: [sub_id] + }) + + assert {[^sub_id], _, data} = receive_unsub_gone_batch(conn, rel_map) + + # We should see only the authored entry gone, because subscription 2 keeps a reference to the user. + assert [%Gone{relation: {"public", "authored_entries"}, pk: [@entry_id]}] == data + + # And check that the graph has been updated and updates come through correctly + + Client.with_transaction(pg_conn, fn tx_conn -> + {:ok, 1} = + :epgsql.equery( + tx_conn, + "UPDATE public.users SET name = $2 WHERE id = $1", + [@john_doe_id, "Johnny Doe"] + ) + + {:ok, 1} = + :epgsql.equery( + tx_conn, + "UPDATE public.authored_entries SET content = $2 WHERE id = $1", + [@entry_id, "Updated"] + ) + end) + + assert [%UpdatedRecord{record: %{"name" => "Johnny Doe"}}] = + receive_txn_changes(conn, rel_map) + end) + end + + @tag with_sql: """ + INSERT INTO public.users (id, name) VALUES ('#{@john_doe_id}', 'John Doe'); + """ + test "Unsub + reconnect: doesn't resend a GONE batch if acknowledged on reconnect", ctx do + {last_lsn, rel_map, sub_id} = + MockClient.with_connect([auth: ctx, id: ctx.client_id, port: ctx.port], fn conn -> + rel_map = start_replication_and_assert_response(conn, ctx.electrified_count) + + {sub_id, [%NewRecord{record: %{"id" => @john_doe_id}}]} = + simple_subscribe(conn, rel_map, :users) + + {:ok, %SatUnsubsResp{}} = + MockClient.make_rpc_call(conn, "unsubscribe", %SatUnsubsReq{ + subscription_ids: [sub_id] + }) + + {[^sub_id], lsn, [%Gone{pk: [@john_doe_id]}]} = receive_unsub_gone_batch(conn, rel_map) + {lsn, rel_map, sub_id} + end) + + MockClient.with_connect([auth: ctx, id: ctx.client_id, port: ctx.port], fn conn -> + # A reconnect at precisely the LSN of a GONE batch + filled field assumes it was seen + assert {:ok, _} = + MockClient.make_rpc_call(conn, "startReplication", %SatInStartReplicationReq{ + lsn: last_lsn, + observed_gone_batch: [sub_id] + }) + + assert {_, [%NewRecord{record: %{"id" => @john_doe_id}}]} = + simple_subscribe(conn, rel_map, :users) + + refute_received {^conn, %SatUnsubsDataBegin{}} + end) + end
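The reconnection tests in this file exercise the two acknowledgement paths for a GONE batch. A minimal sketch of both, using the message structs added in this diff; `send_data` and `send_rpc` are hypothetical stand-ins for whatever a real client uses to push messages and RPC calls over the socket, not part of the actual client API:

```elixir
# Sketch only: the two ways a client confirms a GONE batch.
defmodule GoneAckSketch do
  alias Electric.Satellite.{SatOpLogAck, SatInStartReplicationReq}

  # 1. In-session: acknowledge at `lsn`, naming the affected subscription in the
  #    new `gone_subscription_ids` field. `transaction_id: 0` is the special case
  #    accepted by OutRep.ack_transactions/2 when no transactions were seen yet.
  def ack_in_session(send_data, lsn, sub_id) do
    send_data.(%SatOpLogAck{
      lsn: lsn,
      transaction_id: 0,
      gone_subscription_ids: [sub_id]
    })
  end

  # 2. At reconnect: report the batch as observed via the new
  #    `observed_gone_batch` field (proto field 8), so the server
  #    does not resend a batch the client already processed.
  def resume_after_gone(send_rpc, lsn, sub_id) do
    send_rpc.("startReplication", %SatInStartReplicationReq{
      lsn: lsn,
      observed_gone_batch: [sub_id]
    })
  end
end
```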
+ + @tag with_sql: "INSERT INTO public.users (id, name) VALUES ('#{@john_doe_id}', 'John Doe')" + test "Unsub + reconnect: reconnection before GONE batch with sub id acts as if unsub never happened", + ctx do + {last_lsn, rel_map, sub_id} = + MockClient.with_connect([auth: ctx, id: ctx.client_id, port: ctx.port], fn conn -> + rel_map = start_replication_and_assert_response(conn, ctx.electrified_count) + + {sub_id, [%NewRecord{record: %{"id" => @john_doe_id}}]} = + simple_subscribe(conn, rel_map, :users) + + {:ok, %SatUnsubsResp{}} = + MockClient.make_rpc_call(conn, "unsubscribe", %SatUnsubsReq{ + subscription_ids: [sub_id] + }) + + {[^sub_id], lsn, [%Gone{pk: [@john_doe_id]}]} = receive_unsub_gone_batch(conn, rel_map) + {lsn, rel_map, sub_id} + end) + + MockClient.with_connect([auth: ctx, id: ctx.client_id, port: ctx.port], fn conn -> + # A reconnect with same sub ID you unsubbed from is still allowed before the acknowledge point + assert {:ok, %{err: nil}} = + MockClient.make_rpc_call(conn, "startReplication", %SatInStartReplicationReq{ + lsn: last_lsn, + subscription_ids: [sub_id] + }) + + {_sub_id, []} = + simple_subscribe(conn, rel_map, :users) + + refute_received {^conn, %SatUnsubsDataBegin{}} + end) + end + + @tag with_sql: "INSERT INTO public.users (id, name) VALUES ('#{@john_doe_id}', 'John Doe')" + test "Unsub + reconnect: send a GONE batch if reconnected before unsub but without this subscription", + ctx do + {last_lsn, rel_map, sub_id} = + MockClient.with_connect([auth: ctx, id: ctx.client_id, port: ctx.port], fn conn -> + rel_map = start_replication_and_assert_response(conn, ctx.electrified_count) + + {sub_id, [%NewRecord{record: %{"id" => @john_doe_id}}]} = + simple_subscribe(conn, rel_map, :users) + + {lsn, [%Gone{pk: [@john_doe_id]}]} = simple_unsub(conn, rel_map, sub_id) + {lsn, rel_map, sub_id} + end) + + MockClient.with_connect([auth: ctx, id: ctx.client_id, port: ctx.port], fn conn -> + # A reconnect at precisely the LSN of a GONE batch and without filled field assumes GONE batch was not seen + assert {:ok, %{err: nil}} = + MockClient.make_rpc_call(conn, "startReplication", %SatInStartReplicationReq{ + lsn: last_lsn + }) + + {[^sub_id], _, [%Gone{pk: [@john_doe_id]}]} = receive_unsub_gone_batch(conn, rel_map) + end) + end + + @tag with_sql: "INSERT INTO public.users (id, name) VALUES ('#{@john_doe_id}', 'John Doe')" + test "Unsub + reconnect: reconnection after a gone batch & without ID assumes it was seen", ctx do + {last_lsn, rel_map} = + MockClient.with_connect([auth: ctx, id: ctx.client_id, port: ctx.port], fn conn -> + rel_map = start_replication_and_assert_response(conn, ctx.electrified_count) + + {sub_id, [%NewRecord{record: %{"id" => @john_doe_id}}]} = + simple_subscribe(conn, rel_map, :users) + + {_lsn, [%Gone{pk: [@john_doe_id]}]} = simple_unsub(conn, rel_map, sub_id) + + # Get a valid LSN after the unsub point + {_, []} = simple_subscribe(conn, rel_map, :my_entries) + + {:ok, _} = + :epgsql.squery( + ctx.conn, + "INSERT INTO public.my_entries (id, content) VALUES (gen_random_uuid(), 'test')" + ) + + assert %{lsn: lsn, changes: [%{relation: {_, "my_entries"}}]} = + receive_txn(conn, rel_map) + + {lsn, rel_map} + end) + + MockClient.with_connect([auth: ctx, id: ctx.client_id, port: ctx.port], fn conn -> + # A reconnect at an LSN later than the GONE batch assumes it was seen + assert {:ok, %{err: nil}} = + MockClient.make_rpc_call(conn, "startReplication", %SatInStartReplicationReq{ + lsn: last_lsn + }) + + {_, [%NewRecord{record: %{"id" => @john_doe_id}}]} = + simple_subscribe(conn, rel_map, :users) + + refute_received {^conn, %SatUnsubsDataBegin{}} + end) + end
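Between them, the reconnection tests above and below pin down how the server treats a previously unsubscribed subscription at `startReplication`. A condensed sketch of that decision rule, assuming a plain LSN comparison; the real logic lives in `ClientReconnectionInfo` and works off cached unsubscribe points, so this is illustrative only:

```elixir
defmodule GoneBatchRule do
  @moduledoc false

  # Hedged sketch of the reconnect rule exercised by these tests, not the
  # actual server implementation.
  #
  #   * reconnect_lsn - LSN the client resumes from
  #   * gone_lsn      - LSN at which the GONE batch was emitted
  #   * observed?     - sub id was listed in `observed_gone_batch` (or acked earlier)
  #   * resumed?      - sub id was listed in `subscription_ids`
  def decide(reconnect_lsn, gone_lsn, observed?, resumed?) do
    past_batch? = reconnect_lsn > gone_lsn or observed?

    cond do
      # Past the batch the unsubscribe is final, so resuming the sub is an error.
      past_batch? and resumed? -> {:error, :SUBSCRIPTION_NOT_FOUND}
      # Past the batch and not resuming: nothing to resend.
      past_batch? -> :assume_seen
      # At or before the batch, resuming: as if the unsubscribe never happened.
      resumed? -> :restore_subscription
      # At or before the batch, neither resumed nor observed: resend the batch.
      true -> :resend_gone_batch
    end
  end
end
```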
+ + @tag with_sql: "INSERT INTO public.users (id, name) VALUES ('#{@john_doe_id}', 'John Doe')" + test "Unsub + reconnect: can't reconnect with a subscription id at a point after a GONE batch", + ctx do + {last_lsn, _, sub_id} = + MockClient.with_connect([auth: ctx, id: ctx.client_id, port: ctx.port], fn conn -> + rel_map = start_replication_and_assert_response(conn, ctx.electrified_count) + + {first_sub_id, [%NewRecord{record: %{"id" => @john_doe_id}}]} = + simple_subscribe(conn, rel_map, :users) + + {_lsn, [%Gone{pk: [@john_doe_id]}]} = simple_unsub(conn, rel_map, first_sub_id) + + # Get a valid LSN after the unsub point + {_, []} = simple_subscribe(conn, rel_map, :my_entries) + + {:ok, _} = + :epgsql.squery( + ctx.conn, + "INSERT INTO public.my_entries (id, content) VALUES (gen_random_uuid(), 'test')" + ) + + assert %{lsn: lsn, changes: [%{relation: {_, "my_entries"}}]} = + receive_txn(conn, rel_map) + + {lsn, rel_map, first_sub_id} + end) + + MockClient.with_connect([auth: ctx, id: ctx.client_id, port: ctx.port], fn conn -> + # A reconnect at an LSN later than the GONE batch assumes it was seen + assert {:ok, %{err: %{code: :SUBSCRIPTION_NOT_FOUND}}} = + MockClient.make_rpc_call(conn, "startReplication", %SatInStartReplicationReq{ + lsn: last_lsn, + subscription_ids: [sub_id] + }) + end) + end + + @tag with_sql: "INSERT INTO public.users (id, name) VALUES ('#{@john_doe_id}', 'John Doe')" + test "Unsub + reconnect: can't reconnect with a subscription id at a point after a GONE batch is acknowledged", + ctx do + {last_lsn, sub_id} = + MockClient.with_connect([auth: ctx, id: ctx.client_id, port: ctx.port], fn conn -> + rel_map = start_replication_and_assert_response(conn, ctx.electrified_count) + + {sub_id, [%NewRecord{record: %{"id" => @john_doe_id}}]} = + simple_subscribe(conn, rel_map, :users) + + {lsn, [%Gone{pk: [@john_doe_id]}]} = simple_unsub(conn, rel_map, sub_id) + + MockClient.send_data(conn, %SatOpLogAck{lsn: lsn, gone_subscription_ids: [sub_id]}) + + {lsn, sub_id} + end) + + MockClient.with_connect([auth: ctx, id: ctx.client_id, port: ctx.port], fn conn -> + # Should fail, because it's after the acknowledgement point of unsub, so sub id is not valid + assert {:ok, %{err: %{code: :SUBSCRIPTION_NOT_FOUND}}} = + MockClient.make_rpc_call(conn, "startReplication", %SatInStartReplicationReq{ + lsn: last_lsn, + subscription_ids: [sub_id] + }) + end) + end + + @tag with_sql: "INSERT INTO public.users (id, name) VALUES ('#{@john_doe_id}', 'John Doe')" + test "Unsub + reconnect: unsubscribe points are cached", + ctx do + {last_lsn, rel_map, sub_id} = + MockClient.with_connect([auth: ctx, id: ctx.client_id, port: ctx.port], fn conn -> + rel_map = start_replication_and_assert_response(conn, ctx.electrified_count) + + {sub_id, [%NewRecord{record: %{"id" => @john_doe_id}}]} = + simple_subscribe(conn, rel_map, :users) + + {lsn, [%Gone{pk: [@john_doe_id]}]} = simple_unsub(conn, rel_map, sub_id) + {lsn, rel_map, sub_id} + end) + + # Clear ETS cache to force DB reload + ClientReconnectionInfo.clear_all_ets_data(ctx.client_id) + + MockClient.with_connect([auth: ctx, id: ctx.client_id, port: ctx.port], fn conn -> + # A reconnect at precisely the LSN of a GONE batch and without filled field assumes GONE batch was not seen + assert {:ok, %{err: nil}} = + MockClient.make_rpc_call(conn, "startReplication", %SatInStartReplicationReq{ + lsn: last_lsn + }) + + {[^sub_id], _, [%Gone{pk: [@john_doe_id]}]} = receive_unsub_gone_batch(conn, rel_map) + end) + end + end + + defp simple_subscribe(conn, rel_map, shape) do + {sub_id, request_id, request} = ProtocolHelpers.simple_sub_request(shape) + + assert {:ok, %SatSubsResp{err: nil}} = + MockClient.make_rpc_call(conn, "subscribe", request) + + {[^request_id], data} = + receive_subscription_data(conn, sub_id, relations: rel_map) + + {sub_id, data} + end + + defp simple_unsub(conn, rel_map, id_or_ids) do + ids = List.wrap(id_or_ids) + + assert {:ok, %SatUnsubsResp{}} = + MockClient.make_rpc_call(conn, "unsubscribe", %SatUnsubsReq{ + subscription_ids: ids + }) + + {received_ids, lsn, msgs} = receive_unsub_gone_batch(conn, rel_map) + assert Enum.sort(ids) == Enum.sort(received_ids) + + {lsn, msgs} end # Here we intentionally set display settings to unsupported values on the database, so that @@ -1365,4 +1748,12 @@ defmodule Electric.Satellite.SubscriptionsTest do """ ) end + + def flush() do + receive do + _ -> flush() + after + 0 -> :ok
end + end end diff --git a/components/electric/test/electric/satellite/ws_server_test.exs b/components/electric/test/electric/satellite/ws_server_test.exs index 572e55eacb..c604027536 100644 --- a/components/electric/test/electric/satellite/ws_server_test.exs +++ b/components/electric/test/electric/satellite/ws_server_test.exs @@ -1,6 +1,7 @@ defmodule Electric.Satellite.WebsocketServerTest do use ExUnit.Case, async: false + alias Satellite.ProtocolHelpers alias Electric.Replication.Changes.ReferencedRecord use Electric.Satellite.Protobuf @@ -17,6 +18,7 @@ defmodule Electric.Satellite.WebsocketServerTest do alias Electric.Replication.Changes.{ NewRecord, + UpdatedRecord, Transaction } @@ -36,6 +38,11 @@ defmodule Electric.Satellite.WebsocketServerTest do "CREATE TABLE public.test1 (id TEXT PRIMARY KEY, electric_user_id VARCHAR(64), content VARCHAR(64))", "CREATE TABLE public.test_child (id TEXT PRIMARY KEY, electric_user_id VARCHAR(64), parent_id TEXT REFERENCES test1 (id))" ]} + @test_shape_migration {"20230101", + [ + "CREATE TABLE public.test1 (id TEXT PRIMARY KEY, content VARCHAR(64))", + "CREATE TABLE public.test_child (id TEXT PRIMARY KEY, parent_id TEXT NOT NULL REFERENCES test1 (id), some_flag BOOLEAN NOT NULL)" + ]} @current_wal_pos 1 @@ -53,6 +60,8 @@ defmodule Electric.Satellite.WebsocketServerTest do end setup ctx do + test_pid = self() + ctx = ctx |> Map.update( @@ -60,6 +69,13 @@ defmodule Electric.Satellite.WebsocketServerTest do &mock_data_function/2, fn {name, opts} -> &apply(__MODULE__, name, [&1, &2, opts]) end ) + |> Map.put_new(:move_in_data_fun, {:mock_move_in_data_fn, []}) + |> Map.update!( + :move_in_data_fun, + fn {name, opts} -> + &apply(__MODULE__, name, [&1, &2, &3, &4, [{:test_pid, test_pid} | opts]]) + end + ) |> Map.put_new(:allowed_unacked_txs, 30) connector_config = [origin: "test-origin", connection: []] @@ -70,6 +86,7 @@ defmodule Electric.Satellite.WebsocketServerTest do auth_provider: Auth.provider(), connector_config: connector_config, subscription_data_fun: ctx.subscription_data_fun, + move_in_data_fun: ctx.move_in_data_fun, allowed_unacked_txs: ctx.allowed_unacked_txs} start_link_supervised!({Bandit, port: port, plug: plug}) @@ -769,6 +786,176 @@ defmodule Electric.Satellite.WebsocketServerTest do refute_receive {^conn, %SatOpLog{ops: [_, _, _]}} end) end + + @tag with_migrations: [@test_shape_migration] + test "move-in after an unsubscribe should not contain rows for the subscription that's gone", + ctx do + with_connect([port: ctx.port, auth: ctx, id: ctx.client_id], fn conn -> + rel_map = start_replication_and_assert_response(conn, 2) + + [{client_name, _client_pid}] = active_clients() + mocked_producer = Producer.name(client_name) + + ## Establish 2 subscriptions that intersect on the root, but differ in children + + {sub_id1, req_id, req} = + ProtocolHelpers.simple_sub_request( + test1: [ + where: "this.content ILIKE 's%'", + include: [test_child: [over: "parent_id", where: "this.some_flag"]] + ] + ) + + assert {:ok, %SatSubsResp{subscription_id: ^sub_id1, err: nil}} = + MockClient.make_rpc_call(conn, "subscribe", req) + + # The real data function would have made a magic write which we're emulating here + DownstreamProducerMock.produce(mocked_producer, build_events([], 1)) + assert {[^req_id], []} = receive_subscription_data(conn, sub_id1) + + {sub_id2, req_id, req} = + ProtocolHelpers.simple_sub_request( + test1: [ + where: "this.content ILIKE 's%'", + include: [test_child: [over: "parent_id", where: "not this.some_flag"]] + ] + ) + + assert 
{:ok, %SatSubsResp{subscription_id: ^sub_id2, err: nil}} = + MockClient.make_rpc_call(conn, "subscribe", req) + + # The real data function would have made a magic write which we're emulating here + DownstreamProducerMock.produce(mocked_producer, build_events([], 2)) + assert {[^req_id], []} = receive_subscription_data(conn, sub_id2) + + ## A transaction that causes a move-in (client needs to see `test_child` rows) + DownstreamProducerMock.produce( + mocked_producer, + %Changes.Transaction{ + lsn: 3, + xid: 3, + commit_timestamp: DateTime.utc_now(), + changes: [ + %UpdatedRecord{ + relation: {"public", "test1"}, + record: %{ + "id" => "parent_1", + "content" => "super", + "parent_id" => "1" + } + } + ], + origin: ctx.client_id + } + |> then(&[{&1, &1.lsn}]) + ) + + # Intercept the move-in function to supply data + assert_receive {:mock_move_in, {mock_pid, mock_ref}, 3, sq_map} + + child_layers = sq_map |> Map.keys() |> Enum.flat_map(& &1.next_layers) + sub1_layer = Enum.find(child_layers, &(&1.where_target.query == "this.some_flag")) + sub2_layer = Enum.find(child_layers, &(&1.where_target.query == "not this.some_flag")) + + xmin = 5 + + graph = + Graph.new() + |> Graph.add_edge( + {{"public", "test1"}, ["parent_1"]}, + {{"public", "test_child"}, ["child_1"]}, + label: sub1_layer.key + ) + |> Graph.add_edge( + {{"public", "test1"}, ["parent_1"]}, + {{"public", "test_child"}, ["child_2"]}, + label: sub2_layer.key + ) + + data = %{ + {{"public", "test_child"}, ["child_1"]} => { + %NewRecord{ + relation: {"public", "test_child"}, + record: %{"id" => "child_1", "parent_id" => "parent_1", "some_flag" => "t"}, + tags: [] + }, + [sub1_layer.request_id] + }, + {{"public", "test_child"}, ["child_2"]} => { + %NewRecord{ + relation: {"public", "test_child"}, + record: %{"id" => "child_2", "parent_id" => "parent_1", "some_flag" => "f"}, + tags: [] + }, + [sub2_layer.request_id] + } + } + + send(mock_pid, {:mock_move_in_data, mock_ref, {xmin, graph, data}}) + + assert %{additional_data_ref: ref, changes: [%NewRecord{record: %{"id" => "parent_1"}}]} = + receive_txn(conn, rel_map) + + ## Before the query is fulfilled, we unsubscribe from one of the shapes. 
+ assert {:ok, _} = + MockClient.make_rpc_call(conn, "unsubscribe", %SatUnsubsReq{ + subscription_ids: [sub_id2] + }) + + {[^sub_id2], _, []} = receive_unsub_gone_batch(conn, rel_map) + + ## And then the data arrives + DownstreamProducerMock.produce(mocked_producer, build_events([], 6)) + send(mock_pid, {:mock_move_in_trigger, mock_ref}) + + ## And we see only a row that's in the subscription that's still live + {^ref, [%{record: %{"id" => "child_1", "some_flag" => "t"}}]} = + receive_additional_changes(conn, rel_map) + + ## And further changes to the other child are not propagated + DownstreamProducerMock.produce( + mocked_producer, + %Changes.Transaction{ + lsn: 7, + xid: 7, + commit_timestamp: DateTime.utc_now(), + changes: [ + %UpdatedRecord{ + relation: {"public", "test_child"}, + old_record: %{ + "id" => "child_1", + "parent_id" => "parent_1", + "some_flag" => "t" + }, + record: %{ + "id" => "child_1", + "parent_id" => "parent_1", + "some_flag" => "t" + } + }, + %UpdatedRecord{ + relation: {"public", "test_child"}, + old_record: %{ + "id" => "child_2", + "parent_id" => "parent_1", + "some_flag" => "f" + }, + record: %{ + "id" => "child_2", + "parent_id" => "parent_1", + "some_flag" => "f" + } + } + ], + origin: ctx.client_id + } + |> then(&[{&1, &1.lsn}]) + ) + + assert %{changes: [%UpdatedRecord{record: %{"id" => "child_1"}}]} = + receive_txn(conn, rel_map) + end) + end end describe "Incoming replication (Satellite -> PG)" do @@ -892,6 +1079,38 @@ defmodule Electric.Satellite.WebsocketServerTest do ) end + def mock_move_in_data_fn( + move_in_ref, + {subquery_map, affected_txs}, + _context, + [reply_to: {ref, pid}, connection: _], + opts \\ [] + ) do + test_pid = Keyword.fetch!(opts, :test_pid) + test_ref = make_ref() + + send(test_pid, {:mock_move_in, {self(), test_ref}, move_in_ref, subquery_map}) + + {insertion_point, graph_updates, changes} = + receive do + {:mock_move_in_data, ^test_ref, value} -> + value + end + + send(pid, {:data_insertion_point, ref, insertion_point}) + + request_ids = MapSet.new(Map.keys(subquery_map), & &1.request_id) + + receive do + {:mock_move_in_trigger, ^test_ref} -> + send( + pid, + {:move_in_query_data, move_in_ref, insertion_point, + {request_ids, graph_updates, changes}, affected_txs} + ) + end + end + def clean_connections() do :ok = drain_pids(active_clients()) :ok = drain_active_resources(connectors())
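For orientation before the decoding helper added below: a GONE batch arrives as a delimited message sequence, optionally preceded by `SatRelation` messages for tables the client has not yet seen. A sketch with made-up values:

```elixir
# Illustrative framing of a GONE batch, as assembled by
# Protocol.prepare_unsubs_data/3 earlier in this diff; the id, lsn, and
# ops below are invented for the example.
defmodule GoneBatchFraming do
  alias Electric.Satellite.{SatUnsubsDataBegin, SatOpLog, SatUnsubsDataEnd}

  def example do
    [
      # Any %SatRelation{} messages for relations unknown to the client come first.
      %SatUnsubsDataBegin{subscription_ids: ["some-subscription-id"], lsn: <<0, 42>>},
      # One or more SatOpLog messages whose ops carry the Gone records for dropped rows.
      %SatOpLog{ops: []},
      %SatUnsubsDataEnd{}
    ]
  end
end
```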
+ """ + @spec receive_unsub_gone_batch(term(), String.t(), [ + {:timeout, non_neg_integer()} | {:expecting_lsn, String.t()} + ]) :: {[String.t(), ...], String.t(), [Gone.t()]} + def receive_unsub_gone_batch(conn, relation_map, opts \\ []) do + # TODO: Addition of shapes complicated initial data sending for multiple requests due to records + # fulfilling multiple requests so we're "cheating" here while the client doesn't care by + # sending all but one "request data" messages empty, and stuffing entire response into the first one. + # See paired comment in `Electric.Satellite.Protocol.handle_subscription_data/3` + first_message_timeout = Keyword.get(opts, :timeout, 1000) + + receive do + {^conn, %SatUnsubsDataBegin{subscription_ids: subscription_ids, lsn: received_lsn}} -> + case Keyword.fetch(opts, :expecting_lsn) do + {:ok, expected_lsn} -> assert expected_lsn == received_lsn + _ -> nil + end + + result = + receive_rest_of_unsub_data(conn, []) + |> assert_unsub_data_format(relation_map) + + {subscription_ids, received_lsn, result} + after + first_message_timeout -> + {:messages, messages} = :erlang.process_info(self(), :messages) + + flunk( + "Timed out waiting for #{inspect(%SatUnsubsDataBegin{})} after #{first_message_timeout} ms.\n\nCurrent messages: #{inspect(messages, pretty: true)}" + ) + end + end + + defp receive_rest_of_unsub_data(conn, acc) do + receive do + {^conn, %SatUnsubsDataEnd{}} -> + Enum.reverse(acc) + + {_, %type{} = msg} when type in [SatOpLog] -> + receive_rest_of_unsub_data(conn, [msg | acc]) + after + 100 -> + flunk( + "Timeout while waiting for message sequence responding to an unsubscribe, received:\n#{inspect(acc, pretty: true)}" + ) + end + end + + defp assert_unsub_data_format(messages, relation_map) do + Enum.each(messages, fn op -> assert %SatOpLog{} = op end) + + fake_begin = %SatOpLog{ops: [%SatTransOp{op: {:additional_begin, %SatOpAdditionalBegin{}}}]} + + fake_commit = %SatOpLog{ + ops: [%SatTransOp{op: {:additional_commit, %SatOpAdditionalCommit{}}}] + } + + {incomplete, []} = + Serialization.deserialize_trans("postgres_1", fake_begin, nil, relation_map) + + {incomplete, []} = + Enum.reduce( + messages, + {incomplete, []}, + &Serialization.deserialize_trans("postgres_1", &1, elem(&2, 0), relation_map) + ) + + {nil, [{:additional_data, _, changes}]} = + Serialization.deserialize_trans("postgres_1", fake_commit, incomplete, relation_map) + + changes |> Enum.sort_by(&{&1.relation, &1.pk}) + end + @doc """ Wait for and receives subscription data response as sent back to the test process by `Satellite.TestWsClient`. @@ -169,8 +248,11 @@ defmodule ElectricTest.SatelliteHelpers do and verifies their order. Returns a tuple, with first element being all the mentioned request IDs, and the second being all the data. 
""" @spec receive_subscription_data(term(), String.t(), [ - {:timeout, non_neg_integer()} | {:expecting_lsn, String.t()} | {:returning_lsn, true} - ]) :: {[String.t()], [%SatOpInsert{}]} + {:timeout, non_neg_integer()} + | {:expecting_lsn, String.t()} + | {:returning_lsn, true} + | {:relations, cached_rels()} + ]) :: {request_ids :: [String.t()], data :: [%SatOpInsert{}]} def receive_subscription_data(conn, subscription_id, opts \\ []) do # TODO: Addition of shapes complicated initial data sending for multiple requests due to records # fulfilling multiple requests so we're "cheating" here while the client doesn't care by @@ -188,6 +270,7 @@ defmodule ElectricTest.SatelliteHelpers do result = receive_rest_of_subscription_data(conn, []) |> assert_subscription_data_format({[], []}) + |> maybe_unwrap_subscription_data(Keyword.get(opts, :relations)) if Keyword.has_key?(opts, :returning_lsn), do: {received_lsn, result}, else: result after @@ -236,4 +319,21 @@ defmodule ElectricTest.SatelliteHelpers do assert_subscription_data_format(messages, {[id | ids], data ++ oplogs}) end + + defp maybe_unwrap_subscription_data(results, nil), do: results + + defp maybe_unwrap_subscription_data({ids, inserts}, rel_map) do + oplog = + %SatOpLog{ + ops: + [%SatTransOp{op: {:additional_begin, %SatOpAdditionalBegin{ref: nil}}}] ++ + Enum.map(inserts, &%SatTransOp{op: {:insert, &1}}) ++ + [%SatTransOp{op: {:additional_commit, %SatOpAdditionalCommit{ref: nil}}}] + } + + {nil, [{_, _, changes}]} = + Serialization.deserialize_trans("postgres_1", oplog, nil, rel_map) + + {ids, changes} + end end diff --git a/protocol/satellite.proto b/protocol/satellite.proto index a9057096dd..05ce561251 100644 --- a/protocol/satellite.proto +++ b/protocol/satellite.proto @@ -128,14 +128,14 @@ message SatInStartReplicationReq { // the subscriptions identifiers the client wants to resume subscription repeated string subscription_ids = 4; - + // The version of the most recent migration seen by the client. optional string schema_version = 5; - + /** - * List of transaction IDs for which the client - * observed additional data before disconnect - */ + * List of transaction IDs for which the client + * observed additional data before disconnect + */ repeated uint64 observed_transaction_data = 6; /** @@ -150,6 +150,9 @@ message SatInStartReplicationReq { // server. The server is responsible to keep/gc the remaining subscriptions. // - the server must ensure that if the client skips a portion of the replication // stream, the client is not able to read data it no longer has access to. + + /** List of subscription IDs for which the client observed a GONE batch after unsubscribing */ + repeated string observed_gone_batch = 8; } // (Producer) The result of the start replication requests @@ -255,6 +258,8 @@ message SatOpLogAck { repeated string subscription_ids = 4; /** Transaction IDs for which additional data was received immediately after this transaction */ repeated uint64 additional_data_source_ids = 5; + /** Subscription IDs for GONE batches received at this LSN */ + repeated string gone_subscription_ids = 6; } // (Producer) Single operation, should be only send as part of the SatOplog @@ -647,6 +652,19 @@ message SatSubsDataBegin { message SatSubsDataEnd { } +// Begin delimiter for the incoming subscription data +message SatUnsubsDataBegin { + // Identifier of the subscriptions that were handled as unsubbed + repeated string subscription_ids = 1; + // LSN at which this data is being sent. 
May be a duplicate of a transaction that was sent immediately before. + bytes lsn = 2; +} + +// End delimiter for the incoming subscription data +message SatUnsubsDataEnd { +} + + // Begin delimiter for the initial shape data message SatShapeDataBegin { // Identifier of the request