diff --git a/.changeset/cold-items-explain.md b/.changeset/cold-items-explain.md new file mode 100644 index 000000000..78baa7bfc --- /dev/null +++ b/.changeset/cold-items-explain.md @@ -0,0 +1,5 @@ +--- +'@powersync/service-module-mongodb': patch +--- + +Fix diagnostics schema authorization issues for MongoDB diff --git a/.changeset/fifty-dogs-reply.md b/.changeset/fifty-dogs-reply.md new file mode 100644 index 000000000..d19c4b701 --- /dev/null +++ b/.changeset/fifty-dogs-reply.md @@ -0,0 +1,5 @@ +--- +'@powersync/service-module-mongodb': minor +--- + +Reduce permissions required for replicating a single MongoDB database diff --git a/.changeset/gentle-icons-try.md b/.changeset/gentle-icons-try.md new file mode 100644 index 000000000..103672131 --- /dev/null +++ b/.changeset/gentle-icons-try.md @@ -0,0 +1,5 @@ +--- +'@powersync/service-module-mysql': patch +--- + +Fixed MySQL version checking to better handle non-semantic version strings diff --git a/.changeset/green-peas-roll.md b/.changeset/green-peas-roll.md new file mode 100644 index 000000000..61762c082 --- /dev/null +++ b/.changeset/green-peas-roll.md @@ -0,0 +1,6 @@ +--- +'@powersync/service-module-mongodb': minor +'@powersync/service-image': minor +--- + +Add MongoDB support (Alpha) diff --git a/.changeset/healthy-rules-arrive.md b/.changeset/healthy-rules-arrive.md new file mode 100644 index 000000000..a64c34b1b --- /dev/null +++ b/.changeset/healthy-rules-arrive.md @@ -0,0 +1,5 @@ +--- +'@powersync/service-module-mysql': patch +--- + +Fixed MySQL schema JSON parsing diff --git a/.changeset/heavy-shirts-chew.md b/.changeset/heavy-shirts-chew.md new file mode 100644 index 000000000..9bcb5e662 --- /dev/null +++ b/.changeset/heavy-shirts-chew.md @@ -0,0 +1,5 @@ +--- +'@powersync/service-core': patch +--- + +Improved sync rules storage to cache parsed sync rules, accommodating different parsing options where necessary.
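The "Add MongoDB support (Alpha)" entry above is implemented via MongoDB change streams (see `ChangeStream.ts` later in this diff). As a rough orientation, the following standalone sketch shows the same consumption pattern using the MongoDB Node.js driver; the connection string, database name, namespace filter, and the `streamChanges` helper are illustrative placeholders, not the service's actual code.

```ts
import { MongoClient, Timestamp } from 'mongodb';

// Condensed sketch of the change-stream pattern used by ChangeStream.ts below.
// The URI, database name and namespace filter are placeholders.
async function streamChanges(uri: string, signal: AbortSignal, resumeFrom?: Timestamp) {
  const client = new MongoClient(uri);
  await client.connect();
  const db = client.db('mydatabase');

  // Watch only the namespaces being replicated, and split very large events.
  const pipeline = [{ $match: { 'ns.db': 'mydatabase' } }, { $changeStreamSplitLargeEvent: {} }];

  const stream = db.watch(pipeline, {
    startAtOperationTime: resumeFrom,
    fullDocument: 'updateLookup',
    showExpandedEvents: true,
    maxAwaitTimeMS: 200
  });

  try {
    while (!signal.aborted) {
      // tryNext() resolves with null if no event arrives within maxAwaitTimeMS,
      // giving the loop a chance to observe the abort signal.
      const change = await stream.tryNext();
      if (change == null) {
        continue;
      }
      if (
        change.operationType === 'insert' ||
        change.operationType === 'update' ||
        change.operationType === 'replace' ||
        change.operationType === 'delete'
      ) {
        // A real implementation would map this to a storage batch save,
        // as writeChange() does in ChangeStream.ts.
        console.log(change.operationType, change.ns, change.documentKey);
      }
    }
  } finally {
    await stream.close();
    await client.close();
  }
}
```

The actual implementation additionally maps cluster timestamps to LSNs and commits checkpoints through the storage batch, which is what the checkpoints collection below is for.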
diff --git a/.changeset/lemon-terms-play.md b/.changeset/lemon-terms-play.md new file mode 100644 index 000000000..218d1ebff --- /dev/null +++ b/.changeset/lemon-terms-play.md @@ -0,0 +1,8 @@ +--- +'@powersync/service-module-mysql': minor +--- + +Generate a random serverId based on the sync rule id for the MySQL replication client +Consolidated type mappings between snapshot and replicated values +Enabled MySQL tests in CI + diff --git a/.changeset/olive-spoons-stare.md b/.changeset/olive-spoons-stare.md new file mode 100644 index 000000000..df58ecaf4 --- /dev/null +++ b/.changeset/olive-spoons-stare.md @@ -0,0 +1,5 @@ +--- +'@powersync/service-core': patch +--- + +Moved tag variable initialization in the diagnostics route to ensure it is initialized before usage diff --git a/.changeset/orange-eagles-tap.md b/.changeset/orange-eagles-tap.md new file mode 100644 index 000000000..7a21eec33 --- /dev/null +++ b/.changeset/orange-eagles-tap.md @@ -0,0 +1,5 @@ +--- +'@powersync/lib-services-framework': minor +--- + +Added disposable listeners and observers diff --git a/.changeset/popular-snails-cough.md b/.changeset/popular-snails-cough.md new file mode 100644 index 000000000..5dc9e2d4d --- /dev/null +++ b/.changeset/popular-snails-cough.md @@ -0,0 +1,6 @@ +--- +'@powersync/service-core': minor +'@powersync/service-sync-rules': minor +--- + +Added ability to emit data replication events diff --git a/.changeset/rotten-pumas-protect.md b/.changeset/rotten-pumas-protect.md new file mode 100644 index 000000000..65fc44e61 --- /dev/null +++ b/.changeset/rotten-pumas-protect.md @@ -0,0 +1,9 @@ +--- +'@powersync/service-core': minor +'@powersync/service-module-mysql': minor +'@powersync/service-sync-rules': minor +--- + +Introduced alpha support for MySQL as a datasource for replication. +General cleanup of related code. + diff --git a/.changeset/slow-stingrays-kiss.md b/.changeset/slow-stingrays-kiss.md new file mode 100644 index 000000000..a93126de7 --- /dev/null +++ b/.changeset/slow-stingrays-kiss.md @@ -0,0 +1,5 @@ +--- +'@powersync/service-core': minor +--- + +Moved Write Checkpoint APIs to SyncBucketStorage diff --git a/.changeset/sour-turkeys-collect.md b/.changeset/sour-turkeys-collect.md new file mode 100644 index 000000000..d9bc279fb --- /dev/null +++ b/.changeset/sour-turkeys-collect.md @@ -0,0 +1,7 @@ +--- +'@powersync/service-module-postgres': patch +'@powersync/service-rsocket-router': patch +'@powersync/service-types': patch +--- + +Updates from replication events changes diff --git a/.changeset/tender-vans-impress.md b/.changeset/tender-vans-impress.md new file mode 100644 index 000000000..106960369 --- /dev/null +++ b/.changeset/tender-vans-impress.md @@ -0,0 +1,16 @@ +--- +'@powersync/service-core': minor +'@powersync/service-sync-rules': minor +'@powersync/lib-services-framework': minor +'@powersync/service-jpgwire': minor +'@powersync/service-types': minor +'@powersync/service-image': major +'@powersync/service-module-postgres': patch +--- + +- Introduced modules to the PowerSync service architecture + - Core functionality has been moved to "engine" classes. Modules can register additional functionality with these engines. + - The sync API functionality used by the routes has been abstracted to an interface. API routes are now managed by the RouterEngine. + - Replication is managed by the ReplicationEngine and new replication data sources can be registered to the engine by modules. +- Refactored existing Postgres replication as a module. +- Removed Postgres specific code from the core service packages.
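The `tender-vans-impress` changeset above describes the new module/engine architecture. The sketch below is a minimal illustration of that registration flow; every interface and method name here is a simplified stand-in rather than the actual `@powersync/service-core` API.

```ts
// Simplified stand-ins for the engine/module relationship described above.
// None of these names are the real @powersync/service-core API.
interface RouteAPI {
  getConnectionStatus(): Promise<{ connected: boolean }>;
}

interface Replicator {
  start(): Promise<void>;
  stop(): Promise<void>;
}

// Engines own registries of implementations; core routing and replication
// logic talk only to the engines, never to a specific datasource directly.
class RouterEngine {
  readonly apis: RouteAPI[] = [];
  registerAPI(api: RouteAPI) {
    this.apis.push(api);
  }
}

class ReplicationEngine {
  readonly replicators: Replicator[] = [];
  register(replicator: Replicator) {
    this.replicators.push(replicator);
  }
}

interface ServiceContext {
  routerEngine: RouterEngine;
  replicationEngine: ReplicationEngine;
}

// A datasource module contributes its own RouteAPI and Replicator implementations.
class ExampleModule {
  initialize(context: ServiceContext, api: RouteAPI, replicator: Replicator) {
    context.routerEngine.registerAPI(api);
    context.replicationEngine.register(replicator);
  }
}
```

In this diff, `MongoModule` plays the role of such a module: it builds a `MongoRouteAPIAdapter` for the router side and a `ChangeStreamReplicator` for the replication side.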
diff --git a/.changeset/violet-garlics-know.md b/.changeset/violet-garlics-know.md new file mode 100644 index 000000000..cb973e611 --- /dev/null +++ b/.changeset/violet-garlics-know.md @@ -0,0 +1,5 @@ +--- +'@powersync/service-sync-rules': minor +--- + +Support json_each as a table-valued function. diff --git a/.changeset/weak-cats-hug.md b/.changeset/weak-cats-hug.md new file mode 100644 index 000000000..df152386e --- /dev/null +++ b/.changeset/weak-cats-hug.md @@ -0,0 +1,5 @@ +--- +'@powersync/service-sync-rules': minor +--- + +Optionally include original types in generated schemas as a comment. diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index a1a8d4899..9f5fbd055 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -22,7 +22,7 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - - name: Build and push + - name: Test Build Docker Image uses: docker/build-push-action@v5 with: cache-from: type=registry,ref=stevenontong/${{vars.DOCKER_REGISTRY}}:cache @@ -30,14 +30,59 @@ jobs: platforms: linux/amd64 push: false file: ./service/Dockerfile - # TODO remove this when removing Journey Micro - build-args: | - GITHUB_TOKEN=${{secrets.RESTRICTED_PACKAGES_TOKEN}} - run-tests: - name: Test + run-core-tests: + name: Core Test runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Start MongoDB + uses: supercharge/mongodb-github-action@1.8.0 + with: + mongodb-version: '6.0' + mongodb-replica-set: test-rs + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version-file: '.nvmrc' + + - uses: pnpm/action-setup@v4 + name: Install pnpm + with: + version: 9 + run_install: false + + - name: Get pnpm store directory + shell: bash + run: | + echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV + + - uses: actions/cache@v3 + name: Setup pnpm cache + with: + path: ${{ env.STORE_PATH }} + key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm-store- + + - name: Install dependencies + run: pnpm install + + - name: Build + shell: bash + run: pnpm build + + - name: Test + run: pnpm test --filter '!./modules/*' + + run-postgres-tests: + name: Postgres Test + runs-on: ubuntu-latest + needs: run-core-tests + strategy: fail-fast: false matrix: @@ -97,4 +142,123 @@ jobs: run: pnpm build - name: Test - run: pnpm test + run: pnpm test --filter='./modules/module-postgres' + + run-mysql-tests: + name: MySQL Test + runs-on: ubuntu-latest + needs: run-core-tests + + strategy: + fail-fast: false + matrix: + mysql-version: [5.7, 8.0, 8.4] + + steps: + - uses: actions/checkout@v4 + + - name: Start MySQL + run: | + docker run \ + --name MySQLTestDatabase \ + -e MYSQL_ROOT_PASSWORD=mypassword \ + -e MYSQL_DATABASE=mydatabase \ + -p 3306:3306 \ + -d mysql:${{ matrix.mysql-version }} \ + --log-bin=/var/lib/mysql/mysql-bin.log \ + --gtid_mode=ON \ + --enforce_gtid_consistency=ON \ + --server-id=1 + + - name: Start MongoDB + uses: supercharge/mongodb-github-action@1.8.0 + with: + mongodb-version: '6.0' + mongodb-replica-set: test-rs + + - name: Setup NodeJS + uses: actions/setup-node@v4 + with: + node-version-file: '.nvmrc' + + - uses: pnpm/action-setup@v4 + name: Install pnpm + with: + version: 9 + run_install: false + + - name: Get pnpm store directory + shell: bash + run: | + echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV + + - uses: actions/cache@v3 + name: Setup pnpm cache + with: + path: ${{ env.STORE_PATH }} + key: ${{ runner.os 
}}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm-store- + + - name: Install dependencies + run: pnpm install + + - name: Build + shell: bash + run: pnpm build + + - name: Test + run: pnpm test --filter='./modules/module-mysql' + + run-mongodb-tests: + name: MongoDB Test + runs-on: ubuntu-latest + needs: run-core-tests + + strategy: + fail-fast: false + matrix: + mongodb-version: ['6.0', '7.0', '8.0'] + + steps: + - uses: actions/checkout@v4 + + - name: Start MongoDB + uses: supercharge/mongodb-github-action@1.8.0 + with: + mongodb-version: ${{ matrix.mongodb-version }} + mongodb-replica-set: test-rs + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version-file: '.nvmrc' + + - uses: pnpm/action-setup@v4 + name: Install pnpm + with: + version: 9 + run_install: false + + - name: Get pnpm store directory + shell: bash + run: | + echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV + + - uses: actions/cache@v3 + name: Setup pnpm cache + with: + path: ${{ env.STORE_PATH }} + key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm-store- + + - name: Install dependencies + run: pnpm install + + - name: Build + shell: bash + run: pnpm build + + - name: Test + run: pnpm test --filter='./modules/module-mongodb' diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 000000000..a9893037d --- /dev/null +++ b/.prettierignore @@ -0,0 +1,5 @@ +**/.git +**/node_modules +dist +lib +pnpm-lock.yaml diff --git a/README.md b/README.md index 06a11cf0a..1e9ac4fbb 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@

-*[PowerSync](https://www.powersync.com) is a sync engine for building local-first apps with instantly-responsive UI/UX and simplified state transfer. Syncs between SQLite on the client-side and Postgres, MongoDB or MySQL on the server-side.* +_[PowerSync](https://www.powersync.com) is a sync engine for building local-first apps with instantly-responsive UI/UX and simplified state transfer. Syncs between SQLite on the client-side and Postgres, MongoDB or MySQL on the server-side._ # PowerSync Service @@ -11,6 +11,7 @@ The service can be started using the public Docker image. See the image [notes](./service/README.md) # Monorepo Structure: + ## Packages - [packages/service-core](./packages/service-core/README.md) @@ -52,13 +53,13 @@ Contains the PowerSync Service code. This project is used to build the `journeya - [docs](./docs/README.md) -Technical documentation regarding the implementation of PowerSync. +Technical documentation regarding the implementation of PowerSync. ## Test Client - [test-client](./test-client/README.md) -Contains a minimal client demonstrating direct usage of the HTTP stream sync API. This can be used to test sync rules in contexts such as automated testing. +Contains a minimal client demonstrating direct usage of the HTTP stream sync API. This can be used to test sync rules in contexts such as automated testing. # Developing diff --git a/libs/lib-services/package.json b/libs/lib-services/package.json index 77e175aa3..add5fc587 100644 --- a/libs/lib-services/package.json +++ b/libs/lib-services/package.json @@ -26,11 +26,13 @@ "dotenv": "^16.4.5", "lodash": "^4.17.21", "ts-codec": "^1.2.2", + "uuid": "^9.0.1", "winston": "^3.13.0", "zod": "^3.23.8" }, "devDependencies": { "@types/lodash": "^4.17.5", - "vitest": "^0.34.6" + "@types/uuid": "^9.0.4", + "vitest": "^2.1.1" } } diff --git a/libs/lib-services/src/container.ts b/libs/lib-services/src/container.ts index 0448a9f0d..4e015284f 100644 --- a/libs/lib-services/src/container.ts +++ b/libs/lib-services/src/container.ts @@ -23,14 +23,34 @@ export type ContainerImplementationDefaultGenerators = { [type in ContainerImplementation]: () => ContainerImplementationTypes[type]; }; +/** + * Helper for identifying constructors + */ +export interface Abstract { + prototype: T; +} +/** + * A basic class constructor + */ +export type Newable = new (...args: never[]) => T; + +/** + * Identifier used to get and register implementations + */ +export type ServiceIdentifier = string | symbol | Newable | Abstract | ContainerImplementation; + const DEFAULT_GENERATORS: ContainerImplementationDefaultGenerators = { [ContainerImplementation.REPORTER]: () => NoOpReporter, [ContainerImplementation.PROBES]: () => createFSProbe(), [ContainerImplementation.TERMINATION_HANDLER]: () => createTerminationHandler() }; +/** + * A container which provides means for registering and getting various + * function implementations. + */ export class Container { - protected implementations: Partial; + protected implementations: Map, any>; /** * Manager for system health probes @@ -54,13 +74,39 @@ export class Container { } constructor() { - this.implementations = {}; + this.implementations = new Map(); + } + + /** + * Gets an implementation given an identifier. + * An exception is thrown if the implementation has not been registered. + * Core [ContainerImplementation] identifiers are mapped to their respective implementation types. + * This also allows for getting generic implementations (unknown to the core framework) which have been registered. 
+ */ + getImplementation(identifier: Newable | Abstract): T; + getImplementation(identifier: T): ContainerImplementationTypes[T]; + getImplementation(identifier: ServiceIdentifier): T; + getImplementation(identifier: ServiceIdentifier): T { + const implementation = this.implementations.get(identifier); + if (!implementation) { + throw new Error(`Implementation for ${String(identifier)} has not been registered.`); + } + return implementation; } - getImplementation(type: Type) { - const implementation = this.implementations[type]; + /** + * Gets an implementation given an identifier. + * Null is returned if the implementation has not been registered yet. + * Core [ContainerImplementation] identifiers are mapped to their respective implementation types. + * This also allows for getting generic implementations (unknown to the core framework) which have been registered. + */ + getOptional(identifier: Newable | Abstract): T | null; + getOptional(identifier: T): ContainerImplementationTypes[T] | null; + getOptional(identifier: ServiceIdentifier): T | null; + getOptional(identifier: ServiceIdentifier): T | null { + const implementation = this.implementations.get(identifier); if (!implementation) { - throw new Error(`Implementation for ${type} has not been registered.`); + return null; } return implementation; } @@ -71,15 +117,15 @@ export class Container { registerDefaults(options?: RegisterDefaultsOptions) { _.difference(Object.values(ContainerImplementation), options?.skip ?? []).forEach((type) => { const generator = DEFAULT_GENERATORS[type]; - this.implementations[type] = generator() as any; // :( + this.register(type, generator()); }); } /** - * Allows for overriding a default implementation + * Allows for registering core and generic implementations of services/helpers. 
*/ - register(type: Type, implementation: ContainerImplementationTypes[Type]) { - this.implementations[type] = implementation; + register(identifier: ServiceIdentifier, implementation: T) { + this.implementations.set(identifier, implementation); } } diff --git a/libs/lib-services/src/system/LifeCycledSystem.ts b/libs/lib-services/src/system/LifeCycledSystem.ts index 3cd77c938..bcbc911be 100644 --- a/libs/lib-services/src/system/LifeCycledSystem.ts +++ b/libs/lib-services/src/system/LifeCycledSystem.ts @@ -20,7 +20,7 @@ export type ComponentLifecycle = PartialLifecycle & { }; export type LifecycleHandler = () => ComponentLifecycle; -export abstract class LifeCycledSystem { +export class LifeCycledSystem { components: ComponentLifecycle[] = []; constructor() { diff --git a/libs/lib-services/src/utils/BaseObserver.ts b/libs/lib-services/src/utils/BaseObserver.ts new file mode 100644 index 000000000..937fde59a --- /dev/null +++ b/libs/lib-services/src/utils/BaseObserver.ts @@ -0,0 +1,33 @@ +import { v4 as uuid } from 'uuid'; + +export interface ObserverClient { + registerListener(listener: Partial): () => void; +} + +export class BaseObserver implements ObserverClient { + protected listeners: { [id: string]: Partial }; + + constructor() { + this.listeners = {}; + } + + registerListener(listener: Partial): () => void { + const id = uuid(); + this.listeners[id] = listener; + return () => { + delete this.listeners[id]; + }; + } + + iterateListeners(cb: (listener: Partial) => any) { + for (let i in this.listeners) { + cb(this.listeners[i]); + } + } + + async iterateAsyncListeners(cb: (listener: Partial) => Promise) { + for (let i in this.listeners) { + await cb(this.listeners[i]); + } + } +} diff --git a/libs/lib-services/src/utils/DisposableObserver.ts b/libs/lib-services/src/utils/DisposableObserver.ts new file mode 100644 index 000000000..1440d57e7 --- /dev/null +++ b/libs/lib-services/src/utils/DisposableObserver.ts @@ -0,0 +1,37 @@ +import { BaseObserver, ObserverClient } from './BaseObserver.js'; + +export interface DisposableListener { + /** + * Event which is fired when the `[Symbol.disposed]` method is called. + */ + disposed: () => void; +} + +export interface DisposableObserverClient extends ObserverClient, Disposable { + /** + * Registers a listener that is automatically disposed when the parent is disposed. + * This is useful for disposing nested listeners. 
+ */ + registerManagedListener: (parent: DisposableObserverClient, cb: Partial) => () => void; +} + +export class DisposableObserver + extends BaseObserver + implements DisposableObserverClient +{ + registerManagedListener(parent: DisposableObserverClient, cb: Partial) { + const disposer = this.registerListener(cb); + parent.registerListener({ + disposed: () => { + disposer(); + } + }); + return disposer; + } + + [Symbol.dispose]() { + this.iterateListeners((cb) => cb.disposed?.()); + // Delete all callbacks + Object.keys(this.listeners).forEach((key) => delete this.listeners[key]); + } +} diff --git a/libs/lib-services/src/utils/utils-index.ts b/libs/lib-services/src/utils/utils-index.ts index 17384042b..59b89d274 100644 --- a/libs/lib-services/src/utils/utils-index.ts +++ b/libs/lib-services/src/utils/utils-index.ts @@ -1 +1,3 @@ +export * from './BaseObserver.js'; +export * from './DisposableObserver.js'; export * from './environment-variables.js'; diff --git a/libs/lib-services/test/src/DisposeableObserver.test.ts b/libs/lib-services/test/src/DisposeableObserver.test.ts new file mode 100644 index 000000000..1cde6a58b --- /dev/null +++ b/libs/lib-services/test/src/DisposeableObserver.test.ts @@ -0,0 +1,58 @@ +import { describe, expect, test } from 'vitest'; + +import { DisposableListener, DisposableObserver } from '../../src/utils/DisposableObserver.js'; + +describe('DisposableObserver', () => { + test('it should dispose all listeners on dispose', () => { + const listener = new DisposableObserver(); + + let wasDisposed = false; + listener.registerListener({ + disposed: () => { + wasDisposed = true; + } + }); + + listener[Symbol.dispose](); + + expect(wasDisposed).equals(true); + expect(Object.keys(listener['listeners']).length).equals(0); + }); + + test('it should dispose nested listeners for managed listeners', () => { + interface ParentListener extends DisposableListener { + childCreated: (child: DisposableObserver) => void; + } + class ParentObserver extends DisposableObserver { + createChild() { + const child = new DisposableObserver(); + this.iterateListeners((cb) => cb.childCreated?.(child)); + } + } + + const parent = new ParentObserver(); + let aChild: DisposableObserver | null = null; + + parent.registerListener({ + childCreated: (child) => { + aChild = child; + child.registerManagedListener(parent, { + test: () => { + // this does nothing + } + }); + } + }); + + parent.createChild(); + + // The managed listener should add a `disposed` listener + expect(Object.keys(parent['listeners']).length).equals(2); + expect(Object.keys(aChild!['listeners']).length).equals(1); + + parent[Symbol.dispose](); + expect(Object.keys(parent['listeners']).length).equals(0); + // The listener attached to the child should be disposed when the parent was disposed + expect(Object.keys(aChild!['listeners']).length).equals(0); + }); +}); diff --git a/modules/module-mongodb/CHANGELOG.md b/modules/module-mongodb/CHANGELOG.md new file mode 100644 index 000000000..05f7d8b81 --- /dev/null +++ b/modules/module-mongodb/CHANGELOG.md @@ -0,0 +1 @@ +# @powersync/service-module-mongodb diff --git a/modules/module-mongodb/LICENSE b/modules/module-mongodb/LICENSE new file mode 100644 index 000000000..c8efd46cc --- /dev/null +++ b/modules/module-mongodb/LICENSE @@ -0,0 +1,67 @@ +# Functional Source License, Version 1.1, Apache 2.0 Future License + +## Abbreviation + +FSL-1.1-Apache-2.0 + +## Notice + +Copyright 2023-2024 Journey Mobile, Inc. 
+ +## Terms and Conditions + +### Licensor ("We") + +The party offering the Software under these Terms and Conditions. + +### The Software + +The "Software" is each version of the software that we make available under these Terms and Conditions, as indicated by our inclusion of these Terms and Conditions with the Software. + +### License Grant + +Subject to your compliance with this License Grant and the Patents, Redistribution and Trademark clauses below, we hereby grant you the right to use, copy, modify, create derivative works, publicly perform, publicly display and redistribute the Software for any Permitted Purpose identified below. + +### Permitted Purpose + +A Permitted Purpose is any purpose other than a Competing Use. A Competing Use means making the Software available to others in a commercial product or service that: + +1. substitutes for the Software; +2. substitutes for any other product or service we offer using the Software that exists as of the date we make the Software available; or +3. offers the same or substantially similar functionality as the Software. + +Permitted Purposes specifically include using the Software: + +1. for your internal use and access; +2. for non-commercial education; +3. for non-commercial research; and +4. in connection with professional services that you provide to a licensee using the Software in accordance with these Terms and Conditions. + +### Patents + +To the extent your use for a Permitted Purpose would necessarily infringe our patents, the license grant above includes a license under our patents. If you make a claim against any party that the Software infringes or contributes to the infringement of any patent, then your patent license to the Software ends immediately. + +### Redistribution + +The Terms and Conditions apply to all copies, modifications and derivatives of the Software. +If you redistribute any copies, modifications or derivatives of the Software, you must include a copy of or a link to these Terms and Conditions and not remove any copyright notices provided in or with the Software. + +### Disclaimer + +THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTIES OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION WARRANTIES OF FITNESS FOR A PARTICULAR PURPOSE, MERCHANTABILITY, TITLE OR NON-INFRINGEMENT. +IN NO EVENT WILL WE HAVE ANY LIABILITY TO YOU ARISING OUT OF OR RELATED TO THE SOFTWARE, INCLUDING INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES, EVEN IF WE HAVE BEEN INFORMED OF THEIR POSSIBILITY IN ADVANCE. + +### Trademarks + +Except for displaying the License Details and identifying us as the origin of the Software, you have no right under these Terms and Conditions to use our trademarks, trade names, service marks or product names. + +## Grant of Future License + +We hereby irrevocably grant you an additional license to use the Software under the Apache License, Version 2.0 that is effective on the second anniversary of the date we make the Software available. On or after that date, you may use the Software under the Apache License, Version 2.0, in which case the following will apply: + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. diff --git a/modules/module-mongodb/README.md b/modules/module-mongodb/README.md new file mode 100644 index 000000000..f9e9e4c64 --- /dev/null +++ b/modules/module-mongodb/README.md @@ -0,0 +1,3 @@ +# PowerSync Service Module MongoDB + +MongoDB replication module for PowerSync diff --git a/modules/module-mongodb/package.json b/modules/module-mongodb/package.json new file mode 100644 index 000000000..8236e3b29 --- /dev/null +++ b/modules/module-mongodb/package.json @@ -0,0 +1,44 @@ +{ + "name": "@powersync/service-module-mongodb", + "repository": "https://github.com/powersync-ja/powersync-service", + "types": "dist/index.d.ts", + "version": "0.0.1", + "main": "dist/index.js", + "license": "FSL-1.1-Apache-2.0", + "type": "module", + "publishConfig": { + "access": "public" + }, + "scripts": { + "build": "tsc -b", + "build:tests": "tsc -b test/tsconfig.json", + "clean": "rm -rf ./dist && tsc -b --clean", + "test": "vitest" + }, + "exports": { + ".": { + "import": "./dist/index.js", + "require": "./dist/index.js", + "default": "./dist/index.js" + }, + "./types": { + "import": "./dist/types/types.js", + "require": "./dist/types/types.js", + "default": "./dist/types/types.js" + } + }, + "dependencies": { + "@powersync/lib-services-framework": "workspace:*", + "@powersync/service-core": "workspace:*", + "@powersync/service-jsonbig": "workspace:*", + "@powersync/service-sync-rules": "workspace:*", + "@powersync/service-types": "workspace:*", + "mongodb": "^6.7.0", + "ts-codec": "^1.2.2", + "uuid": "^9.0.1", + "uri-js": "^4.4.1" + }, + "devDependencies": { + "@types/uuid": "^9.0.4" + } +} diff --git a/modules/module-mongodb/src/api/MongoRouteAPIAdapter.ts b/modules/module-mongodb/src/api/MongoRouteAPIAdapter.ts new file mode 100644 index 000000000..a0bc519ec --- /dev/null +++ b/modules/module-mongodb/src/api/MongoRouteAPIAdapter.ts @@ -0,0 +1,366 @@ +import { api, ParseSyncRulesOptions, SourceTable } from '@powersync/service-core'; +import * as mongo from 'mongodb'; + +import * as sync_rules from '@powersync/service-sync-rules'; +import * as service_types from '@powersync/service-types'; +import { MongoManager } from '../replication/MongoManager.js'; +import { constructAfterRecord, createCheckpoint } from '../replication/MongoRelation.js'; +import * as types from '../types/types.js'; +import { escapeRegExp } from '../utils.js'; +import { CHECKPOINTS_COLLECTION } from '../replication/replication-utils.js'; + +export class MongoRouteAPIAdapter implements api.RouteAPI { + protected client: mongo.MongoClient; + public db: mongo.Db; + + connectionTag: string; + defaultSchema: string; + + constructor(protected config: types.ResolvedConnectionConfig) { + const manager = new MongoManager(config); + this.client = manager.client; + this.db = manager.db; + this.defaultSchema = manager.db.databaseName; + this.connectionTag = config.tag ?? 
sync_rules.DEFAULT_TAG; + } + + getParseSyncRulesOptions(): ParseSyncRulesOptions { + return { + defaultSchema: this.defaultSchema + }; + } + + async shutdown(): Promise { + await this.client.close(); + } + + async [Symbol.asyncDispose]() { + await this.shutdown(); + } + + async getSourceConfig(): Promise { + return this.config; + } + + async getConnectionStatus(): Promise { + const base = { + id: this.config.id, + uri: types.baseUri(this.config) + }; + + try { + await this.client.connect(); + await this.db.command({ hello: 1 }); + } catch (e) { + return { + ...base, + connected: false, + errors: [{ level: 'fatal', message: e.message }] + }; + } + return { + ...base, + connected: true, + errors: [] + }; + } + + async executeQuery(query: string, params: any[]): Promise { + return service_types.internal_routes.ExecuteSqlResponse.encode({ + results: { + columns: [], + rows: [] + }, + success: false, + error: 'SQL querying is not supported for MongoDB' + }); + } + + async getDebugTablesInfo( + tablePatterns: sync_rules.TablePattern[], + sqlSyncRules: sync_rules.SqlSyncRules + ): Promise { + let result: api.PatternResult[] = []; + + const validatePostImages = (schema: string, collection: mongo.CollectionInfo): service_types.ReplicationError[] => { + if (this.config.postImages == types.PostImagesOption.OFF) { + return []; + } else if (!collection.options?.changeStreamPreAndPostImages?.enabled) { + if (this.config.postImages == types.PostImagesOption.READ_ONLY) { + return [ + { level: 'fatal', message: `changeStreamPreAndPostImages not enabled on ${schema}.${collection.name}` } + ]; + } else { + return [ + { + level: 'warning', + message: `changeStreamPreAndPostImages not enabled on ${schema}.${collection.name}, will be enabled automatically` + } + ]; + } + } else { + return []; + } + }; + + for (let tablePattern of tablePatterns) { + const schema = tablePattern.schema; + + let patternResult: api.PatternResult = { + schema: schema, + pattern: tablePattern.tablePattern, + wildcard: tablePattern.isWildcard + }; + result.push(patternResult); + + let nameFilter: RegExp | string; + if (tablePattern.isWildcard) { + nameFilter = new RegExp('^' + escapeRegExp(tablePattern.tablePrefix)); + } else { + nameFilter = tablePattern.name; + } + + // Check if the collection exists + const collections = await this.client + .db(schema) + .listCollections( + { + name: nameFilter + }, + { nameOnly: false } + ) + .toArray(); + + if (tablePattern.isWildcard) { + patternResult.tables = []; + for (let collection of collections) { + const sourceTable = new SourceTable( + 0, + this.connectionTag, + collection.name, + schema, + collection.name, + [], + true + ); + let errors: service_types.ReplicationError[] = []; + if (collection.type == 'view') { + errors.push({ level: 'warning', message: `Collection ${schema}.${tablePattern.name} is a view` }); + } else { + errors.push(...validatePostImages(schema, collection)); + } + const syncData = sqlSyncRules.tableSyncsData(sourceTable); + const syncParameters = sqlSyncRules.tableSyncsParameters(sourceTable); + patternResult.tables.push({ + schema, + name: collection.name, + replication_id: ['_id'], + data_queries: syncData, + parameter_queries: syncParameters, + errors: errors + }); + } + } else { + const sourceTable = new SourceTable( + 0, + this.connectionTag, + tablePattern.name, + schema, + tablePattern.name, + [], + true + ); + + const syncData = sqlSyncRules.tableSyncsData(sourceTable); + const syncParameters = sqlSyncRules.tableSyncsParameters(sourceTable); + const 
collection = collections[0]; + + let errors: service_types.ReplicationError[] = []; + if (collections.length != 1) { + errors.push({ level: 'warning', message: `Collection ${schema}.${tablePattern.name} not found` }); + } else if (collection.type == 'view') { + errors.push({ level: 'warning', message: `Collection ${schema}.${tablePattern.name} is a view` }); + } else if (!collection.options?.changeStreamPreAndPostImages?.enabled) { + errors.push(...validatePostImages(schema, collection)); + } + + patternResult.table = { + schema, + name: tablePattern.name, + replication_id: ['_id'], + data_queries: syncData, + parameter_queries: syncParameters, + errors + }; + } + } + return result; + } + + async getReplicationLag(options: api.ReplicationLagOptions): Promise { + // There is no fast way to get replication lag in bytes in MongoDB. + // We can get replication lag in seconds, but need a different API for that. + return undefined; + } + + async getReplicationHead(): Promise { + return createCheckpoint(this.client, this.db); + } + + async getConnectionSchema(): Promise { + const sampleSize = 50; + + const databases = await this.db.admin().listDatabases({ nameOnly: true }); + const filteredDatabases = databases.databases.filter((db) => { + return !['local', 'admin', 'config'].includes(db.name); + }); + const databaseSchemas = await Promise.all( + filteredDatabases.map(async (db) => { + /** + * Filtering the list of database with `authorizedDatabases: true` + * does not produce the full list of databases under some circumstances. + * This catches any potential auth errors. + */ + let collections: mongo.CollectionInfo[]; + try { + collections = await this.client.db(db.name).listCollections().toArray(); + } catch (e) { + if (e instanceof mongo.MongoServerError && e.codeName == 'Unauthorized') { + // Ignore databases we're not authorized to query + return null; + } + throw e; + } + + let tables: service_types.TableSchema[] = []; + for (let collection of collections) { + if ([CHECKPOINTS_COLLECTION].includes(collection.name)) { + continue; + } + if (collection.name.startsWith('system.')) { + // system.views, system.js, system.profile, system.buckets + // https://www.mongodb.com/docs/manual/reference/system-collections/ + continue; + } + if (collection.type == 'view') { + continue; + } + try { + const sampleDocuments = await this.db + .collection(collection.name) + .aggregate([{ $sample: { size: sampleSize } }]) + .toArray(); + + if (sampleDocuments.length > 0) { + const columns = this.getColumnsFromDocuments(sampleDocuments); + + tables.push({ + name: collection.name, + // Since documents are sampled in a random order, we need to sort + // to get a consistent order + columns: columns.sort((a, b) => a.name.localeCompare(b.name)) + }); + } else { + tables.push({ + name: collection.name, + columns: [] + }); + } + } catch (e) { + if (e instanceof mongo.MongoServerError && e.codeName == 'Unauthorized') { + // Ignore collections we're not authorized to query + continue; + } + throw e; + } + } + + return { + name: db.name, + tables: tables + } satisfies service_types.DatabaseSchema; + }) + ); + return databaseSchemas.filter((schema) => !!schema); + } + + private getColumnsFromDocuments(documents: mongo.BSON.Document[]) { + let columns = new Map }>(); + for (const document of documents) { + const parsed = constructAfterRecord(document); + for (const key in parsed) { + const value = parsed[key]; + const type = sync_rules.sqliteTypeOf(value); + const sqliteType = sync_rules.ExpressionType.fromTypeText(type); + 
let entry = columns.get(key); + if (entry == null) { + entry = { sqliteType, bsonTypes: new Set() }; + columns.set(key, entry); + } else { + entry.sqliteType = entry.sqliteType.or(sqliteType); + } + const bsonType = this.getBsonType(document[key]); + if (bsonType != null) { + entry.bsonTypes.add(bsonType); + } + } + } + return [...columns.entries()].map(([key, value]) => { + const internal_type = value.bsonTypes.size == 0 ? '' : [...value.bsonTypes].join(' | '); + return { + name: key, + type: internal_type, + sqlite_type: value.sqliteType.typeFlags, + internal_type, + pg_type: internal_type + }; + }); + } + + private getBsonType(data: any): string | null { + if (data == null) { + // null or undefined + return 'Null'; + } else if (typeof data == 'string') { + return 'String'; + } else if (typeof data == 'number') { + if (Number.isInteger(data)) { + return 'Integer'; + } else { + return 'Double'; + } + } else if (typeof data == 'bigint') { + return 'Long'; + } else if (typeof data == 'boolean') { + return 'Boolean'; + } else if (data instanceof mongo.ObjectId) { + return 'ObjectId'; + } else if (data instanceof mongo.UUID) { + return 'UUID'; + } else if (data instanceof Date) { + return 'Date'; + } else if (data instanceof mongo.Timestamp) { + return 'Timestamp'; + } else if (data instanceof mongo.Binary) { + return 'Binary'; + } else if (data instanceof mongo.Long) { + return 'Long'; + } else if (data instanceof RegExp) { + return 'RegExp'; + } else if (data instanceof mongo.MinKey) { + return 'MinKey'; + } else if (data instanceof mongo.MaxKey) { + return 'MaxKey'; + } else if (data instanceof mongo.Decimal128) { + return 'Decimal'; + } else if (Array.isArray(data)) { + return 'Array'; + } else if (data instanceof Uint8Array) { + return 'Binary'; + } else if (typeof data == 'object') { + return 'Object'; + } else { + return null; + } + } +} diff --git a/modules/module-mongodb/src/index.ts b/modules/module-mongodb/src/index.ts new file mode 100644 index 000000000..6ecba2a8e --- /dev/null +++ b/modules/module-mongodb/src/index.ts @@ -0,0 +1 @@ +export * from './module/MongoModule.js'; diff --git a/modules/module-mongodb/src/module/MongoModule.ts b/modules/module-mongodb/src/module/MongoModule.ts new file mode 100644 index 000000000..bbd9ab869 --- /dev/null +++ b/modules/module-mongodb/src/module/MongoModule.ts @@ -0,0 +1,65 @@ +import { api, ConfigurationFileSyncRulesProvider, replication, system, TearDownOptions } from '@powersync/service-core'; +import { MongoRouteAPIAdapter } from '../api/MongoRouteAPIAdapter.js'; +import { ConnectionManagerFactory } from '../replication/ConnectionManagerFactory.js'; +import { MongoErrorRateLimiter } from '../replication/MongoErrorRateLimiter.js'; +import { ChangeStreamReplicator } from '../replication/ChangeStreamReplicator.js'; +import * as types from '../types/types.js'; +import { MongoManager } from '../replication/MongoManager.js'; +import { checkSourceConfiguration } from '../replication/replication-utils.js'; + +export class MongoModule extends replication.ReplicationModule { + constructor() { + super({ + name: 'MongoDB', + type: types.MONGO_CONNECTION_TYPE, + configSchema: types.MongoConnectionConfig + }); + } + + async initialize(context: system.ServiceContextContainer): Promise { + await super.initialize(context); + } + + protected createRouteAPIAdapter(): api.RouteAPI { + return new MongoRouteAPIAdapter(this.resolveConfig(this.decodedConfig!)); + } + + protected createReplicator(context: system.ServiceContext): replication.AbstractReplicator { 
+ const normalisedConfig = this.resolveConfig(this.decodedConfig!); + const syncRuleProvider = new ConfigurationFileSyncRulesProvider(context.configuration.sync_rules); + const connectionFactory = new ConnectionManagerFactory(normalisedConfig); + + return new ChangeStreamReplicator({ + id: this.getDefaultId(normalisedConfig.database ?? ''), + syncRuleProvider: syncRuleProvider, + storageEngine: context.storageEngine, + connectionFactory: connectionFactory, + rateLimiter: new MongoErrorRateLimiter() + }); + } + + /** + * Combines base config with normalized connection settings + */ + private resolveConfig(config: types.MongoConnectionConfig): types.ResolvedConnectionConfig { + return { + ...config, + ...types.normalizeConnectionConfig(config) + }; + } + + async teardown(options: TearDownOptions): Promise { + // TODO: Implement? + } + + async testConnection(config: types.MongoConnectionConfig): Promise { + this.decodeConfig(config); + const normalisedConfig = this.resolveConfig(this.decodedConfig!); + const connectionManager = new MongoManager(normalisedConfig); + try { + return checkSourceConfiguration(connectionManager); + } finally { + await connectionManager.end(); + } + } +} diff --git a/modules/module-mongodb/src/replication/ChangeStream.ts b/modules/module-mongodb/src/replication/ChangeStream.ts new file mode 100644 index 000000000..730f39813 --- /dev/null +++ b/modules/module-mongodb/src/replication/ChangeStream.ts @@ -0,0 +1,670 @@ +import { container, logger } from '@powersync/lib-services-framework'; +import { Metrics, SaveOperationTag, SourceEntityDescriptor, SourceTable, storage } from '@powersync/service-core'; +import { DatabaseInputRow, SqliteRow, SqlSyncRules, TablePattern } from '@powersync/service-sync-rules'; +import * as mongo from 'mongodb'; +import { MongoManager } from './MongoManager.js'; +import { + constructAfterRecord, + createCheckpoint, + getMongoLsn, + getMongoRelation, + mongoLsnToTimestamp +} from './MongoRelation.js'; +import { escapeRegExp } from '../utils.js'; +import { CHECKPOINTS_COLLECTION } from './replication-utils.js'; +import { PostImagesOption } from '../types/types.js'; + +export const ZERO_LSN = '0000000000000000'; + +export interface ChangeStreamOptions { + connections: MongoManager; + storage: storage.SyncRulesBucketStorage; + abort_signal: AbortSignal; +} + +interface InitResult { + needsInitialSync: boolean; +} + +/** + * Thrown when the change stream is not valid anymore, and replication + * must be restarted. + * + * Possible reasons: + * * Some change stream documents do not have postImages. + * * startAfter/resumeToken is not valid anymore. 
+ */ +export class ChangeStreamInvalidatedError extends Error { + constructor(message: string) { + super(message); + } +} + +export class ChangeStream { + sync_rules: SqlSyncRules; + group_id: number; + + connection_id = 1; + + private readonly storage: storage.SyncRulesBucketStorage; + + private connections: MongoManager; + private readonly client: mongo.MongoClient; + private readonly defaultDb: mongo.Db; + + private abort_signal: AbortSignal; + + private relation_cache = new Map(); + + constructor(options: ChangeStreamOptions) { + this.storage = options.storage; + this.group_id = options.storage.group_id; + this.connections = options.connections; + this.client = this.connections.client; + this.defaultDb = this.connections.db; + this.sync_rules = options.storage.getParsedSyncRules({ + defaultSchema: this.defaultDb.databaseName + }); + + this.abort_signal = options.abort_signal; + this.abort_signal.addEventListener( + 'abort', + () => { + // TODO: Fast abort? + }, + { once: true } + ); + } + + get stopped() { + return this.abort_signal.aborted; + } + + private get usePostImages() { + return this.connections.options.postImages != PostImagesOption.OFF; + } + + private get configurePostImages() { + return this.connections.options.postImages == PostImagesOption.AUTO_CONFIGURE; + } + + /** + * This resolves a pattern, persists the related metadata, and returns + * the resulting SourceTables. + * + * This implicitly checks the collection postImage configuration. + */ + async resolveQualifiedTableNames( + batch: storage.BucketStorageBatch, + tablePattern: TablePattern + ): Promise { + const schema = tablePattern.schema; + if (tablePattern.connectionTag != this.connections.connectionTag) { + return []; + } + + let nameFilter: RegExp | string; + if (tablePattern.isWildcard) { + nameFilter = new RegExp('^' + escapeRegExp(tablePattern.tablePrefix)); + } else { + nameFilter = tablePattern.name; + } + let result: storage.SourceTable[] = []; + + // Check if the collection exists + const collections = await this.client + .db(schema) + .listCollections( + { + name: nameFilter + }, + { nameOnly: false } + ) + .toArray(); + + if (!tablePattern.isWildcard && collections.length == 0) { + logger.warn(`Collection ${schema}.${tablePattern.name} not found`); + } + + for (let collection of collections) { + const table = await this.handleRelation( + batch, + { + name: collection.name, + schema, + objectId: collection.name, + replicationColumns: [{ name: '_id' }] + } as SourceEntityDescriptor, + // This is done as part of the initial setup - snapshot is handled elsewhere + { snapshot: false, collectionInfo: collection } + ); + + result.push(table); + } + + return result; + } + + async initSlot(): Promise { + const status = await this.storage.getStatus(); + if (status.snapshot_done && status.checkpoint_lsn) { + logger.info(`Initial replication already done`); + return { needsInitialSync: false }; + } + + return { needsInitialSync: true }; + } + + async estimatedCount(table: storage.SourceTable): Promise { + const db = this.client.db(table.schema); + const count = db.collection(table.table).estimatedDocumentCount(); + return `~${count}`; + } + + /** + * Start initial replication. + * + * If (partial) replication was done before on this slot, this clears the state + * and starts again from scratch. 
+ */ + async startInitialReplication() { + await this.storage.clear(); + await this.initialReplication(); + } + + async initialReplication() { + const sourceTables = this.sync_rules.getSourceTables(); + await this.client.connect(); + + // We need to get the snapshot time before taking the initial snapshot. + const hello = await this.defaultDb.command({ hello: 1 }); + const snapshotTime = hello.lastWrite?.majorityOpTime?.ts as mongo.Timestamp; + if (hello.msg == 'isdbgrid') { + throw new Error('Sharded MongoDB Clusters are not supported yet (including MongoDB Serverless instances).'); + } else if (hello.setName == null) { + throw new Error('Standalone MongoDB instances are not supported - use a replicaset.'); + } else if (snapshotTime == null) { + // Not known where this would happen apart from the above cases + throw new Error('MongoDB lastWrite timestamp not found.'); + } + // We previously used {snapshot: true} for the snapshot session. + // While it gives nice consistency guarantees, it fails when the + // snapshot takes longer than 5 minutes, due to minSnapshotHistoryWindowInSeconds + // expiring the snapshot. + const session = await this.client.startSession(); + try { + await this.storage.startBatch( + { zeroLSN: ZERO_LSN, defaultSchema: this.defaultDb.databaseName, storeCurrentData: false }, + async (batch) => { + // Start by resolving all tables. + // This checks postImage configuration, and that should fail as + // earlier as possible. + let allSourceTables: SourceTable[] = []; + for (let tablePattern of sourceTables) { + const tables = await this.resolveQualifiedTableNames(batch, tablePattern); + allSourceTables.push(...tables); + } + + for (let table of allSourceTables) { + await this.snapshotTable(batch, table, session); + await batch.markSnapshotDone([table], ZERO_LSN); + + await touch(); + } + + const lsn = getMongoLsn(snapshotTime); + logger.info(`Snapshot commit at ${snapshotTime.inspect()} / ${lsn}`); + await batch.commit(lsn); + } + ); + } finally { + session.endSession(); + } + } + + private async setupCheckpointsCollection() { + const collection = await this.getCollectionInfo(this.defaultDb.databaseName, CHECKPOINTS_COLLECTION); + if (collection == null) { + await this.defaultDb.createCollection(CHECKPOINTS_COLLECTION, { + changeStreamPreAndPostImages: { enabled: true } + }); + } else if (this.usePostImages && collection.options?.changeStreamPreAndPostImages?.enabled != true) { + // Drop + create requires less permissions than collMod, + // and we don't care about the data in this collection. 
+ await this.defaultDb.dropCollection(CHECKPOINTS_COLLECTION); + await this.defaultDb.createCollection(CHECKPOINTS_COLLECTION, { + changeStreamPreAndPostImages: { enabled: true } + }); + } + } + + private getSourceNamespaceFilters(): { $match: any; multipleDatabases: boolean } { + const sourceTables = this.sync_rules.getSourceTables(); + + let $inFilters: any[] = [{ db: this.defaultDb.databaseName, coll: CHECKPOINTS_COLLECTION }]; + let $refilters: any[] = []; + let multipleDatabases = false; + for (let tablePattern of sourceTables) { + if (tablePattern.connectionTag != this.connections.connectionTag) { + continue; + } + + if (tablePattern.schema != this.defaultDb.databaseName) { + multipleDatabases = true; + } + + if (tablePattern.isWildcard) { + $refilters.push({ + 'ns.db': tablePattern.schema, + 'ns.coll': new RegExp('^' + escapeRegExp(tablePattern.tablePrefix)) + }); + } else { + $inFilters.push({ + db: tablePattern.schema, + coll: tablePattern.name + }); + } + } + if ($refilters.length > 0) { + return { $match: { $or: [{ ns: { $in: $inFilters } }, ...$refilters] }, multipleDatabases }; + } + return { $match: { ns: { $in: $inFilters } }, multipleDatabases }; + } + + static *getQueryData(results: Iterable): Generator { + for (let row of results) { + yield constructAfterRecord(row); + } + } + + private async snapshotTable( + batch: storage.BucketStorageBatch, + table: storage.SourceTable, + session?: mongo.ClientSession + ) { + logger.info(`Replicating ${table.qualifiedName}`); + const estimatedCount = await this.estimatedCount(table); + let at = 0; + + const db = this.client.db(table.schema); + const collection = db.collection(table.table); + const query = collection.find({}, { session, readConcern: { level: 'majority' } }); + + const cursor = query.stream(); + + for await (let document of cursor) { + if (this.abort_signal.aborted) { + throw new Error(`Aborted initial replication`); + } + + at += 1; + + const record = constructAfterRecord(document); + + // This auto-flushes when the batch reaches its size limit + await batch.save({ + tag: SaveOperationTag.INSERT, + sourceTable: table, + before: undefined, + beforeReplicaId: undefined, + after: record, + afterReplicaId: document._id + }); + + at += 1; + Metrics.getInstance().rows_replicated_total.add(1); + + await touch(); + } + + await batch.flush(); + logger.info(`Replicated ${at} documents for ${table.qualifiedName}`); + } + + private async getRelation( + batch: storage.BucketStorageBatch, + descriptor: SourceEntityDescriptor + ): Promise { + const existing = this.relation_cache.get(descriptor.objectId); + if (existing != null) { + return existing; + } + + // Note: collection may have been dropped at this point, so we handle + // missing values. 
+ const collection = await this.getCollectionInfo(descriptor.schema, descriptor.name); + + return this.handleRelation(batch, descriptor, { snapshot: false, collectionInfo: collection }); + } + + private async getCollectionInfo(db: string, name: string): Promise { + const collection = ( + await this.client + .db(db) + .listCollections( + { + name: name + }, + { nameOnly: false } + ) + .toArray() + )[0]; + return collection; + } + + private async checkPostImages(db: string, collectionInfo: mongo.CollectionInfo) { + if (!this.usePostImages) { + // Nothing to check + return; + } + + const enabled = collectionInfo.options?.changeStreamPreAndPostImages?.enabled == true; + + if (!enabled && this.configurePostImages) { + await this.client.db(db).command({ + collMod: collectionInfo.name, + changeStreamPreAndPostImages: { enabled: true } + }); + logger.info(`Enabled postImages on ${db}.${collectionInfo.name}`); + } else if (!enabled) { + throw new Error(`postImages not enabled on ${db}.${collectionInfo.name}`); + } + } + + async handleRelation( + batch: storage.BucketStorageBatch, + descriptor: SourceEntityDescriptor, + options: { snapshot: boolean; collectionInfo: mongo.CollectionInfo | undefined } + ) { + if (options.collectionInfo != null) { + await this.checkPostImages(descriptor.schema, options.collectionInfo); + } else { + // If collectionInfo is null, the collection may have been dropped. + // Ignore the postImages check in this case. + } + + const snapshot = options.snapshot; + if (!descriptor.objectId && typeof descriptor.objectId != 'string') { + throw new Error('objectId expected'); + } + const result = await this.storage.resolveTable({ + group_id: this.group_id, + connection_id: this.connection_id, + connection_tag: this.connections.connectionTag, + entity_descriptor: descriptor, + sync_rules: this.sync_rules + }); + this.relation_cache.set(descriptor.objectId, result.table); + + // Drop conflicting tables. This includes for example renamed tables. + await batch.drop(result.dropTables); + + // Snapshot if: + // 1. Snapshot is requested (false for initial snapshot, since that process handles it elsewhere) + // 2. Snapshot is not already done, AND: + // 3. The table is used in sync rules. + const shouldSnapshot = snapshot && !result.table.snapshotComplete && result.table.syncAny; + if (shouldSnapshot) { + // Truncate this table, in case a previous snapshot was interrupted. 
+ await batch.truncate([result.table]); + + await this.snapshotTable(batch, result.table); + const no_checkpoint_before_lsn = await createCheckpoint(this.client, this.defaultDb); + + const [table] = await batch.markSnapshotDone([result.table], no_checkpoint_before_lsn); + return table; + } + + return result.table; + } + + async writeChange( + batch: storage.BucketStorageBatch, + table: storage.SourceTable, + change: mongo.ChangeStreamDocument + ): Promise { + if (!table.syncAny) { + logger.debug(`Collection ${table.qualifiedName} not used in sync rules - skipping`); + return null; + } + + Metrics.getInstance().rows_replicated_total.add(1); + if (change.operationType == 'insert') { + const baseRecord = constructAfterRecord(change.fullDocument); + return await batch.save({ + tag: SaveOperationTag.INSERT, + sourceTable: table, + before: undefined, + beforeReplicaId: undefined, + after: baseRecord, + afterReplicaId: change.documentKey._id + }); + } else if (change.operationType == 'update' || change.operationType == 'replace') { + if (change.fullDocument == null) { + // Treat as delete + return await batch.save({ + tag: SaveOperationTag.DELETE, + sourceTable: table, + before: undefined, + beforeReplicaId: change.documentKey._id + }); + } + const after = constructAfterRecord(change.fullDocument!); + return await batch.save({ + tag: SaveOperationTag.UPDATE, + sourceTable: table, + before: undefined, + beforeReplicaId: undefined, + after: after, + afterReplicaId: change.documentKey._id + }); + } else if (change.operationType == 'delete') { + return await batch.save({ + tag: SaveOperationTag.DELETE, + sourceTable: table, + before: undefined, + beforeReplicaId: change.documentKey._id + }); + } else { + throw new Error(`Unsupported operation: ${change.operationType}`); + } + } + + async replicate() { + try { + // If anything errors here, the entire replication process is halted, and + // all connections automatically closed, including this one. + + await this.initReplication(); + await this.streamChanges(); + } catch (e) { + await this.storage.reportError(e); + throw e; + } + } + + async initReplication() { + const result = await this.initSlot(); + await this.setupCheckpointsCollection(); + if (result.needsInitialSync) { + await this.startInitialReplication(); + } + } + + async streamChanges() { + try { + await this.streamChangesInternal(); + } catch (e) { + if ( + e instanceof mongo.MongoServerError && + e.codeName == 'NoMatchingDocument' && + e.errmsg?.includes('post-image was not found') + ) { + throw new ChangeStreamInvalidatedError(e.errmsg); + } + throw e; + } + } + + async streamChangesInternal() { + // Auto-activate as soon as initial replication is done + await this.storage.autoActivate(); + + await this.storage.startBatch( + { zeroLSN: ZERO_LSN, defaultSchema: this.defaultDb.databaseName, storeCurrentData: false }, + async (batch) => { + const lastLsn = batch.lastCheckpointLsn; + const startAfter = mongoLsnToTimestamp(lastLsn) ?? undefined; + logger.info(`Resume streaming at ${startAfter?.inspect()} / ${lastLsn}`); + + const filters = this.getSourceNamespaceFilters(); + + const pipeline: mongo.Document[] = [ + { + $match: filters.$match + }, + { $changeStreamSplitLargeEvent: {} } + ]; + + let fullDocument: 'required' | 'updateLookup'; + + if (this.usePostImages) { + // 'read_only' or 'auto_configure' + // Configuration happens during snapshot, or when we see new + // collections. 
+ fullDocument = 'required'; + } else { + fullDocument = 'updateLookup'; + } + + const streamOptions: mongo.ChangeStreamOptions = { + startAtOperationTime: startAfter, + showExpandedEvents: true, + useBigInt64: true, + maxAwaitTimeMS: 200, + fullDocument: fullDocument + }; + let stream: mongo.ChangeStream; + if (filters.multipleDatabases) { + // Requires readAnyDatabase@admin on Atlas + stream = this.client.watch(pipeline, streamOptions); + } else { + // Same general result, but requires less permissions than the above + stream = this.defaultDb.watch(pipeline, streamOptions); + } + + if (this.abort_signal.aborted) { + stream.close(); + return; + } + + this.abort_signal.addEventListener('abort', () => { + stream.close(); + }); + + // Always start with a checkpoint. + // This helps us to clear erorrs when restarting, even if there is + // no data to replicate. + let waitForCheckpointLsn: string | null = await createCheckpoint(this.client, this.defaultDb); + + let splitDocument: mongo.ChangeStreamDocument | null = null; + + while (true) { + if (this.abort_signal.aborted) { + break; + } + + const originalChangeDocument = await stream.tryNext(); + + if (originalChangeDocument == null || this.abort_signal.aborted) { + continue; + } + await touch(); + + if (startAfter != null && originalChangeDocument.clusterTime?.lte(startAfter)) { + continue; + } + + let changeDocument = originalChangeDocument; + if (originalChangeDocument?.splitEvent != null) { + // Handle split events from $changeStreamSplitLargeEvent. + // This is only relevant for very large update operations. + const splitEvent = originalChangeDocument?.splitEvent; + + if (splitDocument == null) { + splitDocument = originalChangeDocument; + } else { + splitDocument = Object.assign(splitDocument, originalChangeDocument); + } + + if (splitEvent.fragment == splitEvent.of) { + // Got all fragments + changeDocument = splitDocument; + splitDocument = null; + } else { + // Wait for more fragments + continue; + } + } else if (splitDocument != null) { + // We were waiting for fragments, but got a different event + throw new Error(`Incomplete splitEvent: ${JSON.stringify(splitDocument.splitEvent)}`); + } + + // console.log('event', changeDocument); + + if ( + (changeDocument.operationType == 'insert' || + changeDocument.operationType == 'update' || + changeDocument.operationType == 'replace') && + changeDocument.ns.coll == CHECKPOINTS_COLLECTION + ) { + const lsn = getMongoLsn(changeDocument.clusterTime!); + if (waitForCheckpointLsn != null && lsn >= waitForCheckpointLsn) { + waitForCheckpointLsn = null; + } + await batch.commit(lsn); + } else if ( + changeDocument.operationType == 'insert' || + changeDocument.operationType == 'update' || + changeDocument.operationType == 'replace' || + changeDocument.operationType == 'delete' + ) { + if (waitForCheckpointLsn == null) { + waitForCheckpointLsn = await createCheckpoint(this.client, this.defaultDb); + } + const rel = getMongoRelation(changeDocument.ns); + const table = await this.getRelation(batch, rel); + if (table.syncAny) { + await this.writeChange(batch, table, changeDocument); + } + } else if (changeDocument.operationType == 'drop') { + const rel = getMongoRelation(changeDocument.ns); + const table = await this.getRelation(batch, rel); + if (table.syncAny) { + await batch.drop([table]); + this.relation_cache.delete(table.objectId); + } + } else if (changeDocument.operationType == 'rename') { + const relFrom = getMongoRelation(changeDocument.ns); + const relTo = getMongoRelation(changeDocument.to); 
+ const tableFrom = await this.getRelation(batch, relFrom); + if (tableFrom.syncAny) { + await batch.drop([tableFrom]); + this.relation_cache.delete(tableFrom.objectId); + } + // Here we do need to snapshot the new table + const collection = await this.getCollectionInfo(relTo.schema, relTo.name); + await this.handleRelation(batch, relTo, { snapshot: true, collectionInfo: collection }); + } + } + } + ); + } +} + +async function touch() { + // FIXME: The hosted Kubernetes probe does not actually check the timestamp on this. + // FIXME: We need a timeout of around 5+ minutes in Kubernetes if we do start checking the timestamp, + // or reduce PING_INTERVAL here. + return container.probes.touch(); +} diff --git a/modules/module-mongodb/src/replication/ChangeStreamReplicationJob.ts b/modules/module-mongodb/src/replication/ChangeStreamReplicationJob.ts new file mode 100644 index 000000000..78842fd36 --- /dev/null +++ b/modules/module-mongodb/src/replication/ChangeStreamReplicationJob.ts @@ -0,0 +1,103 @@ +import { container } from '@powersync/lib-services-framework'; +import { ChangeStreamInvalidatedError, ChangeStream } from './ChangeStream.js'; + +import { replication } from '@powersync/service-core'; +import { ConnectionManagerFactory } from './ConnectionManagerFactory.js'; + +import * as mongo from 'mongodb'; + +export interface ChangeStreamReplicationJobOptions extends replication.AbstractReplicationJobOptions { + connectionFactory: ConnectionManagerFactory; +} + +export class ChangeStreamReplicationJob extends replication.AbstractReplicationJob { + private connectionFactory: ConnectionManagerFactory; + + constructor(options: ChangeStreamReplicationJobOptions) { + super(options); + this.connectionFactory = options.connectionFactory; + } + + async cleanUp(): Promise { + // TODO: Implement? + } + + async keepAlive() { + // TODO: Implement? + } + + private get slotName() { + return this.options.storage.slot_name; + } + + async replicate() { + try { + await this.replicateLoop(); + } catch (e) { + // Fatal exception + container.reporter.captureException(e, { + metadata: {} + }); + this.logger.error(`Replication failed`, e); + + if (e instanceof ChangeStreamInvalidatedError) { + // This stops replication on this slot, and creates a new slot + await this.options.storage.factory.slotRemoved(this.slotName); + } + } finally { + this.abortController.abort(); + } + } + + async replicateLoop() { + while (!this.isStopped) { + await this.replicateOnce(); + + if (!this.isStopped) { + await new Promise((resolve) => setTimeout(resolve, 5000)); + } + } + } + + async replicateOnce() { + // New connections on every iteration (every error with retry), + // otherwise we risk repeating errors related to the connection, + // such as caused by cached PG schemas. + const connectionManager = this.connectionFactory.create(); + try { + await this.rateLimiter?.waitUntilAllowed({ signal: this.abortController.signal }); + if (this.isStopped) { + return; + } + const stream = new ChangeStream({ + abort_signal: this.abortController.signal, + storage: this.options.storage, + connections: connectionManager + }); + await stream.replicate(); + } catch (e) { + if (this.abortController.signal.aborted) { + return; + } + this.logger.error(`Replication error`, e); + if (e.cause != null) { + // Without this additional log, the cause may not be visible in the logs. 
+ this.logger.error(`cause`, e.cause); + } + if (e instanceof ChangeStreamInvalidatedError) { + throw e; + } else if (e instanceof mongo.MongoError && e.hasErrorLabel('NonResumableChangeStreamError')) { + throw new ChangeStreamInvalidatedError(e.message); + } else { + // Report the error if relevant, before retrying + container.reporter.captureException(e, { + metadata: {} + }); + // This sets the retry delay + this.rateLimiter?.reportError(e); + } + } finally { + await connectionManager.end(); + } + } +} diff --git a/modules/module-mongodb/src/replication/ChangeStreamReplicator.ts b/modules/module-mongodb/src/replication/ChangeStreamReplicator.ts new file mode 100644 index 000000000..2cf96c494 --- /dev/null +++ b/modules/module-mongodb/src/replication/ChangeStreamReplicator.ts @@ -0,0 +1,36 @@ +import { storage, replication } from '@powersync/service-core'; +import { ChangeStreamReplicationJob } from './ChangeStreamReplicationJob.js'; +import { ConnectionManagerFactory } from './ConnectionManagerFactory.js'; +import { MongoErrorRateLimiter } from './MongoErrorRateLimiter.js'; + +export interface ChangeStreamReplicatorOptions extends replication.AbstractReplicatorOptions { + connectionFactory: ConnectionManagerFactory; +} + +export class ChangeStreamReplicator extends replication.AbstractReplicator { + private readonly connectionFactory: ConnectionManagerFactory; + + constructor(options: ChangeStreamReplicatorOptions) { + super(options); + this.connectionFactory = options.connectionFactory; + } + + createJob(options: replication.CreateJobOptions): ChangeStreamReplicationJob { + return new ChangeStreamReplicationJob({ + id: this.createJobId(options.storage.group_id), + storage: options.storage, + connectionFactory: this.connectionFactory, + lock: options.lock, + rateLimiter: new MongoErrorRateLimiter() + }); + } + + async cleanUp(syncRulesStorage: storage.SyncRulesBucketStorage): Promise { + // TODO: Implement anything? 
+ } + + async stop(): Promise { + await super.stop(); + await this.connectionFactory.shutdown(); + } +} diff --git a/modules/module-mongodb/src/replication/ConnectionManagerFactory.ts b/modules/module-mongodb/src/replication/ConnectionManagerFactory.ts new file mode 100644 index 000000000..c84c28e05 --- /dev/null +++ b/modules/module-mongodb/src/replication/ConnectionManagerFactory.ts @@ -0,0 +1,27 @@ +import { logger } from '@powersync/lib-services-framework'; +import { NormalizedMongoConnectionConfig } from '../types/types.js'; +import { MongoManager } from './MongoManager.js'; + +export class ConnectionManagerFactory { + private readonly connectionManagers: MongoManager[]; + private readonly dbConnectionConfig: NormalizedMongoConnectionConfig; + + constructor(dbConnectionConfig: NormalizedMongoConnectionConfig) { + this.dbConnectionConfig = dbConnectionConfig; + this.connectionManagers = []; + } + + create() { + const manager = new MongoManager(this.dbConnectionConfig); + this.connectionManagers.push(manager); + return manager; + } + + async shutdown() { + logger.info('Shutting down MongoDB connection Managers...'); + for (const manager of this.connectionManagers) { + await manager.end(); + } + logger.info('MongoDB connection Managers shutdown completed.'); + } +} diff --git a/modules/module-mongodb/src/replication/MongoErrorRateLimiter.ts b/modules/module-mongodb/src/replication/MongoErrorRateLimiter.ts new file mode 100644 index 000000000..17b65c66c --- /dev/null +++ b/modules/module-mongodb/src/replication/MongoErrorRateLimiter.ts @@ -0,0 +1,38 @@ +import { ErrorRateLimiter } from '@powersync/service-core'; +import { setTimeout } from 'timers/promises'; + +export class MongoErrorRateLimiter implements ErrorRateLimiter { + nextAllowed: number = Date.now(); + + async waitUntilAllowed(options?: { signal?: AbortSignal | undefined } | undefined): Promise { + const delay = Math.max(0, this.nextAllowed - Date.now()); + // Minimum delay between connections, even without errors + this.setDelay(500); + await setTimeout(delay, undefined, { signal: options?.signal }); + } + + mayPing(): boolean { + return Date.now() >= this.nextAllowed; + } + + reportError(e: any): void { + // FIXME: Check mongodb-specific requirements + const message = (e.message as string) ?? ''; + if (message.includes('password authentication failed')) { + // Wait 15 minutes, to avoid triggering Supabase's fail2ban + this.setDelay(900_000); + } else if (message.includes('ENOTFOUND')) { + // DNS lookup issue - incorrect URI or deleted instance + this.setDelay(120_000); + } else if (message.includes('ECONNREFUSED')) { + // Could be fail2ban or similar + this.setDelay(120_000); + } else { + this.setDelay(30_000); + } + } + + private setDelay(delay: number) { + this.nextAllowed = Math.max(this.nextAllowed, Date.now() + delay); + } +} diff --git a/modules/module-mongodb/src/replication/MongoManager.ts b/modules/module-mongodb/src/replication/MongoManager.ts new file mode 100644 index 000000000..cb2f9d54f --- /dev/null +++ b/modules/module-mongodb/src/replication/MongoManager.ts @@ -0,0 +1,47 @@ +import * as mongo from 'mongodb'; +import { NormalizedMongoConnectionConfig } from '../types/types.js'; + +export class MongoManager { + /** + * Do not use this for any transactions. + */ + public readonly client: mongo.MongoClient; + public readonly db: mongo.Db; + + constructor(public options: NormalizedMongoConnectionConfig) { + // The pool is lazy - no connections are opened until a query is performed. 
+ this.client = new mongo.MongoClient(options.uri, { + auth: { + username: options.username, + password: options.password + }, + // Time for connection to timeout + connectTimeoutMS: 5_000, + // Time for individual requests to timeout + socketTimeoutMS: 60_000, + // How long to wait for new primary selection + serverSelectionTimeoutMS: 30_000, + + // Avoid too many connections: + // 1. It can overwhelm the source database. + // 2. Processing too many queries in parallel can cause the process to run out of memory. + maxPoolSize: 8, + + maxConnecting: 3, + maxIdleTimeMS: 60_000 + }); + this.db = this.client.db(options.database, {}); + } + + public get connectionTag() { + return this.options.tag; + } + + async end(): Promise { + await this.client.close(); + } + + async destroy() { + // TODO: Implement? + } +} diff --git a/modules/module-mongodb/src/replication/MongoRelation.ts b/modules/module-mongodb/src/replication/MongoRelation.ts new file mode 100644 index 000000000..e2dc675e1 --- /dev/null +++ b/modules/module-mongodb/src/replication/MongoRelation.ts @@ -0,0 +1,171 @@ +import { storage } from '@powersync/service-core'; +import { SqliteRow, SqliteValue, toSyncRulesRow } from '@powersync/service-sync-rules'; +import * as mongo from 'mongodb'; +import { JSONBig, JsonContainer } from '@powersync/service-jsonbig'; +import { CHECKPOINTS_COLLECTION } from './replication-utils.js'; + +export function getMongoRelation(source: mongo.ChangeStreamNameSpace): storage.SourceEntityDescriptor { + return { + name: source.coll, + schema: source.db, + objectId: source.coll, + replicationColumns: [{ name: '_id' }] + } satisfies storage.SourceEntityDescriptor; +} + +export function getMongoLsn(timestamp: mongo.Timestamp) { + const a = timestamp.high.toString(16).padStart(8, '0'); + const b = timestamp.low.toString(16).padStart(8, '0'); + return a + b; +} + +export function mongoLsnToTimestamp(lsn: string | null) { + if (lsn == null) { + return null; + } + const a = parseInt(lsn.substring(0, 8), 16); + const b = parseInt(lsn.substring(8, 16), 16); + return mongo.Timestamp.fromBits(b, a); +} + +export function constructAfterRecord(document: mongo.Document): SqliteRow { + let record: SqliteRow = {}; + for (let key of Object.keys(document)) { + record[key] = toMongoSyncRulesValue(document[key]); + } + return record; +} + +export function toMongoSyncRulesValue(data: any): SqliteValue { + const autoBigNum = true; + if (data == null) { + // null or undefined + return data; + } else if (typeof data == 'string') { + return data; + } else if (typeof data == 'number') { + if (Number.isInteger(data) && autoBigNum) { + return BigInt(data); + } else { + return data; + } + } else if (typeof data == 'bigint') { + return data; + } else if (typeof data == 'boolean') { + return data ? 
1n : 0n; + } else if (data instanceof mongo.ObjectId) { + return data.toHexString(); + } else if (data instanceof mongo.UUID) { + return data.toHexString(); + } else if (data instanceof Date) { + return data.toISOString().replace('T', ' '); + } else if (data instanceof mongo.Binary) { + return new Uint8Array(data.buffer); + } else if (data instanceof mongo.Long) { + return data.toBigInt(); + } else if (data instanceof mongo.Decimal128) { + return data.toString(); + } else if (data instanceof mongo.MinKey || data instanceof mongo.MaxKey) { + return null; + } else if (data instanceof RegExp) { + return JSON.stringify({ pattern: data.source, options: data.flags }); + } else if (Array.isArray(data)) { + // We may be able to avoid some parse + stringify cycles here for JsonSqliteContainer. + return JSONBig.stringify(data.map((element) => filterJsonData(element))); + } else if (data instanceof Uint8Array) { + return data; + } else if (data instanceof JsonContainer) { + return data.toString(); + } else if (typeof data == 'object') { + let record: Record = {}; + for (let key of Object.keys(data)) { + record[key] = filterJsonData(data[key]); + } + return JSONBig.stringify(record); + } else { + return null; + } +} + +const DEPTH_LIMIT = 20; + +function filterJsonData(data: any, depth = 0): any { + const autoBigNum = true; + if (depth > DEPTH_LIMIT) { + // This is primarily to prevent infinite recursion + throw new Error(`json nested object depth exceeds the limit of ${DEPTH_LIMIT}`); + } + if (data == null) { + return data; // null or undefined + } else if (typeof data == 'string') { + return data; + } else if (typeof data == 'number') { + if (autoBigNum && Number.isInteger(data)) { + return BigInt(data); + } else { + return data; + } + } else if (typeof data == 'boolean') { + return data ? 1n : 0n; + } else if (typeof data == 'bigint') { + return data; + } else if (data instanceof Date) { + return data.toISOString().replace('T', ' '); + } else if (data instanceof mongo.ObjectId) { + return data.toHexString(); + } else if (data instanceof mongo.UUID) { + return data.toHexString(); + } else if (data instanceof mongo.Binary) { + return undefined; + } else if (data instanceof mongo.Long) { + return data.toBigInt(); + } else if (data instanceof mongo.Decimal128) { + return data.toString(); + } else if (data instanceof mongo.MinKey || data instanceof mongo.MaxKey) { + return null; + } else if (data instanceof RegExp) { + return { pattern: data.source, options: data.flags }; + } else if (Array.isArray(data)) { + return data.map((element) => filterJsonData(element, depth + 1)); + } else if (ArrayBuffer.isView(data)) { + return undefined; + } else if (data instanceof JsonContainer) { + // Can be stringified directly when using our JSONBig implementation + return data; + } else if (typeof data == 'object') { + let record: Record = {}; + for (let key of Object.keys(data)) { + record[key] = filterJsonData(data[key], depth + 1); + } + return record; + } else { + return undefined; + } +} + +export async function createCheckpoint(client: mongo.MongoClient, db: mongo.Db): Promise { + const session = client.startSession(); + try { + // Note: If multiple PowerSync instances are replicating the same source database, + // they'll modify the same checkpoint document. This is fine - it could create + // more replication load than required, but won't break anything. 
+ await db.collection(CHECKPOINTS_COLLECTION).findOneAndUpdate( + { + _id: 'checkpoint' as any + }, + { + $inc: { i: 1 } + }, + { + upsert: true, + returnDocument: 'after', + session + } + ); + const time = session.operationTime!; + // TODO: Use the above when we support custom write checkpoints + return getMongoLsn(time); + } finally { + await session.endSession(); + } +} diff --git a/modules/module-mongodb/src/replication/replication-index.ts b/modules/module-mongodb/src/replication/replication-index.ts new file mode 100644 index 000000000..4ff43b56a --- /dev/null +++ b/modules/module-mongodb/src/replication/replication-index.ts @@ -0,0 +1,4 @@ +export * from './MongoRelation.js'; +export * from './ChangeStream.js'; +export * from './ChangeStreamReplicator.js'; +export * from './ChangeStreamReplicationJob.js'; diff --git a/modules/module-mongodb/src/replication/replication-utils.ts b/modules/module-mongodb/src/replication/replication-utils.ts new file mode 100644 index 000000000..5370fdd5e --- /dev/null +++ b/modules/module-mongodb/src/replication/replication-utils.ts @@ -0,0 +1,13 @@ +import { MongoManager } from './MongoManager.js'; + +export const CHECKPOINTS_COLLECTION = '_powersync_checkpoints'; + +export async function checkSourceConfiguration(connectionManager: MongoManager): Promise { + const db = connectionManager.db; + const hello = await db.command({ hello: 1 }); + if (hello.msg == 'isdbgrid') { + throw new Error('Sharded MongoDB Clusters are not supported yet (including MongoDB Serverless instances).'); + } else if (hello.setName == null) { + throw new Error('Standalone MongoDB instances are not supported - use a replicaset.'); + } +} diff --git a/modules/module-mongodb/src/types/types.ts b/modules/module-mongodb/src/types/types.ts new file mode 100644 index 000000000..1498193f5 --- /dev/null +++ b/modules/module-mongodb/src/types/types.ts @@ -0,0 +1,105 @@ +import { normalizeMongoConfig } from '@powersync/service-core'; +import * as service_types from '@powersync/service-types'; +import * as t from 'ts-codec'; + +export const MONGO_CONNECTION_TYPE = 'mongodb' as const; + +export enum PostImagesOption { + /** + * Use fullDocument: updateLookup on the changeStream. + * + * This does not guarantee consistency - operations may + * arrive out of order, especially when there is replication lag. + * + * This is the default option for backwards-compatibility. + */ + OFF = 'off', + + /** + * Use fullDocument: required on the changeStream. + * + * Collections are automatically configured with: + * `changeStreamPreAndPostImages: { enabled: true }` + * + * This is the recommended behavior for new instances. + */ + AUTO_CONFIGURE = 'auto_configure', + + /** + * + * Use fullDocument: required on the changeStream. + * + * Collections are not automatically configured. Each + * collection must be configured manually before + * replicating with: + * + * `changeStreamPreAndPostImages: { enabled: true }` + * + * Use when the collMod permission is not available. + */ + READ_ONLY = 'read_only' +} + +export interface NormalizedMongoConnectionConfig { + id: string; + tag: string; + + uri: string; + database: string; + + username?: string; + password?: string; + + postImages: PostImagesOption; +} + +export const MongoConnectionConfig = service_types.configFile.DataSourceConfig.and( + t.object({ + type: t.literal(MONGO_CONNECTION_TYPE), + /** Unique identifier for the connection - optional when a single connection is present.
*/ + id: t.string.optional(), + /** Tag used as reference in sync rules. Defaults to "default". Does not have to be unique. */ + tag: t.string.optional(), + uri: t.string, + username: t.string.optional(), + password: t.string.optional(), + database: t.string.optional(), + + post_images: t.literal('off').or(t.literal('auto_configure')).or(t.literal('read_only')).optional() + }) +); + +/** + * Config input specified when starting services + */ +export type MongoConnectionConfig = t.Decoded; + +/** + * Resolved version of {@link MongoConnectionConfig} + */ +export type ResolvedConnectionConfig = MongoConnectionConfig & NormalizedMongoConnectionConfig; + +/** + * Validate and normalize connection options. + * + * Returns destructured options. + */ +export function normalizeConnectionConfig(options: MongoConnectionConfig): NormalizedMongoConnectionConfig { + const base = normalizeMongoConfig(options); + + return { + ...base, + id: options.id ?? 'default', + tag: options.tag ?? 'default', + postImages: (options.post_images as PostImagesOption | undefined) ?? PostImagesOption.OFF + }; +} + +/** + * Construct a mongodb URI, without username, password or ssl options. + * + * Only contains hostname, port, database. + */ +export function baseUri(options: NormalizedMongoConnectionConfig) { + return options.uri; +} diff --git a/modules/module-mongodb/src/utils.ts b/modules/module-mongodb/src/utils.ts new file mode 100644 index 000000000..badee3083 --- /dev/null +++ b/modules/module-mongodb/src/utils.ts @@ -0,0 +1,4 @@ +export function escapeRegExp(string: string) { + // https://stackoverflow.com/a/3561711/214837 + return string.replace(/[/\-\\^$*+?.()|[\]{}]/g, '\\$&'); +} diff --git a/modules/module-mongodb/test/src/change_stream.test.ts b/modules/module-mongodb/test/src/change_stream.test.ts new file mode 100644 index 000000000..0380fce6b --- /dev/null +++ b/modules/module-mongodb/test/src/change_stream.test.ts @@ -0,0 +1,517 @@ +import { putOp, removeOp } from '@core-tests/stream_utils.js'; +import { MONGO_STORAGE_FACTORY } from '@core-tests/util.js'; +import { BucketStorageFactory } from '@powersync/service-core'; +import * as crypto from 'crypto'; +import * as mongo from 'mongodb'; +import { setTimeout } from 'node:timers/promises'; +import { describe, expect, test, vi } from 'vitest'; +import { ChangeStreamTestContext } from './change_stream_utils.js'; +import { PostImagesOption } from '@module/types/types.js'; + +type StorageFactory = () => Promise; + +const BASIC_SYNC_RULES = ` +bucket_definitions: + global: + data: + - SELECT _id as id, description FROM "test_data" +`; + +describe('change stream - mongodb', { timeout: 20_000 }, function () { + defineChangeStreamTests(MONGO_STORAGE_FACTORY); +}); + +function defineChangeStreamTests(factory: StorageFactory) { + test('replicating basic values', async () => { + await using context = await ChangeStreamTestContext.open(factory); + const { db } = context; + await context.updateSyncRules(` +bucket_definitions: + global: + data: + - SELECT _id as id, description, num FROM "test_data"`); + + await db.createCollection('test_data', { + changeStreamPreAndPostImages: { enabled: false } + }); + const collection = db.collection('test_data'); + + await context.replicateSnapshot(); + + context.startStreaming(); + + const result = await collection.insertOne({ description: 'test1', num: 1152921504606846976n }); + const test_id = result.insertedId; + await setTimeout(30); + await collection.updateOne({ _id: test_id }, { $set: { description: 'test2' } }); + await 
setTimeout(30); + await collection.replaceOne({ _id: test_id }, { description: 'test3' }); + await setTimeout(30); + await collection.deleteOne({ _id: test_id }); + + const data = await context.getBucketData('global[]'); + + expect(data).toMatchObject([ + putOp('test_data', { id: test_id.toHexString(), description: 'test1', num: 1152921504606846976n }), + putOp('test_data', { id: test_id.toHexString(), description: 'test2', num: 1152921504606846976n }), + putOp('test_data', { id: test_id.toHexString(), description: 'test3' }), + removeOp('test_data', test_id.toHexString()) + ]); + }); + + test('replicating wildcard', async () => { + await using context = await ChangeStreamTestContext.open(factory); + const { db } = context; + await context.updateSyncRules(` +bucket_definitions: + global: + data: + - SELECT _id as id, description, num FROM "test_%"`); + + await db.createCollection('test_data', { + changeStreamPreAndPostImages: { enabled: false } + }); + const collection = db.collection('test_data'); + + const result = await collection.insertOne({ description: 'test1', num: 1152921504606846976n }); + const test_id = result.insertedId; + + await context.replicateSnapshot(); + + context.startStreaming(); + + await setTimeout(30); + await collection.updateOne({ _id: test_id }, { $set: { description: 'test2' } }); + + const data = await context.getBucketData('global[]'); + + expect(data).toMatchObject([ + putOp('test_data', { id: test_id.toHexString(), description: 'test1', num: 1152921504606846976n }), + putOp('test_data', { id: test_id.toHexString(), description: 'test2', num: 1152921504606846976n }) + ]); + }); + + test('updateLookup - no fullDocument available', async () => { + await using context = await ChangeStreamTestContext.open(factory, { postImages: PostImagesOption.OFF }); + const { db, client } = context; + await context.updateSyncRules(` +bucket_definitions: + global: + data: + - SELECT _id as id, description, num FROM "test_data"`); + + await db.createCollection('test_data', { + changeStreamPreAndPostImages: { enabled: false } + }); + const collection = db.collection('test_data'); + + await context.replicateSnapshot(); + context.startStreaming(); + + const session = client.startSession(); + let test_id: mongo.ObjectId | undefined; + try { + await session.withTransaction(async () => { + const result = await collection.insertOne({ description: 'test1', num: 1152921504606846976n }, { session }); + test_id = result.insertedId; + await collection.updateOne({ _id: test_id }, { $set: { description: 'test2' } }, { session }); + await collection.replaceOne({ _id: test_id }, { description: 'test3' }, { session }); + await collection.deleteOne({ _id: test_id }, { session }); + }); + } finally { + await session.endSession(); + } + + const data = await context.getBucketData('global[]'); + + expect(data).toMatchObject([ + putOp('test_data', { id: test_id!.toHexString(), description: 'test1', num: 1152921504606846976n }), + // fullDocument is not available at the point this is replicated, resulting in it treated as a remove + removeOp('test_data', test_id!.toHexString()), + putOp('test_data', { id: test_id!.toHexString(), description: 'test3' }), + removeOp('test_data', test_id!.toHexString()) + ]); + }); + + test('postImages - autoConfigure', async () => { + // Similar to the above test, but with postImages enabled. + // This resolves the consistency issue. 
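Connecting the test option above to configuration: in `types.ts` (added in this diff), the connection config's `post_images` field selects between these modes, and `normalizeConnectionConfig` maps it onto the `PostImagesOption` enum. A small sketch follows before the test body; the URI is illustrative, other optional `DataSourceConfig` fields are omitted, and the import path is the test alias used elsewhere in this diff.

```ts
import { normalizeConnectionConfig, PostImagesOption } from '@module/types/types.js';

// Illustrative values only; normalizeMongoConfig (from service-core) derives
// the database and credentials from the URI where applicable.
const normalized = normalizeConnectionConfig({
  type: 'mongodb',
  uri: 'mongodb://localhost:27017/powersync_demo',
  post_images: 'auto_configure'
});

console.log(normalized.postImages === PostImagesOption.AUTO_CONFIGURE); // true
console.log(normalized.id, normalized.tag); // 'default' 'default'
```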
+ await using context = await ChangeStreamTestContext.open(factory, { postImages: PostImagesOption.AUTO_CONFIGURE }); + const { db, client } = context; + await context.updateSyncRules(` +bucket_definitions: + global: + data: + - SELECT _id as id, description, num FROM "test_data"`); + + await db.createCollection('test_data', { + // enabled: false here, but autoConfigure will enable it. + changeStreamPreAndPostImages: { enabled: false } + }); + const collection = db.collection('test_data'); + + await context.replicateSnapshot(); + + context.startStreaming(); + + const session = client.startSession(); + let test_id: mongo.ObjectId | undefined; + try { + await session.withTransaction(async () => { + const result = await collection.insertOne({ description: 'test1', num: 1152921504606846976n }, { session }); + test_id = result.insertedId; + await collection.updateOne({ _id: test_id }, { $set: { description: 'test2' } }, { session }); + await collection.replaceOne({ _id: test_id }, { description: 'test3' }, { session }); + await collection.deleteOne({ _id: test_id }, { session }); + }); + } finally { + await session.endSession(); + } + + const data = await context.getBucketData('global[]'); + + expect(data).toMatchObject([ + putOp('test_data', { id: test_id!.toHexString(), description: 'test1', num: 1152921504606846976n }), + // The postImage helps us get this data + putOp('test_data', { id: test_id!.toHexString(), description: 'test2', num: 1152921504606846976n }), + putOp('test_data', { id: test_id!.toHexString(), description: 'test3' }), + removeOp('test_data', test_id!.toHexString()) + ]); + }); + + test('postImages - on', async () => { + // Similar to postImages - autoConfigure, but does not auto-configure. + // changeStreamPreAndPostImages must be manually configured. 
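Since `READ_ONLY` mode expects `changeStreamPreAndPostImages` to be enabled out of band, here is a hedged sketch of that manual step before the test body (the collection name is illustrative). It is the same change that `AUTO_CONFIGURE` applies automatically, and the equivalent option can also be passed to `createCollection`, as the test below does.

```ts
import * as mongo from 'mongodb';

// Requires the collMod privilege on the database; with PostImagesOption.READ_ONLY
// the service assumes this has already been done for every replicated collection.
async function enablePostImages(db: mongo.Db, collectionName: string): Promise<void> {
  await db.command({
    collMod: collectionName,
    changeStreamPreAndPostImages: { enabled: true }
  });
}
```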
+ await using context = await ChangeStreamTestContext.open(factory, { postImages: PostImagesOption.READ_ONLY }); + const { db, client } = context; + await context.updateSyncRules(` +bucket_definitions: + global: + data: + - SELECT _id as id, description, num FROM "test_data"`); + + await db.createCollection('test_data', { + changeStreamPreAndPostImages: { enabled: true } + }); + const collection = db.collection('test_data'); + + await context.replicateSnapshot(); + + context.startStreaming(); + + const session = client.startSession(); + let test_id: mongo.ObjectId | undefined; + try { + await session.withTransaction(async () => { + const result = await collection.insertOne({ description: 'test1', num: 1152921504606846976n }, { session }); + test_id = result.insertedId; + await collection.updateOne({ _id: test_id }, { $set: { description: 'test2' } }, { session }); + await collection.replaceOne({ _id: test_id }, { description: 'test3' }, { session }); + await collection.deleteOne({ _id: test_id }, { session }); + }); + } finally { + await session.endSession(); + } + + const data = await context.getBucketData('global[]'); + + expect(data).toMatchObject([ + putOp('test_data', { id: test_id!.toHexString(), description: 'test1', num: 1152921504606846976n }), + // The postImage helps us get this data + putOp('test_data', { id: test_id!.toHexString(), description: 'test2', num: 1152921504606846976n }), + putOp('test_data', { id: test_id!.toHexString(), description: 'test3' }), + removeOp('test_data', test_id!.toHexString()) + ]); + }); + + test('replicating case sensitive table', async () => { + await using context = await ChangeStreamTestContext.open(factory); + const { db } = context; + await context.updateSyncRules(` + bucket_definitions: + global: + data: + - SELECT _id as id, description FROM "test_DATA" + `); + + await context.replicateSnapshot(); + + context.startStreaming(); + + const collection = db.collection('test_DATA'); + const result = await collection.insertOne({ description: 'test1' }); + const test_id = result.insertedId.toHexString(); + + const data = await context.getBucketData('global[]'); + + expect(data).toMatchObject([putOp('test_DATA', { id: test_id, description: 'test1' })]); + }); + + test('replicating large values', async () => { + await using context = await ChangeStreamTestContext.open(factory); + const { db } = context; + await context.updateSyncRules(` + bucket_definitions: + global: + data: + - SELECT _id as id, name, description FROM "test_data" + `); + + await context.replicateSnapshot(); + context.startStreaming(); + + const largeDescription = crypto.randomBytes(20_000).toString('hex'); + + const collection = db.collection('test_data'); + const result = await collection.insertOne({ name: 'test1', description: largeDescription }); + const test_id = result.insertedId; + + await collection.updateOne({ _id: test_id }, { $set: { name: 'test2' } }); + + const data = await context.getBucketData('global[]'); + expect(data.slice(0, 1)).toMatchObject([ + putOp('test_data', { id: test_id.toHexString(), name: 'test1', description: largeDescription }) + ]); + expect(data.slice(1)).toMatchObject([ + putOp('test_data', { id: test_id.toHexString(), name: 'test2', description: largeDescription }) + ]); + }); + + test('replicating dropCollection', async () => { + await using context = await ChangeStreamTestContext.open(factory); + const { db } = context; + const syncRuleContent = ` +bucket_definitions: + global: + data: + - SELECT _id as id, description FROM "test_data" + 
by_test_data: + parameters: SELECT _id as id FROM test_data WHERE id = token_parameters.user_id + data: [] +`; + await context.updateSyncRules(syncRuleContent); + await context.replicateSnapshot(); + context.startStreaming(); + + const collection = db.collection('test_data'); + const result = await collection.insertOne({ description: 'test1' }); + const test_id = result.insertedId.toHexString(); + + await collection.drop(); + + const data = await context.getBucketData('global[]'); + + expect(data).toMatchObject([ + putOp('test_data', { id: test_id, description: 'test1' }), + removeOp('test_data', test_id) + ]); + }); + + test('replicating renameCollection', async () => { + await using context = await ChangeStreamTestContext.open(factory); + const { db } = context; + const syncRuleContent = ` +bucket_definitions: + global: + data: + - SELECT _id as id, description FROM "test_data1" + - SELECT _id as id, description FROM "test_data2" +`; + await context.updateSyncRules(syncRuleContent); + await context.replicateSnapshot(); + context.startStreaming(); + + const collection = db.collection('test_data1'); + const result = await collection.insertOne({ description: 'test1' }); + const test_id = result.insertedId.toHexString(); + + await collection.rename('test_data2'); + + const data = await context.getBucketData('global[]'); + + expect(data).toMatchObject([ + putOp('test_data1', { id: test_id, description: 'test1' }), + removeOp('test_data1', test_id), + putOp('test_data2', { id: test_id, description: 'test1' }) + ]); + }); + + test('initial sync', async () => { + await using context = await ChangeStreamTestContext.open(factory); + const { db } = context; + await context.updateSyncRules(BASIC_SYNC_RULES); + + const collection = db.collection('test_data'); + const result = await collection.insertOne({ description: 'test1' }); + const test_id = result.insertedId.toHexString(); + + await context.replicateSnapshot(); + context.startStreaming(); + + const data = await context.getBucketData('global[]'); + expect(data).toMatchObject([putOp('test_data', { id: test_id, description: 'test1' })]); + }); + + test('large record', async () => { + // Test a large update. + + // Without $changeStreamSplitLargeEvent, we get this error: + // MongoServerError: PlanExecutor error during aggregation :: caused by :: BSONObj size: 33554925 (0x20001ED) is invalid. + // Size must be between 0 and 16793600(16MB) + + await using context = await ChangeStreamTestContext.open(factory); + await context.updateSyncRules(`bucket_definitions: + global: + data: + - SELECT _id as id, name, other FROM "test_data"`); + const { db } = context; + + await context.replicateSnapshot(); + + const collection = db.collection('test_data'); + const result = await collection.insertOne({ name: 't1' }); + const test_id = result.insertedId; + + // 12MB field. + // The field appears twice in the ChangeStream event, so the total size + // is > 16MB. + + // We don't actually have this description field in the sync rules, + // That causes other issues, not relevant for this specific test. 
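For context on the error quoted in the comment above: a change event larger than the 16MB BSON limit can only be consumed when the change stream pipeline ends with MongoDB's `$changeStreamSplitLargeEvent` stage (available in recent MongoDB versions), which splits the event into fragments that the `ChangeStream` class reassembles via `splitEvent`. A hedged sketch of opening a stream with that stage; the module's actual pipeline is built elsewhere in `ChangeStream.ts`.

```ts
import * as mongo from 'mongodb';

function watchWithSplitEvents(db: mongo.Db): mongo.ChangeStream {
  // $changeStreamSplitLargeEvent must be the last stage of the pipeline.
  return db.watch([{ $changeStreamSplitLargeEvent: {} }], {
    fullDocument: 'updateLookup',
    showExpandedEvents: true
  });
}
```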
+ const largeDescription = crypto.randomBytes(12000000 / 2).toString('hex'); + + await collection.updateOne({ _id: test_id }, { $set: { description: largeDescription } }); + context.startStreaming(); + + const data = await context.getBucketData('global[]'); + expect(data.length).toEqual(2); + const row1 = JSON.parse(data[0].data as string); + expect(row1).toEqual({ id: test_id.toHexString(), name: 't1' }); + delete data[0].data; + expect(data[0]).toMatchObject({ + object_id: test_id.toHexString(), + object_type: 'test_data', + op: 'PUT', + op_id: '1' + }); + const row2 = JSON.parse(data[1].data as string); + expect(row2).toEqual({ id: test_id.toHexString(), name: 't1' }); + delete data[1].data; + expect(data[1]).toMatchObject({ + object_id: test_id.toHexString(), + object_type: 'test_data', + op: 'PUT', + op_id: '2' + }); + }); + + test('collection not in sync rules', async () => { + await using context = await ChangeStreamTestContext.open(factory); + const { db } = context; + await context.updateSyncRules(BASIC_SYNC_RULES); + + await context.replicateSnapshot(); + + context.startStreaming(); + + const collection = db.collection('test_donotsync'); + const result = await collection.insertOne({ description: 'test' }); + + const data = await context.getBucketData('global[]'); + + expect(data).toMatchObject([]); + }); + + test('postImages - new collection with postImages enabled', async () => { + await using context = await ChangeStreamTestContext.open(factory, { postImages: PostImagesOption.AUTO_CONFIGURE }); + const { db } = context; + await context.updateSyncRules(` +bucket_definitions: + global: + data: + - SELECT _id as id, description FROM "test_%"`); + + await context.replicateSnapshot(); + + await db.createCollection('test_data', { + // enabled: true here - everything should work + changeStreamPreAndPostImages: { enabled: true } + }); + const collection = db.collection('test_data'); + const result = await collection.insertOne({ description: 'test1' }); + const test_id = result.insertedId; + await collection.updateOne({ _id: test_id }, { $set: { description: 'test2' } }); + + context.startStreaming(); + + const data = await context.getBucketData('global[]'); + expect(data).toMatchObject([ + putOp('test_data', { id: test_id!.toHexString(), description: 'test1' }), + putOp('test_data', { id: test_id!.toHexString(), description: 'test2' }) + ]); + }); + + test('postImages - new collection with postImages disabled', async () => { + await using context = await ChangeStreamTestContext.open(factory, { postImages: PostImagesOption.AUTO_CONFIGURE }); + const { db } = context; + await context.updateSyncRules(` +bucket_definitions: + global: + data: + - SELECT _id as id, description FROM "test_data%"`); + + await context.replicateSnapshot(); + + await db.createCollection('test_data', { + // enabled: false here, but autoConfigure will enable it. + // Unfortunately, that is too late, and replication must be restarted. 
+ changeStreamPreAndPostImages: { enabled: false } + }); + const collection = db.collection('test_data'); + const result = await collection.insertOne({ description: 'test1' }); + const test_id = result.insertedId; + await collection.updateOne({ _id: test_id }, { $set: { description: 'test2' } }); + + context.startStreaming(); + + await expect(() => context.getBucketData('global[]')).rejects.toMatchObject({ + message: expect.stringContaining('stream was configured to require a post-image for all update events') + }); + }); + + test('recover from error', async () => { + await using context = await ChangeStreamTestContext.open(factory); + const { db } = context; + await context.updateSyncRules(` +bucket_definitions: + global: + data: + - SELECT _id as id, description, num FROM "test_data"`); + + await db.createCollection('test_data', { + changeStreamPreAndPostImages: { enabled: false } + }); + + const collection = db.collection('test_data'); + await collection.insertOne({ description: 'test1', num: 1152921504606846976n }); + + await context.replicateSnapshot(); + + // Simulate an error + await context.storage!.reportError(new Error('simulated error')); + expect((await context.factory.getActiveSyncRulesContent())?.last_fatal_error).toEqual('simulated error'); + + // startStreaming() should automatically clear the error. + context.startStreaming(); + + // getBucketData() creates a checkpoint that clears the error, so we don't do that + // Just wait, and check that the error is cleared automatically. + await vi.waitUntil( + async () => { + const error = (await context.factory.getActiveSyncRulesContent())?.last_fatal_error; + return error == null; + }, + { timeout: 2_000 } + ); + }); +} diff --git a/modules/module-mongodb/test/src/change_stream_utils.ts b/modules/module-mongodb/test/src/change_stream_utils.ts new file mode 100644 index 000000000..77a5d9647 --- /dev/null +++ b/modules/module-mongodb/test/src/change_stream_utils.ts @@ -0,0 +1,178 @@ +import { ActiveCheckpoint, BucketStorageFactory, OpId, SyncRulesBucketStorage } from '@powersync/service-core'; + +import { TEST_CONNECTION_OPTIONS, clearTestDb } from './util.js'; +import { fromAsync } from '@core-tests/stream_utils.js'; +import { MongoManager } from '@module/replication/MongoManager.js'; +import { ChangeStream, ChangeStreamOptions } from '@module/replication/ChangeStream.js'; +import * as mongo from 'mongodb'; +import { createCheckpoint } from '@module/replication/MongoRelation.js'; +import { NormalizedMongoConnectionConfig } from '@module/types/types.js'; + +export class ChangeStreamTestContext { + private _walStream?: ChangeStream; + private abortController = new AbortController(); + private streamPromise?: Promise; + public storage?: SyncRulesBucketStorage; + + /** + * Tests operating on the mongo change stream need to configure the stream and manage asynchronous + * replication, which gets a little tricky. + * + * This configures all the context, and tears it down afterwards. 
+ */ + static async open(factory: () => Promise, options?: Partial) { + const f = await factory(); + const connectionManager = new MongoManager({ ...TEST_CONNECTION_OPTIONS, ...options }); + + await clearTestDb(connectionManager.db); + return new ChangeStreamTestContext(f, connectionManager); + } + + constructor( + public factory: BucketStorageFactory, + public connectionManager: MongoManager + ) {} + + async dispose() { + this.abortController.abort(); + await this.streamPromise?.catch((e) => e); + await this.connectionManager.destroy(); + } + + async [Symbol.asyncDispose]() { + await this.dispose(); + } + + get client() { + return this.connectionManager.client; + } + + get db() { + return this.connectionManager.db; + } + + get connectionTag() { + return this.connectionManager.connectionTag; + } + + async updateSyncRules(content: string) { + const syncRules = await this.factory.updateSyncRules({ content: content }); + this.storage = this.factory.getInstance(syncRules); + return this.storage!; + } + + get walStream() { + if (this.storage == null) { + throw new Error('updateSyncRules() first'); + } + if (this._walStream) { + return this._walStream; + } + const options: ChangeStreamOptions = { + storage: this.storage, + connections: this.connectionManager, + abort_signal: this.abortController.signal + }; + this._walStream = new ChangeStream(options); + return this._walStream!; + } + + async replicateSnapshot() { + await this.walStream.initReplication(); + await this.storage!.autoActivate(); + } + + startStreaming() { + this.streamPromise = this.walStream.streamChanges(); + } + + async getCheckpoint(options?: { timeout?: number }) { + let checkpoint = await Promise.race([ + getClientCheckpoint(this.client, this.db, this.factory, { timeout: options?.timeout ?? 15_000 }), + this.streamPromise + ]); + if (typeof checkpoint == 'undefined') { + // This indicates an issue with the test setup - streamingPromise completed instead + // of getClientCheckpoint() + throw new Error('Test failure - streamingPromise completed'); + } + return checkpoint as string; + } + + async getBucketsDataBatch(buckets: Record, options?: { timeout?: number }) { + let checkpoint = await this.getCheckpoint(options); + const map = new Map(Object.entries(buckets)); + return fromAsync(this.storage!.getBucketDataBatch(checkpoint, map)); + } + + async getBucketData( + bucket: string, + start?: string, + options?: { timeout?: number; limit?: number; chunkLimitBytes?: number } + ) { + start ??= '0'; + let checkpoint = await this.getCheckpoint(options); + const map = new Map([[bucket, start]]); + const batch = this.storage!.getBucketDataBatch(checkpoint, map, { + limit: options?.limit, + chunkLimitBytes: options?.chunkLimitBytes + }); + const batches = await fromAsync(batch); + return batches[0]?.batch.data ?? []; + } + + async getChecksums(buckets: string[], options?: { timeout?: number }) { + let checkpoint = await this.getCheckpoint(options); + return this.storage!.getChecksums(checkpoint, buckets); + } + + async getChecksum(bucket: string, options?: { timeout?: number }) { + let checkpoint = await this.getCheckpoint(options); + const map = await this.storage!.getChecksums(checkpoint, [bucket]); + return map.get(bucket); + } +} + +export async function getClientCheckpoint( + client: mongo.MongoClient, + db: mongo.Db, + bucketStorage: BucketStorageFactory, + options?: { timeout?: number } +): Promise { + const start = Date.now(); + const lsn = await createCheckpoint(client, db); + // This old API needs a persisted checkpoint id. 
+ // Since we don't use LSNs anymore, the only way to get that is to wait. + + const timeout = options?.timeout ?? 50_000; + let lastCp: ActiveCheckpoint | null = null; + + while (Date.now() - start < timeout) { + const cp = await bucketStorage.getActiveCheckpoint(); + lastCp = cp; + if (!cp.hasSyncRules()) { + throw new Error('No sync rules available'); + } + if (cp.lsn && cp.lsn >= lsn) { + return cp.checkpoint; + } + + await new Promise((resolve) => setTimeout(resolve, 30)); + } + + throw new Error(`Timeout while waiting for checkpoint ${lsn}. Last checkpoint: ${lastCp?.lsn}`); +} + +export async function setSnapshotHistorySeconds(client: mongo.MongoClient, seconds: number) { + const { minSnapshotHistoryWindowInSeconds: currentValue } = await client + .db('admin') + .command({ getParameter: 1, minSnapshotHistoryWindowInSeconds: 1 }); + + await client.db('admin').command({ setParameter: 1, minSnapshotHistoryWindowInSeconds: seconds }); + + return { + async [Symbol.asyncDispose]() { + await client.db('admin').command({ setParameter: 1, minSnapshotHistoryWindowInSeconds: currentValue }); + } + }; +} diff --git a/modules/module-mongodb/test/src/env.ts b/modules/module-mongodb/test/src/env.ts new file mode 100644 index 000000000..e460c80b3 --- /dev/null +++ b/modules/module-mongodb/test/src/env.ts @@ -0,0 +1,7 @@ +import { utils } from '@powersync/lib-services-framework'; + +export const env = utils.collectEnvironmentVariables({ + MONGO_TEST_DATA_URL: utils.type.string.default('mongodb://localhost:27017/powersync_test_data'), + CI: utils.type.boolean.default('false'), + SLOW_TESTS: utils.type.boolean.default('false') +}); diff --git a/modules/module-mongodb/test/src/mongo_test.test.ts b/modules/module-mongodb/test/src/mongo_test.test.ts new file mode 100644 index 000000000..5d30067da --- /dev/null +++ b/modules/module-mongodb/test/src/mongo_test.test.ts @@ -0,0 +1,449 @@ +import { MongoRouteAPIAdapter } from '@module/api/MongoRouteAPIAdapter.js'; +import { ChangeStream } from '@module/replication/ChangeStream.js'; +import { constructAfterRecord } from '@module/replication/MongoRelation.js'; +import { SqliteRow, SqlSyncRules } from '@powersync/service-sync-rules'; +import * as mongo from 'mongodb'; +import { describe, expect, test } from 'vitest'; +import { clearTestDb, connectMongoData, TEST_CONNECTION_OPTIONS } from './util.js'; +import { PostImagesOption } from '@module/types/types.js'; + +describe('mongo data types', () => { + async function setupTable(db: mongo.Db) { + await clearTestDb(db); + } + + async function insert(collection: mongo.Collection) { + await collection.insertMany([ + { + _id: 1 as any, + null: null, + text: 'text', + uuid: new mongo.UUID('baeb2514-4c57-436d-b3cc-c1256211656d'), + bool: true, + bytea: Buffer.from('test'), + int2: 1000, + int4: 1000000, + int8: 9007199254740993n, + float: 3.14, + decimal: new mongo.Decimal128('3.14') + }, + { _id: 2 as any, nested: { test: 'thing' } }, + { _id: 3 as any, date: new Date('2023-03-06 15:47+02') }, + { + _id: 4 as any, + timestamp: mongo.Timestamp.fromBits(123, 456), + objectId: mongo.ObjectId.createFromHexString('66e834cc91d805df11fa0ecb'), + regexp: new mongo.BSONRegExp('test', 'i'), + minKey: new mongo.MinKey(), + maxKey: new mongo.MaxKey(), + symbol: new mongo.BSONSymbol('test'), + js: new mongo.Code('testcode'), + js2: new mongo.Code('testcode', { foo: 'bar' }), + pointer: new mongo.DBRef('mycollection', mongo.ObjectId.createFromHexString('66e834cc91d805df11fa0ecb')), + pointer2: new mongo.DBRef( + 'mycollection', + 
mongo.ObjectId.createFromHexString('66e834cc91d805df11fa0ecb'), + 'mydb', + { foo: 'bar' } + ), + undefined: undefined + } + ]); + } + + async function insertNested(collection: mongo.Collection) { + await collection.insertMany([ + { + _id: 1 as any, + null: [null], + text: ['text'], + uuid: [new mongo.UUID('baeb2514-4c57-436d-b3cc-c1256211656d')], + bool: [true], + bytea: [Buffer.from('test')], + int2: [1000], + int4: [1000000], + int8: [9007199254740993n], + float: [3.14], + decimal: [new mongo.Decimal128('3.14')] + }, + { _id: 2 as any, nested: [{ test: 'thing' }] }, + { _id: 3 as any, date: [new Date('2023-03-06 15:47+02')] }, + { + _id: 10 as any, + timestamp: [mongo.Timestamp.fromBits(123, 456)], + objectId: [mongo.ObjectId.createFromHexString('66e834cc91d805df11fa0ecb')], + regexp: [new mongo.BSONRegExp('test', 'i')], + minKey: [new mongo.MinKey()], + maxKey: [new mongo.MaxKey()], + symbol: [new mongo.BSONSymbol('test')], + js: [new mongo.Code('testcode')], + pointer: [new mongo.DBRef('mycollection', mongo.ObjectId.createFromHexString('66e834cc91d805df11fa0ecb'))], + undefined: [undefined] + } + ]); + } + + function checkResults(transformed: Record[]) { + expect(transformed[0]).toMatchObject({ + _id: 1n, + text: 'text', + uuid: 'baeb2514-4c57-436d-b3cc-c1256211656d', + bool: 1n, + bytea: new Uint8Array([116, 101, 115, 116]), + int2: 1000n, + int4: 1000000n, + int8: 9007199254740993n, + float: 3.14, + null: null, + decimal: '3.14' + }); + expect(transformed[1]).toMatchObject({ + _id: 2n, + nested: '{"test":"thing"}' + }); + + expect(transformed[2]).toMatchObject({ + _id: 3n, + date: '2023-03-06 13:47:00.000Z' + }); + + expect(transformed[3]).toMatchObject({ + _id: 4n, + objectId: '66e834cc91d805df11fa0ecb', + timestamp: 1958505087099n, + regexp: '{"pattern":"test","options":"i"}', + minKey: null, + maxKey: null, + symbol: 'test', + js: '{"code":"testcode","scope":null}', + js2: '{"code":"testcode","scope":{"foo":"bar"}}', + pointer: '{"collection":"mycollection","oid":"66e834cc91d805df11fa0ecb","fields":{}}', + pointer2: '{"collection":"mycollection","oid":"66e834cc91d805df11fa0ecb","db":"mydb","fields":{"foo":"bar"}}', + undefined: null + }); + } + + function checkResultsNested(transformed: Record[]) { + expect(transformed[0]).toMatchObject({ + _id: 1n, + text: `["text"]`, + uuid: '["baeb2514-4c57-436d-b3cc-c1256211656d"]', + bool: '[1]', + bytea: '[null]', + int2: '[1000]', + int4: '[1000000]', + int8: `[9007199254740993]`, + float: '[3.14]', + null: '[null]' + }); + + // Note: Depending on to what extent we use the original postgres value, the whitespace may change, and order may change. + // We do expect that decimals and big numbers are preserved. 
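As a concrete reference for the conversions these assertions check (see `toMongoSyncRulesValue` earlier in this diff), here is a small hedged sketch of what `constructAfterRecord` produces for an illustrative document, before the remaining nested-array assertions below:

```ts
import * as mongo from 'mongodb';
import { constructAfterRecord } from '@module/replication/MongoRelation.js';

const row = constructAfterRecord({
  _id: mongo.ObjectId.createFromHexString('66e834cc91d805df11fa0ecb'),
  active: true,
  count: 42,
  created: new Date('2023-03-06T13:47:00.000Z'),
  meta: { tag: 'a' }
});

// Expected shape, following the mapping shown earlier:
// {
//   _id: '66e834cc91d805df11fa0ecb',  // ObjectId -> hex string
//   active: 1n,                        // boolean -> BigInt 0/1
//   count: 42n,                        // integer -> BigInt
//   created: '2023-03-06 13:47:00.000Z', // Date -> space-separated UTC string
//   meta: '{"tag":"a"}'                // nested object -> JSON text
// }
```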
+ expect(transformed[1]).toMatchObject({ + _id: 2n, + nested: '[{"test":"thing"}]' + }); + + expect(transformed[2]).toMatchObject({ + _id: 3n, + date: '["2023-03-06 13:47:00.000Z"]' + }); + + expect(transformed[3]).toMatchObject({ + _id: 10n, + objectId: '["66e834cc91d805df11fa0ecb"]', + timestamp: '[1958505087099]', + regexp: '[{"pattern":"test","options":"i"}]', + symbol: '["test"]', + js: '[{"code":"testcode","scope":null}]', + pointer: '[{"collection":"mycollection","oid":"66e834cc91d805df11fa0ecb","fields":{}}]', + minKey: '[null]', + maxKey: '[null]', + undefined: '[null]' + }); + } + + test('test direct queries', async () => { + const { db, client } = await connectMongoData(); + const collection = db.collection('test_data'); + try { + await setupTable(db); + + await insert(collection); + + const transformed = [...ChangeStream.getQueryData(await db.collection('test_data').find().toArray())]; + + checkResults(transformed); + } finally { + await client.close(); + } + }); + + test('test direct queries - arrays', async () => { + const { db, client } = await connectMongoData(); + const collection = db.collection('test_data_arrays'); + try { + await setupTable(db); + + await insertNested(collection); + + const transformed = [...ChangeStream.getQueryData(await db.collection('test_data_arrays').find().toArray())]; + + checkResultsNested(transformed); + } finally { + await client.close(); + } + }); + + test('test replication', async () => { + // With MongoDB, replication uses the exact same document format + // as normal queries. We test it anyway. + const { db, client } = await connectMongoData(); + const collection = db.collection('test_data'); + try { + await setupTable(db); + + const stream = db.watch([], { + useBigInt64: true, + maxAwaitTimeMS: 50, + fullDocument: 'updateLookup' + }); + + await stream.tryNext(); + + await insert(collection); + + const transformed = await getReplicationTx(stream, 4); + + checkResults(transformed); + } finally { + await client.close(); + } + }); + + test('test replication - arrays', async () => { + const { db, client } = await connectMongoData(); + const collection = db.collection('test_data'); + try { + await setupTable(db); + + const stream = db.watch([], { + useBigInt64: true, + maxAwaitTimeMS: 50, + fullDocument: 'updateLookup' + }); + + await stream.tryNext(); + + await insertNested(collection); + + const transformed = await getReplicationTx(stream, 4); + + checkResultsNested(transformed); + } finally { + await client.close(); + } + }); + + test('connection schema', async () => { + await using adapter = new MongoRouteAPIAdapter({ + type: 'mongodb', + ...TEST_CONNECTION_OPTIONS + }); + const db = adapter.db; + await clearTestDb(db); + + const collection = db.collection('test_data'); + await setupTable(db); + await insert(collection); + + const schema = await adapter.getConnectionSchema(); + const dbSchema = schema.filter((s) => s.name == TEST_CONNECTION_OPTIONS.database)[0]; + expect(dbSchema).not.toBeNull(); + expect(dbSchema.tables).toMatchObject([ + { + name: 'test_data', + columns: [ + { name: '_id', sqlite_type: 4, internal_type: 'Integer' }, + { name: 'bool', sqlite_type: 4, internal_type: 'Boolean' }, + { name: 'bytea', sqlite_type: 1, internal_type: 'Binary' }, + { name: 'date', sqlite_type: 2, internal_type: 'Date' }, + { name: 'decimal', sqlite_type: 2, internal_type: 'Decimal' }, + { name: 'float', sqlite_type: 8, internal_type: 'Double' }, + { name: 'int2', sqlite_type: 4, internal_type: 'Integer' }, + { name: 'int4', sqlite_type: 4, 
internal_type: 'Integer' }, + { name: 'int8', sqlite_type: 4, internal_type: 'Long' }, + // We can fix these later + { name: 'js', sqlite_type: 2, internal_type: 'Object' }, + { name: 'js2', sqlite_type: 2, internal_type: 'Object' }, + { name: 'maxKey', sqlite_type: 0, internal_type: 'MaxKey' }, + { name: 'minKey', sqlite_type: 0, internal_type: 'MinKey' }, + { name: 'nested', sqlite_type: 2, internal_type: 'Object' }, + { name: 'null', sqlite_type: 0, internal_type: 'Null' }, + { name: 'objectId', sqlite_type: 2, internal_type: 'ObjectId' }, + // We can fix these later + { name: 'pointer', sqlite_type: 2, internal_type: 'Object' }, + { name: 'pointer2', sqlite_type: 2, internal_type: 'Object' }, + { name: 'regexp', sqlite_type: 2, internal_type: 'RegExp' }, + // Can fix this later + { name: 'symbol', sqlite_type: 2, internal_type: 'String' }, + { name: 'text', sqlite_type: 2, internal_type: 'String' }, + { name: 'timestamp', sqlite_type: 4, internal_type: 'Timestamp' }, + { name: 'undefined', sqlite_type: 0, internal_type: 'Null' }, + { name: 'uuid', sqlite_type: 2, internal_type: 'UUID' } + ] + } + ]); + }); + + test('validate postImages', async () => { + await using adapter = new MongoRouteAPIAdapter({ + type: 'mongodb', + ...TEST_CONNECTION_OPTIONS, + postImages: PostImagesOption.READ_ONLY + }); + const db = adapter.db; + await clearTestDb(db); + + const collection = db.collection('test_data'); + await setupTable(db); + await insert(collection); + + const rules = SqlSyncRules.fromYaml( + ` +bucket_definitions: + global: + data: + - select _id as id, * from test_data + + `, + { + ...adapter.getParseSyncRulesOptions(), + // No schema-based validation at this point + schema: undefined + } + ); + const source_table_patterns = rules.getSourceTables(); + const results = await adapter.getDebugTablesInfo(source_table_patterns, rules); + + const result = results[0]; + expect(result).not.toBeNull(); + expect(result.table).toMatchObject({ + schema: 'powersync_test_data', + name: 'test_data', + replication_id: ['_id'], + data_queries: true, + parameter_queries: false, + errors: [ + { + level: 'fatal', + message: 'changeStreamPreAndPostImages not enabled on powersync_test_data.test_data' + } + ] + }); + }); + + test('validate postImages - auto-configure', async () => { + await using adapter = new MongoRouteAPIAdapter({ + type: 'mongodb', + ...TEST_CONNECTION_OPTIONS, + postImages: PostImagesOption.AUTO_CONFIGURE + }); + const db = adapter.db; + await clearTestDb(db); + + const collection = db.collection('test_data'); + await setupTable(db); + await insert(collection); + + const rules = SqlSyncRules.fromYaml( + ` +bucket_definitions: + global: + data: + - select _id as id, * from test_data + + `, + { + ...adapter.getParseSyncRulesOptions(), + // No schema-based validation at this point + schema: undefined + } + ); + const source_table_patterns = rules.getSourceTables(); + const results = await adapter.getDebugTablesInfo(source_table_patterns, rules); + + const result = results[0]; + expect(result).not.toBeNull(); + expect(result.table).toMatchObject({ + schema: 'powersync_test_data', + name: 'test_data', + replication_id: ['_id'], + data_queries: true, + parameter_queries: false, + errors: [ + { + level: 'warning', + message: + 'changeStreamPreAndPostImages not enabled on powersync_test_data.test_data, will be enabled automatically' + } + ] + }); + }); + + test('validate postImages - off', async () => { + await using adapter = new MongoRouteAPIAdapter({ + type: 'mongodb', + 
...TEST_CONNECTION_OPTIONS, + postImages: PostImagesOption.OFF + }); + const db = adapter.db; + await clearTestDb(db); + + const collection = db.collection('test_data'); + await setupTable(db); + await insert(collection); + + const rules = SqlSyncRules.fromYaml( + ` +bucket_definitions: + global: + data: + - select _id as id, * from test_data + + `, + { + ...adapter.getParseSyncRulesOptions(), + // No schema-based validation at this point + schema: undefined + } + ); + const source_table_patterns = rules.getSourceTables(); + const results = await adapter.getDebugTablesInfo(source_table_patterns, rules); + + const result = results[0]; + expect(result).not.toBeNull(); + expect(result.table).toMatchObject({ + schema: 'powersync_test_data', + name: 'test_data', + replication_id: ['_id'], + data_queries: true, + parameter_queries: false, + errors: [] + }); + }); +}); + +/** + * Return all the inserts from the first transaction in the replication stream. + */ +async function getReplicationTx(replicationStream: mongo.ChangeStream, count: number) { + let transformed: SqliteRow[] = []; + for await (const doc of replicationStream) { + transformed.push(constructAfterRecord((doc as any).fullDocument)); + if (transformed.length == count) { + break; + } + } + return transformed; +} diff --git a/modules/module-mongodb/test/src/setup.ts b/modules/module-mongodb/test/src/setup.ts new file mode 100644 index 000000000..b924cf736 --- /dev/null +++ b/modules/module-mongodb/test/src/setup.ts @@ -0,0 +1,7 @@ +import { container } from '@powersync/lib-services-framework'; +import { beforeAll } from 'vitest'; + +beforeAll(() => { + // Executes for every test file + container.registerDefaults(); +}); diff --git a/modules/module-mongodb/test/src/slow_tests.test.ts b/modules/module-mongodb/test/src/slow_tests.test.ts new file mode 100644 index 000000000..535e967c4 --- /dev/null +++ b/modules/module-mongodb/test/src/slow_tests.test.ts @@ -0,0 +1,109 @@ +import { MONGO_STORAGE_FACTORY } from '@core-tests/util.js'; +import { BucketStorageFactory } from '@powersync/service-core'; +import * as mongo from 'mongodb'; +import { setTimeout } from 'node:timers/promises'; +import { describe, expect, test } from 'vitest'; +import { ChangeStreamTestContext, setSnapshotHistorySeconds } from './change_stream_utils.js'; +import { env } from './env.js'; + +type StorageFactory = () => Promise; + +const BASIC_SYNC_RULES = ` +bucket_definitions: + global: + data: + - SELECT _id as id, description FROM "test_data" +`; + +describe('change stream slow tests - mongodb', { timeout: 60_000 }, function () { + if (env.CI || env.SLOW_TESTS) { + defineSlowTests(MONGO_STORAGE_FACTORY); + } else { + // Need something in this file. + test('no-op', () => {}); + } +}); + +function defineSlowTests(factory: StorageFactory) { + test('replicating snapshot with lots of data', async () => { + await using context = await ChangeStreamTestContext.open(factory); + // Test with low minSnapshotHistoryWindowInSeconds, to trigger: + // > Read timestamp .. is older than the oldest available timestamp. + // This happened when we had {snapshot: true} in the initial + // snapshot session. 
+ await using _ = await setSnapshotHistorySeconds(context.client, 1); + const { db } = context; + await context.updateSyncRules(` +bucket_definitions: + global: + data: + - SELECT _id as id, description, num FROM "test_data1" + - SELECT _id as id, description, num FROM "test_data2" + `); + + const collection1 = db.collection('test_data1'); + const collection2 = db.collection('test_data2'); + + let operations: mongo.AnyBulkWriteOperation[] = []; + for (let i = 0; i < 10_000; i++) { + operations.push({ insertOne: { document: { description: `pre${i}`, num: i } } }); + } + await collection1.bulkWrite(operations); + await collection2.bulkWrite(operations); + + await context.replicateSnapshot(); + context.startStreaming(); + const checksum = await context.getChecksum('global[]'); + expect(checksum).toMatchObject({ + count: 20_000 + }); + }); + + test('writes concurrently with snapshot', async () => { + // If there is an issue with snapshotTime (the start LSN for the + // changestream), we may miss updates, which this test would + // hopefully catch. + + await using context = await ChangeStreamTestContext.open(factory); + const { db } = context; + await context.updateSyncRules(` +bucket_definitions: + global: + data: + - SELECT _id as id, description, num FROM "test_data" + `); + + const collection = db.collection('test_data'); + + let operations: mongo.AnyBulkWriteOperation[] = []; + for (let i = 0; i < 5_000; i++) { + operations.push({ insertOne: { document: { description: `pre${i}`, num: i } } }); + } + await collection.bulkWrite(operations); + + const snapshotPromise = context.replicateSnapshot(); + + for (let i = 49; i >= 0; i--) { + await collection.updateMany( + { num: { $gte: i * 100, $lt: i * 100 + 100 } }, + { $set: { description: 'updated' + i } } + ); + await setTimeout(20); + } + + await snapshotPromise; + context.startStreaming(); + + const data = await context.getBucketData('global[]', undefined, { limit: 50_000, chunkLimitBytes: 60_000_000 }); + + const preDocuments = data.filter((d) => JSON.parse(d.data! as string).description.startsWith('pre')).length; + const updatedDocuments = data.filter((d) => JSON.parse(d.data! as string).description.startsWith('updated')).length; + + // If the test works properly, preDocuments should be around 2000-3000. + // The total should be around 9000-9900. + // However, it is very sensitive to timing, so we allow a wide range. + // updatedDocuments must be strictly >= 5000, otherwise something broke. 
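+    // Each of the 5 000 rows is updated exactly once by the loop above (50 ranges of 100 rows each),
+    // and every update happens after the snapshot's start point, so a correct changestream should
+    // surface an 'updated' entry for all of them.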
+ expect(updatedDocuments).toBeGreaterThanOrEqual(5_000); + expect(preDocuments).toBeLessThanOrEqual(5_000); + }); +} diff --git a/modules/module-mongodb/test/src/util.ts b/modules/module-mongodb/test/src/util.ts new file mode 100644 index 000000000..a101f77a5 --- /dev/null +++ b/modules/module-mongodb/test/src/util.ts @@ -0,0 +1,52 @@ +import * as types from '@module/types/types.js'; +import { BucketStorageFactory, Metrics, MongoBucketStorage, OpId } from '@powersync/service-core'; + +import { env } from './env.js'; +import { logger } from '@powersync/lib-services-framework'; +import { connectMongo } from '@core-tests/util.js'; +import * as mongo from 'mongodb'; + +// The metrics need to be initialized before they can be used +await Metrics.initialise({ + disable_telemetry_sharing: true, + powersync_instance_id: 'test', + internal_metrics_endpoint: 'unused.for.tests.com' +}); +Metrics.getInstance().resetCounters(); + +export const TEST_URI = env.MONGO_TEST_DATA_URL; + +export const TEST_CONNECTION_OPTIONS = types.normalizeConnectionConfig({ + type: 'mongodb', + uri: TEST_URI +}); + +export type StorageFactory = () => Promise; + +export const INITIALIZED_MONGO_STORAGE_FACTORY: StorageFactory = async () => { + const db = await connectMongo(); + + // None of the PG tests insert data into this collection, so it was never created + if (!(await db.db.listCollections({ name: db.bucket_parameters.collectionName }).hasNext())) { + await db.db.createCollection('bucket_parameters'); + } + + await db.clear(); + + return new MongoBucketStorage(db, { slot_name_prefix: 'test_' }); +}; + +export async function clearTestDb(db: mongo.Db) { + await db.dropDatabase(); +} + +export async function connectMongoData() { + const client = new mongo.MongoClient(env.MONGO_TEST_DATA_URL, { + connectTimeoutMS: env.CI ? 15_000 : 5_000, + socketTimeoutMS: env.CI ? 15_000 : 5_000, + serverSelectionTimeoutMS: env.CI ? 
15_000 : 2_500, + useBigInt64: true + }); + const dbname = new URL(env.MONGO_TEST_DATA_URL).pathname.substring(1); + return { client, db: client.db(dbname) }; +} diff --git a/modules/module-mongodb/test/tsconfig.json b/modules/module-mongodb/test/tsconfig.json new file mode 100644 index 000000000..18898c4ee --- /dev/null +++ b/modules/module-mongodb/test/tsconfig.json @@ -0,0 +1,28 @@ +{ + "extends": "../../../tsconfig.base.json", + "compilerOptions": { + "rootDir": "src", + "baseUrl": "./", + "noEmit": true, + "esModuleInterop": true, + "skipLibCheck": true, + "sourceMap": true, + "paths": { + "@/*": ["../../../packages/service-core/src/*"], + "@module/*": ["../src/*"], + "@core-tests/*": ["../../../packages/service-core/test/src/*"] + } + }, + "include": ["src"], + "references": [ + { + "path": "../" + }, + { + "path": "../../../packages/service-core/test" + }, + { + "path": "../../../packages/service-core/" + } + ] +} diff --git a/modules/module-mongodb/tsconfig.json b/modules/module-mongodb/tsconfig.json new file mode 100644 index 000000000..6afdde02f --- /dev/null +++ b/modules/module-mongodb/tsconfig.json @@ -0,0 +1,28 @@ +{ + "extends": "../../tsconfig.base.json", + "compilerOptions": { + "rootDir": "src", + "outDir": "dist", + "esModuleInterop": true, + "skipLibCheck": true, + "sourceMap": true + }, + "include": ["src"], + "references": [ + { + "path": "../../packages/types" + }, + { + "path": "../../packages/jsonbig" + }, + { + "path": "../../packages/sync-rules" + }, + { + "path": "../../packages/service-core" + }, + { + "path": "../../libs/lib-services" + } + ] +} diff --git a/modules/module-mongodb/vitest.config.ts b/modules/module-mongodb/vitest.config.ts new file mode 100644 index 000000000..7a39c1f71 --- /dev/null +++ b/modules/module-mongodb/vitest.config.ts @@ -0,0 +1,15 @@ +import { defineConfig } from 'vitest/config'; +import tsconfigPaths from 'vite-tsconfig-paths'; + +export default defineConfig({ + plugins: [tsconfigPaths()], + test: { + setupFiles: './test/src/setup.ts', + poolOptions: { + threads: { + singleThread: true + } + }, + pool: 'threads' + } +}); diff --git a/modules/module-mysql/LICENSE b/modules/module-mysql/LICENSE new file mode 100644 index 000000000..c8efd46cc --- /dev/null +++ b/modules/module-mysql/LICENSE @@ -0,0 +1,67 @@ +# Functional Source License, Version 1.1, Apache 2.0 Future License + +## Abbreviation + +FSL-1.1-Apache-2.0 + +## Notice + +Copyright 2023-2024 Journey Mobile, Inc. + +## Terms and Conditions + +### Licensor ("We") + +The party offering the Software under these Terms and Conditions. + +### The Software + +The "Software" is each version of the software that we make available under these Terms and Conditions, as indicated by our inclusion of these Terms and Conditions with the Software. + +### License Grant + +Subject to your compliance with this License Grant and the Patents, Redistribution and Trademark clauses below, we hereby grant you the right to use, copy, modify, create derivative works, publicly perform, publicly display and redistribute the Software for any Permitted Purpose identified below. + +### Permitted Purpose + +A Permitted Purpose is any purpose other than a Competing Use. A Competing Use means making the Software available to others in a commercial product or service that: + +1. substitutes for the Software; +2. substitutes for any other product or service we offer using the Software that exists as of the date we make the Software available; or +3. 
offers the same or substantially similar functionality as the Software. + +Permitted Purposes specifically include using the Software: + +1. for your internal use and access; +2. for non-commercial education; +3. for non-commercial research; and +4. in connection with professional services that you provide to a licensee using the Software in accordance with these Terms and Conditions. + +### Patents + +To the extent your use for a Permitted Purpose would necessarily infringe our patents, the license grant above includes a license under our patents. If you make a claim against any party that the Software infringes or contributes to the infringement of any patent, then your patent license to the Software ends immediately. + +### Redistribution + +The Terms and Conditions apply to all copies, modifications and derivatives of the Software. +If you redistribute any copies, modifications or derivatives of the Software, you must include a copy of or a link to these Terms and Conditions and not remove any copyright notices provided in or with the Software. + +### Disclaimer + +THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTIES OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION WARRANTIES OF FITNESS FOR A PARTICULAR PURPOSE, MERCHANTABILITY, TITLE OR NON-INFRINGEMENT. +IN NO EVENT WILL WE HAVE ANY LIABILITY TO YOU ARISING OUT OF OR RELATED TO THE SOFTWARE, INCLUDING INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES, EVEN IF WE HAVE BEEN INFORMED OF THEIR POSSIBILITY IN ADVANCE. + +### Trademarks + +Except for displaying the License Details and identifying us as the origin of the Software, you have no right under these Terms and Conditions to use our trademarks, trade names, service marks or product names. + +## Grant of Future License + +We hereby irrevocably grant you an additional license to use the Software under the Apache License, Version 2.0 that is effective on the second anniversary of the date we make the Software available. On or after that date, you may use the Software under the Apache License, Version 2.0, in which case the following will apply: + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. diff --git a/modules/module-mysql/README.md b/modules/module-mysql/README.md new file mode 100644 index 000000000..93b33d14c --- /dev/null +++ b/modules/module-mysql/README.md @@ -0,0 +1,3 @@ +# PowerSync MySQL Module + +This is a module which provides MySQL replication to PowerSync. diff --git a/modules/module-mysql/dev/.env.template b/modules/module-mysql/dev/.env.template new file mode 100644 index 000000000..d82ac9668 --- /dev/null +++ b/modules/module-mysql/dev/.env.template @@ -0,0 +1,2 @@ +PS_MONGO_URI=mongodb://mongo:27017/powersync_demo +PS_PORT=8080 \ No newline at end of file diff --git a/modules/module-mysql/dev/README.md b/modules/module-mysql/dev/README.md new file mode 100644 index 000000000..fe62ef533 --- /dev/null +++ b/modules/module-mysql/dev/README.md @@ -0,0 +1,9 @@ +# MySQL Development Helpers + +This folder contains some helpers for developing with MySQL. 
+ +- `./.env.template` contains basic settings to be applied to a root `.env` file +- `./config` contains YAML configuration files for a MySQL todo list application +- `./docker/mysql` contains a docker compose file for starting Mysql + +TODO this does not contain any auth or backend functionality. diff --git a/modules/module-mysql/dev/config/sync_rules.yaml b/modules/module-mysql/dev/config/sync_rules.yaml new file mode 100644 index 000000000..5c0eb9932 --- /dev/null +++ b/modules/module-mysql/dev/config/sync_rules.yaml @@ -0,0 +1,10 @@ +# See Documentation for more information: +# https://docs.powersync.com/usage/sync-rules +# Note that changes to this file are not watched. +# The service needs to be restarted for changes to take effect. + +bucket_definitions: + global: + data: + - SELECT * FROM lists + - SELECT * FROM todos diff --git a/modules/module-mysql/dev/docker/mysql/docker-compose.yaml b/modules/module-mysql/dev/docker/mysql/docker-compose.yaml new file mode 100644 index 000000000..50dfd2d2b --- /dev/null +++ b/modules/module-mysql/dev/docker/mysql/docker-compose.yaml @@ -0,0 +1,17 @@ +services: + mysql: + image: mysql:8.0 + environment: + MYSQL_ROOT_PASSWORD: root_password + MYSQL_DATABASE: mydatabase + MYSQL_USER: myuser + MYSQL_PASSWORD: mypassword + ports: + - '3306:3306' + volumes: + - ./init-scripts/my.cnf:/etc/mysql/my.cnf + - ./init-scripts/mysql.sql:/docker-entrypoint-initdb.d/init_user.sql + - mysql_data:/var/lib/mysql + +volumes: + mysql_data: diff --git a/modules/module-mysql/dev/docker/mysql/init-scripts/my.cnf b/modules/module-mysql/dev/docker/mysql/init-scripts/my.cnf new file mode 100644 index 000000000..99f01c70a --- /dev/null +++ b/modules/module-mysql/dev/docker/mysql/init-scripts/my.cnf @@ -0,0 +1,9 @@ +[mysqld] +gtid_mode = ON +enforce-gtid-consistency = ON +# Row format required for ZongJi +binlog_format = row +log_bin=mysql-bin +server-id=1 +binlog-do-db=mydatabase +replicate-do-table=mydatabase.lists \ No newline at end of file diff --git a/modules/module-mysql/dev/docker/mysql/init-scripts/mysql.sql b/modules/module-mysql/dev/docker/mysql/init-scripts/mysql.sql new file mode 100644 index 000000000..8e5cb3538 --- /dev/null +++ b/modules/module-mysql/dev/docker/mysql/init-scripts/mysql.sql @@ -0,0 +1,38 @@ +-- Create a user with necessary privileges +CREATE USER 'repl_user'@'%' IDENTIFIED BY 'good_password'; + +-- Grant replication client privilege +GRANT REPLICATION SLAVE, REPLICATION CLIENT, RELOAD ON *.* TO 'repl_user'@'%'; +GRANT REPLICATION SLAVE, REPLICATION CLIENT, RELOAD ON *.* TO 'myuser'@'%'; + +-- Grant access to the specific database +GRANT ALL PRIVILEGES ON mydatabase.* TO 'repl_user'@'%'; + +-- Apply changes +FLUSH PRIVILEGES; + +CREATE TABLE lists ( + id CHAR(36) NOT NULL DEFAULT (UUID()), -- String UUID (36 characters) + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + name TEXT NOT NULL, + owner_id CHAR(36) NOT NULL, + PRIMARY KEY (id) +); + +CREATE TABLE todos ( + id CHAR(36) NOT NULL DEFAULT (UUID()), -- String UUID (36 characters) + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + completed_at TIMESTAMP NULL, + description TEXT NOT NULL, + completed BOOLEAN NOT NULL DEFAULT FALSE, + created_by CHAR(36) NULL, + completed_by CHAR(36) NULL, + list_id CHAR(36) NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY (list_id) REFERENCES lists (id) ON DELETE CASCADE +); + +-- TODO fix case where no data is present +INSERT INTO lists (id, name, owner_id) +VALUES + (UUID(), 'Do a demo', UUID()); \ No newline at end of file diff --git 
a/modules/module-mysql/package.json b/modules/module-mysql/package.json new file mode 100644 index 000000000..584ff1fa9 --- /dev/null +++ b/modules/module-mysql/package.json @@ -0,0 +1,49 @@ +{ + "name": "@powersync/service-module-mysql", + "repository": "https://github.com/powersync-ja/powersync-service", + "types": "dist/index.d.ts", + "version": "0.0.1", + "license": "FSL-1.1-Apache-2.0", + "main": "dist/index.js", + "type": "module", + "publishConfig": { + "access": "public" + }, + "scripts": { + "build": "tsc -b", + "build:tests": "tsc -b test/tsconfig.json", + "clean": "rm -rf ./dist && tsc -b --clean", + "test": "vitest" + }, + "exports": { + ".": { + "import": "./dist/index.js", + "require": "./dist/index.js", + "default": "./dist/index.js" + }, + "./types": { + "import": "./dist/types/types.js", + "require": "./dist/types/types.js", + "default": "./dist/types/types.js" + } + }, + "dependencies": { + "@powersync/lib-services-framework": "workspace:*", + "@powersync/service-core": "workspace:*", + "@powersync/service-sync-rules": "workspace:*", + "@powersync/service-types": "workspace:*", + "@powersync/service-jsonbig": "workspace:*", + "@powersync/mysql-zongji": "^0.1.0", + "semver": "^7.5.4", + "async": "^3.2.4", + "mysql2": "^3.11.0", + "ts-codec": "^1.2.2", + "uri-js": "^4.4.1", + "uuid": "^9.0.1" + }, + "devDependencies": { + "@types/semver": "^7.5.4", + "@types/async": "^3.2.24", + "@types/uuid": "^9.0.4" + } +} diff --git a/modules/module-mysql/src/api/MySQLRouteAPIAdapter.ts b/modules/module-mysql/src/api/MySQLRouteAPIAdapter.ts new file mode 100644 index 000000000..faa140adc --- /dev/null +++ b/modules/module-mysql/src/api/MySQLRouteAPIAdapter.ts @@ -0,0 +1,359 @@ +import { api, ParseSyncRulesOptions, storage } from '@powersync/service-core'; + +import * as sync_rules from '@powersync/service-sync-rules'; +import * as service_types from '@powersync/service-types'; +import mysql from 'mysql2/promise'; +import * as common from '../common/common-index.js'; +import * as mysql_utils from '../utils/mysql-utils.js'; +import * as types from '../types/types.js'; +import { toExpressionTypeFromMySQLType } from '../common/common-index.js'; + +type SchemaResult = { + schema_name: string; + table_name: string; + columns: string; +}; + +export class MySQLRouteAPIAdapter implements api.RouteAPI { + protected pool: mysql.Pool; + + constructor(protected config: types.ResolvedConnectionConfig) { + this.pool = mysql_utils.createPool(config).promise(); + } + + async shutdown(): Promise { + return this.pool.end(); + } + + async getSourceConfig(): Promise { + return this.config; + } + + getParseSyncRulesOptions(): ParseSyncRulesOptions { + return { + // In MySQL Schema and Database are the same thing. 
There is no default database + defaultSchema: this.config.database + }; + } + + async getConnectionStatus(): Promise { + const base = { + id: this.config.id, + uri: `mysql://${this.config.hostname}:${this.config.port}/${this.config.database}` + }; + try { + await this.retriedQuery({ + query: `SELECT 'PowerSync connection test'` + }); + } catch (e) { + return { + ...base, + connected: false, + errors: [{ level: 'fatal', message: `${e.code} - message: ${e.message}` }] + }; + } + const connection = await this.pool.getConnection(); + try { + const errors = await common.checkSourceConfiguration(connection); + if (errors.length) { + return { + ...base, + connected: true, + errors: errors.map((e) => ({ level: 'fatal', message: e })) + }; + } + } catch (e) { + return { + ...base, + connected: true, + errors: [{ level: 'fatal', message: e.message }] + }; + } finally { + connection.release(); + } + return { + ...base, + connected: true, + errors: [] + }; + } + + async executeQuery(query: string, params: any[]): Promise { + if (!this.config.debug_api) { + return service_types.internal_routes.ExecuteSqlResponse.encode({ + results: { + columns: [], + rows: [] + }, + success: false, + error: 'SQL querying is not enabled' + }); + } + try { + const [results, fields] = await this.pool.query(query, params); + return service_types.internal_routes.ExecuteSqlResponse.encode({ + success: true, + results: { + columns: fields.map((c) => c.name), + rows: results.map((row) => { + /** + * Row will be in the format: + * @rows: [ { test: 2 } ] + */ + return fields.map((c) => { + const value = row[c.name]; + const sqlValue = sync_rules.toSyncRulesValue(value); + if (typeof sqlValue == 'bigint') { + return Number(value); + } else if (value instanceof Date) { + return value.toISOString(); + } else if (sync_rules.isJsonValue(sqlValue)) { + return sqlValue; + } else { + return null; + } + }); + }) + } + }); + } catch (e) { + return service_types.internal_routes.ExecuteSqlResponse.encode({ + results: { + columns: [], + rows: [] + }, + success: false, + error: e.message + }); + } + } + + async getDebugTablesInfo( + tablePatterns: sync_rules.TablePattern[], + sqlSyncRules: sync_rules.SqlSyncRules + ): Promise { + let result: api.PatternResult[] = []; + + for (let tablePattern of tablePatterns) { + const schema = tablePattern.schema; + let patternResult: api.PatternResult = { + schema: schema, + pattern: tablePattern.tablePattern, + wildcard: tablePattern.isWildcard + }; + result.push(patternResult); + + if (tablePattern.isWildcard) { + patternResult.tables = []; + const prefix = tablePattern.tablePrefix; + + const [results] = await this.pool.query( + `SELECT + TABLE_NAME AS table_name + FROM + INFORMATION_SCHEMA.TABLES + WHERE + TABLE_SCHEMA = ? + AND TABLE_NAME LIKE ?`, + [schema, tablePattern.tablePattern] + ); + + for (let row of results) { + const name = row.table_name as string; + + if (!name.startsWith(prefix)) { + continue; + } + + const details = await this.getDebugTableInfo(tablePattern, name, sqlSyncRules); + patternResult.tables.push(details); + } + } else { + const [results] = await this.pool.query( + `SELECT + TABLE_NAME AS table_name + FROM + INFORMATION_SCHEMA.TABLES + WHERE + TABLE_SCHEMA = ? 
+ AND TABLE_NAME = ?`, + [tablePattern.schema, tablePattern.tablePattern] + ); + + if (results.length == 0) { + // Table not found + patternResult.table = await this.getDebugTableInfo(tablePattern, tablePattern.name, sqlSyncRules); + } else { + const row = results[0]; + patternResult.table = await this.getDebugTableInfo(tablePattern, row.table_name, sqlSyncRules); + } + } + } + + return result; + } + + protected async getDebugTableInfo( + tablePattern: sync_rules.TablePattern, + tableName: string, + syncRules: sync_rules.SqlSyncRules + ): Promise { + const { schema } = tablePattern; + + let idColumnsResult: common.ReplicationIdentityColumnsResult | null = null; + let idColumnsError: service_types.ReplicationError | null = null; + let connection: mysql.PoolConnection | null = null; + try { + connection = await this.pool.getConnection(); + idColumnsResult = await common.getReplicationIdentityColumns({ + connection: connection, + schema, + table_name: tableName + }); + } catch (ex) { + idColumnsError = { level: 'fatal', message: ex.message }; + } finally { + connection?.release(); + } + + const idColumns = idColumnsResult?.columns ?? []; + const sourceTable = new storage.SourceTable(0, this.config.tag, tableName, schema, tableName, idColumns, true); + const syncData = syncRules.tableSyncsData(sourceTable); + const syncParameters = syncRules.tableSyncsParameters(sourceTable); + + if (idColumns.length == 0 && idColumnsError == null) { + let message = `No replication id found for ${sourceTable.qualifiedName}. Replica identity: ${idColumnsResult?.identity}.`; + if (idColumnsResult?.identity == 'default') { + message += ' Configure a primary key on the table.'; + } + idColumnsError = { level: 'fatal', message }; + } + + let selectError: service_types.ReplicationError | null = null; + try { + await this.retriedQuery({ + query: `SELECT * FROM ${sourceTable.table} LIMIT 1` + }); + } catch (e) { + selectError = { level: 'fatal', message: e.message }; + } + + return { + schema: schema, + name: tableName, + pattern: tablePattern.isWildcard ? tablePattern.tablePattern : undefined, + replication_id: idColumns.map((c) => c.name), + data_queries: syncData, + parameter_queries: syncParameters, + errors: [idColumnsError, selectError].filter((error) => error != null) as service_types.ReplicationError[] + }; + } + + async getReplicationLag(options: api.ReplicationLagOptions): Promise { + const { bucketStorage } = options; + const lastCheckpoint = await bucketStorage.getCheckpoint(); + + const current = lastCheckpoint.lsn + ? 
common.ReplicatedGTID.fromSerialized(lastCheckpoint.lsn) + : common.ReplicatedGTID.ZERO; + + const connection = await this.pool.getConnection(); + const head = await common.readExecutedGtid(connection); + const lag = await current.distanceTo(connection, head); + connection.release(); + if (lag == null) { + throw new Error(`Could not determine replication lag`); + } + + return lag; + } + + async getReplicationHead(): Promise { + const connection = await this.pool.getConnection(); + const result = await common.readExecutedGtid(connection); + connection.release(); + return result.comparable; + } + + async getConnectionSchema(): Promise { + const [results] = await this.retriedQuery({ + query: ` + SELECT + tbl.schema_name, + tbl.table_name, + tbl.quoted_name, + JSON_ARRAYAGG(JSON_OBJECT('column_name', a.column_name, 'data_type', a.data_type)) AS columns + FROM + ( + SELECT + TABLE_SCHEMA AS schema_name, + TABLE_NAME AS table_name, + CONCAT('\`', TABLE_SCHEMA, '\`.\`', TABLE_NAME, '\`') AS quoted_name + FROM + INFORMATION_SCHEMA.TABLES + WHERE + TABLE_TYPE = 'BASE TABLE' + AND TABLE_SCHEMA NOT IN ('information_schema', 'mysql', 'performance_schema', 'sys') + ) AS tbl + LEFT JOIN + ( + SELECT + TABLE_SCHEMA AS schema_name, + TABLE_NAME AS table_name, + COLUMN_NAME AS column_name, + COLUMN_TYPE AS data_type + FROM + INFORMATION_SCHEMA.COLUMNS + ) AS a + ON + tbl.schema_name = a.schema_name + AND tbl.table_name = a.table_name + GROUP BY + tbl.schema_name, tbl.table_name, tbl.quoted_name; + ` + }); + + /** + * Reduces the SQL results into a Record of {@link DatabaseSchema} + * then returns the values as an array. + */ + + return Object.values( + (results as SchemaResult[]).reduce((hash: Record, result) => { + const schema = + hash[result.schema_name] || + (hash[result.schema_name] = { + name: result.schema_name, + tables: [] + }); + + const columns = JSON.parse(result.columns).map((column: { data_type: string; column_name: string }) => ({ + name: column.column_name, + type: column.data_type, + sqlite_type: toExpressionTypeFromMySQLType(column.data_type).typeFlags, + internal_type: column.data_type, + pg_type: column.data_type + })); + + schema.tables.push({ + name: result.table_name, + columns: columns + }); + + return hash; + }, {}) + ); + } + + protected async retriedQuery(options: { query: string; params?: any[] }) { + const connection = await this.pool.getConnection(); + + return mysql_utils + .retriedQuery({ + connection: connection, + query: options.query, + params: options.params + }) + .finally(() => connection.release()); + } +} diff --git a/modules/module-mysql/src/common/ReplicatedGTID.ts b/modules/module-mysql/src/common/ReplicatedGTID.ts new file mode 100644 index 000000000..d51d43a73 --- /dev/null +++ b/modules/module-mysql/src/common/ReplicatedGTID.ts @@ -0,0 +1,158 @@ +import mysql from 'mysql2/promise'; +import * as uuid from 'uuid'; +import * as mysql_utils from '../utils/mysql-utils.js'; + +export type BinLogPosition = { + filename: string; + offset: number; +}; + +export type ReplicatedGTIDSpecification = { + raw_gtid: string; + /** + * The (end) position in a BinLog file where this transaction has been replicated in. + */ + position: BinLogPosition; +}; + +export type BinLogGTIDFormat = { + server_id: Buffer; + transaction_range: number; +}; + +export type BinLogGTIDEvent = { + raw_gtid: BinLogGTIDFormat; + position: BinLogPosition; +}; + +/** + * A wrapper around the MySQL GTID value. 
+ * This adds and tracks additional metadata such as the BinLog filename + * and position where this GTID could be located. + */ +export class ReplicatedGTID { + static fromSerialized(comparable: string): ReplicatedGTID { + return new ReplicatedGTID(ReplicatedGTID.deserialize(comparable)); + } + + private static deserialize(comparable: string): ReplicatedGTIDSpecification { + const components = comparable.split('|'); + if (components.length < 3) { + throw new Error(`Invalid serialized GTID: ${comparable}`); + } + + return { + raw_gtid: components[1], + position: { + filename: components[2], + offset: parseInt(components[3]) + } satisfies BinLogPosition + }; + } + + static fromBinLogEvent(event: BinLogGTIDEvent) { + const { raw_gtid, position } = event; + const stringGTID = `${uuid.stringify(raw_gtid.server_id)}:${raw_gtid.transaction_range}`; + return new ReplicatedGTID({ + raw_gtid: stringGTID, + position + }); + } + + /** + * Special case for the zero GTID which means no transactions have been executed. + */ + static ZERO = new ReplicatedGTID({ raw_gtid: '0:0', position: { filename: '', offset: 0 } }); + + constructor(protected options: ReplicatedGTIDSpecification) {} + + /** + * Get the BinLog position of this replicated GTID event + */ + get position() { + return this.options.position; + } + + /** + * Get the raw Global Transaction ID. This of the format `server_id:transaction_ranges` + */ + get raw() { + return this.options.raw_gtid; + } + + get serverId() { + return this.options.raw_gtid.split(':')[0]; + } + + /** + * Transforms a GTID into a comparable string format, ensuring lexicographical + * order aligns with the GTID's relative age. This assumes that all GTIDs + * have the same server ID. + * + * @returns A comparable string in the format + * `padded_end_transaction|raw_gtid|binlog_filename|binlog_position` + */ + get comparable() { + const { raw, position } = this; + const [, transactionRanges] = this.raw.split(':'); + + let maxTransactionId = 0; + + for (const range of transactionRanges.split(',')) { + const [start, end] = range.split('-'); + maxTransactionId = Math.max(maxTransactionId, parseInt(start, 10), parseInt(end || start, 10)); + } + + const paddedTransactionId = maxTransactionId.toString().padStart(16, '0'); + return [paddedTransactionId, raw, position.filename, position.offset].join('|'); + } + + toString() { + return this.comparable; + } + + /** + * Calculates the distance in bytes from this GTID to the provided argument. + */ + async distanceTo(connection: mysql.Connection, to: ReplicatedGTID): Promise { + const [logFiles] = await mysql_utils.retriedQuery({ + connection, + query: `SHOW BINARY LOGS;` + }); + + // Default to the first file for the start to handle the zero GTID case. + const startFileIndex = Math.max( + logFiles.findIndex((f) => f['Log_name'] == this.position.filename), + 0 + ); + const startFileEntry = logFiles[startFileIndex]; + + if (!startFileEntry) { + return null; + } + + /** + * Fall back to the next position for comparison if the replicated position is not present + */ + const endPosition = to.position; + + // Default to the past the last file to cater for the HEAD case + const testEndFileIndex = logFiles.findIndex((f) => f['Log_name'] == endPosition?.filename); + // If the endPosition is not defined and found. Fallback to the last file as the end + const endFileIndex = testEndFileIndex < 0 && !endPosition ? 
logFiles.length : logFiles.length - 1; + + const endFileEntry = logFiles[endFileIndex]; + + if (!endFileEntry) { + return null; + } + + return ( + startFileEntry['File_size'] - + this.position.offset - + endFileEntry['File_size'] + + endPosition.offset + + logFiles.slice(startFileIndex + 1, endFileIndex).reduce((sum, file) => sum + file['File_size'], 0) + ); + } +} diff --git a/modules/module-mysql/src/common/check-source-configuration.ts b/modules/module-mysql/src/common/check-source-configuration.ts new file mode 100644 index 000000000..6319fc3b7 --- /dev/null +++ b/modules/module-mysql/src/common/check-source-configuration.ts @@ -0,0 +1,58 @@ +import mysqlPromise from 'mysql2/promise'; +import * as mysql_utils from '../utils/mysql-utils.js'; + +const MIN_SUPPORTED_VERSION = '5.7.0'; + +export async function checkSourceConfiguration(connection: mysqlPromise.Connection): Promise { + const errors: string[] = []; + + const version = await mysql_utils.getMySQLVersion(connection); + if (!mysql_utils.isVersionAtLeast(version, MIN_SUPPORTED_VERSION)) { + errors.push(`MySQL versions older than ${MIN_SUPPORTED_VERSION} are not supported. Your version is: ${version}.`); + } + + const [[result]] = await mysql_utils.retriedQuery({ + connection, + query: ` + SELECT + @@GLOBAL.gtid_mode AS gtid_mode, + @@GLOBAL.log_bin AS log_bin, + @@GLOBAL.server_id AS server_id, + @@GLOBAL.log_bin_basename AS binlog_file, + @@GLOBAL.log_bin_index AS binlog_index_file + ` + }); + + if (result.gtid_mode != 'ON') { + errors.push(`GTID is not enabled, it is currently set to ${result.gtid_mode}. Please enable it.`); + } + + if (result.log_bin != 1) { + errors.push('Binary logging is not enabled. Please enable it.'); + } + + if (result.server_id < 0) { + errors.push( + `Your Server ID setting is too low, it must be greater than 0. It is currently ${result.server_id}. Please correct your configuration.` + ); + } + + if (!result.binlog_file) { + errors.push('Binary log file is not set. Please check your settings.'); + } + + if (!result.binlog_index_file) { + errors.push('Binary log index file is not set. Please check your settings.'); + } + + const [[binLogFormatResult]] = await mysql_utils.retriedQuery({ + connection, + query: `SHOW VARIABLES LIKE 'binlog_format';` + }); + + if (binLogFormatResult.Value !== 'ROW') { + errors.push('Binary log format must be set to "ROW". 
Please correct your configuration'); + } + + return errors; +} diff --git a/modules/module-mysql/src/common/common-index.ts b/modules/module-mysql/src/common/common-index.ts new file mode 100644 index 000000000..6da005718 --- /dev/null +++ b/modules/module-mysql/src/common/common-index.ts @@ -0,0 +1,6 @@ +export * from './check-source-configuration.js'; +export * from './get-replication-columns.js'; +export * from './get-tables-from-pattern.js'; +export * from './mysql-to-sqlite.js'; +export * from './read-executed-gtid.js'; +export * from './ReplicatedGTID.js'; diff --git a/modules/module-mysql/src/common/get-replication-columns.ts b/modules/module-mysql/src/common/get-replication-columns.ts new file mode 100644 index 000000000..fa0eb8fde --- /dev/null +++ b/modules/module-mysql/src/common/get-replication-columns.ts @@ -0,0 +1,124 @@ +import { storage } from '@powersync/service-core'; +import mysqlPromise from 'mysql2/promise'; +import * as mysql_utils from '../utils/mysql-utils.js'; + +export type GetReplicationColumnsOptions = { + connection: mysqlPromise.Connection; + schema: string; + table_name: string; +}; + +export type ReplicationIdentityColumnsResult = { + columns: storage.ColumnDescriptor[]; + // TODO maybe export an enum from the core package + identity: string; +}; + +export async function getReplicationIdentityColumns( + options: GetReplicationColumnsOptions +): Promise { + const { connection, schema, table_name } = options; + const [primaryKeyColumns] = await mysql_utils.retriedQuery({ + connection: connection, + query: ` + SELECT + s.COLUMN_NAME AS name, + c.DATA_TYPE AS type + FROM + INFORMATION_SCHEMA.STATISTICS s + JOIN + INFORMATION_SCHEMA.COLUMNS c + ON + s.TABLE_SCHEMA = c.TABLE_SCHEMA + AND s.TABLE_NAME = c.TABLE_NAME + AND s.COLUMN_NAME = c.COLUMN_NAME + WHERE + s.TABLE_SCHEMA = ? + AND s.TABLE_NAME = ? + AND s.INDEX_NAME = 'PRIMARY' + ORDER BY + s.SEQ_IN_INDEX; + `, + params: [schema, table_name] + }); + + if (primaryKeyColumns.length) { + return { + columns: primaryKeyColumns.map((row) => ({ + name: row.name, + type: row.type + })), + identity: 'default' + }; + } + + // TODO: test code with tables with unique keys, compound key etc. + // No primary key, find the first valid unique key + const [uniqueKeyColumns] = await mysql_utils.retriedQuery({ + connection: connection, + query: ` + SELECT + s.INDEX_NAME, + s.COLUMN_NAME, + c.DATA_TYPE, + s.NON_UNIQUE, + s.NULLABLE + FROM + INFORMATION_SCHEMA.STATISTICS s + JOIN + INFORMATION_SCHEMA.COLUMNS c + ON + s.TABLE_SCHEMA = c.TABLE_SCHEMA + AND s.TABLE_NAME = c.TABLE_NAME + AND s.COLUMN_NAME = c.COLUMN_NAME + WHERE + s.TABLE_SCHEMA = ? + AND s.TABLE_NAME = ? + AND s.INDEX_NAME != 'PRIMARY' + AND s.NON_UNIQUE = 0 + ORDER BY s.SEQ_IN_INDEX; + `, + params: [schema, table_name] + }); + + if (uniqueKeyColumns.length > 0) { + return { + columns: uniqueKeyColumns.map((col) => ({ + name: col.COLUMN_NAME, + type: col.DATA_TYPE + })), + identity: 'index' + }; + } + + const [allColumns] = await mysql_utils.retriedQuery({ + connection: connection, + query: ` + SELECT + s.COLUMN_NAME AS name, + c.DATA_TYPE as type + FROM + INFORMATION_SCHEMA.COLUMNS s + JOIN + INFORMATION_SCHEMA.COLUMNS c + ON + s.TABLE_SCHEMA = c.TABLE_SCHEMA + AND s.TABLE_NAME = c.TABLE_NAME + AND s.COLUMN_NAME = c.COLUMN_NAME + WHERE + s.TABLE_SCHEMA = ? + AND s.TABLE_NAME = ? 
+ ORDER BY + s.ORDINAL_POSITION; + `, + params: [schema, table_name] + }); + + return { + columns: allColumns.map((row) => ({ + name: row.name, + type: row.type + })), + identity: 'full' + }; +} diff --git a/modules/module-mysql/src/common/get-tables-from-pattern.ts b/modules/module-mysql/src/common/get-tables-from-pattern.ts new file mode 100644 index 000000000..166bf93a0 --- /dev/null +++ b/modules/module-mysql/src/common/get-tables-from-pattern.ts @@ -0,0 +1,44 @@ +import * as sync_rules from '@powersync/service-sync-rules'; +import mysql from 'mysql2/promise'; + +export type GetDebugTablesInfoOptions = { + connection: mysql.Connection; + tablePattern: sync_rules.TablePattern; +}; + +export async function getTablesFromPattern(options: GetDebugTablesInfoOptions): Promise> { + const { connection, tablePattern } = options; + const schema = tablePattern.schema; + + if (tablePattern.isWildcard) { + const [results] = await connection.query( + `SELECT + TABLE_NAME AS table_name + FROM + INFORMATION_SCHEMA.TABLES + WHERE + TABLE_SCHEMA = ? + AND TABLE_NAME LIKE ?`, + [schema, tablePattern.tablePattern] + ); + + return new Set( + results + .filter((result) => result.table_name.startsWith(tablePattern.tablePrefix)) + .map((result) => result.table_name) + ); + } else { + const [[match]] = await connection.query( + `SELECT + TABLE_NAME AS table_name + FROM + INFORMATION_SCHEMA.TABLES + WHERE + TABLE_SCHEMA = ? + AND TABLE_NAME = ?`, + [tablePattern.schema, tablePattern.tablePattern] + ); + // Only return the first result + return new Set([match.table_name]); + } +} diff --git a/modules/module-mysql/src/common/mysql-to-sqlite.ts b/modules/module-mysql/src/common/mysql-to-sqlite.ts new file mode 100644 index 000000000..8cc2487d8 --- /dev/null +++ b/modules/module-mysql/src/common/mysql-to-sqlite.ts @@ -0,0 +1,206 @@ +import * as sync_rules from '@powersync/service-sync-rules'; +import { ExpressionType } from '@powersync/service-sync-rules'; +import { ColumnDescriptor } from '@powersync/service-core'; +import mysql from 'mysql2'; +import { JSONBig, JsonContainer } from '@powersync/service-jsonbig'; +import { ColumnDefinition, TableMapEntry } from '@powersync/mysql-zongji'; + +export enum ADDITIONAL_MYSQL_TYPES { + DATETIME2 = 18, + TIMESTAMP2 = 17, + BINARY = 100, + VARBINARY = 101, + TEXT = 102 +} + +export const MySQLTypesMap: { [key: number]: string } = {}; +for (const [name, code] of Object.entries(mysql.Types)) { + MySQLTypesMap[code as number] = name; +} +for (const [name, code] of Object.entries(ADDITIONAL_MYSQL_TYPES)) { + MySQLTypesMap[code as number] = name; +} + +export function toColumnDescriptors(columns: mysql.FieldPacket[]): Map; +export function toColumnDescriptors(tableMap: TableMapEntry): Map; + +export function toColumnDescriptors(columns: mysql.FieldPacket[] | TableMapEntry): Map { + const columnMap = new Map(); + if (Array.isArray(columns)) { + for (const column of columns) { + columnMap.set(column.name, toColumnDescriptorFromFieldPacket(column)); + } + } else { + for (const column of columns.columns) { + columnMap.set(column.name, toColumnDescriptorFromDefinition(column)); + } + } + + return columnMap; +} + +export function toColumnDescriptorFromFieldPacket(column: mysql.FieldPacket): ColumnDescriptor { + let typeId = column.type!; + const BINARY_FLAG = 128; + const MYSQL_ENUM_FLAG = 256; + const MYSQL_SET_FLAG = 2048; + + switch (column.type) { + case mysql.Types.STRING: + if (((column.flags as number) & BINARY_FLAG) !== 0) { + typeId = ADDITIONAL_MYSQL_TYPES.BINARY; + } else if 
(((column.flags as number) & MYSQL_ENUM_FLAG) !== 0) { + typeId = mysql.Types.ENUM; + } else if (((column.flags as number) & MYSQL_SET_FLAG) !== 0) { + typeId = mysql.Types.SET; + } + break; + + case mysql.Types.VAR_STRING: + typeId = ((column.flags as number) & BINARY_FLAG) !== 0 ? ADDITIONAL_MYSQL_TYPES.VARBINARY : column.type; + break; + case mysql.Types.BLOB: + typeId = ((column.flags as number) & BINARY_FLAG) === 0 ? ADDITIONAL_MYSQL_TYPES.TEXT : column.type; + break; + } + + const columnType = MySQLTypesMap[typeId]; + + return { + name: column.name, + type: columnType, + typeId: typeId + }; +} + +export function toColumnDescriptorFromDefinition(column: ColumnDefinition): ColumnDescriptor { + let typeId = column.type; + + switch (column.type) { + case mysql.Types.STRING: + typeId = !column.charset ? ADDITIONAL_MYSQL_TYPES.BINARY : column.type; + break; + case mysql.Types.VAR_STRING: + case mysql.Types.VARCHAR: + typeId = !column.charset ? ADDITIONAL_MYSQL_TYPES.VARBINARY : column.type; + break; + case mysql.Types.BLOB: + typeId = column.charset ? ADDITIONAL_MYSQL_TYPES.TEXT : column.type; + break; + } + + const columnType = MySQLTypesMap[typeId]; + + return { + name: column.name, + type: columnType, + typeId: typeId + }; +} + +export function toSQLiteRow(row: Record, columns: Map): sync_rules.SqliteRow { + for (let key in row) { + // We are very much expecting the column to be there + const column = columns.get(key)!; + + if (row[key] !== null) { + switch (column.typeId) { + case mysql.Types.DATE: + // Only parse the date part + row[key] = row[key].toISOString().split('T')[0]; + break; + case mysql.Types.DATETIME: + case ADDITIONAL_MYSQL_TYPES.DATETIME2: + case mysql.Types.TIMESTAMP: + case ADDITIONAL_MYSQL_TYPES.TIMESTAMP2: + row[key] = row[key].toISOString(); + break; + case mysql.Types.JSON: + if (typeof row[key] === 'string') { + row[key] = new JsonContainer(row[key]); + } + break; + case mysql.Types.BIT: + case mysql.Types.BLOB: + case mysql.Types.TINY_BLOB: + case mysql.Types.MEDIUM_BLOB: + case mysql.Types.LONG_BLOB: + case ADDITIONAL_MYSQL_TYPES.BINARY: + case ADDITIONAL_MYSQL_TYPES.VARBINARY: + row[key] = new Uint8Array(Object.values(row[key])); + break; + case mysql.Types.LONGLONG: + if (typeof row[key] === 'string') { + row[key] = BigInt(row[key]); + } else if (typeof row[key] === 'number') { + // Zongji returns BIGINT as a number when it can be represented as a number + row[key] = BigInt(row[key]); + } + break; + case mysql.Types.TINY: + case mysql.Types.SHORT: + case mysql.Types.LONG: + case mysql.Types.INT24: + // Handle all integer values a BigInt + if (typeof row[key] === 'number') { + row[key] = BigInt(row[key]); + } + break; + case mysql.Types.SET: + // Convert to JSON array from string + const values = row[key].split(','); + row[key] = JSONBig.stringify(values); + break; + } + } + } + return sync_rules.toSyncRulesRow(row); +} + +export function toExpressionTypeFromMySQLType(mysqlType: string | undefined): ExpressionType { + if (!mysqlType) { + return ExpressionType.TEXT; + } + + const upperCaseType = mysqlType.toUpperCase(); + // Handle type with parameters like VARCHAR(255), DECIMAL(10,2), etc. 
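+  // e.g. 'VARCHAR(255)' -> 'VARCHAR', 'DECIMAL(10,2)' -> 'DECIMAL'; only the base type matters for the mapping below.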
+ const baseType = upperCaseType.split('(')[0]; + + switch (baseType) { + case 'BIT': + case 'BOOL': + case 'BOOLEAN': + case 'TINYINT': + case 'SMALLINT': + case 'MEDIUMINT': + case 'INT': + case 'INTEGER': + case 'BIGINT': + case 'UNSIGNED BIGINT': + return ExpressionType.INTEGER; + case 'BINARY': + case 'VARBINARY': + case 'TINYBLOB': + case 'MEDIUMBLOB': + case 'LONGBLOB': + case 'BLOB': + case 'GEOMETRY': + case 'POINT': + case 'LINESTRING': + case 'POLYGON': + case 'MULTIPOINT': + case 'MULTILINESTRING': + case 'MULTIPOLYGON': + case 'GEOMETRYCOLLECTION': + return ExpressionType.BLOB; + case 'FLOAT': + case 'DOUBLE': + case 'REAL': + return ExpressionType.REAL; + case 'JSON': + return ExpressionType.TEXT; + default: + // In addition to the normal text types, includes: DECIMAL, NUMERIC, DATE, TIME, DATETIME, TIMESTAMP, YEAR, ENUM, SET + return ExpressionType.TEXT; + } +} diff --git a/modules/module-mysql/src/common/read-executed-gtid.ts b/modules/module-mysql/src/common/read-executed-gtid.ts new file mode 100644 index 000000000..9f60c3362 --- /dev/null +++ b/modules/module-mysql/src/common/read-executed-gtid.ts @@ -0,0 +1,48 @@ +import mysqlPromise from 'mysql2/promise'; +import * as mysql_utils from '../utils/mysql-utils.js'; +import { ReplicatedGTID } from './ReplicatedGTID.js'; + +/** + * Gets the current master HEAD GTID + */ +export async function readExecutedGtid(connection: mysqlPromise.Connection): Promise { + const version = await mysql_utils.getMySQLVersion(connection); + + let binlogStatus: mysqlPromise.RowDataPacket; + if (mysql_utils.isVersionAtLeast(version, '8.4.0')) { + // Syntax for the below query changed in 8.4.0 + const [[binLogResult]] = await mysql_utils.retriedQuery({ + connection, + query: `SHOW BINARY LOG STATUS` + }); + binlogStatus = binLogResult; + } else { + const [[binLogResult]] = await mysql_utils.retriedQuery({ + connection, + query: `SHOW MASTER STATUS` + }); + binlogStatus = binLogResult; + } + const position = { + filename: binlogStatus.File, + offset: parseInt(binlogStatus.Position) + }; + + return new ReplicatedGTID({ + // The head always points to the next position to start replication from + position, + raw_gtid: binlogStatus.Executed_Gtid_Set + }); +} + +export async function isBinlogStillAvailable( + connection: mysqlPromise.Connection, + binlogFile: string +): Promise { + const [logFiles] = await mysql_utils.retriedQuery({ + connection, + query: `SHOW BINARY LOGS;` + }); + + return logFiles.some((f) => f['Log_name'] == binlogFile); +} diff --git a/modules/module-mysql/src/index.ts b/modules/module-mysql/src/index.ts new file mode 100644 index 000000000..3abe77fc5 --- /dev/null +++ b/modules/module-mysql/src/index.ts @@ -0,0 +1 @@ +export * from './module/MySQLModule.js'; diff --git a/modules/module-mysql/src/module/MySQLModule.ts b/modules/module-mysql/src/module/MySQLModule.ts new file mode 100644 index 000000000..a4ab36bae --- /dev/null +++ b/modules/module-mysql/src/module/MySQLModule.ts @@ -0,0 +1,71 @@ +import { api, ConfigurationFileSyncRulesProvider, replication, system, TearDownOptions } from '@powersync/service-core'; + +import { MySQLRouteAPIAdapter } from '../api/MySQLRouteAPIAdapter.js'; +import { BinLogReplicator } from '../replication/BinLogReplicator.js'; +import { MySQLErrorRateLimiter } from '../replication/MySQLErrorRateLimiter.js'; +import * as types from '../types/types.js'; +import { MySQLConnectionManagerFactory } from '../replication/MySQLConnectionManagerFactory.js'; +import { MySQLConnectionConfig } from 
'../types/types.js'; +import { checkSourceConfiguration } from '../common/check-source-configuration.js'; +import { MySQLConnectionManager } from '../replication/MySQLConnectionManager.js'; + +export class MySQLModule extends replication.ReplicationModule { + constructor() { + super({ + name: 'MySQL', + type: types.MYSQL_CONNECTION_TYPE, + configSchema: types.MySQLConnectionConfig + }); + } + + async initialize(context: system.ServiceContextContainer): Promise { + await super.initialize(context); + } + + protected createRouteAPIAdapter(): api.RouteAPI { + return new MySQLRouteAPIAdapter(this.resolveConfig(this.decodedConfig!)); + } + + protected createReplicator(context: system.ServiceContext): replication.AbstractReplicator { + const normalisedConfig = this.resolveConfig(this.decodedConfig!); + const syncRuleProvider = new ConfigurationFileSyncRulesProvider(context.configuration.sync_rules); + const connectionFactory = new MySQLConnectionManagerFactory(normalisedConfig); + + return new BinLogReplicator({ + id: this.getDefaultId(normalisedConfig.database), + syncRuleProvider: syncRuleProvider, + storageEngine: context.storageEngine, + connectionFactory: connectionFactory, + rateLimiter: new MySQLErrorRateLimiter() + }); + } + + /** + * Combines base config with normalized connection settings + */ + private resolveConfig(config: types.MySQLConnectionConfig): types.ResolvedConnectionConfig { + return { + ...config, + ...types.normalizeConnectionConfig(config) + }; + } + + async teardown(options: TearDownOptions): Promise { + // No specific teardown required for MySQL + } + + async testConnection(config: MySQLConnectionConfig): Promise { + this.decodeConfig(config); + const normalisedConfig = this.resolveConfig(this.decodedConfig!); + const connectionManager = new MySQLConnectionManager(normalisedConfig, {}); + const connection = await connectionManager.getConnection(); + try { + const errors = await checkSourceConfiguration(connection); + if (errors.length > 0) { + throw new Error(errors.join('\n')); + } + } finally { + await connectionManager.end(); + } + } +} diff --git a/modules/module-mysql/src/replication/BinLogReplicationJob.ts b/modules/module-mysql/src/replication/BinLogReplicationJob.ts new file mode 100644 index 000000000..aa1a838b2 --- /dev/null +++ b/modules/module-mysql/src/replication/BinLogReplicationJob.ts @@ -0,0 +1,94 @@ +import { container } from '@powersync/lib-services-framework'; +import { replication } from '@powersync/service-core'; +import { BinlogConfigurationError, BinLogStream } from './BinLogStream.js'; +import { MySQLConnectionManagerFactory } from './MySQLConnectionManagerFactory.js'; + +export interface BinLogReplicationJobOptions extends replication.AbstractReplicationJobOptions { + connectionFactory: MySQLConnectionManagerFactory; +} + +export class BinLogReplicationJob extends replication.AbstractReplicationJob { + private connectionFactory: MySQLConnectionManagerFactory; + + constructor(options: BinLogReplicationJobOptions) { + super(options); + this.connectionFactory = options.connectionFactory; + } + + get slot_name() { + return this.options.storage.slot_name; + } + + async keepAlive() {} + + async replicate() { + try { + await this.replicateLoop(); + } catch (e) { + // Fatal exception + container.reporter.captureException(e, { + metadata: { + replication_slot: this.slot_name + } + }); + this.logger.error(`Replication failed on ${this.slot_name}`, e); + } finally { + this.abortController.abort(); + } + } + + async replicateLoop() { + while 
(!this.isStopped) { + await this.replicateOnce(); + + if (!this.isStopped) { + await new Promise((resolve) => setTimeout(resolve, 5000)); + } + } + } + + async replicateOnce() { + // New connections on every iteration (every error with retry), + // otherwise we risk repeating errors related to the connection, + // such as caused by cached PG schemas. + const connectionManager = this.connectionFactory.create({ + // Pool connections are only used intermittently. + idleTimeout: 30_000 + }); + try { + await this.rateLimiter?.waitUntilAllowed({ signal: this.abortController.signal }); + if (this.isStopped) { + return; + } + const stream = new BinLogStream({ + abortSignal: this.abortController.signal, + storage: this.options.storage, + connections: connectionManager + }); + await stream.replicate(); + } catch (e) { + if (this.abortController.signal.aborted) { + return; + } + this.logger.error(`Replication error`, e); + if (e.cause != null) { + this.logger.error(`cause`, e.cause); + } + + if (e instanceof BinlogConfigurationError) { + throw e; + } else { + // Report the error if relevant, before retrying + container.reporter.captureException(e, { + metadata: { + replication_slot: this.slot_name + } + }); + // This sets the retry delay + this.rateLimiter?.reportError(e); + } + } finally { + await connectionManager.end(); + } + } +} diff --git a/modules/module-mysql/src/replication/BinLogReplicator.ts b/modules/module-mysql/src/replication/BinLogReplicator.ts new file mode 100644 index 000000000..ca07f4a0a --- /dev/null +++ b/modules/module-mysql/src/replication/BinLogReplicator.ts @@ -0,0 +1,35 @@ +import { replication, storage } from '@powersync/service-core'; +import { BinLogReplicationJob } from './BinLogReplicationJob.js'; +import { MySQLConnectionManagerFactory } from './MySQLConnectionManagerFactory.js'; + +export interface BinLogReplicatorOptions extends replication.AbstractReplicatorOptions { + connectionFactory: MySQLConnectionManagerFactory; +} + +export class BinLogReplicator extends replication.AbstractReplicator { + private readonly connectionFactory: MySQLConnectionManagerFactory; + + constructor(options: BinLogReplicatorOptions) { + super(options); + this.connectionFactory = options.connectionFactory; + } + + createJob(options: replication.CreateJobOptions): BinLogReplicationJob { + return new BinLogReplicationJob({ + id: this.createJobId(options.storage.group_id), + storage: options.storage, + lock: options.lock, + connectionFactory: this.connectionFactory, + rateLimiter: this.rateLimiter + }); + } + + async cleanUp(syncRulesStorage: storage.SyncRulesBucketStorage): Promise { + // The MySQL module does not create anything which requires cleanup on the MySQL server. 
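+    // (Other data sources do need this hook, e.g. Postgres logical replication slots have to be dropped.)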
+ } + + async stop(): Promise { + await super.stop(); + await this.connectionFactory.shutdown(); + } +} diff --git a/modules/module-mysql/src/replication/BinLogStream.ts b/modules/module-mysql/src/replication/BinLogStream.ts new file mode 100644 index 000000000..d5e0c43ad --- /dev/null +++ b/modules/module-mysql/src/replication/BinLogStream.ts @@ -0,0 +1,601 @@ +import { logger } from '@powersync/lib-services-framework'; +import * as sync_rules from '@powersync/service-sync-rules'; +import async from 'async'; + +import { ColumnDescriptor, framework, getUuidReplicaIdentityBson, Metrics, storage } from '@powersync/service-core'; +import mysql, { FieldPacket } from 'mysql2'; + +import { BinLogEvent, StartOptions, TableMapEntry } from '@powersync/mysql-zongji'; +import * as common from '../common/common-index.js'; +import * as zongji_utils from './zongji/zongji-utils.js'; +import { MySQLConnectionManager } from './MySQLConnectionManager.js'; +import { isBinlogStillAvailable, ReplicatedGTID, toColumnDescriptors } from '../common/common-index.js'; +import mysqlPromise from 'mysql2/promise'; +import { createRandomServerId } from '../utils/mysql-utils.js'; + +export interface BinLogStreamOptions { + connections: MySQLConnectionManager; + storage: storage.SyncRulesBucketStorage; + abortSignal: AbortSignal; +} + +interface MysqlRelId { + schema: string; + name: string; +} + +interface WriteChangePayload { + type: storage.SaveOperationTag; + data: Data; + previous_data?: Data; + database: string; + table: string; + sourceTable: storage.SourceTable; + columns: Map; +} + +export type Data = Record; + +export class BinlogConfigurationError extends Error { + constructor(message: string) { + super(message); + } +} + +/** + * MySQL does not have same relation structure. Just returning unique key as string. + * @param source + */ +function getMysqlRelId(source: MysqlRelId): string { + return `${source.schema}.${source.name}`; +} + +export class BinLogStream { + private readonly syncRules: sync_rules.SqlSyncRules; + private readonly groupId: number; + + private readonly storage: storage.SyncRulesBucketStorage; + + private readonly connections: MySQLConnectionManager; + + private abortSignal: AbortSignal; + + private tableCache = new Map(); + + constructor(protected options: BinLogStreamOptions) { + this.storage = options.storage; + this.connections = options.connections; + this.syncRules = options.storage.getParsedSyncRules({ defaultSchema: this.defaultSchema }); + this.groupId = options.storage.group_id; + this.abortSignal = options.abortSignal; + } + + get connectionTag() { + return this.connections.connectionTag; + } + + get connectionId() { + // Default to 1 if not set + return this.connections.connectionId ? Number.parseInt(this.connections.connectionId) : 1; + } + + get stopped() { + return this.abortSignal.aborted; + } + + get defaultSchema() { + return this.connections.databaseName; + } + + async handleRelation(batch: storage.BucketStorageBatch, entity: storage.SourceEntityDescriptor, snapshot: boolean) { + const result = await this.storage.resolveTable({ + group_id: this.groupId, + connection_id: this.connectionId, + connection_tag: this.connectionTag, + entity_descriptor: entity, + sync_rules: this.syncRules + }); + this.tableCache.set(entity.objectId, result.table); + + // Drop conflicting tables. This includes for example renamed tables. + await batch.drop(result.dropTables); + + // Snapshot if: + // 1. 
Snapshot is requested (false for initial snapshot, since that process handles it elsewhere) + // 2. Snapshot is not already done, AND: + // 3. The table is used in sync rules. + const shouldSnapshot = snapshot && !result.table.snapshotComplete && result.table.syncAny; + + if (shouldSnapshot) { + // Truncate this table, in case a previous snapshot was interrupted. + await batch.truncate([result.table]); + + let gtid: common.ReplicatedGTID; + // Start the snapshot inside a transaction. + // We use a dedicated connection for this. + const connection = await this.connections.getStreamingConnection(); + const promiseConnection = (connection as mysql.Connection).promise(); + try { + await promiseConnection.query('BEGIN'); + try { + gtid = await common.readExecutedGtid(promiseConnection); + await this.snapshotTable(connection.connection, batch, result.table); + await promiseConnection.query('COMMIT'); + } catch (e) { + await promiseConnection.query('ROLLBACK'); + throw e; + } + } finally { + connection.release(); + } + const [table] = await batch.markSnapshotDone([result.table], gtid.comparable); + return table; + } + + return result.table; + } + + async getQualifiedTableNames( + batch: storage.BucketStorageBatch, + tablePattern: sync_rules.TablePattern + ): Promise { + if (tablePattern.connectionTag != this.connectionTag) { + return []; + } + + let tableRows: any[]; + const prefix = tablePattern.isWildcard ? tablePattern.tablePrefix : undefined; + if (tablePattern.isWildcard) { + const result = await this.connections.query( + `SELECT TABLE_NAME +FROM information_schema.tables +WHERE TABLE_SCHEMA = ? AND TABLE_NAME LIKE ?; +`, + [tablePattern.schema, tablePattern.tablePattern] + ); + tableRows = result[0]; + } else { + const result = await this.connections.query( + `SELECT TABLE_NAME +FROM information_schema.tables +WHERE TABLE_SCHEMA = ? AND TABLE_NAME = ?; +`, + [tablePattern.schema, tablePattern.tablePattern] + ); + tableRows = result[0]; + } + let tables: storage.SourceTable[] = []; + + for (let row of tableRows) { + const name = row['TABLE_NAME'] as string; + if (prefix && !name.startsWith(prefix)) { + continue; + } + + const result = await this.connections.query( + `SELECT 1 +FROM information_schema.tables +WHERE table_schema = ? AND table_name = ? +AND table_type = 'BASE TABLE';`, + [tablePattern.schema, tablePattern.name] + ); + if (result[0].length == 0) { + logger.info(`Skipping ${tablePattern.schema}.${name} - no table exists/is not a base table`); + continue; + } + + const connection = await this.connections.getConnection(); + const replicationColumns = await common.getReplicationIdentityColumns({ + connection: connection, + schema: tablePattern.schema, + table_name: tablePattern.name + }); + connection.release(); + + const table = await this.handleRelation( + batch, + { + name, + schema: tablePattern.schema, + objectId: getMysqlRelId(tablePattern), + replicationColumns: replicationColumns.columns + }, + false + ); + + tables.push(table); + } + return tables; + } + + /** + * Checks if the initial sync has been completed yet. + */ + protected async checkInitialReplicated(): Promise { + const status = await this.storage.getStatus(); + const lastKnowGTID = status.checkpoint_lsn ? common.ReplicatedGTID.fromSerialized(status.checkpoint_lsn) : null; + if (status.snapshot_done && status.checkpoint_lsn) { + logger.info(`Initial replication already done.`); + + if (lastKnowGTID) { + // Check if the binlog is still available. If it isn't we need to snapshot again. 
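+        // Once the server has purged the binlog file referenced by the stored GTID, streaming can no
+        // longer resume from that position, so the only safe recovery is a fresh initial snapshot.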
+ const connection = await this.connections.getConnection(); + try { + const isAvailable = await isBinlogStillAvailable(connection, lastKnowGTID.position.filename); + if (!isAvailable) { + logger.info( + `Binlog file ${lastKnowGTID.position.filename} is no longer available, starting initial replication again.` + ); + } + return isAvailable; + } finally { + connection.release(); + } + } + + return true; + } + + return false; + } + + /** + * Does the initial replication of the database tables. + * + * If (partial) replication was done before on this slot, this clears the state + * and starts again from scratch. + */ + async startInitialReplication() { + await this.storage.clear(); + // Replication will be performed in a single transaction on this connection + const connection = await this.connections.getStreamingConnection(); + const promiseConnection = (connection as mysql.Connection).promise(); + const headGTID = await common.readExecutedGtid(promiseConnection); + logger.info(`Using snapshot checkpoint GTID: '${headGTID}'`); + try { + logger.info(`Starting initial replication`); + await promiseConnection.query( + 'SET TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ ONLY' + ); + await promiseConnection.query('START TRANSACTION'); + const sourceTables = this.syncRules.getSourceTables(); + await this.storage.startBatch( + { zeroLSN: ReplicatedGTID.ZERO.comparable, defaultSchema: this.defaultSchema, storeCurrentData: true }, + async (batch) => { + for (let tablePattern of sourceTables) { + const tables = await this.getQualifiedTableNames(batch, tablePattern); + for (let table of tables) { + await this.snapshotTable(connection as mysql.Connection, batch, table); + await batch.markSnapshotDone([table], headGTID.comparable); + await framework.container.probes.touch(); + } + } + await batch.commit(headGTID.comparable); + } + ); + logger.info(`Initial replication done`); + await promiseConnection.query('COMMIT'); + } catch (e) { + await promiseConnection.query('ROLLBACK'); + throw e; + } finally { + connection.release(); + } + } + + private async snapshotTable( + connection: mysql.Connection, + batch: storage.BucketStorageBatch, + table: storage.SourceTable + ) { + logger.info(`Replicating ${table.qualifiedName}`); + // TODO count rows and log progress at certain batch sizes + + let columns: Map; + return new Promise((resolve, reject) => { + // MAX_EXECUTION_TIME(0) hint disables execution timeout for this query + connection + .query(`SELECT /*+ MAX_EXECUTION_TIME(0) */ * FROM ${table.schema}.${table.table}`) + .on('error', (err) => { + reject(err); + }) + .on('fields', (fields: FieldPacket[]) => { + // Map the columns and their types + columns = toColumnDescriptors(fields); + }) + .on('result', async (row) => { + connection.pause(); + const record = common.toSQLiteRow(row, columns); + + await batch.save({ + tag: storage.SaveOperationTag.INSERT, + sourceTable: table, + before: undefined, + beforeReplicaId: undefined, + after: record, + afterReplicaId: getUuidReplicaIdentityBson(record, table.replicaIdColumns) + }); + connection.resume(); + Metrics.getInstance().rows_replicated_total.add(1); + }) + .on('end', async function () { + await batch.flush(); + resolve(); + }); + }); + } + + async replicate() { + try { + // If anything errors here, the entire replication process is halted, and + // all connections automatically closed, including this one. 
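+ // Any error below is first recorded via storage.reportError(), so the failure is persisted with the sync rules state before being rethrown.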
+ await this.initReplication(); + await this.streamChanges(); + logger.info('BinlogStream has been shut down'); + } catch (e) { + await this.storage.reportError(e); + throw e; + } + } + + async initReplication() { + const connection = await this.connections.getConnection(); + const errors = await common.checkSourceConfiguration(connection); + connection.release(); + + if (errors.length > 0) { + throw new BinlogConfigurationError(`Binlog Configuration Errors: ${errors.join(', ')}`); + } + + const initialReplicationCompleted = await this.checkInitialReplicated(); + if (!initialReplicationCompleted) { + await this.startInitialReplication(); + } + } + + private getTable(tableId: string): storage.SourceTable { + const table = this.tableCache.get(tableId); + if (table == null) { + // We should always receive a replication message before the relation is used. + // If we can't find it, it's a bug. + throw new Error(`Missing relation cache for ${tableId}`); + } + return table; + } + + async streamChanges() { + // Auto-activate as soon as initial replication is done + await this.storage.autoActivate(); + const serverId = createRandomServerId(this.storage.group_id); + logger.info(`Starting replication. Created replica client with serverId:${serverId}`); + + const connection = await this.connections.getConnection(); + const { checkpoint_lsn } = await this.storage.getStatus(); + if (checkpoint_lsn) { + logger.info(`Existing checkpoint found: ${checkpoint_lsn}`); + } + + const fromGTID = checkpoint_lsn + ? common.ReplicatedGTID.fromSerialized(checkpoint_lsn) + : await common.readExecutedGtid(connection); + const binLogPositionState = fromGTID.position; + connection.release(); + + if (!this.stopped) { + await this.storage.startBatch( + { zeroLSN: ReplicatedGTID.ZERO.comparable, defaultSchema: this.defaultSchema, storeCurrentData: true }, + async (batch) => { + const zongji = this.connections.createBinlogListener(); + + let currentGTID: common.ReplicatedGTID | null = null; + + const queue = async.queue(async (evt: BinLogEvent) => { + // State machine + switch (true) { + case zongji_utils.eventIsGTIDLog(evt): + currentGTID = common.ReplicatedGTID.fromBinLogEvent({ + raw_gtid: { + server_id: evt.serverId, + transaction_range: evt.transactionRange + }, + position: { + filename: binLogPositionState.filename, + offset: evt.nextPosition + } + }); + break; + case zongji_utils.eventIsRotation(evt): + // Update the position + binLogPositionState.filename = evt.binlogName; + binLogPositionState.offset = evt.position; + break; + case zongji_utils.eventIsWriteMutation(evt): + const writeTableInfo = evt.tableMap[evt.tableId]; + await this.writeChanges(batch, { + type: storage.SaveOperationTag.INSERT, + data: evt.rows, + tableEntry: writeTableInfo + }); + break; + case zongji_utils.eventIsUpdateMutation(evt): + const updateTableInfo = evt.tableMap[evt.tableId]; + await this.writeChanges(batch, { + type: storage.SaveOperationTag.UPDATE, + data: evt.rows.map((row) => row.after), + previous_data: evt.rows.map((row) => row.before), + tableEntry: updateTableInfo + }); + break; + case zongji_utils.eventIsDeleteMutation(evt): + const deleteTableInfo = evt.tableMap[evt.tableId]; + await this.writeChanges(batch, { + type: storage.SaveOperationTag.DELETE, + data: evt.rows, + tableEntry: deleteTableInfo + }); + break; + case zongji_utils.eventIsXid(evt): + Metrics.getInstance().transactions_replicated_total.add(1); + // Need to commit with a replicated GTID with updated next position + await batch.commit( + new 
common.ReplicatedGTID({ + raw_gtid: currentGTID!.raw, + position: { + filename: binLogPositionState.filename, + offset: evt.nextPosition + } + }).comparable + ); + currentGTID = null; + // chunks_replicated_total.add(1); + break; + } + }, 1); + + zongji.on('binlog', (evt: BinLogEvent) => { + if (!this.stopped) { + logger.info(`Received Binlog event:${evt.getEventName()}`); + queue.push(evt); + } else { + logger.info(`Replication is busy stopping, ignoring event ${evt.getEventName()}`); + } + }); + + if (this.stopped) { + // Powersync is shutting down, don't start replicating + return; + } + + logger.info(`Reading binlog from: ${binLogPositionState.filename}:${binLogPositionState.offset}`); + + // Only listen for changes to tables in the sync rules + const includedTables = [...this.tableCache.values()].map((table) => table.table); + zongji.start({ + includeEvents: ['tablemap', 'writerows', 'updaterows', 'deleterows', 'xid', 'rotate', 'gtidlog'], + excludeEvents: [], + includeSchema: { [this.defaultSchema]: includedTables }, + filename: binLogPositionState.filename, + position: binLogPositionState.offset, + serverId: serverId + } satisfies StartOptions); + + // Forever young + await new Promise((resolve, reject) => { + zongji.on('error', (error) => { + logger.error('Error on Binlog listener:', error); + zongji.stop(); + queue.kill(); + reject(error); + }); + + zongji.on('stopped', () => { + logger.info('Binlog listener stopped. Replication ended.'); + resolve(); + }); + + queue.error((error) => { + logger.error('Binlog listener queue error:', error); + zongji.stop(); + queue.kill(); + reject(error); + }); + + this.abortSignal.addEventListener( + 'abort', + () => { + logger.info('Abort signal received, stopping replication...'); + zongji.stop(); + queue.kill(); + resolve(); + }, + { once: true } + ); + }); + } + ); + } + } + + private async writeChanges( + batch: storage.BucketStorageBatch, + msg: { + type: storage.SaveOperationTag; + data: Data[]; + previous_data?: Data[]; + tableEntry: TableMapEntry; + } + ): Promise { + const columns = toColumnDescriptors(msg.tableEntry); + + for (const [index, row] of msg.data.entries()) { + await this.writeChange(batch, { + type: msg.type, + database: msg.tableEntry.parentSchema, + sourceTable: this.getTable( + getMysqlRelId({ + schema: msg.tableEntry.parentSchema, + name: msg.tableEntry.tableName + }) + ), + table: msg.tableEntry.tableName, + columns: columns, + data: row, + previous_data: msg.previous_data?.[index] + }); + } + return null; + } + + private async writeChange( + batch: storage.BucketStorageBatch, + payload: WriteChangePayload + ): Promise { + switch (payload.type) { + case storage.SaveOperationTag.INSERT: + Metrics.getInstance().rows_replicated_total.add(1); + const record = common.toSQLiteRow(payload.data, payload.columns); + return await batch.save({ + tag: storage.SaveOperationTag.INSERT, + sourceTable: payload.sourceTable, + before: undefined, + beforeReplicaId: undefined, + after: record, + afterReplicaId: getUuidReplicaIdentityBson(record, payload.sourceTable.replicaIdColumns) + }); + case storage.SaveOperationTag.UPDATE: + Metrics.getInstance().rows_replicated_total.add(1); + // "before" may be null if the replica id columns are unchanged + // It's fine to treat that the same as an insert. + const beforeUpdated = payload.previous_data + ? 
common.toSQLiteRow(payload.previous_data, payload.columns) + : undefined; + const after = common.toSQLiteRow(payload.data, payload.columns); + + return await batch.save({ + tag: storage.SaveOperationTag.UPDATE, + sourceTable: payload.sourceTable, + before: beforeUpdated, + beforeReplicaId: beforeUpdated + ? getUuidReplicaIdentityBson(beforeUpdated, payload.sourceTable.replicaIdColumns) + : undefined, + after: common.toSQLiteRow(payload.data, payload.columns), + afterReplicaId: getUuidReplicaIdentityBson(after, payload.sourceTable.replicaIdColumns) + }); + + case storage.SaveOperationTag.DELETE: + Metrics.getInstance().rows_replicated_total.add(1); + const beforeDeleted = common.toSQLiteRow(payload.data, payload.columns); + + return await batch.save({ + tag: storage.SaveOperationTag.DELETE, + sourceTable: payload.sourceTable, + before: beforeDeleted, + beforeReplicaId: getUuidReplicaIdentityBson(beforeDeleted, payload.sourceTable.replicaIdColumns), + after: undefined, + afterReplicaId: undefined + }); + default: + return null; + } + } +} diff --git a/modules/module-mysql/src/replication/MySQLConnectionManager.ts b/modules/module-mysql/src/replication/MySQLConnectionManager.ts new file mode 100644 index 000000000..3693b9ce2 --- /dev/null +++ b/modules/module-mysql/src/replication/MySQLConnectionManager.ts @@ -0,0 +1,107 @@ +import { NormalizedMySQLConnectionConfig } from '../types/types.js'; +import mysqlPromise from 'mysql2/promise'; +import mysql, { FieldPacket, RowDataPacket } from 'mysql2'; +import * as mysql_utils from '../utils/mysql-utils.js'; +import ZongJi from '@powersync/mysql-zongji'; +import { logger } from '@powersync/lib-services-framework'; + +export class MySQLConnectionManager { + /** + * Pool that can create streamable connections + */ + private readonly pool: mysql.Pool; + /** + * Pool that can create promise-based connections + */ + private readonly promisePool: mysqlPromise.Pool; + + private binlogListeners: ZongJi[] = []; + + private isClosed = false; + + constructor( + public options: NormalizedMySQLConnectionConfig, + public poolOptions: mysqlPromise.PoolOptions + ) { + // The pool is lazy - no connections are opened until a query is performed. 
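+ // The promise pool created below wraps the same underlying pool, so streaming (callback) and promise-based connections share one set of connections.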
+ this.pool = mysql_utils.createPool(options, poolOptions); + this.promisePool = this.pool.promise(); + } + + public get connectionTag() { + return this.options.tag; + } + + public get connectionId() { + return this.options.id; + } + + public get databaseName() { + return this.options.database; + } + + /** + * Create a new replication listener + */ + createBinlogListener(): ZongJi { + const listener = new ZongJi({ + host: this.options.hostname, + user: this.options.username, + password: this.options.password + }); + + this.binlogListeners.push(listener); + + return listener; + } + + /** + * Run a query using a connection from the pool + * A promise with the result is returned + * @param query + * @param params + */ + async query(query: string, params?: any[]): Promise<[RowDataPacket[], FieldPacket[]]> { + return this.promisePool.query(query, params); + } + + /** + * Get a streamable connection from this manager's pool + * The connection should be released when it is no longer needed + */ + async getStreamingConnection(): Promise { + return new Promise((resolve, reject) => { + this.pool.getConnection((err, connection) => { + if (err) { + reject(err); + } else { + resolve(connection); + } + }); + }); + } + + /** + * Get a promise connection from this manager's pool + * The connection should be released when it is no longer needed + */ + async getConnection(): Promise { + return this.promisePool.getConnection(); + } + + async end(): Promise { + if (!this.isClosed) { + for (const listener of this.binlogListeners) { + listener.stop(); + } + + try { + await this.promisePool.end(); + this.isClosed = true; + } catch (error) { + // We don't particularly care if any errors are thrown when shutting down the pool + logger.warn('Error shutting down MySQL connection pool', error); + } + } + } +} diff --git a/modules/module-mysql/src/replication/MySQLConnectionManagerFactory.ts b/modules/module-mysql/src/replication/MySQLConnectionManagerFactory.ts new file mode 100644 index 000000000..ea87f60ec --- /dev/null +++ b/modules/module-mysql/src/replication/MySQLConnectionManagerFactory.ts @@ -0,0 +1,28 @@ +import { logger } from '@powersync/lib-services-framework'; +import mysql from 'mysql2/promise'; +import { MySQLConnectionManager } from './MySQLConnectionManager.js'; +import { ResolvedConnectionConfig } from '../types/types.js'; + +export class MySQLConnectionManagerFactory { + private readonly connectionManagers: MySQLConnectionManager[]; + private readonly connectionConfig: ResolvedConnectionConfig; + + constructor(connectionConfig: ResolvedConnectionConfig) { + this.connectionConfig = connectionConfig; + this.connectionManagers = []; + } + + create(poolOptions: mysql.PoolOptions) { + const manager = new MySQLConnectionManager(this.connectionConfig, poolOptions); + this.connectionManagers.push(manager); + return manager; + } + + async shutdown() { + logger.info('Shutting down MySQL connection Managers...'); + for (const manager of this.connectionManagers) { + await manager.end(); + } + logger.info('MySQL connection Managers shutdown completed.'); + } +} diff --git a/modules/module-mysql/src/replication/MySQLErrorRateLimiter.ts b/modules/module-mysql/src/replication/MySQLErrorRateLimiter.ts new file mode 100644 index 000000000..8966cd201 --- /dev/null +++ b/modules/module-mysql/src/replication/MySQLErrorRateLimiter.ts @@ -0,0 +1,37 @@ +import { ErrorRateLimiter } from '@powersync/service-core'; +import { setTimeout } from 'timers/promises'; + +export class MySQLErrorRateLimiter implements 
ErrorRateLimiter { + nextAllowed: number = Date.now(); + + async waitUntilAllowed(options?: { signal?: AbortSignal | undefined } | undefined): Promise { + const delay = Math.max(0, this.nextAllowed - Date.now()); + // Minimum delay between connections, even without errors + this.setDelay(500); + await setTimeout(delay, undefined, { signal: options?.signal }); + } + + mayPing(): boolean { + return Date.now() >= this.nextAllowed; + } + + reportError(e: any): void { + const message = (e.message as string) ?? ''; + if (message.includes('password authentication failed')) { + // Wait 15 minutes, to avoid triggering Supabase's fail2ban + this.setDelay(900_000); + } else if (message.includes('ENOTFOUND')) { + // DNS lookup issue - incorrect URI or deleted instance + this.setDelay(120_000); + } else if (message.includes('ECONNREFUSED')) { + // Could be fail2ban or similar + this.setDelay(120_000); + } else { + this.setDelay(30_000); + } + } + + private setDelay(delay: number) { + this.nextAllowed = Math.max(this.nextAllowed, Date.now() + delay); + } +} diff --git a/modules/module-mysql/src/replication/zongji/zongji-utils.ts b/modules/module-mysql/src/replication/zongji/zongji-utils.ts new file mode 100644 index 000000000..36122b636 --- /dev/null +++ b/modules/module-mysql/src/replication/zongji/zongji-utils.ts @@ -0,0 +1,32 @@ +import { + BinLogEvent, + BinLogGTIDLogEvent, + BinLogMutationEvent, + BinLogRotationEvent, + BinLogUpdateEvent, + BinLogXidEvent +} from '@powersync/mysql-zongji'; + +export function eventIsGTIDLog(event: BinLogEvent): event is BinLogGTIDLogEvent { + return event.getEventName() == 'gtidlog'; +} + +export function eventIsXid(event: BinLogEvent): event is BinLogXidEvent { + return event.getEventName() == 'xid'; +} + +export function eventIsRotation(event: BinLogEvent): event is BinLogRotationEvent { + return event.getEventName() == 'rotate'; +} + +export function eventIsWriteMutation(event: BinLogEvent): event is BinLogMutationEvent { + return event.getEventName() == 'writerows'; +} + +export function eventIsDeleteMutation(event: BinLogEvent): event is BinLogMutationEvent { + return event.getEventName() == 'deleterows'; +} + +export function eventIsUpdateMutation(event: BinLogEvent): event is BinLogUpdateEvent { + return event.getEventName() == 'updaterows'; +} diff --git a/modules/module-mysql/src/replication/zongji/zongji.d.ts b/modules/module-mysql/src/replication/zongji/zongji.d.ts new file mode 100644 index 000000000..9a17f15e9 --- /dev/null +++ b/modules/module-mysql/src/replication/zongji/zongji.d.ts @@ -0,0 +1,119 @@ +declare module '@powersync/mysql-zongji' { + export type ZongjiOptions = { + host: string; + user: string; + password: string; + dateStrings?: boolean; + timeZone?: string; + }; + + interface DatabaseFilter { + [databaseName: string]: string[] | true; + } + + export type StartOptions = { + includeEvents?: string[]; + excludeEvents?: string[]; + /** + * Describe which databases and tables to include (Only for row events). Use database names as the key and pass an array of table names or true (for the entire database). 
+ * Example: { 'my_database': ['allow_table', 'another_table'], 'another_db': true } + */ + includeSchema?: DatabaseFilter; + /** + * Object describing which databases and tables to exclude (Same format as includeSchema) + * Example: { 'other_db': ['disallowed_table'], 'ex_db': true } + */ + excludeSchema?: DatabaseFilter; + /** + * BinLog position filename to start reading events from + */ + filename?: string; + /** + * BinLog position offset to start reading events from in file specified + */ + position?: number; + + /** + * Unique server ID for this replication client. + */ + serverId?: number; + }; + + export type ColumnSchema = { + COLUMN_NAME: string; + COLLATION_NAME: string; + CHARACTER_SET_NAME: string; + COLUMN_COMMENT: string; + COLUMN_TYPE: string; + }; + + export type ColumnDefinition = { + name: string; + charset: string; + type: number; + metadata: Record<string, any>; + }; + + export type TableMapEntry = { + columnSchemas: ColumnSchema[]; + parentSchema: string; + tableName: string; + columns: ColumnDefinition[]; + }; + + export type BaseBinLogEvent = { + timestamp: number; + getEventName(): string; + + /** + * Next position in BinLog file to read from after + * this event. + */ + nextPosition: number; + /** + * Size of this event + */ + size: number; + flags: number; + useChecksum: boolean; + }; + + export type BinLogRotationEvent = BaseBinLogEvent & { + binlogName: string; + position: number; + }; + + export type BinLogGTIDLogEvent = BaseBinLogEvent & { + serverId: Buffer; + transactionRange: number; + }; + + export type BinLogXidEvent = BaseBinLogEvent & { + xid: number; + }; + + export type BinLogMutationEvent = BaseBinLogEvent & { + tableId: number; + numberOfColumns: number; + tableMap: Record<number, TableMapEntry>; + rows: Record<string, any>[]; + }; + + export type BinLogUpdateEvent = Omit<BinLogMutationEvent, 'rows'> & { + rows: { + before: Record<string, any>; + after: Record<string, any>; + }[]; + }; + + export type BinLogEvent = BinLogRotationEvent | BinLogGTIDLogEvent | BinLogXidEvent | BinLogMutationEvent; + + export default class ZongJi { + constructor(options: ZongjiOptions); + + start(options: StartOptions): void; + stop(): void; + + on(type: 'binlog' | string, callback: (event: BinLogEvent) => void); + } +} diff --git a/modules/module-mysql/src/types/types.ts b/modules/module-mysql/src/types/types.ts new file mode 100644 index 000000000..43dd17696 --- /dev/null +++ b/modules/module-mysql/src/types/types.ts @@ -0,0 +1,106 @@ +import * as service_types from '@powersync/service-types'; +import * as t from 'ts-codec'; +import * as urijs from 'uri-js'; + +export const MYSQL_CONNECTION_TYPE = 'mysql' as const; + +export interface NormalizedMySQLConnectionConfig { + id: string; + tag: string; + + hostname: string; + port: number; + database: string; + + username: string; + password: string; + server_id: number; + + cacert?: string; + client_certificate?: string; + client_private_key?: string; +} + +export const MySQLConnectionConfig = service_types.configFile.DataSourceConfig.and( + t.object({ + type: t.literal(MYSQL_CONNECTION_TYPE), + uri: t.string.optional(), + hostname: t.string.optional(), + port: service_types.configFile.portCodec.optional(), + username: t.string.optional(), + password: t.string.optional(), + database: t.string.optional(), + server_id: t.number.optional(), + + cacert: t.string.optional(), + client_certificate: t.string.optional(), + client_private_key: t.string.optional() + }) +); + +/** + * Config input specified when starting services + */ +export type MySQLConnectionConfig = t.Decoded<typeof MySQLConnectionConfig>; + +/** + * Resolved version of {@link 
MySQLConnectionConfig} + */ +export type ResolvedConnectionConfig = MySQLConnectionConfig & NormalizedMySQLConnectionConfig; + +/** + * Validate and normalize connection options. + * + * Returns destructured options. + */ +export function normalizeConnectionConfig(options: MySQLConnectionConfig): NormalizedMySQLConnectionConfig { + let uri: urijs.URIComponents; + if (options.uri) { + uri = urijs.parse(options.uri); + if (uri.scheme != 'mysql') { + throw new Error(`Invalid URI - protocol must be mysql, got ${uri.scheme}`); + } + } else { + uri = urijs.parse('mysql:///'); + } + + const hostname = options.hostname ?? uri.host ?? ''; + const port = Number(options.port ?? uri.port ?? 3306); + + const database = options.database ?? uri.path?.substring(1) ?? ''; + + const [uri_username, uri_password] = (uri.userinfo ?? '').split(':'); + + const username = options.username ?? uri_username ?? ''; + const password = options.password ?? uri_password ?? ''; + + if (hostname == '') { + throw new Error(`hostname required`); + } + + if (username == '') { + throw new Error(`username required`); + } + + if (password == '') { + throw new Error(`password required`); + } + + if (database == '') { + throw new Error(`database required`); + } + + return { + id: options.id ?? 'default', + tag: options.tag ?? 'default', + + hostname, + port, + database, + + username, + password, + + server_id: options.server_id ?? 1 + }; +} diff --git a/modules/module-mysql/src/utils/mysql-utils.ts b/modules/module-mysql/src/utils/mysql-utils.ts new file mode 100644 index 000000000..a2279c234 --- /dev/null +++ b/modules/module-mysql/src/utils/mysql-utils.ts @@ -0,0 +1,84 @@ +import { logger } from '@powersync/lib-services-framework'; +import mysql from 'mysql2'; +import mysqlPromise from 'mysql2/promise'; +import * as types from '../types/types.js'; +import { coerce, gte } from 'semver'; + +export type RetriedQueryOptions = { + connection: mysqlPromise.Connection; + query: string; + params?: any[]; + retries?: number; +}; + +/** + * Retry a simple query - up to 2 attempts total. + */ +export async function retriedQuery(options: RetriedQueryOptions) { + const { connection, query, params = [], retries = 2 } = options; + for (let tries = retries; ; tries--) { + try { + logger.debug(`Executing query: ${query}`); + return connection.query(query, params); + } catch (e) { + if (tries == 1) { + throw e; + } + logger.warn('Query error, retrying', e); + } + } +} + +export function createPool(config: types.NormalizedMySQLConnectionConfig, options?: mysql.PoolOptions): mysql.Pool { + const sslOptions = { + ca: config.cacert, + key: config.client_private_key, + cert: config.client_certificate + }; + const hasSSLOptions = Object.values(sslOptions).some((v) => !!v); + return mysql.createPool({ + host: config.hostname, + user: config.username, + password: config.password, + database: config.database, + ssl: hasSSLOptions ? sslOptions : undefined, + supportBigNumbers: true, + decimalNumbers: true, + timezone: 'Z', // Ensure no auto timezone manipulation of the dates occur + jsonStrings: true, // Return JSON columns as strings + ...(options || {}) + }); +} + +/** + * Return a random server id for a given sync rule id. + * Expected format is: 00 + * The max value for server id in MySQL is 2^32 - 1. + * We use the GTID format to keep track of our position in the binlog, no state is kept by the MySQL server, therefore + * it is ok to use a randomised server id every time. 
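+ * For example, sync rule id 5 with a random suffix of 4321 yields the server id 5004321 (sync rule id, then '00', then a random number below 10000).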
+ * @param syncRuleId + */ +export function createRandomServerId(syncRuleId: number): number { + return Number.parseInt(`${syncRuleId}00${Math.floor(Math.random() * 10000)}`); +} + +export async function getMySQLVersion(connection: mysqlPromise.Connection): Promise { + const [[versionResult]] = await retriedQuery({ + connection, + query: `SELECT VERSION() as version` + }); + + return versionResult.version as string; +} + +/** + * Check if the current MySQL version is newer or equal to the target version. + * @param version + * @param minimumVersion + */ +export function isVersionAtLeast(version: string, minimumVersion: string): boolean { + const coercedVersion = coerce(version); + const coercedMinimumVersion = coerce(minimumVersion); + + return gte(coercedVersion!, coercedMinimumVersion!, { loose: true }); +} diff --git a/modules/module-mysql/test/src/BinLogStream.test.ts b/modules/module-mysql/test/src/BinLogStream.test.ts new file mode 100644 index 000000000..44240d461 --- /dev/null +++ b/modules/module-mysql/test/src/BinLogStream.test.ts @@ -0,0 +1,306 @@ +import { putOp, removeOp } from '@core-tests/stream_utils.js'; +import { MONGO_STORAGE_FACTORY } from '@core-tests/util.js'; +import { BucketStorageFactory, Metrics } from '@powersync/service-core'; +import { describe, expect, test } from 'vitest'; +import { binlogStreamTest } from './BinlogStreamUtils.js'; +import { v4 as uuid } from 'uuid'; + +type StorageFactory = () => Promise; + +const BASIC_SYNC_RULES = ` +bucket_definitions: + global: + data: + - SELECT id, description FROM "test_data" +`; + +describe( + ' Binlog stream - mongodb', + function () { + defineBinlogStreamTests(MONGO_STORAGE_FACTORY); + }, + { timeout: 20_000 } +); + +function defineBinlogStreamTests(factory: StorageFactory) { + test( + 'Replicate basic values', + binlogStreamTest(factory, async (context) => { + const { connectionManager } = context; + await context.updateSyncRules(` + bucket_definitions: + global: + data: + - SELECT id, description, num FROM "test_data"`); + + await connectionManager.query(`CREATE TABLE test_data (id CHAR(36) PRIMARY KEY, description TEXT, num BIGINT)`); + + await context.replicateSnapshot(); + + const startRowCount = + (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0; + const startTxCount = + (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0; + + context.startStreaming(); + const testId = uuid(); + await connectionManager.query( + `INSERT INTO test_data(id, description, num) VALUES('${testId}', 'test1', 1152921504606846976)` + ); + const data = await context.getBucketData('global[]'); + + expect(data).toMatchObject([putOp('test_data', { id: testId, description: 'test1', num: 1152921504606846976n })]); + const endRowCount = (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0; + const endTxCount = + (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 
0; + expect(endRowCount - startRowCount).toEqual(1); + expect(endTxCount - startTxCount).toEqual(1); + }) + ); + + test( + 'replicating case sensitive table', + binlogStreamTest(factory, async (context) => { + const { connectionManager } = context; + await context.updateSyncRules(` + bucket_definitions: + global: + data: + - SELECT id, description FROM "test_DATA" + `); + + await connectionManager.query(`CREATE TABLE test_DATA (id CHAR(36) PRIMARY KEY, description text)`); + + await context.replicateSnapshot(); + + const startRowCount = + (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0; + const startTxCount = + (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0; + + context.startStreaming(); + + const testId = uuid(); + await connectionManager.query(`INSERT INTO test_DATA(id, description) VALUES('${testId}','test1')`); + + const data = await context.getBucketData('global[]'); + + expect(data).toMatchObject([putOp('test_DATA', { id: testId, description: 'test1' })]); + const endRowCount = (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0; + const endTxCount = + (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0; + expect(endRowCount - startRowCount).toEqual(1); + expect(endTxCount - startTxCount).toEqual(1); + }) + ); + + // TODO: Not supported yet + // test( + // 'replicating TRUNCATE', + // binlogStreamTest(factory, async (context) => { + // const { connectionManager } = context; + // const syncRuleContent = ` + // bucket_definitions: + // global: + // data: + // - SELECT id, description FROM "test_data" + // by_test_data: + // parameters: SELECT id FROM test_data WHERE id = token_parameters.user_id + // data: [] + // `; + // await context.updateSyncRules(syncRuleContent); + // await connectionManager.query(`DROP TABLE IF EXISTS test_data`); + // await connectionManager.query( + // `CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)` + // ); + // + // await context.replicateSnapshot(); + // context.startStreaming(); + // + // const [{ test_id }] = pgwireRows( + // await connectionManager.query(`INSERT INTO test_data(description) VALUES('test1') returning id as test_id`) + // ); + // await connectionManager.query(`TRUNCATE test_data`); + // + // const data = await context.getBucketData('global[]'); + // + // expect(data).toMatchObject([ + // putOp('test_data', { id: test_id, description: 'test1' }), + // removeOp('test_data', test_id) + // ]); + // }) + // ); + + test( + 'replicating changing primary key', + binlogStreamTest(factory, async (context) => { + const { connectionManager } = context; + await context.updateSyncRules(BASIC_SYNC_RULES); + + await connectionManager.query(`CREATE TABLE test_data (id CHAR(36) PRIMARY KEY, description text)`); + + await context.replicateSnapshot(); + context.startStreaming(); + + const testId1 = uuid(); + await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('${testId1}','test1')`); + + const testId2 = uuid(); + await connectionManager.query( + `UPDATE test_data SET id = '${testId2}', description = 'test2a' WHERE id = '${testId1}'` + ); + + // This update may fail replicating with: + // Error: Update on missing record public.test_data:074a601e-fc78-4c33-a15d-f89fdd4af31d :: {"g":1,"t":"651e9fbe9fec6155895057ec","k":"1a0b34da-fb8c-5e6f-8421-d7a3c5d4df4f"} + await connectionManager.query(`UPDATE test_data SET 
description = 'test2b' WHERE id = '${testId2}'`); + + // Re-use old id again + await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('${testId1}', 'test1b')`); + await connectionManager.query(`UPDATE test_data SET description = 'test1c' WHERE id = '${testId1}'`); + + const data = await context.getBucketData('global[]'); + expect(data).toMatchObject([ + // Initial insert + putOp('test_data', { id: testId1, description: 'test1' }), + // Update id, then description + removeOp('test_data', testId1), + putOp('test_data', { id: testId2, description: 'test2a' }), + putOp('test_data', { id: testId2, description: 'test2b' }), + // Re-use old id + putOp('test_data', { id: testId1, description: 'test1b' }), + putOp('test_data', { id: testId1, description: 'test1c' }) + ]); + }) + ); + + test( + 'initial sync', + binlogStreamTest(factory, async (context) => { + const { connectionManager } = context; + await context.updateSyncRules(BASIC_SYNC_RULES); + + await connectionManager.query(`CREATE TABLE test_data (id CHAR(36) PRIMARY KEY, description text)`); + + const testId = uuid(); + await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('${testId}','test1')`); + + await context.replicateSnapshot(); + + const data = await context.getBucketData('global[]'); + expect(data).toMatchObject([putOp('test_data', { id: testId, description: 'test1' })]); + }) + ); + + test( + 'snapshot with date values', + binlogStreamTest(factory, async (context) => { + const { connectionManager } = context; + await context.updateSyncRules(` + bucket_definitions: + global: + data: + - SELECT * FROM "test_data" + `); + + await connectionManager.query( + `CREATE TABLE test_data (id CHAR(36) PRIMARY KEY, description TEXT, date DATE, datetime DATETIME, timestamp TIMESTAMP)` + ); + + const testId = uuid(); + await connectionManager.query(` + INSERT INTO test_data(id, description, date, datetime, timestamp) VALUES('${testId}','testDates', '2023-03-06', '2023-03-06 15:47', '2023-03-06 15:47') + `); + + await context.replicateSnapshot(); + + const data = await context.getBucketData('global[]'); + expect(data).toMatchObject([ + putOp('test_data', { + id: testId, + description: 'testDates', + date: `2023-03-06`, + datetime: '2023-03-06T15:47:00.000Z', + timestamp: '2023-03-06T15:47:00.000Z' + }) + ]); + }) + ); + + test( + 'replication with date values', + binlogStreamTest(factory, async (context) => { + const { connectionManager } = context; + await context.updateSyncRules(` + bucket_definitions: + global: + data: + - SELECT * FROM "test_data" + `); + + await connectionManager.query( + `CREATE TABLE test_data (id CHAR(36) PRIMARY KEY, description TEXT, date DATE, datetime DATETIME, timestamp TIMESTAMP)` + ); + + await context.replicateSnapshot(); + + const startRowCount = + (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0; + const startTxCount = + (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 
0; + + context.startStreaming(); + + const testId = uuid(); + await connectionManager.query(` + INSERT INTO test_data(id, description, date, datetime, timestamp) VALUES('${testId}','testDates', '2023-03-06', '2023-03-06 15:47', '2023-03-06 15:47') + `); + + const data = await context.getBucketData('global[]'); + expect(data).toMatchObject([ + putOp('test_data', { + id: testId, + description: 'testDates', + date: `2023-03-06`, + datetime: '2023-03-06T15:47:00.000Z', + timestamp: '2023-03-06T15:47:00.000Z' + }) + ]); + const endRowCount = (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0; + const endTxCount = + (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0; + expect(endRowCount - startRowCount).toEqual(1); + expect(endTxCount - startTxCount).toEqual(1); + }) + ); + + test( + 'table not in sync rules', + binlogStreamTest(factory, async (context) => { + const { connectionManager } = context; + await context.updateSyncRules(BASIC_SYNC_RULES); + + await connectionManager.query(`CREATE TABLE test_donotsync (id CHAR(36) PRIMARY KEY, description text)`); + + await context.replicateSnapshot(); + + const startRowCount = + (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0; + const startTxCount = + (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0; + + context.startStreaming(); + + await connectionManager.query(`INSERT INTO test_donotsync(id, description) VALUES('${uuid()}','test1')`); + const data = await context.getBucketData('global[]'); + + expect(data).toMatchObject([]); + const endRowCount = (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0; + const endTxCount = + (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0; + + // There was a transaction, but we should not replicate any actual data + expect(endRowCount - startRowCount).toEqual(0); + expect(endTxCount - startTxCount).toEqual(1); + }) + ); +} diff --git a/modules/module-mysql/test/src/BinlogStreamUtils.ts b/modules/module-mysql/test/src/BinlogStreamUtils.ts new file mode 100644 index 000000000..c08f22c60 --- /dev/null +++ b/modules/module-mysql/test/src/BinlogStreamUtils.ts @@ -0,0 +1,157 @@ +import { + ActiveCheckpoint, + BucketStorageFactory, + OpId, + OplogEntry, + SyncRulesBucketStorage +} from '@powersync/service-core'; +import { TEST_CONNECTION_OPTIONS, clearTestDb } from './util.js'; +import { fromAsync } from '@core-tests/stream_utils.js'; +import { BinLogStream, BinLogStreamOptions } from '@module/replication/BinLogStream.js'; +import { MySQLConnectionManager } from '@module/replication/MySQLConnectionManager.js'; +import mysqlPromise from 'mysql2/promise'; +import { readExecutedGtid } from '@module/common/read-executed-gtid.js'; +import { logger } from '@powersync/lib-services-framework'; + +/** + * Tests operating on the binlog stream need to configure the stream and manage asynchronous + * replication, which gets a little tricky. + * + * This wraps a test in a function that configures all the context, and tears it down afterward. 
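+ * Typical usage: test('my test', binlogStreamTest(factory, async (context) => { ... }));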
+ */ +export function binlogStreamTest( + factory: () => Promise<BucketStorageFactory>, + test: (context: BinlogStreamTestContext) => Promise<void> +): () => Promise<void> { + return async () => { + const f = await factory(); + const connectionManager = new MySQLConnectionManager(TEST_CONNECTION_OPTIONS, {}); + + const connection = await connectionManager.getConnection(); + await clearTestDb(connection); + connection.release(); + const context = new BinlogStreamTestContext(f, connectionManager); + try { + await test(context); + } finally { + await context.dispose(); + } + }; +} + +export class BinlogStreamTestContext { + private _binlogStream?: BinLogStream; + private abortController = new AbortController(); + private streamPromise?: Promise<void>; + public storage?: SyncRulesBucketStorage; + private replicationDone = false; + + constructor( + public factory: BucketStorageFactory, + public connectionManager: MySQLConnectionManager + ) {} + + async dispose() { + this.abortController.abort(); + await this.streamPromise; + await this.connectionManager.end(); + } + + get connectionTag() { + return this.connectionManager.connectionTag; + } + + async updateSyncRules(content: string): Promise<SyncRulesBucketStorage> { + const syncRules = await this.factory.updateSyncRules({ content: content }); + this.storage = this.factory.getInstance(syncRules); + return this.storage!; + } + + get binlogStream(): BinLogStream { + if (this.storage == null) { + throw new Error('updateSyncRules() first'); + } + if (this._binlogStream) { + return this._binlogStream; + } + const options: BinLogStreamOptions = { + storage: this.storage, + connections: this.connectionManager, + abortSignal: this.abortController.signal + }; + this._binlogStream = new BinLogStream(options); + return this._binlogStream!; + } + + async replicateSnapshot() { + await this.binlogStream.initReplication(); + this.replicationDone = true; + } + + startStreaming() { + if (!this.replicationDone) { + throw new Error('Call replicateSnapshot() before startStreaming()'); + } + this.streamPromise = this.binlogStream.streamChanges(); + } + + async getCheckpoint(options?: { timeout?: number }): Promise<string> { + const connection = await this.connectionManager.getConnection(); + let checkpoint = await Promise.race([ + getClientCheckpoint(connection, this.factory, { timeout: options?.timeout ?? 60_000 }), + this.streamPromise + ]); + connection.release(); + if (typeof checkpoint == 'undefined') { + // This indicates an issue with the test setup - streamingPromise completed instead + // of getClientCheckpoint() + throw new Error('Test failure - streamingPromise completed'); + } + return checkpoint as string; + } + + async getBucketsDataBatch(buckets: Record<string, string>, options?: { timeout?: number }) { + const checkpoint = await this.getCheckpoint(options); + const map = new Map(Object.entries(buckets)); + return fromAsync(this.storage!.getBucketDataBatch(checkpoint, map)); + } + + async getBucketData(bucket: string, start = '0', options?: { timeout?: number }): Promise<OplogEntry[]> { + const checkpoint = await this.getCheckpoint(options); + const map = new Map([[bucket, start]]); + const batch = this.storage!.getBucketDataBatch(checkpoint, map); + const batches = await fromAsync(batch); + return batches[0]?.batch.data ?? []; + } +} + +export async function getClientCheckpoint( + connection: mysqlPromise.Connection, + bucketStorage: BucketStorageFactory, + options?: { timeout?: number } +): Promise<OpId> { + const start = Date.now(); + const gtid = await readExecutedGtid(connection); + // This old API needs a persisted checkpoint id. 
+ // Since we don't use LSNs anymore, the only way to get that is to wait. + + const timeout = options?.timeout ?? 50_000; + let lastCp: ActiveCheckpoint | null = null; + + logger.info('Expected Checkpoint: ' + gtid.comparable); + while (Date.now() - start < timeout) { + const cp = await bucketStorage.getActiveCheckpoint(); + lastCp = cp; + //logger.info('Last Checkpoint: ' + lastCp.lsn); + if (!cp.hasSyncRules()) { + throw new Error('No sync rules available'); + } + if (cp.lsn && cp.lsn >= gtid.comparable) { + return cp.checkpoint; + } + + await new Promise((resolve) => setTimeout(resolve, 30)); + } + + throw new Error(`Timeout while waiting for checkpoint ${gtid.comparable}. Last checkpoint: ${lastCp?.lsn}`); +} diff --git a/modules/module-mysql/test/src/env.ts b/modules/module-mysql/test/src/env.ts new file mode 100644 index 000000000..05fc76c42 --- /dev/null +++ b/modules/module-mysql/test/src/env.ts @@ -0,0 +1,7 @@ +import { utils } from '@powersync/lib-services-framework'; + +export const env = utils.collectEnvironmentVariables({ + MYSQL_TEST_URI: utils.type.string.default('mysql://root:mypassword@localhost:3306/mydatabase'), + CI: utils.type.boolean.default('false'), + SLOW_TESTS: utils.type.boolean.default('false') +}); diff --git a/modules/module-mysql/test/src/mysql-to-sqlite.test.ts b/modules/module-mysql/test/src/mysql-to-sqlite.test.ts new file mode 100644 index 000000000..9cebdccd2 --- /dev/null +++ b/modules/module-mysql/test/src/mysql-to-sqlite.test.ts @@ -0,0 +1,322 @@ +import { SqliteRow } from '@powersync/service-sync-rules'; +import { afterAll, describe, expect, test } from 'vitest'; +import { clearTestDb, TEST_CONNECTION_OPTIONS } from './util.js'; +import { eventIsWriteMutation, eventIsXid } from '@module/replication/zongji/zongji-utils.js'; +import * as common from '@module/common/common-index.js'; +import ZongJi, { BinLogEvent } from '@powersync/mysql-zongji'; +import { MySQLConnectionManager } from '@module/replication/MySQLConnectionManager.js'; +import { toColumnDescriptors } from '@module/common/common-index.js'; + +describe('MySQL Data Types', () => { + const connectionManager = new MySQLConnectionManager(TEST_CONNECTION_OPTIONS, {}); + + afterAll(async () => { + await connectionManager.end(); + }); + + async function setupTable() { + const connection = await connectionManager.getConnection(); + await clearTestDb(connection); + await connection.query(`CREATE TABLE test_data ( + tinyint_col TINYINT, + smallint_col SMALLINT, + mediumint_col MEDIUMINT, + int_col INT, + integer_col INTEGER, + bigint_col BIGINT, + float_col FLOAT, + double_col DOUBLE, + decimal_col DECIMAL(10,2), + numeric_col NUMERIC(10,2), + bit_col BIT(8), + boolean_col BOOLEAN, + serial_col SERIAL, + + date_col DATE, + datetime_col DATETIME(3), + timestamp_col TIMESTAMP(3), + time_col TIME, + year_col YEAR, + + char_col CHAR(10), + varchar_col VARCHAR(255), + binary_col BINARY(16), + varbinary_col VARBINARY(256), + tinyblob_col TINYBLOB, + blob_col BLOB, + mediumblob_col MEDIUMBLOB, + longblob_col LONGBLOB, + tinytext_col TINYTEXT, + text_col TEXT, + mediumtext_col MEDIUMTEXT, + longtext_col LONGTEXT, + enum_col ENUM('value1', 'value2', 'value3'), + set_col SET('value1', 'value2', 'value3'), + + json_col JSON, + + geometry_col GEOMETRY, + point_col POINT, + linestring_col LINESTRING, + polygon_col POLYGON, + multipoint_col MULTIPOINT, + multilinestring_col MULTILINESTRING, + multipolygon_col MULTIPOLYGON, + geometrycollection_col GEOMETRYCOLLECTION + )`); + + connection.release(); + } + + 
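// Each test below inserts rows via SQL, then compares the values read back directly from MySQL (getDatabaseRows) with the values decoded from the binlog events (getReplicatedRows). +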
test('Number types mappings', async () => { + await setupTable(); + await connectionManager.query(` +INSERT INTO test_data ( + tinyint_col, + smallint_col, + mediumint_col, + int_col, + integer_col, + bigint_col, + double_col, + decimal_col, + numeric_col, + bit_col, + boolean_col + -- serial_col is auto-incremented and can be left out +) VALUES ( + 127, -- TINYINT maximum value + 32767, -- SMALLINT maximum value + 8388607, -- MEDIUMINT maximum value + 2147483647, -- INT maximum value + 2147483647, -- INTEGER maximum value + 9223372036854775807, -- BIGINT maximum value + 3.1415926535, -- DOUBLE example + 12345.67, -- DECIMAL(10,2) example + 12345.67, -- NUMERIC(10,2) example + b'10101010', -- BIT(8) example in binary notation + TRUE -- BOOLEAN value (alias for TINYINT(1)) + -- serial_col is auto-incremented +)`); + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(); + + const expectedResult = { + tinyint_col: 127n, + smallint_col: 32767n, + mediumint_col: 8388607n, + int_col: 2147483647n, + integer_col: 2147483647n, + bigint_col: 9223372036854775807n, + double_col: 3.1415926535, + decimal_col: 12345.67, + numeric_col: 12345.67, + bit_col: new Uint8Array([0b10101010]).valueOf(), + boolean_col: 1n, + serial_col: 1n + }; + expect(databaseRows[0]).toMatchObject(expectedResult); + expect(replicatedRows[0]).toMatchObject(expectedResult); + }); + + test('Float type mapping', async () => { + await setupTable(); + const expectedFloatValue = 3.14; + await connectionManager.query(`INSERT INTO test_data (float_col) VALUES (${expectedFloatValue})`); + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(); + + const allowedPrecision = 0.0001; + + const actualFloatValueDB = databaseRows[0].float_col; + let difference = Math.abs((actualFloatValueDB as number) - expectedFloatValue); + expect(difference).toBeLessThan(allowedPrecision); + + const actualFloatValueReplicated = replicatedRows[0].float_col; + difference = Math.abs((actualFloatValueReplicated as number) - expectedFloatValue); + expect(difference).toBeLessThan(allowedPrecision); + }); + + test('Character types mappings', async () => { + await setupTable(); + await connectionManager.query(` +INSERT INTO test_data ( + char_col, + varchar_col, + binary_col, + varbinary_col, + tinyblob_col, + blob_col, + mediumblob_col, + longblob_col, + tinytext_col, + text_col, + mediumtext_col, + longtext_col, + enum_col +) VALUES ( + 'CharData', -- CHAR(10) with padding spaces + 'Variable character data',-- VARCHAR(255) + 'ShortBin', -- BINARY(16) + 'VariableBinaryData', -- VARBINARY(256) + 'TinyBlobData', -- TINYBLOB + 'BlobData', -- BLOB + 'MediumBlobData', -- MEDIUMBLOB + 'LongBlobData', -- LONGBLOB + 'TinyTextData', -- TINYTEXT + 'TextData', -- TEXT + 'MediumTextData', -- MEDIUMTEXT + 'LongTextData', -- LONGTEXT + 'value1' -- ENUM('value1', 'value2', 'value3') +);`); + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(); + const expectedResult = { + char_col: 'CharData', + varchar_col: 'Variable character data', + binary_col: new Uint8Array([83, 104, 111, 114, 116, 66, 105, 110, 0, 0, 0, 0, 0, 0, 0, 0]), // Pad with 0 + varbinary_col: new Uint8Array([ + 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61 + ]), + tinyblob_col: new Uint8Array([0x54, 0x69, 0x6e, 0x79, 0x42, 0x6c, 
0x6f, 0x62, 0x44, 0x61, 0x74, 0x61]), + blob_col: new Uint8Array([0x42, 0x6c, 0x6f, 0x62, 0x44, 0x61, 0x74, 0x61]), + mediumblob_col: new Uint8Array([ + 0x4d, 0x65, 0x64, 0x69, 0x75, 0x6d, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x61, 0x74, 0x61 + ]), + longblob_col: new Uint8Array([0x4c, 0x6f, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x61, 0x74, 0x61]), + tinytext_col: 'TinyTextData', + text_col: 'TextData', + mediumtext_col: 'MediumTextData', + longtext_col: 'LongTextData', + enum_col: 'value1' + }; + + expect(databaseRows[0]).toMatchObject(expectedResult); + expect(replicatedRows[0]).toMatchObject(expectedResult); + }); + + test('Date types mappings', async () => { + await setupTable(); + await connectionManager.query(` + INSERT INTO test_data(date_col, datetime_col, timestamp_col, time_col, year_col) + VALUES('2023-03-06', '2023-03-06 15:47', '2023-03-06 15:47', '15:47:00', '2023'); + `); + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(); + const expectedResult = { + date_col: '2023-03-06', + datetime_col: '2023-03-06T15:47:00.000Z', + timestamp_col: '2023-03-06T15:47:00.000Z', + time_col: '15:47:00', + year_col: 2023 + }; + + expect(databaseRows[0]).toMatchObject(expectedResult); + expect(replicatedRows[0]).toMatchObject(expectedResult); + }); + + test('Date types edge cases mappings', async () => { + await setupTable(); + + await connectionManager.query(`INSERT INTO test_data(timestamp_col) VALUES('1970-01-01 00:00:01')`); + await connectionManager.query(`INSERT INTO test_data(timestamp_col) VALUES('2038-01-19 03:14:07.499')`); + await connectionManager.query(`INSERT INTO test_data(datetime_col) VALUES('1000-01-01 00:00:00')`); + await connectionManager.query(`INSERT INTO test_data(datetime_col) VALUES('9999-12-31 23:59:59.499')`); + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(4); + const expectedResults = [ + { timestamp_col: '1970-01-01T00:00:01.000Z' }, + { timestamp_col: '2038-01-19T03:14:07.499Z' }, + { datetime_col: '1000-01-01T00:00:00.000Z' }, + { datetime_col: '9999-12-31T23:59:59.499Z' } + ]; + + for (let i = 0; i < expectedResults.length; i++) { + expect(databaseRows[i]).toMatchObject(expectedResults[i]); + expect(replicatedRows[i]).toMatchObject(expectedResults[i]); + } + }); + + test('Json types mappings', async () => { + await setupTable(); + + const expectedJSON = { name: 'John Doe', age: 30, married: true }; + const expectedSet = ['value1', 'value3']; + + // For convenience, we map the SET data type to a JSON Array + await connectionManager.query( + `INSERT INTO test_data (json_col, set_col) VALUES ('${JSON.stringify(expectedJSON)}', '${expectedSet.join(',')}')` + ); + + const databaseRows = await getDatabaseRows(connectionManager, 'test_data'); + const replicatedRows = await getReplicatedRows(); + + const actualDBJSONValue = JSON.parse(databaseRows[0].json_col as string); + const actualReplicatedJSONValue = JSON.parse(replicatedRows[0].json_col as string); + expect(actualDBJSONValue).toEqual(expectedJSON); + expect(actualReplicatedJSONValue).toEqual(expectedJSON); + + const actualDBSetValue = JSON.parse(databaseRows[0].set_col as string); + const actualReplicatedSetValue = JSON.parse(replicatedRows[0].set_col as string); + expect(actualDBSetValue).toEqual(expectedSet); + expect(actualReplicatedSetValue).toEqual(expectedSet); + }); +}); + +async function getDatabaseRows(connection: MySQLConnectionManager, tableName: 
string): Promise { + const [results, fields] = await connection.query(`SELECT * FROM ${tableName}`); + const columns = toColumnDescriptors(fields); + return results.map((row) => common.toSQLiteRow(row, columns)); +} + +/** + * Return all the inserts from the first transaction in the binlog stream. + */ +async function getReplicatedRows(expectedTransactionsCount?: number): Promise { + let transformed: SqliteRow[] = []; + const zongji = new ZongJi({ + host: TEST_CONNECTION_OPTIONS.hostname, + user: TEST_CONNECTION_OPTIONS.username, + password: TEST_CONNECTION_OPTIONS.password, + timeZone: 'Z' // Ensure no auto timezone manipulation of the dates occur + }); + + const completionPromise = new Promise((resolve, reject) => { + zongji.on('binlog', (evt: BinLogEvent) => { + try { + if (eventIsWriteMutation(evt)) { + const tableMapEntry = evt.tableMap[evt.tableId]; + const columns = toColumnDescriptors(tableMapEntry); + const records = evt.rows.map((row: Record) => common.toSQLiteRow(row, columns)); + transformed.push(...records); + } else if (eventIsXid(evt)) { + if (expectedTransactionsCount !== undefined) { + expectedTransactionsCount--; + if (expectedTransactionsCount == 0) { + zongji.stop(); + resolve(transformed); + } + } else { + zongji.stop(); + resolve(transformed); + } + } + } catch (e) { + reject(e); + } + }); + }); + + zongji.start({ + includeEvents: ['tablemap', 'writerows', 'xid'], + filename: 'mysql-bin.000001', + position: 0 + }); + + return completionPromise; +} diff --git a/modules/module-mysql/test/src/mysql-utils.test.ts b/modules/module-mysql/test/src/mysql-utils.test.ts new file mode 100644 index 000000000..039756267 --- /dev/null +++ b/modules/module-mysql/test/src/mysql-utils.test.ts @@ -0,0 +1,17 @@ +import { describe, expect, test } from 'vitest'; +import { isVersionAtLeast } from '@module/utils/mysql-utils.js'; + +describe('MySQL Utility Tests', () => { + test('Minimum version checking ', () => { + const newerVersion = '8.4.0'; + const olderVersion = '5.7'; + const sameVersion = '8.0'; + // Improperly formatted semantic versions should be handled gracefully if possible + const improperSemver = '5.7.42-0ubuntu0.18.04.1-log'; + + expect(isVersionAtLeast(newerVersion, '8.0')).toBeTruthy(); + expect(isVersionAtLeast(sameVersion, '8.0')).toBeTruthy(); + expect(isVersionAtLeast(olderVersion, '8.0')).toBeFalsy(); + expect(isVersionAtLeast(improperSemver, '5.7')).toBeTruthy(); + }); +}); diff --git a/modules/module-mysql/test/src/setup.ts b/modules/module-mysql/test/src/setup.ts new file mode 100644 index 000000000..b924cf736 --- /dev/null +++ b/modules/module-mysql/test/src/setup.ts @@ -0,0 +1,7 @@ +import { container } from '@powersync/lib-services-framework'; +import { beforeAll } from 'vitest'; + +beforeAll(() => { + // Executes for every test file + container.registerDefaults(); +}); diff --git a/modules/module-mysql/test/src/util.ts b/modules/module-mysql/test/src/util.ts new file mode 100644 index 000000000..f87f13e82 --- /dev/null +++ b/modules/module-mysql/test/src/util.ts @@ -0,0 +1,56 @@ +import * as types from '@module/types/types.js'; +import { BucketStorageFactory, Metrics, MongoBucketStorage } from '@powersync/service-core'; +import { env } from './env.js'; +import mysqlPromise from 'mysql2/promise'; +import { connectMongo } from '@core-tests/util.js'; +import { getMySQLVersion, isVersionAtLeast } from '@module/utils/mysql-utils.js'; + +export const TEST_URI = env.MYSQL_TEST_URI; + +export const TEST_CONNECTION_OPTIONS = types.normalizeConnectionConfig({ + type: 
'mysql', + uri: TEST_URI +}); + +// The metrics need to be initialized before they can be used +await Metrics.initialise({ + disable_telemetry_sharing: true, + powersync_instance_id: 'test', + internal_metrics_endpoint: 'unused.for.tests.com' +}); +Metrics.getInstance().resetCounters(); + +export type StorageFactory = () => Promise; + +export const INITIALIZED_MONGO_STORAGE_FACTORY: StorageFactory = async () => { + const db = await connectMongo(); + + // None of the tests insert data into this collection, so it was never created + if (!(await db.db.listCollections({ name: db.bucket_parameters.collectionName }).hasNext())) { + await db.db.createCollection('bucket_parameters'); + } + + await db.clear(); + + return new MongoBucketStorage(db, { slot_name_prefix: 'test_' }); +}; + +export async function clearTestDb(connection: mysqlPromise.Connection) { + const version = await getMySQLVersion(connection); + if (isVersionAtLeast(version, '8.4.0')) { + await connection.query('RESET BINARY LOGS AND GTIDS'); + } else { + await connection.query('RESET MASTER'); + } + + const [result] = await connection.query( + `SELECT TABLE_NAME FROM information_schema.tables + WHERE TABLE_SCHEMA = '${TEST_CONNECTION_OPTIONS.database}'` + ); + for (let row of result) { + const name = row.TABLE_NAME; + if (name.startsWith('test_')) { + await connection.query(`DROP TABLE ${name}`); + } + } +} diff --git a/modules/module-mysql/test/tsconfig.json b/modules/module-mysql/test/tsconfig.json new file mode 100644 index 000000000..5257b2739 --- /dev/null +++ b/modules/module-mysql/test/tsconfig.json @@ -0,0 +1,28 @@ +{ + "extends": "../../../tsconfig.base.json", + "compilerOptions": { + "rootDir": "src", + "baseUrl": "./", + "noEmit": true, + "esModuleInterop": true, + "skipLibCheck": true, + "sourceMap": true, + "paths": { + "@/*": ["../../../packages/service-core/src/*"], + "@module/*": ["../src/*"], + "@core-tests/*": ["../../../packages/service-core/test/src/*"] + } + }, + "include": ["src", "../src/replication/zongji/zongji.d.ts"], + "references": [ + { + "path": "../" + }, + { + "path": "../../../packages/service-core/test" + }, + { + "path": "../../../packages/service-core/" + } + ] +} diff --git a/modules/module-mysql/tsconfig.json b/modules/module-mysql/tsconfig.json new file mode 100644 index 000000000..a9d72169d --- /dev/null +++ b/modules/module-mysql/tsconfig.json @@ -0,0 +1,26 @@ +{ + "extends": "../../tsconfig.base.json", + "compilerOptions": { + "rootDir": "src", + "outDir": "dist", + "esModuleInterop": true, + "skipLibCheck": true, + "sourceMap": true, + "typeRoots": ["./node_modules/@types", "./src/replication/zongji.d.ts"] + }, + "include": ["src"], + "references": [ + { + "path": "../../packages/types" + }, + { + "path": "../../packages/sync-rules" + }, + { + "path": "../../packages/service-core" + }, + { + "path": "../../libs/lib-services" + } + ] +} diff --git a/modules/module-mysql/vitest.config.ts b/modules/module-mysql/vitest.config.ts new file mode 100644 index 000000000..7a39c1f71 --- /dev/null +++ b/modules/module-mysql/vitest.config.ts @@ -0,0 +1,15 @@ +import { defineConfig } from 'vitest/config'; +import tsconfigPaths from 'vite-tsconfig-paths'; + +export default defineConfig({ + plugins: [tsconfigPaths()], + test: { + setupFiles: './test/src/setup.ts', + poolOptions: { + threads: { + singleThread: true + } + }, + pool: 'threads' + } +}); diff --git a/modules/module-postgres/CHANGELOG.md b/modules/module-postgres/CHANGELOG.md new file mode 100644 index 000000000..01e900aa0 --- /dev/null +++ 
b/modules/module-postgres/CHANGELOG.md @@ -0,0 +1 @@ +# @powersync/service-module-postgres diff --git a/modules/module-postgres/LICENSE b/modules/module-postgres/LICENSE new file mode 100644 index 000000000..c8efd46cc --- /dev/null +++ b/modules/module-postgres/LICENSE @@ -0,0 +1,67 @@ +# Functional Source License, Version 1.1, Apache 2.0 Future License + +## Abbreviation + +FSL-1.1-Apache-2.0 + +## Notice + +Copyright 2023-2024 Journey Mobile, Inc. + +## Terms and Conditions + +### Licensor ("We") + +The party offering the Software under these Terms and Conditions. + +### The Software + +The "Software" is each version of the software that we make available under these Terms and Conditions, as indicated by our inclusion of these Terms and Conditions with the Software. + +### License Grant + +Subject to your compliance with this License Grant and the Patents, Redistribution and Trademark clauses below, we hereby grant you the right to use, copy, modify, create derivative works, publicly perform, publicly display and redistribute the Software for any Permitted Purpose identified below. + +### Permitted Purpose + +A Permitted Purpose is any purpose other than a Competing Use. A Competing Use means making the Software available to others in a commercial product or service that: + +1. substitutes for the Software; +2. substitutes for any other product or service we offer using the Software that exists as of the date we make the Software available; or +3. offers the same or substantially similar functionality as the Software. + +Permitted Purposes specifically include using the Software: + +1. for your internal use and access; +2. for non-commercial education; +3. for non-commercial research; and +4. in connection with professional services that you provide to a licensee using the Software in accordance with these Terms and Conditions. + +### Patents + +To the extent your use for a Permitted Purpose would necessarily infringe our patents, the license grant above includes a license under our patents. If you make a claim against any party that the Software infringes or contributes to the infringement of any patent, then your patent license to the Software ends immediately. + +### Redistribution + +The Terms and Conditions apply to all copies, modifications and derivatives of the Software. +If you redistribute any copies, modifications or derivatives of the Software, you must include a copy of or a link to these Terms and Conditions and not remove any copyright notices provided in or with the Software. + +### Disclaimer + +THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTIES OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION WARRANTIES OF FITNESS FOR A PARTICULAR PURPOSE, MERCHANTABILITY, TITLE OR NON-INFRINGEMENT. +IN NO EVENT WILL WE HAVE ANY LIABILITY TO YOU ARISING OUT OF OR RELATED TO THE SOFTWARE, INCLUDING INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES, EVEN IF WE HAVE BEEN INFORMED OF THEIR POSSIBILITY IN ADVANCE. + +### Trademarks + +Except for displaying the License Details and identifying us as the origin of the Software, you have no right under these Terms and Conditions to use our trademarks, trade names, service marks or product names. + +## Grant of Future License + +We hereby irrevocably grant you an additional license to use the Software under the Apache License, Version 2.0 that is effective on the second anniversary of the date we make the Software available. 
On or after that date, you may use the Software under the Apache License, Version 2.0, in which case the following will apply: + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. diff --git a/modules/module-postgres/README.md b/modules/module-postgres/README.md new file mode 100644 index 000000000..78377396a --- /dev/null +++ b/modules/module-postgres/README.md @@ -0,0 +1,3 @@ +# PowerSync Service Module Postgres + +Postgres replication module for PowerSync diff --git a/modules/module-postgres/package.json b/modules/module-postgres/package.json new file mode 100644 index 000000000..b34b8dd80 --- /dev/null +++ b/modules/module-postgres/package.json @@ -0,0 +1,46 @@ +{ + "name": "@powersync/service-module-postgres", + "repository": "https://github.com/powersync-ja/powersync-service", + "types": "dist/index.d.ts", + "publishConfig": { + "access": "public" + }, + "version": "0.0.1", + "main": "dist/index.js", + "license": "FSL-1.1-Apache-2.0", + "type": "module", + "scripts": { + "build": "tsc -b", + "build:tests": "tsc -b test/tsconfig.json", + "clean": "rm -rf ./dist && tsc -b --clean", + "test": "vitest" + }, + "exports": { + ".": { + "import": "./dist/index.js", + "require": "./dist/index.js", + "default": "./dist/index.js" + }, + "./types": { + "import": "./dist/types/types.js", + "require": "./dist/types/types.js", + "default": "./dist/types/types.js" + } + }, + "dependencies": { + "@powersync/lib-services-framework": "workspace:*", + "@powersync/service-core": "workspace:*", + "@powersync/service-jpgwire": "workspace:*", + "@powersync/service-jsonbig": "workspace:*", + "@powersync/service-sync-rules": "workspace:*", + "@powersync/service-types": "workspace:*", + "pgwire": "github:kagis/pgwire#f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87", + "jose": "^4.15.1", + "ts-codec": "^1.2.2", + "uuid": "^9.0.1", + "uri-js": "^4.4.1" + }, + "devDependencies": { + "@types/uuid": "^9.0.4" + } +} diff --git a/modules/module-postgres/src/api/PostgresRouteAPIAdapter.ts b/modules/module-postgres/src/api/PostgresRouteAPIAdapter.ts new file mode 100644 index 000000000..11cd1cdbd --- /dev/null +++ b/modules/module-postgres/src/api/PostgresRouteAPIAdapter.ts @@ -0,0 +1,310 @@ +import { api, ParseSyncRulesOptions } from '@powersync/service-core'; +import * as pgwire from '@powersync/service-jpgwire'; + +import * as sync_rules from '@powersync/service-sync-rules'; +import * as service_types from '@powersync/service-types'; +import * as replication_utils from '../replication/replication-utils.js'; +import * as types from '../types/types.js'; +import * as pg_utils from '../utils/pgwire_utils.js'; +import { getDebugTableInfo } from '../replication/replication-utils.js'; +import { PUBLICATION_NAME } from '../replication/WalStream.js'; + +export class PostgresRouteAPIAdapter implements api.RouteAPI { + protected pool: pgwire.PgClient; + + connectionTag: string; + // TODO this should probably be configurable one day + publicationName = PUBLICATION_NAME; + + constructor(protected config: types.ResolvedConnectionConfig) { + this.pool = 
pgwire.connectPgWirePool(config, { + idleTimeout: 30_000 + }); + this.connectionTag = config.tag ?? sync_rules.DEFAULT_TAG; + } + + getParseSyncRulesOptions(): ParseSyncRulesOptions { + return { + defaultSchema: 'public' + }; + } + + async shutdown(): Promise { + await this.pool.end(); + } + + async getSourceConfig(): Promise { + return this.config; + } + + async getConnectionStatus(): Promise { + const base = { + id: this.config.id, + uri: types.baseUri(this.config) + }; + + try { + await pg_utils.retriedQuery(this.pool, `SELECT 'PowerSync connection test'`); + } catch (e) { + return { + ...base, + connected: false, + errors: [{ level: 'fatal', message: e.message }] + }; + } + + try { + await replication_utils.checkSourceConfiguration(this.pool, this.publicationName); + } catch (e) { + return { + ...base, + connected: true, + errors: [{ level: 'fatal', message: e.message }] + }; + } + + return { + ...base, + connected: true, + errors: [] + }; + } + + async executeQuery(query: string, params: any[]): Promise { + if (!this.config.debug_api) { + return service_types.internal_routes.ExecuteSqlResponse.encode({ + results: { + columns: [], + rows: [] + }, + success: false, + error: 'SQL querying is not enabled' + }); + } + + try { + const result = await this.pool.query({ + statement: query, + params: params.map(pg_utils.autoParameter) + }); + + return service_types.internal_routes.ExecuteSqlResponse.encode({ + success: true, + results: { + columns: result.columns.map((c) => c.name), + rows: result.rows.map((row) => { + return row.map((value) => { + const sqlValue = sync_rules.toSyncRulesValue(value); + if (typeof sqlValue == 'bigint') { + return Number(value); + } else if (sync_rules.isJsonValue(sqlValue)) { + return sqlValue; + } else { + return null; + } + }); + }) + } + }); + } catch (e) { + return service_types.internal_routes.ExecuteSqlResponse.encode({ + results: { + columns: [], + rows: [] + }, + success: false, + error: e.message + }); + } + } + + async getDebugTablesInfo( + tablePatterns: sync_rules.TablePattern[], + sqlSyncRules: sync_rules.SqlSyncRules + ): Promise { + let result: api.PatternResult[] = []; + + for (let tablePattern of tablePatterns) { + const schema = tablePattern.schema; + + let patternResult: api.PatternResult = { + schema: schema, + pattern: tablePattern.tablePattern, + wildcard: tablePattern.isWildcard + }; + result.push(patternResult); + + if (tablePattern.isWildcard) { + patternResult.tables = []; + const prefix = tablePattern.tablePrefix; + const results = await pg_utils.retriedQuery(this.pool, { + statement: `SELECT c.oid AS relid, c.relname AS table_name + FROM pg_class c + JOIN pg_namespace n ON n.oid = c.relnamespace + WHERE n.nspname = $1 + AND c.relkind = 'r' + AND c.relname LIKE $2`, + params: [ + { type: 'varchar', value: schema }, + { type: 'varchar', value: tablePattern.tablePattern } + ] + }); + + for (let row of pgwire.pgwireRows(results)) { + const name = row.table_name as string; + const relationId = row.relid as number; + if (!name.startsWith(prefix)) { + continue; + } + const details = await this.getDebugTableInfo(tablePattern, name, relationId, sqlSyncRules); + patternResult.tables.push(details); + } + } else { + const results = await pg_utils.retriedQuery(this.pool, { + statement: `SELECT c.oid AS relid, c.relname AS table_name + FROM pg_class c + JOIN pg_namespace n ON n.oid = c.relnamespace + WHERE n.nspname = $1 + AND c.relkind = 'r' + AND c.relname = $2`, + params: [ + { type: 'varchar', value: schema }, + { type: 'varchar', value: 
tablePattern.tablePattern } + ] + }); + if (results.rows.length == 0) { + // Table not found + patternResult.table = await this.getDebugTableInfo(tablePattern, tablePattern.name, null, sqlSyncRules); + } else { + const row = pgwire.pgwireRows(results)[0]; + const name = row.table_name as string; + const relationId = row.relid as number; + patternResult.table = await this.getDebugTableInfo(tablePattern, name, relationId, sqlSyncRules); + } + } + } + return result; + } + + protected async getDebugTableInfo( + tablePattern: sync_rules.TablePattern, + name: string, + relationId: number | null, + syncRules: sync_rules.SqlSyncRules + ): Promise { + return getDebugTableInfo({ + db: this.pool, + name: name, + publicationName: this.publicationName, + connectionTag: this.connectionTag, + tablePattern: tablePattern, + relationId: relationId, + syncRules: syncRules + }); + } + + async getReplicationLag(options: api.ReplicationLagOptions): Promise { + const { bucketStorage: slotName } = options; + const results = await pg_utils.retriedQuery(this.pool, { + statement: `SELECT + slot_name, + confirmed_flush_lsn, + pg_current_wal_lsn(), + (pg_current_wal_lsn() - confirmed_flush_lsn) AS lsn_distance +FROM pg_replication_slots WHERE slot_name = $1 LIMIT 1;`, + params: [{ type: 'varchar', value: slotName }] + }); + const [row] = pgwire.pgwireRows(results); + if (row) { + return Number(row.lsn_distance); + } + + throw new Error(`Could not determine replication lag for slot ${slotName}`); + } + + async getReplicationHead(): Promise { + const [{ lsn }] = pgwire.pgwireRows( + await pg_utils.retriedQuery(this.pool, `SELECT pg_logical_emit_message(false, 'powersync', 'ping') as lsn`) + ); + return String(lsn); + } + + async getConnectionSchema(): Promise { + // https://github.com/Borvik/vscode-postgres/blob/88ec5ed061a0c9bced6c5d4ec122d0759c3f3247/src/language/server.ts + const results = await pg_utils.retriedQuery( + this.pool, + `SELECT +tbl.schemaname, +tbl.tablename, +tbl.quoted_name, +json_agg(a ORDER BY attnum) as columns +FROM +( + SELECT + n.nspname as schemaname, + c.relname as tablename, + (quote_ident(n.nspname) || '.' || quote_ident(c.relname)) as quoted_name + FROM + pg_catalog.pg_class c + JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE + c.relkind = 'r' + AND n.nspname not in ('information_schema', 'pg_catalog', 'pg_toast') + AND n.nspname not like 'pg_temp_%' + AND n.nspname not like 'pg_toast_temp_%' + AND c.relnatts > 0 + AND has_schema_privilege(n.oid, 'USAGE') = true + AND has_table_privilege(quote_ident(n.nspname) || '.' 
|| quote_ident(c.relname), 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') = true +) as tbl +LEFT JOIN ( + SELECT + attrelid, + attname, + format_type(atttypid, atttypmod) as data_type, + (SELECT typname FROM pg_catalog.pg_type WHERE oid = atttypid) as pg_type, + attnum, + attisdropped + FROM + pg_attribute +) as a ON ( + a.attrelid = tbl.quoted_name::regclass + AND a.attnum > 0 + AND NOT a.attisdropped + AND has_column_privilege(tbl.quoted_name, a.attname, 'SELECT, INSERT, UPDATE, REFERENCES') +) +GROUP BY schemaname, tablename, quoted_name` + ); + const rows = pgwire.pgwireRows(results); + + let schemas: Record = {}; + + for (let row of rows) { + const schema = (schemas[row.schemaname] ??= { + name: row.schemaname, + tables: [] + }); + const table: service_types.TableSchema = { + name: row.tablename, + columns: [] as any[] + }; + schema.tables.push(table); + + const columnInfo = JSON.parse(row.columns); + for (let column of columnInfo) { + let pg_type = column.pg_type as string; + if (pg_type.startsWith('_')) { + pg_type = `${pg_type.substring(1)}[]`; + } + table.columns.push({ + name: column.attname, + sqlite_type: sync_rules.expressionTypeFromPostgresType(pg_type).typeFlags, + type: column.data_type, + internal_type: column.data_type, + pg_type: pg_type + }); + } + } + + return Object.values(schemas); + } +} diff --git a/packages/service-core/src/auth/SupabaseKeyCollector.ts b/modules/module-postgres/src/auth/SupabaseKeyCollector.ts similarity index 68% rename from packages/service-core/src/auth/SupabaseKeyCollector.ts rename to modules/module-postgres/src/auth/SupabaseKeyCollector.ts index 559e0e7f9..c52a9abe2 100644 --- a/packages/service-core/src/auth/SupabaseKeyCollector.ts +++ b/modules/module-postgres/src/auth/SupabaseKeyCollector.ts @@ -1,10 +1,9 @@ -import * as jose from 'jose'; +import { auth } from '@powersync/service-core'; import * as pgwire from '@powersync/service-jpgwire'; -import { connectPgWirePool, pgwireRows } from '@powersync/service-jpgwire'; -import { KeyCollector } from './KeyCollector.js'; -import { KeyOptions, KeySpec } from './KeySpec.js'; -import { retriedQuery } from '../util/pgwire_utils.js'; -import { ResolvedConnection } from '../util/config/types.js'; +import * as jose from 'jose'; + +import * as types from '../types/types.js'; +import * as pgwire_utils from '../utils/pgwire_utils.js'; /** * Fetches key from the Supabase database. @@ -12,16 +11,16 @@ import { ResolvedConnection } from '../util/config/types.js'; * Unfortunately, despite the JWTs containing a kid, we have no way to lookup that kid * before receiving a valid token. */ -export class SupabaseKeyCollector implements KeyCollector { +export class SupabaseKeyCollector implements auth.KeyCollector { private pool: pgwire.PgClient; - private keyOptions: KeyOptions = { + private keyOptions: auth.KeyOptions = { requiresAudience: ['authenticated'], maxLifetimeSeconds: 86400 * 7 + 1200 // 1 week + 20 minutes margin }; - constructor(connection: ResolvedConnection) { - this.pool = connectPgWirePool(connection, { + constructor(connectionConfig: types.ResolvedConnectionConfig) { + this.pool = pgwire.connectPgWirePool(connectionConfig, { // To avoid overloading the source database with open connections, // limit to a single connection, and close the connection shortly // after using it. 
@@ -30,11 +29,15 @@ export class SupabaseKeyCollector implements KeyCollector { }); } + shutdown() { + return this.pool.end(); + } + async getKeys() { let row: { jwt_secret: string }; try { - const rows = pgwireRows( - await retriedQuery(this.pool, `SELECT current_setting('app.settings.jwt_secret') as jwt_secret`) + const rows = pgwire.pgwireRows( + await pgwire_utils.retriedQuery(this.pool, `SELECT current_setting('app.settings.jwt_secret') as jwt_secret`) ); row = rows[0] as any; } catch (e) { @@ -57,7 +60,7 @@ export class SupabaseKeyCollector implements KeyCollector { // While the secret is valid base64, the base64-encoded form is the secret value. k: Buffer.from(secret, 'utf8').toString('base64url') }; - const imported = await KeySpec.importKey(key, this.keyOptions); + const imported = await auth.KeySpec.importKey(key, this.keyOptions); return { keys: [imported], errors: [] diff --git a/modules/module-postgres/src/index.ts b/modules/module-postgres/src/index.ts new file mode 100644 index 000000000..3b0d87195 --- /dev/null +++ b/modules/module-postgres/src/index.ts @@ -0,0 +1 @@ +export * from './module/PostgresModule.js'; diff --git a/modules/module-postgres/src/module/PostgresModule.ts b/modules/module-postgres/src/module/PostgresModule.ts new file mode 100644 index 000000000..5b61275e2 --- /dev/null +++ b/modules/module-postgres/src/module/PostgresModule.ts @@ -0,0 +1,132 @@ +import { api, auth, ConfigurationFileSyncRulesProvider, modules, replication, system } from '@powersync/service-core'; +import * as jpgwire from '@powersync/service-jpgwire'; +import { PostgresRouteAPIAdapter } from '../api/PostgresRouteAPIAdapter.js'; +import { SupabaseKeyCollector } from '../auth/SupabaseKeyCollector.js'; +import { ConnectionManagerFactory } from '../replication/ConnectionManagerFactory.js'; +import { PgManager } from '../replication/PgManager.js'; +import { PostgresErrorRateLimiter } from '../replication/PostgresErrorRateLimiter.js'; +import { checkSourceConfiguration, cleanUpReplicationSlot } from '../replication/replication-utils.js'; +import { WalStreamReplicator } from '../replication/WalStreamReplicator.js'; +import * as types from '../types/types.js'; +import { PostgresConnectionConfig } from '../types/types.js'; +import { PUBLICATION_NAME } from '../replication/WalStream.js'; + +export class PostgresModule extends replication.ReplicationModule { + constructor() { + super({ + name: 'Postgres', + type: types.POSTGRES_CONNECTION_TYPE, + configSchema: types.PostgresConnectionConfig + }); + } + + async initialize(context: system.ServiceContextContainer): Promise { + await super.initialize(context); + + if (context.configuration.base_config.client_auth?.supabase) { + this.registerSupabaseAuth(context); + } + + // Record replicated bytes using global jpgwire metrics. 
+ if (context.metrics) { + jpgwire.setMetricsRecorder({ + addBytesRead(bytes) { + context.metrics!.data_replicated_bytes.add(bytes); + } + }); + } + } + + protected createRouteAPIAdapter(): api.RouteAPI { + return new PostgresRouteAPIAdapter(this.resolveConfig(this.decodedConfig!)); + } + + protected createReplicator(context: system.ServiceContext): replication.AbstractReplicator { + const normalisedConfig = this.resolveConfig(this.decodedConfig!); + const syncRuleProvider = new ConfigurationFileSyncRulesProvider(context.configuration.sync_rules); + const connectionFactory = new ConnectionManagerFactory(normalisedConfig); + + return new WalStreamReplicator({ + id: this.getDefaultId(normalisedConfig.database), + syncRuleProvider: syncRuleProvider, + storageEngine: context.storageEngine, + connectionFactory: connectionFactory, + rateLimiter: new PostgresErrorRateLimiter() + }); + } + + /** + * Combines base config with normalized connection settings + */ + private resolveConfig(config: types.PostgresConnectionConfig): types.ResolvedConnectionConfig { + return { + ...config, + ...types.normalizeConnectionConfig(config) + }; + } + + async teardown(options: modules.TearDownOptions): Promise { + const normalisedConfig = this.resolveConfig(this.decodedConfig!); + const connectionManager = new PgManager(normalisedConfig, { + idleTimeout: 30_000, + maxSize: 1 + }); + + try { + if (options.syncRules) { + // TODO: In the future, once we have more replication types, we will need to check if these syncRules are for Postgres + for (let syncRules of options.syncRules) { + try { + await cleanUpReplicationSlot(syncRules.slot_name, connectionManager.pool); + } catch (e) { + // Not really much we can do here for failures, most likely the database is no longer accessible + this.logger.warn(`Failed to fully clean up Postgres replication slot: ${syncRules.slot_name}`, e); + } + } + } + } finally { + await connectionManager.end(); + } + } + + // TODO: This should rather be done by registering the key collector in some kind of auth engine + private registerSupabaseAuth(context: system.ServiceContextContainer) { + const { configuration } = context; + // Register the Supabase key collector(s) + configuration.connections + ?.map((baseConfig) => { + if (baseConfig.type != types.POSTGRES_CONNECTION_TYPE) { + return; + } + try { + return this.resolveConfig(types.PostgresConnectionConfig.decode(baseConfig as any)); + } catch (ex) { + this.logger.warn('Failed to decode configuration.', ex); + } + }) + .filter((c) => !!c) + .forEach((config) => { + const keyCollector = new SupabaseKeyCollector(config!); + context.lifeCycleEngine.withLifecycle(keyCollector, { + // Close the internal pool + stop: (collector) => collector.shutdown() + }); + configuration.client_keystore.collector.add(new auth.CachedKeyCollector(keyCollector)); + }); + } + + async testConnection(config: PostgresConnectionConfig): Promise { + this.decodeConfig(config); + const normalisedConfig = this.resolveConfig(this.decodedConfig!); + const connectionManager = new PgManager(normalisedConfig, { + idleTimeout: 30_000, + maxSize: 1 + }); + const connection = await connectionManager.snapshotConnection(); + try { + return checkSourceConfiguration(connection, PUBLICATION_NAME); + } finally { + await connectionManager.end(); + } + } +} diff --git a/modules/module-postgres/src/replication/ConnectionManagerFactory.ts b/modules/module-postgres/src/replication/ConnectionManagerFactory.ts new file mode 100644 index 000000000..0c46b9f24 --- /dev/null +++ 
b/modules/module-postgres/src/replication/ConnectionManagerFactory.ts @@ -0,0 +1,28 @@ +import { PgManager } from './PgManager.js'; +import { NormalizedPostgresConnectionConfig } from '../types/types.js'; +import { PgPoolOptions } from '@powersync/service-jpgwire'; +import { logger } from '@powersync/lib-services-framework'; + +export class ConnectionManagerFactory { + private readonly connectionManagers: PgManager[]; + private readonly dbConnectionConfig: NormalizedPostgresConnectionConfig; + + constructor(dbConnectionConfig: NormalizedPostgresConnectionConfig) { + this.dbConnectionConfig = dbConnectionConfig; + this.connectionManagers = []; + } + + create(poolOptions: PgPoolOptions) { + const manager = new PgManager(this.dbConnectionConfig, poolOptions); + this.connectionManagers.push(manager); + return manager; + } + + async shutdown() { + logger.info('Shutting down Postgres connection Managers...'); + for (const manager of this.connectionManagers) { + await manager.end(); + } + logger.info('Postgres connection Managers shutdown completed.'); + } +} diff --git a/packages/service-core/src/util/PgManager.ts b/modules/module-postgres/src/replication/PgManager.ts similarity index 72% rename from packages/service-core/src/util/PgManager.ts rename to modules/module-postgres/src/replication/PgManager.ts index 3c499f5ac..f89e14496 100644 --- a/packages/service-core/src/util/PgManager.ts +++ b/modules/module-postgres/src/replication/PgManager.ts @@ -1,5 +1,5 @@ import * as pgwire from '@powersync/service-jpgwire'; -import { NormalizedPostgresConnection } from '@powersync/service-types'; +import { NormalizedPostgresConnectionConfig } from '../types/types.js'; export class PgManager { /** @@ -9,11 +9,18 @@ export class PgManager { private connectionPromises: Promise[] = []; - constructor(public options: NormalizedPostgresConnection, public poolOptions: pgwire.PgPoolOptions) { + constructor( + public options: NormalizedPostgresConnectionConfig, + public poolOptions: pgwire.PgPoolOptions + ) { // The pool is lazy - no connections are opened until a query is performed. this.pool = pgwire.connectPgWirePool(this.options, poolOptions); } + public get connectionTag() { + return this.options.tag; + } + /** * Create a new replication connection. 
*/ @@ -34,11 +41,12 @@ export class PgManager { return await p; } - async end() { + async end(): Promise { for (let result of await Promise.allSettled([ this.pool.end(), - ...this.connectionPromises.map((promise) => { - return promise.then((connection) => connection.end()); + ...this.connectionPromises.map(async (promise) => { + const connection = await promise; + return await connection.end(); }) ])) { // Throw the first error, if any @@ -51,8 +59,9 @@ export class PgManager { async destroy() { this.pool.destroy(); for (let result of await Promise.allSettled([ - ...this.connectionPromises.map((promise) => { - return promise.then((connection) => connection.destroy()); + ...this.connectionPromises.map(async (promise) => { + const connection = await promise; + return connection.destroy(); }) ])) { // Throw the first error, if any diff --git a/modules/module-postgres/src/replication/PgRelation.ts b/modules/module-postgres/src/replication/PgRelation.ts new file mode 100644 index 000000000..f6d9ac900 --- /dev/null +++ b/modules/module-postgres/src/replication/PgRelation.ts @@ -0,0 +1,31 @@ +import { storage } from '@powersync/service-core'; +import { PgoutputRelation } from '@powersync/service-jpgwire'; + +export type ReplicationIdentity = 'default' | 'nothing' | 'full' | 'index'; + +export function getReplicaIdColumns(relation: PgoutputRelation): storage.ColumnDescriptor[] { + if (relation.replicaIdentity == 'nothing') { + return []; + } else { + return relation.columns + .filter((c) => (c.flags & 0b1) != 0) + .map((c) => ({ name: c.name, typeId: c.typeOid }) satisfies storage.ColumnDescriptor); + } +} +export function getRelId(source: PgoutputRelation): number { + // Source types are wrong here + const relId = (source as any).relationOid as number; + if (!relId) { + throw new Error(`No relation id!`); + } + return relId; +} + +export function getPgOutputRelation(source: PgoutputRelation): storage.SourceEntityDescriptor { + return { + name: source.name, + schema: source.schema, + objectId: getRelId(source), + replicationColumns: getReplicaIdColumns(source) + } satisfies storage.SourceEntityDescriptor; +} diff --git a/modules/module-postgres/src/replication/PostgresErrorRateLimiter.ts b/modules/module-postgres/src/replication/PostgresErrorRateLimiter.ts new file mode 100644 index 000000000..9a86a0704 --- /dev/null +++ b/modules/module-postgres/src/replication/PostgresErrorRateLimiter.ts @@ -0,0 +1,44 @@ +import { setTimeout } from 'timers/promises'; +import { ErrorRateLimiter } from '@powersync/service-core'; + +export class PostgresErrorRateLimiter implements ErrorRateLimiter { + nextAllowed: number = Date.now(); + + async waitUntilAllowed(options?: { signal?: AbortSignal | undefined } | undefined): Promise { + const delay = Math.max(0, this.nextAllowed - Date.now()); + // Minimum delay between connections, even without errors + this.setDelay(500); + await setTimeout(delay, undefined, { signal: options?.signal }); + } + + mayPing(): boolean { + return Date.now() >= this.nextAllowed; + } + + reportError(e: any): void { + const message = (e.message as string) ?? 
''; + if (message.includes('password authentication failed')) { + // Wait 15 minutes, to avoid triggering Supabase's fail2ban + this.setDelay(900_000); + } else if (message.includes('ENOTFOUND')) { + // DNS lookup issue - incorrect URI or deleted instance + this.setDelay(120_000); + } else if (message.includes('ECONNREFUSED')) { + // Could be fail2ban or similar + this.setDelay(120_000); + } else if ( + message.includes('Unable to do postgres query on ended pool') || + message.includes('Postgres unexpectedly closed connection') + ) { + // Connection timed out - ignore / immediately retry + // We don't explicitly set the delay to 0, since there could have been another error that + // we need to respect. + } else { + this.setDelay(30_000); + } + } + + private setDelay(delay: number) { + this.nextAllowed = Math.max(this.nextAllowed, Date.now() + delay); + } +} diff --git a/packages/service-core/src/replication/WalStream.ts b/modules/module-postgres/src/replication/WalStream.ts similarity index 74% rename from packages/service-core/src/replication/WalStream.ts rename to modules/module-postgres/src/replication/WalStream.ts index 46bb0ccc7..67436646d 100644 --- a/packages/service-core/src/replication/WalStream.ts +++ b/modules/module-postgres/src/replication/WalStream.ts @@ -1,20 +1,18 @@ -import * as pgwire from '@powersync/service-jpgwire'; import { container, errors, logger } from '@powersync/lib-services-framework'; -import { SqliteRow, SqlSyncRules, TablePattern, toSyncRulesRow } from '@powersync/service-sync-rules'; - -import * as storage from '../storage/storage-index.js'; -import * as util from '../util/util-index.js'; - -import { getPgOutputRelation, getRelId, PgRelation } from './PgRelation.js'; -import { getReplicationIdentityColumns } from './util.js'; -import { WalConnection } from './WalConnection.js'; -import { Metrics } from '../metrics/Metrics.js'; +import { getUuidReplicaIdentityBson, Metrics, SourceEntityDescriptor, storage } from '@powersync/service-core'; +import * as pgwire from '@powersync/service-jpgwire'; +import { DatabaseInputRow, SqliteRow, SqlSyncRules, TablePattern, toSyncRulesRow } from '@powersync/service-sync-rules'; +import * as pg_utils from '../utils/pgwire_utils.js'; +import { PgManager } from './PgManager.js'; +import { getPgOutputRelation, getRelId } from './PgRelation.js'; +import { checkSourceConfiguration, getReplicationIdentityColumns } from './replication-utils.js'; export const ZERO_LSN = '00000000/00000000'; +export const PUBLICATION_NAME = 'powersync'; +export const POSTGRES_DEFAULT_SCHEMA = 'public'; export interface WalStreamOptions { - connections: util.PgManager; - factory: storage.BucketStorageFactory; + connections: PgManager; storage: storage.SyncRulesBucketStorage; abort_signal: AbortSignal; } @@ -33,29 +31,27 @@ export class WalStream { sync_rules: SqlSyncRules; group_id: number; - wal_connection: WalConnection; connection_id = 1; private readonly storage: storage.SyncRulesBucketStorage; - private slot_name: string; + private readonly slot_name: string; - private connections: util.PgManager; + private connections: PgManager; private abort_signal: AbortSignal; - private relation_cache = new Map(); + private relation_cache = new Map(); private startedStreaming = false; constructor(options: WalStreamOptions) { this.storage = options.storage; - this.sync_rules = options.storage.sync_rules; + this.sync_rules = options.storage.getParsedSyncRules({ defaultSchema: POSTGRES_DEFAULT_SCHEMA }); this.group_id = options.storage.group_id; this.slot_name 
= options.storage.slot_name; this.connections = options.connections; - this.wal_connection = new WalConnection({ db: this.connections.pool, sync_rules: this.sync_rules }); this.abort_signal = options.abort_signal; this.abort_signal.addEventListener( 'abort', @@ -64,7 +60,7 @@ export class WalStream { // Ping to speed up cancellation of streaming replication // We're not using pg_snapshot here, since it could be in the middle of // an initial replication transaction. - const promise = util.retriedQuery( + const promise = pg_utils.retriedQuery( this.connections.pool, `SELECT * FROM pg_logical_emit_message(false, 'powersync', 'ping')` ); @@ -81,14 +77,6 @@ export class WalStream { ); } - get publication_name() { - return this.wal_connection.publication_name; - } - - get connectionTag() { - return this.wal_connection.connectionTag; - } - get stopped() { return this.abort_signal.aborted; } @@ -99,7 +87,7 @@ export class WalStream { tablePattern: TablePattern ): Promise { const schema = tablePattern.schema; - if (tablePattern.connectionTag != this.connectionTag) { + if (tablePattern.connectionTag != this.connections.connectionTag) { return []; } @@ -151,13 +139,13 @@ export class WalStream { const rs = await db.query({ statement: `SELECT 1 FROM pg_publication_tables WHERE pubname = $1 AND schemaname = $2 AND tablename = $3`, params: [ - { type: 'varchar', value: this.publication_name }, + { type: 'varchar', value: PUBLICATION_NAME }, { type: 'varchar', value: tablePattern.schema }, { type: 'varchar', value: name } ] }); if (rs.rows.length == 0) { - logger.info(`Skipping ${tablePattern.schema}.${name} - not part of ${this.publication_name} publication`); + logger.info(`Skipping ${tablePattern.schema}.${name} - not part of ${PUBLICATION_NAME} publication`); continue; } @@ -168,10 +156,9 @@ export class WalStream { { name, schema, - relationId: relid, - replicaIdentity: cresult.replicationIdentity, - replicationColumns: cresult.columns - }, + objectId: relid, + replicationColumns: cresult.replicationColumns + } as SourceEntityDescriptor, false ); @@ -181,7 +168,7 @@ export class WalStream { } async initSlot(): Promise { - await this.wal_connection.checkSourceConfiguration(); + await checkSourceConfiguration(this.connections.pool, PUBLICATION_NAME); const slotName = this.slot_name; @@ -217,7 +204,7 @@ export class WalStream { statement: `SELECT 1 FROM pg_catalog.pg_logical_slot_peek_binary_changes($1, NULL, 1000, 'proto_version', '1', 'publication_names', $2)`, params: [ { type: 'varchar', value: slotName }, - { type: 'varchar', value: this.publication_name } + { type: 'varchar', value: PUBLICATION_NAME } ] }); @@ -354,21 +341,23 @@ WHERE oid = $1::regclass`, async initialReplication(db: pgwire.PgConnection, lsn: string) { const sourceTables = this.sync_rules.getSourceTables(); - await this.storage.startBatch({}, async (batch) => { - for (let tablePattern of sourceTables) { - const tables = await this.getQualifiedTableNames(batch, db, tablePattern); - for (let table of tables) { - await this.snapshotTable(batch, db, table); - await batch.markSnapshotDone([table], lsn); - - await touch(); + await this.storage.startBatch( + { zeroLSN: ZERO_LSN, defaultSchema: POSTGRES_DEFAULT_SCHEMA, storeCurrentData: true }, + async (batch) => { + for (let tablePattern of sourceTables) { + const tables = await this.getQualifiedTableNames(batch, db, tablePattern); + for (let table of tables) { + await this.snapshotTable(batch, db, table); + await batch.markSnapshotDone([table], lsn); + await touch(); + } } + await 
batch.commit(lsn); } - await batch.commit(lsn); - }); + ); } - static *getQueryData(results: Iterable): Generator { + static *getQueryData(results: Iterable): Generator { for (let row of results) { yield toSyncRulesRow(row); } @@ -379,7 +368,7 @@ WHERE oid = $1::regclass`, const estimatedCount = await this.estimatedCount(db, table); let at = 0; let lastLogIndex = 0; - const cursor = await db.stream({ statement: `SELECT * FROM ${table.escapedIdentifier}` }); + const cursor = db.stream({ statement: `SELECT * FROM ${table.escapedIdentifier}` }); let columns: { i: number; name: string }[] = []; // pgwire streams rows in chunks. // These chunks can be quite small (as little as 16KB), so we don't flush chunks automatically. @@ -394,7 +383,7 @@ WHERE oid = $1::regclass`, } const rows = chunk.rows.map((row) => { - let q: pgwire.DatabaseInputRow = {}; + let q: DatabaseInputRow = {}; for (let c of columns) { q[c.name] = row[c.i]; } @@ -408,10 +397,18 @@ WHERE oid = $1::regclass`, throw new Error(`Aborted initial replication of ${this.slot_name}`); } - for (let record of WalStream.getQueryData(rows)) { + for (const record of WalStream.getQueryData(rows)) { // This auto-flushes when the batch reaches its size limit - await batch.save({ tag: 'insert', sourceTable: table, before: undefined, after: record }); + await batch.save({ + tag: storage.SaveOperationTag.INSERT, + sourceTable: table, + before: undefined, + beforeReplicaId: undefined, + after: record, + afterReplicaId: getUuidReplicaIdentityBson(record, table.replicaIdColumns) + }); } + at += rows.length; Metrics.getInstance().rows_replicated_total.add(rows.length); @@ -421,18 +418,18 @@ WHERE oid = $1::regclass`, await batch.flush(); } - async handleRelation(batch: storage.BucketStorageBatch, relation: PgRelation, snapshot: boolean) { - if (relation.relationId == null || typeof relation.relationId != 'number') { - throw new Error('relationId expected'); + async handleRelation(batch: storage.BucketStorageBatch, descriptor: SourceEntityDescriptor, snapshot: boolean) { + if (!descriptor.objectId && typeof descriptor.objectId != 'number') { + throw new Error('objectId expected'); } const result = await this.storage.resolveTable({ group_id: this.group_id, connection_id: this.connection_id, - connection_tag: this.connectionTag, - relation: relation, + connection_tag: this.connections.connectionTag, + entity_descriptor: descriptor, sync_rules: this.sync_rules }); - this.relation_cache.set(relation.relationId, result.table); + this.relation_cache.set(descriptor.objectId, result.table); // Drop conflicting tables. This includes for example renamed tables. await batch.drop(result.dropTables); @@ -501,20 +498,41 @@ WHERE oid = $1::regclass`, if (msg.tag == 'insert') { Metrics.getInstance().rows_replicated_total.add(1); - const baseRecord = util.constructAfterRecord(msg); - return await batch.save({ tag: 'insert', sourceTable: table, before: undefined, after: baseRecord }); + const baseRecord = pg_utils.constructAfterRecord(msg); + return await batch.save({ + tag: storage.SaveOperationTag.INSERT, + sourceTable: table, + before: undefined, + beforeReplicaId: undefined, + after: baseRecord, + afterReplicaId: getUuidReplicaIdentityBson(baseRecord, table.replicaIdColumns) + }); } else if (msg.tag == 'update') { Metrics.getInstance().rows_replicated_total.add(1); // "before" may be null if the replica id columns are unchanged // It's fine to treat that the same as an insert. 
- const before = util.constructBeforeRecord(msg); - const after = util.constructAfterRecord(msg); - return await batch.save({ tag: 'update', sourceTable: table, before: before, after: after }); + const before = pg_utils.constructBeforeRecord(msg); + const after = pg_utils.constructAfterRecord(msg); + return await batch.save({ + tag: storage.SaveOperationTag.UPDATE, + sourceTable: table, + before: before, + beforeReplicaId: before ? getUuidReplicaIdentityBson(before, table.replicaIdColumns) : undefined, + after: after, + afterReplicaId: getUuidReplicaIdentityBson(after, table.replicaIdColumns) + }); } else if (msg.tag == 'delete') { Metrics.getInstance().rows_replicated_total.add(1); - const before = util.constructBeforeRecord(msg)!; - - return await batch.save({ tag: 'delete', sourceTable: table, before: before, after: undefined }); + const before = pg_utils.constructBeforeRecord(msg)!; + + return await batch.save({ + tag: storage.SaveOperationTag.DELETE, + sourceTable: table, + before: before, + beforeReplicaId: getUuidReplicaIdentityBson(before, table.replicaIdColumns), + after: undefined, + afterReplicaId: undefined + }); } } else if (msg.tag == 'truncate') { let tables: storage.SourceTable[] = []; @@ -554,7 +572,7 @@ WHERE oid = $1::regclass`, slot: this.slot_name, options: { proto_version: '1', - publication_names: this.publication_name + publication_names: PUBLICATION_NAME } }); this.startedStreaming = true; @@ -562,56 +580,58 @@ WHERE oid = $1::regclass`, // Auto-activate as soon as initial replication is done await this.storage.autoActivate(); - await this.storage.startBatch({}, async (batch) => { - // Replication never starts in the middle of a transaction - let inTx = false; - let count = 0; + await this.storage.startBatch( + { zeroLSN: ZERO_LSN, defaultSchema: POSTGRES_DEFAULT_SCHEMA, storeCurrentData: true }, + async (batch) => { + // Replication never starts in the middle of a transaction + let inTx = false; + let count = 0; - for await (const chunk of replicationStream.pgoutputDecode()) { - await touch(); + for await (const chunk of replicationStream.pgoutputDecode()) { + await touch(); - if (this.abort_signal.aborted) { - break; - } + if (this.abort_signal.aborted) { + break; + } - // chunkLastLsn may come from normal messages in the chunk, - // or from a PrimaryKeepalive message. - const { messages, lastLsn: chunkLastLsn } = chunk; - - for (const msg of messages) { - if (msg.tag == 'relation') { - await this.handleRelation(batch, getPgOutputRelation(msg), true); - } else if (msg.tag == 'begin') { - inTx = true; - } else if (msg.tag == 'commit') { - Metrics.getInstance().transactions_replicated_total.add(1); - inTx = false; - await batch.commit(msg.lsn!); - await this.ack(msg.lsn!, replicationStream); - } else { - if (count % 100 == 0) { - logger.info(`${this.slot_name} replicating op ${count} ${msg.lsn}`); - } + // chunkLastLsn may come from normal messages in the chunk, + // or from a PrimaryKeepalive message. 
+ const { messages, lastLsn: chunkLastLsn } = chunk; + for (const msg of messages) { + if (msg.tag == 'relation') { + await this.handleRelation(batch, getPgOutputRelation(msg), true); + } else if (msg.tag == 'begin') { + inTx = true; + } else if (msg.tag == 'commit') { + Metrics.getInstance().transactions_replicated_total.add(1); + inTx = false; + await batch.commit(msg.lsn!); + await this.ack(msg.lsn!, replicationStream); + } else { + if (count % 100 == 0) { + logger.info(`${this.slot_name} replicating op ${count} ${msg.lsn}`); + } - count += 1; - const result = await this.writeChange(batch, msg); + count += 1; + await this.writeChange(batch, msg); + } } - } - if (!inTx) { - // In a transaction, we ack and commit according to the transaction progress. - // Outside transactions, we use the PrimaryKeepalive messages to advance progress. - // Big caveat: This _must not_ be used to skip individual messages, since this LSN - // may be in the middle of the next transaction. - // It must only be used to associate checkpoints with LSNs. - if (await batch.keepalive(chunkLastLsn)) { - await this.ack(chunkLastLsn, replicationStream); + if (!inTx) { + // In a transaction, we ack and commit according to the transaction progress. + // Outside transactions, we use the PrimaryKeepalive messages to advance progress. + // Big caveat: This _must not_ be used to skip individual messages, since this LSN + // may be in the middle of the next transaction. + // It must only be used to associate checkpoints with LSNs. + if (await batch.keepalive(chunkLastLsn)) { + await this.ack(chunkLastLsn, replicationStream); + } } - } - Metrics.getInstance().chunks_replicated_total.add(1); + Metrics.getInstance().chunks_replicated_total.add(1); + } } - }); + ); } async ack(lsn: string, replicationStream: pgwire.ReplicationStream) { diff --git a/packages/service-core/src/replication/WalStreamRunner.ts b/modules/module-postgres/src/replication/WalStreamReplicationJob.ts similarity index 58% rename from packages/service-core/src/replication/WalStreamRunner.ts rename to modules/module-postgres/src/replication/WalStreamReplicationJob.ts index ce3ff8759..40247452a 100644 --- a/packages/service-core/src/replication/WalStreamRunner.ts +++ b/modules/module-postgres/src/replication/WalStreamReplicationJob.ts @@ -1,73 +1,78 @@ -import * as pgwire from '@powersync/service-jpgwire'; - -import * as storage from '../storage/storage-index.js'; -import * as util from '../util/util-index.js'; - -import { ErrorRateLimiter } from './ErrorRateLimiter.js'; +import { container } from '@powersync/lib-services-framework'; +import { PgManager } from './PgManager.js'; import { MissingReplicationSlotError, WalStream } from './WalStream.js'; -import { ResolvedConnection } from '../util/config/types.js'; -import { container, logger } from '@powersync/lib-services-framework'; - -export interface WalStreamRunnerOptions { - factory: storage.BucketStorageFactory; - storage: storage.SyncRulesBucketStorage; - source_db: ResolvedConnection; - lock: storage.ReplicationLock; - rateLimiter?: ErrorRateLimiter; -} - -export class WalStreamRunner { - private abortController = new AbortController(); - private runPromise?: Promise; +import { replication } from '@powersync/service-core'; +import { ConnectionManagerFactory } from './ConnectionManagerFactory.js'; - private connections: util.PgManager | null = null; +export interface WalStreamReplicationJobOptions extends replication.AbstractReplicationJobOptions { + connectionFactory: ConnectionManagerFactory; +} - private 
rateLimiter?: ErrorRateLimiter; +export class WalStreamReplicationJob extends replication.AbstractReplicationJob { + private connectionFactory: ConnectionManagerFactory; + private readonly connectionManager: PgManager; - constructor(public options: WalStreamRunnerOptions) { - this.rateLimiter = options.rateLimiter; + constructor(options: WalStreamReplicationJobOptions) { + super(options); + this.connectionFactory = options.connectionFactory; + this.connectionManager = this.connectionFactory.create({ + // Pool connections are only used intermittently. + idleTimeout: 30_000, + maxSize: 2 + }); } - start() { - this.runPromise = this.run(); + /** + * Postgres on RDS performs a WAL checkpoint every 5 minutes by default, which creates a new 64MB file. + * + * The old WAL files are only deleted once no replication slot still references them. + * + * Unfortunately, when there are no changes to the db, the database creates new WAL files without the replication slot + * advancing**. + * + * As a workaround, we write a new message every couple of minutes, to make sure that the replication slot advances. + * + * **This may be a bug in pgwire or how we're using it. + */ + async keepAlive() { + try { + await this.connectionManager.pool.query(`SELECT * FROM pg_logical_emit_message(false, 'powersync', 'ping')`); + } catch (e) { + this.logger.warn(`KeepAlive failed, unable to post to WAL`, e); + } } - get slot_name() { + get slotName() { return this.options.storage.slot_name; } - get stopped() { - return this.abortController.signal.aborted; - } - - async run() { + async replicate() { try { await this.replicateLoop(); } catch (e) { // Fatal exception container.reporter.captureException(e, { metadata: { - replication_slot: this.slot_name + replication_slot: this.slotName } }); - logger.error(`Replication failed on ${this.slot_name}`, e); + this.logger.error(`Replication failed on ${this.slotName}`, e); if (e instanceof MissingReplicationSlotError) { // This stops replication on this slot, and creates a new slot - await this.options.storage.factory.slotRemoved(this.slot_name); + await this.options.storage.factory.slotRemoved(this.slotName); } } finally { this.abortController.abort(); } - await this.options.lock.release(); } async replicateLoop() { - while (!this.stopped) { + while (!this.isStopped) { await this.replicateOnce(); - if (!this.stopped) { + if (!this.isStopped) { await new Promise((resolve) => setTimeout(resolve, 5000)); } } @@ -77,26 +82,24 @@ export class WalStreamRunner { // New connections on every iteration (every error with retry), // otherwise we risk repeating errors related to the connection, // such as caused by cached PG schemas. - let connections = new util.PgManager(this.options.source_db, { + const connectionManager = this.connectionFactory.create({ // Pool connections are only used intermittently.
idleTimeout: 30_000, maxSize: 2 }); - this.connections = connections; try { await this.rateLimiter?.waitUntilAllowed({ signal: this.abortController.signal }); - if (this.stopped) { + if (this.isStopped) { return; } const stream = new WalStream({ abort_signal: this.abortController.signal, - factory: this.options.factory, storage: this.options.storage, - connections + connections: connectionManager }); await stream.replicate(); } catch (e) { - logger.error(`Replication error`, e); + this.logger.error(`Replication error`, e); if (e.cause != null) { // Example: // PgError.conn_ended: Unable to do postgres query on ended connection @@ -118,7 +121,7 @@ export class WalStreamRunner { // [Symbol(pg.ErrorResponse)]: undefined // } // Without this additional log, the cause would not be visible in the logs. - logger.error(`cause`, e.cause); + this.logger.error(`cause`, e.cause); } if (e instanceof MissingReplicationSlotError) { throw e; @@ -126,55 +129,14 @@ export class WalStreamRunner { // Report the error if relevant, before retrying container.reporter.captureException(e, { metadata: { - replication_slot: this.slot_name + replication_slot: this.slotName } }); // This sets the retry delay this.rateLimiter?.reportError(e); } } finally { - this.connections = null; - if (connections != null) { - await connections.end(); - } - } - } - - /** - * This will also release the lock if start() was called earlier. - */ - async stop(options?: { force?: boolean }) { - logger.info(`${this.slot_name} Stopping replication`); - // End gracefully - this.abortController.abort(); - - if (options?.force) { - // destroy() is more forceful. - await this.connections?.destroy(); + await connectionManager.end(); } - await this.runPromise; - } - - /** - * Terminate this replication stream. This drops the replication slot and deletes the replication data. - * - * Stops replication if needed. 
- */ - async terminate(options?: { force?: boolean }) { - logger.info(`${this.slot_name} Terminating replication`); - await this.stop(options); - - const slotName = this.slot_name; - const db = await pgwire.connectPgWire(this.options.source_db, { type: 'standard' }); - try { - await db.query({ - statement: 'SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots WHERE slot_name = $1', - params: [{ type: 'varchar', value: slotName }] - }); - } finally { - await db.end(); - } - - await this.options.storage.terminate(); } } diff --git a/modules/module-postgres/src/replication/WalStreamReplicator.ts b/modules/module-postgres/src/replication/WalStreamReplicator.ts new file mode 100644 index 000000000..14a21725e --- /dev/null +++ b/modules/module-postgres/src/replication/WalStreamReplicator.ts @@ -0,0 +1,45 @@ +import { replication, storage } from '@powersync/service-core'; +import { ConnectionManagerFactory } from './ConnectionManagerFactory.js'; +import { cleanUpReplicationSlot } from './replication-utils.js'; +import { WalStreamReplicationJob } from './WalStreamReplicationJob.js'; + +export interface WalStreamReplicatorOptions extends replication.AbstractReplicatorOptions { + connectionFactory: ConnectionManagerFactory; +} + +export class WalStreamReplicator extends replication.AbstractReplicator { + private readonly connectionFactory: ConnectionManagerFactory; + + constructor(options: WalStreamReplicatorOptions) { + super(options); + this.connectionFactory = options.connectionFactory; + } + + createJob(options: replication.CreateJobOptions): WalStreamReplicationJob { + return new WalStreamReplicationJob({ + id: this.createJobId(options.storage.group_id), + storage: options.storage, + connectionFactory: this.connectionFactory, + lock: options.lock, + rateLimiter: this.rateLimiter + }); + } + + async cleanUp(syncRulesStorage: storage.SyncRulesBucketStorage): Promise { + const connectionManager = this.connectionFactory.create({ + idleTimeout: 30_000, + maxSize: 1 + }); + try { + // TODO: Slot_name will likely have to come from a different source in the future + await cleanUpReplicationSlot(syncRulesStorage.slot_name, connectionManager.pool); + } finally { + await connectionManager.end(); + } + } + + async stop(): Promise { + await super.stop(); + await this.connectionFactory.shutdown(); + } +} diff --git a/modules/module-postgres/src/replication/replication-index.ts b/modules/module-postgres/src/replication/replication-index.ts new file mode 100644 index 000000000..545553c1e --- /dev/null +++ b/modules/module-postgres/src/replication/replication-index.ts @@ -0,0 +1,5 @@ +export * from './PgRelation.js'; +export * from './replication-utils.js'; +export * from './WalStream.js'; +export * from './WalStreamReplicator.js'; +export * from './WalStreamReplicationJob.js'; diff --git a/modules/module-postgres/src/replication/replication-utils.ts b/modules/module-postgres/src/replication/replication-utils.ts new file mode 100644 index 000000000..c6b1e3fe1 --- /dev/null +++ b/modules/module-postgres/src/replication/replication-utils.ts @@ -0,0 +1,329 @@ +import * as pgwire from '@powersync/service-jpgwire'; + +import { PatternResult, storage } from '@powersync/service-core'; +import * as pgwire_utils from '../utils/pgwire_utils.js'; +import { ReplicationIdentity } from './PgRelation.js'; +import * as sync_rules from '@powersync/service-sync-rules'; +import * as service_types from '@powersync/service-types'; +import * as pg_utils from '../utils/pgwire_utils.js'; +import * as util from 
'../utils/pgwire_utils.js'; +import { logger } from '@powersync/lib-services-framework'; + +export interface ReplicaIdentityResult { + replicationColumns: storage.ColumnDescriptor[]; + replicationIdentity: ReplicationIdentity; +} + +export async function getPrimaryKeyColumns( + db: pgwire.PgClient, + relationId: number, + mode: 'primary' | 'replident' +): Promise { + const indexFlag = mode == 'primary' ? `i.indisprimary` : `i.indisreplident`; + const attrRows = await pgwire_utils.retriedQuery(db, { + statement: `SELECT a.attname as name, a.atttypid as typeid, t.typname as type, a.attnum as attnum + FROM pg_index i + JOIN pg_attribute a ON a.attrelid = i.indrelid AND a.attnum = ANY (i.indkey) + JOIN pg_type t ON a.atttypid = t.oid + WHERE i.indrelid = $1::oid + AND ${indexFlag} + AND a.attnum > 0 + ORDER BY a.attnum`, + params: [{ value: relationId, type: 'int4' }] + }); + + return attrRows.rows.map((row) => { + return { + name: row[0] as string, + typeId: row[1] as number + } satisfies storage.ColumnDescriptor; + }); +} + +export async function getAllColumns(db: pgwire.PgClient, relationId: number): Promise { + const attrRows = await pgwire_utils.retriedQuery(db, { + statement: `SELECT a.attname as name, a.atttypid as typeid, t.typname as type, a.attnum as attnum + FROM pg_attribute a + JOIN pg_type t ON a.atttypid = t.oid + WHERE a.attrelid = $1::oid + AND attnum > 0 + ORDER BY a.attnum`, + params: [{ type: 'varchar', value: relationId }] + }); + return attrRows.rows.map((row) => { + return { + name: row[0] as string, + typeId: row[1] as number + } satisfies storage.ColumnDescriptor; + }); +} + +export async function getReplicationIdentityColumns( + db: pgwire.PgClient, + relationId: number +): Promise { + const rows = await pgwire_utils.retriedQuery(db, { + statement: `SELECT CASE relreplident + WHEN 'd' THEN 'default' + WHEN 'n' THEN 'nothing' + WHEN 'f' THEN 'full' + WHEN 'i' THEN 'index' + END AS replica_identity +FROM pg_class +WHERE oid = $1::oid LIMIT 1`, + params: [{ type: 'int8', value: relationId }] + }); + const idType: string = rows.rows[0]?.[0]; + if (idType == 'nothing' || idType == null) { + return { replicationIdentity: 'nothing', replicationColumns: [] }; + } else if (idType == 'full') { + return { replicationIdentity: 'full', replicationColumns: await getAllColumns(db, relationId) }; + } else if (idType == 'default') { + return { + replicationIdentity: 'default', + replicationColumns: await getPrimaryKeyColumns(db, relationId, 'primary') + }; + } else if (idType == 'index') { + return { + replicationIdentity: 'index', + replicationColumns: await getPrimaryKeyColumns(db, relationId, 'replident') + }; + } else { + return { replicationIdentity: 'nothing', replicationColumns: [] }; + } +} + +export async function checkSourceConfiguration(db: pgwire.PgClient, publicationName: string): Promise { + // Check basic config + await pgwire_utils.retriedQuery( + db, + `DO $$ +BEGIN +if current_setting('wal_level') is distinct from 'logical' then +raise exception 'wal_level must be set to ''logical'', your database has it set to ''%''. Please edit your config file and restart PostgreSQL.', current_setting('wal_level'); +end if; +if (current_setting('max_replication_slots')::int >= 1) is not true then +raise exception 'Your max_replication_slots setting is too low, it must be greater than 1. 
Please edit your config file and restart PostgreSQL.'; +end if; +if (current_setting('max_wal_senders')::int >= 1) is not true then +raise exception 'Your max_wal_senders setting is too low, it must be greater than 1. Please edit your config file and restart PostgreSQL.'; +end if; +end; +$$ LANGUAGE plpgsql;` + ); + + // Check that publication exists + const rs = await pgwire_utils.retriedQuery(db, { + statement: `SELECT * FROM pg_publication WHERE pubname = $1`, + params: [{ type: 'varchar', value: publicationName }] + }); + const row = pgwire.pgwireRows(rs)[0]; + if (row == null) { + throw new Error( + `Publication '${publicationName}' does not exist. Run: \`CREATE PUBLICATION ${publicationName} FOR ALL TABLES\`, or read the documentation for details.` + ); + } + if (row.pubinsert == false || row.pubupdate == false || row.pubdelete == false || row.pubtruncate == false) { + throw new Error( + `Publication '${publicationName}' does not publish all changes. Create a publication using \`WITH (publish = "insert, update, delete, truncate")\` (the default).` + ); + } + if (row.pubviaroot) { + throw new Error(`'${publicationName}' uses publish_via_partition_root, which is not supported.`); + } +} + +export interface GetDebugTablesInfoOptions { + db: pgwire.PgClient; + publicationName: string; + connectionTag: string; + tablePatterns: sync_rules.TablePattern[]; + syncRules: sync_rules.SqlSyncRules; +} + +export async function getDebugTablesInfo(options: GetDebugTablesInfoOptions): Promise { + const { db, publicationName, connectionTag, tablePatterns, syncRules } = options; + let result: PatternResult[] = []; + + for (let tablePattern of tablePatterns) { + const schema = tablePattern.schema; + + let patternResult: PatternResult = { + schema: schema, + pattern: tablePattern.tablePattern, + wildcard: tablePattern.isWildcard + }; + result.push(patternResult); + + if (tablePattern.isWildcard) { + patternResult.tables = []; + const prefix = tablePattern.tablePrefix; + const results = await util.retriedQuery(db, { + statement: `SELECT c.oid AS relid, c.relname AS table_name + FROM pg_class c + JOIN pg_namespace n ON n.oid = c.relnamespace + WHERE n.nspname = $1 + AND c.relkind = 'r' + AND c.relname LIKE $2`, + params: [ + { type: 'varchar', value: schema }, + { type: 'varchar', value: tablePattern.tablePattern } + ] + }); + + for (let row of pgwire.pgwireRows(results)) { + const name = row.table_name as string; + const relationId = row.relid as number; + if (!name.startsWith(prefix)) { + continue; + } + const details = await getDebugTableInfo({ + db, + name, + publicationName, + connectionTag, + tablePattern, + relationId, + syncRules: syncRules + }); + patternResult.tables.push(details); + } + } else { + const results = await util.retriedQuery(db, { + statement: `SELECT c.oid AS relid, c.relname AS table_name + FROM pg_class c + JOIN pg_namespace n ON n.oid = c.relnamespace + WHERE n.nspname = $1 + AND c.relkind = 'r' + AND c.relname = $2`, + params: [ + { type: 'varchar', value: schema }, + { type: 'varchar', value: tablePattern.tablePattern } + ] + }); + if (results.rows.length == 0) { + // Table not found + patternResult.table = await getDebugTableInfo({ + db, + name: tablePattern.name, + publicationName, + connectionTag, + tablePattern, + relationId: null, + syncRules: syncRules + }); + } else { + const row = pgwire.pgwireRows(results)[0]; + const name = row.table_name as string; + const relationId = row.relid as number; + patternResult.table = await getDebugTableInfo({ + db, + name, + 
publicationName, + connectionTag, + tablePattern, + relationId, + syncRules: syncRules + }); + } + } + } + return result; +} + +export interface GetDebugTableInfoOptions { + db: pgwire.PgClient; + name: string; + publicationName: string; + connectionTag: string; + tablePattern: sync_rules.TablePattern; + relationId: number | null; + syncRules: sync_rules.SqlSyncRules; +} + +export async function getDebugTableInfo(options: GetDebugTableInfoOptions): Promise { + const { db, name, publicationName, connectionTag, tablePattern, relationId, syncRules } = options; + const schema = tablePattern.schema; + let id_columns_result: ReplicaIdentityResult | undefined = undefined; + let id_columns_error = null; + + if (relationId != null) { + try { + id_columns_result = await getReplicationIdentityColumns(db, relationId); + } catch (e) { + id_columns_error = { level: 'fatal', message: e.message }; + } + } + + const id_columns = id_columns_result?.replicationColumns ?? []; + + const sourceTable = new storage.SourceTable(0, connectionTag, relationId ?? 0, schema, name, id_columns, true); + + const syncData = syncRules.tableSyncsData(sourceTable); + const syncParameters = syncRules.tableSyncsParameters(sourceTable); + + if (relationId == null) { + return { + schema: schema, + name: name, + pattern: tablePattern.isWildcard ? tablePattern.tablePattern : undefined, + replication_id: [], + data_queries: syncData, + parameter_queries: syncParameters, + // Also + errors: [{ level: 'warning', message: `Table ${sourceTable.qualifiedName} not found.` }] + }; + } + if (id_columns.length == 0 && id_columns_error == null) { + let message = `No replication id found for ${sourceTable.qualifiedName}. Replica identity: ${id_columns_result?.replicationIdentity}.`; + if (id_columns_result?.replicationIdentity == 'default') { + message += ' Configure a primary key on the table.'; + } + id_columns_error = { level: 'fatal', message }; + } + + let selectError = null; + try { + await pg_utils.retriedQuery(db, `SELECT * FROM ${sourceTable.escapedIdentifier} LIMIT 1`); + } catch (e) { + selectError = { level: 'fatal', message: e.message }; + } + + let replicateError = null; + + const publications = await pg_utils.retriedQuery(db, { + statement: `SELECT tablename FROM pg_publication_tables WHERE pubname = $1 AND schemaname = $2 AND tablename = $3`, + params: [ + { type: 'varchar', value: publicationName }, + { type: 'varchar', value: tablePattern.schema }, + { type: 'varchar', value: name } + ] + }); + if (publications.rows.length == 0) { + replicateError = { + level: 'fatal', + message: `Table ${sourceTable.qualifiedName} is not part of publication '${publicationName}'. Run: \`ALTER PUBLICATION ${publicationName} ADD TABLE ${sourceTable.qualifiedName}\`.` + }; + } + + return { + schema: schema, + name: name, + pattern: tablePattern.isWildcard ? 
tablePattern.tablePattern : undefined, + replication_id: id_columns.map((c) => c.name), + data_queries: syncData, + parameter_queries: syncParameters, + errors: [id_columns_error, selectError, replicateError].filter( + (error) => error != null + ) as service_types.ReplicationError[] + }; +} + +export async function cleanUpReplicationSlot(slotName: string, db: pgwire.PgClient): Promise<void> { + logger.info(`Cleaning up Postgres replication slot: ${slotName}...`); + + await db.query({ + statement: 'SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots WHERE slot_name = $1', + params: [{ type: 'varchar', value: slotName }] + }); +} diff --git a/modules/module-postgres/src/types/types.ts b/modules/module-postgres/src/types/types.ts new file mode 100644 index 000000000..6ab1c4196 --- /dev/null +++ b/modules/module-postgres/src/types/types.ts @@ -0,0 +1,158 @@ +import * as service_types from '@powersync/service-types'; +import * as t from 'ts-codec'; +import * as urijs from 'uri-js'; + +export const POSTGRES_CONNECTION_TYPE = 'postgresql' as const; + +export interface NormalizedPostgresConnectionConfig { + id: string; + tag: string; + + hostname: string; + port: number; + database: string; + + username: string; + password: string; + + sslmode: 'verify-full' | 'verify-ca' | 'disable'; + cacert: string | undefined; + + client_certificate: string | undefined; + client_private_key: string | undefined; +} + +export const PostgresConnectionConfig = service_types.configFile.DataSourceConfig.and( + t.object({ + type: t.literal(POSTGRES_CONNECTION_TYPE), + /** Unique identifier for the connection - optional when a single connection is present. */ + id: t.string.optional(), + /** Tag used as reference in sync rules. Defaults to "default". Does not have to be unique. */ + tag: t.string.optional(), + uri: t.string.optional(), + hostname: t.string.optional(), + port: service_types.configFile.portCodec.optional(), + username: t.string.optional(), + password: t.string.optional(), + database: t.string.optional(), + + /** Defaults to verify-full */ + sslmode: t.literal('verify-full').or(t.literal('verify-ca')).or(t.literal('disable')).optional(), + /** Required for verify-ca, optional for verify-full */ + cacert: t.string.optional(), + + client_certificate: t.string.optional(), + client_private_key: t.string.optional(), + + /** Expose database credentials */ + demo_database: t.boolean.optional(), + + /** + * Prefix for the slot name. Defaults to "powersync_" + */ + slot_name_prefix: t.string.optional() + }) +); + +/** + * Config input specified when starting services + */ +export type PostgresConnectionConfig = t.Decoded<typeof PostgresConnectionConfig>; + +/** + * Resolved version of {@link PostgresConnectionConfig} + */ +export type ResolvedConnectionConfig = PostgresConnectionConfig & NormalizedPostgresConnectionConfig; + +/** + * Validate and normalize connection options. + * + * Returns destructured options. + */ +export function normalizeConnectionConfig(options: PostgresConnectionConfig): NormalizedPostgresConnectionConfig { + let uri: urijs.URIComponents; + if (options.uri) { + uri = urijs.parse(options.uri); + if (uri.scheme != 'postgresql' && uri.scheme != 'postgres') { + throw new Error(`Invalid URI - protocol must be postgresql, got ${uri.scheme}`); + } else if (uri.scheme != 'postgresql') { + uri.scheme = 'postgresql'; + } + } else { + uri = urijs.parse('postgresql:///'); + } + + const hostname = options.hostname ?? uri.host ?? ''; + const port = validatePort(options.port ?? uri.port ?? 5432); + + const database = options.database ?? 
uri.path?.substring(1) ?? ''; + + const [uri_username, uri_password] = (uri.userinfo ?? '').split(':'); + + const username = options.username ?? uri_username ?? ''; + const password = options.password ?? uri_password ?? ''; + + const sslmode = options.sslmode ?? 'verify-full'; // Configuration not supported via URI + const cacert = options.cacert; + + if (sslmode == 'verify-ca' && cacert == null) { + throw new Error('Explicit cacert is required for sslmode=verify-ca'); + } + + if (hostname == '') { + throw new Error(`hostname required`); + } + + if (username == '') { + throw new Error(`username required`); + } + + if (password == '') { + throw new Error(`password required`); + } + + if (database == '') { + throw new Error(`database required`); + } + + return { + id: options.id ?? 'default', + tag: options.tag ?? 'default', + + hostname, + port, + database, + + username, + password, + sslmode, + cacert, + + client_certificate: options.client_certificate ?? undefined, + client_private_key: options.client_private_key ?? undefined + }; +} + +/** + * Check whether the port is in a "safe" range. + * + * We do not support connecting to "privileged" ports. + */ +export function validatePort(port: string | number): number { + if (typeof port == 'string') { + port = parseInt(port); + } + if (port < 1024) { + throw new Error(`Port ${port} not supported`); + } + return port; +} + +/** + * Construct a postgres URI, without username, password or ssl options. + * + * Only contains hostname, port, database. + */ +export function baseUri(options: NormalizedPostgresConnectionConfig) { + return `postgresql://${options.hostname}:${options.port}/${options.database}`; +} diff --git a/packages/service-core/src/util/migration_lib.ts b/modules/module-postgres/src/utils/migration_lib.ts similarity index 100% rename from packages/service-core/src/util/migration_lib.ts rename to modules/module-postgres/src/utils/migration_lib.ts diff --git a/packages/service-core/src/util/pgwire_utils.ts b/modules/module-postgres/src/utils/pgwire_utils.ts similarity index 51% rename from packages/service-core/src/util/pgwire_utils.ts rename to modules/module-postgres/src/utils/pgwire_utils.ts index 9aa042980..9a349e06c 100644 --- a/packages/service-core/src/util/pgwire_utils.ts +++ b/modules/module-postgres/src/utils/pgwire_utils.ts @@ -1,11 +1,8 @@ // Adapted from https://github.com/kagis/pgwire/blob/0dc927f9f8990a903f238737326e53ba1c8d094f/mod.js#L2218 -import * as bson from 'bson'; -import * as uuid from 'uuid'; import * as pgwire from '@powersync/service-jpgwire'; -import { SqliteJsonValue, SqliteRow, ToastableSqliteRow, toSyncRulesRow } from '@powersync/service-sync-rules'; +import { SqliteJsonValue, SqliteRow, toSyncRulesRow } from '@powersync/service-sync-rules'; -import * as replication from '../replication/replication-index.js'; import { logger } from '@powersync/lib-services-framework'; /** @@ -19,19 +16,6 @@ export function constructAfterRecord(message: pgwire.PgoutputInsert | pgwire.Pgo return toSyncRulesRow(record); } -export function hasToastedValues(row: ToastableSqliteRow) { - for (let key in row) { - if (typeof row[key] == 'undefined') { - return true; - } - } - return false; -} - -export function isCompleteRow(row: ToastableSqliteRow): row is SqliteRow { - return !hasToastedValues(row); -} - /** * pgwire message -> SQLite row. 
* @param message @@ -45,56 +29,6 @@ export function constructBeforeRecord(message: pgwire.PgoutputDelete | pgwire.Pg return toSyncRulesRow(record); } -function getRawReplicaIdentity( - tuple: ToastableSqliteRow, - columns: replication.ReplicationColumn[] -): Record { - let result: Record = {}; - for (let column of columns) { - const name = column.name; - result[name] = tuple[name]; - } - return result; -} -const ID_NAMESPACE = 'a396dd91-09fc-4017-a28d-3df722f651e9'; - -export function getUuidReplicaIdentityString( - tuple: ToastableSqliteRow, - columns: replication.ReplicationColumn[] -): string { - const rawIdentity = getRawReplicaIdentity(tuple, columns); - - return uuidForRow(rawIdentity); -} - -export function uuidForRow(row: SqliteRow): string { - // Important: This must not change, since it will affect how ids are generated. - // Use BSON so that it's a well-defined format without encoding ambiguities. - const repr = bson.serialize(row); - return uuid.v5(repr, ID_NAMESPACE); -} - -export function getUuidReplicaIdentityBson( - tuple: ToastableSqliteRow, - columns: replication.ReplicationColumn[] -): bson.UUID { - if (columns.length == 0) { - // REPLICA IDENTITY NOTHING - generate random id - return new bson.UUID(uuid.v4()); - } - const rawIdentity = getRawReplicaIdentity(tuple, columns); - - return uuidForRowBson(rawIdentity); -} - -export function uuidForRowBson(row: SqliteRow): bson.UUID { - // Important: This must not change, since it will affect how ids are generated. - // Use BSON so that it's a well-defined format without encoding ambiguities. - const repr = bson.serialize(row); - const buffer = Buffer.alloc(16); - return new bson.UUID(uuid.v5(repr, ID_NAMESPACE, buffer)); -} - export function escapeIdentifier(identifier: string) { return `"${identifier.replace(/"/g, '""').replace(/\./g, '"."')}"`; } diff --git a/modules/module-postgres/src/utils/populate_test_data.ts b/modules/module-postgres/src/utils/populate_test_data.ts new file mode 100644 index 000000000..1d1c15de8 --- /dev/null +++ b/modules/module-postgres/src/utils/populate_test_data.ts @@ -0,0 +1,37 @@ +import { Worker } from 'node:worker_threads'; + +import * as pgwire from '@powersync/service-jpgwire'; + +// This util is actually for tests only, but we need it compiled to JS for the service to work, so it's placed in the service. 
+ +export interface PopulateDataOptions { + connection: pgwire.NormalizedConnectionConfig; + num_transactions: number; + per_transaction: number; + size: number; +} + +export async function populateData(options: PopulateDataOptions) { + const WORKER_TIMEOUT = 30_000; + + const worker = new Worker(new URL('./populate_test_data_worker.js', import.meta.url), { + workerData: options + }); + const timeout = setTimeout(() => { + // Exits with code 1 below + worker.terminate(); + }, WORKER_TIMEOUT); + try { + return await new Promise((resolve, reject) => { + worker.on('message', resolve); + worker.on('error', reject); + worker.on('exit', (code) => { + if (code !== 0) { + reject(new Error(`Populating data failed with exit code ${code}`)); + } + }); + }); + } finally { + clearTimeout(timeout); + } +} diff --git a/packages/service-core/src/util/populate_test_data.ts b/modules/module-postgres/src/utils/populate_test_data_worker.ts similarity index 52% rename from packages/service-core/src/util/populate_test_data.ts rename to modules/module-postgres/src/utils/populate_test_data_worker.ts index f53648831..5fd161103 100644 --- a/packages/service-core/src/util/populate_test_data.ts +++ b/modules/module-postgres/src/utils/populate_test_data_worker.ts @@ -1,23 +1,20 @@ import * as crypto from 'crypto'; -import { Worker, isMainThread, parentPort, workerData } from 'node:worker_threads'; +import { isMainThread, parentPort, workerData } from 'node:worker_threads'; -import { connectPgWire } from '@powersync/service-jpgwire'; -import { NormalizedPostgresConnection } from '@powersync/service-types'; +import * as pgwire from '@powersync/service-jpgwire'; +import type { PopulateDataOptions } from './populate_test_data.js'; // This util is actually for tests only, but we need it compiled to JS for the service to work, so it's placed in the service. 
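// A minimal usage sketch of the worker-backed populateData helper added above, assuming the
// TEST_CONNECTION_OPTIONS helper and the compiled dist/ import path that large_batch.test.ts
// uses later in this diff. The workload numbers are illustrative only. populateData resolves
// with the number of operations performed by the worker, and rejects if the worker exits with
// a non-zero code (including when the 30s timeout terminates it).
import { populateData } from '../../dist/utils/populate_test_data.js';
import { TEST_CONNECTION_OPTIONS } from './util.js';

// Hypothetical workload: 5 transactions of 100 rows each, roughly 1 KiB of payload per row.
const operation_count = await populateData({
  connection: TEST_CONNECTION_OPTIONS,
  num_transactions: 5,
  per_transaction: 100,
  size: 1024
});
console.log(`populated ${operation_count} operations`);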
-export interface PopulateDataOptions { - connection: NormalizedPostgresConnection; - num_transactions: number; - per_transaction: number; - size: number; -} - if (isMainThread || parentPort == null) { - // Not a worker - ignore + // Must not be imported - only expected to run in a worker + throw new Error('Do not import this file'); } else { try { const options = workerData as PopulateDataOptions; + if (options == null) { + throw new Error('loaded worker without options'); + } const result = await populateDataInner(options); parentPort.postMessage(result); @@ -32,7 +29,7 @@ if (isMainThread || parentPort == null) { async function populateDataInner(options: PopulateDataOptions) { // Dedicated connection so we can release the memory easily - const initialDb = await connectPgWire(options.connection, { type: 'standard' }); + const initialDb = await pgwire.connectPgWire(options.connection, { type: 'standard' }); const largeDescription = crypto.randomBytes(options.size / 2).toString('hex'); let operation_count = 0; for (let i = 0; i < options.num_transactions; i++) { @@ -51,28 +48,3 @@ async function populateDataInner(options: PopulateDataOptions) { await initialDb.end(); return operation_count; } - -export async function populateData(options: PopulateDataOptions) { - const WORKER_TIMEOUT = 30_000; - - const worker = new Worker(new URL('./populate_test_data.js', import.meta.url), { - workerData: options - }); - const timeout = setTimeout(() => { - // Exits with code 1 below - worker.terminate(); - }, WORKER_TIMEOUT); - try { - return await new Promise((resolve, reject) => { - worker.on('message', resolve); - worker.on('error', reject); - worker.on('exit', (code) => { - if (code !== 0) { - reject(new Error(`Populating data failed with exit code ${code}`)); - } - }); - }); - } finally { - clearTimeout(timeout); - } -} diff --git a/packages/service-core/test/src/__snapshots__/pg_test.test.ts.snap b/modules/module-postgres/test/src/__snapshots__/pg_test.test.ts.snap similarity index 100% rename from packages/service-core/test/src/__snapshots__/pg_test.test.ts.snap rename to modules/module-postgres/test/src/__snapshots__/pg_test.test.ts.snap diff --git a/modules/module-postgres/test/src/env.ts b/modules/module-postgres/test/src/env.ts new file mode 100644 index 000000000..fa8f76ca1 --- /dev/null +++ b/modules/module-postgres/test/src/env.ts @@ -0,0 +1,7 @@ +import { utils } from '@powersync/lib-services-framework'; + +export const env = utils.collectEnvironmentVariables({ + PG_TEST_URL: utils.type.string.default('postgres://postgres:postgres@localhost:5432/powersync_test'), + CI: utils.type.boolean.default('false'), + SLOW_TESTS: utils.type.boolean.default('false') +}); diff --git a/packages/service-core/test/src/large_batch.test.ts b/modules/module-postgres/test/src/large_batch.test.ts similarity index 97% rename from packages/service-core/test/src/large_batch.test.ts rename to modules/module-postgres/test/src/large_batch.test.ts index edbc28610..2d20534b3 100644 --- a/packages/service-core/test/src/large_batch.test.ts +++ b/modules/module-postgres/test/src/large_batch.test.ts @@ -1,8 +1,9 @@ +import { MONGO_STORAGE_FACTORY, StorageFactory } from '@core-tests/util.js'; import { describe, expect, test } from 'vitest'; import { env } from './env.js'; -import { MONGO_STORAGE_FACTORY, StorageFactory, TEST_CONNECTION_OPTIONS } from './util.js'; +import { TEST_CONNECTION_OPTIONS } from './util.js'; import { walStreamTest } from './wal_stream_utils.js'; -import { populateData } from 
'../../dist/util/populate_test_data.js'; +import { populateData } from '../../dist/utils/populate_test_data.js'; describe('batch replication tests - mongodb', function () { // These are slow but consistent tests. diff --git a/packages/service-core/test/src/pg_test.test.ts b/modules/module-postgres/test/src/pg_test.test.ts similarity index 97% rename from packages/service-core/test/src/pg_test.test.ts rename to modules/module-postgres/test/src/pg_test.test.ts index 5ea9eb041..866adb3de 100644 --- a/packages/service-core/test/src/pg_test.test.ts +++ b/modules/module-postgres/test/src/pg_test.test.ts @@ -1,10 +1,9 @@ -import { describe, expect, test } from 'vitest'; -import { WalStream } from '../../src/replication/WalStream.js'; +import { constructAfterRecord } from '@module/utils/pgwire_utils.js'; import * as pgwire from '@powersync/service-jpgwire'; -import { clearTestDb, connectPgPool, connectPgWire, TEST_URI } from './util.js'; -import { constructAfterRecord } from '../../src/util/pgwire_utils.js'; import { SqliteRow } from '@powersync/service-sync-rules'; -import { getConnectionSchema } from '../../src/api/schema.js'; +import { describe, expect, test } from 'vitest'; +import { clearTestDb, connectPgPool, connectPgWire, TEST_URI } from './util.js'; +import { WalStream } from '@module/replication/WalStream.js'; describe('pg data types', () => { async function setupTable(db: pgwire.PgClient) { @@ -427,8 +426,9 @@ VALUES(10, ARRAY['null']::TEXT[]); await setupTable(db); - const schema = await getConnectionSchema(db); - expect(schema).toMatchSnapshot(); + // TODO need a test for adapter + // const schema = await api.getConnectionsSchema(db); + // expect(schema).toMatchSnapshot(); }); }); diff --git a/packages/service-core/test/src/schema_changes.test.ts b/modules/module-postgres/test/src/schema_changes.test.ts similarity index 98% rename from packages/service-core/test/src/schema_changes.test.ts rename to modules/module-postgres/test/src/schema_changes.test.ts index d14272a77..5318f7f10 100644 --- a/packages/service-core/test/src/schema_changes.test.ts +++ b/modules/module-postgres/test/src/schema_changes.test.ts @@ -1,14 +1,12 @@ +import { compareIds, putOp, removeOp } from '@core-tests/stream_utils.js'; import { describe, expect, test } from 'vitest'; -import { BucketStorageFactory } from '../../src/storage/BucketStorage.js'; -import { MONGO_STORAGE_FACTORY } from './util.js'; -import { compareIds, putOp, removeOp, walStreamTest } from './wal_stream_utils.js'; - -type StorageFactory = () => Promise; +import { walStreamTest } from './wal_stream_utils.js'; +import { INITIALIZED_MONGO_STORAGE_FACTORY, StorageFactory } from './util.js'; describe( 'schema changes', function () { - defineTests(MONGO_STORAGE_FACTORY); + defineTests(INITIALIZED_MONGO_STORAGE_FACTORY); }, { timeout: 20_000 } ); diff --git a/modules/module-postgres/test/src/setup.ts b/modules/module-postgres/test/src/setup.ts new file mode 100644 index 000000000..b924cf736 --- /dev/null +++ b/modules/module-postgres/test/src/setup.ts @@ -0,0 +1,7 @@ +import { container } from '@powersync/lib-services-framework'; +import { beforeAll } from 'vitest'; + +beforeAll(() => { + // Executes for every test file + container.registerDefaults(); +}); diff --git a/packages/service-core/test/src/slow_tests.test.ts b/modules/module-postgres/test/src/slow_tests.test.ts similarity index 93% rename from packages/service-core/test/src/slow_tests.test.ts rename to modules/module-postgres/test/src/slow_tests.test.ts index f3e1a3d5d..7c5bad017 100644 --- 
a/packages/service-core/test/src/slow_tests.test.ts +++ b/modules/module-postgres/test/src/slow_tests.test.ts @@ -1,17 +1,16 @@ import * as bson from 'bson'; -import * as mongo from 'mongodb'; import { afterEach, describe, expect, test } from 'vitest'; import { WalStream, WalStreamOptions } from '../../src/replication/WalStream.js'; -import { getClientCheckpoint } from '../../src/util/utils.js'; import { env } from './env.js'; -import { MONGO_STORAGE_FACTORY, StorageFactory, TEST_CONNECTION_OPTIONS, clearTestDb, connectPgPool } from './util.js'; +import { clearTestDb, connectPgPool, getClientCheckpoint, TEST_CONNECTION_OPTIONS } from './util.js'; import * as pgwire from '@powersync/service-jpgwire'; import { SqliteRow } from '@powersync/service-sync-rules'; -import { MongoBucketStorage } from '../../src/storage/MongoBucketStorage.js'; -import { PgManager } from '../../src/util/PgManager.js'; -import { mapOpEntry } from '@/storage/storage-index.js'; -import { reduceBucket, validateCompactedBucket, validateBucket } from './bucket_validation.js'; + +import { mapOpEntry, MongoBucketStorage } from '@/storage/storage-index.js'; +import { reduceBucket, validateCompactedBucket } from '@core-tests/bucket_validation.js'; +import { MONGO_STORAGE_FACTORY, StorageFactory } from '@core-tests/util.js'; +import { PgManager } from '@module/replication/PgManager.js'; import * as timers from 'node:timers/promises'; describe('slow tests - mongodb', function () { @@ -83,13 +82,12 @@ bucket_definitions: - SELECT * FROM "test_data" `; const syncRules = await f.updateSyncRules({ content: syncRuleContent }); - const storage = f.getInstance(syncRules.parsed()); + using storage = f.getInstance(syncRules); abortController = new AbortController(); const options: WalStreamOptions = { abort_signal: abortController.signal, connections, - storage: storage, - factory: f + storage: storage }; walStream = new WalStream(options); @@ -195,7 +193,7 @@ bucket_definitions: // Check that all inserts have been deleted again const docs = await f.db.current_data.find().toArray(); const transformed = docs.map((doc) => { - return bson.deserialize((doc.data as mongo.Binary).buffer) as SqliteRow; + return bson.deserialize(doc.data.buffer) as SqliteRow; }); expect(transformed).toEqual([]); @@ -236,7 +234,7 @@ bucket_definitions: - SELECT id, description FROM "test_data" `; const syncRules = await f.updateSyncRules({ content: syncRuleContent }); - const storage = f.getInstance(syncRules.parsed()); + using storage = f.getInstance(syncRules); // 1. 
Setup some base data that will be replicated in initial replication await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`); @@ -266,8 +264,7 @@ bucket_definitions: const options: WalStreamOptions = { abort_signal: abortController.signal, connections, - storage: storage, - factory: f + storage: storage }; walStream = new WalStream(options); diff --git a/modules/module-postgres/test/src/util.ts b/modules/module-postgres/test/src/util.ts new file mode 100644 index 000000000..c8142739d --- /dev/null +++ b/modules/module-postgres/test/src/util.ts @@ -0,0 +1,107 @@ +import { connectMongo } from '@core-tests/util.js'; +import * as types from '@module/types/types.js'; +import * as pg_utils from '@module/utils/pgwire_utils.js'; +import { logger } from '@powersync/lib-services-framework'; +import { BucketStorageFactory, Metrics, MongoBucketStorage, OpId } from '@powersync/service-core'; +import * as pgwire from '@powersync/service-jpgwire'; +import { pgwireRows } from '@powersync/service-jpgwire'; +import { env } from './env.js'; + +// The metrics need to be initialized before they can be used +await Metrics.initialise({ + disable_telemetry_sharing: true, + powersync_instance_id: 'test', + internal_metrics_endpoint: 'unused.for.tests.com' +}); +Metrics.getInstance().resetCounters(); + +export const TEST_URI = env.PG_TEST_URL; + +export const TEST_CONNECTION_OPTIONS = types.normalizeConnectionConfig({ + type: 'postgresql', + uri: TEST_URI, + sslmode: 'disable' +}); + +export type StorageFactory = () => Promise; + +export const INITIALIZED_MONGO_STORAGE_FACTORY: StorageFactory = async () => { + const db = await connectMongo(); + + // None of the PG tests insert data into this collection, so it was never created + if (!(await db.db.listCollections({ name: db.bucket_parameters.collectionName }).hasNext())) { + await db.db.createCollection('bucket_parameters'); + } + + await db.clear(); + + return new MongoBucketStorage(db, { + slot_name_prefix: 'test_' + }); +}; + +export async function clearTestDb(db: pgwire.PgClient) { + await db.query( + "select pg_drop_replication_slot(slot_name) from pg_replication_slots where active = false and slot_name like 'test_%'" + ); + + await db.query(`CREATE EXTENSION IF NOT EXISTS "uuid-ossp"`); + try { + await db.query(`DROP PUBLICATION powersync`); + } catch (e) { + // Ignore + } + + await db.query(`CREATE PUBLICATION powersync FOR ALL TABLES`); + + const tableRows = pgwire.pgwireRows( + await db.query(`SELECT table_name FROM information_schema.tables where table_schema = 'public'`) + ); + for (let row of tableRows) { + const name = row.table_name; + if (name.startsWith('test_')) { + await db.query(`DROP TABLE public.${pg_utils.escapeIdentifier(name)}`); + } + } +} + +export async function connectPgWire(type?: 'replication' | 'standard') { + const db = await pgwire.connectPgWire(TEST_CONNECTION_OPTIONS, { type }); + return db; +} + +export function connectPgPool() { + const db = pgwire.connectPgWirePool(TEST_CONNECTION_OPTIONS); + return db; +} + +export async function getClientCheckpoint( + db: pgwire.PgClient, + bucketStorage: BucketStorageFactory, + options?: { timeout?: number } +): Promise { + const start = Date.now(); + + const [{ lsn }] = pgwireRows(await db.query(`SELECT pg_logical_emit_message(false, 'powersync', 'ping') as lsn`)); + + // This old API needs a persisted checkpoint id. + // Since we don't use LSNs anymore, the only way to get that is to wait. + + const timeout = options?.timeout ?? 
50_000; + + logger.info(`Waiting for LSN checkpoint: ${lsn}`); + while (Date.now() - start < timeout) { + const cp = await bucketStorage.getActiveCheckpoint(); + if (!cp.hasSyncRules()) { + throw new Error('No sync rules available'); + } + if (cp.lsn && cp.lsn >= lsn) { + logger.info(`Got write checkpoint: ${lsn} : ${cp.checkpoint}`); + return cp.checkpoint; + } + + await new Promise((resolve) => setTimeout(resolve, 30)); + } + + throw new Error('Timeout while waiting for checkpoint'); +} diff --git a/packages/service-core/test/src/validation.test.ts b/modules/module-postgres/test/src/validation.test.ts similarity index 75% rename from packages/service-core/test/src/validation.test.ts rename to modules/module-postgres/test/src/validation.test.ts index e9f914a12..b7b7b23f2 100644 --- a/packages/service-core/test/src/validation.test.ts +++ b/modules/module-postgres/test/src/validation.test.ts @@ -1,7 +1,7 @@ +import { MONGO_STORAGE_FACTORY } from '@core-tests/util.js'; import { expect, test } from 'vitest'; -import { MONGO_STORAGE_FACTORY } from './util.js'; import { walStreamTest } from './wal_stream_utils.js'; -import { WalConnection } from '../../src/replication/WalConnection.js'; +import { getDebugTablesInfo } from '@module/replication/replication-utils.js'; // Not quite a walStreamTest, but it helps to manage the connection test( @@ -22,13 +22,14 @@ bucket_definitions: const syncRules = await context.factory.updateSyncRules({ content: syncRuleContent }); - const walConnection = new WalConnection({ + const tablePatterns = syncRules.parsed({ defaultSchema: 'public' }).sync_rules.getSourceTables(); + const tableInfo = await getDebugTablesInfo({ db: pool, - sync_rules: syncRules.parsed().sync_rules + publicationName: context.publicationName, + connectionTag: context.connectionTag, + tablePatterns: tablePatterns, + syncRules: syncRules.parsed({ defaultSchema: 'public' }).sync_rules }); - - const tablePatterns = syncRules.parsed().sync_rules.getSourceTables(); - const tableInfo = await walConnection.getDebugTablesInfo(tablePatterns); expect(tableInfo).toEqual([ { schema: 'public', diff --git a/packages/service-core/test/src/wal_stream.test.ts b/modules/module-postgres/test/src/wal_stream.test.ts similarity index 97% rename from packages/service-core/test/src/wal_stream.test.ts rename to modules/module-postgres/test/src/wal_stream.test.ts index a6cb83fa1..a55454654 100644 --- a/packages/service-core/test/src/wal_stream.test.ts +++ b/modules/module-postgres/test/src/wal_stream.test.ts @@ -1,10 +1,10 @@ +import { putOp, removeOp } from '@core-tests/stream_utils.js'; +import { MONGO_STORAGE_FACTORY } from '@core-tests/util.js'; +import { BucketStorageFactory, Metrics } from '@powersync/service-core'; +import { pgwireRows } from '@powersync/service-jpgwire'; import * as crypto from 'crypto'; import { describe, expect, test } from 'vitest'; -import { BucketStorageFactory } from '@/storage/BucketStorage.js'; -import { MONGO_STORAGE_FACTORY } from './util.js'; -import { putOp, removeOp, walStreamTest } from './wal_stream_utils.js'; -import { pgwireRows } from '@powersync/service-jpgwire'; -import { Metrics } from '@/metrics/Metrics.js'; +import { walStreamTest } from './wal_stream_utils.js'; type StorageFactory = () => Promise; diff --git a/packages/service-core/test/src/wal_stream_utils.ts b/modules/module-postgres/test/src/wal_stream_utils.ts similarity index 54% rename from packages/service-core/test/src/wal_stream_utils.ts rename to modules/module-postgres/test/src/wal_stream_utils.ts index 
7c639a6e5..23eced2e7 100644 --- a/packages/service-core/test/src/wal_stream_utils.ts +++ b/modules/module-postgres/test/src/wal_stream_utils.ts @@ -1,11 +1,9 @@ +import { fromAsync } from '@core-tests/stream_utils.js'; +import { PgManager } from '@module/replication/PgManager.js'; +import { PUBLICATION_NAME, WalStream, WalStreamOptions } from '@module/replication/WalStream.js'; +import { BucketStorageFactory, SyncRulesBucketStorage } from '@powersync/service-core'; import * as pgwire from '@powersync/service-jpgwire'; -import { WalStream, WalStreamOptions } from '../../src/replication/WalStream.js'; -import { BucketStorageFactory, SyncRulesBucketStorage } from '../../src/storage/BucketStorage.js'; -import { OplogEntry } from '../../src/util/protocol-types.js'; -import { getClientCheckpoint } from '../../src/util/utils.js'; -import { TEST_CONNECTION_OPTIONS, clearTestDb } from './util.js'; -import { PgManager } from '../../src/util/PgManager.js'; -import { JSONBig } from '@powersync/service-jsonbig'; +import { clearTestDb, getClientCheckpoint, TEST_CONNECTION_OPTIONS } from './util.js'; /** * Tests operating on the wal stream need to configure the stream and manage asynchronous @@ -19,40 +17,48 @@ export function walStreamTest( ): () => Promise { return async () => { const f = await factory(); - const connections = new PgManager(TEST_CONNECTION_OPTIONS, {}); + const connectionManager = new PgManager(TEST_CONNECTION_OPTIONS, {}); - await clearTestDb(connections.pool); - const context = new WalStreamTestContext(f, connections); - try { - await test(context); - } finally { - await context.dispose(); - } + await clearTestDb(connectionManager.pool); + await using context = new WalStreamTestContext(f, connectionManager); + await test(context); }; } -export class WalStreamTestContext { +export class WalStreamTestContext implements AsyncDisposable { private _walStream?: WalStream; private abortController = new AbortController(); private streamPromise?: Promise; public storage?: SyncRulesBucketStorage; private replicationConnection?: pgwire.PgConnection; - constructor(public factory: BucketStorageFactory, public connections: PgManager) {} + constructor( + public factory: BucketStorageFactory, + public connectionManager: PgManager + ) {} - async dispose() { + async [Symbol.asyncDispose]() { this.abortController.abort(); await this.streamPromise; - this.connections.destroy(); + await this.connectionManager.destroy(); + this.storage?.[Symbol.dispose](); } get pool() { - return this.connections.pool; + return this.connectionManager.pool; + } + + get connectionTag() { + return this.connectionManager.connectionTag; + } + + get publicationName() { + return PUBLICATION_NAME; } async updateSyncRules(content: string) { const syncRules = await this.factory.updateSyncRules({ content: content }); - this.storage = this.factory.getInstance(syncRules.parsed()); + this.storage = this.factory.getInstance(syncRules); return this.storage!; } @@ -65,8 +71,7 @@ export class WalStreamTestContext { } const options: WalStreamOptions = { storage: this.storage, - factory: this.factory, - connections: this.connections, + connections: this.connectionManager, abort_signal: this.abortController.signal }; this._walStream = new WalStream(options); @@ -74,7 +79,7 @@ export class WalStreamTestContext { } async replicateSnapshot() { - this.replicationConnection = await this.connections.replicationConnection(); + this.replicationConnection = await this.connectionManager.replicationConnection(); await 
this.walStream.initReplication(this.replicationConnection); await this.storage!.autoActivate(); } @@ -88,7 +93,7 @@ export class WalStreamTestContext { async getCheckpoint(options?: { timeout?: number }) { let checkpoint = await Promise.race([ - getClientCheckpoint(this.connections.pool, this.factory, { timeout: options?.timeout ?? 15_000 }), + getClientCheckpoint(this.pool, this.factory, { timeout: options?.timeout ?? 15_000 }), this.streamPromise ]); if (typeof checkpoint == undefined) { @@ -109,48 +114,8 @@ export class WalStreamTestContext { start ??= '0'; let checkpoint = await this.getCheckpoint(options); const map = new Map([[bucket, start]]); - const batch = await this.storage!.getBucketDataBatch(checkpoint, map); + const batch = this.storage!.getBucketDataBatch(checkpoint, map); const batches = await fromAsync(batch); return batches[0]?.batch.data ?? []; } } - -export function putOp(table: string, data: Record): Partial { - return { - op: 'PUT', - object_type: table, - object_id: data.id, - data: JSONBig.stringify(data) - }; -} - -export function removeOp(table: string, id: string): Partial { - return { - op: 'REMOVE', - object_type: table, - object_id: id - }; -} - -export function compareIds(a: OplogEntry, b: OplogEntry) { - return a.object_id!.localeCompare(b.object_id!); -} - -export async function fromAsync(source: Iterable | AsyncIterable): Promise { - const items: T[] = []; - for await (const item of source) { - items.push(item); - } - return items; -} - -export async function oneFromAsync(source: Iterable | AsyncIterable): Promise { - const items: T[] = []; - for await (const item of source) { - items.push(item); - } - if (items.length != 1) { - throw new Error(`One item expected, got: ${items.length}`); - } - return items[0]; -} diff --git a/modules/module-postgres/test/tsconfig.json b/modules/module-postgres/test/tsconfig.json new file mode 100644 index 000000000..18898c4ee --- /dev/null +++ b/modules/module-postgres/test/tsconfig.json @@ -0,0 +1,28 @@ +{ + "extends": "../../../tsconfig.base.json", + "compilerOptions": { + "rootDir": "src", + "baseUrl": "./", + "noEmit": true, + "esModuleInterop": true, + "skipLibCheck": true, + "sourceMap": true, + "paths": { + "@/*": ["../../../packages/service-core/src/*"], + "@module/*": ["../src/*"], + "@core-tests/*": ["../../../packages/service-core/test/src/*"] + } + }, + "include": ["src"], + "references": [ + { + "path": "../" + }, + { + "path": "../../../packages/service-core/test" + }, + { + "path": "../../../packages/service-core/" + } + ] +} diff --git a/modules/module-postgres/tsconfig.json b/modules/module-postgres/tsconfig.json new file mode 100644 index 000000000..9ceadec40 --- /dev/null +++ b/modules/module-postgres/tsconfig.json @@ -0,0 +1,31 @@ +{ + "extends": "../../tsconfig.base.json", + "compilerOptions": { + "rootDir": "src", + "outDir": "dist", + "esModuleInterop": true, + "skipLibCheck": true, + "sourceMap": true + }, + "include": ["src"], + "references": [ + { + "path": "../../packages/types" + }, + { + "path": "../../packages/jsonbig" + }, + { + "path": "../../packages/jpgwire" + }, + { + "path": "../../packages/sync-rules" + }, + { + "path": "../../packages/service-core" + }, + { + "path": "../../libs/lib-services" + } + ] +} diff --git a/modules/module-postgres/vitest.config.ts b/modules/module-postgres/vitest.config.ts new file mode 100644 index 000000000..7a39c1f71 --- /dev/null +++ b/modules/module-postgres/vitest.config.ts @@ -0,0 +1,15 @@ +import { defineConfig } from 'vitest/config'; +import 
tsconfigPaths from 'vite-tsconfig-paths'; + +export default defineConfig({ + plugins: [tsconfigPaths()], + test: { + setupFiles: './test/src/setup.ts', + poolOptions: { + threads: { + singleThread: true + } + }, + pool: 'threads' + } +}); diff --git a/package.json b/package.json index 54bae275f..1846299d9 100644 --- a/package.json +++ b/package.json @@ -21,20 +21,22 @@ "test": "pnpm run -r test" }, "devDependencies": { - "@changesets/cli": "^2.27.3", - "@types/node": "18.11.11", + "@changesets/cli": "^2.27.8", + "@types/node": "^22.5.5", "async": "^3.2.4", "bson": "^6.6.0", "concurrently": "^8.2.2", "inquirer": "^9.2.7", - "npm-check-updates": "^16.10.15", - "prettier": "^2.8.8", + "npm-check-updates": "^17.1.2", + "prettier": "^3.3.3", "rsocket-core": "1.0.0-alpha.3", "rsocket-websocket-client": "1.0.0-alpha.3", "semver": "^7.5.4", "tsc-watch": "^6.2.0", "ts-node-dev": "^2.0.0", - "typescript": "~5.2.2", + "typescript": "^5.6.2", + "vite-tsconfig-paths": "^4.3.2", + "vitest": "^2.1.1", "ws": "^8.2.3" } } diff --git a/packages/jpgwire/ca/README.md b/packages/jpgwire/ca/README.md index 6e406e6e2..6bd7a532b 100644 --- a/packages/jpgwire/ca/README.md +++ b/packages/jpgwire/ca/README.md @@ -1,4 +1,3 @@ - ## AWS RDS https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html#UsingWithRDS.SSL.CertificatesAllRegions @@ -11,12 +10,13 @@ https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/how-to-connec https://learn.microsoft.com/en-us/azure/postgresql/single-server/concepts-certificate-rotation Includes: - * BaltimoreCyberTrustRoot - * DigiCertGlobalRootG2 Root CA - * Microsoft RSA Root Certificate Authority 2017 - * Microsoft ECC Root Certificate Authority 2017 - * DigiCert Global Root G3 - * DigiCert Global Root CA + +- BaltimoreCyberTrustRoot +- DigiCertGlobalRootG2 Root CA +- Microsoft RSA Root Certificate Authority 2017 +- Microsoft ECC Root Certificate Authority 2017 +- DigiCert Global Root G3 +- DigiCert Global Root CA ## Supabase diff --git a/packages/jpgwire/package.json b/packages/jpgwire/package.json index c5df87c70..d0b47430e 100644 --- a/packages/jpgwire/package.json +++ b/packages/jpgwire/package.json @@ -20,7 +20,8 @@ "dependencies": { "@powersync/service-jsonbig": "workspace:^", "@powersync/service-types": "workspace:^", - "date-fns": "^3.6.0", + "@powersync/service-sync-rules": "workspace:^", + "date-fns": "^4.1.0", "pgwire": "github:kagis/pgwire#f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87" } } diff --git a/packages/jpgwire/src/pgwire_types.ts b/packages/jpgwire/src/pgwire_types.ts index aa21d7fd5..a93aeba66 100644 --- a/packages/jpgwire/src/pgwire_types.ts +++ b/packages/jpgwire/src/pgwire_types.ts @@ -3,6 +3,7 @@ import type { PgoutputRelation } from 'pgwire/mod.js'; import { dateToSqlite, lsnMakeComparable, timestampToSqlite, timestamptzToSqlite } from './util.js'; import { JsonContainer } from '@powersync/service-jsonbig'; +import { DatabaseInputRow } from '@powersync/service-sync-rules'; export class PgType { static decode(text: string, typeOid: number) { @@ -253,23 +254,3 @@ export function decodeTuple(relation: PgoutputRelation, tupleRaw: Record = { -readonly [P in keyof T]: T[P]; }; -export interface PgWireConnectionOptions extends NormalizedPostgresConnection { +export interface PgWireConnectionOptions extends NormalizedConnectionConfig { resolved_ip?: string; } diff --git a/packages/jpgwire/tsconfig.json b/packages/jpgwire/tsconfig.json index f84b49829..7da72934d 100644 --- a/packages/jpgwire/tsconfig.json +++ b/packages/jpgwire/tsconfig.json 
@@ -15,6 +15,9 @@ }, { "path": "../jsonbig" + }, + { + "path": "../sync-rules" } ] } diff --git a/packages/jsonbig/README.md b/packages/jsonbig/README.md index f18b78151..3490fb45a 100644 --- a/packages/jsonbig/README.md +++ b/packages/jsonbig/README.md @@ -1,6 +1,7 @@ # powersync-jsonbig JSON is used everywhere, including: + 1. PostgreSQL (json/jsonb types) 2. Sync rules input (values are normalized to JSON text). 3. Sync rule transformations (extracting values, constructing objects in the future) @@ -9,10 +10,12 @@ JSON is used everywhere, including: Where we can, JSON data is kept as strings and not parsed. This is so that: + 1. We don't add parsing / serializing overhead. 2. We don't change the data. Specifically: + 1. The SQLite type system makes a distinction between INTEGER and REAL values. We try to preserve this. 2. Integers in SQLite can be up to 64-bit. diff --git a/packages/rsocket-router/package.json b/packages/rsocket-router/package.json index 43114d2c9..039abc5d3 100644 --- a/packages/rsocket-router/package.json +++ b/packages/rsocket-router/package.json @@ -28,8 +28,6 @@ "@types/uuid": "^9.0.4", "@types/ws": "~8.2.0", "bson": "^6.6.0", - "rsocket-websocket-client": "1.0.0-alpha.3", - "typescript": "~5.2.2", - "vitest": "^0.34.6" + "rsocket-websocket-client": "1.0.0-alpha.3" } } diff --git a/packages/service-core/package.json b/packages/service-core/package.json index 2efb16ac5..df1e5976a 100644 --- a/packages/service-core/package.json +++ b/packages/service-core/package.json @@ -12,7 +12,7 @@ "scripts": { "build": "tsc -b", "build:tests": "tsc -b test/tsconfig.json", - "test": "vitest --no-threads", + "test": "vitest", "clean": "rm -rf ./dist && tsc -b --clean" }, "dependencies": { @@ -23,7 +23,6 @@ "@opentelemetry/resources": "^1.24.1", "@opentelemetry/sdk-metrics": "1.24.1", "@powersync/lib-services-framework": "workspace:*", - "@powersync/service-jpgwire": "workspace:*", "@powersync/service-jsonbig": "workspace:*", "@powersync/service-rsocket-router": "workspace:*", "@powersync/service-sync-rules": "workspace:*", @@ -40,8 +39,8 @@ "lru-cache": "^10.2.2", "mongodb": "^6.7.0", "node-fetch": "^3.3.2", - "pgwire": "github:kagis/pgwire#f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87", "ts-codec": "^1.2.2", + "uri-js": "^4.4.1", "uuid": "^9.0.1", "winston": "^3.13.0", "yaml": "^2.3.2" @@ -51,9 +50,6 @@ "@types/lodash": "^4.17.5", "@types/uuid": "^9.0.4", "fastify": "4.23.2", - "fastify-plugin": "^4.5.1", - "typescript": "^5.2.2", - "vite-tsconfig-paths": "^4.3.2", - "vitest": "^0.34.6" + "fastify-plugin": "^4.5.1" } } diff --git a/packages/service-core/src/api/RouteAPI.ts b/packages/service-core/src/api/RouteAPI.ts new file mode 100644 index 000000000..c4212aa2b --- /dev/null +++ b/packages/service-core/src/api/RouteAPI.ts @@ -0,0 +1,78 @@ +import { SqlSyncRules, TablePattern } from '@powersync/service-sync-rules'; +import * as types from '@powersync/service-types'; +import { ParseSyncRulesOptions, SyncRulesBucketStorage } from '../storage/BucketStorage.js'; + +export interface PatternResult { + schema: string; + pattern: string; + wildcard: boolean; + tables?: types.TableInfo[]; + table?: types.TableInfo; +} + +export interface ReplicationLagOptions { + bucketStorage: SyncRulesBucketStorage; +} + +/** + * Describes all the methods currently required to service the sync API endpoints. + */ +export interface RouteAPI { + /** + * @returns basic identification of the connection + */ + getSourceConfig(): Promise; + + /** + * Checks the current connection status of the data source. 
+ * This is usually some test query to verify the source can be reached. + */ + getConnectionStatus(): Promise; + + /** + * Generates replication table information from a given pattern of tables. + * + * @param tablePatterns A set of table patterns which typically come from + * the tables listed in sync rules definitions. + * + * @param sqlSyncRules + * @returns A result of all the tables and columns which should be replicated + * based on the input patterns. Certain tests are executed on the + * tables to ensure syncing will function according to the input + * pattern. Debug errors and warnings are reported per table. + */ + getDebugTablesInfo(tablePatterns: TablePattern[], sqlSyncRules: SqlSyncRules): Promise<PatternResult[]>; + + /** + * @returns The replication lag: that is the amount of data which has not been + * replicated yet, in bytes. + */ + getReplicationLag(options: ReplicationLagOptions): Promise<number>; + + /** + * Get the current LSN or equivalent replication HEAD position identifier + */ + getReplicationHead(): Promise<string>; + + /** + * @returns The schema for tables inside the connected database. This is typically + * used to validate sync rules. + */ + getConnectionSchema(): Promise<types.DatabaseSchema[]>; + + /** + * Executes a query and returns the result from the data source. This is currently used in the + * admin API which is exposed in Collide. + */ + executeQuery(query: string, params: any[]): Promise; + + /** + * Close any resources that need graceful termination. + */ + shutdown(): Promise<void>; + + /** + * Get the default schema (or database) when only a table name is specified in sync rules. + */ + getParseSyncRulesOptions(): ParseSyncRulesOptions; +} diff --git a/packages/service-core/src/api/api-index.ts b/packages/service-core/src/api/api-index.ts index f6063e867..0f90b1738 100644 --- a/packages/service-core/src/api/api-index.ts +++ b/packages/service-core/src/api/api-index.ts @@ -1,2 +1,3 @@ export * from './diagnostics.js'; +export * from './RouteAPI.js'; export * from './schema.js'; diff --git a/packages/service-core/src/api/diagnostics.ts b/packages/service-core/src/api/diagnostics.ts index 46a7cde98..72231c9ce 100644 --- a/packages/service-core/src/api/diagnostics.ts +++ b/packages/service-core/src/api/diagnostics.ts @@ -1,51 +1,9 @@ +import { logger } from '@powersync/lib-services-framework'; import { DEFAULT_TAG, SourceTableInterface, SqlSyncRules } from '@powersync/service-sync-rules'; -import { pgwireRows } from '@powersync/service-jpgwire'; -import { ConnectionStatus, SyncRulesStatus, TableInfo, baseUri } from '@powersync/service-types'; +import { SyncRulesStatus, TableInfo } from '@powersync/service-types'; -import * as replication from '../replication/replication-index.js'; import * as storage from '../storage/storage-index.js'; -import * as util from '../util/util-index.js'; - -import { CorePowerSyncSystem } from '../system/CorePowerSyncSystem.js'; -import { logger } from '@powersync/lib-services-framework'; - -export async function getConnectionStatus(system: CorePowerSyncSystem): Promise { - if (system.pgwire_pool == null) { - return null; - } - - const pool = system.requirePgPool(); - - const base = { - id: system.config.connection!.id, - postgres_uri: baseUri(system.config.connection!) 
- }; - try { - await util.retriedQuery(pool, `SELECT 'PowerSync connection test'`); - } catch (e) { - return { - ...base, - connected: false, - errors: [{ level: 'fatal', message: e.message }] - }; - } - - try { - await replication.checkSourceConfiguration(pool); - } catch (e) { - return { - ...base, - connected: true, - errors: [{ level: 'fatal', message: e.message }] - }; - } - - return { - ...base, - connected: true, - errors: [] - }; -} +import { RouteAPI } from './RouteAPI.js'; export interface DiagnosticsOptions { /** @@ -66,9 +24,12 @@ export interface DiagnosticsOptions { check_connection: boolean; } +export const DEFAULT_DATASOURCE_ID = 'default'; + export async function getSyncRulesStatus( + bucketStorage: storage.BucketStorageFactory, + apiHandler: RouteAPI, sync_rules: storage.PersistedSyncRulesContent | null, - system: CorePowerSyncSystem, options: DiagnosticsOptions ): Promise { if (sync_rules == null) { @@ -82,7 +43,7 @@ export async function getSyncRulesStatus( let rules: SqlSyncRules; let persisted: storage.PersistedSyncRules; try { - persisted = sync_rules.parsed(); + persisted = sync_rules.parsed(apiHandler.getParseSyncRulesOptions()); rules = persisted.sync_rules; } catch (e) { return { @@ -92,21 +53,19 @@ export async function getSyncRulesStatus( }; } - const systemStorage = live_status ? await system.storage.getInstance(persisted) : undefined; + const sourceConfig = await apiHandler.getSourceConfig(); + // This method can run under some situations if no connection is configured yet. + // It will return a default tag in such a case. This default tag is not module specific. + const tag = sourceConfig.tag ?? DEFAULT_TAG; + using systemStorage = live_status ? bucketStorage.getInstance(sync_rules) : undefined; const status = await systemStorage?.getStatus(); let replication_lag_bytes: number | undefined = undefined; let tables_flat: TableInfo[] = []; if (check_connection) { - const pool = system.requirePgPool(); - const source_table_patterns = rules.getSourceTables(); - const wc = new replication.WalConnection({ - db: pool, - sync_rules: rules - }); - const resolved_tables = await wc.getDebugTablesInfo(source_table_patterns); + const resolved_tables = await apiHandler.getDebugTablesInfo(source_table_patterns, rules); tables_flat = resolved_tables.flatMap((info) => { if (info.table) { return [info.table]; @@ -119,19 +78,9 @@ export async function getSyncRulesStatus( if (systemStorage) { try { - const results = await util.retriedQuery(pool, { - statement: `SELECT - slot_name, - confirmed_flush_lsn, - pg_current_wal_lsn(), - (pg_current_wal_lsn() - confirmed_flush_lsn) AS lsn_distance - FROM pg_replication_slots WHERE slot_name = $1 LIMIT 1;`, - params: [{ type: 'varchar', value: systemStorage!.slot_name }] + replication_lag_bytes = await apiHandler.getReplicationLag({ + bucketStorage: systemStorage }); - const [row] = pgwireRows(results); - if (row) { - replication_lag_bytes = Number(row.lsn_distance); - } } catch (e) { // Ignore logger.warn(`Unable to get replication lag`, e); @@ -139,7 +88,6 @@ export async function getSyncRulesStatus( } } else { const source_table_patterns = rules.getSourceTables(); - const tag = system.config.connection!.tag ?? DEFAULT_TAG; tables_flat = source_table_patterns.map((pattern): TableInfo => { if (pattern.isWildcard) { @@ -190,8 +138,8 @@ export async function getSyncRulesStatus( content: include_content ? sync_rules.sync_rules_content : undefined, connections: [ { - id: system.config.connection!.id, - tag: system.config.connection!.tag ?? 
DEFAULT_TAG, + id: sourceConfig.id ?? DEFAULT_DATASOURCE_ID, + tag: tag, slot_name: sync_rules.slot_name, initial_replication_done: status?.snapshot_done ?? false, // TODO: Rename? diff --git a/packages/service-core/src/api/schema.ts b/packages/service-core/src/api/schema.ts index e3ffbb744..5469973b2 100644 --- a/packages/service-core/src/api/schema.ts +++ b/packages/service-core/src/api/schema.ts @@ -1,99 +1,27 @@ -import type * as pgwire from '@powersync/service-jpgwire'; -import { pgwireRows } from '@powersync/service-jpgwire'; -import { DatabaseSchema, internal_routes } from '@powersync/service-types'; +import { internal_routes } from '@powersync/service-types'; -import * as util from '../util/util-index.js'; -import { CorePowerSyncSystem } from '../system/CorePowerSyncSystem.js'; +import * as api from '../api/api-index.js'; -export async function getConnectionsSchema(system: CorePowerSyncSystem): Promise { - if (system.config.connection == null) { - return { connections: [] }; +export async function getConnectionsSchema(api: api.RouteAPI): Promise { + if (!api) { + return { + connections: [], + defaultConnectionTag: 'default', + defaultSchema: '' + }; } - const schemas = await getConnectionSchema(system.requirePgPool()); + + const baseConfig = await api.getSourceConfig(); + return { connections: [ { - schemas, - tag: system.config.connection!.tag, - id: system.config.connection!.id + id: baseConfig.id, + tag: baseConfig.tag, + schemas: await api.getConnectionSchema() } - ] + ], + defaultConnectionTag: baseConfig.tag!, + defaultSchema: api.getParseSyncRulesOptions().defaultSchema }; } - -export async function getConnectionSchema(db: pgwire.PgClient): Promise { - // https://github.com/Borvik/vscode-postgres/blob/88ec5ed061a0c9bced6c5d4ec122d0759c3f3247/src/language/server.ts - const results = await util.retriedQuery( - db, - `SELECT - tbl.schemaname, - tbl.tablename, - tbl.quoted_name, - json_agg(a ORDER BY attnum) as columns -FROM - ( - SELECT - n.nspname as schemaname, - c.relname as tablename, - (quote_ident(n.nspname) || '.' || quote_ident(c.relname)) as quoted_name - FROM - pg_catalog.pg_class c - JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace - WHERE - c.relkind = 'r' - AND n.nspname not in ('information_schema', 'pg_catalog', 'pg_toast') - AND n.nspname not like 'pg_temp_%' - AND n.nspname not like 'pg_toast_temp_%' - AND c.relnatts > 0 - AND has_schema_privilege(n.oid, 'USAGE') = true - AND has_table_privilege(quote_ident(n.nspname) || '.' 
|| quote_ident(c.relname), 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') = true - ) as tbl - LEFT JOIN ( - SELECT - attrelid, - attname, - format_type(atttypid, atttypmod) as data_type, - (SELECT typname FROM pg_catalog.pg_type WHERE oid = atttypid) as pg_type, - attnum, - attisdropped - FROM - pg_attribute - ) as a ON ( - a.attrelid = tbl.quoted_name::regclass - AND a.attnum > 0 - AND NOT a.attisdropped - AND has_column_privilege(tbl.quoted_name, a.attname, 'SELECT, INSERT, UPDATE, REFERENCES') - ) -GROUP BY schemaname, tablename, quoted_name` - ); - const rows = pgwireRows(results); - - let schemas: Record = {}; - - for (let row of rows) { - const schema = (schemas[row.schemaname] ??= { - name: row.schemaname, - tables: [] - }); - const table = { - name: row.tablename, - columns: [] as any[] - }; - schema.tables.push(table); - - const columnInfo = JSON.parse(row.columns); - for (let column of columnInfo) { - let pg_type = column.pg_type as string; - if (pg_type.startsWith('_')) { - pg_type = `${pg_type.substring(1)}[]`; - } - table.columns.push({ - name: column.attname, - type: column.data_type, - pg_type: pg_type - }); - } - } - - return Object.values(schemas); -} diff --git a/packages/service-core/src/auth/KeyStore.ts b/packages/service-core/src/auth/KeyStore.ts index 9392d1cd3..aed671a13 100644 --- a/packages/service-core/src/auth/KeyStore.ts +++ b/packages/service-core/src/auth/KeyStore.ts @@ -1,9 +1,9 @@ +import { logger } from '@powersync/lib-services-framework'; import * as jose from 'jose'; import secs from '../util/secs.js'; -import { KeyOptions, KeySpec, SUPPORTED_ALGORITHMS } from './KeySpec.js'; -import { KeyCollector } from './KeyCollector.js'; import { JwtPayload } from './JwtPayload.js'; -import { logger } from '@powersync/lib-services-framework'; +import { KeyCollector } from './KeyCollector.js'; +import { KeyOptions, KeySpec, SUPPORTED_ALGORITHMS } from './KeySpec.js'; /** * KeyStore to get keys and verify tokens. @@ -32,10 +32,13 @@ import { logger } from '@powersync/lib-services-framework'; * If we have a matching kid, we can generally get a detailed error (e.g. signature verification failed, invalid algorithm, etc). * If we don't have a matching kid, we'll generally just get an error "Could not find an appropriate key...". 
*/ -export class KeyStore { - private collector: KeyCollector; +export class KeyStore<Collector extends KeyCollector = KeyCollector> { + /** + * @internal + */ + collector: Collector; - constructor(collector: KeyCollector) { + constructor(collector: Collector) { this.collector = collector; } diff --git a/packages/service-core/src/auth/RemoteJWKSCollector.ts b/packages/service-core/src/auth/RemoteJWKSCollector.ts index 8971c7c5b..1419d3a49 100644 --- a/packages/service-core/src/auth/RemoteJWKSCollector.ts +++ b/packages/service-core/src/auth/RemoteJWKSCollector.ts @@ -22,7 +22,10 @@ export type RemoteJWKSCollectorOptions = { export class RemoteJWKSCollector implements KeyCollector { private url: URL; - constructor(url: string, protected options?: RemoteJWKSCollectorOptions) { + constructor( + url: string, + protected options?: RemoteJWKSCollectorOptions + ) { try { this.url = new URL(url); } catch (e) { diff --git a/packages/service-core/src/auth/auth-index.ts b/packages/service-core/src/auth/auth-index.ts index efb35acfa..dae123d1a 100644 --- a/packages/service-core/src/auth/auth-index.ts +++ b/packages/service-core/src/auth/auth-index.ts @@ -7,4 +7,3 @@ export * from './KeyStore.js'; export * from './LeakyBucket.js'; export * from './RemoteJWKSCollector.js'; export * from './StaticKeyCollector.js'; -export * from './SupabaseKeyCollector.js'; diff --git a/packages/service-core/src/db/mongo.ts b/packages/service-core/src/db/mongo.ts index ca2253028..f687705bd 100644 --- a/packages/service-core/src/db/mongo.ts +++ b/packages/service-core/src/db/mongo.ts @@ -2,6 +2,7 @@ import * as mongo from 'mongodb'; import * as timers from 'timers/promises'; import { configFile } from '@powersync/service-types'; +import { normalizeMongoConfig } from '../storage/storage-index.js'; /** * Time for new connection to timeout.
@@ -30,10 +31,11 @@ export const MONGO_OPERATION_TIMEOUT_MS = 30_000; export const MONGO_CLEAR_OPERATION_TIMEOUT_MS = 5_000; export function createMongoClient(config: configFile.PowerSyncConfig['storage']) { - return new mongo.MongoClient(config.uri, { + const normalized = normalizeMongoConfig(config); + return new mongo.MongoClient(normalized.uri, { auth: { - username: config.username, - password: config.password + username: normalized.username, + password: normalized.password }, // Time for connection to timeout connectTimeoutMS: MONGO_CONNECT_TIMEOUT_MS, diff --git a/packages/service-core/src/entry/cli-entry.ts b/packages/service-core/src/entry/cli-entry.ts index a53431edc..a9f693c77 100644 --- a/packages/service-core/src/entry/cli-entry.ts +++ b/packages/service-core/src/entry/cli-entry.ts @@ -1,10 +1,11 @@ import { Command } from 'commander'; +import { logger } from '@powersync/lib-services-framework'; import * as utils from '../util/util-index.js'; +import { registerCompactAction } from './commands/compact-action.js'; import { registerMigrationAction } from './commands/migrate-action.js'; +import { registerStartAction } from './commands/start-action.js'; import { registerTearDownAction } from './commands/teardown-action.js'; -import { registerCompactAction, registerStartAction } from './entry-index.js'; -import { logger } from '@powersync/lib-services-framework'; /** * Generates a Commander program which serves as the entry point diff --git a/packages/service-core/src/entry/commands/compact-action.ts b/packages/service-core/src/entry/commands/compact-action.ts index 2fff227b3..12327fbaa 100644 --- a/packages/service-core/src/entry/commands/compact-action.ts +++ b/packages/service-core/src/entry/commands/compact-action.ts @@ -2,8 +2,8 @@ import { Command } from 'commander'; import { logger } from '@powersync/lib-services-framework'; import * as v8 from 'v8'; -import { createPowerSyncMongo, MongoBucketStorage } from '../../storage/storage-index.js'; -import { loadConfig } from '../../util/config.js'; +import * as storage from '../../storage/storage-index.js'; +import * as utils from '../../util/util-index.js'; import { extractRunnerOptions, wrapConfigCommand } from './config-command.js'; const COMMAND_NAME = 'compact'; @@ -21,28 +21,40 @@ const HEAP_LIMIT = v8.getHeapStatistics().heap_size_limit; const COMPACT_MEMORY_LIMIT_MB = Math.min(HEAP_LIMIT / 1024 / 1024 - 128, 1024); export function registerCompactAction(program: Command) { - const compactCommand = program.command(COMMAND_NAME); + const compactCommand = program + .command(COMMAND_NAME) + .option(`-b, --buckets [buckets]`, 'Bucket or bucket definition name (optional, comma-separate multiple names)'); wrapConfigCommand(compactCommand); return compactCommand.description('Compact storage').action(async (options) => { + const buckets = options.buckets?.split(','); + if (buckets == null) { + logger.info('Compacting storage for all buckets...'); + } else { + logger.info(`Compacting storage for ${buckets.join(', ')}...`); + } const runnerConfig = extractRunnerOptions(options); - - const config = await loadConfig(runnerConfig); - const { storage } = config; - const psdb = createPowerSyncMongo(storage); + const configuration = await utils.loadConfig(runnerConfig); + logger.info('Successfully loaded configuration...'); + const { storage: storageConfig } = configuration; + logger.info('Connecting to storage...'); + const psdb = storage.createPowerSyncMongo(storageConfig); const client = psdb.client; await client.connect(); try { - const
bucketStorage = new MongoBucketStorage(psdb, { slot_name_prefix: config.slot_name_prefix }); - const active = await bucketStorage.getActiveSyncRules(); + const bucketStorage = new storage.MongoBucketStorage(psdb, { + slot_name_prefix: configuration.slot_name_prefix + }); + const active = await bucketStorage.getActiveSyncRulesContent(); if (active == null) { logger.info('No active instance to compact'); return; } - const p = bucketStorage.getInstance(active); - await p.compact({ memoryLimitMB: COMPACT_MEMORY_LIMIT_MB }); - logger.info('done'); + using p = bucketStorage.getInstance(active); + logger.info('Performing compaction...'); + await p.compact({ memoryLimitMB: COMPACT_MEMORY_LIMIT_MB, compactBuckets: buckets }); + logger.info('Successfully compacted storage.'); } catch (e) { logger.error(`Failed to compact: ${e.toString()}`); process.exit(1); diff --git a/packages/service-core/src/entry/commands/migrate-action.ts b/packages/service-core/src/entry/commands/migrate-action.ts index 2347e63d3..29ed5bbee 100644 --- a/packages/service-core/src/entry/commands/migrate-action.ts +++ b/packages/service-core/src/entry/commands/migrate-action.ts @@ -1,9 +1,8 @@ +import { logger } from '@powersync/lib-services-framework'; import { Command } from 'commander'; +import * as migrations from '../../migrations/migrations-index.js'; import { extractRunnerOptions, wrapConfigCommand } from './config-command.js'; -import { migrate } from '../../migrations/migrations.js'; -import { Direction } from '../../migrations/definitions.js'; -import { logger } from '@powersync/lib-services-framework'; const COMMAND_NAME = 'migrate'; @@ -15,13 +14,11 @@ export function registerMigrationAction(program: Command) { return migrationCommand .description('Run migrations') .argument('<direction>', 'Migration direction.
`up` or `down`') - .action(async (direction: Direction, options) => { - const runnerConfig = extractRunnerOptions(options); - + .action(async (direction: migrations.Direction, options) => { try { - await migrate({ + await migrations.migrate({ direction, - runner_config: runnerConfig + runner_config: extractRunnerOptions(options) }); process.exit(0); diff --git a/packages/service-core/src/entry/commands/teardown-action.ts b/packages/service-core/src/entry/commands/teardown-action.ts index 3f0e402aa..2515f91af 100644 --- a/packages/service-core/src/entry/commands/teardown-action.ts +++ b/packages/service-core/src/entry/commands/teardown-action.ts @@ -1,7 +1,7 @@ import { Command } from 'commander'; -import { extractRunnerOptions, wrapConfigCommand } from './config-command.js'; import { teardown } from '../../runner/teardown.js'; +import { extractRunnerOptions, wrapConfigCommand } from './config-command.js'; const COMMAND_NAME = 'teardown'; @@ -12,7 +12,7 @@ export function registerTearDownAction(program: Command) { return teardownCommand .argument('[ack]', 'Type `TEARDOWN` to confirm teardown should occur') - .description('Terminate all replicating sync rules, deleting the replication slots') + .description('Terminate all replicating sync rules, clear remote configuration and remove all data') .action(async (ack, options) => { if (ack !== 'TEARDOWN') { throw new Error('TEARDOWN was not acknowledged.'); diff --git a/packages/service-core/src/index.ts b/packages/service-core/src/index.ts index b82d5d731..a365d77d4 100644 --- a/packages/service-core/src/index.ts +++ b/packages/service-core/src/index.ts @@ -18,8 +18,11 @@ export * as framework from '@powersync/lib-services-framework'; export * from './metrics/Metrics.js'; export * as metrics from './metrics/Metrics.js'; -export * from './migrations/migrations.js'; export * as migrations from './migrations/migrations-index.js'; +export * from './migrations/migrations.js'; + +export * from './modules/modules-index.js'; +export * as modules from './modules/modules-index.js'; export * from './replication/replication-index.js'; export * as replication from './replication/replication-index.js'; @@ -33,7 +36,7 @@ export * as storage from './storage/storage-index.js'; export * from './sync/sync-index.js'; export * as sync from './sync/sync-index.js'; -export * from './system/CorePowerSyncSystem.js'; +export * from './system/system-index.js'; export * as system from './system/system-index.js'; export * from './util/util-index.js'; diff --git a/packages/service-core/src/metrics/Metrics.ts b/packages/service-core/src/metrics/Metrics.ts index 2ec759104..98888f0db 100644 --- a/packages/service-core/src/metrics/Metrics.ts +++ b/packages/service-core/src/metrics/Metrics.ts @@ -1,13 +1,11 @@ import { Attributes, Counter, ObservableGauge, UpDownCounter, ValueType } from '@opentelemetry/api'; -import { PrometheusExporter } from '@opentelemetry/exporter-prometheus'; -import { MeterProvider, MetricReader, PeriodicExportingMetricReader } from '@opentelemetry/sdk-metrics'; import { OTLPMetricExporter } from '@opentelemetry/exporter-metrics-otlp-http'; -import * as jpgwire from '@powersync/service-jpgwire'; -import * as util from '../util/util-index.js'; -import * as storage from '../storage/storage-index.js'; -import { CorePowerSyncSystem } from '../system/CorePowerSyncSystem.js'; +import { PrometheusExporter } from '@opentelemetry/exporter-prometheus'; import { Resource } from '@opentelemetry/resources'; +import { MeterProvider, MetricReader, 
PeriodicExportingMetricReader } from '@opentelemetry/sdk-metrics'; import { logger } from '@powersync/lib-services-framework'; +import * as storage from '../storage/storage-index.js'; +import * as util from '../util/util-index.js'; export interface MetricsOptions { disable_telemetry_sharing: boolean; @@ -202,7 +200,7 @@ Anonymous telemetry is currently: ${options.disable_telemetry_sharing ? 'disable this.concurrent_connections.add(0); } - public configureReplicationMetrics(system: CorePowerSyncSystem) { + public configureReplicationMetrics(bucketStorage: storage.BucketStorageFactory) { // Rate limit collection of these stats, since it may be an expensive query const MINIMUM_INTERVAL = 60_000; @@ -211,7 +209,7 @@ Anonymous telemetry is currently: ${options.disable_telemetry_sharing ? 'disable function getMetrics() { if (cachedRequest == null || Date.now() - cacheTimestamp > MINIMUM_INTERVAL) { - cachedRequest = system.storage.getStorageMetrics().catch((e) => { + cachedRequest = bucketStorage.getStorageMetrics().catch((e) => { logger.error(`Failed to get storage metrics`, e); return null; }); @@ -240,14 +238,6 @@ Anonymous telemetry is currently: ${options.disable_telemetry_sharing ? 'disable result.observe(metrics.replication_size_bytes); } }); - - const class_scoped_data_replicated_bytes = this.data_replicated_bytes; - // Record replicated bytes using global jpgwire metrics. - jpgwire.setMetricsRecorder({ - addBytesRead(bytes) { - class_scoped_data_replicated_bytes.add(bytes); - } - }); } public async getMetricValueForTests(name: string): Promise { diff --git a/packages/service-core/src/migrations/db/migrations/1684951997326-init.ts b/packages/service-core/src/migrations/db/migrations/1684951997326-init.ts index e41adf260..9042f0b55 100644 --- a/packages/service-core/src/migrations/db/migrations/1684951997326-init.ts +++ b/packages/service-core/src/migrations/db/migrations/1684951997326-init.ts @@ -2,8 +2,11 @@ import * as mongo from '../../../db/mongo.js'; import * as storage from '../../../storage/storage-index.js'; import * as utils from '../../../util/util-index.js'; -export const up = async (context?: utils.MigrationContext) => { - const config = await utils.loadConfig(context?.runner_config); +export const up = async (context: utils.MigrationContext) => { + const { runner_config } = context; + + const config = await utils.loadConfig(runner_config); + const database = storage.createPowerSyncMongo(config.storage); await mongo.waitForAuth(database.db); try { @@ -20,8 +23,10 @@ export const up = async (context?: utils.MigrationContext) => { } }; -export const down = async (context?: utils.MigrationContext) => { - const config = await utils.loadConfig(context?.runner_config); +export const down = async (context: utils.MigrationContext) => { + const { runner_config } = context; + const config = await utils.loadConfig(runner_config); + const database = storage.createPowerSyncMongo(config.storage); try { if (await database.bucket_parameters.indexExists('lookup')) { diff --git a/packages/service-core/src/migrations/db/migrations/1702295701188-sync-rule-state.ts b/packages/service-core/src/migrations/db/migrations/1702295701188-sync-rule-state.ts index 4a802e7de..d0782ce5a 100644 --- a/packages/service-core/src/migrations/db/migrations/1702295701188-sync-rule-state.ts +++ b/packages/service-core/src/migrations/db/migrations/1702295701188-sync-rule-state.ts @@ -23,9 +23,11 @@ interface LegacySyncRulesDocument extends storage.SyncRuleDocument { auto_activate?: boolean; } -export const up = async 
(context?: utils.MigrationContext) => { - const config = await utils.loadConfig(context?.runner_config); +export const up = async (context: utils.MigrationContext) => { + const { runner_config } = context; + const config = await utils.loadConfig(runner_config); const db = storage.createPowerSyncMongo(config.storage); + await mongo.waitForAuth(db.db); try { // We keep the old flags for existing deployments still shutting down. @@ -68,8 +70,9 @@ export const up = async (context?: utils.MigrationContext) => { } }; -export const down = async (context?: utils.MigrationContext) => { - const config = await utils.loadConfig(context?.runner_config); +export const down = async (context: utils.MigrationContext) => { + const { runner_config } = context; + const config = await utils.loadConfig(runner_config); const db = storage.createPowerSyncMongo(config.storage); try { diff --git a/packages/service-core/src/migrations/db/migrations/1711543888062-write-checkpoint-index.ts b/packages/service-core/src/migrations/db/migrations/1711543888062-write-checkpoint-index.ts index a73f0c7d7..be568408a 100644 --- a/packages/service-core/src/migrations/db/migrations/1711543888062-write-checkpoint-index.ts +++ b/packages/service-core/src/migrations/db/migrations/1711543888062-write-checkpoint-index.ts @@ -1,8 +1,9 @@ import * as storage from '../../../storage/storage-index.js'; import * as utils from '../../../util/util-index.js'; -export const up = async (context?: utils.MigrationContext) => { - const config = await utils.loadConfig(context?.runner_config); +export const up = async (context: utils.MigrationContext) => { + const { runner_config } = context; + const config = await utils.loadConfig(runner_config); const db = storage.createPowerSyncMongo(config.storage); try { @@ -17,8 +18,9 @@ export const up = async (context?: utils.MigrationContext) => { } }; -export const down = async (context?: utils.MigrationContext) => { - const config = await utils.loadConfig(context?.runner_config); +export const down = async (context: utils.MigrationContext) => { + const { runner_config } = context; + const config = await utils.loadConfig(runner_config); const db = storage.createPowerSyncMongo(config.storage); diff --git a/packages/service-core/src/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.ts b/packages/service-core/src/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.ts new file mode 100644 index 000000000..2bac37fcc --- /dev/null +++ b/packages/service-core/src/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.ts @@ -0,0 +1,37 @@ +import * as storage from '../../../storage/storage-index.js'; +import * as utils from '../../../util/util-index.js'; + +const INDEX_NAME = 'user_sync_rule_unique'; + +export const up = async (context: utils.MigrationContext) => { + const { runner_config } = context; + const config = await utils.loadConfig(runner_config); + const db = storage.createPowerSyncMongo(config.storage); + + try { + await db.custom_write_checkpoints.createIndex( + { + user_id: 1, + sync_rules_id: 1 + }, + { name: INDEX_NAME, unique: true } + ); + } finally { + await db.client.close(); + } +}; + +export const down = async (context: utils.MigrationContext) => { + const { runner_config } = context; + const config = await utils.loadConfig(runner_config); + + const db = storage.createPowerSyncMongo(config.storage); + + try { + if (await db.custom_write_checkpoints.indexExists(INDEX_NAME)) { + await db.custom_write_checkpoints.dropIndex(INDEX_NAME); + } + } 
finally { + await db.client.close(); + } +}; diff --git a/packages/service-core/src/migrations/migrations.ts b/packages/service-core/src/migrations/migrations.ts index 3a1343844..d2abdc6ea 100644 --- a/packages/service-core/src/migrations/migrations.ts +++ b/packages/service-core/src/migrations/migrations.ts @@ -2,13 +2,13 @@ import * as fs from 'fs/promises'; import * as path from 'path'; import { fileURLToPath } from 'url'; +import { logger } from '@powersync/lib-services-framework'; import * as db from '../db/db-index.js'; -import * as util from '../util/util-index.js'; import * as locks from '../locks/locks-index.js'; +import * as util from '../util/util-index.js'; import { Direction } from './definitions.js'; -import { createMongoMigrationStore } from './store/migration-store.js'; import { execute, writeLogsToStore } from './executor.js'; -import { logger } from '@powersync/lib-services-framework'; +import { createMongoMigrationStore } from './store/migration-store.js'; const DEFAULT_MONGO_LOCK_COLLECTION = 'locks'; const MONGO_LOCK_PROCESS = 'migrations'; @@ -23,18 +23,23 @@ export type MigrationOptions = { runner_config: util.RunnerConfig; }; +export type AutomaticMigrationParams = { + config: util.ResolvedPowerSyncConfig; + runner_config: util.RunnerConfig; +}; + /** * Loads migrations and injects a custom context for loading the specified * runner configuration. */ -const loadMigrations = async (dir: string, runner_config: util.RunnerConfig) => { +const loadMigrations = async (dir: string, runnerConfig: util.RunnerConfig) => { const files = await fs.readdir(dir); const migrations = files.filter((file) => { return path.extname(file) === '.js'; }); const context: util.MigrationContext = { - runner_config + runner_config: runnerConfig }; return await Promise.all( @@ -55,14 +60,13 @@ const loadMigrations = async (dir: string, runner_config: util.RunnerConfig) => export const migrate = async (options: MigrationOptions) => { const { direction, runner_config } = options; + const config = await util.loadConfig(runner_config); + const { storage } = config; /** * Try and get Mongo from config file. * But this might not be available in Journey Micro as we use the standard Mongo. 
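The migration changes above make the `MigrationContext` argument required rather than optional. As a rough sketch, not taken from this diff, a new migration file written against the updated signature would follow the same pattern shown in the migrations here; the body of `up`/`down` is a placeholder:

```typescript
import * as storage from '../../../storage/storage-index.js';
import * as utils from '../../../util/util-index.js';

// Hypothetical example migration using the now-required MigrationContext.
export const up = async (context: utils.MigrationContext) => {
  const { runner_config } = context;
  const config = await utils.loadConfig(runner_config);
  const db = storage.createPowerSyncMongo(config.storage);
  try {
    // Apply collection/index changes here.
  } finally {
    await db.client.close();
  }
};

export const down = async (context: utils.MigrationContext) => {
  const { runner_config } = context;
  const config = await utils.loadConfig(runner_config);
  const db = storage.createPowerSyncMongo(config.storage);
  try {
    // Revert the changes applied in `up` here.
  } finally {
    await db.client.close();
  }
};
```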
*/ - const config = await util.loadConfig(runner_config); - const { storage } = config; - const client = db.mongo.createMongoClient(storage); logger.info('Connecting to MongoDB'); await client.connect(); @@ -124,3 +128,15 @@ export const migrate = async (options: MigrationOptions) => { logger.info('Done with migrations'); } }; + +/** + * Ensures automatic migrations are executed + */ +export const ensureAutomaticMigrations = async (params: AutomaticMigrationParams) => { + if (!params.config.migrations?.disable_auto_migration) { + await migrate({ + direction: Direction.Up, + runner_config: params.runner_config + }); + } +}; diff --git a/packages/service-core/src/modules/AbstractModule.ts b/packages/service-core/src/modules/AbstractModule.ts new file mode 100644 index 000000000..0cdb8c626 --- /dev/null +++ b/packages/service-core/src/modules/AbstractModule.ts @@ -0,0 +1,37 @@ +import { ServiceContextContainer } from '../system/ServiceContext.js'; +import { logger } from '@powersync/lib-services-framework'; +import winston from 'winston'; +import { PersistedSyncRulesContent } from '../storage/BucketStorage.js'; + +export interface TearDownOptions { + /** + * If required, tear down any configuration/state for the specific sync rules + */ + syncRules?: PersistedSyncRulesContent[]; +} + +export interface AbstractModuleOptions { + name: string; +} + +export abstract class AbstractModule { + protected logger: winston.Logger; + + protected constructor(protected options: AbstractModuleOptions) { + this.logger = logger.child({ name: `Module:${options.name}` }); + } + + /** + * Initialize the module using any required services from the ServiceContext + */ + public abstract initialize(context: ServiceContextContainer): Promise; + + /** + * Permanently clean up and dispose of any configuration or state for this module. 
+ */ + public abstract teardown(options: TearDownOptions): Promise; + + public get name() { + return this.options.name; + } +} diff --git a/packages/service-core/src/modules/ModuleManager.ts b/packages/service-core/src/modules/ModuleManager.ts new file mode 100644 index 000000000..427acd810 --- /dev/null +++ b/packages/service-core/src/modules/ModuleManager.ts @@ -0,0 +1,34 @@ +import { logger } from '@powersync/lib-services-framework'; +import * as system from '../system/system-index.js'; +import { AbstractModule, TearDownOptions } from './AbstractModule.js'; +/** + * The module manager keeps track of activated modules + */ +export class ModuleManager { + private readonly modules: Map = new Map(); + + public register(modules: AbstractModule[]) { + for (const module of modules) { + if (this.modules.has(module.name)) { + logger.warn(`Module ${module.name} already registered, skipping.`); + continue; + } + this.modules.set(module.name, module); + logger.info(`Successfully registered Module ${module.name}.`); + } + } + + async initialize(serviceContext: system.ServiceContextContainer) { + logger.info(`Initializing modules...`); + for (const module of this.modules.values()) { + await module.initialize(serviceContext); + } + logger.info(`Successfully Initialized modules.`); + } + + async tearDown(options: TearDownOptions) { + for (const module of this.modules.values()) { + await module.teardown(options); + } + } +} diff --git a/packages/service-core/src/modules/modules-index.ts b/packages/service-core/src/modules/modules-index.ts new file mode 100644 index 000000000..1b32990e8 --- /dev/null +++ b/packages/service-core/src/modules/modules-index.ts @@ -0,0 +1,2 @@ +export * from './ModuleManager.js'; +export * from './AbstractModule.js'; diff --git a/packages/service-core/src/replication/AbstractReplicationJob.ts b/packages/service-core/src/replication/AbstractReplicationJob.ts new file mode 100644 index 000000000..8226e5422 --- /dev/null +++ b/packages/service-core/src/replication/AbstractReplicationJob.ts @@ -0,0 +1,79 @@ +import { container, logger } from '@powersync/lib-services-framework'; +import winston from 'winston'; +import * as storage from '../storage/storage-index.js'; +import { ErrorRateLimiter } from './ErrorRateLimiter.js'; + +export interface AbstractReplicationJobOptions { + id: string; + storage: storage.SyncRulesBucketStorage; + lock: storage.ReplicationLock; + rateLimiter: ErrorRateLimiter; +} + +export abstract class AbstractReplicationJob { + protected logger: winston.Logger; + protected abortController = new AbortController(); + protected isReplicatingPromise: Promise | null = null; + + protected constructor(protected options: AbstractReplicationJobOptions) { + this.logger = logger.child({ name: `ReplicationJob: ${this.id}` }); + } + + /** + * Copy the initial data set from the data source if required and then keep it in sync. 
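The `AbstractModule` and `ModuleManager` classes above define the registration surface of the new module architecture. A minimal sketch of how a module might be defined and registered follows; `ExampleModule` is hypothetical, and it assumes `ServiceContextContainer` and `TearDownOptions` are re-exported under the `system` and `modules` namespaces as the index changes in this diff suggest:

```typescript
import { modules, system } from '@powersync/service-core';

// Hypothetical module, used only to illustrate the registration flow.
class ExampleModule extends modules.AbstractModule {
  constructor() {
    super({ name: 'example' });
  }

  // Called with the shared service context during startup.
  async initialize(context: system.ServiceContextContainer): Promise<void> {
    this.logger.info('ExampleModule initialized');
  }

  // Permanently clean up any state owned by this module.
  async teardown(options: modules.TearDownOptions): Promise<void> {
    // Nothing to clean up in this illustrative module.
  }
}

const moduleManager = new modules.ModuleManager();
moduleManager.register([new ExampleModule()]);
```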
+ */ + abstract replicate(): Promise; + + /** + * Ensure the connection to the data source remains active + */ + abstract keepAlive(): Promise; + + /** + * Start the replication process + */ + public async start(): Promise { + this.isReplicatingPromise = this.replicate() + .catch((ex) => { + container.reporter.captureException(ex, { + metadata: { + replicator: this.id + } + }); + this.logger.error(`Replication failed.`, ex); + }) + .finally(async () => { + this.abortController.abort(); + await this.options.lock.release(); + }); + } + + /** + * Safely stop the replication process + */ + public async stop(): Promise { + this.logger.info(`Stopping replication job for sync rule iteration: ${this.storage.group_id}`); + this.abortController.abort(); + await this.isReplicatingPromise; + } + + public get id() { + return this.options.id; + } + + public get storage() { + return this.options.storage; + } + + protected get lock() { + return this.options.lock; + } + + protected get rateLimiter() { + return this.options.rateLimiter; + } + + public get isStopped(): boolean { + return this.abortController.signal.aborted; + } +} diff --git a/packages/service-core/src/replication/AbstractReplicator.ts b/packages/service-core/src/replication/AbstractReplicator.ts new file mode 100644 index 000000000..fcc3fa0ec --- /dev/null +++ b/packages/service-core/src/replication/AbstractReplicator.ts @@ -0,0 +1,228 @@ +import { container, logger } from '@powersync/lib-services-framework'; +import { hrtime } from 'node:process'; +import winston from 'winston'; +import * as storage from '../storage/storage-index.js'; +import { StorageEngine } from '../storage/storage-index.js'; +import { SyncRulesProvider } from '../util/config/sync-rules/sync-rules-provider.js'; +import { AbstractReplicationJob } from './AbstractReplicationJob.js'; +import { ErrorRateLimiter } from './ErrorRateLimiter.js'; + +// 5 minutes +const PING_INTERVAL = 1_000_000_000n * 300n; + +export interface CreateJobOptions { + lock: storage.ReplicationLock; + storage: storage.SyncRulesBucketStorage; +} + +export interface AbstractReplicatorOptions { + id: string; + storageEngine: StorageEngine; + syncRuleProvider: SyncRulesProvider; + /** + * This limits the effect of retries when there is a persistent issue. + */ + rateLimiter: ErrorRateLimiter; +} + +/** + * A replicator manages the mechanics for replicating data from a data source to a storage bucket. + * This includes copying across the original data set and then keeping it in sync with the data source using Replication Jobs. + * It also handles any changes to the sync rules. + */ +export abstract class AbstractReplicator { + protected logger: winston.Logger; + /** + * Map of replication jobs by sync rule id. Usually there is only one running job, but there could be two when + * transitioning to a new set of sync rules. + * @private + */ + private replicationJobs = new Map(); + private stopped = false; + + // First ping is only after 5 minutes, not when starting + private lastPing = hrtime.bigint(); + + protected constructor(private options: AbstractReplicatorOptions) { + this.logger = logger.child({ name: `Replicator:${options.id}` }); + } + + abstract createJob(options: CreateJobOptions): T; + + /** + * Clean up any configuration or state for the specified sync rule on the datasource. 
+ * Should be a no-op if the configuration has already been cleared + */ + abstract cleanUp(syncRuleStorage: storage.SyncRulesBucketStorage): Promise; + + public get id() { + return this.options.id; + } + + protected get storage() { + return this.options.storageEngine.activeBucketStorage; + } + + protected get syncRuleProvider() { + return this.options.syncRuleProvider; + } + + protected get rateLimiter() { + return this.options.rateLimiter; + } + + public async start(): Promise { + this.runLoop().catch((e) => { + this.logger.error('Data source fatal replication error', e); + container.reporter.captureException(e); + setTimeout(() => { + process.exit(1); + }, 1000); + }); + } + + public async stop(): Promise { + this.stopped = true; + let promises: Promise[] = []; + for (const job of this.replicationJobs.values()) { + promises.push(job.stop()); + } + await Promise.all(promises); + } + + private async runLoop() { + const syncRules = await this.syncRuleProvider.get(); + let configuredLock: storage.ReplicationLock | undefined = undefined; + if (syncRules != null) { + this.logger.info('Loaded sync rules'); + try { + // Configure new sync rules, if they have changed. + // In that case, also immediately take out a lock, so that another process doesn't start replication on it. + const { lock } = await this.storage.configureSyncRules(syncRules, { + lock: true + }); + if (lock) { + configuredLock = lock; + } + } catch (e) { + // Log, but continue with previous sync rules + this.logger.error(`Failed to update sync rules from configuration`, e); + } + } else { + this.logger.info('No sync rules configured - configure via API'); + } + while (!this.stopped) { + await container.probes.touch(); + try { + await this.refresh({ configured_lock: configuredLock }); + // The lock is only valid on the first refresh. + configuredLock = undefined; + + // Ensure that the replication jobs' connections are kept alive. + // We don't ping while in error retry back-off, to avoid having too failures. + if (this.rateLimiter.mayPing()) { + const now = hrtime.bigint(); + if (now - this.lastPing >= PING_INTERVAL) { + for (const activeJob of this.replicationJobs.values()) { + await activeJob.keepAlive(); + } + + this.lastPing = now; + } + } + } catch (e) { + this.logger.error('Failed to refresh replication jobs', e); + } + await new Promise((resolve) => setTimeout(resolve, 5000)); + } + } + + private async refresh(options?: { configured_lock?: storage.ReplicationLock }) { + if (this.stopped) { + return; + } + + let configuredLock = options?.configured_lock; + + const existingJobs = new Map(this.replicationJobs.entries()); + const replicatingSyncRules = await this.storage.getReplicatingSyncRules(); + const newJobs = new Map(); + for (let syncRules of replicatingSyncRules) { + const existingJob = existingJobs.get(syncRules.id); + if (existingJob && !existingJob.isStopped) { + // No change + existingJobs.delete(syncRules.id); + newJobs.set(syncRules.id, existingJob); + } else if (existingJob && existingJob.isStopped) { + // Stopped (e.g. fatal error). + // Remove from the list. Next refresh call will restart the job. 
+ existingJobs.delete(syncRules.id); + } else { + // New sync rules were found (or resume after restart) + try { + let lock: storage.ReplicationLock; + if (configuredLock?.sync_rules_id == syncRules.id) { + lock = configuredLock; + } else { + lock = await syncRules.lock(); + } + const storage = this.storage.getInstance(syncRules); + const newJob = this.createJob({ + lock: lock, + storage: storage + }); + + newJobs.set(syncRules.id, newJob); + newJob.start(); + } catch (e) { + // Could be a sync rules parse error, + // for example from stricter validation that was added. + // This will be retried every couple of seconds. + // When new (valid) sync rules are deployed and processed, this one will be disabled. + this.logger.error('Failed to start replication for new sync rules', e); + } + } + } + + this.replicationJobs = newJobs; + + // Terminate any orphaned jobs that no longer have sync rules + for (let job of existingJobs.values()) { + // Old - stop and clean up + try { + await job.stop(); + await this.terminateSyncRules(job.storage); + job.storage[Symbol.dispose](); + } catch (e) { + // This will be retried + this.logger.warn('Failed to terminate old replication job', e); + } + } + + // Sync rules stopped previously or by a different process. + const stopped = await this.storage.getStoppedSyncRules(); + for (let syncRules of stopped) { + try { + using syncRuleStorage = this.storage.getInstance(syncRules); + await this.terminateSyncRules(syncRuleStorage); + } catch (e) { + this.logger.warn(`Failed to clean up replication config for sync rule: ${syncRules.id}`, e); + } + } + } + + protected createJobId(syncRuleId: number) { + return `${this.id}-${syncRuleId}`; + } + + protected async terminateSyncRules(syncRuleStorage: storage.SyncRulesBucketStorage) { + this.logger.info(`Terminating sync rules: ${syncRuleStorage.group_id}...`); + try { + await this.cleanUp(syncRuleStorage); + await syncRuleStorage.terminate(); + this.logger.info(`Successfully terminated sync rules: ${syncRuleStorage.group_id}`); + } catch (e) { + this.logger.warn(`Failed to clean up replication config for sync rules: ${syncRuleStorage.group_id}`, e); + } + } +} diff --git a/packages/service-core/src/replication/ErrorRateLimiter.ts b/packages/service-core/src/replication/ErrorRateLimiter.ts index a94cbd9ed..700919ec1 100644 --- a/packages/service-core/src/replication/ErrorRateLimiter.ts +++ b/packages/service-core/src/replication/ErrorRateLimiter.ts @@ -1,50 +1,6 @@ -import { setTimeout } from 'timers/promises'; - export interface ErrorRateLimiter { waitUntilAllowed(options?: { signal?: AbortSignal }): Promise; reportError(e: any): void; mayPing(): boolean; } - -export class DefaultErrorRateLimiter implements ErrorRateLimiter { - nextAllowed: number = Date.now(); - - async waitUntilAllowed(options?: { signal?: AbortSignal | undefined } | undefined): Promise { - const delay = Math.max(0, this.nextAllowed - Date.now()); - // Minimum delay between connections, even without errors - this.setDelay(500); - await setTimeout(delay, undefined, { signal: options?.signal }); - } - - mayPing(): boolean { - return Date.now() >= this.nextAllowed; - } - - reportError(e: any): void { - const message = (e.message as string) ??
''; - if (message.includes('password authentication failed')) { - // Wait 15 minutes, to avoid triggering Supabase's fail2ban - this.setDelay(900_000); - } else if (message.includes('ENOTFOUND')) { - // DNS lookup issue - incorrect URI or deleted instance - this.setDelay(120_000); - } else if (message.includes('ECONNREFUSED')) { - // Could be fail2ban or similar - this.setDelay(120_000); - } else if ( - message.includes('Unable to do postgres query on ended pool') || - message.includes('Postgres unexpectedly closed connection') - ) { - // Connection timed out - ignore / immediately retry - // We don't explicitly set the delay to 0, since there could have been another error that - // we need to respect. - } else { - this.setDelay(30_000); - } - } - - private setDelay(delay: number) { - this.nextAllowed = Math.max(this.nextAllowed, Date.now() + delay); - } -} diff --git a/packages/service-core/src/replication/PgRelation.ts b/packages/service-core/src/replication/PgRelation.ts deleted file mode 100644 index 1635487c0..000000000 --- a/packages/service-core/src/replication/PgRelation.ts +++ /dev/null @@ -1,42 +0,0 @@ -import { PgoutputRelation } from '@powersync/service-jpgwire'; - -export interface PgRelation { - readonly relationId: number; - readonly schema: string; - readonly name: string; - readonly replicaIdentity: ReplicationIdentity; - readonly replicationColumns: ReplicationColumn[]; -} - -export type ReplicationIdentity = 'default' | 'nothing' | 'full' | 'index'; - -export interface ReplicationColumn { - readonly name: string; - readonly typeOid: number; -} - -export function getReplicaIdColumns(relation: PgoutputRelation): ReplicationColumn[] { - if (relation.replicaIdentity == 'nothing') { - return []; - } else { - return relation.columns.filter((c) => (c.flags & 0b1) != 0).map((c) => ({ name: c.name, typeOid: c.typeOid })); - } -} -export function getRelId(source: PgoutputRelation): number { - // Source types are wrong here - const relId = (source as any).relationOid as number; - if (relId == null || typeof relId != 'number') { - throw new Error(`No relation id!`); - } - return relId; -} - -export function getPgOutputRelation(source: PgoutputRelation): PgRelation { - return { - name: source.name, - schema: source.schema, - relationId: getRelId(source), - replicaIdentity: source.replicaIdentity, - replicationColumns: getReplicaIdColumns(source) - }; -} diff --git a/packages/service-core/src/replication/ReplicationEngine.ts b/packages/service-core/src/replication/ReplicationEngine.ts new file mode 100644 index 000000000..9b36f4a40 --- /dev/null +++ b/packages/service-core/src/replication/ReplicationEngine.ts @@ -0,0 +1,43 @@ +import { logger } from '@powersync/lib-services-framework'; +import { AbstractReplicator } from './AbstractReplicator.js'; + +export class ReplicationEngine { + private readonly replicators: Map = new Map(); + + /** + * Register a Replicator with the engine + * + * @param replicator + */ + public register(replicator: AbstractReplicator) { + if (this.replicators.has(replicator.id)) { + throw new Error(`Replicator with id: ${replicator.id} already registered`); + } + logger.info(`Successfully registered Replicator ${replicator.id} with ReplicationEngine`); + this.replicators.set(replicator.id, replicator); + } + + /** + * Start replication on all managed Replicators + */ + public start(): void { + logger.info('Starting Replication Engine...'); + for (const replicator of this.replicators.values()) { + logger.info(`Starting Replicator: ${replicator.id}`); + 
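The new `ReplicationEngine` exposes a small register/start/shutDown lifecycle. A minimal usage sketch, assuming `someReplicator` is an instance of any `AbstractReplicator` subclass (for example, one produced by a module's `createReplicator()`):

```typescript
import { replication } from '@powersync/service-core';

// Hypothetical replicator instance; any AbstractReplicator subclass would work here.
declare const someReplicator: replication.AbstractReplicator;

const engine = new replication.ReplicationEngine();
engine.register(someReplicator);

// Kick off replication for all registered replicators.
engine.start();

// Later, during service shutdown:
await engine.shutDown();
```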
replicator.start(); + } + logger.info('Successfully started Replication Engine.'); + } + + /** + * Stop replication on all managed Replicators + */ + public async shutDown(): Promise { + logger.info('Shutting down Replication Engine...'); + for (const replicator of this.replicators.values()) { + logger.info(`Stopping Replicator: ${replicator.id}`); + await replicator.stop(); + } + logger.info('Successfully shut down Replication Engine.'); + } +} diff --git a/packages/service-core/src/replication/ReplicationModule.ts b/packages/service-core/src/replication/ReplicationModule.ts new file mode 100644 index 000000000..5b5bca8de --- /dev/null +++ b/packages/service-core/src/replication/ReplicationModule.ts @@ -0,0 +1,122 @@ +import { DataSourceConfig } from '@powersync/service-types/dist/config/PowerSyncConfig.js'; +import * as t from 'ts-codec'; + +import * as types from '@powersync/service-types'; +import * as api from '../api/api-index.js'; +import * as modules from '../modules/modules-index.js'; +import * as system from '../system/system-index.js'; +import { schema } from '@powersync/lib-services-framework'; +import { AbstractReplicator } from './AbstractReplicator.js'; +import { TearDownOptions } from '../modules/modules-index.js'; + +/** + * Provides a common interface for testing the connection to a DataSource. + */ +export interface ConnectionTester { + /** + * Confirm if a connection can be established to the datasource for the provided datasource configuration + * @param config + */ + testConnection(config: TConfig): Promise; +} + +export interface ReplicationModuleOptions extends modules.AbstractModuleOptions { + type: string; + configSchema: t.AnyCodec; +} + +/** + * A replication module describes all the functionality that PowerSync requires to + * replicate data from a DataSource. Whenever a new data source is added to powersync this class should be extended. + */ +export abstract class ReplicationModule + extends modules.AbstractModule + implements ConnectionTester +{ + protected type: string; + protected configSchema: t.AnyCodec; + protected decodedConfig: TConfig | undefined; + + /** + * @protected + * @param options + */ + protected constructor(options: ReplicationModuleOptions) { + super(options); + this.type = options.type; + this.configSchema = options.configSchema; + } + + /** + * Create the RouteAPI adapter for the DataSource required to service the sync API + * endpoints. + */ + protected abstract createRouteAPIAdapter(): api.RouteAPI; + + /** + * Create the Replicator to be used by the ReplicationEngine. + */ + protected abstract createReplicator(context: system.ServiceContext): AbstractReplicator; + + public abstract testConnection(config: TConfig): Promise; + + /** + * Register this module's Replicators and RouteAPI adapters if the required configuration is present. + */ + public async initialize(context: system.ServiceContext): Promise { + if (!context.configuration.connections) { + // No data source configuration found in the config skip for now + return; + } + + const matchingConfig = context.configuration.connections.filter((dataSource) => dataSource.type === this.type); + if (!matchingConfig.length) { + // No configuration for this module was found + return; + } + + if (matchingConfig.length > 1) { + this.logger.warning( + `Multiple data sources of type ${this.type} found in the configuration. 
Only the first will be used.` + ); + } + + try { + const baseMatchingConfig = matchingConfig[0] as TConfig; + // If decoding fails, log the error and continue, no replication will happen for this data source + this.decodeConfig(baseMatchingConfig); + + context.replicationEngine?.register(this.createReplicator(context)); + context.routerEngine?.registerAPI(this.createRouteAPIAdapter()); + } catch (e) { + this.logger.error('Failed to initialize.', e); + } + } + + protected decodeConfig(config: TConfig): void { + this.validateConfig(config); + this.decodedConfig = this.configSchema.decode(config); + } + + private validateConfig(config: TConfig): void { + const validator = schema + .parseJSONSchema( + // This generates a schema for the encoded form of the codec + t.generateJSONSchema(this.configSchema, { + allowAdditional: true, + parsers: [types.configFile.portParser] + }) + ) + .validator(); + + const valid = validator.validate(config); + + if (!valid.valid) { + throw new Error(`Failed to validate Module ${this.name} configuration: ${valid.errors.join(', ')}`); + } + } + + protected getDefaultId(dataSourceName: string): string { + return `${this.type}-${dataSourceName}`; + } +} diff --git a/packages/service-core/src/replication/WalConnection.ts b/packages/service-core/src/replication/WalConnection.ts deleted file mode 100644 index 5fa3cf8a7..000000000 --- a/packages/service-core/src/replication/WalConnection.ts +++ /dev/null @@ -1,227 +0,0 @@ -import * as pgwire from '@powersync/service-jpgwire'; -import { pgwireRows } from '@powersync/service-jpgwire'; -import { DEFAULT_TAG, SqlSyncRules, TablePattern } from '@powersync/service-sync-rules'; -import { ReplicationError, TableInfo } from '@powersync/service-types'; - -import * as storage from '../storage/storage-index.js'; -import * as util from '../util/util-index.js'; - -import { ReplicaIdentityResult, getReplicationIdentityColumns } from './util.js'; -/** - * Connection that _manages_ WAL, but does not do streaming. - */ -export class WalConnection { - db: pgwire.PgClient; - connectionTag = DEFAULT_TAG; - publication_name = 'powersync'; - - sync_rules: SqlSyncRules; - - /** - * db can be a PgConnection or PgPool. - * - * No transactions are used here, but it is up to the client to ensure - * nothing here is called in the middle of another transaction if - * PgConnection is used. - */ - constructor(options: { db: pgwire.PgClient; sync_rules: SqlSyncRules }) { - this.db = options.db; - this.sync_rules = options.sync_rules; - } - - async checkSourceConfiguration() { - await checkSourceConfiguration(this.db); - } - - async getDebugTableInfo(tablePattern: TablePattern, name: string, relationId: number | null): Promise { - const schema = tablePattern.schema; - let id_columns_result: ReplicaIdentityResult | undefined = undefined; - let id_columns_error = null; - - if (relationId != null) { - try { - id_columns_result = await getReplicationIdentityColumns(this.db, relationId); - } catch (e) { - id_columns_error = { level: 'fatal', message: e.message }; - } - } - - const id_columns = id_columns_result?.columns ?? []; - - const sourceTable = new storage.SourceTable(0, this.connectionTag, relationId ?? 0, schema, name, id_columns, true); - - const syncData = this.sync_rules.tableSyncsData(sourceTable); - const syncParameters = this.sync_rules.tableSyncsParameters(sourceTable); - - if (relationId == null) { - return { - schema: schema, - name: name, - pattern: tablePattern.isWildcard ? 
tablePattern.tablePattern : undefined, - replication_id: [], - data_queries: syncData, - parameter_queries: syncParameters, - // Also - errors: [{ level: 'warning', message: `Table ${sourceTable.qualifiedName} not found.` }] - }; - } - if (id_columns.length == 0 && id_columns_error == null) { - let message = `No replication id found for ${sourceTable.qualifiedName}. Replica identity: ${id_columns_result?.replicationIdentity}.`; - if (id_columns_result?.replicationIdentity == 'default') { - message += ' Configure a primary key on the table.'; - } - id_columns_error = { level: 'fatal', message }; - } - - let selectError = null; - try { - await util.retriedQuery(this.db, `SELECT * FROM ${sourceTable.escapedIdentifier} LIMIT 1`); - } catch (e) { - selectError = { level: 'fatal', message: e.message }; - } - - let replicateError = null; - - const publications = await util.retriedQuery(this.db, { - statement: `SELECT tablename FROM pg_publication_tables WHERE pubname = $1 AND schemaname = $2 AND tablename = $3`, - params: [ - { type: 'varchar', value: this.publication_name }, - { type: 'varchar', value: tablePattern.schema }, - { type: 'varchar', value: name } - ] - }); - if (publications.rows.length == 0) { - replicateError = { - level: 'fatal', - message: `Table ${sourceTable.qualifiedName} is not part of publication '${this.publication_name}'. Run: \`ALTER PUBLICATION ${this.publication_name} ADD TABLE ${sourceTable.qualifiedName}\`.` - }; - } - - return { - schema: schema, - name: name, - pattern: tablePattern.isWildcard ? tablePattern.tablePattern : undefined, - replication_id: id_columns.map((c) => c.name), - data_queries: syncData, - parameter_queries: syncParameters, - errors: [id_columns_error, selectError, replicateError].filter((error) => error != null) as ReplicationError[] - }; - } - - async getDebugTablesInfo(tablePatterns: TablePattern[]): Promise { - let result: PatternResult[] = []; - - for (let tablePattern of tablePatterns) { - const schema = tablePattern.schema; - - let patternResult: PatternResult = { - schema: schema, - pattern: tablePattern.tablePattern, - wildcard: tablePattern.isWildcard - }; - result.push(patternResult); - - if (tablePattern.isWildcard) { - patternResult.tables = []; - const prefix = tablePattern.tablePrefix; - const results = await util.retriedQuery(this.db, { - statement: `SELECT c.oid AS relid, c.relname AS table_name - FROM pg_class c - JOIN pg_namespace n ON n.oid = c.relnamespace - WHERE n.nspname = $1 - AND c.relkind = 'r' - AND c.relname LIKE $2`, - params: [ - { type: 'varchar', value: schema }, - { type: 'varchar', value: tablePattern.tablePattern } - ] - }); - - for (let row of pgwireRows(results)) { - const name = row.table_name as string; - const relationId = row.relid as number; - if (!name.startsWith(prefix)) { - continue; - } - const details = await this.getDebugTableInfo(tablePattern, name, relationId); - patternResult.tables.push(details); - } - } else { - const results = await util.retriedQuery(this.db, { - statement: `SELECT c.oid AS relid, c.relname AS table_name - FROM pg_class c - JOIN pg_namespace n ON n.oid = c.relnamespace - WHERE n.nspname = $1 - AND c.relkind = 'r' - AND c.relname = $2`, - params: [ - { type: 'varchar', value: schema }, - { type: 'varchar', value: tablePattern.tablePattern } - ] - }); - if (results.rows.length == 0) { - // Table not found - const details = await this.getDebugTableInfo(tablePattern, tablePattern.name, null); - patternResult.table = details; - } else { - const row = pgwireRows(results)[0]; - 
const name = row.table_name as string; - const relationId = row.relid as number; - patternResult.table = await this.getDebugTableInfo(tablePattern, name, relationId); - } - } - } - return result; - } -} - -export interface PatternResult { - schema: string; - pattern: string; - wildcard: boolean; - tables?: TableInfo[]; - table?: TableInfo; -} - -export async function checkSourceConfiguration(db: pgwire.PgClient) { - // TODO: configurable - const publication_name = 'powersync'; - - // Check basic config - await util.retriedQuery( - db, - `DO $$ -BEGIN -if current_setting('wal_level') is distinct from 'logical' then -raise exception 'wal_level must be set to ''logical'', your database has it set to ''%''. Please edit your config file and restart PostgreSQL.', current_setting('wal_level'); -end if; -if (current_setting('max_replication_slots')::int >= 1) is not true then -raise exception 'Your max_replication_slots setting is too low, it must be greater than 1. Please edit your config file and restart PostgreSQL.'; -end if; -if (current_setting('max_wal_senders')::int >= 1) is not true then -raise exception 'Your max_wal_senders setting is too low, it must be greater than 1. Please edit your config file and restart PostgreSQL.'; -end if; -end; -$$ LANGUAGE plpgsql;` - ); - - // Check that publication exists - const rs = await util.retriedQuery(db, { - statement: `SELECT * FROM pg_publication WHERE pubname = $1`, - params: [{ type: 'varchar', value: publication_name }] - }); - const row = pgwireRows(rs)[0]; - if (row == null) { - throw new Error( - `Publication '${publication_name}' does not exist. Run: \`CREATE PUBLICATION ${publication_name} FOR ALL TABLES\`, or read the documentation for details.` - ); - } - if (row.pubinsert == false || row.pubupdate == false || row.pubdelete == false || row.pubtruncate == false) { - throw new Error( - `Publication '${publication_name}' does not publish all changes. Create a publication using \`WITH (publish = "insert, update, delete, truncate")\` (the default).` - ); - } - if (row.pubviaroot) { - throw new Error(`'${publication_name}' uses publish_via_partition_root, which is not supported.`); - } -} diff --git a/packages/service-core/src/replication/WalStreamManager.ts b/packages/service-core/src/replication/WalStreamManager.ts deleted file mode 100644 index 10dc1c268..000000000 --- a/packages/service-core/src/replication/WalStreamManager.ts +++ /dev/null @@ -1,213 +0,0 @@ -import * as pgwire from '@powersync/service-jpgwire'; -import { hrtime } from 'node:process'; - -import * as storage from '../storage/storage-index.js'; -import * as util from '../util/util-index.js'; - -import { DefaultErrorRateLimiter } from './ErrorRateLimiter.js'; -import { WalStreamRunner } from './WalStreamRunner.js'; -import { CorePowerSyncSystem } from '../system/CorePowerSyncSystem.js'; -import { container, logger } from '@powersync/lib-services-framework'; - -// 5 minutes -const PING_INTERVAL = 1_000_000_000n * 300n; - -export class WalStreamManager { - private streams = new Map(); - - private system: CorePowerSyncSystem; - - private stopped = false; - - // First ping is only after 5 minutes, not when starting - private lastPing = hrtime.bigint(); - - private storage: storage.BucketStorageFactory; - - /** - * This limits the effect of retries when there is a persistent issue. 
- */ - private rateLimiter = new DefaultErrorRateLimiter(); - - constructor(system: CorePowerSyncSystem) { - this.system = system; - this.storage = system.storage; - } - - start() { - this.runLoop().catch((e) => { - logger.error(`Fatal WalStream error`, e); - container.reporter.captureException(e); - setTimeout(() => { - process.exit(1); - }, 1000); - }); - } - - async stop() { - this.stopped = true; - let promises: Promise[] = []; - for (let stream of this.streams.values()) { - promises.push(stream.stop()); - } - await Promise.all(promises); - } - - private async runLoop() { - const configured_sync_rules = await util.loadSyncRules(this.system.config); - let configured_lock: storage.ReplicationLock | undefined = undefined; - if (configured_sync_rules != null) { - logger.info('Loading sync rules from configuration'); - try { - // Configure new sync rules, if it has changed. - // In that case, also immediately take out a lock, so that another process doesn't start replication on it. - const { updated, persisted_sync_rules, lock } = await this.storage.configureSyncRules(configured_sync_rules!, { - lock: true - }); - if (lock) { - configured_lock = lock; - } - } catch (e) { - // Log, but continue with previous sync rules - logger.error(`Failed to load sync rules from configuration`, e); - } - } else { - logger.info('No sync rules configured - configure via API'); - } - while (!this.stopped) { - await container.probes.touch(); - try { - const pool = this.system.pgwire_pool; - if (pool) { - await this.refresh({ configured_lock }); - // The lock is only valid on the first refresh. - configured_lock = undefined; - - // TODO: Ping on all connections when we have multiple - // Perhaps WalStreamRunner would be a better place to do pings? - // We don't ping while in error retry back-off, to avoid having too - // many authentication failures. - if (this.rateLimiter.mayPing()) { - await this.ping(pool); - } - } - } catch (e) { - logger.error(`Failed to refresh wal streams`, e); - } - await new Promise((resolve) => setTimeout(resolve, 5000)); - } - } - - /** - * Postgres on RDS writes performs a WAL checkpoint every 5 minutes by default, which creates a new 64MB file. - * - * The old WAL files are only deleted once no replication slot still references it. - * - * Unfortunately, when there are no changes to the db, the database creates new WAL files without the replication slot - * advancing**. - * - * As a workaround, we write a new message every couple of minutes, to make sure that the replication slot advances. - * - * **This may be a bug in pgwire or how we're using it. - */ - private async ping(db: pgwire.PgClient) { - const now = hrtime.bigint(); - if (now - this.lastPing >= PING_INTERVAL) { - try { - await db.query(`SELECT * FROM pg_logical_emit_message(false, 'powersync', 'ping')`); - } catch (e) { - logger.warn(`Failed to ping`, e); - } - this.lastPing = now; - } - } - - private async refresh(options?: { configured_lock?: storage.ReplicationLock }) { - if (this.stopped) { - return; - } - - let configured_lock = options?.configured_lock; - - const existingStreams = new Map(this.streams.entries()); - const replicating = await this.storage.getReplicatingSyncRules(); - const newStreams = new Map(); - for (let syncRules of replicating) { - const existing = existingStreams.get(syncRules.id); - if (existing && !existing.stopped) { - // No change - existingStreams.delete(syncRules.id); - newStreams.set(syncRules.id, existing); - } else if (existing && existing.stopped) { - // Stopped (e.g. 
fatal error, slot rename). - // Remove from the list. Next refresh call will restart the stream. - existingStreams.delete(syncRules.id); - } else { - // New (or resume after restart) - try { - let lock: storage.ReplicationLock; - if (configured_lock?.sync_rules_id == syncRules.id) { - lock = configured_lock; - } else { - lock = await syncRules.lock(); - } - const parsed = syncRules.parsed(); - const storage = this.storage.getInstance(parsed); - const stream = new WalStreamRunner({ - factory: this.storage, - storage: storage, - source_db: this.system.config.connection!, - lock, - rateLimiter: this.rateLimiter - }); - newStreams.set(syncRules.id, stream); - stream.start(); - } catch (e) { - // Could be a sync rules parse error, - // for example from stricter validation that was added. - // This will be retried every couple of seconds. - // When new (valid) sync rules are deployed and processed, this one be disabled. - logger.error(`Failed to start replication for ${syncRules.slot_name}`, e); - } - } - } - - this.streams = newStreams; - - // TODO: Should this termination be happening in the "background" instead? - // That becomes tricky to manage - - for (let stream of existingStreams.values()) { - // Old - stop and remove. - try { - await stream.terminate(); - } catch (e) { - // This will be retried - logger.warn(`Failed to terminate ${stream.slot_name}`, e); - } - } - - // Sync rules stopped previously or by a different process. - const stopped = await this.storage.getStoppedSyncRules(); - for (let syncRules of stopped) { - try { - const lock = await syncRules.lock(); - try { - const parsed = syncRules.parsed(); - const storage = this.storage.getInstance(parsed); - const stream = new WalStreamRunner({ - factory: this.storage, - storage: storage, - source_db: this.system.config.connection!, - lock - }); - await stream.terminate(); - } finally { - await lock.release(); - } - } catch (e) { - logger.warn(`Failed to terminate ${syncRules.slot_name}`, e); - } - } - } -} diff --git a/packages/service-core/src/replication/replication-index.ts b/packages/service-core/src/replication/replication-index.ts index 967ea71ce..0b37534c9 100644 --- a/packages/service-core/src/replication/replication-index.ts +++ b/packages/service-core/src/replication/replication-index.ts @@ -1,7 +1,5 @@ +export * from './AbstractReplicationJob.js'; +export * from './AbstractReplicator.js'; export * from './ErrorRateLimiter.js'; -export * from './PgRelation.js'; -export * from './util.js'; -export * from './WalConnection.js'; -export * from './WalStream.js'; -export * from './WalStreamManager.js'; -export * from './WalStreamRunner.js'; +export * from './ReplicationEngine.js'; +export * from './ReplicationModule.js'; diff --git a/packages/service-core/src/replication/util.ts b/packages/service-core/src/replication/util.ts deleted file mode 100644 index 8c719205a..000000000 --- a/packages/service-core/src/replication/util.ts +++ /dev/null @@ -1,76 +0,0 @@ -import * as pgwire from '@powersync/service-jpgwire'; - -import * as util from '../util/util-index.js'; -import { ReplicationColumn, ReplicationIdentity } from './PgRelation.js'; - -export interface ReplicaIdentityResult { - columns: ReplicationColumn[]; - replicationIdentity: ReplicationIdentity; -} - -export async function getPrimaryKeyColumns( - db: pgwire.PgClient, - relationId: number, - mode: 'primary' | 'replident' -): Promise { - const indexFlag = mode == 'primary' ? 
`i.indisprimary` : `i.indisreplident`; - const attrRows = await util.retriedQuery(db, { - statement: `SELECT a.attname as name, a.atttypid as typeid, a.attnum as attnum - FROM pg_index i - JOIN pg_attribute a - ON a.attrelid = i.indrelid - AND a.attnum = ANY (i.indkey) - WHERE i.indrelid = $1::oid - AND ${indexFlag} - AND a.attnum > 0 - ORDER BY a.attnum`, - params: [{ value: relationId, type: 'int4' }] - }); - - return attrRows.rows.map((row) => { - return { name: row[0] as string, typeOid: row[1] as number }; - }); -} - -export async function getAllColumns(db: pgwire.PgClient, relationId: number): Promise { - const attrRows = await util.retriedQuery(db, { - statement: `SELECT a.attname as name, a.atttypid as typeid, a.attnum as attnum - FROM pg_attribute a - WHERE a.attrelid = $1::oid - AND attnum > 0 - ORDER BY a.attnum`, - params: [{ type: 'varchar', value: relationId }] - }); - return attrRows.rows.map((row) => { - return { name: row[0] as string, typeOid: row[1] as number }; - }); -} - -export async function getReplicationIdentityColumns( - db: pgwire.PgClient, - relationId: number -): Promise { - const rows = await util.retriedQuery(db, { - statement: `SELECT CASE relreplident - WHEN 'd' THEN 'default' - WHEN 'n' THEN 'nothing' - WHEN 'f' THEN 'full' - WHEN 'i' THEN 'index' - END AS replica_identity -FROM pg_class -WHERE oid = $1::oid LIMIT 1`, - params: [{ type: 'int8', value: relationId }] - }); - const idType: string = rows.rows[0]?.[0]; - if (idType == 'nothing' || idType == null) { - return { replicationIdentity: 'nothing', columns: [] }; - } else if (idType == 'full') { - return { replicationIdentity: 'full', columns: await getAllColumns(db, relationId) }; - } else if (idType == 'default') { - return { replicationIdentity: 'default', columns: await getPrimaryKeyColumns(db, relationId, 'primary') }; - } else if (idType == 'index') { - return { replicationIdentity: 'index', columns: await getPrimaryKeyColumns(db, relationId, 'replident') }; - } else { - return { replicationIdentity: 'nothing', columns: [] }; - } -} diff --git a/packages/service-core/src/routes/RouterEngine.ts b/packages/service-core/src/routes/RouterEngine.ts new file mode 100644 index 000000000..28b35173f --- /dev/null +++ b/packages/service-core/src/routes/RouterEngine.ts @@ -0,0 +1,120 @@ +import { logger } from '@powersync/lib-services-framework'; + +import * as api from '../api/api-index.js'; + +import { ADMIN_ROUTES } from './endpoints/admin.js'; +import { CHECKPOINT_ROUTES } from './endpoints/checkpointing.js'; +import { syncStreamReactive } from './endpoints/socket-route.js'; +import { SYNC_RULES_ROUTES } from './endpoints/sync-rules.js'; +import { SYNC_STREAM_ROUTES } from './endpoints/sync-stream.js'; +import { SocketRouteGenerator } from './router-socket.js'; +import { RouteDefinition } from './router.js'; + +export type RouterSetupResponse = { + onShutdown: () => Promise; +}; + +export type RouterEngineRoutes = { + api_routes: RouteDefinition[]; + stream_routes: RouteDefinition[]; + socket_routes: SocketRouteGenerator[]; +}; + +export type RouterSetup = (routes: RouterEngineRoutes) => Promise; + +/** + * Serves as a registry from which SyncAPIs can be retrieved based on Replication DataSource type + * Initially only one SyncAPI per DataSource type is supported + */ +export class RouterEngine { + closed: boolean; + routes: RouterEngineRoutes; + + protected stopHandlers: Set<() => void>; + + /** + * A final cleanup handler to be executed after all stopHandlers + */ + protected cleanupHandler: (() => 
Promise) | null; + + private api: api.RouteAPI | null; + + constructor() { + this.api = null; + this.stopHandlers = new Set(); + this.cleanupHandler = null; + this.closed = false; + + // Default routes + this.routes = { + api_routes: [...ADMIN_ROUTES, ...CHECKPOINT_ROUTES, ...SYNC_RULES_ROUTES], + stream_routes: [...SYNC_STREAM_ROUTES], + socket_routes: [syncStreamReactive] + }; + } + + public registerAPI(api: api.RouteAPI) { + if (this.api) { + logger.warn('A RouteAPI has already been registered. Overriding existing implementation'); + } + + this.api = api; + } + + public getAPI(): api.RouteAPI { + if (!this.api) { + throw new Error('No RouteAPI adapter has been registered yet.'); + } + return this.api; + } + + /** + * Starts the router given the configuration provided + */ + async start(setup: RouterSetup) { + logger.info('Starting Router Engine...'); + const { onShutdown } = await setup(this.routes); + this.cleanupHandler = onShutdown; + logger.info('Successfully started Router Engine.'); + } + + /** + * Runs all stop handlers then final cleanup. + */ + async shutDown() { + logger.info('Shutting down Router Engine...'); + // Close open streams, so that they don't block the server from closing. + // Note: This does not work well when streaming requests are queued. In that case, the server still doesn't + // close in the 30-second timeout. + this.closed = true; + + logger.info(`Closing ${this.stopHandlers.size} streams.`); + for (let handler of this.stopHandlers) { + handler(); + } + + logger.info(`Running cleanup.`); + + // Typically closes the server + await this.cleanupHandler?.(); + + // Close the api handlers + await this.api?.shutdown(); + logger.info('Successfully shut down Router Engine.'); + } + + /** + * Add a stop handler callback to be executed when the router engine is being + * shutdown. + */ + addStopHandler(handler: () => void): () => void { + if (this.closed) { + handler(); + return () => {}; + } + this.stopHandlers.add(handler); + return () => { + this.stopHandlers.delete(handler); + }; + } +} diff --git a/packages/service-core/src/routes/auth.ts b/packages/service-core/src/routes/auth.ts index 896811b71..f6a384b65 100644 --- a/packages/service-core/src/routes/auth.ts +++ b/packages/service-core/src/routes/auth.ts @@ -1,9 +1,9 @@ import * as jose from 'jose'; import * as auth from '../auth/auth-index.js'; +import { ServiceContext } from '../system/ServiceContext.js'; import * as util from '../util/util-index.js'; import { BasicRouterRequest, Context, RequestEndpointHandlerPayload } from './router.js'; -import { CorePowerSyncSystem } from '../system/CorePowerSyncSystem.js'; export function endpoint(req: BasicRouterRequest) { const protocol = req.headers['x-forwarded-proto'] ?? 
req.protocol; @@ -108,7 +108,7 @@ export async function authorizeUser(context: Context, authHeader: string = '') { }; } - const { context: tokenContext, errors } = await generateContext(context.system, token); + const { context: tokenContext, errors } = await generateContext(context.service_context, token); if (!tokenContext) { return { @@ -121,14 +121,14 @@ export async function authorizeUser(context: Context, authHeader: string = '') { return { authorized: true }; } -export async function generateContext(system: CorePowerSyncSystem, token: string) { - const config = system.config; +export async function generateContext(serviceContext: ServiceContext, token: string) { + const { configuration } = serviceContext; let tokenPayload: auth.JwtPayload; try { - const maxAge = config.token_max_expiration; - tokenPayload = await system.client_keystore.verifyJwt(token, { - defaultAudiences: config.jwt_audiences, + const maxAge = configuration.token_max_expiration; + tokenPayload = await configuration.client_keystore.verifyJwt(token, { + defaultAudiences: configuration.jwt_audiences, maxAge: maxAge }); return { @@ -149,9 +149,14 @@ export async function generateContext(system: CorePowerSyncSystem, token: string * @deprecated */ export const authDevUser = async (payload: RequestEndpointHandlerPayload) => { - const context = payload.context; + const { + context: { + service_context: { configuration } + } + } = payload; + const token = getTokenFromHeader(payload.request.headers.authorization as string); - if (!context.system.config.dev.demo_auth) { + if (!configuration.dev.demo_auth) { return { authorized: false, errors: ['Authentication disabled'] @@ -170,7 +175,7 @@ export const authDevUser = async (payload: RequestEndpointHandlerPayload) => { let tokenPayload: auth.JwtPayload; try { - tokenPayload = await context.system.dev_client_keystore.verifyJwt(token, { + tokenPayload = await configuration.dev_client_keystore.verifyJwt(token, { defaultAudiences: audience, maxAge: '31d' }); @@ -186,8 +191,12 @@ export const authDevUser = async (payload: RequestEndpointHandlerPayload) => { }; export const authApi = (payload: RequestEndpointHandlerPayload) => { - const context = payload.context; - const api_keys = context.system.config.api_tokens; + const { + context: { + service_context: { configuration } + } + } = payload; + const api_keys = configuration.api_tokens; if (api_keys.length == 0) { return { authorized: false, diff --git a/packages/service-core/src/routes/configure-fastify.ts b/packages/service-core/src/routes/configure-fastify.ts index 3ff0f463d..5ac45c0a7 100644 --- a/packages/service-core/src/routes/configure-fastify.ts +++ b/packages/service-core/src/routes/configure-fastify.ts @@ -9,7 +9,7 @@ import { PROBES_ROUTES } from './endpoints/probes.js'; import { SYNC_RULES_ROUTES } from './endpoints/sync-rules.js'; import { SYNC_STREAM_ROUTES } from './endpoints/sync-stream.js'; import { createRequestQueueHook, CreateRequestQueueParams } from './hooks.js'; -import { RouteDefinition } from './router.js'; +import { RouteDefinition, RouterServiceContext } from './router.js'; /** * A list of route definitions to be registered as endpoints. 
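Routes no longer reach into a `CorePowerSyncSystem`; they resolve a data-source-specific `RouteAPI` through the `RouterEngine` shown above. A minimal sketch of the registration and lookup flow, assuming the file lives under `packages/service-core/src/routes/` (the import paths and function names are illustrative):

```ts
import { RouteAPI } from '../api/RouteAPI.js';
import { ServiceContext } from '../system/ServiceContext.js';
import { RequestEndpointHandlerPayload } from './router.js';

// Module/start-up side: make a data-source-specific RouteAPI available to routes.
export function registerRouteAPI(serviceContext: ServiceContext, routeApi: RouteAPI) {
  serviceContext.routerEngine?.registerAPI(routeApi);
}

// Route-handler side, mirroring the admin routes below: fetch the registered API
// and delegate data-source-specific work to it.
export async function connectionStatusHandler(payload: RequestEndpointHandlerPayload) {
  const apiHandler = payload.context.service_context.routerEngine!.getAPI();
  const status = await apiHandler.getConnectionStatus();
  return { connected: status?.connected ?? false };
}
```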
@@ -17,7 +17,7 @@ import { RouteDefinition } from './router.js'; */ export type RouteRegistrationOptions = { routes: RouteDefinition[]; - queueOptions: CreateRequestQueueParams; + queue_options: CreateRequestQueueParams; }; /** @@ -26,25 +26,25 @@ export type RouteRegistrationOptions = { */ export type RouteDefinitions = { api?: Partial; - syncStream?: Partial; + sync_stream?: Partial; }; export type FastifyServerConfig = { - system: system.CorePowerSyncSystem; + service_context: system.ServiceContext; routes?: RouteDefinitions; }; export const DEFAULT_ROUTE_OPTIONS = { api: { routes: [...ADMIN_ROUTES, ...CHECKPOINT_ROUTES, ...SYNC_RULES_ROUTES, ...PROBES_ROUTES], - queueOptions: { + queue_options: { concurrency: 10, max_queue_depth: 20 } }, - syncStream: { + sync_stream: { routes: [...SYNC_STREAM_ROUTES], - queueOptions: { + queue_options: { concurrency: 200, max_queue_depth: 0 } @@ -56,7 +56,20 @@ export const DEFAULT_ROUTE_OPTIONS = { * concurrency queue limits or override routes. */ export function configureFastifyServer(server: fastify.FastifyInstance, options: FastifyServerConfig) { - const { system, routes = DEFAULT_ROUTE_OPTIONS } = options; + const { service_context, routes = DEFAULT_ROUTE_OPTIONS } = options; + + const generateContext = async () => { + const { routerEngine } = service_context; + if (!routerEngine) { + throw new Error(`RouterEngine has not been registered`); + } + + return { + user_id: undefined, + service_context: service_context as RouterServiceContext + }; + }; + /** * Fastify creates an encapsulated context for each `.register` call. * Creating a separate context here to separate the concurrency limits for Admin APIs @@ -64,20 +77,11 @@ export function configureFastifyServer(server: fastify.FastifyInstance, options: * https://github.com/fastify/fastify/blob/main/docs/Reference/Encapsulation.md */ server.register(async function (childContext) { - registerFastifyRoutes( - childContext, - async () => { - return { - user_id: undefined, - system: system - }; - }, - routes.api?.routes ?? DEFAULT_ROUTE_OPTIONS.api.routes - ); + registerFastifyRoutes(childContext, generateContext, routes.api?.routes ?? DEFAULT_ROUTE_OPTIONS.api.routes); // Limit the active concurrent requests childContext.addHook( 'onRequest', - createRequestQueueHook(routes.api?.queueOptions ?? DEFAULT_ROUTE_OPTIONS.api.queueOptions) + createRequestQueueHook(routes.api?.queue_options ?? DEFAULT_ROUTE_OPTIONS.api.queue_options) ); }); @@ -85,18 +89,13 @@ export function configureFastifyServer(server: fastify.FastifyInstance, options: server.register(async function (childContext) { registerFastifyRoutes( childContext, - async () => { - return { - user_id: undefined, - system: system - }; - }, - routes.syncStream?.routes ?? DEFAULT_ROUTE_OPTIONS.syncStream.routes + generateContext, + routes.sync_stream?.routes ?? DEFAULT_ROUTE_OPTIONS.sync_stream.routes ); // Limit the active concurrent requests childContext.addHook( 'onRequest', - createRequestQueueHook(routes.syncStream?.queueOptions ?? DEFAULT_ROUTE_OPTIONS.syncStream.queueOptions) + createRequestQueueHook(routes.sync_stream?.queue_options ?? 
DEFAULT_ROUTE_OPTIONS.sync_stream.queue_options) ); }); } diff --git a/packages/service-core/src/routes/configure-rsocket.ts b/packages/service-core/src/routes/configure-rsocket.ts index 93f650a11..05572dfc5 100644 --- a/packages/service-core/src/routes/configure-rsocket.ts +++ b/packages/service-core/src/routes/configure-rsocket.ts @@ -4,22 +4,22 @@ import * as http from 'http'; import { errors, logger } from '@powersync/lib-services-framework'; import { ReactiveSocketRouter, RSocketRequestMeta } from '@powersync/service-rsocket-router'; -import { CorePowerSyncSystem } from '../system/CorePowerSyncSystem.js'; +import { ServiceContext } from '../system/ServiceContext.js'; import { generateContext, getTokenFromHeader } from './auth.js'; import { syncStreamReactive } from './endpoints/socket-route.js'; import { RSocketContextMeta, SocketRouteGenerator } from './router-socket.js'; -import { Context } from './router.js'; +import { Context, RouterServiceContext } from './router.js'; export type RSockerRouterConfig = { - system: CorePowerSyncSystem; + service_context: ServiceContext; server: http.Server; - routeGenerators?: SocketRouteGenerator[]; + route_generators?: SocketRouteGenerator[]; }; export const DEFAULT_SOCKET_ROUTES = [syncStreamReactive]; export function configureRSocket(router: ReactiveSocketRouter, options: RSockerRouterConfig) { - const { routeGenerators = DEFAULT_SOCKET_ROUTES, server, system } = options; + const { route_generators = DEFAULT_SOCKET_ROUTES, server, service_context } = options; router.applyWebSocketEndpoints(server, { contextProvider: async (data: Buffer) => { @@ -32,16 +32,21 @@ export function configureRSocket(router: ReactiveSocketRouter, options: try { const extracted_token = getTokenFromHeader(token); if (extracted_token != null) { - const { context, errors: token_errors } = await generateContext(system, extracted_token); + const { context, errors: token_errors } = await generateContext(options.service_context, extracted_token); if (context?.token_payload == null) { throw new errors.AuthorizationError(token_errors ?? 
'Authentication required'); } + + if (!service_context.routerEngine) { + throw new Error(`RouterEngine has not been registered`); + } + return { token, user_agent, ...context, token_errors: token_errors, - system + service_context: service_context as RouterServiceContext }; } else { throw new errors.AuthorizationError('No token provided'); @@ -51,7 +56,7 @@ export function configureRSocket(router: ReactiveSocketRouter, options: throw ex; } }, - endpoints: routeGenerators.map((generator) => generator(router)), + endpoints: route_generators.map((generator) => generator(router)), metaDecoder: async (meta: Buffer) => { return RSocketRequestMeta.decode(deserialize(meta) as any); }, diff --git a/packages/service-core/src/routes/endpoints/admin.ts b/packages/service-core/src/routes/endpoints/admin.ts index 65c056cc3..c08b1068d 100644 --- a/packages/service-core/src/routes/endpoints/admin.ts +++ b/packages/service-core/src/routes/endpoints/admin.ts @@ -1,11 +1,9 @@ import { errors, router, schema } from '@powersync/lib-services-framework'; -import { SqlSyncRules, SqliteValue, StaticSchema, isJsonValue, toSyncRulesValue } from '@powersync/service-sync-rules'; +import { SqlSyncRules, StaticSchema } from '@powersync/service-sync-rules'; import { internal_routes } from '@powersync/service-types'; import * as api from '../../api/api-index.js'; -import * as util from '../../util/util-index.js'; - -import { PersistedSyncRulesContent } from '../../storage/BucketStorage.js'; +import * as storage from '../../storage/storage-index.js'; import { authApi } from '../auth.js'; import { routeDefinition } from '../router.js'; @@ -15,47 +13,27 @@ export const executeSql = routeDefinition({ authorize: authApi, validator: schema.createTsCodecValidator(internal_routes.ExecuteSqlRequest, { allowAdditional: true }), handler: async (payload) => { - const connection = payload.context.system.config.connection; - if (connection == null || !connection.debug_api) { - return internal_routes.ExecuteSqlResponse.encode({ - results: { - columns: [], - rows: [] - }, - success: false, - error: 'SQL querying is not enabled' - }); - } - - const pool = payload.context.system.requirePgPool(); - - const { query, args } = payload.params.sql; + const { + params: { + sql: { query, args } + } + } = payload; - try { - const result = await pool.query({ - statement: query, - params: args.map(util.autoParameter) - }); + const apiHandler = payload.context.service_context.routerEngine!.getAPI(); - return internal_routes.ExecuteSqlResponse.encode({ - success: true, - results: { - columns: result.columns.map((c) => c.name), - rows: result.rows.map((row) => { - return row.map((value) => mapColumnValue(toSyncRulesValue(value))); - }) - } - }); - } catch (e) { + const sourceConfig = await apiHandler.getSourceConfig(); + if (!sourceConfig.debug_api) { return internal_routes.ExecuteSqlResponse.encode({ results: { columns: [], rows: [] }, success: false, - error: e.message + error: 'SQL querying is not enabled' }); } + + return internal_routes.ExecuteSqlResponse.encode(await apiHandler.executeQuery(query, args)); } }); @@ -65,34 +43,45 @@ export const diagnostics = routeDefinition({ authorize: authApi, validator: schema.createTsCodecValidator(internal_routes.DiagnosticsRequest, { allowAdditional: true }), handler: async (payload) => { + const { context } = payload; + const { service_context } = context; const include_content = payload.params.sync_rules_content ?? 
false; - const system = payload.context.system; - const status = await api.getConnectionStatus(system); - if (status == null) { + const apiHandler = service_context.routerEngine!.getAPI(); + + const status = await apiHandler.getConnectionStatus(); + if (!status) { return internal_routes.DiagnosticsResponse.encode({ connections: [] }); } - const { storage } = system; - const active = await storage.getActiveSyncRulesContent(); - const next = await storage.getNextSyncRulesContent(); + const { + storageEngine: { activeBucketStorage } + } = service_context; + const active = await activeBucketStorage.getActiveSyncRulesContent(); + const next = await activeBucketStorage.getNextSyncRulesContent(); - const active_status = await api.getSyncRulesStatus(active, system, { + const active_status = await api.getSyncRulesStatus(activeBucketStorage, apiHandler, active, { include_content, check_connection: status.connected, live_status: true }); - const next_status = await api.getSyncRulesStatus(next, system, { + const next_status = await api.getSyncRulesStatus(activeBucketStorage, apiHandler, next, { include_content, check_connection: status.connected, live_status: true }); return internal_routes.DiagnosticsResponse.encode({ - connections: [status], + connections: [ + { + ...status, + // TODO update this in future + postgres_uri: status.uri + } + ], active_sync_rules: active_status, deploying_sync_rules: next_status }); @@ -105,9 +94,9 @@ export const getSchema = routeDefinition({ authorize: authApi, validator: schema.createTsCodecValidator(internal_routes.GetSchemaRequest, { allowAdditional: true }), handler: async (payload) => { - const system = payload.context.system; + const apiHandler = payload.context.service_context.routerEngine!.getAPI(); - return internal_routes.GetSchemaResponse.encode(await api.getConnectionsSchema(system)); + return internal_routes.GetSchemaResponse.encode(await api.getConnectionsSchema(apiHandler)); } }); @@ -117,15 +106,19 @@ export const reprocess = routeDefinition({ authorize: authApi, validator: schema.createTsCodecValidator(internal_routes.ReprocessRequest, { allowAdditional: true }), handler: async (payload) => { - const system = payload.context.system; - - const storage = system.storage; - const next = await storage.getNextSyncRules(); + const { + context: { service_context } + } = payload; + const { + storageEngine: { activeBucketStorage } + } = service_context; + const apiHandler = service_context.routerEngine!.getAPI(); + const next = await activeBucketStorage.getNextSyncRules(apiHandler.getParseSyncRulesOptions()); if (next != null) { throw new Error(`Busy processing sync rules - cannot reprocess`); } - const active = await storage.getActiveSyncRules(); + const active = await activeBucketStorage.getActiveSyncRules(apiHandler.getParseSyncRulesOptions()); if (active == null) { throw new errors.JourneyError({ status: 422, @@ -134,15 +127,18 @@ export const reprocess = routeDefinition({ }); } - const new_rules = await storage.updateSyncRules({ + const new_rules = await activeBucketStorage.updateSyncRules({ content: active.sync_rules.content }); + const baseConfig = await apiHandler.getSourceConfig(); + return internal_routes.ReprocessResponse.encode({ connections: [ { - tag: system.config.connection!.tag, - id: system.config.connection!.id, + // Previously the connection was asserted with `!` + tag: baseConfig.tag, + id: baseConfig.id, slot_name: new_rules.slot_name } ] @@ -156,14 +152,16 @@ export const validate = routeDefinition({ authorize: authApi, validator: 
schema.createTsCodecValidator(internal_routes.ValidateRequest, { allowAdditional: true }), handler: async (payload) => { - const system = payload.context.system; - + const { + context: { service_context } + } = payload; const content = payload.params.sync_rules; + const apiHandler = service_context.routerEngine!.getAPI(); - const schemaData = await api.getConnectionsSchema(system); + const schemaData = await api.getConnectionsSchema(apiHandler); const schema = new StaticSchema(schemaData.connections); - const sync_rules: PersistedSyncRulesContent = { + const sync_rules: storage.PersistedSyncRulesContent = { // Dummy values id: 0, slot_name: '', @@ -171,7 +169,10 @@ export const validate = routeDefinition({ parsed() { return { ...this, - sync_rules: SqlSyncRules.fromYaml(content, { throwOnError: false, schema }) + sync_rules: SqlSyncRules.fromYaml(content, { + ...apiHandler.getParseSyncRulesOptions(), + schema + }) }; }, sync_rules_content: content, @@ -180,19 +181,24 @@ export const validate = routeDefinition({ } }; - const connectionStatus = await api.getConnectionStatus(system); - if (connectionStatus == null) { + const connectionStatus = await apiHandler.getConnectionStatus(); + if (!connectionStatus) { return internal_routes.ValidateResponse.encode({ errors: [{ level: 'fatal', message: 'No connection configured' }], connections: [] }); } - const status = (await api.getSyncRulesStatus(sync_rules, system, { - include_content: false, - check_connection: connectionStatus?.connected, - live_status: false - }))!; + const status = (await api.getSyncRulesStatus( + service_context.storageEngine.activeBucketStorage, + apiHandler, + sync_rules, + { + include_content: false, + check_connection: connectionStatus.connected, + live_status: false + } + ))!; if (connectionStatus == null) { status.errors.push({ level: 'fatal', message: 'No connection configured' }); @@ -202,14 +208,4 @@ export const validate = routeDefinition({ } }); -function mapColumnValue(value: SqliteValue) { - if (typeof value == 'bigint') { - return Number(value); - } else if (isJsonValue(value)) { - return value; - } else { - return null; - } -} - export const ADMIN_ROUTES = [executeSql, diagnostics, getSchema, reprocess, validate]; diff --git a/packages/service-core/src/routes/endpoints/checkpointing.ts b/packages/service-core/src/routes/endpoints/checkpointing.ts index cb45c5825..3ce913ad2 100644 --- a/packages/service-core/src/routes/endpoints/checkpointing.ts +++ b/packages/service-core/src/routes/endpoints/checkpointing.ts @@ -1,6 +1,7 @@ +import { logger, router, schema } from '@powersync/lib-services-framework'; import * as t from 'ts-codec'; -import { router, schema } from '@powersync/lib-services-framework'; +import * as framework from '@powersync/lib-services-framework'; import * as util from '../../util/util-index.js'; import { authUser } from '../auth.js'; import { routeDefinition } from '../router.js'; @@ -15,13 +16,33 @@ export const writeCheckpoint = routeDefinition({ authorize: authUser, validator: schema.createTsCodecValidator(WriteCheckpointRequest, { allowAdditional: true }), handler: async (payload) => { - const system = payload.context.system; - const storage = system.storage; + const { + context: { service_context } + } = payload; + const apiHandler = service_context.routerEngine!.getAPI(); - const checkpoint = await util.getClientCheckpoint(system.requirePgPool(), storage); - return { - checkpoint - }; + // This old API needs a persisted checkpoint id. 
+ // Since we don't use LSNs anymore, the only way to get that is to wait. + const start = Date.now(); + + const head = await apiHandler.getReplicationHead(); + + const timeout = 50_000; + + logger.info(`Waiting for LSN checkpoint: ${head}`); + while (Date.now() - start < timeout) { + const cp = await service_context.storageEngine.activeBucketStorage.getActiveCheckpoint(); + if (!cp.hasSyncRules()) { + throw new Error('No sync rules available'); + } + if (cp.lsn && cp.lsn >= head) { + logger.info(`Got write checkpoint: ${head} : ${cp.checkpoint}`); + return { checkpoint: cp.checkpoint }; + } + + await new Promise((resolve) => setTimeout(resolve, 30)); + } + throw new Error('Timeout while waiting for checkpoint'); } }); @@ -31,13 +52,32 @@ export const writeCheckpoint2 = routeDefinition({ authorize: authUser, validator: schema.createTsCodecValidator(WriteCheckpointRequest, { allowAdditional: true }), handler: async (payload) => { - const { user_id, system } = payload.context; + const { user_id, service_context } = payload.context; + + const apiHandler = service_context.routerEngine!.getAPI(); + const client_id = payload.params.client_id; const full_user_id = util.checkpointUserId(user_id, client_id); - const storage = system.storage; - const write_checkpoint = await util.createWriteCheckpoint(system.requirePgPool(), storage, full_user_id); + + const currentCheckpoint = await apiHandler.getReplicationHead(); + const { + storageEngine: { activeBucketStorage } + } = service_context; + + const activeSyncRules = await activeBucketStorage.getActiveSyncRulesContent(); + if (!activeSyncRules) { + throw new framework.errors.ValidationError(`Cannot create Write Checkpoint since no sync rules are active.`); + } + + using syncBucketStorage = activeBucketStorage.getInstance(activeSyncRules); + const writeCheckpoint = await syncBucketStorage.createManagedWriteCheckpoint({ + user_id: full_user_id, + heads: { '1': currentCheckpoint } + }); + logger.info(`Write checkpoint 2: ${JSON.stringify({ currentCheckpoint, id: String(full_user_id) })}`); + return { - write_checkpoint: String(write_checkpoint) + write_checkpoint: String(writeCheckpoint) }; } }); diff --git a/packages/service-core/src/routes/endpoints/socket-route.ts b/packages/service-core/src/routes/endpoints/socket-route.ts index 78ec5b360..cdcdb8cf4 100644 --- a/packages/service-core/src/routes/endpoints/socket-route.ts +++ b/packages/service-core/src/routes/endpoints/socket-route.ts @@ -12,7 +12,8 @@ export const syncStreamReactive: SocketRouteGenerator = (router) => router.reactiveStream(SyncRoutes.STREAM, { validator: schema.createTsCodecValidator(util.StreamingSyncRequest, { allowAdditional: true }), handler: async ({ context, params, responder, observer, initialN, signal: upstreamSignal }) => { - const { system } = context; + const { service_context } = context; + const { routerEngine } = service_context; // Create our own controller that we can abort directly const controller = new AbortController(); @@ -31,7 +32,7 @@ export const syncStreamReactive: SocketRouteGenerator = (router) => } }); - if (system.closed) { + if (routerEngine!.closed) { responder.onError( new errors.JourneyError({ status: 503, @@ -45,9 +46,11 @@ export const syncStreamReactive: SocketRouteGenerator = (router) => const syncParams = new RequestParameters(context.token_payload!, params.parameters ?? 
{}); - const storage = system.storage; + const { + storageEngine: { activeBucketStorage } + } = service_context; // Sanity check before we start the stream - const cp = await storage.getActiveCheckpoint(); + const cp = await activeBucketStorage.getActiveCheckpoint(); if (!cp.hasSyncRules()) { responder.onError( new errors.JourneyError({ @@ -60,7 +63,7 @@ export const syncStreamReactive: SocketRouteGenerator = (router) => return; } - const removeStopHandler = system.addStopHandler(() => { + const removeStopHandler = routerEngine!.addStopHandler(() => { controller.abort(); }); @@ -68,7 +71,8 @@ export const syncStreamReactive: SocketRouteGenerator = (router) => const tracker = new sync.RequestTracker(); try { for await (const data of sync.streamResponse({ - storage, + storage: activeBucketStorage, + parseOptions: routerEngine!.getAPI().getParseSyncRulesOptions(), params: { ...params, binary_data: true // always true for web sockets diff --git a/packages/service-core/src/routes/endpoints/sync-rules.ts b/packages/service-core/src/routes/endpoints/sync-rules.ts index a5eedfb56..c902171cf 100644 --- a/packages/service-core/src/routes/endpoints/sync-rules.ts +++ b/packages/service-core/src/routes/endpoints/sync-rules.ts @@ -1,10 +1,9 @@ -import * as t from 'ts-codec'; -import type { FastifyPluginAsync } from 'fastify'; -import * as pgwire from '@powersync/service-jpgwire'; import { errors, router, schema } from '@powersync/lib-services-framework'; import { SqlSyncRules, SyncRulesErrors } from '@powersync/service-sync-rules'; +import type { FastifyPluginAsync } from 'fastify'; +import * as t from 'ts-codec'; -import * as replication from '../../replication/replication-index.js'; +import { RouteAPI } from '../../api/RouteAPI.js'; import { authApi } from '../auth.js'; import { routeDefinition } from '../router.js'; @@ -39,7 +38,10 @@ export const deploySyncRules = routeDefinition({ plugins: [yamlPlugin], validator: schema.createTsCodecValidator(DeploySyncRulesRequest, { allowAdditional: true }), handler: async (payload) => { - if (payload.context.system.config.sync_rules.present) { + const { service_context } = payload.context; + const { storageEngine } = service_context; + + if (service_context.configuration.sync_rules.present) { // If sync rules are configured via the config, disable deploy via the API. 
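Both the WebSocket route above and the HTTP streaming route further down rely on the same shutdown contract: register a stop handler on the `RouterEngine`, abort the stream when it fires, and deregister once the request completes. A minimal sketch of that contract in isolation (the import path assumes a file alongside the router sources, and the loop body is a placeholder):

```ts
import { RouterEngine } from './RouterEngine.js';

// Illustrative long-running request using the RouterEngine stop-handler contract.
async function streamUntilShutdown(routerEngine: RouterEngine) {
  const controller = new AbortController();

  // addStopHandler() returns a deregister function; if the engine is already
  // closed, the handler is invoked immediately.
  const removeStopHandler = routerEngine.addStopHandler(() => controller.abort());

  try {
    while (!controller.signal.aborted) {
      // ... produce the next chunk of the response here ...
      await new Promise((resolve) => setTimeout(resolve, 1000));
    }
  } finally {
    removeStopHandler();
  }
}
```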
throw new errors.JourneyError({ status: 422, @@ -51,7 +53,12 @@ export const deploySyncRules = routeDefinition({ const content = payload.params.content; try { - SqlSyncRules.fromYaml(payload.params.content); + const apiHandler = service_context.routerEngine!.getAPI(); + SqlSyncRules.fromYaml(payload.params.content, { + ...apiHandler.getParseSyncRulesOptions(), + // We don't do any schema-level validation at this point + schema: undefined + }); } catch (e) { throw new errors.JourneyError({ status: 422, @@ -61,7 +68,7 @@ export const deploySyncRules = routeDefinition({ }); } - const sync_rules = await payload.context.system.storage.updateSyncRules({ + const sync_rules = await storageEngine.activeBucketStorage.updateSyncRules({ content: content }); @@ -84,8 +91,10 @@ export const validateSyncRules = routeDefinition({ validator: schema.createTsCodecValidator(ValidateSyncRulesRequest, { allowAdditional: true }), handler: async (payload) => { const content = payload.params.content; + const { service_context } = payload.context; + const apiHandler = service_context.routerEngine!.getAPI(); - const info = await debugSyncRules(payload.context.system.requirePgPool(), content); + const info = await debugSyncRules(apiHandler, content); return replyPrettyJson(info); } @@ -96,8 +105,12 @@ export const currentSyncRules = routeDefinition({ method: router.HTTPMethod.GET, authorize: authApi, handler: async (payload) => { - const storage = payload.context.system.storage; - const sync_rules = await storage.getActiveSyncRulesContent(); + const { service_context } = payload.context; + const { + storageEngine: { activeBucketStorage } + } = service_context; + + const sync_rules = await activeBucketStorage.getActiveSyncRulesContent(); if (!sync_rules) { throw new errors.JourneyError({ status: 422, @@ -105,12 +118,12 @@ export const currentSyncRules = routeDefinition({ description: 'No active sync rules' }); } - const info = await debugSyncRules(payload.context.system.requirePgPool(), sync_rules.sync_rules_content); - const next = await storage.getNextSyncRulesContent(); - const next_info = next - ? await debugSyncRules(payload.context.system.requirePgPool(), next.sync_rules_content) - : null; + const apiHandler = service_context.routerEngine!.getAPI(); + const info = await debugSyncRules(apiHandler, sync_rules.sync_rules_content); + const next = await activeBucketStorage.getNextSyncRulesContent(); + + const next_info = next ? 
await debugSyncRules(apiHandler, next.sync_rules_content) : null; const response = { current: { @@ -140,8 +153,11 @@ export const reprocessSyncRules = routeDefinition({ authorize: authApi, validator: schema.createTsCodecValidator(ReprocessSyncRulesRequest), handler: async (payload) => { - const storage = payload.context.system.storage; - const sync_rules = await storage.getActiveSyncRules(); + const { + storageEngine: { activeBucketStorage } + } = payload.context.service_context; + const apiHandler = payload.context.service_context.routerEngine!.getAPI(); + const sync_rules = await activeBucketStorage.getActiveSyncRules(apiHandler.getParseSyncRulesOptions()); if (sync_rules == null) { throw new errors.JourneyError({ status: 422, @@ -150,7 +166,7 @@ export const reprocessSyncRules = routeDefinition({ }); } - const new_rules = await storage.updateSyncRules({ + const new_rules = await activeBucketStorage.updateSyncRules({ content: sync_rules.sync_rules.content }); return { @@ -169,15 +185,15 @@ function replyPrettyJson(payload: any) { }); } -async function debugSyncRules(db: pgwire.PgClient, sync_rules: string) { +async function debugSyncRules(apiHandler: RouteAPI, sync_rules: string) { try { - const rules = SqlSyncRules.fromYaml(sync_rules); - const source_table_patterns = rules.getSourceTables(); - const wc = new replication.WalConnection({ - db: db, - sync_rules: rules + const rules = SqlSyncRules.fromYaml(sync_rules, { + ...apiHandler.getParseSyncRulesOptions(), + // No schema-based validation at this point + schema: undefined }); - const resolved_tables = await wc.getDebugTablesInfo(source_table_patterns); + const source_table_patterns = rules.getSourceTables(); + const resolved_tables = await apiHandler.getDebugTablesInfo(source_table_patterns, rules); return { valid: true, diff --git a/packages/service-core/src/routes/endpoints/sync-stream.ts b/packages/service-core/src/routes/endpoints/sync-stream.ts index 4e614be88..83d8f3994 100644 --- a/packages/service-core/src/routes/endpoints/sync-stream.ts +++ b/packages/service-core/src/routes/endpoints/sync-stream.ts @@ -8,7 +8,6 @@ import * as util from '../../util/util-index.js'; import { Metrics } from '../../metrics/Metrics.js'; import { authUser } from '../auth.js'; import { routeDefinition } from '../router.js'; -import { RequestTracker } from '../../sync/RequestTracker.js'; export enum SyncRoutes { STREAM = '/sync/stream' @@ -20,12 +19,13 @@ export const syncStreamed = routeDefinition({ authorize: authUser, validator: schema.createTsCodecValidator(util.StreamingSyncRequest, { allowAdditional: true }), handler: async (payload) => { - const system = payload.context.system; + const { service_context } = payload.context; + const { routerEngine, storageEngine } = service_context; const headers = payload.request.headers; const userAgent = headers['x-user-agent'] ?? headers['user-agent']; const clientId = payload.params.client_id; - if (system.closed) { + if (routerEngine!.closed) { throw new errors.JourneyError({ status: 503, code: 'SERVICE_UNAVAILABLE', @@ -36,9 +36,8 @@ export const syncStreamed = routeDefinition({ const params: util.StreamingSyncRequest = payload.params; const syncParams = new RequestParameters(payload.context.token_payload!, payload.params.parameters ?? 
{}); - const storage = system.storage; // Sanity check before we start the stream - const cp = await storage.getActiveCheckpoint(); + const cp = await storageEngine.activeBucketStorage.getActiveCheckpoint(); if (!cp.hasSyncRules()) { throw new errors.JourneyError({ status: 500, @@ -47,14 +46,15 @@ export const syncStreamed = routeDefinition({ }); } const controller = new AbortController(); - const tracker = new RequestTracker(); + const tracker = new sync.RequestTracker(); try { Metrics.getInstance().concurrent_connections.add(1); const stream = Readable.from( sync.transformToBytesTracked( sync.ndjson( sync.streamResponse({ - storage, + storage: storageEngine.activeBucketStorage, + parseOptions: routerEngine!.getAPI().getParseSyncRulesOptions(), params, syncParams, token: payload.context.token_payload!, @@ -67,7 +67,7 @@ export const syncStreamed = routeDefinition({ { objectMode: false, highWaterMark: 16 * 1024 } ); - const deregister = system.addStopHandler(() => { + const deregister = routerEngine!.addStopHandler(() => { // This error is not currently propagated to the client controller.abort(); stream.destroy(new Error('Shutting down system')); diff --git a/packages/service-core/src/routes/router.ts b/packages/service-core/src/routes/router.ts index 9cfdb3351..98bfd330f 100644 --- a/packages/service-core/src/routes/router.ts +++ b/packages/service-core/src/routes/router.ts @@ -1,13 +1,19 @@ import { router } from '@powersync/lib-services-framework'; import type { JwtPayload } from '../auth/auth-index.js'; -import type { CorePowerSyncSystem } from '../system/CorePowerSyncSystem.js'; +import { ServiceContext } from '../system/ServiceContext.js'; +import { RouterEngine } from './RouterEngine.js'; +/** + * The {@link RouterEngine} must be provided for these routes + */ +export type RouterServiceContext = ServiceContext & { routerEngine: RouterEngine }; /** * Common context for routes */ export type Context = { user_id?: string; - system: CorePowerSyncSystem; + + service_context: RouterServiceContext; token_payload?: JwtPayload; token_errors?: string[]; @@ -41,7 +47,6 @@ export type RequestEndpointHandlerPayload< }; export type RouteDefinition = RequestEndpoint; - /** * Helper function for making generics work well when defining routes */ diff --git a/packages/service-core/src/routes/routes-index.ts b/packages/service-core/src/routes/routes-index.ts index b1c14c2a5..fb395086b 100644 --- a/packages/service-core/src/routes/routes-index.ts +++ b/packages/service-core/src/routes/routes-index.ts @@ -6,3 +6,4 @@ export * as hooks from './hooks.js'; export * from './route-register.js'; export * from './router-socket.js'; export * from './router.js'; +export * from './RouterEngine.js'; diff --git a/packages/service-core/src/runner/teardown.ts b/packages/service-core/src/runner/teardown.ts index 8060a4047..b6b7bb5ac 100644 --- a/packages/service-core/src/runner/teardown.ts +++ b/packages/service-core/src/runner/teardown.ts @@ -1,108 +1,70 @@ // Script to tear down the data when deleting an instance. -// This deletes: -// 1. The replication slots on the source postgres instance (if available). -// 2. The mongo database. +// This should: +// 1. Attempt to clean up any remote configuration of data sources that was set up. +// 2. 
Delete the storage -import * as timers from 'timers/promises'; - -import * as db from '../db/db-index.js'; +import { container, logger } from '@powersync/lib-services-framework'; +import timers from 'timers/promises'; +import * as modules from '../modules/modules-index.js'; import * as storage from '../storage/storage-index.js'; +import * as system from '../system/system-index.js'; import * as utils from '../util/util-index.js'; -import * as replication from '../replication/replication-index.js'; -import { logger } from '@powersync/lib-services-framework'; -/** - * Attempt to terminate a single sync rules instance. - * - * This may fail with a lock error. - */ -async function terminateReplicator( - storageFactory: storage.BucketStorageFactory, - connection: utils.ResolvedConnection, - syncRules: storage.PersistedSyncRulesContent -) { - // The lock may still be active if the current replication instance - // hasn't stopped yet. - const lock = await syncRules.lock(); +export async function teardown(runnerConfig: utils.RunnerConfig) { try { - const parsed = syncRules.parsed(); - const storage = storageFactory.getInstance(parsed); - const stream = new replication.WalStreamRunner({ - factory: storageFactory, - storage: storage, - source_db: connection, - lock - }); + logger.info(`Tearing down PowerSync instance...`); + const config = await utils.loadConfig(runnerConfig); + const serviceContext = new system.ServiceContextContainer(config); + const moduleManager = container.getImplementation(modules.ModuleManager); + await moduleManager.initialize(serviceContext); + // This is mostly done to ensure that the storage is ready + await serviceContext.lifeCycleEngine.start(); - logger.info(`Terminating replication slot ${stream.slot_name}`); - await stream.terminate(); - logger.info(`Terminated replication slot ${stream.slot_name}`); - } finally { - await lock.release(); + await terminateSyncRules(serviceContext.storageEngine.activeBucketStorage, moduleManager); + await serviceContext.storageEngine.activeStorage.tearDown(); + logger.info(`Teardown complete.`); + process.exit(0); + } catch (e) { + logger.error(`Teardown failure`, e); + process.exit(1); } } -/** - * Terminate all replicating sync rules, deleting the replication slots. - * - * Retries lock and other errors for up to two minutes. - * - * This is a best-effot attempt. In some cases it may not be possible to delete the replication - * slot, such as when the postgres instance is unreachable. 
- */ -async function terminateReplicators( - storageFactory: storage.BucketStorageFactory, - connection: utils.ResolvedConnection -) { +async function terminateSyncRules(storageFactory: storage.BucketStorageFactory, moduleManager: modules.ModuleManager) { + logger.info(`Terminating sync rules...`); const start = Date.now(); - while (Date.now() - start < 12_000) { + const locks: storage.ReplicationLock[] = []; + while (Date.now() - start < 120_000) { let retry = false; - const replicationRules = await storageFactory.getReplicatingSyncRules(); - for (let syncRules of replicationRules) { - try { - await terminateReplicator(storageFactory, connection, syncRules); - } catch (e) { - retry = true; - console.error(e); - logger.warn(`Failed to terminate ${syncRules.slot_name}`, e); + const replicatingSyncRules = await storageFactory.getReplicatingSyncRules(); + // Lock all the replicating sync rules + for (const replicatingSyncRule of replicatingSyncRules) { + const lock = await replicatingSyncRule.lock(); + locks.push(lock); + } + + const stoppedSyncRules = await storageFactory.getStoppedSyncRules(); + const combinedSyncRules = [...replicatingSyncRules, ...stoppedSyncRules]; + try { + // Clean up any module specific configuration for the sync rules + await moduleManager.tearDown({ syncRules: combinedSyncRules }); + + // Mark the sync rules as terminated + for (let syncRules of combinedSyncRules) { + using syncRulesStorage = storageFactory.getInstance(syncRules); + // The storage will be dropped at the end of the teardown, so we don't need to clear it here + await syncRulesStorage.terminate({ clearStorage: false }); + } + } catch (e) { + retry = true; + for (const lock of locks) { + await lock.release(); } } + if (!retry) { break; } await timers.setTimeout(5_000); } } - -export async function teardown(runnerConfig: utils.RunnerConfig) { - const config = await utils.loadConfig(runnerConfig); - const mongoDB = storage.createPowerSyncMongo(config.storage); - try { - logger.info(`Waiting for auth`); - await db.mongo.waitForAuth(mongoDB.db); - - const bucketStorage = new storage.MongoBucketStorage(mongoDB, { slot_name_prefix: config.slot_name_prefix }); - const connection = config.connection; - - logger.info(`Terminating replication slots`); - - if (connection) { - await terminateReplicators(bucketStorage, connection); - } - - const database = mongoDB.db; - logger.info(`Dropping database ${database.namespace}`); - await database.dropDatabase(); - logger.info(`Done`); - await mongoDB.client.close(); - - // If there was an error connecting to postgress, the process may stay open indefinitely. - // This forces an exit. - // We do not consider those errors a teardown failure. 
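As in the write-checkpoint route earlier, the sync-rules storage instances used during teardown are acquired with a `using` declaration, so the disposable storage is released when the block exits. A small sketch of the pattern, assuming a TypeScript target with `Symbol.dispose`/`using` support and the package's own storage types:

```ts
import * as storage from '../storage/storage-index.js';

// Illustrative: acquire a per-sync-rules storage instance and let `using`
// dispose of it (via Symbol.dispose) when the block exits, even on error.
async function withSyncRulesStorage(
  factory: storage.BucketStorageFactory,
  syncRules: storage.PersistedSyncRulesContent
) {
  using syncRulesStorage = factory.getInstance(syncRules);
  return await syncRulesStorage.getStatus();
}
```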
- process.exit(0); - } catch (e) { - logger.error(`Teardown failure`, e); - await mongoDB.client.close(); - process.exit(1); - } -} diff --git a/packages/service-core/src/storage/BucketStorage.ts b/packages/service-core/src/storage/BucketStorage.ts index b3f567bd1..735367700 100644 --- a/packages/service-core/src/storage/BucketStorage.ts +++ b/packages/service-core/src/storage/BucketStorage.ts @@ -1,3 +1,4 @@ +import { DisposableListener, DisposableObserverClient } from '@powersync/lib-services-framework'; import { EvaluatedParameters, EvaluatedRow, @@ -7,12 +8,19 @@ import { SqliteRow, ToastableSqliteRow } from '@powersync/service-sync-rules'; - -import * as replication from '../replication/replication-index.js'; import * as util from '../util/util-index.js'; +import { ReplicationEventPayload } from './ReplicationEventPayload.js'; +import { SourceEntityDescriptor } from './SourceEntity.js'; import { SourceTable } from './SourceTable.js'; +import { BatchedCustomWriteCheckpointOptions, ReplicaId } from './storage-index.js'; +import { SyncStorageWriteCheckpointAPI } from './WriteCheckpointAPI.js'; + +export interface BucketStorageFactoryListener extends DisposableListener { + syncStorageCreated: (storage: SyncRulesBucketStorage) => void; + replicationEvent: (event: ReplicationEventPayload) => void; +} -export interface BucketStorageFactory { +export interface BucketStorageFactory extends DisposableObserverClient { /** * Update sync rules from configuration, if changed. */ @@ -24,7 +32,7 @@ export interface BucketStorageFactory { /** * Get a storage instance to query sync data for specific sync rules. */ - getInstance(options: PersistedSyncRules): SyncRulesBucketStorage; + getInstance(options: PersistedSyncRulesContent): SyncRulesBucketStorage; /** * Deploy new sync rules. @@ -48,7 +56,7 @@ export interface BucketStorageFactory { /** * Get the sync rules used for querying. */ - getActiveSyncRules(): Promise; + getActiveSyncRules(options: ParseSyncRulesOptions): Promise; /** * Get the sync rules used for querying. @@ -58,7 +66,7 @@ export interface BucketStorageFactory { /** * Get the sync rules that will be active next once done with initial replicatino. */ - getNextSyncRules(): Promise; + getNextSyncRules(options: ParseSyncRulesOptions): Promise; /** * Get the sync rules that will be active next once done with initial replicatino. @@ -81,10 +89,9 @@ export interface BucketStorageFactory { */ getActiveCheckpoint(): Promise; - createWriteCheckpoint(user_id: string, lsns: Record): Promise; - - lastWriteCheckpoint(user_id: string, lsn: string): Promise; - + /** + * Yields the latest user write checkpoint whenever the sync checkpoint updates. 
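With this change, persisted sync rules can no longer be parsed without supplying `ParseSyncRulesOptions`; the default schema differs per data source, so callers typically take the options from the registered `RouteAPI` instead of hard-coding them. A small sketch, assuming the import paths of the surrounding package (names are illustrative):

```ts
import { RouteAPI } from '../api/RouteAPI.js';
import { BucketStorageFactory } from '../storage/BucketStorage.js';

// Illustrative: resolve the active sync rules with data-source-aware parse options.
async function loadActiveSyncRules(activeBucketStorage: BucketStorageFactory, apiHandler: RouteAPI) {
  const parseOptions = apiHandler.getParseSyncRulesOptions(); // e.g. { defaultSchema: 'public' }
  return await activeBucketStorage.getActiveSyncRules(parseOptions);
}
```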
+ */ watchWriteCheckpoint(user_id: string, signal: AbortSignal): AsyncIterable; /** @@ -98,20 +105,22 @@ export interface BucketStorageFactory { getPowerSyncInstanceId(): Promise; } -export interface WriteCheckpoint { - base: ActiveCheckpoint; - writeCheckpoint: bigint | null; -} - -export interface ActiveCheckpoint { +export interface ReplicationCheckpoint { readonly checkpoint: util.OpId; - readonly lsn: string; + readonly lsn: string | null; +} +export interface ActiveCheckpoint extends ReplicationCheckpoint { hasSyncRules(): boolean; getBucketStorage(): Promise; } +export interface WriteCheckpoint { + base: ActiveCheckpoint; + writeCheckpoint: bigint | null; +} + export interface StorageMetrics { /** * Size of operations (bucket_data) @@ -131,6 +140,10 @@ export interface StorageMetrics { replication_size_bytes: number; } +export interface ParseSyncRulesOptions { + defaultSchema: string; +} + export interface PersistedSyncRulesContent { readonly id: number; readonly sync_rules_content: string; @@ -140,7 +153,7 @@ export interface PersistedSyncRulesContent { readonly last_keepalive_ts?: Date | null; readonly last_checkpoint_ts?: Date | null; - parsed(): PersistedSyncRules; + parsed(options: ParseSyncRulesOptions): PersistedSyncRules; lock(): Promise; } @@ -157,18 +170,6 @@ export interface PersistedSyncRules { readonly slot_name: string; } -export class DefaultPersistedSyncRules implements PersistedSyncRules { - public readonly checkpoint_lsn: string | null; - - constructor(public readonly id: number, public readonly sync_rules: SqlSyncRules, checkpoint_lsn: string | null) { - this.checkpoint_lsn = checkpoint_lsn; - } - - get slot_name(): string { - return `powersync_${this.id}`; - } -} - export interface UpdateSyncRulesOptions { content: string; lock?: boolean; @@ -198,8 +199,27 @@ export interface BucketDataBatchOptions { chunkLimitBytes?: number; } -export interface SyncRulesBucketStorage { - readonly sync_rules: SqlSyncRules; +export interface StartBatchOptions extends ParseSyncRulesOptions { + zeroLSN: string; + /** + * Whether or not to store a copy of the current data. + * + * This is needed if we need to apply partial updates, for example + * when we get TOAST values from Postgres. + * + * This is not needed when we get the full document from the source + * database, for example from MongoDB. + */ + storeCurrentData: boolean; +} + +export interface SyncRulesBucketStorageListener extends DisposableListener { + batchStarted: (batch: BucketStorageBatch) => void; +} + +export interface SyncRulesBucketStorage + extends DisposableObserverClient, + SyncStorageWriteCheckpointAPI { readonly group_id: number; readonly slot_name: string; @@ -207,9 +227,14 @@ export interface SyncRulesBucketStorage { resolveTable(options: ResolveTableOptions): Promise; - startBatch(options: {}, callback: (batch: BucketStorageBatch) => Promise): Promise; + startBatch( + options: StartBatchOptions, + callback: (batch: BucketStorageBatch) => Promise + ): Promise; + + getCheckpoint(): Promise; - getCheckpoint(): Promise<{ checkpoint: util.OpId; lsn: string }>; + getParsedSyncRules(options: ParseSyncRulesOptions): SqlSyncRules; getParameterSets(checkpoint: util.OpId, lookups: SqliteJsonValue[][]): Promise; @@ -244,7 +269,7 @@ export interface SyncRulesBucketStorage { * * Must only be called on stopped sync rules. 
*/ - terminate(): Promise; + terminate(options?: TerminateOptions): Promise; getStatus(): Promise; @@ -277,7 +302,7 @@ export interface ResolveTableOptions { group_id: number; connection_id: number; connection_tag: string; - relation: replication.PgRelation; + entity_descriptor: SourceEntityDescriptor; sync_rules: SqlSyncRules; } @@ -291,7 +316,11 @@ export interface FlushedResult { flushed_op: string; } -export interface BucketStorageBatch { +export interface BucketBatchStorageListener extends DisposableListener { + replicationEvent: (payload: ReplicationEventPayload) => void; +} + +export interface BucketStorageBatch extends DisposableObserverClient { /** * Save an op, and potentially flush. * @@ -337,7 +366,17 @@ export interface BucketStorageBatch { */ keepalive(lsn: string): Promise; + /** + * Get the last checkpoint LSN, from either commit or keepalive. + */ + lastCheckpointLsn: string | null; + markSnapshotDone(tables: SourceTable[], no_checkpoint_before_lsn: string): Promise; + + /** + * Queues the creation of a custom Write Checkpoint. This will be persisted after operations are flushed. + */ + addCustomWriteCheckpoint(checkpoint: BatchedCustomWriteCheckpointOptions): void; } export interface SaveParameterData { @@ -355,23 +394,34 @@ export interface SaveBucketData { evaluated: EvaluatedRow[]; } +export type SaveOp = 'insert' | 'update' | 'delete'; + export type SaveOptions = SaveInsert | SaveUpdate | SaveDelete; +export enum SaveOperationTag { + INSERT = 'insert', + UPDATE = 'update', + DELETE = 'delete' +} + export interface SaveInsert { - tag: 'insert'; + tag: SaveOperationTag.INSERT; sourceTable: SourceTable; before?: undefined; + beforeReplicaId?: undefined; after: SqliteRow; + afterReplicaId: ReplicaId; } export interface SaveUpdate { - tag: 'update'; + tag: SaveOperationTag.UPDATE; sourceTable: SourceTable; /** * This is only present when the id has changed, and will only contain replica identity columns. */ before?: SqliteRow; + beforeReplicaId?: ReplicaId; /** * A null value means null column. @@ -379,13 +429,16 @@ export interface SaveUpdate { * An undefined value means it's a TOAST value - must be copied from another record. */ after: ToastableSqliteRow; + afterReplicaId: ReplicaId; } export interface SaveDelete { - tag: 'delete'; + tag: SaveOperationTag.DELETE; sourceTable: SourceTable; - before: SqliteRow; + before?: SqliteRow; + beforeReplicaId: ReplicaId; after?: undefined; + afterReplicaId?: undefined; } export interface SyncBucketDataBatch { @@ -429,6 +482,15 @@ export interface CompactOptions { * If specified, compact only the specific buckets. * * If not specified, compacts all buckets. + * + * These can be individual bucket names, or bucket definition names. */ compactBuckets?: string[]; } + +export interface TerminateOptions { + /** + * If true, also clear the storage before terminating. 
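Putting the new types together, a replicator now starts a batch with data-source-specific options and tags each saved row with an explicit replica id. A rough sketch of a single insert, assuming `BucketStorageBatch` exposes a `save(options: SaveOptions)` method as the comment above suggests; the literal LSNs, row values and replica id are placeholders, and the concrete `ReplicaId` format is storage-defined:

```ts
import { SaveOperationTag, StartBatchOptions, SyncRulesBucketStorage } from '../storage/BucketStorage.js';
import { SourceTable } from '../storage/SourceTable.js';

// Illustrative replication step using the new batch/save shape.
async function replicateOneInsert(storage: SyncRulesBucketStorage, table: SourceTable) {
  const options: StartBatchOptions = {
    zeroLSN: '0/0',          // data-source-specific "no LSN yet" value
    defaultSchema: 'public', // from ParseSyncRulesOptions
    storeCurrentData: true   // needed when partial updates (e.g. TOAST values) are possible
  };

  await storage.startBatch(options, async (batch) => {
    await batch.save({
      tag: SaveOperationTag.INSERT,
      sourceTable: table,
      after: { id: 'row-1', description: 'example' },
      // ReplicaId format is storage-defined; a string key is used purely for illustration.
      afterReplicaId: 'row-1'
    });
    // Advance the checkpoint even when there are no further changes.
    await batch.keepalive('0/1');
  });
}
```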
+ */ + clearStorage: boolean; +} diff --git a/packages/service-core/src/storage/MongoBucketStorage.ts b/packages/service-core/src/storage/MongoBucketStorage.ts index 919fb24e9..bfebaba31 100644 --- a/packages/service-core/src/storage/MongoBucketStorage.ts +++ b/packages/service-core/src/storage/MongoBucketStorage.ts @@ -1,36 +1,39 @@ -import * as mongo from 'mongodb'; -import * as timers from 'timers/promises'; -import { LRUCache } from 'lru-cache/min'; import { SqlSyncRules } from '@powersync/service-sync-rules'; import { wrapWithAbort } from 'ix/asynciterable/operators/withabort.js'; +import { LRUCache } from 'lru-cache/min'; +import * as mongo from 'mongodb'; +import * as timers from 'timers/promises'; -import * as replication from '../replication/replication-index.js'; +import * as locks from '../locks/locks-index.js'; import * as sync from '../sync/sync-index.js'; import * as util from '../util/util-index.js'; -import * as locks from '../locks/locks-index.js'; +import { DisposableObserver, logger } from '@powersync/lib-services-framework'; +import { v4 as uuid } from 'uuid'; import { ActiveCheckpoint, BucketStorageFactory, + BucketStorageFactoryListener, + ParseSyncRulesOptions, PersistedSyncRules, PersistedSyncRulesContent, StorageMetrics, UpdateSyncRulesOptions, WriteCheckpoint } from './BucketStorage.js'; +import { PowerSyncMongo } from './mongo/db.js'; +import { SyncRuleDocument, SyncRuleState } from './mongo/models.js'; import { MongoPersistedSyncRulesContent } from './mongo/MongoPersistedSyncRulesContent.js'; import { MongoSyncBucketStorage } from './mongo/MongoSyncBucketStorage.js'; -import { PowerSyncMongo, PowerSyncMongoOptions } from './mongo/db.js'; -import { SyncRuleDocument, SyncRuleState } from './mongo/models.js'; import { generateSlotName } from './mongo/util.js'; -import { v4 as uuid } from 'uuid'; -import { logger } from '@powersync/lib-services-framework'; - -export interface MongoBucketStorageOptions extends PowerSyncMongoOptions {} -export class MongoBucketStorage implements BucketStorageFactory { +export class MongoBucketStorage + extends DisposableObserver + implements BucketStorageFactory +{ private readonly client: mongo.MongoClient; private readonly session: mongo.ClientSession; + // TODO: This is still Postgres specific and needs to be reworked public readonly slot_name_prefix: string; private readonly storageCache = new LRUCache({ @@ -47,26 +50,44 @@ export class MongoBucketStorage implements BucketStorageFactory { return undefined; } const rules = new MongoPersistedSyncRulesContent(this.db, doc2); - const storage = this.getInstance(rules.parsed()); - return storage; + return this.getInstance(rules); + }, + dispose: (storage) => { + storage[Symbol.dispose](); } }); public readonly db: PowerSyncMongo; - constructor(db: PowerSyncMongo, options: { slot_name_prefix: string }) { + constructor( + db: PowerSyncMongo, + options: { + slot_name_prefix: string; + } + ) { + super(); this.client = db.client; this.db = db; this.session = this.client.startSession(); this.slot_name_prefix = options.slot_name_prefix; } - getInstance(options: PersistedSyncRules): MongoSyncBucketStorage { - let { id, sync_rules, slot_name } = options; + getInstance(options: PersistedSyncRulesContent): MongoSyncBucketStorage { + let { id, slot_name } = options; if ((typeof id as any) == 'bigint') { id = Number(id); } - return new MongoSyncBucketStorage(this, id, sync_rules, slot_name); + const storage = new MongoSyncBucketStorage(this, id, options, slot_name); + this.iterateListeners((cb) => 
cb.syncStorageCreated?.(storage)); + storage.registerListener({ + batchStarted: (batch) => { + // This nested listener will be automatically disposed when the storage is disposed + batch.registerManagedListener(storage, { + replicationEvent: (payload) => this.iterateListeners((cb) => cb.replicationEvent?.(payload)) + }); + } + }); + return storage; } async configureSyncRules(sync_rules: string, options?: { lock?: boolean }) { @@ -136,7 +157,12 @@ export class MongoBucketStorage implements BucketStorageFactory { async updateSyncRules(options: UpdateSyncRulesOptions): Promise { // Parse and validate before applying any changes - const parsed = SqlSyncRules.fromYaml(options.content); + const parsed = SqlSyncRules.fromYaml(options.content, { + // No schema-based validation at this point + schema: undefined, + defaultSchema: 'not_applicable', // Not needed for validation + throwOnError: true + }); let rules: MongoPersistedSyncRulesContent | undefined = undefined; @@ -204,9 +230,9 @@ export class MongoBucketStorage implements BucketStorageFactory { return new MongoPersistedSyncRulesContent(this.db, doc); } - async getActiveSyncRules(): Promise { + async getActiveSyncRules(options: ParseSyncRulesOptions): Promise { const content = await this.getActiveSyncRulesContent(); - return content?.parsed() ?? null; + return content?.parsed(options) ?? null; } async getNextSyncRulesContent(): Promise { @@ -223,9 +249,9 @@ export class MongoBucketStorage implements BucketStorageFactory { return new MongoPersistedSyncRulesContent(this.db, doc); } - async getNextSyncRules(): Promise { + async getNextSyncRules(options: ParseSyncRulesOptions): Promise { const content = await this.getNextSyncRulesContent(); - return content?.parsed() ?? null; + return content?.parsed(options) ?? null; } async getReplicatingSyncRules(): Promise { @@ -252,32 +278,6 @@ export class MongoBucketStorage implements BucketStorageFactory { }); } - async createWriteCheckpoint(user_id: string, lsns: Record): Promise { - const doc = await this.db.write_checkpoints.findOneAndUpdate( - { - user_id: user_id - }, - { - $set: { - lsns: lsns - }, - $inc: { - client_id: 1n - } - }, - { upsert: true, returnDocument: 'after' } - ); - return doc!.client_id; - } - - async lastWriteCheckpoint(user_id: string, lsn: string): Promise { - const lastWriteCheckpoint = await this.db.write_checkpoints.findOne({ - user_id: user_id, - 'lsns.1': { $lte: lsn } - }); - return lastWriteCheckpoint?.client_id ?? null; - } - async getActiveCheckpoint(): Promise { const doc = await this.db.sync_rules.findOne( { @@ -303,7 +303,7 @@ export class MongoBucketStorage implements BucketStorageFactory { } }; - const active_sync_rules = await this.getActiveSyncRules(); + const active_sync_rules = await this.getActiveSyncRules({ defaultSchema: 'public' }); if (active_sync_rules == null) { return { operations_size_bytes: 0, @@ -379,7 +379,7 @@ export class MongoBucketStorage implements BucketStorageFactory { private makeActiveCheckpoint(doc: SyncRuleDocument | null) { return { checkpoint: util.timestampToOpId(doc?.last_checkpoint ?? 0n), - lsn: doc?.last_checkpoint_lsn ?? replication.ZERO_LSN, + lsn: doc?.last_checkpoint_lsn ?? null, hasSyncRules() { return doc != null; }, @@ -389,7 +389,7 @@ export class MongoBucketStorage implements BucketStorageFactory { } return (await this.storageCache.fetch(doc._id)) ?? 
null; } - }; + } satisfies ActiveCheckpoint; } /** @@ -479,6 +479,7 @@ export class MongoBucketStorage implements BucketStorageFactory { if (doc == null) { continue; } + const op = this.makeActiveCheckpoint(doc); // Check for LSN / checkpoint changes - ignore other metadata changes if (lastOp == null || op.lsn != lastOp.lsn || op.checkpoint != lastOp.checkpoint) { @@ -508,8 +509,19 @@ export class MongoBucketStorage implements BucketStorageFactory { // What is important is: // 1. checkpoint (op_id) changes. // 2. write checkpoint changes for the specific user + const bucketStorage = await cp.getBucketStorage(); + if (!bucketStorage) { + continue; + } + + const lsnFilters: Record = lsn ? { 1: lsn } : {}; - const currentWriteCheckpoint = await this.lastWriteCheckpoint(user_id, lsn ?? ''); + const currentWriteCheckpoint = await bucketStorage.lastWriteCheckpoint({ + user_id, + heads: { + ...lsnFilters + } + }); if (currentWriteCheckpoint == lastWriteCheckpoint && checkpoint == lastCheckpoint) { // No change - wait for next one diff --git a/packages/service-core/src/storage/ReplicationEventPayload.ts b/packages/service-core/src/storage/ReplicationEventPayload.ts new file mode 100644 index 000000000..c2fe0aa84 --- /dev/null +++ b/packages/service-core/src/storage/ReplicationEventPayload.ts @@ -0,0 +1,16 @@ +import * as sync_rules from '@powersync/service-sync-rules'; +import { BucketStorageBatch, SaveOp } from './BucketStorage.js'; +import { SourceTable } from './SourceTable.js'; + +export type EventData = { + op: SaveOp; + before?: sync_rules.SqliteRow; + after?: sync_rules.SqliteRow; +}; + +export type ReplicationEventPayload = { + batch: BucketStorageBatch; + data: EventData; + event: sync_rules.SqlEventDescriptor; + table: SourceTable; +}; diff --git a/packages/service-core/src/storage/SourceEntity.ts b/packages/service-core/src/storage/SourceEntity.ts new file mode 100644 index 000000000..2b0031831 --- /dev/null +++ b/packages/service-core/src/storage/SourceEntity.ts @@ -0,0 +1,22 @@ +export interface ColumnDescriptor { + name: string; + /** + * The type of the column ie VARCHAR, INT, etc + */ + type?: string; + /** + * Some data sources have a type id that can be used to identify the type of the column + */ + typeId?: number; +} + +// TODO: This needs to be consolidated with SourceTable into something new. +export interface SourceEntityDescriptor { + /** + * The internal id of the data source structure in the database + */ + objectId: number | string; + schema: string; + name: string; + replicationColumns: ColumnDescriptor[]; +} diff --git a/packages/service-core/src/storage/SourceTable.ts b/packages/service-core/src/storage/SourceTable.ts index 960ee4881..f514f9081 100644 --- a/packages/service-core/src/storage/SourceTable.ts +++ b/packages/service-core/src/storage/SourceTable.ts @@ -1,10 +1,8 @@ -import { DEFAULT_SCHEMA, DEFAULT_TAG } from '@powersync/service-sync-rules'; - -import * as replication from '../replication/replication-index.js'; +import { DEFAULT_TAG } from '@powersync/service-sync-rules'; import * as util from '../util/util-index.js'; +import { ColumnDescriptor } from './SourceEntity.js'; export class SourceTable { - static readonly DEFAULT_SCHEMA = DEFAULT_SCHEMA; static readonly DEFAULT_TAG = DEFAULT_TAG; /** @@ -25,14 +23,23 @@ export class SourceTable { */ public syncParameters = true; + /** + * True if the table is used in sync rules for events. + * + * This value is resolved externally, and cached here. + * + * Defaults to true for tests. 
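The new `ReplicationEventPayload`, together with the listener wiring in `MongoBucketStorage.getInstance` above, lets other components observe row events as batches are written. A hypothetical subscriber (logging only) might look like:

```ts
import { MongoBucketStorage } from './MongoBucketStorage.js';

// Sketch only: logs every replication event forwarded by the storage factory.
function logReplicationEvents(factory: MongoBucketStorage) {
  factory.registerListener({
    replicationEvent: (payload) => {
      console.log(`${payload.data.op} on ${payload.table.qualifiedName}`);
    }
  });
}
```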
+ */ + public syncEvent = true; + constructor( public readonly id: any, public readonly connectionTag: string, - public readonly relationId: number, + public readonly objectId: number | string, public readonly schema: string, public readonly table: string, - public readonly replicaIdColumns: replication.ReplicationColumn[], + public readonly replicaIdColumns: ColumnDescriptor[], public readonly snapshotComplete: boolean ) {} @@ -55,6 +62,6 @@ export class SourceTable { } get syncAny() { - return this.syncData || this.syncParameters; + return this.syncData || this.syncParameters || this.syncEvent; } } diff --git a/packages/service-core/src/storage/StorageEngine.ts b/packages/service-core/src/storage/StorageEngine.ts new file mode 100644 index 000000000..a6639211a --- /dev/null +++ b/packages/service-core/src/storage/StorageEngine.ts @@ -0,0 +1,62 @@ +import { DisposableListener, DisposableObserver, logger } from '@powersync/lib-services-framework'; +import { ResolvedPowerSyncConfig } from '../util/util-index.js'; +import { BucketStorageFactory } from './BucketStorage.js'; +import { ActiveStorage, BucketStorageProvider } from './StorageProvider.js'; + +export type StorageEngineOptions = { + configuration: ResolvedPowerSyncConfig; +}; + +export interface StorageEngineListener extends DisposableListener { + storageActivated: (storage: BucketStorageFactory) => void; +} + +export class StorageEngine extends DisposableObserver { + // TODO: This will need to revisited when we actually support multiple storage providers. + private storageProviders: Map = new Map(); + private currentActiveStorage: ActiveStorage | null = null; + + constructor(private options: StorageEngineOptions) { + super(); + } + + get activeBucketStorage(): BucketStorageFactory { + return this.activeStorage.storage; + } + + get activeStorage(): ActiveStorage { + if (!this.currentActiveStorage) { + throw new Error(`No storage provider has been initialized yet.`); + } + + return this.currentActiveStorage; + } + + /** + * Register a provider which generates a {@link BucketStorageFactory} + * given the matching config specified in the loaded {@link ResolvedPowerSyncConfig} + */ + registerProvider(provider: BucketStorageProvider) { + this.storageProviders.set(provider.type, provider); + } + + public async start(): Promise { + logger.info('Starting Storage Engine...'); + const { configuration } = this.options; + this.currentActiveStorage = await this.storageProviders.get(configuration.storage.type)!.getStorage({ + resolvedConfig: configuration + }); + this.iterateListeners((cb) => cb.storageActivated?.(this.activeBucketStorage)); + logger.info(`Successfully activated storage: ${configuration.storage.type}.`); + logger.info('Successfully started Storage Engine.'); + } + + /** + * Shutdown the storage engine, safely shutting down any activated storage providers. 
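A rough sketch of how the new `StorageEngine` might be wired up, assuming a `ResolvedPowerSyncConfig` with `storage.type` set to `mongodb` and the `MongoStorageProvider` introduced later in this diff:

```ts
import { ResolvedPowerSyncConfig } from '../util/util-index.js';
import { MongoStorageProvider } from './mongo/MongoStorageProvider.js';
import { StorageEngine } from './StorageEngine.js';

// Sketch only: `config` comes from the service's normal configuration loading.
async function startStorage(config: ResolvedPowerSyncConfig) {
  const engine = new StorageEngine({ configuration: config });
  engine.registerProvider(new MongoStorageProvider());

  engine.registerListener({
    storageActivated: (factory) => {
      // e.g. hand the active BucketStorageFactory to the replication and router engines
    }
  });

  await engine.start();
  return engine;
}
```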
+ */ + public async shutDown(): Promise { + logger.info('Shutting down Storage Engine...'); + await this.currentActiveStorage?.shutDown(); + logger.info('Successfully shut down Storage Engine.'); + } +} diff --git a/packages/service-core/src/storage/StorageProvider.ts b/packages/service-core/src/storage/StorageProvider.ts new file mode 100644 index 000000000..385a042fd --- /dev/null +++ b/packages/service-core/src/storage/StorageProvider.ts @@ -0,0 +1,27 @@ +import * as util from '../util/util-index.js'; +import { BucketStorageFactory } from './BucketStorage.js'; + +export interface ActiveStorage { + storage: BucketStorageFactory; + shutDown(): Promise; + + /** + * Tear down / drop the storage permanently + */ + tearDown(): Promise; +} + +export interface GetStorageOptions { + // TODO: This should just be the storage config. Update once the slot name prefix coupling has been removed from the storage + resolvedConfig: util.ResolvedPowerSyncConfig; +} + +export interface BucketStorageProvider { + /** + * The storage type that this provider provides. + * The type should match the `type` field in the config. + */ + type: string; + + getStorage(options: GetStorageOptions): Promise; +} diff --git a/packages/service-core/src/storage/WriteCheckpointAPI.ts b/packages/service-core/src/storage/WriteCheckpointAPI.ts new file mode 100644 index 000000000..d38ac3979 --- /dev/null +++ b/packages/service-core/src/storage/WriteCheckpointAPI.ts @@ -0,0 +1,85 @@ +export enum WriteCheckpointMode { + /** + * Raw mappings of `user_id` to `write_checkpoint`s should + * be supplied for each set of sync rules. + */ + CUSTOM = 'custom', + /** + * Write checkpoints are stored as a mapping of `user_id` plus + * replication HEAD (lsn in Postgres) to an automatically generated + * incrementing `write_checkpoint` (stored as`client_id`). + */ + MANAGED = 'managed' +} + +export interface BaseWriteCheckpointIdentifier { + /** + * Identifier for User's account. + */ + user_id: string; +} + +export interface CustomWriteCheckpointFilters extends BaseWriteCheckpointIdentifier { + /** + * Sync rules which were active when this checkpoint was created. + */ + sync_rules_id: number; +} + +export interface BatchedCustomWriteCheckpointOptions extends BaseWriteCheckpointIdentifier { + /** + * A supplied incrementing Write Checkpoint number + */ + checkpoint: bigint; +} + +export interface CustomWriteCheckpointOptions extends BatchedCustomWriteCheckpointOptions { + /** + * Sync rules which were active when this checkpoint was created. + */ + sync_rules_id: number; +} + +/** + * Managed Write Checkpoints are a mapping of User ID to replication HEAD + */ +export interface ManagedWriteCheckpointFilters extends BaseWriteCheckpointIdentifier { + /** + * Replication HEAD(s) at the creation of the checkpoint. + */ + heads: Record; +} + +export type ManagedWriteCheckpointOptions = ManagedWriteCheckpointFilters; + +export type SyncStorageLastWriteCheckpointFilters = BaseWriteCheckpointIdentifier | ManagedWriteCheckpointFilters; +export type LastWriteCheckpointFilters = CustomWriteCheckpointFilters | ManagedWriteCheckpointFilters; + +export interface BaseWriteCheckpointAPI { + readonly writeCheckpointMode: WriteCheckpointMode; + setWriteCheckpointMode(mode: WriteCheckpointMode): void; + createManagedWriteCheckpoint(checkpoint: ManagedWriteCheckpointOptions): Promise; +} + +/** + * Write Checkpoint API to be used in conjunction with a {@link SyncRulesBucketStorage}. + * This storage corresponds with a set of sync rules. 
These APIs don't require specifying a + * sync rules id. + */ +export interface SyncStorageWriteCheckpointAPI extends BaseWriteCheckpointAPI { + batchCreateCustomWriteCheckpoints(checkpoints: BatchedCustomWriteCheckpointOptions[]): Promise; + createCustomWriteCheckpoint(checkpoint: BatchedCustomWriteCheckpointOptions): Promise; + lastWriteCheckpoint(filters: SyncStorageLastWriteCheckpointFilters): Promise; +} + +/** + * Write Checkpoint API which is interfaced directly with the storage layer. This requires + * sync rules identifiers for custom write checkpoints. + */ +export interface WriteCheckpointAPI extends BaseWriteCheckpointAPI { + batchCreateCustomWriteCheckpoints(checkpoints: CustomWriteCheckpointOptions[]): Promise; + createCustomWriteCheckpoint(checkpoint: CustomWriteCheckpointOptions): Promise; + lastWriteCheckpoint(filters: LastWriteCheckpointFilters): Promise; +} + +export const DEFAULT_WRITE_CHECKPOINT_MODE = WriteCheckpointMode.MANAGED; diff --git a/packages/service-core/src/storage/mongo/MongoBucketBatch.ts b/packages/service-core/src/storage/mongo/MongoBucketBatch.ts index a1b6181f2..54bb81443 100644 --- a/packages/service-core/src/storage/mongo/MongoBucketBatch.ts +++ b/packages/service-core/src/storage/mongo/MongoBucketBatch.ts @@ -1,18 +1,25 @@ -import { SqliteRow, SqlSyncRules } from '@powersync/service-sync-rules'; +import { SqlEventDescriptor, SqliteRow, SqlSyncRules } from '@powersync/service-sync-rules'; import * as bson from 'bson'; import * as mongo from 'mongodb'; +import { container, DisposableObserver, errors, logger } from '@powersync/lib-services-framework'; import * as util from '../../util/util-index.js'; -import * as replication from '../../replication/replication-index.js'; -import { container, errors, logger } from '@powersync/lib-services-framework'; -import { BucketStorageBatch, FlushedResult, mergeToast, SaveOptions } from '../BucketStorage.js'; +import { + BucketBatchStorageListener, + BucketStorageBatch, + FlushedResult, + mergeToast, + SaveOptions +} from '../BucketStorage.js'; import { SourceTable } from '../SourceTable.js'; +import { BatchedCustomWriteCheckpointOptions, CustomWriteCheckpointOptions } from '../WriteCheckpointAPI.js'; import { PowerSyncMongo } from './db.js'; -import { CurrentBucket, CurrentDataDocument, SourceKey } from './models.js'; +import { CurrentBucket, CurrentDataDocument, SourceKey, SyncRuleDocument } from './models.js'; import { MongoIdSequence } from './MongoIdSequence.js'; +import { batchCreateCustomWriteCheckpoints } from './MongoWriteCheckpointAPI.js'; import { cacheKey, OperationBatch, RecordOperation } from './OperationBatch.js'; import { PersistedBatch } from './PersistedBatch.js'; -import { BSON_DESERIALIZE_OPTIONS, idPrefixFilter, serializeLookup } from './util.js'; +import { BSON_DESERIALIZE_OPTIONS, idPrefixFilter, replicaIdEquals, serializeLookup } from './util.js'; /** * 15MB @@ -25,7 +32,18 @@ const MAX_ROW_SIZE = 15 * 1024 * 1024; // // In the future, we can investigate allowing multiple replication streams operating independently. 
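To make the two write checkpoint modes concrete, a hypothetical caller holding a `SyncRulesBucketStorage` (which implements `SyncStorageWriteCheckpointAPI`, so no sync rules id is needed) might do the following; user ids, LSNs and checkpoint numbers are illustrative:

```ts
import { SyncRulesBucketStorage } from './BucketStorage.js';
import { WriteCheckpointMode } from './WriteCheckpointAPI.js';

async function writeCheckpointExamples(syncStorage: SyncRulesBucketStorage) {
  // Managed mode (the default): map the user to the current replication HEAD
  // and receive an auto-incrementing checkpoint number back.
  const managed = await syncStorage.createManagedWriteCheckpoint({
    user_id: 'user-1',
    heads: { '1': '0/16B3750' }
  });

  // Custom mode: the application supplies its own checkpoint numbers.
  syncStorage.setWriteCheckpointMode(WriteCheckpointMode.CUSTOM);
  await syncStorage.batchCreateCustomWriteCheckpoints([{ user_id: 'user-1', checkpoint: 42n }]);

  // Reading the latest checkpoint for a user (managed mode also filters by replication HEAD).
  const last = await syncStorage.lastWriteCheckpoint({
    user_id: 'user-1',
    heads: { '1': '0/16B3750' }
  });
  return { managed, last };
}
```

During replication, custom checkpoints can also be queued on the active batch via `addCustomWriteCheckpoint`, which `MongoBucketBatch` persists on the next flush.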
const replicationMutex = new util.Mutex(); -export class MongoBucketBatch implements BucketStorageBatch { + +export interface MongoBucketBatchOptions { + db: PowerSyncMongo; + syncRules: SqlSyncRules; + groupId: number; + slotName: string; + lastCheckpointLsn: string | null; + noCheckpointBeforeLsn: string; + storeCurrentData: boolean; +} + +export class MongoBucketBatch extends DisposableObserver implements BucketStorageBatch { private readonly client: mongo.MongoClient; public readonly db: PowerSyncMongo; public readonly session: mongo.ClientSession; @@ -34,8 +52,10 @@ export class MongoBucketBatch implements BucketStorageBatch { private readonly group_id: number; private readonly slot_name: string; + private readonly storeCurrentData: boolean; private batch: OperationBatch | null = null; + private write_checkpoint_batch: CustomWriteCheckpointOptions[] = []; /** * Last LSN received associated with a checkpoint. @@ -55,22 +75,29 @@ export class MongoBucketBatch implements BucketStorageBatch { */ public last_flushed_op: bigint | null = null; - constructor( - db: PowerSyncMongo, - sync_rules: SqlSyncRules, - group_id: number, - slot_name: string, - last_checkpoint_lsn: string | null, - no_checkpoint_before_lsn: string | null - ) { - this.db = db; - this.client = db.client; - this.sync_rules = sync_rules; - this.group_id = group_id; - this.slot_name = slot_name; + constructor(options: MongoBucketBatchOptions) { + super(); + this.client = options.db.client; + this.db = options.db; + this.group_id = options.groupId; + this.last_checkpoint_lsn = options.lastCheckpointLsn; + this.no_checkpoint_before_lsn = options.noCheckpointBeforeLsn; this.session = this.client.startSession(); - this.last_checkpoint_lsn = last_checkpoint_lsn; - this.no_checkpoint_before_lsn = no_checkpoint_before_lsn ?? replication.ZERO_LSN; + this.slot_name = options.slotName; + this.sync_rules = options.syncRules; + this.storeCurrentData = options.storeCurrentData; + this.batch = new OperationBatch(); + } + + addCustomWriteCheckpoint(checkpoint: BatchedCustomWriteCheckpointOptions): void { + this.write_checkpoint_batch.push({ + ...checkpoint, + sync_rules_id: this.group_id + }); + } + + get lastCheckpointLsn() { + return this.last_checkpoint_lsn; } async flush(): Promise { @@ -83,6 +110,8 @@ export class MongoBucketBatch implements BucketStorageBatch { result = r; } } + await batchCreateCustomWriteCheckpoints(this.db, this.write_checkpoint_batch); + this.write_checkpoint_batch = []; return result; } @@ -118,38 +147,44 @@ export class MongoBucketBatch implements BucketStorageBatch { batch: OperationBatch, op_seq: MongoIdSequence ): Promise { - // 1. Find sizes of current_data documents, to assist in intelligent batching without - // exceeding memory limits. - // - // A previous attempt tried to do batching by the results of the current_data query - // (automatically limited to 48MB(?) per batch by MongoDB). The issue is that it changes - // the order of processing, which then becomes really tricky to manage. - // This now takes 2+ queries, but doesn't have any issues with order of operations. - const sizeLookups: SourceKey[] = batch.batch.map((r) => { - return { g: this.group_id, t: r.record.sourceTable.id, k: r.beforeId }; - }); + let sizes: Map | undefined = undefined; + if (this.storeCurrentData) { + // We skip this step if we don't store current_data, since the sizes will + // always be small in that case. + + // Find sizes of current_data documents, to assist in intelligent batching without + // exceeding memory limits. 
+ // + // A previous attempt tried to do batching by the results of the current_data query + // (automatically limited to 48MB(?) per batch by MongoDB). The issue is that it changes + // the order of processing, which then becomes really tricky to manage. + // This now takes 2+ queries, but doesn't have any issues with order of operations. + const sizeLookups: SourceKey[] = batch.batch.map((r) => { + return { g: this.group_id, t: r.record.sourceTable.id, k: r.beforeId }; + }); - const sizes = new Map(); + sizes = new Map(); - const sizeCursor: mongo.AggregationCursor<{ _id: SourceKey; size: number }> = this.db.current_data.aggregate( - [ - { - $match: { - _id: { $in: sizeLookups } - } - }, - { - $project: { - _id: 1, - size: { $bsonSize: '$$ROOT' } + const sizeCursor: mongo.AggregationCursor<{ _id: SourceKey; size: number }> = this.db.current_data.aggregate( + [ + { + $match: { + _id: { $in: sizeLookups } + } + }, + { + $project: { + _id: 1, + size: { $bsonSize: '$$ROOT' } + } } - } - ], - { session } - ); - for await (let doc of sizeCursor.stream()) { - const key = cacheKey(doc._id.t, doc._id.k); - sizes.set(key, doc.size); + ], + { session } + ); + for await (let doc of sizeCursor.stream()) { + const key = cacheKey(doc._id.t, doc._id.k); + sizes.set(key, doc.size); + } } // If set, we need to start a new transaction with this batch. @@ -157,6 +192,7 @@ export class MongoBucketBatch implements BucketStorageBatch { let transactionSize = 0; // Now batch according to the sizes + // This is a single batch if storeCurrentData == false for await (let b of batch.batched(sizes)) { if (resumeBatch) { for (let op of b) { @@ -194,7 +230,7 @@ export class MongoBucketBatch implements BucketStorageBatch { if (nextData != null) { // Update our current_data and size cache current_data_lookup.set(op.internalAfterKey!, nextData); - sizes.set(op.internalAfterKey!, nextData.data.length()); + sizes?.set(op.internalAfterKey!, nextData.data.length()); } if (persistedBatch!.shouldFlushTransaction()) { @@ -244,14 +280,18 @@ export class MongoBucketBatch implements BucketStorageBatch { existing_buckets = []; existing_lookups = []; // Log to help with debugging if there was a consistency issue - logger.warn( - `Cannot find previous record for update on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}` - ); + if (this.storeCurrentData) { + logger.warn( + `Cannot find previous record for update on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}` + ); + } } else { - const data = bson.deserialize((result.data as mongo.Binary).buffer, BSON_DESERIALIZE_OPTIONS) as SqliteRow; existing_buckets = result.buckets; existing_lookups = result.lookups; - after = mergeToast(after!, data); + if (this.storeCurrentData) { + const data = bson.deserialize((result.data as mongo.Binary).buffer, BSON_DESERIALIZE_OPTIONS) as SqliteRow; + after = mergeToast(after!, data); + } } } else if (record.tag == 'delete') { const result = current_data; @@ -260,9 +300,11 @@ export class MongoBucketBatch implements BucketStorageBatch { existing_buckets = []; existing_lookups = []; // Log to help with debugging if there was a consistency issue - logger.warn( - `Cannot find previous record for delete on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}` - ); + if (this.storeCurrentData) { + logger.warn( + `Cannot find previous record for delete on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}` + ); + } } else { existing_buckets = result.buckets; 
existing_lookups = result.lookups; @@ -270,7 +312,9 @@ export class MongoBucketBatch implements BucketStorageBatch { } let afterData: bson.Binary | undefined; - if (afterId) { + if (afterId != null && !this.storeCurrentData) { + afterData = new bson.Binary(bson.serialize({})); + } else if (afterId != null) { try { // This will fail immediately if the record is > 16MB. afterData = new bson.Binary(bson.serialize(after!)); @@ -301,7 +345,7 @@ export class MongoBucketBatch implements BucketStorageBatch { } // 2. Save bucket data - if (beforeId != null && (afterId == null || !beforeId.equals(afterId))) { + if (beforeId != null && (afterId == null || !replicaIdEquals(beforeId, afterId))) { // Source ID updated if (sourceTable.syncData) { // Delete old record @@ -431,7 +475,7 @@ export class MongoBucketBatch implements BucketStorageBatch { }; } - if (afterId == null || !beforeId.equals(afterId)) { + if (afterId == null || !replicaIdEquals(beforeId, afterId)) { // Either a delete (afterId == null), or replaced the old replication id batch.deleteCurrentData(before_key); } @@ -528,14 +572,15 @@ export class MongoBucketBatch implements BucketStorageBatch { }); } - async abort() { + async [Symbol.asyncDispose]() { await this.session.endSession(); + super[Symbol.dispose](); } async commit(lsn: string): Promise { await this.flush(); - if (this.last_checkpoint_lsn != null && lsn <= this.last_checkpoint_lsn) { + if (this.last_checkpoint_lsn != null && lsn < this.last_checkpoint_lsn) { // When re-applying transactions, don't create a new checkpoint until // we are past the last transaction. logger.info(`Re-applied transaction ${lsn} - skipping checkpoint`); @@ -546,26 +591,29 @@ export class MongoBucketBatch implements BucketStorageBatch { return false; } + const now = new Date(); + const update: Partial = { + last_checkpoint_lsn: lsn, + last_checkpoint_ts: now, + last_keepalive_ts: now, + snapshot_done: true, + last_fatal_error: null + }; + if (this.persisted_op != null) { - const now = new Date(); - await this.db.sync_rules.updateOne( - { - _id: this.group_id - }, - { - $set: { - last_checkpoint: this.persisted_op, - last_checkpoint_lsn: lsn, - last_checkpoint_ts: now, - last_keepalive_ts: now, - snapshot_done: true, - last_fatal_error: null - } - }, - { session: this.session } - ); - this.persisted_op = null; + update.last_checkpoint = this.persisted_op; } + + await this.db.sync_rules.updateOne( + { + _id: this.group_id + }, + { + $set: update + }, + { session: this.session } + ); + this.persisted_op = null; this.last_checkpoint_lsn = lsn; return true; } @@ -606,6 +654,29 @@ export class MongoBucketBatch implements BucketStorageBatch { } async save(record: SaveOptions): Promise { + const { after, before, sourceTable, tag } = record; + for (const event of this.getTableEvents(sourceTable)) { + this.iterateListeners((cb) => + cb.replicationEvent?.({ + batch: this, + table: sourceTable, + data: { + op: tag, + after: after && util.isCompleteRow(after) ? after : undefined, + before: before && util.isCompleteRow(before) ? 
before : undefined + }, + event + }) + ); + } + + /** + * Return if the table is just an event table + */ + if (!sourceTable.syncData && !sourceTable.syncParameters) { + return null; + } + logger.debug(`Saving ${record.tag}:${record.before?.id}/${record.after?.id}`); this.batch ??= new OperationBatch(); @@ -743,7 +814,7 @@ export class MongoBucketBatch implements BucketStorageBatch { const copy = new SourceTable( table.id, table.connectionTag, - table.relationId, + table.objectId, table.schema, table.table, table.replicaIdColumns, @@ -754,6 +825,15 @@ export class MongoBucketBatch implements BucketStorageBatch { return copy; }); } + + /** + * Gets relevant {@link SqlEventDescriptor}s for the given {@link SourceTable} + */ + protected getTableEvents(table: SourceTable): SqlEventDescriptor[] { + return this.sync_rules.event_descriptors.filter((evt) => + [...evt.getSourceTables()].some((sourceTable) => sourceTable.matches(table)) + ); + } } export function currentBucketKey(b: CurrentBucket) { diff --git a/packages/service-core/src/storage/mongo/MongoCompactor.ts b/packages/service-core/src/storage/mongo/MongoCompactor.ts index 3c52936ba..102e1daf0 100644 --- a/packages/service-core/src/storage/mongo/MongoCompactor.ts +++ b/packages/service-core/src/storage/mongo/MongoCompactor.ts @@ -4,6 +4,7 @@ import { addChecksums } from '../../util/utils.js'; import { PowerSyncMongo } from './db.js'; import { BucketDataDocument, BucketDataKey } from './models.js'; import { CompactOptions } from '../BucketStorage.js'; +import { cacheKey } from './OperationBatch.js'; interface CurrentBucketState { /** Bucket name */ @@ -57,7 +58,11 @@ export class MongoCompactor { private maxOpId: bigint | undefined; private buckets: string[] | undefined; - constructor(private db: PowerSyncMongo, private group_id: number, options?: MongoCompactOptions) { + constructor( + private db: PowerSyncMongo, + private group_id: number, + options?: MongoCompactOptions + ) { this.idLimitBytes = (options?.memoryLimitMB ?? DEFAULT_MEMORY_LIMIT_MB) * 1024 * 1024; this.moveBatchLimit = options?.moveBatchLimit ?? DEFAULT_MOVE_BATCH_LIMIT; this.moveBatchQueryLimit = options?.moveBatchQueryLimit ?? DEFAULT_MOVE_BATCH_QUERY_LIMIT; @@ -89,17 +94,33 @@ export class MongoCompactor { let currentState: CurrentBucketState | null = null; + let bucketLower: string | MinKey; + let bucketUpper: string | MaxKey; + + if (bucket == null) { + bucketLower = new MinKey(); + bucketUpper = new MaxKey(); + } else if (bucket.includes('[')) { + // Exact bucket name + bucketLower = bucket; + bucketUpper = bucket; + } else { + // Bucket definition name + bucketLower = `${bucket}[`; + bucketUpper = `${bucket}[\uFFFF`; + } + // Constant lower bound const lowerBound: BucketDataKey = { g: this.group_id, - b: bucket ?? (new MinKey() as any), + b: bucketLower as string, o: new MinKey() as any }; // Upper bound is adjusted for each batch let upperBound: BucketDataKey = { g: this.group_id, - b: bucket ?? 
(new MaxKey() as any), + b: bucketUpper as string, o: new MaxKey() as any }; @@ -168,7 +189,7 @@ export class MongoCompactor { let isPersistentPut = doc.op == 'PUT'; if (doc.op == 'REMOVE' || doc.op == 'PUT') { - const key = `${doc.table}/${doc.row_id}/${doc.source_table}/${doc.source_key?.toHexString()}`; + const key = `${doc.table}/${doc.row_id}/${cacheKey(doc.source_table!, doc.source_key!)}`; const targetOp = currentState.seen.get(key); if (targetOp) { // Will convert to MOVE, so don't count as PUT diff --git a/packages/service-core/src/storage/mongo/MongoPersistedSyncRulesContent.ts b/packages/service-core/src/storage/mongo/MongoPersistedSyncRulesContent.ts index a32cf6fc1..0a68fe29b 100644 --- a/packages/service-core/src/storage/mongo/MongoPersistedSyncRulesContent.ts +++ b/packages/service-core/src/storage/mongo/MongoPersistedSyncRulesContent.ts @@ -1,7 +1,7 @@ import { SqlSyncRules } from '@powersync/service-sync-rules'; import * as mongo from 'mongodb'; -import { PersistedSyncRulesContent } from '../BucketStorage.js'; +import { ParseSyncRulesOptions, PersistedSyncRulesContent } from '../BucketStorage.js'; import { MongoPersistedSyncRules } from './MongoPersistedSyncRules.js'; import { MongoSyncRulesLock } from './MongoSyncRulesLock.js'; import { PowerSyncMongo } from './db.js'; @@ -19,7 +19,10 @@ export class MongoPersistedSyncRulesContent implements PersistedSyncRulesContent public current_lock: MongoSyncRulesLock | null = null; - constructor(private db: PowerSyncMongo, doc: mongo.WithId) { + constructor( + private db: PowerSyncMongo, + doc: mongo.WithId + ) { this.id = doc._id; this.sync_rules_content = doc.content; this.last_checkpoint_lsn = doc.last_checkpoint_lsn; @@ -30,10 +33,10 @@ export class MongoPersistedSyncRulesContent implements PersistedSyncRulesContent this.last_keepalive_ts = doc.last_keepalive_ts; } - parsed() { + parsed(options: ParseSyncRulesOptions) { return new MongoPersistedSyncRules( this.id, - SqlSyncRules.fromYaml(this.sync_rules_content), + SqlSyncRules.fromYaml(this.sync_rules_content, options), this.last_checkpoint_lsn, this.slot_name ); diff --git a/packages/service-core/src/storage/mongo/MongoStorageProvider.ts b/packages/service-core/src/storage/mongo/MongoStorageProvider.ts new file mode 100644 index 000000000..b4e84f206 --- /dev/null +++ b/packages/service-core/src/storage/mongo/MongoStorageProvider.ts @@ -0,0 +1,31 @@ +import { logger } from '@powersync/lib-services-framework'; +import * as db from '../../db/db-index.js'; +import { MongoBucketStorage } from '../MongoBucketStorage.js'; +import { ActiveStorage, BucketStorageProvider, GetStorageOptions } from '../StorageProvider.js'; +import { PowerSyncMongo } from './db.js'; + +export class MongoStorageProvider implements BucketStorageProvider { + get type() { + return 'mongodb'; + } + + async getStorage(options: GetStorageOptions): Promise { + const { resolvedConfig } = options; + + const client = db.mongo.createMongoClient(resolvedConfig.storage); + + const database = new PowerSyncMongo(client, { database: resolvedConfig.storage.database }); + + return { + storage: new MongoBucketStorage(database, { + // TODO currently need the entire resolved config due to this + slot_name_prefix: resolvedConfig.slot_name_prefix + }), + shutDown: () => client.close(), + tearDown: () => { + logger.info(`Tearing down storage: ${database.db.namespace}...`); + return database.db.dropDatabase(); + } + } satisfies ActiveStorage; + } +} diff --git a/packages/service-core/src/storage/mongo/MongoSyncBucketStorage.ts 
b/packages/service-core/src/storage/mongo/MongoSyncBucketStorage.ts index 762b05f63..88da4540d 100644 --- a/packages/service-core/src/storage/mongo/MongoSyncBucketStorage.ts +++ b/packages/service-core/src/storage/mongo/MongoSyncBucketStorage.ts @@ -2,8 +2,9 @@ import { SqliteJsonRow, SqliteJsonValue, SqlSyncRules } from '@powersync/service import * as bson from 'bson'; import * as mongo from 'mongodb'; +import { DisposableObserver, logger } from '@powersync/lib-services-framework'; +import * as timers from 'timers/promises'; import * as db from '../../db/db-index.js'; -import * as replication from '../../replication/WalStream.js'; import * as util from '../../util/util-index.js'; import { BucketDataBatchOptions, @@ -12,24 +13,39 @@ import { DEFAULT_DOCUMENT_BATCH_LIMIT, DEFAULT_DOCUMENT_CHUNK_LIMIT_BYTES, FlushedResult, + ParseSyncRulesOptions, + PersistedSyncRulesContent, + ReplicationCheckpoint, ResolveTableOptions, ResolveTableResult, + StartBatchOptions, SyncBucketDataBatch, SyncRulesBucketStorage, - SyncRuleStatus + SyncRulesBucketStorageListener, + SyncRuleStatus, + TerminateOptions } from '../BucketStorage.js'; import { ChecksumCache, FetchPartialBucketChecksum, PartialChecksum, PartialChecksumMap } from '../ChecksumCache.js'; import { MongoBucketStorage } from '../MongoBucketStorage.js'; import { SourceTable } from '../SourceTable.js'; +import { + BatchedCustomWriteCheckpointOptions, + ManagedWriteCheckpointOptions, + SyncStorageLastWriteCheckpointFilters, + WriteCheckpointAPI, + WriteCheckpointMode +} from '../WriteCheckpointAPI.js'; import { PowerSyncMongo } from './db.js'; import { BucketDataDocument, BucketDataKey, SourceKey, SyncRuleState } from './models.js'; import { MongoBucketBatch } from './MongoBucketBatch.js'; import { MongoCompactor } from './MongoCompactor.js'; +import { MongoWriteCheckpointAPI } from './MongoWriteCheckpointAPI.js'; import { BSON_DESERIALIZE_OPTIONS, idPrefixFilter, mapOpEntry, readSingleBatch, serializeLookup } from './util.js'; -import { logger } from '@powersync/lib-services-framework'; -import * as timers from 'timers/promises'; -export class MongoSyncBucketStorage implements SyncRulesBucketStorage { +export class MongoSyncBucketStorage + extends DisposableObserver + implements SyncRulesBucketStorage +{ private readonly db: PowerSyncMongo; private checksumCache = new ChecksumCache({ fetchChecksums: (batch) => { @@ -37,16 +53,70 @@ export class MongoSyncBucketStorage implements SyncRulesBucketStorage { } }); + private parsedSyncRulesCache: { parsed: SqlSyncRules; options: ParseSyncRulesOptions } | undefined; + private writeCheckpointAPI: WriteCheckpointAPI; + constructor( public readonly factory: MongoBucketStorage, public readonly group_id: number, - public readonly sync_rules: SqlSyncRules, - public readonly slot_name: string + private readonly sync_rules: PersistedSyncRulesContent, + public readonly slot_name: string, + writeCheckpointMode: WriteCheckpointMode = WriteCheckpointMode.MANAGED ) { + super(); this.db = factory.db; + this.writeCheckpointAPI = new MongoWriteCheckpointAPI({ + db: this.db, + mode: writeCheckpointMode + }); + } + + get writeCheckpointMode() { + return this.writeCheckpointAPI.writeCheckpointMode; + } + + setWriteCheckpointMode(mode: WriteCheckpointMode): void { + this.writeCheckpointAPI.setWriteCheckpointMode(mode); + } + + batchCreateCustomWriteCheckpoints(checkpoints: BatchedCustomWriteCheckpointOptions[]): Promise { + return this.writeCheckpointAPI.batchCreateCustomWriteCheckpoints( + checkpoints.map((checkpoint) => ({ 
...checkpoint, sync_rules_id: this.group_id })) + ); + } + + createCustomWriteCheckpoint(checkpoint: BatchedCustomWriteCheckpointOptions): Promise { + return this.writeCheckpointAPI.createCustomWriteCheckpoint({ + ...checkpoint, + sync_rules_id: this.group_id + }); } - async getCheckpoint() { + createManagedWriteCheckpoint(checkpoint: ManagedWriteCheckpointOptions): Promise { + return this.writeCheckpointAPI.createManagedWriteCheckpoint(checkpoint); + } + + lastWriteCheckpoint(filters: SyncStorageLastWriteCheckpointFilters): Promise { + return this.writeCheckpointAPI.lastWriteCheckpoint({ + ...filters, + sync_rules_id: this.group_id + }); + } + + getParsedSyncRules(options: ParseSyncRulesOptions): SqlSyncRules { + const { parsed, options: cachedOptions } = this.parsedSyncRulesCache ?? {}; + /** + * Check if the cached sync rules, if present, had the same options. + * Parse sync rules if the options are different or if there is no cached value. + */ + if (!parsed || options.defaultSchema != cachedOptions?.defaultSchema) { + this.parsedSyncRulesCache = { parsed: this.sync_rules.parsed(options).sync_rules, options }; + } + + return this.parsedSyncRulesCache!.parsed; + } + + async getCheckpoint(): Promise { const doc = await this.db.sync_rules.findOne( { _id: this.group_id }, { @@ -55,11 +125,14 @@ export class MongoSyncBucketStorage implements SyncRulesBucketStorage { ); return { checkpoint: util.timestampToOpId(doc?.last_checkpoint ?? 0n), - lsn: doc?.last_checkpoint_lsn ?? replication.ZERO_LSN + lsn: doc?.last_checkpoint_lsn ?? null }; } - async startBatch(options: {}, callback: (batch: BucketStorageBatch) => Promise): Promise { + async startBatch( + options: StartBatchOptions, + callback: (batch: BucketStorageBatch) => Promise + ): Promise { const doc = await this.db.sync_rules.findOne( { _id: this.group_id @@ -68,35 +141,36 @@ export class MongoSyncBucketStorage implements SyncRulesBucketStorage { ); const checkpoint_lsn = doc?.last_checkpoint_lsn ?? null; - const batch = new MongoBucketBatch( - this.db, - this.sync_rules, - this.group_id, - this.slot_name, - checkpoint_lsn, - doc?.no_checkpoint_before ?? null - ); - try { - await callback(batch); - await batch.flush(); - await batch.abort(); - if (batch.last_flushed_op) { - return { flushed_op: String(batch.last_flushed_op) }; - } else { - return null; - } - } catch (e) { - await batch.abort(); - throw e; + await using batch = new MongoBucketBatch({ + db: this.db, + syncRules: this.sync_rules.parsed(options).sync_rules, + groupId: this.group_id, + slotName: this.slot_name, + lastCheckpointLsn: checkpoint_lsn, + noCheckpointBeforeLsn: doc?.no_checkpoint_before ?? 
options.zeroLSN, + storeCurrentData: options.storeCurrentData + }); + this.iterateListeners((cb) => cb.batchStarted?.(batch)); + + await callback(batch); + await batch.flush(); + if (batch.last_flushed_op) { + return { flushed_op: String(batch.last_flushed_op) }; + } else { + return null; } } async resolveTable(options: ResolveTableOptions): Promise { - const { group_id, connection_id, connection_tag, relation } = options; + const { group_id, connection_id, connection_tag, entity_descriptor } = options; - const { schema, name: table, relationId, replicationColumns } = relation; + const { schema, name: table, objectId, replicationColumns } = entity_descriptor; - const columns = replicationColumns.map((column) => ({ name: column.name, type_oid: column.typeOid })); + const columns = replicationColumns.map((column) => ({ + name: column.name, + type: column.type, + type_oid: column.typeId + })); let result: ResolveTableResult | null = null; await this.db.client.withSession(async (session) => { const col = this.db.source_tables; @@ -104,7 +178,7 @@ export class MongoSyncBucketStorage implements SyncRulesBucketStorage { { group_id: group_id, connection_id: connection_id, - relation_id: relationId, + relation_id: objectId, schema_name: schema, table_name: table, replica_id_columns2: columns @@ -116,7 +190,7 @@ export class MongoSyncBucketStorage implements SyncRulesBucketStorage { _id: new bson.ObjectId(), group_id: group_id, connection_id: connection_id, - relation_id: relationId, + relation_id: objectId, schema_name: schema, table_name: table, replica_id_columns: null, @@ -129,12 +203,13 @@ export class MongoSyncBucketStorage implements SyncRulesBucketStorage { const sourceTable = new SourceTable( doc._id, connection_tag, - relationId, + objectId, schema, table, replicationColumns, doc.snapshot_done ?? true ); + sourceTable.syncEvent = options.sync_rules.tableTriggersEvent(sourceTable); sourceTable.syncData = options.sync_rules.tableSyncsData(sourceTable); sourceTable.syncParameters = options.sync_rules.tableSyncsParameters(sourceTable); @@ -144,7 +219,7 @@ export class MongoSyncBucketStorage implements SyncRulesBucketStorage { group_id: group_id, connection_id: connection_id, _id: { $ne: doc._id }, - $or: [{ relation_id: relationId }, { schema_name: schema, table_name: table }] + $or: [{ relation_id: objectId }, { schema_name: schema, table_name: table }] }, { session } ) @@ -159,7 +234,7 @@ export class MongoSyncBucketStorage implements SyncRulesBucketStorage { doc.relation_id ?? 0, doc.schema_name, doc.table_name, - doc.replica_id_columns2?.map((c) => ({ name: c.name, typeOid: c.type_oid })) ?? [], + doc.replica_id_columns2?.map((c) => ({ name: c.name, typeOid: c.type_oid, type: c.type })) ?? [], doc.snapshot_done ?? true ) ) @@ -398,9 +473,11 @@ export class MongoSyncBucketStorage implements SyncRulesBucketStorage { ); } - async terminate() { - await this.clear(); - + async terminate(options?: TerminateOptions) { + // Default is to clear the storage except when explicitly requested not to. 
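For comparison with the old `relation: PgRelation` field, a hypothetical Postgres-flavoured `resolveTable` call using the new `entity_descriptor` could look like this (object ids, type ids and group/connection ids are placeholders):

```ts
import { SqlSyncRules } from '@powersync/service-sync-rules';
import { SyncRulesBucketStorage } from '../BucketStorage.js';

// Sketch only.
async function resolveUsersTable(syncStorage: SyncRulesBucketStorage, syncRules: SqlSyncRules) {
  return await syncStorage.resolveTable({
    group_id: 1,
    connection_id: 1,
    connection_tag: 'default',
    entity_descriptor: {
      objectId: 16384,
      schema: 'public',
      name: 'users',
      // typeId 2950 is the Postgres OID for uuid; sources without OIDs might supply only `type`.
      replicationColumns: [{ name: 'id', type: 'uuid', typeId: 2950 }]
    },
    sync_rules: syncRules
  });
}
```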
+ if (!options || options?.clearStorage) { + await this.clear(); + } await this.db.sync_rules.updateOne( { _id: this.group_id diff --git a/packages/service-core/src/storage/mongo/MongoSyncRulesLock.ts b/packages/service-core/src/storage/mongo/MongoSyncRulesLock.ts index 5a9711ab3..76fee7e1e 100644 --- a/packages/service-core/src/storage/mongo/MongoSyncRulesLock.ts +++ b/packages/service-core/src/storage/mongo/MongoSyncRulesLock.ts @@ -9,7 +9,7 @@ import { logger } from '@powersync/lib-services-framework'; * replicates those sync rules at a time. */ export class MongoSyncRulesLock implements ReplicationLock { - private readonly refreshInterval: NodeJS.Timer; + private readonly refreshInterval: NodeJS.Timeout; static async createLock(db: PowerSyncMongo, sync_rules: PersistedSyncRulesContent): Promise { const lockId = crypto.randomBytes(8).toString('hex'); @@ -30,12 +30,16 @@ export class MongoSyncRulesLock implements ReplicationLock { ); if (doc == null) { - throw new Error(`Replication slot ${sync_rules.slot_name} is locked by another process`); + throw new Error(`Sync rules: ${sync_rules.id} have been locked by another process for replication.`); } return new MongoSyncRulesLock(db, sync_rules.id, lockId); } - constructor(private db: PowerSyncMongo, public sync_rules_id: number, private lock_id: string) { + constructor( + private db: PowerSyncMongo, + public sync_rules_id: number, + private lock_id: string + ) { this.refreshInterval = setInterval(async () => { try { await this.refresh(); diff --git a/packages/service-core/src/storage/mongo/MongoWriteCheckpointAPI.ts b/packages/service-core/src/storage/mongo/MongoWriteCheckpointAPI.ts new file mode 100644 index 000000000..966eb77be --- /dev/null +++ b/packages/service-core/src/storage/mongo/MongoWriteCheckpointAPI.ts @@ -0,0 +1,151 @@ +import * as framework from '@powersync/lib-services-framework'; +import { + CustomWriteCheckpointFilters, + CustomWriteCheckpointOptions, + LastWriteCheckpointFilters, + ManagedWriteCheckpointFilters, + ManagedWriteCheckpointOptions, + WriteCheckpointAPI, + WriteCheckpointMode +} from '../WriteCheckpointAPI.js'; +import { PowerSyncMongo } from './db.js'; + +export type MongoCheckpointAPIOptions = { + db: PowerSyncMongo; + mode: WriteCheckpointMode; +}; + +export class MongoWriteCheckpointAPI implements WriteCheckpointAPI { + readonly db: PowerSyncMongo; + private _mode: WriteCheckpointMode; + + constructor(options: MongoCheckpointAPIOptions) { + this.db = options.db; + this._mode = options.mode; + } + + get writeCheckpointMode() { + return this._mode; + } + + setWriteCheckpointMode(mode: WriteCheckpointMode): void { + this._mode = mode; + } + + async batchCreateCustomWriteCheckpoints(checkpoints: CustomWriteCheckpointOptions[]): Promise { + return batchCreateCustomWriteCheckpoints(this.db, checkpoints); + } + + async createCustomWriteCheckpoint(options: CustomWriteCheckpointOptions): Promise { + if (this.writeCheckpointMode !== WriteCheckpointMode.CUSTOM) { + throw new framework.errors.ValidationError( + `Creating a custom Write Checkpoint when the current Write Checkpoint mode is set to "${this.writeCheckpointMode}"` + ); + } + + const { checkpoint, user_id, sync_rules_id } = options; + const doc = await this.db.custom_write_checkpoints.findOneAndUpdate( + { + user_id: user_id, + sync_rules_id + }, + { + $set: { + checkpoint + } + }, + { upsert: true, returnDocument: 'after' } + ); + return doc!.checkpoint; + } + + async createManagedWriteCheckpoint(checkpoint: ManagedWriteCheckpointOptions): Promise { + if 
(this.writeCheckpointMode !== WriteCheckpointMode.MANAGED) { + throw new framework.errors.ValidationError( + `Attempting to create a managed Write Checkpoint when the current Write Checkpoint mode is set to "${this.writeCheckpointMode}"` + ); + } + + const { user_id, heads: lsns } = checkpoint; + const doc = await this.db.write_checkpoints.findOneAndUpdate( + { + user_id: user_id + }, + { + $set: { + lsns + }, + $inc: { + client_id: 1n + } + }, + { upsert: true, returnDocument: 'after' } + ); + return doc!.client_id; + } + + async lastWriteCheckpoint(filters: LastWriteCheckpointFilters): Promise { + switch (this.writeCheckpointMode) { + case WriteCheckpointMode.CUSTOM: + if (false == 'sync_rules_id' in filters) { + throw new framework.errors.ValidationError(`Sync rules ID is required for custom Write Checkpoint filtering`); + } + return this.lastCustomWriteCheckpoint(filters); + case WriteCheckpointMode.MANAGED: + if (false == 'heads' in filters) { + throw new framework.errors.ValidationError( + `Replication HEAD is required for managed Write Checkpoint filtering` + ); + } + return this.lastManagedWriteCheckpoint(filters); + } + } + + protected async lastCustomWriteCheckpoint(filters: CustomWriteCheckpointFilters) { + const { user_id, sync_rules_id } = filters; + const lastWriteCheckpoint = await this.db.custom_write_checkpoints.findOne({ + user_id, + sync_rules_id + }); + return lastWriteCheckpoint?.checkpoint ?? null; + } + + protected async lastManagedWriteCheckpoint(filters: ManagedWriteCheckpointFilters) { + const { user_id, heads } = filters; + // TODO: support multiple heads when we need to support multiple connections + const lsn = heads['1']; + if (lsn == null) { + // Can happen if we haven't replicated anything yet. + return null; + } + const lastWriteCheckpoint = await this.db.write_checkpoints.findOne({ + user_id: user_id, + 'lsns.1': { $lte: lsn } + }); + return lastWriteCheckpoint?.client_id ?? null; + } +} + +export async function batchCreateCustomWriteCheckpoints( + db: PowerSyncMongo, + checkpoints: CustomWriteCheckpointOptions[] +): Promise { + if (!checkpoints.length) { + return; + } + + await db.custom_write_checkpoints.bulkWrite( + checkpoints.map((checkpointOptions) => ({ + updateOne: { + filter: { user_id: checkpointOptions.user_id, sync_rules_id: checkpointOptions.sync_rules_id }, + update: { + $set: { + checkpoint: checkpointOptions.checkpoint, + sync_rules_id: checkpointOptions.sync_rules_id + } + }, + upsert: true + } + })) + ); +} diff --git a/packages/service-core/src/storage/mongo/OperationBatch.ts b/packages/service-core/src/storage/mongo/OperationBatch.ts index 9e1edbb6d..127562d54 100644 --- a/packages/service-core/src/storage/mongo/OperationBatch.ts +++ b/packages/service-core/src/storage/mongo/OperationBatch.ts @@ -1,8 +1,9 @@ -import * as bson from 'bson'; import { ToastableSqliteRow } from '@powersync/service-sync-rules'; +import * as bson from 'bson'; -import * as util from '../../util/util-index.js'; import { SaveOptions } from '../BucketStorage.js'; +import { isUUID } from './util.js'; +import { ReplicaId } from './models.js'; /** * Maximum number of operations in a batch. @@ -42,7 +43,16 @@ export class OperationBatch { return this.batch.length >= MAX_BATCH_COUNT || this.currentSize > MAX_RECORD_BATCH_SIZE; } - *batched(sizes: Map): Generator { + /** + * + * @param sizes Map of source key to estimated size of the current_data document, or undefined if current_data is not persisted. 
+ * + */ + *batched(sizes: Map | undefined): Generator { + if (sizes == null) { + yield this.batch; + return; + } let currentBatch: RecordOperation[] = []; let currentBatchSize = 0; for (let op of this.batch) { @@ -63,18 +73,15 @@ export class OperationBatch { } export class RecordOperation { - public readonly afterId: bson.UUID | null; - public readonly beforeId: bson.UUID; + public readonly afterId: ReplicaId | null; + public readonly beforeId: ReplicaId; public readonly internalBeforeKey: string; public readonly internalAfterKey: string | null; public readonly estimatedSize: number; constructor(public readonly record: SaveOptions) { - const after = record.after; - const afterId = after ? util.getUuidReplicaIdentityBson(after, record.sourceTable.replicaIdColumns!) : null; - const beforeId = record.before - ? util.getUuidReplicaIdentityBson(record.before, record.sourceTable.replicaIdColumns!) - : afterId!; + const afterId = record.afterReplicaId ?? null; + const beforeId = record.beforeReplicaId ?? record.afterReplicaId; this.afterId = afterId; this.beforeId = beforeId; this.internalBeforeKey = cacheKey(record.sourceTable.id, beforeId); @@ -84,8 +91,17 @@ export class RecordOperation { } } -export function cacheKey(table: bson.ObjectId, id: bson.UUID) { - return `${table.toHexString()}.${id.toHexString()}`; +/** + * In-memory cache key - must not be persisted. + */ +export function cacheKey(table: bson.ObjectId, id: ReplicaId) { + if (isUUID(id)) { + return `${table.toHexString()}.${id.toHexString()}`; + } else if (typeof id == 'string') { + return `${table.toHexString()}.${id}`; + } else { + return `${table.toHexString()}.${(bson.serialize({ id: id }) as Buffer).toString('base64')}`; + } } /** diff --git a/packages/service-core/src/storage/mongo/PersistedBatch.ts b/packages/service-core/src/storage/mongo/PersistedBatch.ts index 486c9d800..106dcadfd 100644 --- a/packages/service-core/src/storage/mongo/PersistedBatch.ts +++ b/packages/service-core/src/storage/mongo/PersistedBatch.ts @@ -13,9 +13,10 @@ import { BucketParameterDocument, CurrentBucket, CurrentDataDocument, - SourceKey + SourceKey, + ReplicaId } from './models.js'; -import { serializeLookup } from './util.js'; +import { replicaIdToSubkey, serializeLookup } from './util.js'; import { logger } from '@powersync/lib-services-framework'; /** @@ -53,13 +54,16 @@ export class PersistedBatch { */ currentSize = 0; - constructor(private group_id: number, writtenSize: number) { + constructor( + private group_id: number, + writtenSize: number + ) { this.currentSize = writtenSize; } saveBucketData(options: { op_seq: MongoIdSequence; - sourceKey: bson.UUID; + sourceKey: ReplicaId; table: SourceTable; evaluated: EvaluatedRow[]; before_buckets: CurrentBucket[]; @@ -70,7 +74,7 @@ export class PersistedBatch { remaining_buckets.set(key, b); } - const dchecksum = util.hashDelete(`${options.table.id}/${options.sourceKey}`); + const dchecksum = util.hashDelete(replicaIdToSubkey(options.table.id, options.sourceKey)); for (let k of options.evaluated) { const key = currentBucketKey(k); @@ -134,7 +138,7 @@ export class PersistedBatch { saveParameterData(data: { op_seq: MongoIdSequence; - sourceKey: bson.UUID; + sourceKey: ReplicaId; sourceTable: SourceTable; evaluated: EvaluatedParameters[]; existing_lookups: bson.Binary[]; diff --git a/packages/service-core/src/storage/mongo/config.ts b/packages/service-core/src/storage/mongo/config.ts new file mode 100644 index 000000000..8ff241e25 --- /dev/null +++ 
b/packages/service-core/src/storage/mongo/config.ts @@ -0,0 +1,40 @@ +import * as urijs from 'uri-js'; + +export interface MongoConnectionConfig { + uri: string; + username?: string; + password?: string; + database?: string; +} + +/** + * Validate and normalize connection options. + * + * Returns destructured options. + * + * For use by both storage and mongo module. + */ +export function normalizeMongoConfig(options: MongoConnectionConfig) { + let uri = urijs.parse(options.uri); + + const database = options.database ?? uri.path?.substring(1) ?? ''; + + const userInfo = uri.userinfo?.split(':'); + + const username = options.username ?? userInfo?.[0]; + const password = options.password ?? userInfo?.[1]; + + if (database == '') { + throw new Error(`database required`); + } + + delete uri.userinfo; + + return { + uri: urijs.serialize(uri), + database, + + username, + password + }; +} diff --git a/packages/service-core/src/storage/mongo/db.ts b/packages/service-core/src/storage/mongo/db.ts index 05b0ab6fc..99bad0948 100644 --- a/packages/service-core/src/storage/mongo/db.ts +++ b/packages/service-core/src/storage/mongo/db.ts @@ -1,11 +1,13 @@ import * as mongo from 'mongodb'; +import { configFile } from '@powersync/service-types'; import * as db from '../../db/db-index.js'; import * as locks from '../../locks/locks-index.js'; import { BucketDataDocument, BucketParameterDocument, CurrentDataDocument, + CustomWriteCheckpointDocument, IdSequenceDocument, InstanceDocument, SourceTableDocument, @@ -13,7 +15,6 @@ import { WriteCheckpointDocument } from './models.js'; import { BSON_DESERIALIZE_OPTIONS } from './util.js'; -import { configFile } from '@powersync/service-types'; export interface PowerSyncMongoOptions { /** @@ -33,6 +34,7 @@ export class PowerSyncMongo { readonly op_id_sequence: mongo.Collection; readonly sync_rules: mongo.Collection; readonly source_tables: mongo.Collection; + readonly custom_write_checkpoints: mongo.Collection; readonly write_checkpoints: mongo.Collection; readonly instance: mongo.Collection; readonly locks: mongo.Collection; @@ -54,6 +56,7 @@ export class PowerSyncMongo { this.op_id_sequence = db.collection('op_id_sequence'); this.sync_rules = db.collection('sync_rules'); this.source_tables = db.collection('source_tables'); + this.custom_write_checkpoints = db.collection('custom_write_checkpoints'); this.write_checkpoints = db.collection('write_checkpoints'); this.instance = db.collection('instance'); this.locks = this.db.collection('locks'); diff --git a/packages/service-core/src/storage/mongo/models.ts b/packages/service-core/src/storage/mongo/models.ts index 8ac4ed301..a85886c4e 100644 --- a/packages/service-core/src/storage/mongo/models.ts +++ b/packages/service-core/src/storage/mongo/models.ts @@ -1,5 +1,14 @@ -import * as bson from 'bson'; import { SqliteJsonValue } from '@powersync/service-sync-rules'; +import * as bson from 'bson'; + +/** + * Replica id uniquely identifying a row on the source database. + * + * Can be any value serializable to BSON. + * + * If the value is an entire document, the data serialized to a v5 UUID may be a good choice here. 
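The shared `normalizeMongoConfig` helper above splits credentials out of the connection URI and resolves the database name; roughly (values illustrative):

```ts
import { normalizeMongoConfig } from './mongo/config.js';

const normalized = normalizeMongoConfig({
  uri: 'mongodb://sync_user:secret@localhost:27017/powersync_storage'
});

// normalized.database -> 'powersync_storage'
// normalized.username -> 'sync_user'
// normalized.password -> 'secret'
// normalized.uri      -> the input URI with the userinfo component stripped
```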
+ */ +export type ReplicaId = bson.UUID | bson.Document | any; export interface SourceKey { /** group_id */ @@ -7,7 +16,7 @@ export interface SourceKey { /** source table id */ t: bson.ObjectId; /** source key */ - k: bson.UUID; + k: ReplicaId; } export interface BucketDataKey { @@ -43,7 +52,7 @@ export interface BucketDataDocument { _id: BucketDataKey; op: OpType; source_table?: bson.ObjectId; - source_key?: bson.UUID; + source_key?: ReplicaId; table?: string; row_id?: string; checksum: number; @@ -57,11 +66,11 @@ export interface SourceTableDocument { _id: bson.ObjectId; group_id: number; connection_id: number; - relation_id: number | undefined; + relation_id: number | string | undefined; schema_name: string; table_name: string; replica_id_columns: string[] | null; - replica_id_columns2: { name: string; type_oid: number }[] | undefined; + replica_id_columns2: { name: string; type_oid?: number; type?: string }[] | undefined; snapshot_done: boolean | undefined; } @@ -150,6 +159,13 @@ export interface SyncRuleDocument { content: string; } +export interface CustomWriteCheckpointDocument { + _id: bson.ObjectId; + user_id: string; + checkpoint: bigint; + sync_rules_id: number; +} + export interface WriteCheckpointDocument { _id: bson.ObjectId; user_id: string; diff --git a/packages/service-core/src/storage/mongo/util.ts b/packages/service-core/src/storage/mongo/util.ts index dc59833dd..d6b36a7dc 100644 --- a/packages/service-core/src/storage/mongo/util.ts +++ b/packages/service-core/src/storage/mongo/util.ts @@ -2,9 +2,10 @@ import { SqliteJsonValue } from '@powersync/service-sync-rules'; import * as bson from 'bson'; import * as crypto from 'crypto'; import * as mongo from 'mongodb'; +import * as uuid from 'uuid'; import { OplogEntry } from '../../util/protocol-types.js'; -import { timestampToOpId } from '../../util/utils.js'; -import { BucketDataDocument } from './models.js'; +import { ID_NAMESPACE, timestampToOpId } from '../../util/utils.js'; +import { BucketDataDocument, ReplicaId } from './models.js'; /** * Lookup serialization must be number-agnostic. I.e. normalize numbers, instead of preserving numbers. @@ -98,7 +99,7 @@ export function mapOpEntry(row: BucketDataDocument): OplogEntry { object_type: row.table, object_id: row.row_id, checksum: Number(row.checksum), - subkey: `${row.source_table}/${row.source_key!.toHexString()}`, + subkey: replicaIdToSubkey(row.source_table!, row.source_key!), data: row.data }; } else { @@ -111,3 +112,47 @@ export function mapOpEntry(row: BucketDataDocument): OplogEntry { }; } } + +/** + * Returns true if two ReplicaId values are the same (serializes to the same BSON value). 
+ */ +export function replicaIdEquals(a: ReplicaId, b: ReplicaId) { + if (a === b) { + return true; + } else if (typeof a == 'string' && typeof b == 'string') { + return a == b; + } else if (isUUID(a) && isUUID(b)) { + return a.equals(b); + } else if (a == null && b == null) { + return true; + } else if (a == null || b == null) { + return false; + } else { + // There are many possible primitive values, this covers them all + return (bson.serialize({ id: a }) as Buffer).equals(bson.serialize({ id: b })); + } +} + +export function replicaIdToSubkey(table: bson.ObjectId, id: ReplicaId): string { + if (isUUID(id)) { + // Special case for UUID for backwards-compatibility + return `${table.toHexString()}/${id.toHexString()}`; + } else { + // Hashed UUID from the table and id + const repr = bson.serialize({ table, id }); + return uuid.v5(repr, ID_NAMESPACE); + } +} + +/** + * True if this is a bson.UUID. + * + * Works even with multiple copies of the bson package. + */ +export function isUUID(value: any): value is bson.UUID { + if (value == null || typeof value != 'object') { + return false; + } + const uuid = value as bson.UUID; + return uuid._bsontype == 'Binary' && uuid.sub_type == bson.Binary.SUBTYPE_UUID; +} diff --git a/packages/service-core/src/storage/storage-index.ts b/packages/service-core/src/storage/storage-index.ts index 7ec8de933..231bb3849 100644 --- a/packages/service-core/src/storage/storage-index.ts +++ b/packages/service-core/src/storage/storage-index.ts @@ -1,15 +1,21 @@ -export * from './SourceTable.js'; -export * from './MongoBucketStorage.js'; export * from './BucketStorage.js'; +export * from './MongoBucketStorage.js'; +export * from './ReplicationEventPayload.js'; +export * from './SourceEntity.js'; +export * from './SourceTable.js'; +export * from './StorageEngine.js'; +export * from './mongo/config.js'; export * from './mongo/db.js'; export * from './mongo/models.js'; export * from './mongo/MongoBucketBatch.js'; export * from './mongo/MongoIdSequence.js'; export * from './mongo/MongoPersistedSyncRules.js'; export * from './mongo/MongoPersistedSyncRulesContent.js'; +export * from './mongo/MongoStorageProvider.js'; export * from './mongo/MongoSyncBucketStorage.js'; export * from './mongo/MongoSyncRulesLock.js'; export * from './mongo/OperationBatch.js'; export * from './mongo/PersistedBatch.js'; export * from './mongo/util.js'; +export * from './WriteCheckpointAPI.js'; diff --git a/packages/service-core/src/sync/sync.ts b/packages/service-core/src/sync/sync.ts index a04c53182..8f2f900a0 100644 --- a/packages/service-core/src/sync/sync.ts +++ b/packages/service-core/src/sync/sync.ts @@ -1,6 +1,7 @@ import { JSONBig, JsonContainer } from '@powersync/service-jsonbig'; import { RequestParameters } from '@powersync/service-sync-rules'; import { Semaphore, withTimeout } from 'async-mutex'; + import { AbortError } from 'ix/aborterror.js'; import * as auth from '../auth/auth-index.js'; @@ -35,6 +36,7 @@ export interface SyncStreamParameters { params: util.StreamingSyncRequest; syncParams: RequestParameters; token: auth.JwtPayload; + parseOptions: storage.ParseSyncRulesOptions; /** * If this signal is aborted, the stream response ends as soon as possible, without error.
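As an aside on the replica-id helpers added to storage/mongo/util.ts above, the sketch below (editorial, not part of the patch) shows the behaviour they are intended to have for the three common id shapes: UUIDs keep the legacy table/uuid subkey format, while strings and other values are hashed via BSON.

import * as bson from 'bson';
// Helpers defined in the hunk above.
import { replicaIdEquals, replicaIdToSubkey } from './util.js';

const table = new bson.ObjectId();
const id = new bson.UUID();

replicaIdEquals(id, new bson.UUID(id.toHexString())); // true - compared with UUID.equals
replicaIdEquals('t1', 't1');                          // true - plain string comparison
replicaIdEquals({ a: 1 }, { a: 1 });                  // intended to fall back to byte-wise BSON comparison

replicaIdToSubkey(table, id);   // "<table hex>/<uuid hex>" - backwards-compatible format
replicaIdToSubkey(table, 't1'); // v5 UUID hashed from { table, id }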
*/ @@ -47,7 +49,7 @@ export interface SyncStreamParameters { export async function* streamResponse( options: SyncStreamParameters ): AsyncIterable { - const { storage, params, syncParams, token, tokenStreamOptions, tracker, signal } = options; + const { storage, params, syncParams, token, tokenStreamOptions, tracker, signal, parseOptions } = options; // We also need to be able to abort, so we create our own controller. const controller = new AbortController(); if (signal) { @@ -63,7 +65,7 @@ export async function* streamResponse( } } const ki = tokenStream(token, controller.signal, tokenStreamOptions); - const stream = streamResponseInner(storage, params, syncParams, tracker, controller.signal); + const stream = streamResponseInner(storage, params, syncParams, tracker, parseOptions, controller.signal); // Merge the two streams, and abort as soon as one of the streams end. const merged = mergeAsyncIterables([stream, ki], controller.signal); @@ -87,6 +89,7 @@ async function* streamResponseInner( params: util.StreamingSyncRequest, syncParams: RequestParameters, tracker: RequestTracker, + parseOptions: storage.ParseSyncRulesOptions, signal: AbortSignal ): AsyncGenerator { // Bucket state of bucket id -> op_id. @@ -115,9 +118,9 @@ async function* streamResponseInner( // Sync rules deleted in the meantime - try again with the next checkpoint. continue; } - const sync_rules = storage.sync_rules; + const syncRules = storage.getParsedSyncRules(parseOptions); - const allBuckets = await sync_rules.queryBucketIds({ + const allBuckets = await syncRules.queryBucketIds({ getParameterSets(lookups) { return storage.getParameterSets(checkpoint, lookups); }, diff --git a/packages/service-core/src/sync/util.ts b/packages/service-core/src/sync/util.ts index 362706487..2437d38f5 100644 --- a/packages/service-core/src/sync/util.ts +++ b/packages/service-core/src/sync/util.ts @@ -1,7 +1,6 @@ import * as timers from 'timers/promises'; import * as util from '../util/util-index.js'; -import { Metrics } from '../metrics/Metrics.js'; import { RequestTracker } from './RequestTracker.js'; export type TokenStreamOptions = { diff --git a/packages/service-core/src/system/CorePowerSyncSystem.ts b/packages/service-core/src/system/CorePowerSyncSystem.ts deleted file mode 100644 index 8c37f137a..000000000 --- a/packages/service-core/src/system/CorePowerSyncSystem.ts +++ /dev/null @@ -1,64 +0,0 @@ -import * as pgwire from '@powersync/service-jpgwire'; -import { LifeCycledSystem, container, logger } from '@powersync/lib-services-framework'; - -import * as storage from '../storage/storage-index.js'; -import * as utils from '../util/util-index.js'; - -export abstract class CorePowerSyncSystem extends LifeCycledSystem { - abstract storage: storage.BucketStorageFactory; - abstract pgwire_pool?: pgwire.PgClient; - closed: boolean; - - protected stopHandlers: Set<() => void> = new Set(); - - constructor(public config: utils.ResolvedPowerSyncConfig) { - super(); - this.closed = false; - } - - get client_keystore() { - return this.config.client_keystore; - } - - get dev_client_keystore() { - return this.config.dev_client_keystore; - } - - /** - * Adds a termination handler which will call handlers registered via - * [addStopHandler]. - * This should be called after the server is started and it's termination handler is added. 
- * This is so that the handler is run before the server's handler, allowing streams to be interrupted on exit - */ - addTerminationHandler() { - container.terminationHandler.handleTerminationSignal(async () => { - // Close open streams, so that they don't block the server from closing. - // Note: This does not work well when streaming requests are queued. In that case, the server still doesn't - // close in the 30-second timeout. - this.closed = true; - logger.info(`Closing ${this.stopHandlers.size} streams`); - for (let handler of this.stopHandlers) { - handler(); - } - }); - } - - addStopHandler(handler: () => void): () => void { - if (this.closed) { - handler(); - return () => {}; - } - this.stopHandlers.add(handler); - return () => { - this.stopHandlers.delete(handler); - }; - } - - requirePgPool() { - if (this.pgwire_pool == null) { - throw new Error('No source connection configured'); - } else { - return this.pgwire_pool!; - } - } -} diff --git a/packages/service-core/src/system/ServiceContext.ts b/packages/service-core/src/system/ServiceContext.ts new file mode 100644 index 000000000..642c3a9ae --- /dev/null +++ b/packages/service-core/src/system/ServiceContext.ts @@ -0,0 +1,68 @@ +import { LifeCycledSystem, ServiceIdentifier, container } from '@powersync/lib-services-framework'; + +import * as metrics from '../metrics/Metrics.js'; +import * as replication from '../replication/replication-index.js'; +import * as routes from '../routes/routes-index.js'; +import * as storage from '../storage/storage-index.js'; +import * as utils from '../util/util-index.js'; + +export interface ServiceContext { + configuration: utils.ResolvedPowerSyncConfig; + lifeCycleEngine: LifeCycledSystem; + metrics: metrics.Metrics | null; + replicationEngine: replication.ReplicationEngine | null; + routerEngine: routes.RouterEngine | null; + storageEngine: storage.StorageEngine; +} + +/** + * Context which allows for registering and getting implementations + * of various service engines. + * This controls registering, initializing and the lifecycle of various services. + */ +export class ServiceContextContainer implements ServiceContext { + lifeCycleEngine: LifeCycledSystem; + storageEngine: storage.StorageEngine; + + constructor(public configuration: utils.ResolvedPowerSyncConfig) { + this.lifeCycleEngine = new LifeCycledSystem(); + + this.storageEngine = new storage.StorageEngine({ + configuration + }); + this.lifeCycleEngine.withLifecycle(this.storageEngine, { + start: (storageEngine) => storageEngine.start(), + stop: (storageEngine) => storageEngine.shutDown() + }); + + // Mongo storage is available as an option by default TODO: Consider moving this to a Mongo Storage Module + this.storageEngine.registerProvider(new storage.MongoStorageProvider()); + } + + get replicationEngine(): replication.ReplicationEngine | null { + return container.getOptional(replication.ReplicationEngine); + } + + get routerEngine(): routes.RouterEngine | null { + return container.getOptional(routes.RouterEngine); + } + + get metrics(): metrics.Metrics | null { + return container.getOptional(metrics.Metrics); + } + + /** + * Allows for registering core and generic implementations of services/helpers. + * This uses the framework container under the hood. + */ + register(identifier: ServiceIdentifier, implementation: T) { + container.register(identifier, implementation); + } + + /** + * Gets the implementation of an identifiable service. 
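A brief usage sketch (editorial, not part of the patch) of the ServiceContextContainer above: configuration comes from the registered config collector, generic services are keyed by class on the framework container, and engine getters return null until a module registers an implementation. MyCache and the import paths are illustrative.

import { ServiceContextContainer } from '../system/ServiceContext.js';
import { ResolvedPowerSyncConfig } from '../util/util-index.js';

class MyCache {
  values = new Map<string, string>();
}

function setupContext(configuration: ResolvedPowerSyncConfig) {
  const context = new ServiceContextContainer(configuration);

  // Generic helpers are registered on the framework container, keyed by class.
  context.register(MyCache, new MyCache());
  const cache = context.get(MyCache);

  // Engines are registered by modules; the getters return null until then.
  const router = context.routerEngine;
  return { context, cache, router };
}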
+ */ + get(identifier: ServiceIdentifier) { + return container.getImplementation(identifier); + } +} diff --git a/packages/service-core/src/system/system-index.ts b/packages/service-core/src/system/system-index.ts index f40b2a60e..cf72dd126 100644 --- a/packages/service-core/src/system/system-index.ts +++ b/packages/service-core/src/system/system-index.ts @@ -1 +1 @@ -export * from './CorePowerSyncSystem.js'; +export * from './ServiceContext.js'; diff --git a/packages/service-core/src/util/config.ts b/packages/service-core/src/util/config.ts index f11e798a3..58c189d89 100644 --- a/packages/service-core/src/util/config.ts +++ b/packages/service-core/src/util/config.ts @@ -1,31 +1,14 @@ import * as fs from 'fs/promises'; -import { baseUri } from '@powersync/service-types'; -import { ResolvedConnection, ResolvedPowerSyncConfig, RunnerConfig } from './config/types.js'; -import { CompoundConfigCollector } from './config/compound-config-collector.js'; +import { container } from '@powersync/lib-services-framework'; +import { ResolvedPowerSyncConfig, RunnerConfig } from './config/types.js'; +import { CompoundConfigCollector } from './util-index.js'; /** - * Build a single URI from full postgres credentials. + * Loads the resolved config using the registered config collector */ -export function buildDemoPgUri(options: ResolvedConnection): string { - if (!options.debug_api) { - throw new Error('Not supported'); - } - - const uri = new URL(baseUri(options)); - uri.username = options.username; - uri.password = options.password; - if (options.sslmode != 'disable') { - // verify-full is tricky to actually use on a client, since they won't have the cert - // Just use "require" by default - // uri.searchParams.set('sslmode', options.sslmode); - uri.searchParams.set('sslmode', 'require'); - } - return uri.toString(); -} - -export function loadConfig(runnerConfig: RunnerConfig = {}) { - const collector = new CompoundConfigCollector(); +export async function loadConfig(runnerConfig: RunnerConfig) { + const collector = container.getImplementation(CompoundConfigCollector); return collector.collectConfig(runnerConfig); } diff --git a/packages/service-core/src/util/config/compound-config-collector.ts b/packages/service-core/src/util/config/compound-config-collector.ts index 3ae856604..7c2b70c87 100644 --- a/packages/service-core/src/util/config/compound-config-collector.ts +++ b/packages/service-core/src/util/config/compound-config-collector.ts @@ -1,17 +1,15 @@ -import { configFile, normalizeConnection } from '@powersync/service-types'; -import { ConfigCollector } from './collectors/config-collector.js'; -import { ResolvedConnection, ResolvedPowerSyncConfig, RunnerConfig, SyncRulesConfig } from './types.js'; +import { logger } from '@powersync/lib-services-framework'; +import { configFile } from '@powersync/service-types'; import * as auth from '../../auth/auth-index.js'; -import { SyncRulesCollector } from './sync-rules/sync-collector.js'; +import { ConfigCollector } from './collectors/config-collector.js'; import { Base64ConfigCollector } from './collectors/impl/base64-config-collector.js'; +import { FallbackConfigCollector } from './collectors/impl/fallback-config-collector.js'; import { FileSystemConfigCollector } from './collectors/impl/filesystem-config-collector.js'; import { Base64SyncRulesCollector } from './sync-rules/impl/base64-sync-rules-collector.js'; -import { InlineSyncRulesCollector } from './sync-rules/impl/inline-sync-rules-collector.js'; import { FileSystemSyncRulesCollector } from 
'./sync-rules/impl/filesystem-sync-rules-collector.js'; -import { FallbackConfigCollector } from './collectors/impl/fallback-config-collector.js'; -import { logger } from '@powersync/lib-services-framework'; - -const POWERSYNC_DEV_KID = 'powersync-dev'; +import { InlineSyncRulesCollector } from './sync-rules/impl/inline-sync-rules-collector.js'; +import { SyncRulesCollector } from './sync-rules/sync-collector.js'; +import { ResolvedPowerSyncConfig, RunnerConfig, SyncRulesConfig } from './types.js'; export type CompoundConfigCollectorOptions = { /** @@ -28,6 +26,17 @@ export type CompoundConfigCollectorOptions = { syncRulesCollectors: SyncRulesCollector[]; }; +export type ConfigCollectedEvent = { + base_config: configFile.PowerSyncConfig; + resolved_config: ResolvedPowerSyncConfig; +}; + +export type ConfigCollectorListener = { + configCollected?: (event: ConfigCollectedEvent) => Promise; +}; + +const POWERSYNC_DEV_KID = 'powersync-dev'; + const DEFAULT_COLLECTOR_OPTIONS: CompoundConfigCollectorOptions = { configCollectors: [new Base64ConfigCollector(), new FileSystemConfigCollector(), new FallbackConfigCollector()], syncRulesCollectors: [ @@ -43,24 +52,14 @@ export class CompoundConfigCollector { /** * Collects and resolves base config */ - async collectConfig(runner_config: RunnerConfig = {}): Promise { - const baseConfig = await this.collectBaseConfig(runner_config); + async collectConfig(runnerConfig: RunnerConfig = {}): Promise { + const baseConfig = await this.collectBaseConfig(runnerConfig); - const connections = baseConfig.replication?.connections ?? []; - if (connections.length > 1) { - throw new Error('Only a single replication connection is supported currently'); + const dataSources = baseConfig.replication?.connections ?? []; + if (dataSources.length > 1) { + throw new Error('Only a single replication data source is supported currently'); } - const mapped = connections.map((c) => { - const conf: ResolvedConnection = { - type: 'postgresql' as const, - ...normalizeConnection(c), - debug_api: c.debug_api ?? false - }; - - return conf; - }); - const collectors = new auth.CompoundKeyCollector(); const keyStore = new auth.KeyStore(collectors); @@ -69,10 +68,6 @@ export class CompoundConfigCollector { collectors.add(staticCollector); - if (baseConfig.client_auth?.supabase && mapped.length > 0) { - collectors.add(new auth.CachedKeyCollector(new auth.SupabaseKeyCollector(mapped[0]))); - } - let jwks_uris = baseConfig.client_auth?.jwks_uri ?? []; if (typeof jwks_uris == 'string') { jwks_uris = [jwks_uris]; @@ -93,12 +88,13 @@ export class CompoundConfigCollector { devKey = await auth.KeySpec.importKey(baseDevKey); } - const sync_rules = await this.collectSyncRules(baseConfig, runner_config); + const sync_rules = await this.collectSyncRules(baseConfig, runnerConfig); let jwt_audiences: string[] = baseConfig.client_auth?.audience ?? []; let config: ResolvedPowerSyncConfig = { - connection: mapped[0], + base_config: baseConfig, + connections: baseConfig.replication?.connections || [], storage: baseConfig.storage, client_keystore: keyStore, // Dev tokens only use the static keys, no external key sources @@ -124,8 +120,12 @@ export class CompoundConfigCollector { internal_service_endpoint: baseConfig.telemetry?.internal_service_endpoint ?? 'https://pulse.journeyapps.com/v1/metrics' }, - slot_name_prefix: connections[0]?.slot_name_prefix ?? 'powersync_' + // TODO maybe move this out of the connection or something + // slot_name_prefix: connections[0]?.slot_name_prefix ?? 
'powersync_' + slot_name_prefix: 'powersync_', + parameters: baseConfig.parameters ?? {} }; + return config; } diff --git a/packages/service-core/src/util/config/sync-rules/sync-rules-provider.ts b/packages/service-core/src/util/config/sync-rules/sync-rules-provider.ts new file mode 100644 index 000000000..150a1cd1b --- /dev/null +++ b/packages/service-core/src/util/config/sync-rules/sync-rules-provider.ts @@ -0,0 +1,18 @@ +import { SyncRulesConfig } from '../types.js'; +import fs from 'fs/promises'; + +export interface SyncRulesProvider { + get(): Promise; +} + +export class ConfigurationFileSyncRulesProvider implements SyncRulesProvider { + constructor(private config: SyncRulesConfig) {} + + async get(): Promise { + if (this.config.content) { + return this.config.content; + } else if (this.config.path) { + return await fs.readFile(this.config.path, 'utf-8'); + } + } +} diff --git a/packages/service-core/src/util/config/types.ts b/packages/service-core/src/util/config/types.ts index 94a09ba6c..99829526d 100644 --- a/packages/service-core/src/util/config/types.ts +++ b/packages/service-core/src/util/config/types.ts @@ -1,4 +1,6 @@ -import { NormalizedPostgresConnection, configFile } from '@powersync/service-types'; +import { configFile } from '@powersync/service-types'; +import { PowerSyncConfig } from '@powersync/service-types/src/config/PowerSyncConfig.js'; +import { CompoundKeyCollector } from '../../auth/CompoundKeyCollector.js'; import { KeySpec } from '../../auth/KeySpec.js'; import { KeyStore } from '../../auth/KeyStore.js'; @@ -20,8 +22,6 @@ export type MigrationContext = { export type Runner = (config: RunnerConfig) => Promise; -export type ResolvedConnection = configFile.PostgresConnection & NormalizedPostgresConnection; - export type SyncRulesConfig = { present: boolean; content?: string; @@ -29,7 +29,8 @@ export type SyncRulesConfig = { }; export type ResolvedPowerSyncConfig = { - connection?: ResolvedConnection; + base_config: PowerSyncConfig; + connections?: configFile.DataSourceConfig[]; storage: configFile.StorageConfig; dev: { demo_auth: boolean; @@ -41,7 +42,7 @@ export type ResolvedPowerSyncConfig = { */ dev_key?: KeySpec; }; - client_keystore: KeyStore; + client_keystore: KeyStore; /** * Keystore for development tokens. */ @@ -63,4 +64,5 @@ export type ResolvedPowerSyncConfig = { /** Prefix for postgres replication slot names. May eventually be connection-specific. 
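The new ConfigurationFileSyncRulesProvider above resolves sync rules either from inline content or from a file on disk, with inline content taking precedence. A short sketch (editorial, not part of the patch; the yaml path and import location are illustrative):

import { ConfigurationFileSyncRulesProvider } from '../util/util-index.js';

async function loadSyncRules() {
  const inline = new ConfigurationFileSyncRulesProvider({
    present: true,
    content: 'bucket_definitions:\n  global:\n    data: [select * from test]'
  });

  const fromFile = new ConfigurationFileSyncRulesProvider({
    present: true,
    path: './sync-rules.yaml' // hypothetical path
  });

  const a = await inline.get();   // returns the inline content string
  const b = await fromFile.get(); // reads the file with fs.readFile(..., 'utf-8')
  return { a, b };
}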
*/ slot_name_prefix: string; + parameters: Record; }; diff --git a/packages/service-core/src/util/util-index.ts b/packages/service-core/src/util/util-index.ts index c726fd99a..50ad85dc7 100644 --- a/packages/service-core/src/util/util-index.ts +++ b/packages/service-core/src/util/util-index.ts @@ -1,25 +1,22 @@ export * from './alerting.js'; export * from './env.js'; export * from './memory-tracking.js'; -export * from './migration_lib.js'; export * from './Mutex.js'; -export * from './PgManager.js'; -export * from './pgwire_utils.js'; -export * from './populate_test_data.js'; export * from './protocol-types.js'; export * from './secs.js'; export * from './utils.js'; export * from './config.js'; -export * from './config/types.js'; export * from './config/compound-config-collector.js'; +export * from './config/types.js'; export * from './config/collectors/config-collector.js'; export * from './config/collectors/impl/base64-config-collector.js'; export * from './config/collectors/impl/fallback-config-collector.js'; export * from './config/collectors/impl/filesystem-config-collector.js'; -export * from './config/sync-rules/sync-collector.js'; export * from './config/sync-rules/impl/base64-sync-rules-collector.js'; export * from './config/sync-rules/impl/filesystem-sync-rules-collector.js'; export * from './config/sync-rules/impl/inline-sync-rules-collector.js'; +export * from './config/sync-rules/sync-collector.js'; +export * from './config/sync-rules/sync-rules-provider.js'; diff --git a/packages/service-core/src/util/utils.ts b/packages/service-core/src/util/utils.ts index 15baf9296..ec46a3e2b 100644 --- a/packages/service-core/src/util/utils.ts +++ b/packages/service-core/src/util/utils.ts @@ -1,15 +1,21 @@ +import * as sync_rules from '@powersync/service-sync-rules'; +import * as bson from 'bson'; import crypto from 'crypto'; +import * as uuid from 'uuid'; +import { BucketChecksum, OpId } from './protocol-types.js'; -import { logger } from '@powersync/lib-services-framework'; -import * as pgwire from '@powersync/service-jpgwire'; -import { pgwireRows } from '@powersync/service-jpgwire'; -import { PartialChecksum } from '../storage/ChecksumCache.js'; import * as storage from '../storage/storage-index.js'; -import { retriedQuery } from './pgwire_utils.js'; -import { BucketChecksum, OpId } from './protocol-types.js'; + +import { PartialChecksum } from '../storage/ChecksumCache.js'; export type ChecksumMap = Map; +export const ID_NAMESPACE = 'a396dd91-09fc-4017-a28d-3df722f651e9'; + +export function escapeIdentifier(identifier: string) { + return `"${identifier.replace(/"/g, '""').replace(/\./g, '"."')}"`; +} + export function hashData(type: string, id: string, data: string): number { const hash = crypto.createHash('sha256'); hash.update(`put.${type}.${id}.${data}`); @@ -83,49 +89,50 @@ export function addBucketChecksums(a: BucketChecksum, b: PartialChecksum | null) } } -export async function getClientCheckpoint( - db: pgwire.PgClient, - bucketStorage: storage.BucketStorageFactory, - options?: { timeout?: number } -): Promise { - const start = Date.now(); +function getRawReplicaIdentity( + tuple: sync_rules.ToastableSqliteRow, + columns: storage.ColumnDescriptor[] +): Record { + let result: Record = {}; + for (let column of columns) { + const name = column.name; + result[name] = tuple[name]; + } + return result; +} - const [{ lsn }] = pgwireRows(await db.query(`SELECT pg_logical_emit_message(false, 'powersync', 'ping') as lsn`)); +export function getUuidReplicaIdentityBson( + tuple: 
sync_rules.ToastableSqliteRow, + columns: storage.ColumnDescriptor[] +): bson.UUID { + if (columns.length == 0) { + // REPLICA IDENTITY NOTHING - generate random id + return new bson.UUID(uuid.v4()); + } + const rawIdentity = getRawReplicaIdentity(tuple, columns); - // This old API needs a persisted checkpoint id. - // Since we don't use LSNs anymore, the only way to get that is to wait. + return uuidForRowBson(rawIdentity); +} - const timeout = options?.timeout ?? 50_000; +export function uuidForRowBson(row: sync_rules.SqliteRow): bson.UUID { + // Important: This must not change, since it will affect how ids are generated. + // Use BSON so that it's a well-defined format without encoding ambiguities. + const repr = bson.serialize(row); + const buffer = Buffer.alloc(16); + return new bson.UUID(uuid.v5(repr, ID_NAMESPACE, buffer)); +} - logger.info(`Waiting for LSN checkpoint: ${lsn}`); - while (Date.now() - start < timeout) { - const cp = await bucketStorage.getActiveCheckpoint(); - if (!cp.hasSyncRules()) { - throw new Error('No sync rules available'); - } - if (cp.lsn >= lsn) { - logger.info(`Got write checkpoint: ${lsn} : ${cp.checkpoint}`); - return cp.checkpoint; +export function hasToastedValues(row: sync_rules.ToastableSqliteRow) { + for (let key in row) { + if (typeof row[key] == 'undefined') { + return true; } - - await new Promise((resolve) => setTimeout(resolve, 30)); } - - throw new Error('Timeout while waiting for checkpoint'); + return false; } -export async function createWriteCheckpoint( - db: pgwire.PgClient, - bucketStorage: storage.BucketStorageFactory, - user_id: string -): Promise { - const [{ lsn }] = pgwireRows( - await retriedQuery(db, `SELECT pg_logical_emit_message(false, 'powersync', 'ping') as lsn`) - ); - - const id = await bucketStorage.createWriteCheckpoint(user_id, { '1': lsn }); - logger.info(`Write checkpoint 2: ${JSON.stringify({ lsn, id: String(id) })}`); - return id; +export function isCompleteRow(row: sync_rules.ToastableSqliteRow): row is sync_rules.SqliteRow { + return !hasToastedValues(row); } export function checkpointUserId(user_id: string | undefined, client_id: string | undefined) { diff --git a/packages/service-core/test/src/__snapshots__/sync.test.ts.snap b/packages/service-core/test/src/__snapshots__/sync.test.ts.snap index 806abf375..233f10df8 100644 --- a/packages/service-core/test/src/__snapshots__/sync.test.ts.snap +++ b/packages/service-core/test/src/__snapshots__/sync.test.ts.snap @@ -56,21 +56,21 @@ exports[`sync - mongodb > compacting data - invalidate checkpoint 2`] = ` "data": [ { "checksum": 1859363232n, - "data": "{\\"id\\":\\"t1\\",\\"description\\":\\"Test 1b\\"}", + "data": "{"id":"t1","description":"Test 1b"}", "object_id": "t1", "object_type": "test", "op": "PUT", "op_id": "3", - "subkey": "6544e3899293153fa7b38331/117ab485-4b42-58a2-ab32-0053a22c3423", + "subkey": "e5aa2ddc-1328-58fa-a000-0b5ed31eaf1a", }, { "checksum": 3028503153n, - "data": "{\\"id\\":\\"t2\\",\\"description\\":\\"Test 2b\\"}", + "data": "{"id":"t2","description":"Test 2b"}", "object_id": "t2", "object_type": "test", "op": "PUT", "op_id": "4", - "subkey": "6544e3899293153fa7b38331/ec27c691-b47a-5d92-927a-9944feb89eee", + "subkey": "13423353-9f27-59b4-baf0-64a5e09f1769", }, ], "has_more": false, @@ -146,21 +146,21 @@ exports[`sync - mongodb > sync global data 1`] = ` "data": [ { "checksum": 920318466n, - "data": "{\\"id\\":\\"t1\\",\\"description\\":\\"Test 1\\"}", + "data": "{"id":"t1","description":"Test 1"}", "object_id": "t1", "object_type": "test", 
"op": "PUT", "op_id": "1", - "subkey": "6544e3899293153fa7b38331/117ab485-4b42-58a2-ab32-0053a22c3423", + "subkey": "e5aa2ddc-1328-58fa-a000-0b5ed31eaf1a", }, { "checksum": 3280762209n, - "data": "{\\"id\\":\\"t2\\",\\"description\\":\\"Test 2\\"}", + "data": "{"id":"t2","description":"Test 2"}", "object_id": "t2", "object_type": "test", "op": "PUT", "op_id": "2", - "subkey": "6544e3899293153fa7b38331/ec27c691-b47a-5d92-927a-9944feb89eee", + "subkey": "13423353-9f27-59b4-baf0-64a5e09f1769", }, ], "has_more": false, @@ -199,7 +199,7 @@ exports[`sync - mongodb > sync legacy non-raw data 1`] = ` "checksum": 3442149460n, "data": { "description": "Test -\\"string\\"", +"string"", "id": "t1", "large_num": 12345678901234567890n, }, @@ -207,7 +207,7 @@ exports[`sync - mongodb > sync legacy non-raw data 1`] = ` "object_type": "test", "op": "PUT", "op_id": "1", - "subkey": "6544e3899293153fa7b38331/117ab485-4b42-58a2-ab32-0053a22c3423", + "subkey": "e5aa2ddc-1328-58fa-a000-0b5ed31eaf1a", }, ], "has_more": false, @@ -268,12 +268,12 @@ exports[`sync - mongodb > sync updates to global data 2`] = ` "data": [ { "checksum": 920318466n, - "data": "{\\"id\\":\\"t1\\",\\"description\\":\\"Test 1\\"}", + "data": "{"id":"t1","description":"Test 1"}", "object_id": "t1", "object_type": "test", "op": "PUT", "op_id": "1", - "subkey": "6544e3899293153fa7b38331/117ab485-4b42-58a2-ab32-0053a22c3423", + "subkey": "e5aa2ddc-1328-58fa-a000-0b5ed31eaf1a", }, ], "has_more": false, @@ -311,12 +311,12 @@ exports[`sync - mongodb > sync updates to global data 3`] = ` "data": [ { "checksum": 3280762209n, - "data": "{\\"id\\":\\"t2\\",\\"description\\":\\"Test 2\\"}", + "data": "{"id":"t2","description":"Test 2"}", "object_id": "t2", "object_type": "test", "op": "PUT", "op_id": "2", - "subkey": "6544e3899293153fa7b38331/ec27c691-b47a-5d92-927a-9944feb89eee", + "subkey": "13423353-9f27-59b4-baf0-64a5e09f1769", }, ], "has_more": false, diff --git a/packages/service-core/test/src/auth.test.ts b/packages/service-core/test/src/auth.test.ts index e80970ed9..709cfcb51 100644 --- a/packages/service-core/test/src/auth.test.ts +++ b/packages/service-core/test/src/auth.test.ts @@ -1,11 +1,11 @@ -import { describe, expect, test } from 'vitest'; -import { StaticKeyCollector } from '../../src/auth/StaticKeyCollector.js'; +import { CachedKeyCollector } from '@/auth/CachedKeyCollector.js'; +import { KeyResult } from '@/auth/KeyCollector.js'; +import { KeySpec } from '@/auth/KeySpec.js'; +import { KeyStore } from '@/auth/KeyStore.js'; +import { RemoteJWKSCollector } from '@/auth/RemoteJWKSCollector.js'; +import { StaticKeyCollector } from '@/auth/StaticKeyCollector.js'; import * as jose from 'jose'; -import { KeyStore } from '../../src/auth/KeyStore.js'; -import { KeySpec } from '../../src/auth/KeySpec.js'; -import { RemoteJWKSCollector } from '../../src/auth/RemoteJWKSCollector.js'; -import { KeyResult } from '../../src/auth/KeyCollector.js'; -import { CachedKeyCollector } from '../../src/auth/CachedKeyCollector.js'; +import { describe, expect, test } from 'vitest'; const publicKey: jose.JWK = { use: 'sig', diff --git a/packages/service-core/test/src/broadcast_iterable.test.ts b/packages/service-core/test/src/broadcast_iterable.test.ts index 0e70aac28..4d2cced7a 100644 --- a/packages/service-core/test/src/broadcast_iterable.test.ts +++ b/packages/service-core/test/src/broadcast_iterable.test.ts @@ -1,3 +1,4 @@ +import { BroadcastIterable, IterableSource } from '@/sync/BroadcastIterable.js'; import { AsyncIterableX, interval } from 
'ix/asynciterable/index.js'; import { delayEach } from 'ix/asynciterable/operators/delayeach.js'; import { take } from 'ix/asynciterable/operators/take.js'; @@ -5,7 +6,6 @@ import { wrapWithAbort } from 'ix/asynciterable/operators/withabort.js'; import { toArray } from 'ix/asynciterable/toarray.js'; import * as timers from 'timers/promises'; import { describe, expect, test } from 'vitest'; -import { BroadcastIterable, IterableSource } from '../../src/sync/BroadcastIterable.js'; describe('BroadcastIterable', () => { test('should iterate', async () => { diff --git a/packages/service-core/test/src/compacting.test.ts b/packages/service-core/test/src/compacting.test.ts index f6ae9c1cb..b5caf72a3 100644 --- a/packages/service-core/test/src/compacting.test.ts +++ b/packages/service-core/test/src/compacting.test.ts @@ -1,9 +1,9 @@ -import { SqlSyncRules } from '@powersync/service-sync-rules'; -import { describe, expect, test } from 'vitest'; -import { makeTestTable, MONGO_STORAGE_FACTORY } from './util.js'; -import { oneFromAsync } from './wal_stream_utils.js'; +import { SaveOperationTag } from '@/storage/BucketStorage.js'; import { MongoCompactOptions } from '@/storage/mongo/MongoCompactor.js'; -import { reduceBucket, validateCompactedBucket, validateBucket } from './bucket_validation.js'; +import { describe, expect, test } from 'vitest'; +import { validateCompactedBucket } from './bucket_validation.js'; +import { oneFromAsync } from './stream_utils.js'; +import { BATCH_OPTIONS, makeTestTable, MONGO_STORAGE_FACTORY, rid, testRules } from './util.js'; const TEST_TABLE = makeTestTable('test', ['id']); @@ -18,37 +18,40 @@ function compactTests(compactOptions: MongoCompactOptions) { const factory = MONGO_STORAGE_FACTORY; test('compacting (1)', async () => { - const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules = testRules(` bucket_definitions: global: data: [select * from test] `); - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); + const storage = (await factory()).getInstance(sync_rules); - const result = await storage.startBatch({}, async (batch) => { + const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 't1' - } + }, + afterReplicaId: rid('t1') }); await batch.save({ sourceTable: TEST_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 't2' - } + }, + afterReplicaId: rid('t2') }); await batch.save({ sourceTable: TEST_TABLE, - tag: 'update', + tag: SaveOperationTag.UPDATE, after: { id: 't2' - } + }, + afterReplicaId: rid('t2') }); }); @@ -112,45 +115,49 @@ bucket_definitions: }); test('compacting (2)', async () => { - const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules = testRules(` bucket_definitions: global: data: [select * from test] `); - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); + const storage = (await factory()).getInstance(sync_rules); - const result = await storage.startBatch({}, async (batch) => { + const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 't1' - } + }, + afterReplicaId: rid('t1') }); await batch.save({ sourceTable: TEST_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 't2' - } + }, + afterReplicaId: rid('t2') }); await batch.save({ sourceTable: TEST_TABLE, - tag: 'delete', + tag: 
SaveOperationTag.DELETE, before: { id: 't1' - } + }, + beforeReplicaId: rid('t1') }); await batch.save({ sourceTable: TEST_TABLE, - tag: 'update', + tag: SaveOperationTag.UPDATE, after: { id: 't2' - } + }, + afterReplicaId: rid('t2') }); }); @@ -213,51 +220,54 @@ bucket_definitions: }); test('compacting (3)', async () => { - const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules = testRules(` bucket_definitions: global: data: [select * from test] `); - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); + const storage = (await factory()).getInstance(sync_rules); - const result = await storage.startBatch({}, async (batch) => { + const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 't1' - } + }, + afterReplicaId: 't1' }); await batch.save({ sourceTable: TEST_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 't2' - } + }, + afterReplicaId: 't2' }); await batch.save({ sourceTable: TEST_TABLE, - tag: 'delete', + tag: SaveOperationTag.DELETE, before: { id: 't1' - } + }, + beforeReplicaId: 't1' }); }); const checkpoint1 = result!.flushed_op; const checksumBefore = await storage.getChecksums(checkpoint1, ['global[]']); - console.log('before', checksumBefore); - const result2 = await storage.startBatch({}, async (batch) => { + const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: 'delete', + tag: SaveOperationTag.DELETE, before: { id: 't2' - } + }, + beforeReplicaId: 't2' }); }); const checkpoint2 = result2!.flushed_op; @@ -271,7 +281,7 @@ bucket_definitions: expect(batchAfter.targetOp).toEqual(4n); expect(dataAfter).toMatchObject([ { - checksum: 857217610, + checksum: 1874612650, op: 'CLEAR', op_id: '4' } @@ -279,7 +289,7 @@ bucket_definitions: expect(checksumAfter.get('global[]')).toEqual({ bucket: 'global[]', count: 1, - checksum: 857217610 + checksum: 1874612650 }); }); } diff --git a/packages/service-core/test/src/data_storage.test.ts b/packages/service-core/test/src/data_storage.test.ts index 4abbc79d1..df8249b6a 100644 --- a/packages/service-core/test/src/data_storage.test.ts +++ b/packages/service-core/test/src/data_storage.test.ts @@ -1,8 +1,19 @@ -import { RequestParameters, SqlSyncRules } from '@powersync/service-sync-rules'; +import { BucketDataBatchOptions, SaveOperationTag } from '@/storage/BucketStorage.js'; +import { getUuidReplicaIdentityBson } from '@/util/util-index.js'; +import { RequestParameters } from '@powersync/service-sync-rules'; import { describe, expect, test } from 'vitest'; -import { BucketDataBatchOptions } from '../../src/storage/BucketStorage.js'; -import { getBatchData, getBatchMeta, makeTestTable, MONGO_STORAGE_FACTORY, StorageFactory } from './util.js'; -import { fromAsync, oneFromAsync } from './wal_stream_utils.js'; +import { fromAsync, oneFromAsync } from './stream_utils.js'; +import { + BATCH_OPTIONS, + getBatchData, + getBatchMeta, + makeTestTable, + MONGO_STORAGE_FACTORY, + PARSE_OPTIONS, + rid, + StorageFactory, + testRules +} from './util.js'; const TEST_TABLE = makeTestTable('test', ['id']); @@ -12,7 +23,7 @@ describe('store - mongodb', function () { function defineDataStorageTests(factory: StorageFactory) { test('save and load parameters', async () => { - const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules = testRules(` bucket_definitions: mybucket: parameters: @@ -20,29 +31,31 @@ 
bucket_definitions: data: [] `); - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); + const storage = (await factory()).getInstance(sync_rules); - const result = await storage.startBatch({}, async (batch) => { + const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 't2', id1: 'user3', id2: 'user4', group_id: 'group2a' - } + }, + afterReplicaId: rid('t2') }); await batch.save({ sourceTable: TEST_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 't1', id1: 'user1', id2: 'user2', group_id: 'group1a' - } + }, + afterReplicaId: rid('t1') }); }); @@ -55,34 +68,38 @@ bucket_definitions: }); test('it should use the latest version', async () => { - const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules = testRules( + ` bucket_definitions: mybucket: parameters: - SELECT group_id FROM test WHERE id = token_parameters.user_id data: [] - `); + ` + ); - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); + const storage = (await factory()).getInstance(sync_rules); - const result1 = await storage.startBatch({}, async (batch) => { + const result1 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'user1', group_id: 'group1' - } + }, + afterReplicaId: rid('user1') }); }); - const result2 = await storage.startBatch({}, async (batch) => { + const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'user1', group_id: 'group2' - } + }, + afterReplicaId: rid('user1') }); }); @@ -103,27 +120,30 @@ bucket_definitions: }); test('save and load parameters with different number types', async () => { - const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules = testRules( + ` bucket_definitions: mybucket: parameters: - SELECT group_id FROM test WHERE n1 = token_parameters.n1 and f2 = token_parameters.f2 and f3 = token_parameters.f3 data: [] - `); + ` + ); - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); + const storage = (await factory()).getInstance(sync_rules); - const result = await storage.startBatch({}, async (batch) => { + const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 't1', group_id: 'group1', n1: 314n, f2: 314, f3: 3.14 - } + }, + afterReplicaId: rid('t1') }); }); @@ -144,37 +164,41 @@ bucket_definitions: // This specific case tested here cannot happen with postgres in practice, but we still // test this to ensure correct deserialization. 
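For context on the big-number handling exercised here (editorial note, not part of the patch): the value used in the test that follows, 2^60, is well past the largest integer a JS number can represent exactly, which is why it is stored and compared as a BigInt.

const big = 1152921504606846976n;    // 2^60, as used in the test below
Number.MAX_SAFE_INTEGER;             // 9007199254740991 (2^53 - 1)
Number(big) === Number(big + 1n);    // true - adjacent values collapse once converted to a double
big + 1n > big;                      // true - bigint arithmetic stays exact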
- const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules = testRules( + ` bucket_definitions: mybucket: parameters: - SELECT group_id FROM test WHERE n1 = token_parameters.n1 data: [] - `); + ` + ); - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); + const storage = (await factory()).getInstance(sync_rules); - const result = await storage.startBatch({}, async (batch) => { + const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 't1', group_id: 'group1', n1: 1152921504606846976n // 2^60 - } + }, + afterReplicaId: rid('t1') }); await batch.save({ sourceTable: TEST_TABLE, - tag: 'update', + tag: SaveOperationTag.UPDATE, after: { id: 't1', group_id: 'group1', // Simulate a TOAST value, even though it can't happen for values like this // in practice. n1: undefined - } + }, + afterReplicaId: rid('t1') }); }); @@ -187,31 +211,32 @@ bucket_definitions: }); test('removing row', async () => { - const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules = testRules( + ` bucket_definitions: global: data: - SELECT id, description FROM "%" -`); - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); +` + ); + const storage = (await factory()).getInstance(sync_rules); - const result = await storage.startBatch({}, async (batch) => { + const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; await batch.save({ sourceTable, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'test1', description: 'test1' - } + }, + afterReplicaId: rid('test1') }); await batch.save({ sourceTable, - tag: 'delete', - before: { - id: 'test1' - } + tag: SaveOperationTag.DELETE, + beforeReplicaId: rid('test1') }); }); @@ -247,25 +272,29 @@ bucket_definitions: test('save and load parameters with workspaceId', async () => { const WORKSPACE_TABLE = makeTestTable('workspace', ['id']); - const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules_content = testRules( + ` bucket_definitions: by_workspace: parameters: - SELECT id as workspace_id FROM workspace WHERE workspace."userId" = token_parameters.user_id data: [] - `); + ` + ); + const sync_rules = sync_rules_content.parsed(PARSE_OPTIONS).sync_rules; - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); + const storage = (await factory()).getInstance(sync_rules_content); - const result = await storage.startBatch({}, async (batch) => { + const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: WORKSPACE_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'workspace1', userId: 'u1' - } + }, + afterReplicaId: rid('workspace1') }); }); @@ -293,43 +322,49 @@ bucket_definitions: test('save and load parameters with dynamic global buckets', async () => { const WORKSPACE_TABLE = makeTestTable('workspace'); - const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules_content = testRules( + ` bucket_definitions: by_public_workspace: parameters: - SELECT id as workspace_id FROM workspace WHERE workspace.visibility = 'public' data: [] - `); + ` + ); + const sync_rules = sync_rules_content.parsed(PARSE_OPTIONS).sync_rules; - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); + const storage = (await factory()).getInstance(sync_rules_content); - const result = await 
storage.startBatch({}, async (batch) => { + const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: WORKSPACE_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'workspace1', visibility: 'public' - } + }, + afterReplicaId: rid('workspace1') }); await batch.save({ sourceTable: WORKSPACE_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'workspace2', visibility: 'private' - } + }, + afterReplicaId: rid('workspace2') }); await batch.save({ sourceTable: WORKSPACE_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'workspace3', visibility: 'public' - } + }, + afterReplicaId: rid('workspace3') }); }); @@ -359,7 +394,8 @@ bucket_definitions: test('multiple parameter queries', async () => { const WORKSPACE_TABLE = makeTestTable('workspace'); - const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules_content = testRules( + ` bucket_definitions: by_workspace: parameters: @@ -368,47 +404,53 @@ bucket_definitions: - SELECT id as workspace_id FROM workspace WHERE workspace.user_id = token_parameters.user_id data: [] - `); + ` + ); + const sync_rules = sync_rules_content.parsed(PARSE_OPTIONS).sync_rules; - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); + const storage = (await factory()).getInstance(sync_rules_content); - const result = await storage.startBatch({}, async (batch) => { + const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: WORKSPACE_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'workspace1', visibility: 'public' - } + }, + afterReplicaId: rid('workspace1') }); await batch.save({ sourceTable: WORKSPACE_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'workspace2', visibility: 'private' - } + }, + afterReplicaId: rid('workspace2') }); await batch.save({ sourceTable: WORKSPACE_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'workspace3', user_id: 'u1', visibility: 'private' - } + }, + afterReplicaId: rid('workspace3') }); await batch.save({ sourceTable: WORKSPACE_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'workspace4', user_id: 'u2', visibility: 'private' - } + }, + afterReplicaId: rid('workspace4') }); }); @@ -445,43 +487,48 @@ bucket_definitions: }); test('changing client ids', async () => { - const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules = testRules( + ` bucket_definitions: global: data: - SELECT client_id as id, description FROM "%" -`); - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); +` + ); + const storage = (await factory()).getInstance(sync_rules); const sourceTable = TEST_TABLE; - const result = await storage.startBatch({}, async (batch) => { + const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'test1', client_id: 'client1a', description: 'test1a' - } + }, + afterReplicaId: rid('test1') }); await batch.save({ sourceTable, - tag: 'update', + tag: SaveOperationTag.UPDATE, after: { id: 'test1', client_id: 'client1b', description: 'test1b' - } + }, + afterReplicaId: rid('test1') }); await batch.save({ sourceTable, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'test2', client_id: 'client2', description: 'test2' - } + }, + afterReplicaId: rid('test2') }); }); const checkpoint = result!.flushed_op; @@ -502,48 
+549,47 @@ bucket_definitions: }); test('re-apply delete', async () => { - const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules = testRules( + ` bucket_definitions: global: data: - SELECT id, description FROM "%" -`); - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); +` + ); + const storage = (await factory()).getInstance(sync_rules); - await storage.startBatch({}, async (batch) => { + await storage.startBatch(BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; await batch.save({ sourceTable, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'test1', description: 'test1' - } + }, + afterReplicaId: rid('test1') }); }); - await storage.startBatch({}, async (batch) => { + await storage.startBatch(BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; await batch.save({ sourceTable, - tag: 'delete', - before: { - id: 'test1' - } + tag: SaveOperationTag.DELETE, + beforeReplicaId: rid('test1') }); }); - const result = await storage.startBatch({}, async (batch) => { + const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; await batch.save({ sourceTable, - tag: 'delete', - before: { - id: 'test1' - } + tag: SaveOperationTag.DELETE, + beforeReplicaId: rid('test1') }); }); @@ -577,84 +623,87 @@ bucket_definitions: }); test('re-apply update + delete', async () => { - const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules = testRules( + ` bucket_definitions: global: data: - SELECT id, description FROM "%" -`); - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); +` + ); + const storage = (await factory()).getInstance(sync_rules); - await storage.startBatch({}, async (batch) => { + await storage.startBatch(BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; await batch.save({ sourceTable, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'test1', description: 'test1' - } + }, + afterReplicaId: rid('test1') }); }); - await storage.startBatch({}, async (batch) => { + await storage.startBatch(BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; await batch.save({ sourceTable, - tag: 'update', + tag: SaveOperationTag.UPDATE, after: { id: 'test1', description: undefined - } + }, + afterReplicaId: rid('test1') }); await batch.save({ sourceTable, - tag: 'update', + tag: SaveOperationTag.UPDATE, after: { id: 'test1', description: undefined - } + }, + afterReplicaId: rid('test1') }); await batch.save({ sourceTable, - tag: 'delete', - before: { - id: 'test1' - } + tag: SaveOperationTag.DELETE, + beforeReplicaId: rid('test1') }); }); - const result = await storage.startBatch({}, async (batch) => { + const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; await batch.save({ sourceTable, - tag: 'update', + tag: SaveOperationTag.UPDATE, after: { id: 'test1', description: undefined - } + }, + afterReplicaId: rid('test1') }); await batch.save({ sourceTable, - tag: 'update', + tag: SaveOperationTag.UPDATE, after: { id: 'test1', description: undefined - } + }, + afterReplicaId: rid('test1') }); await batch.save({ sourceTable, - tag: 'delete', - before: { - id: 'test1' - } + tag: SaveOperationTag.DELETE, + beforeReplicaId: rid('test1') }); }); @@ -691,26 +740,29 @@ bucket_definitions: }); test('truncate parameters', async () => { - const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules = testRules( + ` bucket_definitions: mybucket: parameters: - 
SELECT group_id FROM test WHERE id1 = token_parameters.user_id OR id2 = token_parameters.user_id data: [] - `); + ` + ); - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); + const storage = (await factory()).getInstance(sync_rules); - await storage.startBatch({}, async (batch) => { + await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 't2', id1: 'user3', id2: 'user4', group_id: 'group2a' - } + }, + afterReplicaId: rid('t2') }); await batch.truncate([TEST_TABLE]); @@ -731,106 +783,120 @@ bucket_definitions: // 1. Not getting the correct "current_data" state for each operation. // 2. Output order not being correct. - const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules = testRules( + ` bucket_definitions: global: data: - SELECT id, description FROM "test" -`); - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); +` + ); + const storage = (await factory()).getInstance(sync_rules); // Pre-setup - const result1 = await storage.startBatch({}, async (batch) => { + const result1 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; await batch.save({ sourceTable, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'test1', description: 'test1a' - } + }, + afterReplicaId: rid('test1') }); await batch.save({ sourceTable, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'test2', description: 'test2a' - } + }, + afterReplicaId: rid('test2') }); }); const checkpoint1 = result1?.flushed_op ?? '0'; // Test batch - const result2 = await storage.startBatch({}, async (batch) => { + const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; // b await batch.save({ sourceTable, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'test1', description: 'test1b' - } + }, + afterReplicaId: rid('test1') }); await batch.save({ sourceTable, - tag: 'update', + tag: SaveOperationTag.UPDATE, before: { id: 'test1' }, + beforeReplicaId: rid('test1'), after: { id: 'test2', description: 'test2b' - } + }, + afterReplicaId: rid('test2') }); await batch.save({ sourceTable, - tag: 'update', + tag: SaveOperationTag.UPDATE, before: { id: 'test2' }, + beforeReplicaId: rid('test2'), after: { id: 'test3', description: 'test3b' - } + }, + + afterReplicaId: rid('test3') }); // c await batch.save({ sourceTable, - tag: 'update', + tag: SaveOperationTag.UPDATE, after: { id: 'test2', description: 'test2c' - } + }, + afterReplicaId: rid('test2') }); // d await batch.save({ sourceTable, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'test4', description: 'test4d' - } + }, + afterReplicaId: rid('test4') }); await batch.save({ sourceTable, - tag: 'update', + tag: SaveOperationTag.UPDATE, before: { id: 'test4' }, + beforeReplicaId: rid('test4'), after: { id: 'test5', description: 'test5d' - } + }, + afterReplicaId: rid('test5') }); }); @@ -865,55 +931,67 @@ bucket_definitions: }); test('changed data with replica identity full', async () => { - const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules = testRules( + ` bucket_definitions: global: data: - SELECT id, description FROM "test" -`); - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); +` + ); + function rid2(id: string, description: string) { + return getUuidReplicaIdentityBson({ id, description }, [ + { name: 
'id', type: 'VARCHAR', typeId: 25 }, + { name: 'description', type: 'VARCHAR', typeId: 25 } + ]); + } + const storage = (await factory()).getInstance(sync_rules); const sourceTable = makeTestTable('test', ['id', 'description']); // Pre-setup - const result1 = await storage.startBatch({}, async (batch) => { + const result1 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'test1', description: 'test1a' - } + }, + afterReplicaId: rid2('test1', 'test1a') }); }); const checkpoint1 = result1?.flushed_op ?? '0'; - const result2 = await storage.startBatch({}, async (batch) => { + const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { // Unchanged, but has a before id await batch.save({ sourceTable, - tag: 'update', + tag: SaveOperationTag.UPDATE, before: { id: 'test1', description: 'test1a' }, + beforeReplicaId: rid2('test1', 'test1a'), after: { id: 'test1', description: 'test1b' - } + }, + afterReplicaId: rid2('test1', 'test1b') }); }); - const result3 = await storage.startBatch({}, async (batch) => { + const result3 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { // Delete await batch.save({ sourceTable, - tag: 'delete', + tag: SaveOperationTag.DELETE, before: { id: 'test1', description: 'test1b' }, + beforeReplicaId: rid2('test1', 'test1b'), after: undefined }); }); @@ -957,55 +1035,68 @@ bucket_definitions: }); test('unchanged data with replica identity full', async () => { - const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules = testRules( + ` bucket_definitions: global: data: - SELECT id, description FROM "test" -`); - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); +` + ); + function rid2(id: string, description: string) { + return getUuidReplicaIdentityBson({ id, description }, [ + { name: 'id', type: 'VARCHAR', typeId: 25 }, + { name: 'description', type: 'VARCHAR', typeId: 25 } + ]); + } + + const storage = (await factory()).getInstance(sync_rules); const sourceTable = makeTestTable('test', ['id', 'description']); // Pre-setup - const result1 = await storage.startBatch({}, async (batch) => { + const result1 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'test1', description: 'test1a' - } + }, + afterReplicaId: rid2('test1', 'test1a') }); }); const checkpoint1 = result1?.flushed_op ?? '0'; - const result2 = await storage.startBatch({}, async (batch) => { + const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { // Unchanged, but has a before id await batch.save({ sourceTable, - tag: 'update', + tag: SaveOperationTag.UPDATE, before: { id: 'test1', description: 'test1a' }, + beforeReplicaId: rid2('test1', 'test1a'), after: { id: 'test1', description: 'test1a' - } + }, + afterReplicaId: rid2('test1', 'test1a') }); }); - const result3 = await storage.startBatch({}, async (batch) => { + const result3 = await storage.startBatch(BATCH_OPTIONS, async (batch) => { // Delete await batch.save({ sourceTable, - tag: 'delete', + tag: SaveOperationTag.DELETE, before: { id: 'test1', description: 'test1a' }, + beforeReplicaId: rid2('test1', 'test1a'), after: undefined }); }); @@ -1046,54 +1137,60 @@ bucket_definitions: // but large enough in size to be split over multiple returned batches. 
// The specific batch splits is an implementation detail of the storage driver, // and the test will have to updated when other implementations are added. - const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules = testRules( + ` bucket_definitions: global: data: - SELECT id, description FROM "%" -`); - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); +` + ); + const storage = (await factory()).getInstance(sync_rules); - const result = await storage.startBatch({}, async (batch) => { + const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; const largeDescription = '0123456789'.repeat(12_000_00); await batch.save({ sourceTable, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'test1', description: 'test1' - } + }, + afterReplicaId: rid('test1') }); await batch.save({ sourceTable, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'large1', description: largeDescription - } + }, + afterReplicaId: rid('large1') }); // Large enough to split the returned batch await batch.save({ sourceTable, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'large2', description: largeDescription - } + }, + afterReplicaId: rid('large2') }); await batch.save({ sourceTable, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'test3', description: 'test3' - } + }, + afterReplicaId: rid('test3') }); }); @@ -1138,54 +1235,60 @@ bucket_definitions: // Test syncing a batch of data that is small in count, // but large enough in size to be split over multiple returned chunks. // Similar to the above test, but splits over 1MB chunks. - const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules = testRules( + ` bucket_definitions: global: data: - SELECT id, description FROM "%" -`); - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); +` + ); + const storage = (await factory()).getInstance(sync_rules); - const result = await storage.startBatch({}, async (batch) => { + const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; const largeDescription = '0123456789'.repeat(2_000_00); await batch.save({ sourceTable, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'test1', description: 'test1' - } + }, + afterReplicaId: rid('test1') }); await batch.save({ sourceTable, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'large1', description: largeDescription - } + }, + afterReplicaId: rid('large1') }); // Large enough to split the returned batch await batch.save({ sourceTable, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'large2', description: largeDescription - } + }, + afterReplicaId: rid('large2') }); await batch.save({ sourceTable, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 'test3', description: 'test3' - } + }, + afterReplicaId: rid('test3') }); }); @@ -1227,25 +1330,28 @@ bucket_definitions: test('long batch', async () => { // Test syncing a batch of data that is limited by count. 
- const sync_rules = SqlSyncRules.fromYaml(` + const sync_rules = testRules( + ` bucket_definitions: global: data: - SELECT id, description FROM "%" -`); - const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' }); +` + ); + const storage = (await factory()).getInstance(sync_rules); - const result = await storage.startBatch({}, async (batch) => { + const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { const sourceTable = TEST_TABLE; for (let i = 1; i <= 6; i++) { await batch.save({ sourceTable, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: `test${i}`, description: `test${i}` - } + }, + afterReplicaId: `test${i}` }); } }); @@ -1295,6 +1401,44 @@ bucket_definitions: expect(getBatchMeta(batch3)).toEqual(null); }); + test('batch should be disposed automatically', async () => { + const sync_rules = testRules(` + bucket_definitions: + global: + data: [] + `); + + const storage = (await factory()).getInstance(sync_rules); + + let isDisposed = false; + await storage.startBatch(BATCH_OPTIONS, async (batch) => { + batch.registerListener({ + disposed: () => { + isDisposed = true; + } + }); + }); + expect(isDisposed).true; + + isDisposed = false; + let errorCaught = false; + try { + await storage.startBatch(BATCH_OPTIONS, async (batch) => { + batch.registerListener({ + disposed: () => { + isDisposed = true; + } + }); + throw new Error(`Testing exceptions`); + }); + } catch (ex) { + errorCaught = true; + expect(ex.message.includes('Testing')).true; + } + expect(errorCaught).true; + expect(isDisposed).true; + }); + test('empty storage metrics', async () => { const f = await factory({ dropAll: true }); @@ -1306,7 +1450,7 @@ bucket_definitions: }); const r = await f.configureSyncRules('bucket_definitions: {}'); - const storage = f.getInstance(r.persisted_sync_rules!.parsed()); + const storage = f.getInstance(r.persisted_sync_rules!); await storage.autoActivate(); const metrics2 = await f.getStorageMetrics(); @@ -1316,4 +1460,40 @@ bucket_definitions: replication_size_bytes: 0 }); }); + + test('invalidate cached parsed sync rules', async () => { + const sync_rules_content = testRules( + ` +bucket_definitions: + by_workspace: + parameters: + - SELECT id as workspace_id FROM workspace WHERE + workspace."userId" = token_parameters.user_id + data: [] + ` + ); + + const bucketStorageFactory = await factory(); + const syncBucketStorage = bucketStorageFactory.getInstance(sync_rules_content); + + const parsedSchema1 = syncBucketStorage.getParsedSyncRules({ + defaultSchema: 'public' + }); + + const parsedSchema2 = syncBucketStorage.getParsedSyncRules({ + defaultSchema: 'public' + }); + + // These should be cached, this will be the same instance + expect(parsedSchema2).equals(parsedSchema1); + expect(parsedSchema1.getSourceTables()[0].schema).equals('public'); + + const parsedSchema3 = syncBucketStorage.getParsedSyncRules({ + defaultSchema: 'databasename' + }); + + // The cache should not be used + expect(parsedSchema3).not.equals(parsedSchema2); + expect(parsedSchema3.getSourceTables()[0].schema).equals('databasename'); + }); } diff --git a/packages/service-core/test/src/env.ts b/packages/service-core/test/src/env.ts index 4e9e1694a..1c86eae37 100644 --- a/packages/service-core/test/src/env.ts +++ b/packages/service-core/test/src/env.ts @@ -2,7 +2,5 @@ import { utils } from '@powersync/lib-services-framework'; export const env = utils.collectEnvironmentVariables({ MONGO_TEST_URL: utils.type.string.default('mongodb://localhost:27017/powersync_test'), 
- PG_TEST_URL: utils.type.string.default('postgres://postgres:postgres@localhost:5432/powersync_test'), - CI: utils.type.boolean.default('false'), - SLOW_TESTS: utils.type.boolean.default('false') + CI: utils.type.boolean.default('false') }); diff --git a/packages/service-core/test/src/merge_iterable.test.ts b/packages/service-core/test/src/merge_iterable.test.ts index 55550be9e..bd5123763 100644 --- a/packages/service-core/test/src/merge_iterable.test.ts +++ b/packages/service-core/test/src/merge_iterable.test.ts @@ -1,11 +1,6 @@ +import { mergeAsyncIterablesNew, mergeAsyncIterablesOld } from '@/sync/merge.js'; import * as timers from 'timers/promises'; import { describe, expect, test } from 'vitest'; -import { - FixedMergeAsyncIterable, - mergeAsyncIterables, - mergeAsyncIterablesNew, - mergeAsyncIterablesOld -} from '../../src/sync/merge.js'; type MergeIteratorFunction = (source: AsyncIterable[]) => AsyncIterable; diff --git a/packages/service-core/test/src/routes/probes.integration.test.ts b/packages/service-core/test/src/routes/probes.integration.test.ts index 5b1b3e276..7a3419e70 100644 --- a/packages/service-core/test/src/routes/probes.integration.test.ts +++ b/packages/service-core/test/src/routes/probes.integration.test.ts @@ -6,8 +6,8 @@ import * as system from '../../../src/system/system-index.js'; import { configureFastifyServer } from '../../../src/index.js'; import { ProbeRoutes } from '../../../src/routes/endpoints/probes.js'; -vi.mock("@powersync/lib-services-framework", async () => { - const actual = await vi.importActual("@powersync/lib-services-framework") as any; +vi.mock('@powersync/lib-services-framework', async () => { + const actual = (await vi.importActual('@powersync/lib-services-framework')) as any; return { ...actual, container: { @@ -15,18 +15,18 @@ vi.mock("@powersync/lib-services-framework", async () => { probes: { state: vi.fn() } - }, - } -}) + } + }; +}); describe('Probe Routes Integration', () => { let app: FastifyInstance; - let mockSystem: system.CorePowerSyncSystem; + let mockSystem: system.ServiceContext; beforeEach(async () => { app = Fastify(); - mockSystem = {} as system.CorePowerSyncSystem; - await configureFastifyServer(app, { system: mockSystem }); + mockSystem = { routerEngine: {} } as system.ServiceContext; + await configureFastifyServer(app, { service_context: mockSystem }); await app.ready(); }); @@ -46,7 +46,7 @@ describe('Probe Routes Integration', () => { const response = await app.inject({ method: 'GET', - url: ProbeRoutes.STARTUP, + url: ProbeRoutes.STARTUP }); expect(response.statusCode).toBe(200); @@ -67,7 +67,7 @@ describe('Probe Routes Integration', () => { const response = await app.inject({ method: 'GET', - url: ProbeRoutes.STARTUP, + url: ProbeRoutes.STARTUP }); expect(response.statusCode).toBe(400); @@ -90,7 +90,7 @@ describe('Probe Routes Integration', () => { const response = await app.inject({ method: 'GET', - url: ProbeRoutes.LIVENESS, + url: ProbeRoutes.LIVENESS }); expect(response.statusCode).toBe(200); @@ -111,7 +111,7 @@ describe('Probe Routes Integration', () => { const response = await app.inject({ method: 'GET', - url: ProbeRoutes.LIVENESS, + url: ProbeRoutes.LIVENESS }); expect(response.statusCode).toBe(400); @@ -134,7 +134,7 @@ describe('Probe Routes Integration', () => { const response = await app.inject({ method: 'GET', - url: ProbeRoutes.READINESS, + url: ProbeRoutes.READINESS }); expect(response.statusCode).toBe(200); @@ -155,7 +155,7 @@ describe('Probe Routes Integration', () => { const response = await 
app.inject({ method: 'GET', - url: ProbeRoutes.READINESS, + url: ProbeRoutes.READINESS }); expect(response.statusCode).toBe(400); @@ -172,17 +172,19 @@ describe('Probe Routes Integration', () => { vi.mocked(container.probes.state).mockReturnValue(mockState); // Create array of 15 concurrent requests (default concurrency is 10) - const requests = Array(15).fill(null).map(() => - app.inject({ - method: 'GET', - url: ProbeRoutes.STARTUP, - }) - ); + const requests = Array(15) + .fill(null) + .map(() => + app.inject({ + method: 'GET', + url: ProbeRoutes.STARTUP + }) + ); const responses = await Promise.all(requests); // All requests should complete successfully - responses.forEach(response => { + responses.forEach((response) => { expect(response.statusCode).toBe(200); expect(JSON.parse(response.payload)).toEqual({ ...mockState, @@ -196,18 +198,20 @@ describe('Probe Routes Integration', () => { vi.mocked(container.probes.state).mockReturnValue(mockState); // Create array of 35 concurrent requests (default max_queue_depth is 20) - const requests = Array(35).fill(null).map(() => - app.inject({ - method: 'GET', - url: ProbeRoutes.STARTUP, - }) - ); + const requests = Array(35) + .fill(null) + .map(() => + app.inject({ + method: 'GET', + url: ProbeRoutes.STARTUP + }) + ); const responses = await Promise.all(requests); // Some requests should succeed and some should fail with 429 - const successCount = responses.filter(r => r.statusCode === 200).length; - const queueFullCount = responses.filter(r => r.statusCode === 429).length; + const successCount = responses.filter((r) => r.statusCode === 200).length; + const queueFullCount = responses.filter((r) => r.statusCode === 429).length; expect(successCount).toBeGreaterThan(0); expect(queueFullCount).toBeGreaterThan(0); @@ -222,7 +226,7 @@ describe('Probe Routes Integration', () => { const response = await app.inject({ method: 'GET', - url: ProbeRoutes.STARTUP, + url: ProbeRoutes.STARTUP }); expect(response.headers['content-type']).toMatch(/application\/json/); diff --git a/packages/service-core/test/src/setup.ts b/packages/service-core/test/src/setup.ts index 8e4ece4c9..b924cf736 100644 --- a/packages/service-core/test/src/setup.ts +++ b/packages/service-core/test/src/setup.ts @@ -2,6 +2,6 @@ import { container } from '@powersync/lib-services-framework'; import { beforeAll } from 'vitest'; beforeAll(() => { - // Your setup code here + // Executes for every test file container.registerDefaults(); }); diff --git a/packages/service-core/test/src/stream_utils.ts b/packages/service-core/test/src/stream_utils.ts new file mode 100644 index 000000000..bcbc9168e --- /dev/null +++ b/packages/service-core/test/src/stream_utils.ts @@ -0,0 +1,42 @@ +import { OplogEntry } from '@/util/protocol-types.js'; +import { JSONBig } from '@powersync/service-jsonbig'; + +export function putOp(table: string, data: Record<string, any>): Partial<OplogEntry> { + return { + op: 'PUT', + object_type: table, + object_id: data.id, + data: JSONBig.stringify(data) + }; +} + +export function removeOp(table: string, id: string): Partial<OplogEntry> { + return { + op: 'REMOVE', + object_type: table, + object_id: id + }; +} + +export function compareIds(a: OplogEntry, b: OplogEntry) { + return a.object_id!.localeCompare(b.object_id!); +} + +export async function oneFromAsync<T>(source: Iterable<T> | AsyncIterable<T>): Promise<T> { + const items: T[] = []; + for await (const item of source) { + items.push(item); + } + if (items.length != 1) { + throw new Error(`One item expected, got: ${items.length}`); + } + return items[0]; +} + +export 
async function fromAsync<T>(source: Iterable<T> | AsyncIterable<T>): Promise<T[]> { + const items: T[] = []; + for await (const item of source) { + items.push(item); + } + return items; +} diff --git a/packages/service-core/test/src/sync.test.ts b/packages/service-core/test/src/sync.test.ts index c367169ab..75e5f550f 100644 --- a/packages/service-core/test/src/sync.test.ts +++ b/packages/service-core/test/src/sync.test.ts @@ -1,12 +1,12 @@ +import { SaveOperationTag } from '@/storage/storage-index.js'; import { RequestTracker } from '@/sync/RequestTracker.js'; +import { streamResponse, SyncStreamParameters } from '@/sync/sync.js'; import { StreamingSyncLine } from '@/util/protocol-types.js'; -import { lsnMakeComparable } from '@powersync/service-jpgwire'; import { JSONBig } from '@powersync/service-jsonbig'; import { RequestParameters } from '@powersync/service-sync-rules'; import * as timers from 'timers/promises'; import { describe, expect, test } from 'vitest'; -import { streamResponse } from '../../src/sync/sync.js'; -import { makeTestTable, MONGO_STORAGE_FACTORY, StorageFactory } from './util.js'; +import { BATCH_OPTIONS, makeTestTable, MONGO_STORAGE_FACTORY, PARSE_OPTIONS, StorageFactory } from './util.js'; describe('sync - mongodb', function () { defineTests(MONGO_STORAGE_FACTORY); @@ -31,29 +31,31 @@ function defineTests(factory: StorageFactory) { content: BASIC_SYNC_RULES }); - const storage = await f.getInstance(syncRules.parsed()); + const storage = f.getInstance(syncRules); await storage.autoActivate(); - const result = await storage.startBatch({}, async (batch) => { + const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 't1', description: 'Test 1' - } + }, + afterReplicaId: 't1' }); await batch.save({ sourceTable: TEST_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 't2', description: 'Test 2' - } + }, + afterReplicaId: 't2' }); - await batch.commit(lsnMakeComparable('0/1')); + await batch.commit('0/1'); }); const stream = streamResponse({ @@ -63,6 +65,7 @@ function defineTests(factory: StorageFactory) { include_checksum: true, raw_data: true }, + parseOptions: PARSE_OPTIONS, tracker, syncParams: new RequestParameters({ sub: '' }, {}), token: { exp: Date.now() / 1000 + 10 } as any @@ -79,21 +82,22 @@ function defineTests(factory: StorageFactory) { content: BASIC_SYNC_RULES }); - const storage = await f.getInstance(syncRules.parsed()); + const storage = await f.getInstance(syncRules); await storage.autoActivate(); - const result = await storage.startBatch({}, async (batch) => { + const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 't1', description: 'Test\n"string"', large_num: 12345678901234567890n - } + }, + afterReplicaId: 't1' }); - await batch.commit(lsnMakeComparable('0/1')); + await batch.commit('0/1'); }); const stream = streamResponse({ @@ -103,6 +107,7 @@ function defineTests(factory: StorageFactory) { include_checksum: true, raw_data: false }, + parseOptions: PARSE_OPTIONS, tracker, syncParams: new RequestParameters({ sub: '' }, {}), token: { exp: Date.now() / 1000 + 10 } as any @@ -121,7 +126,7 @@ function defineTests(factory: StorageFactory) { content: BASIC_SYNC_RULES }); - const storage = await f.getInstance(syncRules.parsed()); + const storage = await f.getInstance(syncRules); await 
storage.autoActivate(); const stream = streamResponse({ @@ -131,6 +136,7 @@ function defineTests(factory: StorageFactory) { include_checksum: true, raw_data: true }, + parseOptions: PARSE_OPTIONS, tracker, syncParams: new RequestParameters({ sub: '' }, {}), token: { exp: 0 } as any @@ -147,7 +153,7 @@ function defineTests(factory: StorageFactory) { content: BASIC_SYNC_RULES }); - const storage = await f.getInstance(syncRules.parsed()); + const storage = await f.getInstance(syncRules); await storage.autoActivate(); const stream = streamResponse({ @@ -157,6 +163,7 @@ function defineTests(factory: StorageFactory) { include_checksum: true, raw_data: true }, + parseOptions: PARSE_OPTIONS, tracker, syncParams: new RequestParameters({ sub: '' }, {}), token: { exp: Date.now() / 1000 + 10 } as any @@ -165,32 +172,34 @@ function defineTests(factory: StorageFactory) { expect(await getCheckpointLines(iter)).toMatchSnapshot(); - await storage.startBatch({}, async (batch) => { + await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 't1', description: 'Test 1' - } + }, + afterReplicaId: 't1' }); - await batch.commit(lsnMakeComparable('0/1')); + await batch.commit('0/1'); }); expect(await getCheckpointLines(iter)).toMatchSnapshot(); - await storage.startBatch({}, async (batch) => { + await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 't2', description: 'Test 2' - } + }, + afterReplicaId: 't2' }); - await batch.commit(lsnMakeComparable('0/2')); + await batch.commit('0/2'); }); expect(await getCheckpointLines(iter)).toMatchSnapshot(); @@ -205,7 +214,7 @@ function defineTests(factory: StorageFactory) { content: BASIC_SYNC_RULES }); - const storage = await f.getInstance(syncRules.parsed()); + const storage = await f.getInstance(syncRules); await storage.autoActivate(); const exp = Date.now() / 1000 + 0.1; @@ -217,6 +226,7 @@ function defineTests(factory: StorageFactory) { include_checksum: true, raw_data: true }, + parseOptions: PARSE_OPTIONS, tracker, syncParams: new RequestParameters({ sub: '' }, {}), token: { exp: exp } as any @@ -242,29 +252,31 @@ function defineTests(factory: StorageFactory) { content: BASIC_SYNC_RULES }); - const storage = await f.getInstance(syncRules.parsed()); + const storage = await f.getInstance(syncRules); await storage.autoActivate(); - await storage.startBatch({}, async (batch) => { + await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 't1', description: 'Test 1' - } + }, + afterReplicaId: 't1' }); await batch.save({ sourceTable: TEST_TABLE, - tag: 'insert', + tag: SaveOperationTag.INSERT, after: { id: 't2', description: 'Test 2' - } + }, + afterReplicaId: 't2' }); - await batch.commit(lsnMakeComparable('0/1')); + await batch.commit('0/1'); }); const stream = streamResponse({ @@ -274,6 +286,7 @@ function defineTests(factory: StorageFactory) { include_checksum: true, raw_data: true }, + parseOptions: PARSE_OPTIONS, tracker, syncParams: new RequestParameters({ sub: '' }, {}), token: { exp: Date.now() / 1000 + 10 } as any @@ -293,26 +306,28 @@ function defineTests(factory: StorageFactory) { // Now we save additional data AND compact before continuing. // This invalidates the checkpoint we've received above. 
- await storage.startBatch({}, async (batch) => { + await storage.startBatch(BATCH_OPTIONS, async (batch) => { await batch.save({ sourceTable: TEST_TABLE, - tag: 'update', + tag: SaveOperationTag.UPDATE, after: { id: 't1', description: 'Test 1b' - } + }, + afterReplicaId: 't1' }); await batch.save({ sourceTable: TEST_TABLE, - tag: 'update', + tag: SaveOperationTag.UPDATE, after: { id: 't2', description: 'Test 2b' - } + }, + afterReplicaId: 't2' }); - await batch.commit(lsnMakeComparable('0/2')); + await batch.commit('0/2'); }); await storage.compact(); @@ -366,6 +381,67 @@ function defineTests(factory: StorageFactory) { }) }); }); + + test('write checkpoint', async () => { + const f = await factory(); + + const syncRules = await f.updateSyncRules({ + content: BASIC_SYNC_RULES + }); + + const storage = f.getInstance(syncRules); + await storage.autoActivate(); + + await storage.startBatch(BATCH_OPTIONS, async (batch) => { + // <= the managed write checkpoint LSN below + await batch.commit('0/1'); + }); + + const checkpoint = await storage.createManagedWriteCheckpoint({ + user_id: 'test', + heads: { '1': '1/0' } + }); + + const params: SyncStreamParameters = { + storage: f, + params: { + buckets: [], + include_checksum: true, + raw_data: true + }, + parseOptions: PARSE_OPTIONS, + tracker, + syncParams: new RequestParameters({ sub: 'test' }, {}), + token: { sub: 'test', exp: Date.now() / 1000 + 10 } as any + }; + const stream1 = streamResponse(params); + const lines1 = await consumeCheckpointLines(stream1); + + // If write checkpoints are not correctly filtered, this may already + // contain the write checkpoint. + expect(lines1[0]).toMatchObject({ + checkpoint: expect.objectContaining({ + last_op_id: '0', + write_checkpoint: undefined + }) + }); + + await storage.startBatch(BATCH_OPTIONS, async (batch) => { + // must be >= the managed write checkpoint LSN + await batch.commit('1/0'); + }); + + // At this point the LSN has advanced, so the write checkpoint should be + // included in the next checkpoint message. 
+ const stream2 = streamResponse(params); + const lines2 = await consumeCheckpointLines(stream2); + expect(lines2[0]).toMatchObject({ + checkpoint: expect.objectContaining({ + last_op_id: '0', + write_checkpoint: `${checkpoint}` + }) + }); + }); } /** diff --git a/packages/service-core/test/src/util.ts b/packages/service-core/test/src/util.ts index c2722bbd1..138ee5d03 100644 --- a/packages/service-core/test/src/util.ts +++ b/packages/service-core/test/src/util.ts @@ -1,16 +1,20 @@ -import * as pgwire from '@powersync/service-jpgwire'; -import { normalizeConnection } from '@powersync/service-types'; -import * as mongo from 'mongodb'; -import { BucketStorageFactory, SyncBucketDataBatch } from '../../src/storage/BucketStorage.js'; -import { MongoBucketStorage } from '../../src/storage/MongoBucketStorage.js'; -import { PowerSyncMongo } from '../../src/storage/mongo/db.js'; -import { escapeIdentifier } from '../../src/util/pgwire_utils.js'; -import { env } from './env.js'; import { Metrics } from '@/metrics/Metrics.js'; -import { hashData } from '@/util/utils.js'; +import { + BucketStorageFactory, + ParseSyncRulesOptions, + PersistedSyncRulesContent, + StartBatchOptions, + SyncBucketDataBatch +} from '@/storage/BucketStorage.js'; +import { MongoBucketStorage } from '@/storage/MongoBucketStorage.js'; import { SourceTable } from '@/storage/SourceTable.js'; -import * as bson from 'bson'; +import { PowerSyncMongo } from '@/storage/mongo/db.js'; import { SyncBucketData } from '@/util/protocol-types.js'; +import { getUuidReplicaIdentityBson, hashData } from '@/util/utils.js'; +import { SqlSyncRules } from '@powersync/service-sync-rules'; +import * as bson from 'bson'; +import * as mongo from 'mongodb'; +import { env } from './env.js'; // The metrics need to be initialised before they can be used await Metrics.initialise({ @@ -20,8 +24,6 @@ await Metrics.initialise({ }); Metrics.getInstance().resetCounters(); -export const TEST_URI = env.PG_TEST_URL; - export interface StorageOptions { /** * By default, collections are only cleared/ @@ -41,45 +43,34 @@ export const MONGO_STORAGE_FACTORY: StorageFactory = async (options?: StorageOpt return new MongoBucketStorage(db, { slot_name_prefix: 'test_' }); }; -export async function clearTestDb(db: pgwire.PgClient) { - await db.query( - "select pg_drop_replication_slot(slot_name) from pg_replication_slots where active = false and slot_name like 'test_%'" - ); +export const ZERO_LSN = '0/0'; - await db.query(`CREATE EXTENSION IF NOT EXISTS "uuid-ossp"`); - try { - await db.query(`DROP PUBLICATION powersync`); - } catch (e) { - // Ignore - } +export const PARSE_OPTIONS: ParseSyncRulesOptions = { + defaultSchema: 'public' +}; - await db.query(`CREATE PUBLICATION powersync FOR ALL TABLES`); +export const BATCH_OPTIONS: StartBatchOptions = { + ...PARSE_OPTIONS, + zeroLSN: ZERO_LSN, + storeCurrentData: true +}; - const tableRows = pgwire.pgwireRows( - await db.query(`SELECT table_name FROM information_schema.tables where table_schema = 'public'`) - ); - for (let row of tableRows) { - const name = row.table_name; - if (name.startsWith('test_')) { - await db.query(`DROP TABLE public.${escapeIdentifier(name)}`); +export function testRules(content: string): PersistedSyncRulesContent { + return { + id: 1, + sync_rules_content: content, + slot_name: 'test', + parsed(options) { + return { + id: 1, + sync_rules: SqlSyncRules.fromYaml(content, options), + slot_name: 'test' + }; + }, + lock() { + throw new Error('Not implemented'); } - } -} - -export const 
TEST_CONNECTION_OPTIONS = normalizeConnection({ - type: 'postgresql', - uri: TEST_URI, - sslmode: 'disable' -}); - -export async function connectPgWire(type?: 'replication' | 'standard') { - const db = await pgwire.connectPgWire(TEST_CONNECTION_OPTIONS, { type }); - return db; -} - -export function connectPgPool() { - const db = pgwire.connectPgWirePool(TEST_CONNECTION_OPTIONS); - return db; + }; } export async function connectMongo() { @@ -90,8 +81,7 @@ export async function connectMongo() { socketTimeoutMS: env.CI ? 15_000 : 5_000, serverSelectionTimeoutMS: env.CI ? 15_000 : 2_500 }); - const db = new PowerSyncMongo(client); - return db; + return new PowerSyncMongo(client); } export function makeTestTable(name: string, columns?: string[] | undefined) { @@ -101,9 +91,9 @@ export function makeTestTable(name: string, columns?: string[] | undefined) { id, SourceTable.DEFAULT_TAG, relId, - SourceTable.DEFAULT_SCHEMA, + 'public', name, - (columns ?? ['id']).map((column) => ({ name: column, typeOid: 25 })), + (columns ?? ['id']).map((column) => ({ name: column, type: 'VARCHAR', typeId: 25 })), true ); } @@ -149,3 +139,10 @@ function getFirst(batch: SyncBucketData[] | SyncBucketDataBatch[] | SyncBucketDa return first as SyncBucketData; } } + +/** + * Replica id in the old Postgres format, for backwards-compatible tests. + */ +export function rid(id: string): bson.UUID { + return getUuidReplicaIdentityBson({ id: id }, [{ name: 'id', type: 'VARCHAR', typeId: 25 }]); +} diff --git a/packages/service-core/test/tsconfig.json b/packages/service-core/test/tsconfig.json index 4e77a4239..124a1cbe5 100644 --- a/packages/service-core/test/tsconfig.json +++ b/packages/service-core/test/tsconfig.json @@ -2,8 +2,8 @@ "extends": "../../../tsconfig.base.json", "compilerOptions": { "rootDir": "src", - "noEmit": true, "baseUrl": "./", + "outDir": "dist", "esModuleInterop": true, "skipLibCheck": true, "sourceMap": true, diff --git a/packages/service-core/vitest.config.ts b/packages/service-core/vitest.config.ts index b392696b7..7a39c1f71 100644 --- a/packages/service-core/vitest.config.ts +++ b/packages/service-core/vitest.config.ts @@ -4,6 +4,12 @@ import tsconfigPaths from 'vite-tsconfig-paths'; export default defineConfig({ plugins: [tsconfigPaths()], test: { - setupFiles: './test/src/setup.ts' + setupFiles: './test/src/setup.ts', + poolOptions: { + threads: { + singleThread: true + } + }, + pool: 'threads' } }); diff --git a/packages/sync-rules/.gitignore b/packages/sync-rules/.gitignore new file mode 100644 index 000000000..b73759d7b --- /dev/null +++ b/packages/sync-rules/.gitignore @@ -0,0 +1 @@ +schema/ \ No newline at end of file diff --git a/packages/sync-rules/package.json b/packages/sync-rules/package.json index 580d103d5..085f36f7b 100644 --- a/packages/sync-rules/package.json +++ b/packages/sync-rules/package.json @@ -9,12 +9,14 @@ "access": "public" }, "files": [ - "dist/**/*" + "dist/**/*", + "schema/*" ], "type": "module", "scripts": { "clean": "rm -r ./dist && tsc -b --clean", - "build": "tsc -b", + "build:tsc": "tsc -b", + "build": "pnpm build:tsc && node scripts/compile-schema.js", "build:tests": "tsc -b test/tsconfig.json", "test": "vitest" }, @@ -26,7 +28,7 @@ "yaml": "^2.3.1" }, "devDependencies": { - "@types/node": "18.19.50", - "vitest": "^2.0.5" + "@types/node": "^22.5.5", + "vitest": "^2.1.1" } } diff --git a/packages/sync-rules/scripts/compile-schema.js b/packages/sync-rules/scripts/compile-schema.js new file mode 100644 index 000000000..267de50d5 --- /dev/null +++ 
b/packages/sync-rules/scripts/compile-schema.js @@ -0,0 +1,11 @@ +import fs from 'fs'; +import path from 'path'; +import { fileURLToPath } from 'url'; +import { syncRulesSchema } from '../dist/json_schema.js'; + +const __dirname = path.dirname(fileURLToPath(import.meta.url)); +const schemaDir = path.join(__dirname, '../schema'); + +fs.mkdirSync(schemaDir, { recursive: true }); + +fs.writeFileSync(path.join(schemaDir, 'sync_rules.json'), JSON.stringify(syncRulesSchema, null, '\t')); diff --git a/packages/sync-rules/src/BaseSqlDataQuery.ts b/packages/sync-rules/src/BaseSqlDataQuery.ts new file mode 100644 index 000000000..342314d9b --- /dev/null +++ b/packages/sync-rules/src/BaseSqlDataQuery.ts @@ -0,0 +1,125 @@ +import { SelectedColumn } from 'pgsql-ast-parser'; +import { SqlRuleError } from './errors.js'; +import { ColumnDefinition } from './ExpressionType.js'; +import { SourceTableInterface } from './SourceTableInterface.js'; +import { SqlTools } from './sql_filters.js'; +import { TablePattern } from './TablePattern.js'; +import { QueryParameters, QuerySchema, SourceSchema, SourceSchemaTable, SqliteJsonRow, SqliteRow } from './types.js'; +import { filterJsonRow } from './utils.js'; + +export interface RowValueExtractor { + extract(tables: QueryParameters, into: SqliteRow): void; + getTypes(schema: QuerySchema, into: Record<string, ColumnDefinition>): void; +} + +export class BaseSqlDataQuery { + sourceTable?: TablePattern; + table?: string; + sql?: string; + columns?: SelectedColumn[]; + extractors: RowValueExtractor[] = []; + descriptor_name?: string; + bucket_parameters?: string[]; + tools?: SqlTools; + + ruleId?: string; + + errors: SqlRuleError[] = []; + + constructor() {} + + applies(table: SourceTableInterface) { + return this.sourceTable?.matches(table); + } + + addSpecialParameters(table: SourceTableInterface, row: SqliteRow) { + if (this.sourceTable!.isWildcard) { + return { + ...row, + _table_suffix: this.sourceTable!.suffix(table.table) + }; + } else { + return row; + } + } + + getOutputName(sourceTable: string) { + if (this.isUnaliasedWildcard()) { + // Wildcard without alias - use source + return sourceTable; + } else { + return this.table!; + } + } + + isUnaliasedWildcard() { + return this.sourceTable!.isWildcard && this.table == this.sourceTable!.tablePattern; + } + + columnOutputNames(): string[] { + return this.columns!.map((c) => { + return this.tools!.getOutputName(c); + }); + } + + getColumnOutputs(schema: SourceSchema): { name: string; columns: ColumnDefinition[] }[] { + let result: { name: string; columns: ColumnDefinition[] }[] = []; + + if (this.isUnaliasedWildcard()) { + // Separate results + for (let schemaTable of schema.getTables(this.sourceTable!)) { + let output: Record<string, ColumnDefinition> = {}; + + this.getColumnOutputsFor(schemaTable, output); + + result.push({ + name: this.getOutputName(schemaTable.table), + columns: Object.values(output) + }); + } + } else { + // Merged results + let output: Record<string, ColumnDefinition> = {}; + for (let schemaTable of schema.getTables(this.sourceTable!)) { + this.getColumnOutputsFor(schemaTable, output); + } + result.push({ + name: this.table!, + columns: Object.values(output) + }); + } + + return result; + } + + protected transformRow(tables: QueryParameters): SqliteJsonRow { + let result: SqliteRow = {}; + for (let extractor of this.extractors) { + extractor.extract(tables, result); + } + return filterJsonRow(result); + } + + protected getColumnOutputsFor(schemaTable: SourceSchemaTable, output: Record<string, ColumnDefinition>) { + const querySchema: QuerySchema = { + getColumn: (table, column) => { + if (table 
== this.table!) { + return schemaTable.getColumn(column); + } else { + // TODO: bucket parameters? + return undefined; + } + }, + getColumns: (table) => { + if (table == this.table!) { + return schemaTable.getColumns(); + } else { + return []; + } + } + }; + for (let extractor of this.extractors) { + extractor.getTypes(querySchema, output); + } + } +} diff --git a/packages/sync-rules/src/DartSchemaGenerator.ts b/packages/sync-rules/src/DartSchemaGenerator.ts index e25fedf20..1a90884de 100644 --- a/packages/sync-rules/src/DartSchemaGenerator.ts +++ b/packages/sync-rules/src/DartSchemaGenerator.ts @@ -1,5 +1,5 @@ import { ColumnDefinition, TYPE_INTEGER, TYPE_REAL, TYPE_TEXT } from './ExpressionType.js'; -import { SchemaGenerator } from './SchemaGenerator.js'; +import { GenerateSchemaOptions, SchemaGenerator } from './SchemaGenerator.js'; import { SqlSyncRules } from './SqlSyncRules.js'; import { SourceSchema } from './types.js'; @@ -9,18 +9,34 @@ export class DartSchemaGenerator extends SchemaGenerator { readonly mediaType = 'text/x-dart'; readonly fileName = 'schema.dart'; - generate(source: SqlSyncRules, schema: SourceSchema): string { + generate(source: SqlSyncRules, schema: SourceSchema, options?: GenerateSchemaOptions): string { const tables = super.getAllTables(source, schema); return `Schema([ - ${tables.map((table) => this.generateTable(table.name, table.columns)).join(',\n ')} + ${tables.map((table) => this.generateTable(table.name, table.columns, options)).join(',\n ')} ]); `; } - private generateTable(name: string, columns: ColumnDefinition[]): string { + private generateTable(name: string, columns: ColumnDefinition[], options?: GenerateSchemaOptions): string { + const generated = columns.map((c, i) => { + const last = i == columns.length - 1; + const base = this.generateColumn(c); + let withFormatting: string; + if (last) { + withFormatting = ` ${base}`; + } else { + withFormatting = ` ${base},`; + } + + if (options?.includeTypeComments && c.originalType != null) { + return `${withFormatting} // ${c.originalType}`; + } else { + return withFormatting; + } + }); return `Table('${name}', [ - ${columns.map((c) => this.generateColumn(c)).join(',\n ')} +${generated.join('\n')} ])`; } diff --git a/packages/sync-rules/src/ExpressionType.ts b/packages/sync-rules/src/ExpressionType.ts index 87b52c6e9..ad46c4408 100644 --- a/packages/sync-rules/src/ExpressionType.ts +++ b/packages/sync-rules/src/ExpressionType.ts @@ -4,11 +4,12 @@ export const TYPE_TEXT = 2; export const TYPE_INTEGER = 4; export const TYPE_REAL = 8; -export type SqliteType = 'null' | 'blob' | 'text' | 'integer' | 'real'; +export type SqliteType = 'null' | 'blob' | 'text' | 'integer' | 'real' | 'numeric'; export interface ColumnDefinition { name: string; type: ExpressionType; + originalType?: string; } export class ExpressionType { @@ -34,7 +35,7 @@ export class ExpressionType { return new ExpressionType(typeFlags); } - static fromTypeText(type: SqliteType | 'numeric') { + static fromTypeText(type: SqliteType) { if (type == 'null') { return ExpressionType.NONE; } else if (type == 'blob') { @@ -72,3 +73,28 @@ export class ExpressionType { return this.typeFlags == TYPE_NONE; } } + +/** + * Here for backwards-compatibility only. 
+ */ +export function expressionTypeFromPostgresType(type: string | undefined): ExpressionType { + if (type?.endsWith('[]')) { + return ExpressionType.TEXT; + } + switch (type) { + case 'bool': + return ExpressionType.INTEGER; + case 'bytea': + return ExpressionType.BLOB; + case 'int2': + case 'int4': + case 'int8': + case 'oid': + return ExpressionType.INTEGER; + case 'float4': + case 'float8': + return ExpressionType.REAL; + default: + return ExpressionType.TEXT; + } +} diff --git a/packages/sync-rules/src/SchemaGenerator.ts b/packages/sync-rules/src/SchemaGenerator.ts index 91bf48942..18e111b57 100644 --- a/packages/sync-rules/src/SchemaGenerator.ts +++ b/packages/sync-rules/src/SchemaGenerator.ts @@ -2,6 +2,10 @@ import { ColumnDefinition } from './ExpressionType.js'; import { SqlSyncRules } from './SqlSyncRules.js'; import { SourceSchema } from './types.js'; +export interface GenerateSchemaOptions { + includeTypeComments?: boolean; +} + export abstract class SchemaGenerator { protected getAllTables(source: SqlSyncRules, schema: SourceSchema) { let tables: Record> = {}; @@ -33,5 +37,5 @@ export abstract class SchemaGenerator { abstract readonly mediaType: string; abstract readonly fileName: string; - abstract generate(source: SqlSyncRules, schema: SourceSchema): string; + abstract generate(source: SqlSyncRules, schema: SourceSchema, options?: GenerateSchemaOptions): string; } diff --git a/packages/sync-rules/src/SqlBucketDescriptor.ts b/packages/sync-rules/src/SqlBucketDescriptor.ts index 459178ee1..0af7b1e33 100644 --- a/packages/sync-rules/src/SqlBucketDescriptor.ts +++ b/packages/sync-rules/src/SqlBucketDescriptor.ts @@ -2,6 +2,7 @@ import { IdSequence } from './IdSequence.js'; import { SourceTableInterface } from './SourceTableInterface.js'; import { SqlDataQuery } from './SqlDataQuery.js'; import { SqlParameterQuery } from './SqlParameterQuery.js'; +import { SyncRulesOptions } from './SqlSyncRules.js'; import { StaticSqlParameterQuery } from './StaticSqlParameterQuery.js'; import { TablePattern } from './TablePattern.js'; import { SqlRuleError } from './errors.js'; @@ -29,7 +30,10 @@ export class SqlBucketDescriptor { name: string; bucket_parameters?: string[]; - constructor(name: string, public idSequence: IdSequence) { + constructor( + name: string, + public idSequence: IdSequence + ) { this.name = name; } @@ -42,11 +46,11 @@ export class SqlBucketDescriptor { parameterIdSequence = new IdSequence(); - addDataQuery(sql: string, schema?: SourceSchema): QueryParseResult { + addDataQuery(sql: string, options: SyncRulesOptions): QueryParseResult { if (this.bucket_parameters == null) { throw new Error('Bucket parameters must be defined'); } - const dataRows = SqlDataQuery.fromSql(this.name, this.bucket_parameters, sql, schema); + const dataRows = SqlDataQuery.fromSql(this.name, this.bucket_parameters, sql, options); dataRows.ruleId = this.idSequence.nextId(); @@ -58,8 +62,8 @@ export class SqlBucketDescriptor { }; } - addParameterQuery(sql: string, schema: SourceSchema | undefined, options: QueryParseOptions): QueryParseResult { - const parameterQuery = SqlParameterQuery.fromSql(this.name, sql, schema, options); + addParameterQuery(sql: string, options: QueryParseOptions): QueryParseResult { + const parameterQuery = SqlParameterQuery.fromSql(this.name, sql, options); if (this.bucket_parameters == null) { this.bucket_parameters = parameterQuery.bucket_parameters; } else { diff --git a/packages/sync-rules/src/SqlDataQuery.ts b/packages/sync-rules/src/SqlDataQuery.ts index 
dc5e494f2..6f082a96a 100644 --- a/packages/sync-rules/src/SqlDataQuery.ts +++ b/packages/sync-rules/src/SqlDataQuery.ts @@ -1,34 +1,25 @@ import { JSONBig } from '@powersync/service-jsonbig'; -import { parse, SelectedColumn } from 'pgsql-ast-parser'; +import { parse } from 'pgsql-ast-parser'; +import { BaseSqlDataQuery } from './BaseSqlDataQuery.js'; import { SqlRuleError } from './errors.js'; -import { ColumnDefinition, ExpressionType } from './ExpressionType.js'; +import { ExpressionType } from './ExpressionType.js'; import { SourceTableInterface } from './SourceTableInterface.js'; import { SqlTools } from './sql_filters.js'; import { castAsText } from './sql_functions.js'; import { checkUnsupportedFeatures, isClauseError } from './sql_support.js'; +import { SyncRulesOptions } from './SqlSyncRules.js'; import { TablePattern } from './TablePattern.js'; -import { - EvaluationResult, - ParameterMatchClause, - QueryParameters, - QuerySchema, - SourceSchema, - SourceSchemaTable, - SqliteJsonRow, - SqliteRow -} from './types.js'; -import { filterJsonRow, getBucketId, isSelectStatement } from './utils.js'; import { TableQuerySchema } from './TableQuerySchema.js'; +import { EvaluationResult, ParameterMatchClause, QuerySchema, SqliteRow } from './types.js'; +import { getBucketId, isSelectStatement } from './utils.js'; -interface RowValueExtractor { - extract(tables: QueryParameters, into: SqliteRow): void; - getTypes(schema: QuerySchema, into: Record): void; -} +export class SqlDataQuery extends BaseSqlDataQuery { + filter?: ParameterMatchClause; -export class SqlDataQuery { - static fromSql(descriptor_name: string, bucket_parameters: string[], sql: string, schema?: SourceSchema) { + static fromSql(descriptor_name: string, bucket_parameters: string[], sql: string, options: SyncRulesOptions) { const parsed = parse(sql, { locationTracking: true }); const rows = new SqlDataQuery(); + const schema = options.schema; if (parsed.length > 1) { throw new SqlRuleError('Only a single SELECT statement is supported', sql, parsed[1]?._location); @@ -50,7 +41,7 @@ export class SqlDataQuery { } const alias: string = tableRef.alias ?? tableRef.name; - const sourceTable = new TablePattern(tableRef.schema, tableRef.name); + const sourceTable = new TablePattern(tableRef.schema ?? options.defaultSchema, tableRef.name); let querySchema: QuerySchema | undefined = undefined; if (schema) { const tables = schema.getTables(sourceTable); @@ -122,7 +113,9 @@ export class SqlDataQuery { output[name] = clause.evaluate(tables); }, getTypes(schema, into) { - into[name] = { name, type: clause.getType(schema) }; + const def = clause.getColumnDefinition(schema); + + into[name] = { name, type: def?.type ?? ExpressionType.NONE, originalType: def?.originalType }; } }); } else { @@ -151,7 +144,7 @@ export class SqlDataQuery { // Not performing schema-based validation - assume there is an id hasId = true; } else { - const idType = querySchema.getType(alias, 'id'); + const idType = querySchema.getColumn(alias, 'id')?.type ?? 
ExpressionType.NONE; if (!idType.isNone()) { hasId = true; } @@ -170,50 +163,6 @@ export class SqlDataQuery { return rows; } - sourceTable?: TablePattern; - table?: string; - sql?: string; - columns?: SelectedColumn[]; - extractors: RowValueExtractor[] = []; - filter?: ParameterMatchClause; - descriptor_name?: string; - bucket_parameters?: string[]; - tools?: SqlTools; - - ruleId?: string; - - errors: SqlRuleError[] = []; - - constructor() {} - - applies(table: SourceTableInterface) { - return this.sourceTable?.matches(table); - } - - addSpecialParameters(table: SourceTableInterface, row: SqliteRow) { - if (this.sourceTable!.isWildcard) { - return { - ...row, - _table_suffix: this.sourceTable!.suffix(table.table) - }; - } else { - return row; - } - } - - getOutputName(sourceTable: string) { - if (this.isUnaliasedWildcard()) { - // Wildcard without alias - use source - return sourceTable; - } else { - return this.table!; - } - } - - isUnaliasedWildcard() { - return this.sourceTable!.isWildcard && this.table == this.sourceTable!.tablePattern; - } - evaluateRow(table: SourceTableInterface, row: SqliteRow): EvaluationResult[] { try { const tables = { [this.table!]: this.addSpecialParameters(table, row) }; @@ -248,71 +197,4 @@ export class SqlDataQuery { return [{ error: e.message ?? `Evaluating data query failed` }]; } } - - private transformRow(tables: QueryParameters): SqliteJsonRow { - let result: SqliteRow = {}; - for (let extractor of this.extractors) { - extractor.extract(tables, result); - } - return filterJsonRow(result); - } - - columnOutputNames(): string[] { - return this.columns!.map((c) => { - return this.tools!.getOutputName(c); - }); - } - - getColumnOutputs(schema: SourceSchema): { name: string; columns: ColumnDefinition[] }[] { - let result: { name: string; columns: ColumnDefinition[] }[] = []; - - if (this.isUnaliasedWildcard()) { - // Separate results - for (let schemaTable of schema.getTables(this.sourceTable!)) { - let output: Record = {}; - - this.getColumnOutputsFor(schemaTable, output); - - result.push({ - name: this.getOutputName(schemaTable.table), - columns: Object.values(output) - }); - } - } else { - // Merged results - let output: Record = {}; - for (let schemaTable of schema.getTables(this.sourceTable!)) { - this.getColumnOutputsFor(schemaTable, output); - } - result.push({ - name: this.table!, - columns: Object.values(output) - }); - } - - return result; - } - - private getColumnOutputsFor(schemaTable: SourceSchemaTable, output: Record) { - const querySchema: QuerySchema = { - getType: (table, column) => { - if (table == this.table!) { - return schemaTable.getType(column) ?? ExpressionType.NONE; - } else { - // TODO: bucket parameters? - return ExpressionType.NONE; - } - }, - getColumns: (table) => { - if (table == this.table!) 
{ - return schemaTable.getColumns(); - } else { - return []; - } - } - }; - for (let extractor of this.extractors) { - extractor.getTypes(querySchema, output); - } - } } diff --git a/packages/sync-rules/src/SqlParameterQuery.ts b/packages/sync-rules/src/SqlParameterQuery.ts index db22937a9..92c6cf527 100644 --- a/packages/sync-rules/src/SqlParameterQuery.ts +++ b/packages/sync-rules/src/SqlParameterQuery.ts @@ -23,6 +23,8 @@ import { SqliteRow } from './types.js'; import { filterJsonRow, getBucketId, isJsonValue, isSelectStatement } from './utils.js'; +import { SyncRulesOptions } from './SqlSyncRules.js'; +import { TableValuedFunctionSqlParameterQuery } from './TableValuedFunctionSqlParameterQuery.js'; /** * Represents a parameter query, such as: @@ -34,11 +36,11 @@ export class SqlParameterQuery { static fromSql( descriptor_name: string, sql: string, - schema?: SourceSchema, - options?: QueryParseOptions + options: QueryParseOptions ): SqlParameterQuery | StaticSqlParameterQuery { const parsed = parse(sql, { locationTracking: true }); const rows = new SqlParameterQuery(); + const schema = options?.schema; if (parsed.length > 1) { throw new SqlRuleError('Only a single SELECT statement is supported', sql, parsed[1]?._location); @@ -56,11 +58,16 @@ export class SqlParameterQuery { rows.errors.push(...checkUnsupportedFeatures(sql, q)); - if (q.from.length != 1 || q.from[0].type != 'table') { + if (q.from.length != 1) { throw new SqlRuleError('Must SELECT from a single table', sql, q.from?.[0]._location); + } else if (q.from[0].type == 'call') { + const from = q.from[0]; + return TableValuedFunctionSqlParameterQuery.fromSql(descriptor_name, sql, from, q, options); + } else if (q.from[0].type == 'statement') { + throw new SqlRuleError('Subqueries are not supported yet', sql, q.from?.[0]._location); } - const tableRef = q.from?.[0].name; + const tableRef = q.from[0].name; if (tableRef?.name == null) { throw new SqlRuleError('Must SELECT from a single table', sql, q.from?.[0]._location); } @@ -70,7 +77,7 @@ export class SqlParameterQuery { new SqlRuleError('Table aliases not supported in parameter queries', sql, q.from?.[0]._location) ); } - const sourceTable = new TablePattern(tableRef.schema, tableRef.name); + const sourceTable = new TablePattern(tableRef.schema ?? options.defaultSchema, tableRef.name); let querySchema: QuerySchema | undefined = undefined; if (schema) { const tables = schema.getTables(sourceTable); @@ -143,7 +150,7 @@ export class SqlParameterQuery { rows.tools = tools; rows.errors.push(...tools.errors); - if (rows.usesDangerousRequestParameters && !options?.accept_potentially_dangerous_queries) { + if (rows.usesDangerousRequestParameters && !options.accept_potentially_dangerous_queries) { let err = new SqlRuleError( "Potentially dangerous query based on parameters set by the client. 
The client can send any value for these parameters so it's not a good place to do authorization.", sql diff --git a/packages/sync-rules/src/SqlSyncRules.ts b/packages/sync-rules/src/SqlSyncRules.ts index 15b84adc6..ca5f05d85 100644 --- a/packages/sync-rules/src/SqlSyncRules.ts +++ b/packages/sync-rules/src/SqlSyncRules.ts @@ -1,5 +1,6 @@ -import { LineCounter, parseDocument, Scalar, YAMLMap, YAMLSeq } from 'yaml'; +import { isScalar, LineCounter, parseDocument, Scalar, YAMLMap, YAMLSeq } from 'yaml'; import { SqlRuleError, SyncRulesErrors, YamlError } from './errors.js'; +import { SqlEventDescriptor } from './events/SqlEventDescriptor.js'; import { IdSequence } from './IdSequence.js'; import { validateSyncRulesSchema } from './json_schema.js'; import { SourceTableInterface } from './SourceTableInterface.js'; @@ -25,15 +26,28 @@ import { const ACCEPT_POTENTIALLY_DANGEROUS_QUERIES = Symbol('ACCEPT_POTENTIALLY_DANGEROUS_QUERIES'); +export interface SyncRulesOptions { + schema?: SourceSchema; + /** + * The default schema to use when only a table name is specified. + * + * 'public' for Postgres, default database for MongoDB/MySQL. + */ + defaultSchema: string; + + throwOnError?: boolean; +} + export class SqlSyncRules implements SyncRules { bucket_descriptors: SqlBucketDescriptor[] = []; + event_descriptors: SqlEventDescriptor[] = []; idSequence = new IdSequence(); content: string; errors: YamlError[] = []; - static validate(yaml: string, options?: { schema?: SourceSchema }): YamlError[] { + static validate(yaml: string, options: SyncRulesOptions): YamlError[] { try { const rules = this.fromYaml(yaml, options); return rules.errors; @@ -48,9 +62,9 @@ export class SqlSyncRules implements SyncRules { } } - static fromYaml(yaml: string, options?: { throwOnError?: boolean; schema?: SourceSchema }) { - const throwOnError = options?.throwOnError ?? true; - const schema = options?.schema; + static fromYaml(yaml: string, options: SyncRulesOptions) { + const throwOnError = options.throwOnError ?? 
true; + const schema = options.schema; const lineCounter = new LineCounter(); const parsed = parseDocument(yaml, { @@ -98,7 +112,8 @@ export class SqlSyncRules implements SyncRules { const accept_potentially_dangerous_queries = value.get('accept_potentially_dangerous_queries', true)?.value == true; - const options: QueryParseOptions = { + const queryOptions: QueryParseOptions = { + ...options, accept_potentially_dangerous_queries }; const parameters = value.get('parameters', true) as unknown; @@ -108,16 +123,16 @@ export class SqlSyncRules implements SyncRules { if (parameters instanceof Scalar) { rules.withScalar(parameters, (q) => { - return descriptor.addParameterQuery(q, schema, options); + return descriptor.addParameterQuery(q, queryOptions); }); } else if (parameters instanceof YAMLSeq) { for (let item of parameters.items) { rules.withScalar(item, (q) => { - return descriptor.addParameterQuery(q, schema, options); + return descriptor.addParameterQuery(q, queryOptions); }); } } else { - descriptor.addParameterQuery('SELECT', schema, options); + descriptor.addParameterQuery('SELECT', queryOptions); } if (!(dataQueries instanceof YAMLSeq)) { @@ -126,12 +141,41 @@ export class SqlSyncRules implements SyncRules { } for (let query of dataQueries.items) { rules.withScalar(query, (q) => { - return descriptor.addDataQuery(q, schema); + return descriptor.addDataQuery(q, queryOptions); }); } rules.bucket_descriptors.push(descriptor); } + const eventMap = parsed.get('event_definitions') as YAMLMap; + for (const event of eventMap?.items ?? []) { + const { key, value } = event as { key: Scalar; value: YAMLSeq }; + + if (false == value instanceof YAMLMap) { + rules.errors.push(new YamlError(new Error(`Event definitions must be objects.`))); + continue; + } + + const payloads = value.get('payloads') as YAMLSeq; + if (false == payloads instanceof YAMLSeq) { + rules.errors.push(new YamlError(new Error(`Event definition payloads must be an array.`))); + continue; + } + + const eventDescriptor = new SqlEventDescriptor(key.toString(), rules.idSequence); + for (let item of payloads.items) { + if (!isScalar(item)) { + rules.errors.push(new YamlError(new Error(`Payload queries for events must be scalar.`))); + continue; + } + rules.withScalar(item, (q) => { + return eventDescriptor.addSourceQuery(q, options); + }); + } + + rules.event_descriptors.push(eventDescriptor); + } + // Validate that there are no additional properties. // Since these errors don't contain line numbers, do this last. 
const valid = validateSyncRulesSchema(parsed.toJSON()); @@ -277,18 +321,42 @@ export class SqlSyncRules implements SyncRules { } getSourceTables(): TablePattern[] { - let sourceTables = new Map(); - for (let bucket of this.bucket_descriptors) { - for (let r of bucket.getSourceTables()) { + const sourceTables = new Map(); + for (const bucket of this.bucket_descriptors) { + for (const r of bucket.getSourceTables()) { const key = `${r.connectionTag}.${r.schema}.${r.tablePattern}`; sourceTables.set(key, r); } } + + for (const event of this.event_descriptors) { + for (const r of event.getSourceTables()) { + const key = `${r.connectionTag}.${r.schema}.${r.tablePattern}`; + sourceTables.set(key, r); + } + } + return [...sourceTables.values()]; } + getEventTables(): TablePattern[] { + const eventTables = new Map(); + + for (const event of this.event_descriptors) { + for (const r of event.getSourceTables()) { + const key = `${r.connectionTag}.${r.schema}.${r.tablePattern}`; + eventTables.set(key, r); + } + } + return [...eventTables.values()]; + } + + tableTriggersEvent(table: SourceTableInterface): boolean { + return this.event_descriptors.some((bucket) => bucket.tableTriggersEvent(table)); + } + tableSyncsData(table: SourceTableInterface): boolean { - for (let bucket of this.bucket_descriptors) { + for (const bucket of this.bucket_descriptors) { if (bucket.tableSyncsData(table)) { return true; } diff --git a/packages/sync-rules/src/StaticSchema.ts b/packages/sync-rules/src/StaticSchema.ts index a807d471c..aa27114c6 100644 --- a/packages/sync-rules/src/StaticSchema.ts +++ b/packages/sync-rules/src/StaticSchema.ts @@ -1,4 +1,4 @@ -import { ColumnDefinition, ExpressionType } from './ExpressionType.js'; +import { ColumnDefinition, ExpressionType, expressionTypeFromPostgresType, SqliteType } from './ExpressionType.js'; import { SourceTableInterface } from './SourceTableInterface.js'; import { TablePattern } from './TablePattern.js'; import { SourceSchema, SourceSchemaTable } from './types.js'; @@ -14,11 +14,28 @@ export interface SourceTableDefinition { } export interface SourceColumnDefinition { + /** + * Column name. + */ name: string; + + /** + * Option 1: SQLite type flags - see ExpressionType.typeFlags. + * Option 2: SQLite type name in lowercase - 'text' | 'integer' | 'real' | 'numeric' | 'blob' | 'null' + */ + sqlite_type?: number | SqliteType; + /** - * Postgres type. + * Type name from the source database, e.g. "character varying(255)[]" */ - pg_type: string; + internal_type?: string; + + /** + * Postgres type, kept for backwards-compatibility. 
+ * + * @deprecated - use internal_type instead + */ + pg_type?: string; } export interface SourceConnectionDefinition { @@ -43,8 +60,8 @@ class SourceTableDetails implements SourceTableInterface, SourceSchemaTable { ); } - getType(column: string): ExpressionType | undefined { - return this.columns[column]?.type; + getColumn(column: string): ColumnDefinition | undefined { + return this.columns[column]; } getColumns(): ColumnDefinition[] { @@ -75,28 +92,20 @@ export class StaticSchema implements SourceSchema { function mapColumn(column: SourceColumnDefinition): ColumnDefinition { return { name: column.name, - type: mapType(column.pg_type) + type: mapColumnType(column), + originalType: column.internal_type }; } -function mapType(type: string | undefined): ExpressionType { - if (type?.endsWith('[]')) { - return ExpressionType.TEXT; - } - switch (type) { - case 'bool': - return ExpressionType.INTEGER; - case 'bytea': - return ExpressionType.BLOB; - case 'int2': - case 'int4': - case 'int8': - case 'oid': - return ExpressionType.INTEGER; - case 'float4': - case 'float8': - return ExpressionType.REAL; - default: - return ExpressionType.TEXT; +function mapColumnType(column: SourceColumnDefinition): ExpressionType { + if (typeof column.sqlite_type == 'number') { + return ExpressionType.of(column.sqlite_type); + } else if (typeof column.sqlite_type == 'string') { + return ExpressionType.fromTypeText(column.sqlite_type); + } else if (column.pg_type != null) { + // We still handle these types for backwards-compatibility of old schemas + return expressionTypeFromPostgresType(column.pg_type); + } else { + throw new Error(`Cannot determine SQLite type of ${JSON.stringify(column)}`); } } diff --git a/packages/sync-rules/src/TablePattern.ts b/packages/sync-rules/src/TablePattern.ts index d6d3494ba..55c90ec9e 100644 --- a/packages/sync-rules/src/TablePattern.ts +++ b/packages/sync-rules/src/TablePattern.ts @@ -1,7 +1,6 @@ import { SourceTableInterface } from './SourceTableInterface.js'; export const DEFAULT_TAG = 'default'; -export const DEFAULT_SCHEMA = 'public'; /** * Some pattern matching SourceTables. 
@@ -12,8 +11,7 @@ export class TablePattern { public readonly schema: string; public readonly tablePattern: string; - constructor(schema: string | undefined, tablePattern: string) { - schema ??= DEFAULT_SCHEMA; + constructor(schema: string, tablePattern: string) { const splitSchema = schema.split('.'); if (splitSchema.length > 2) { throw new Error(`Invalid schema: ${schema}`); diff --git a/packages/sync-rules/src/TableQuerySchema.ts b/packages/sync-rules/src/TableQuerySchema.ts index 748ebaab6..5c68ae1e9 100644 --- a/packages/sync-rules/src/TableQuerySchema.ts +++ b/packages/sync-rules/src/TableQuerySchema.ts @@ -1,20 +1,23 @@ -import { ColumnDefinition, ExpressionType } from './ExpressionType.js'; +import { ColumnDefinition } from './ExpressionType.js'; import { QuerySchema, SourceSchemaTable } from './types.js'; export class TableQuerySchema implements QuerySchema { - constructor(private tables: SourceSchemaTable[], private alias: string) {} + constructor( + private tables: SourceSchemaTable[], + private alias: string + ) {} - getType(table: string, column: string): ExpressionType { + getColumn(table: string, column: string): ColumnDefinition | undefined { if (table != this.alias) { - return ExpressionType.NONE; + return undefined; } for (let table of this.tables) { - const t = table.getType(column); + const t = table.getColumn(column); if (t != null) { return t; } } - return ExpressionType.NONE; + return undefined; } getColumns(table: string): ColumnDefinition[] { diff --git a/packages/sync-rules/src/TableValuedFunctionSqlParameterQuery.ts b/packages/sync-rules/src/TableValuedFunctionSqlParameterQuery.ts new file mode 100644 index 000000000..5537fe2bb --- /dev/null +++ b/packages/sync-rules/src/TableValuedFunctionSqlParameterQuery.ts @@ -0,0 +1,196 @@ +import { FromCall, SelectedColumn, SelectFromStatement } from 'pgsql-ast-parser'; +import { SqlRuleError } from './errors.js'; +import { SqlTools } from './sql_filters.js'; +import { checkUnsupportedFeatures, isClauseError, isParameterValueClause, sqliteBool } from './sql_support.js'; +import { TABLE_VALUED_FUNCTIONS, TableValuedFunction } from './TableValuedFunctions.js'; +import { + ParameterValueClause, + ParameterValueSet, + QueryParseOptions, + RequestParameters, + SqliteJsonValue, + SqliteRow +} from './types.js'; +import { getBucketId, isJsonValue } from './utils.js'; + +/** + * Represents a parameter query using a table-valued function. + * + * Right now this only supports json_each: + * + * SELECT json_each.value as v FROM json_each(request.parameters() -> 'array') + * + * This can currently not be combined with parameter table queries or multiple table-valued functions. + */ +export class TableValuedFunctionSqlParameterQuery { + static fromSql( + descriptor_name: string, + sql: string, + call: FromCall, + q: SelectFromStatement, + options?: QueryParseOptions + ): TableValuedFunctionSqlParameterQuery { + const query = new TableValuedFunctionSqlParameterQuery(); + + query.errors.push(...checkUnsupportedFeatures(sql, q)); + + if (!(call.function.name in TABLE_VALUED_FUNCTIONS)) { + query.errors.push(new SqlRuleError(`Table-valued function ${call.function.name} is not defined.`, sql, call)); + return query; + } + + const callTable = call.alias?.name ?? 
call.function.name; + const callExpression = call.args[0]; + + const tools = new SqlTools({ + table: callTable, + parameter_tables: ['token_parameters', 'user_parameters', callTable], + supports_parameter_expressions: true, + sql + }); + const where = q.where; + + const filter = tools.compileParameterValueExtractor(where); + const callClause = tools.compileParameterValueExtractor(callExpression); + const columns = q.columns ?? []; + const bucket_parameters = columns.map((column) => tools.getOutputName(column)); + + query.sql = sql; + query.descriptor_name = descriptor_name; + query.bucket_parameters = bucket_parameters; + query.columns = columns; + query.tools = tools; + query.function = TABLE_VALUED_FUNCTIONS[call.function.name]!; + query.callTableName = callTable; + if (!isClauseError(callClause)) { + query.callClause = callClause; + } + if (!isClauseError(filter)) { + query.filter = filter; + } + + for (let column of columns) { + if (column.alias != null) { + tools.checkSpecificNameCase(column.alias); + } + const name = tools.getSpecificOutputName(column); + const extractor = tools.compileParameterValueExtractor(column.expr); + if (isClauseError(extractor)) { + // Error logged already + continue; + } + query.parameter_extractors[name] = extractor; + } + + query.errors.push(...tools.errors); + + if (query.usesDangerousRequestParameters && !options?.accept_potentially_dangerous_queries) { + let err = new SqlRuleError( + "Potentially dangerous query based on parameters set by the client. The client can send any value for these parameters so it's not a good place to do authorization.", + sql + ); + err.type = 'warning'; + query.errors.push(err); + } + return query; + } + + sql?: string; + columns?: SelectedColumn[]; + parameter_extractors: Record<string, ParameterValueClause> = {}; + descriptor_name?: string; + /** _Output_ bucket parameters */ + bucket_parameters?: string[]; + id?: string; + tools?: SqlTools; + + filter?: ParameterValueClause; + callClause?: ParameterValueClause; + function?: TableValuedFunction; + callTableName?: string; + + errors: SqlRuleError[] = []; + + getStaticBucketIds(parameters: RequestParameters): string[] { + if (this.filter == null || this.callClause == null) { + // Error in filter clause + return []; + } + + const valueString = this.callClause.lookupParameterValue(parameters); + const rows = this.function!.call([valueString]); + let total: string[] = []; + for (let row of rows) { + total.push(...this.getIndividualBucketIds(row, parameters)); + } + return total; + } + + private getIndividualBucketIds(row: SqliteRow, parameters: RequestParameters): string[] { + const mergedParams: ParameterValueSet = { + raw_token_payload: parameters.raw_token_payload, + raw_user_parameters: parameters.raw_user_parameters, + user_id: parameters.user_id, + lookup: (table, column) => { + if (table == this.callTableName) { + return row[column]!; + } else { + return parameters.lookup(table, column); + } + } + }; + const filterValue = this.filter!.lookupParameterValue(mergedParams); + if (sqliteBool(filterValue) === 0n) { + return []; + } + + let result: Record<string, SqliteJsonValue> = {}; + for (let name of this.bucket_parameters!) 
{ + const value = this.parameter_extractors[name].lookupParameterValue(mergedParams); + if (isJsonValue(value)) { + result[`bucket.${name}`] = value; + } else { + throw new Error(`Invalid parameter value: ${value}`); + } + } + + return [getBucketId(this.descriptor_name!, this.bucket_parameters!, result)]; + } + + get hasAuthenticatedBucketParameters(): boolean { + // select where request.jwt() ->> 'role' == 'authorized' + // we do not count this as a sufficient check + // const authenticatedFilter = this.filter!.usesAuthenticatedRequestParameters; + + // select request.user_id() as user_id + const authenticatedExtractor = + Object.values(this.parameter_extractors).find( + (clause) => isParameterValueClause(clause) && clause.usesAuthenticatedRequestParameters + ) != null; + + // select value from json_each(request.jwt() ->> 'project_ids') + const authenticatedArgument = this.callClause?.usesAuthenticatedRequestParameters ?? false; + + return authenticatedExtractor || authenticatedArgument; + } + + get usesUnauthenticatedRequestParameters(): boolean { + // select where request.parameters() ->> 'include_comments' + const unauthenticatedFilter = this.filter?.usesUnauthenticatedRequestParameters; + + // select request.parameters() ->> 'project_id' + const unauthenticatedExtractor = + Object.values(this.parameter_extractors).find( + (clause) => isParameterValueClause(clause) && clause.usesUnauthenticatedRequestParameters + ) != null; + + // select value from json_each(request.parameters() ->> 'project_ids') + const unauthenticatedArgument = this.callClause?.usesUnauthenticatedRequestParameters ?? false; + + return unauthenticatedFilter || unauthenticatedExtractor || unauthenticatedArgument; + } + + get usesDangerousRequestParameters() { + return this.usesUnauthenticatedRequestParameters && !this.hasAuthenticatedBucketParameters; + } +} diff --git a/packages/sync-rules/src/TableValuedFunctions.ts b/packages/sync-rules/src/TableValuedFunctions.ts new file mode 100644 index 000000000..e3e40165c --- /dev/null +++ b/packages/sync-rules/src/TableValuedFunctions.ts @@ -0,0 +1,45 @@ +import { SqliteJsonValue, SqliteRow, SqliteValue } from './types.js'; +import { jsonValueToSqlite } from './utils.js'; + +export interface TableValuedFunction { + readonly name: string; + call: (args: SqliteValue[]) => SqliteRow[]; + detail: string; + documentation: string; +} + +export const JSON_EACH: TableValuedFunction = { + name: 'json_each', + call(args: SqliteValue[]) { + if (args.length != 1) { + throw new Error(`json_each expects 1 argument, got ${args.length}`); + } + const valueString = args[0]; + if (valueString === null) { + return []; + } else if (typeof valueString !== 'string') { + throw new Error(`Expected json_each to be called with a string, got ${valueString}`); + } + let values: SqliteJsonValue[] = []; + try { + values = JSON.parse(valueString); + } catch (e) { + throw new Error('Expected JSON string'); + } + if (!Array.isArray(values)) { + throw new Error('Expected an array'); + } + + return values.map((v) => { + return { + value: jsonValueToSqlite(v) + }; + }); + }, + detail: 'Each element of a JSON array', + documentation: 'Returns each element of a JSON array as a separate row.' 
+}; + +export const TABLE_VALUED_FUNCTIONS: Record<string, TableValuedFunction> = { + json_each: JSON_EACH +}; diff --git a/packages/sync-rules/src/TsSchemaGenerator.ts b/packages/sync-rules/src/TsSchemaGenerator.ts index 9e3e56e51..1a0dba58c 100644 --- a/packages/sync-rules/src/TsSchemaGenerator.ts +++ b/packages/sync-rules/src/TsSchemaGenerator.ts @@ -1,5 +1,5 @@ import { ColumnDefinition, TYPE_INTEGER, TYPE_REAL, TYPE_TEXT } from './ExpressionType.js'; -import { SchemaGenerator } from './SchemaGenerator.js'; +import { GenerateSchemaOptions, SchemaGenerator } from './SchemaGenerator.js'; import { SqlSyncRules } from './SqlSyncRules.js'; import { SourceSchema } from './types.js'; @@ -47,12 +47,12 @@ export class TsSchemaGenerator extends SchemaGenerator { } } - generate(source: SqlSyncRules, schema: SourceSchema): string { + generate(source: SqlSyncRules, schema: SourceSchema, options?: GenerateSchemaOptions): string { const tables = super.getAllTables(source, schema); return `${this.generateImports()} -${tables.map((table) => this.generateTable(table.name, table.columns)).join('\n\n')} +${tables.map((table) => this.generateTable(table.name, table.columns, options)).join('\n\n')} export const AppSchema = new Schema({ ${tables.map((table) => table.name).join(',\n ')} @@ -81,11 +81,28 @@ ${this.generateTypeExports()}`; } } - private generateTable(name: string, columns: ColumnDefinition[]): string { + private generateTable(name: string, columns: ColumnDefinition[], options?: GenerateSchemaOptions): string { + const generated = columns.map((c, i) => { + const last = i == columns.length - 1; + const base = this.generateColumn(c); + let withFormatting: string; + if (last) { + withFormatting = ` ${base}`; + } else { + withFormatting = ` ${base},`; + } + + if (options?.includeTypeComments && c.originalType != null) { + return `${withFormatting} // ${c.originalType}`; + } else { + return withFormatting; + } + }); + return `const ${name} = new Table( { // id column (text) is automatically included - ${columns.map((c) => this.generateColumn(c)).join(',\n ')} +${generated.join('\n')} }, { indexes: {} } );`; diff --git a/packages/sync-rules/src/errors.ts b/packages/sync-rules/src/errors.ts index d92da1c30..88a86547f 100644 --- a/packages/sync-rules/src/errors.ts +++ b/packages/sync-rules/src/errors.ts @@ -28,7 +28,11 @@ export class SqlRuleError extends Error { location?: ErrorLocation; type: 'warning' | 'fatal' = 'fatal'; - constructor(message: string, public sql: string, location?: NodeLocation | Expr) { + constructor( + message: string, + public sql: string, + location?: NodeLocation | Expr + ) { super(message); this.location = getLocation(location) ?? 
{ start: 0, end: sql.length }; @@ -39,7 +43,10 @@ export class YamlError extends Error { location: ErrorLocation; type: 'warning' | 'fatal' = 'fatal'; - constructor(public source: Error, location?: ErrorLocation) { + constructor( + public source: Error, + location?: ErrorLocation + ) { super(source.message); if (location == null && source instanceof yaml.YAMLError) { diff --git a/packages/sync-rules/src/events/SqlEventDescriptor.ts b/packages/sync-rules/src/events/SqlEventDescriptor.ts new file mode 100644 index 000000000..c57dd7d09 --- /dev/null +++ b/packages/sync-rules/src/events/SqlEventDescriptor.ts @@ -0,0 +1,68 @@ +import { SqlRuleError } from '../errors.js'; +import { IdSequence } from '../IdSequence.js'; +import { SourceTableInterface } from '../SourceTableInterface.js'; +import { QueryParseResult } from '../SqlBucketDescriptor.js'; +import { SyncRulesOptions } from '../SqlSyncRules.js'; +import { TablePattern } from '../TablePattern.js'; +import { EvaluateRowOptions } from '../types.js'; +import { EvaluatedEventRowWithErrors, SqlEventSourceQuery } from './SqlEventSourceQuery.js'; + +/** + * A sync rules event which is triggered from a SQL table change. + */ +export class SqlEventDescriptor { + name: string; + source_queries: SqlEventSourceQuery[] = []; + + constructor( + name: string, + public idSequence: IdSequence + ) { + this.name = name; + } + + addSourceQuery(sql: string, options: SyncRulesOptions): QueryParseResult { + const source = SqlEventSourceQuery.fromSql(this.name, sql, options); + + // Each source query should be for a unique table + const existingSourceQuery = this.source_queries.find((q) => q.table == source.table); + if (existingSourceQuery) { + return { + parsed: false, + errors: [new SqlRuleError('Each payload query should query a unique table', sql)] + }; + } + + source.ruleId = this.idSequence.nextId(); + this.source_queries.push(source); + + return { + parsed: true, + errors: source.errors + }; + } + + evaluateRowWithErrors(options: EvaluateRowOptions): EvaluatedEventRowWithErrors { + // There should only be 1 payload result per source query + const matchingQuery = this.source_queries.find((q) => q.applies(options.sourceTable)); + if (!matchingQuery) { + return { + errors: [{ error: `No matching source query found for table ${options.sourceTable.table}` }] + }; + } + + return matchingQuery.evaluateRowWithErrors(options.sourceTable, options.record); + } + + getSourceTables(): Set<TablePattern> { + let result = new Set<TablePattern>(); + for (let query of this.source_queries) { + result.add(query.sourceTable!); + } + return result; + } + + tableTriggersEvent(table: SourceTableInterface): boolean { + return this.source_queries.some((query) => query.applies(table)); + } +} diff --git a/packages/sync-rules/src/events/SqlEventSourceQuery.ts b/packages/sync-rules/src/events/SqlEventSourceQuery.ts new file mode 100644 index 000000000..c5ca1e1ab --- /dev/null +++ b/packages/sync-rules/src/events/SqlEventSourceQuery.ts @@ -0,0 +1,142 @@ +import { parse } from 'pgsql-ast-parser'; +import { BaseSqlDataQuery } from '../BaseSqlDataQuery.js'; +import { SqlRuleError } from '../errors.js'; +import { ExpressionType } from '../ExpressionType.js'; +import { SourceTableInterface } from '../SourceTableInterface.js'; +import { SqlTools } from '../sql_filters.js'; +import { checkUnsupportedFeatures, isClauseError } from '../sql_support.js'; +import { SyncRulesOptions } from '../SqlSyncRules.js'; +import { TablePattern } from '../TablePattern.js'; +import { TableQuerySchema } from '../TableQuerySchema.js'; 
+import { EvaluationError, QuerySchema, SqliteJsonRow, SqliteRow } from '../types.js'; +import { isSelectStatement } from '../utils.js'; + +export type EvaluatedEventSourceRow = { + data: SqliteJsonRow; + ruleId?: string; +}; + +export type EvaluatedEventRowWithErrors = { + result?: EvaluatedEventSourceRow; + errors: EvaluationError[]; +}; + +/** + * Defines how a Replicated Row is mapped to source parameters for events. + */ +export class SqlEventSourceQuery extends BaseSqlDataQuery { + static fromSql(descriptor_name: string, sql: string, options: SyncRulesOptions) { + const parsed = parse(sql, { locationTracking: true }); + const rows = new SqlEventSourceQuery(); + const schema = options.schema; + + if (parsed.length > 1) { + throw new SqlRuleError('Only a single SELECT statement is supported', sql, parsed[1]?._location); + } + const q = parsed[0]; + if (!isSelectStatement(q)) { + throw new SqlRuleError('Only SELECT statements are supported', sql, q._location); + } + + rows.errors.push(...checkUnsupportedFeatures(sql, q)); + + if (q.from == null || q.from.length != 1 || q.from[0].type != 'table') { + throw new SqlRuleError('Must SELECT from a single table', sql, q.from?.[0]._location); + } + + const tableRef = q.from?.[0].name; + if (tableRef?.name == null) { + throw new SqlRuleError('Must SELECT from a single table', sql, q.from?.[0]._location); + } + const alias: string = tableRef.alias ?? tableRef.name; + + const sourceTable = new TablePattern(tableRef.schema ?? options.defaultSchema, tableRef.name); + let querySchema: QuerySchema | undefined = undefined; + if (schema) { + const tables = schema.getTables(sourceTable); + if (tables.length == 0) { + const e = new SqlRuleError( + `Table ${sourceTable.schema}.${sourceTable.tablePattern} not found`, + sql, + q.from?.[0]?._location + ); + e.type = 'warning'; + + rows.errors.push(e); + } else { + querySchema = new TableQuerySchema(tables, alias); + } + } + + const tools = new SqlTools({ + table: alias, + parameter_tables: [], + value_tables: [alias], + sql, + schema: querySchema + }); + + rows.sourceTable = sourceTable; + rows.table = alias; + rows.sql = sql; + rows.descriptor_name = descriptor_name; + rows.columns = q.columns ?? []; + rows.tools = tools; + + for (let column of q.columns ?? []) { + const name = tools.getOutputName(column); + if (name != '*') { + const clause = tools.compileRowValueExtractor(column.expr); + if (isClauseError(clause)) { + // Error logged already + continue; + } + rows.extractors.push({ + extract: (tables, output) => { + output[name] = clause.evaluate(tables); + }, + getTypes(schema, into) { + const def = clause.getColumnDefinition(schema); + into[name] = { name, type: def?.type ?? ExpressionType.NONE, originalType: def?.originalType }; + } + }); + } else { + rows.extractors.push({ + extract: (tables, output) => { + const row = tables[alias]; + for (let key in row) { + if (key.startsWith('_')) { + continue; + } + output[key] ??= row[key]; + } + }, + getTypes(schema, into) { + for (let column of schema.getColumns(alias)) { + into[column.name] ??= column; + } + } + }); + } + } + rows.errors.push(...tools.errors); + return rows; + } + + evaluateRowWithErrors(table: SourceTableInterface, row: SqliteRow): EvaluatedEventRowWithErrors { + try { + const tables = { [this.table!]: this.addSpecialParameters(table, row) }; + + const data = this.transformRow(tables); + return { + result: { + data, + ruleId: this.ruleId + }, + errors: [] + }; + } catch (e) { + return { errors: [e.message ?? 
`Evaluating data query failed`] }; + } + } +} diff --git a/packages/sync-rules/src/index.ts b/packages/sync-rules/src/index.ts index d72a1e58c..877d52c37 100644 --- a/packages/sync-rules/src/index.ts +++ b/packages/sync-rules/src/index.ts @@ -1,20 +1,22 @@ +export * from './DartSchemaGenerator.js'; export * from './errors.js'; +export * from './events/SqlEventDescriptor.js'; +export * from './events/SqlEventSourceQuery.js'; +export * from './ExpressionType.js'; +export * from './generators.js'; export * from './IdSequence.js'; +export * from './JsLegacySchemaGenerator.js'; +export * from './json_schema.js'; +export * from './request_functions.js'; +export * from './SchemaGenerator.js'; export * from './SourceTableInterface.js'; export * from './sql_filters.js'; export * from './sql_functions.js'; +export * from './SqlDataQuery.js'; +export * from './SqlParameterQuery.js'; export * from './SqlSyncRules.js'; +export * from './StaticSchema.js'; export * from './TablePattern.js'; +export * from './TsSchemaGenerator.js'; export * from './types.js'; export * from './utils.js'; -export * from './SqlParameterQuery.js'; -export * from './json_schema.js'; -export * from './StaticSchema.js'; -export * from './ExpressionType.js'; -export * from './SchemaGenerator.js'; -export * from './DartSchemaGenerator.js'; -export * from './JsLegacySchemaGenerator.js'; -export * from './TsSchemaGenerator.js'; -export * from './generators.js'; -export * from './SqlDataQuery.js'; -export * from './request_functions.js'; diff --git a/packages/sync-rules/src/json_schema.ts b/packages/sync-rules/src/json_schema.ts index dfe614825..b2f9367a9 100644 --- a/packages/sync-rules/src/json_schema.ts +++ b/packages/sync-rules/src/json_schema.ts @@ -44,6 +44,35 @@ export const syncRulesSchema: ajvModule.Schema = { additionalProperties: false } } + }, + event_definitions: { + type: 'object', + description: 'Record of sync replication event definitions', + examples: [ + { + write_checkpoints: { + payloads: ['select user_id, client_id, checkpoint from checkpoints'] + } + } + ], + patternProperties: { + '.*': { + type: ['object'], + required: ['payloads'], + examples: [{ payloads: ['select user_id, client_id, checkpoint from checkpoints'] }], + properties: { + payloads: { + description: 'Queries which extract event payload fields from replicated table rows.', + type: 'array', + items: { + type: 'string' + } + }, + additionalProperties: false, + uniqueItems: true + } + } + } } }, required: ['bucket_definitions'], diff --git a/packages/sync-rules/src/request_functions.ts b/packages/sync-rules/src/request_functions.ts index 941daeb98..b99c88911 100644 --- a/packages/sync-rules/src/request_functions.ts +++ b/packages/sync-rules/src/request_functions.ts @@ -1,9 +1,9 @@ import { ExpressionType } from './ExpressionType.js'; -import { RequestParameters, SqliteValue } from './types.js'; +import { ParameterValueSet, SqliteValue } from './types.js'; export interface SqlParameterFunction { readonly debugName: string; - call: (parameters: RequestParameters) => SqliteValue; + call: (parameters: ParameterValueSet) => SqliteValue; getReturnType(): ExpressionType; /** request.user_id(), request.jwt(), token_parameters.* */ usesAuthenticatedRequestParameters: boolean; @@ -15,7 +15,7 @@ export interface SqlParameterFunction { const request_parameters: SqlParameterFunction = { debugName: 'request.parameters', - call(parameters: RequestParameters) { + call(parameters: ParameterValueSet) { return parameters.raw_user_parameters; }, getReturnType() { @@ 
-30,7 +30,7 @@ const request_parameters: SqlParameterFunction = { const request_jwt: SqlParameterFunction = { debugName: 'request.jwt', - call(parameters: RequestParameters) { + call(parameters: ParameterValueSet) { return parameters.raw_token_payload; }, getReturnType() { @@ -44,7 +44,7 @@ const request_jwt: SqlParameterFunction = { const request_user_id: SqlParameterFunction = { debugName: 'request.user_id', - call(parameters: RequestParameters) { + call(parameters: ParameterValueSet) { return parameters.user_id; }, getReturnType() { diff --git a/packages/sync-rules/src/sql_filters.ts b/packages/sync-rules/src/sql_filters.ts index 4ebe9fb6a..14b41c824 100644 --- a/packages/sync-rules/src/sql_filters.ts +++ b/packages/sync-rules/src/sql_filters.ts @@ -1,7 +1,9 @@ -import { Expr, ExprRef, Name, NodeLocation, QName, QNameAliased, SelectedColumn, parse } from 'pgsql-ast-parser'; +import { JSONBig } from '@powersync/service-jsonbig'; +import { Expr, ExprRef, Name, NodeLocation, QName, QNameAliased, SelectedColumn } from 'pgsql-ast-parser'; import { nil } from 'pgsql-ast-parser/src/utils.js'; -import { ExpressionType, TYPE_NONE } from './ExpressionType.js'; +import { ExpressionType } from './ExpressionType.js'; import { SqlRuleError } from './errors.js'; +import { REQUEST_FUNCTIONS } from './request_functions.js'; import { BASIC_OPERATORS, OPERATOR_IN, @@ -13,6 +15,7 @@ import { SQL_FUNCTIONS, SqlFunction, castOperator, + getOperatorFunction, sqliteTypeOf } from './sql_functions.js'; import { @@ -20,7 +23,6 @@ import { SQLITE_TRUE, andFilters, compileStaticOperator, - getOperatorFunction, isClauseError, isParameterMatchClause, isParameterValueClause, @@ -44,8 +46,6 @@ import { TrueIfParametersMatch } from './types.js'; import { isJsonValue } from './utils.js'; -import { JSONBig } from '@powersync/service-jsonbig'; -import { REQUEST_FUNCTIONS } from './request_functions.js'; export const MATCH_CONST_FALSE: TrueIfParametersMatch = []; export const MATCH_CONST_TRUE: TrueIfParametersMatch = [{}]; @@ -200,8 +200,8 @@ export class SqlTools { evaluate(tables: QueryParameters): SqliteValue { return tables[table]?.[column]; }, - getType(schema) { - return schema.getType(table, column); + getColumnDefinition(schema) { + return schema.getColumn(table, column); } } satisfies RowValueClause; } else { @@ -500,7 +500,8 @@ export class SqlTools { if (expr.type != 'ref') { return false; } - return this.parameter_tables.includes(expr.table?.name ?? ''); + const tableName = expr.table?.name ?? this.default_table; + return this.parameter_tables.includes(tableName ?? ''); } /** @@ -577,21 +578,20 @@ export class SqlTools { private checkRef(table: string, ref: ExprRef) { if (this.schema) { - const type = this.schema.getType(table, ref.name); - if (type.typeFlags == TYPE_NONE) { + const type = this.schema.getColumn(table, ref.name); + if (type == null) { this.warn(`Column not found: ${ref.name}`, ref); } } } getParameterRefClause(expr: ExprRef): ParameterValueClause { - const table = expr.table!.name; + const table = (expr.table?.name ?? this.default_table)!; const column = expr.name; return { key: `${table}.${column}`, lookupParameterValue: (parameters) => { - const pt: SqliteJsonRow | undefined = (parameters as any)[table]; - return pt?.[column] ?? 
null; + return parameters.lookup(table, column); }, usesAuthenticatedRequestParameters: table == 'token_parameters', usesUnauthenticatedRequestParameters: table == 'user_parameters' @@ -607,18 +607,17 @@ export class SqlTools { * * Only "value" tables are supported here, not parameter values. */ - getTableName(ref: ExprRef) { + getTableName(ref: ExprRef): string { if (this.refHasSchema(ref)) { throw new SqlRuleError(`Specifying schema in column references is not supported`, this.sql, ref); } - if (ref.table?.name == null && this.default_table != null) { - return this.default_table; - } else if (this.value_tables.includes(ref.table?.name ?? '')) { - return ref.table!.name; + const tableName = ref.table?.name ?? this.default_table; + if (this.value_tables.includes(tableName ?? '')) { + return tableName!; } else if (ref.table?.name == null) { throw new SqlRuleError(`Table name required`, this.sql, ref); } else { - throw new SqlRuleError(`Undefined table ${ref.table?.name}`, this.sql, ref); + throw new SqlRuleError(`Undefined table ${tableName}`, this.sql, ref); } } @@ -659,9 +658,11 @@ export class SqlTools { const args = argClauses.map((e) => (e as RowValueClause).evaluate(tables)); return fnImpl.call(...args); }, - getType(schema) { - const argTypes = argClauses.map((e) => (e as RowValueClause).getType(schema)); - return fnImpl.getReturnType(argTypes); + getColumnDefinition(schema) { + const argTypes = argClauses.map( + (e) => (e as RowValueClause).getColumnDefinition(schema)?.type ?? ExpressionType.NONE + ); + return { name: `${fnImpl}()`, type: fnImpl.getReturnType(argTypes) }; } } satisfies RowValueClause; } else if (argsType == 'param') { @@ -748,6 +749,8 @@ function staticValue(expr: Expr): SqliteValue { return expr.value ? SQLITE_TRUE : SQLITE_FALSE; } else if (expr.type == 'integer') { return BigInt(expr.value); + } else if (expr.type == 'null') { + return null; } else { return (expr as any).value; } @@ -758,8 +761,11 @@ function staticValueClause(value: SqliteValue): StaticValueClause { value: value, // RowValueClause compatibility evaluate: () => value, - getType() { - return ExpressionType.fromTypeText(sqliteTypeOf(value)); + getColumnDefinition() { + return { + name: 'literal', + type: ExpressionType.fromTypeText(sqliteTypeOf(value)) + }; }, // ParamterValueClause compatibility key: JSONBig.stringify(value), diff --git a/packages/sync-rules/src/sql_functions.ts b/packages/sync-rules/src/sql_functions.ts index de74b0e5f..62ebc7bae 100644 --- a/packages/sync-rules/src/sql_functions.ts +++ b/packages/sync-rules/src/sql_functions.ts @@ -1,5 +1,5 @@ import { JSONBig } from '@powersync/service-jsonbig'; -import { getOperatorFunction, SQLITE_FALSE, SQLITE_TRUE, sqliteBool, sqliteNot } from './sql_support.js'; +import { SQLITE_FALSE, SQLITE_TRUE, sqliteBool, sqliteNot } from './sql_support.js'; import { SqliteValue } from './types.js'; import { jsonValueToSqlite } from './utils.js'; // Declares @syncpoint/wkx module @@ -44,6 +44,18 @@ export interface DocumentedSqlFunction extends SqlFunction { documentation?: string; } +export function getOperatorFunction(op: string): SqlFunction { + return { + debugName: `operator${op}`, + call(...args: SqliteValue[]) { + return evaluateOperator(op, args[0], args[1]); + }, + getReturnType(args) { + return getOperatorReturnType(op, args[0], args[1]); + } + }; +} + const upper: DocumentedSqlFunction = { debugName: 'upper', call(value: SqliteValue) { @@ -811,9 +823,6 @@ export function jsonExtract(sourceValue: SqliteValue, path: SqliteValue, operato if 
(operator == '->') { // -> must always stringify return JSONBig.stringify(value); - } else if (typeof value == 'object' || Array.isArray(value)) { - // Objects and arrays must be stringified - return JSONBig.stringify(value); } else { // Plain scalar value - simple conversion. return jsonValueToSqlite(value as string | number | bigint | boolean | null); diff --git a/packages/sync-rules/src/sql_support.ts b/packages/sync-rules/src/sql_support.ts index 9d381f363..b64aacd4f 100644 --- a/packages/sync-rules/src/sql_support.ts +++ b/packages/sync-rules/src/sql_support.ts @@ -1,3 +1,8 @@ +import { SelectFromStatement } from 'pgsql-ast-parser'; +import { SqlRuleError } from './errors.js'; +import { ExpressionType } from './ExpressionType.js'; +import { MATCH_CONST_FALSE, MATCH_CONST_TRUE } from './sql_filters.js'; +import { evaluateOperator, getOperatorReturnType } from './sql_functions.js'; import { ClauseError, CompiledClause, @@ -6,16 +11,11 @@ import { ParameterMatchClause, ParameterValueClause, QueryParameters, - SqliteValue, RowValueClause, + SqliteValue, StaticValueClause, TrueIfParametersMatch } from './types.js'; -import { MATCH_CONST_FALSE, MATCH_CONST_TRUE } from './sql_filters.js'; -import { SqlFunction, evaluateOperator, getOperatorReturnType } from './sql_functions.js'; -import { SelectFromStatement } from 'pgsql-ast-parser'; -import { SqlRuleError } from './errors.js'; -import { ExpressionType } from './ExpressionType.js'; export function isParameterMatchClause(clause: CompiledClause): clause is ParameterMatchClause { return Array.isArray((clause as ParameterMatchClause).inputParameters); @@ -66,22 +66,14 @@ export function compileStaticOperator(op: string, left: RowValueClause, right: R const rightValue = right.evaluate(tables); return evaluateOperator(op, leftValue, rightValue); }, - getType(schema) { - const typeLeft = left.getType(schema); - const typeRight = right.getType(schema); - return getOperatorReturnType(op, typeLeft, typeRight); - } - }; -} - -export function getOperatorFunction(op: string): SqlFunction { - return { - debugName: `operator${op}`, - call(...args: SqliteValue[]) { - return evaluateOperator(op, args[0], args[1]); - }, - getReturnType(args) { - return getOperatorReturnType(op, args[0], args[1]); + getColumnDefinition(schema) { + const typeLeft = left.getColumnDefinition(schema)?.type ?? ExpressionType.NONE; + const typeRight = right.getColumnDefinition(schema)?.type ?? 
ExpressionType.NONE; + const type = getOperatorReturnType(op, typeLeft, typeRight); + return { + name: '?', + type + }; } }; } @@ -95,8 +87,8 @@ export function andFilters(a: CompiledClause, b: CompiledClause): CompiledClause const bValue = sqliteBool(b.evaluate(tables)); return sqliteBool(aValue && bValue); }, - getType() { - return ExpressionType.INTEGER; + getColumnDefinition() { + return { name: 'and', type: ExpressionType.INTEGER }; } } satisfies RowValueClause; } @@ -156,8 +148,8 @@ export function orFilters(a: CompiledClause, b: CompiledClause): CompiledClause const bValue = sqliteBool(b.evaluate(tables)); return sqliteBool(aValue || bValue); }, - getType() { - return ExpressionType.INTEGER; + getColumnDefinition() { + return { name: 'or', type: ExpressionType.INTEGER }; } } satisfies RowValueClause; } diff --git a/packages/sync-rules/src/types.ts b/packages/sync-rules/src/types.ts index e54d508df..b506c09c4 100644 --- a/packages/sync-rules/src/types.ts +++ b/packages/sync-rules/src/types.ts @@ -3,6 +3,7 @@ import { SourceTableInterface } from './SourceTableInterface.js'; import { ColumnDefinition, ExpressionType } from './ExpressionType.js'; import { TablePattern } from './TablePattern.js'; import { toSyncRulesParameters } from './utils.js'; +import { SyncRulesOptions } from './SqlSyncRules.js'; export interface SyncRules { evaluateRow(options: EvaluateRowOptions): EvaluationResult[]; @@ -10,7 +11,7 @@ export interface SyncRules { evaluateParameterRow(table: SourceTableInterface, row: SqliteRow): EvaluatedParametersResult[]; } -export interface QueryParseOptions { +export interface QueryParseOptions extends SyncRulesOptions { accept_potentially_dangerous_queries?: boolean; } @@ -72,7 +73,23 @@ export interface RequestJwtPayload { [key: string]: any; } -export class RequestParameters { +export interface ParameterValueSet { + lookup(table: string, column: string): SqliteValue; + + /** + * JSON string of raw request parameters. + */ + raw_user_parameters: string; + + /** + * JSON string of raw request parameters. + */ + raw_token_payload: string; + + user_id: string; +} + +export class RequestParameters implements ParameterValueSet { token_parameters: SqliteJsonRow; user_parameters: SqliteJsonRow; @@ -105,6 +122,15 @@ export class RequestParameters { this.raw_user_parameters = JSONBig.stringify(clientParameters); this.user_parameters = toSyncRulesParameters(clientParameters); } + + lookup(table: string, column: string): SqliteJsonValue { + if (table == 'token_parameters') { + return this.token_parameters[column]; + } else if (table == 'user_parameters') { + return this.user_parameters[column]; + } + throw new Error(`Unknown table: ${table}`); + } } /** @@ -199,7 +225,7 @@ export interface InputParameter { * * Only relevant for parameter queries. */ - parametersToLookupValue(parameters: RequestParameters): SqliteValue; + parametersToLookupValue(parameters: ParameterValueSet): SqliteValue; } export interface EvaluateRowOptions { @@ -275,11 +301,11 @@ export interface ParameterValueClause { * * Only relevant for parameter queries. 
*/ - lookupParameterValue(parameters: RequestParameters): SqliteValue; + lookupParameterValue(parameters: ParameterValueSet): SqliteValue; } export interface QuerySchema { - getType(table: string, column: string): ExpressionType; + getColumn(table: string, column: string): ColumnDefinition | undefined; getColumns(table: string): ColumnDefinition[]; } @@ -291,7 +317,7 @@ export interface QuerySchema { */ export interface RowValueClause { evaluate(tables: QueryParameters): SqliteValue; - getType(schema: QuerySchema): ExpressionType; + getColumnDefinition(schema: QuerySchema): ColumnDefinition | undefined; } /** @@ -321,7 +347,7 @@ export interface QueryBucketIdOptions { export interface SourceSchemaTable { table: string; - getType(column: string): ExpressionType | undefined; + getColumn(column: string): ColumnDefinition | undefined; getColumns(): ColumnDefinition[]; } export interface SourceSchema { diff --git a/packages/sync-rules/src/utils.ts b/packages/sync-rules/src/utils.ts index 3cf15bc6e..06e34f540 100644 --- a/packages/sync-rules/src/utils.ts +++ b/packages/sync-rules/src/utils.ts @@ -51,9 +51,12 @@ export function filterJsonRow(data: SqliteRow): SqliteJsonRow { * * Types specifically not supported in output are `boolean` and `undefined`. */ -export function jsonValueToSqlite(value: null | undefined | string | number | bigint | boolean): SqliteValue { +export function jsonValueToSqlite(value: null | undefined | string | number | bigint | boolean | any): SqliteValue { if (typeof value == 'boolean') { return value ? SQLITE_TRUE : SQLITE_FALSE; + } else if (typeof value == 'object' || Array.isArray(value)) { + // Objects and arrays must be stringified + return JSONBig.stringify(value); } else { return value ?? null; } diff --git a/packages/sync-rules/test/src/data_queries.test.ts b/packages/sync-rules/test/src/data_queries.test.ts index b313baaae..848245334 100644 --- a/packages/sync-rules/test/src/data_queries.test.ts +++ b/packages/sync-rules/test/src/data_queries.test.ts @@ -1,11 +1,11 @@ import { describe, expect, test } from 'vitest'; import { ExpressionType, SqlDataQuery } from '../../src/index.js'; -import { ASSETS, BASIC_SCHEMA } from './util.js'; +import { ASSETS, BASIC_SCHEMA, PARSE_OPTIONS } from './util.js'; describe('data queries', () => { test('bucket parameters = query', function () { const sql = 'SELECT * FROM assets WHERE assets.org_id = bucket.org_id'; - const query = SqlDataQuery.fromSql('mybucket', ['org_id'], sql); + const query = SqlDataQuery.fromSql('mybucket', ['org_id'], sql, PARSE_OPTIONS); expect(query.errors).toEqual([]); expect(query.evaluateRow(ASSETS, { id: 'asset1', org_id: 'org1' })).toEqual([ @@ -22,7 +22,7 @@ describe('data queries', () => { test('bucket parameters IN query', function () { const sql = 'SELECT * FROM assets WHERE bucket.category IN assets.categories'; - const query = SqlDataQuery.fromSql('mybucket', ['category'], sql); + const query = SqlDataQuery.fromSql('mybucket', ['category'], sql, PARSE_OPTIONS); expect(query.errors).toEqual([]); expect(query.evaluateRow(ASSETS, { id: 'asset1', categories: JSON.stringify(['red', 'green']) })).toMatchObject([ @@ -43,7 +43,7 @@ describe('data queries', () => { test('static IN data query', function () { const sql = `SELECT * FROM assets WHERE 'green' IN assets.categories`; - const query = SqlDataQuery.fromSql('mybucket', [], sql); + const query = SqlDataQuery.fromSql('mybucket', [], sql, PARSE_OPTIONS); expect(query.errors).toEqual([]); expect(query.evaluateRow(ASSETS, { id: 'asset1', categories: 
JSON.stringify(['red', 'green']) })).toMatchObject([ @@ -59,7 +59,7 @@ describe('data queries', () => { test('data IN static query', function () { const sql = `SELECT * FROM assets WHERE assets.condition IN '["good","great"]'`; - const query = SqlDataQuery.fromSql('mybucket', [], sql); + const query = SqlDataQuery.fromSql('mybucket', [], sql, PARSE_OPTIONS); expect(query.errors).toEqual([]); expect(query.evaluateRow(ASSETS, { id: 'asset1', condition: 'good' })).toMatchObject([ @@ -75,7 +75,7 @@ describe('data queries', () => { test('table alias', function () { const sql = 'SELECT * FROM assets as others WHERE others.org_id = bucket.org_id'; - const query = SqlDataQuery.fromSql('mybucket', ['org_id'], sql); + const query = SqlDataQuery.fromSql('mybucket', ['org_id'], sql, PARSE_OPTIONS); expect(query.errors).toEqual([]); expect(query.evaluateRow(ASSETS, { id: 'asset1', org_id: 'org1' })).toEqual([ @@ -91,7 +91,12 @@ describe('data queries', () => { test('types', () => { const schema = BASIC_SCHEMA; - const q1 = SqlDataQuery.fromSql('q1', ['user_id'], `SELECT * FROM assets WHERE owner_id = bucket.user_id`); + const q1 = SqlDataQuery.fromSql( + 'q1', + ['user_id'], + `SELECT * FROM assets WHERE owner_id = bucket.user_id`, + PARSE_OPTIONS + ); expect(q1.getColumnOutputs(schema)).toEqual([ { name: 'assets', @@ -116,7 +121,8 @@ describe('data queries', () => { count * '4' as count4, name ->> '$.attr' as json_value, ifnull(name, 2.0) as maybe_name - FROM assets WHERE owner_id = bucket.user_id` + FROM assets WHERE owner_id = bucket.user_id`, + PARSE_OPTIONS ); expect(q2.getColumnOutputs(schema)).toEqual([ { @@ -141,7 +147,7 @@ describe('data queries', () => { 'q1', ['user_id'], 'SELECT id, name, count FROM assets WHERE owner_id = bucket.user_id', - schema + { ...PARSE_OPTIONS, schema } ); expect(q1.errors).toEqual([]); @@ -149,7 +155,7 @@ describe('data queries', () => { 'q2', ['user_id'], 'SELECT id, upper(description) as d FROM assets WHERE other_id = bucket.user_id', - schema + { ...PARSE_OPTIONS, schema } ); expect(q2.errors).toMatchObject([ { @@ -166,16 +172,16 @@ describe('data queries', () => { 'q3', ['user_id'], 'SELECT id, description, * FROM nope WHERE other_id = bucket.user_id', - schema + { ...PARSE_OPTIONS, schema } ); expect(q3.errors).toMatchObject([ { - message: `Table public.nope not found`, + message: `Table test_schema.nope not found`, type: 'warning' } ]); - const q4 = SqlDataQuery.fromSql('q4', [], 'SELECT * FROM other', schema); + const q4 = SqlDataQuery.fromSql('q4', [], 'SELECT * FROM other', { ...PARSE_OPTIONS, schema }); expect(q4.errors).toMatchObject([ { message: `Query must return an "id" column`, @@ -183,13 +189,13 @@ describe('data queries', () => { } ]); - const q5 = SqlDataQuery.fromSql('q5', [], 'SELECT other_id as id, * FROM other', schema); + const q5 = SqlDataQuery.fromSql('q5', [], 'SELECT other_id as id, * FROM other', { ...PARSE_OPTIONS, schema }); expect(q5.errors).toMatchObject([]); }); test('invalid query - invalid IN', function () { const sql = 'SELECT * FROM assets WHERE assets.category IN bucket.categories'; - const query = SqlDataQuery.fromSql('mybucket', ['categories'], sql); + const query = SqlDataQuery.fromSql('mybucket', ['categories'], sql, PARSE_OPTIONS); expect(query.errors).toMatchObject([ { type: 'fatal', message: 'Cannot use bucket parameters on the right side of IN operators' } ]); @@ -197,7 +203,7 @@ describe('data queries', () => { test('invalid query - not all parameters used', function () { const sql = 'SELECT * FROM assets WHERE 1'; - 
const query = SqlDataQuery.fromSql('mybucket', ['org_id'], sql); + const query = SqlDataQuery.fromSql('mybucket', ['org_id'], sql, PARSE_OPTIONS); expect(query.errors).toMatchObject([ { type: 'fatal', message: 'Query must cover all bucket parameters. Expected: ["bucket.org_id"] Got: []' } ]); @@ -205,7 +211,7 @@ describe('data queries', () => { test('invalid query - parameter not defined', function () { const sql = 'SELECT * FROM assets WHERE assets.org_id = bucket.org_id'; - const query = SqlDataQuery.fromSql('mybucket', [], sql); + const query = SqlDataQuery.fromSql('mybucket', [], sql, PARSE_OPTIONS); expect(query.errors).toMatchObject([ { type: 'fatal', message: 'Query must cover all bucket parameters. Expected: [] Got: ["bucket.org_id"]' } ]); @@ -213,25 +219,25 @@ describe('data queries', () => { test('invalid query - function on parameter (1)', function () { const sql = 'SELECT * FROM assets WHERE assets.org_id = upper(bucket.org_id)'; - const query = SqlDataQuery.fromSql('mybucket', ['org_id'], sql); + const query = SqlDataQuery.fromSql('mybucket', ['org_id'], sql, PARSE_OPTIONS); expect(query.errors).toMatchObject([{ type: 'fatal', message: 'Cannot use bucket parameters in expressions' }]); }); test('invalid query - function on parameter (2)', function () { const sql = 'SELECT * FROM assets WHERE assets.org_id = upper(bucket.org_id)'; - const query = SqlDataQuery.fromSql('mybucket', [], sql); + const query = SqlDataQuery.fromSql('mybucket', [], sql, PARSE_OPTIONS); expect(query.errors).toMatchObject([{ type: 'fatal', message: 'Cannot use bucket parameters in expressions' }]); }); test('invalid query - match clause in select', () => { const sql = 'SELECT id, (bucket.org_id = assets.org_id) as org_matches FROM assets where org_id = bucket.org_id'; - const query = SqlDataQuery.fromSql('mybucket', ['org_id'], sql); + const query = SqlDataQuery.fromSql('mybucket', ['org_id'], sql, PARSE_OPTIONS); expect(query.errors[0].message).toMatch(/Parameter match expression is not allowed here/); }); test('case-sensitive queries (1)', () => { const sql = 'SELECT * FROM Assets'; - const query = SqlDataQuery.fromSql('mybucket', [], sql); + const query = SqlDataQuery.fromSql('mybucket', [], sql, PARSE_OPTIONS); expect(query.errors).toMatchObject([ { message: `Unquoted identifiers are converted to lower-case. Use "Assets" instead.` } ]); @@ -239,7 +245,7 @@ describe('data queries', () => { test('case-sensitive queries (2)', () => { const sql = 'SELECT *, Name FROM assets'; - const query = SqlDataQuery.fromSql('mybucket', [], sql); + const query = SqlDataQuery.fromSql('mybucket', [], sql, PARSE_OPTIONS); expect(query.errors).toMatchObject([ { message: `Unquoted identifiers are converted to lower-case. Use "Name" instead.` } ]); @@ -247,7 +253,7 @@ describe('data queries', () => { test('case-sensitive queries (3)', () => { const sql = 'SELECT * FROM assets WHERE Archived = False'; - const query = SqlDataQuery.fromSql('mybucket', [], sql); + const query = SqlDataQuery.fromSql('mybucket', [], sql, PARSE_OPTIONS); expect(query.errors).toMatchObject([ { message: `Unquoted identifiers are converted to lower-case. 
Use "Archived" instead.` } ]); @@ -256,7 +262,7 @@ describe('data queries', () => { test.skip('case-sensitive queries (4)', () => { // Cannot validate table alias yet const sql = 'SELECT * FROM assets as myAssets'; - const query = SqlDataQuery.fromSql('mybucket', [], sql); + const query = SqlDataQuery.fromSql('mybucket', [], sql, PARSE_OPTIONS); expect(query.errors).toMatchObject([ { message: `Unquoted identifiers are converted to lower-case. Use "myAssets" instead.` } ]); @@ -265,7 +271,7 @@ describe('data queries', () => { test.skip('case-sensitive queries (5)', () => { // Cannot validate table alias yet const sql = 'SELECT * FROM assets myAssets'; - const query = SqlDataQuery.fromSql('mybucket', [], sql); + const query = SqlDataQuery.fromSql('mybucket', [], sql, PARSE_OPTIONS); expect(query.errors).toMatchObject([ { message: `Unquoted identifiers are converted to lower-case. Use "myAssets" instead.` } ]); @@ -274,7 +280,7 @@ describe('data queries', () => { test.skip('case-sensitive queries (6)', () => { // Cannot validate anything with a schema yet const sql = 'SELECT * FROM public.ASSETS'; - const query = SqlDataQuery.fromSql('mybucket', [], sql); + const query = SqlDataQuery.fromSql('mybucket', [], sql, PARSE_OPTIONS); expect(query.errors).toMatchObject([ { message: `Unquoted identifiers are converted to lower-case. Use "ASSETS" instead.` } ]); @@ -283,7 +289,7 @@ describe('data queries', () => { test.skip('case-sensitive queries (7)', () => { // Cannot validate schema yet const sql = 'SELECT * FROM PUBLIC.assets'; - const query = SqlDataQuery.fromSql('mybucket', [], sql); + const query = SqlDataQuery.fromSql('mybucket', [], sql, PARSE_OPTIONS); expect(query.errors).toMatchObject([ { message: `Unquoted identifiers are converted to lower-case. 
Use "PUBLIC" instead.` } ]); diff --git a/packages/sync-rules/test/src/generate_schema.test.ts b/packages/sync-rules/test/src/generate_schema.test.ts new file mode 100644 index 000000000..d742a7f8b --- /dev/null +++ b/packages/sync-rules/test/src/generate_schema.test.ts @@ -0,0 +1,172 @@ +import { describe, expect, test } from 'vitest'; +import { + DEFAULT_TAG, + DartSchemaGenerator, + JsLegacySchemaGenerator, + SqlSyncRules, + StaticSchema, + TsSchemaGenerator +} from '../../src/index.js'; + +import { PARSE_OPTIONS } from './util.js'; + +describe('schema generation', () => { + const schema = new StaticSchema([ + { + tag: DEFAULT_TAG, + schemas: [ + { + name: 'test_schema', + tables: [ + { + name: 'assets', + columns: [ + { name: 'id', sqlite_type: 'text', internal_type: 'uuid' }, + { name: 'name', sqlite_type: 'text', internal_type: 'text' }, + { name: 'count', sqlite_type: 'integer', internal_type: 'int4' }, + { name: 'owner_id', sqlite_type: 'text', internal_type: 'uuid' } + ] + } + ] + } + ] + } + ]); + + const rules = SqlSyncRules.fromYaml( + ` +bucket_definitions: + mybucket: + data: + - SELECT * FROM assets as assets1 + - SELECT id, name, count FROM assets as assets2 + - SELECT id, owner_id as other_id, foo FROM assets as ASSETS2 + `, + PARSE_OPTIONS + ); + + test('dart', () => { + expect(new DartSchemaGenerator().generate(rules, schema)).toEqual(`Schema([ + Table('assets1', [ + Column.text('name'), + Column.integer('count'), + Column.text('owner_id') + ]), + Table('assets2', [ + Column.text('name'), + Column.integer('count'), + Column.text('other_id'), + Column.text('foo') + ]) +]); +`); + + expect(new DartSchemaGenerator().generate(rules, schema, { includeTypeComments: true })).toEqual(`Schema([ + Table('assets1', [ + Column.text('name'), // text + Column.integer('count'), // int4 + Column.text('owner_id') // uuid + ]), + Table('assets2', [ + Column.text('name'), // text + Column.integer('count'), // int4 + Column.text('other_id'), // uuid + Column.text('foo') + ]) +]); +`); + }); + + test('js legacy', () => { + expect(new JsLegacySchemaGenerator().generate(rules, schema)).toEqual(`new Schema([ + new Table({ + name: 'assets1', + columns: [ + new Column({ name: 'name', type: ColumnType.TEXT }), + new Column({ name: 'count', type: ColumnType.INTEGER }), + new Column({ name: 'owner_id', type: ColumnType.TEXT }) + ] + }), + new Table({ + name: 'assets2', + columns: [ + new Column({ name: 'name', type: ColumnType.TEXT }), + new Column({ name: 'count', type: ColumnType.INTEGER }), + new Column({ name: 'other_id', type: ColumnType.TEXT }), + new Column({ name: 'foo', type: ColumnType.TEXT }) + ] + }) +]) +`); + }); + + test('ts', () => { + expect(new TsSchemaGenerator().generate(rules, schema, {})).toEqual( + `import { column, Schema, Table } from '@powersync/web'; +// OR: import { column, Schema, Table } from '@powersync/react-native'; + +const assets1 = new Table( + { + // id column (text) is automatically included + name: column.text, + count: column.integer, + owner_id: column.text + }, + { indexes: {} } +); + +const assets2 = new Table( + { + // id column (text) is automatically included + name: column.text, + count: column.integer, + other_id: column.text, + foo: column.text + }, + { indexes: {} } +); + +export const AppSchema = new Schema({ + assets1, + assets2 +}); + +export type Database = (typeof AppSchema)['types']; +` + ); + + expect(new TsSchemaGenerator().generate(rules, schema, { includeTypeComments: true })).toEqual( + `import { column, Schema, Table } from 
'@powersync/web'; +// OR: import { column, Schema, Table } from '@powersync/react-native'; + +const assets1 = new Table( + { + // id column (text) is automatically included + name: column.text, // text + count: column.integer, // int4 + owner_id: column.text // uuid + }, + { indexes: {} } +); + +const assets2 = new Table( + { + // id column (text) is automatically included + name: column.text, // text + count: column.integer, // int4 + other_id: column.text, // uuid + foo: column.text + }, + { indexes: {} } +); + +export const AppSchema = new Schema({ + assets1, + assets2 +}); + +export type Database = (typeof AppSchema)['types']; +` + ); + }); +}); diff --git a/packages/sync-rules/test/src/parameter_queries.test.ts b/packages/sync-rules/test/src/parameter_queries.test.ts index 04f24f8f0..59d0f1147 100644 --- a/packages/sync-rules/test/src/parameter_queries.test.ts +++ b/packages/sync-rules/test/src/parameter_queries.test.ts @@ -1,11 +1,11 @@ import { describe, expect, test } from 'vitest'; import { SqlParameterQuery } from '../../src/index.js'; -import { BASIC_SCHEMA, normalizeTokenParameters } from './util.js'; +import { BASIC_SCHEMA, normalizeTokenParameters, PARSE_OPTIONS } from './util.js'; describe('parameter queries', () => { test('token_parameters IN query', function () { const sql = 'SELECT id as group_id FROM groups WHERE token_parameters.user_id IN groups.user_ids'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); query.id = '1'; expect(query.evaluateParameterRow({ id: 'group1', user_ids: JSON.stringify(['user1', 'user2']) })).toEqual([ @@ -37,7 +37,7 @@ describe('parameter queries', () => { test('IN token_parameters query', function () { const sql = 'SELECT id as region_id FROM regions WHERE name IN token_parameters.region_names'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); query.id = '1'; expect(query.evaluateParameterRow({ id: 'region1', name: 'colorado' })).toEqual([ @@ -65,7 +65,7 @@ describe('parameter queries', () => { test('queried numeric parameters', () => { const sql = 'SELECT users.int1, users.float1, users.float2 FROM users WHERE users.int1 = token_parameters.int1 AND users.float1 = token_parameters.float1 AND users.float2 = token_parameters.float2'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); query.id = '1'; // Note: We don't need to worry about numeric vs decimal types in the lookup - JSONB handles normalization for us. 
@@ -95,7 +95,7 @@ describe('parameter queries', () => { test('plain token_parameter (baseline)', () => { const sql = 'SELECT id from users WHERE filter_param = token_parameters.user_id'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); expect(query.evaluateParameterRow({ id: 'test_id', filter_param: 'test_param' })).toEqual([ @@ -111,7 +111,7 @@ describe('parameter queries', () => { test('function on token_parameter', () => { const sql = 'SELECT id from users WHERE filter_param = upper(token_parameters.user_id)'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); expect(query.evaluateParameterRow({ id: 'test_id', filter_param: 'test_param' })).toEqual([ @@ -127,7 +127,7 @@ describe('parameter queries', () => { test('token parameter member operator', () => { const sql = "SELECT id from users WHERE filter_param = token_parameters.some_param ->> 'description'"; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); expect(query.evaluateParameterRow({ id: 'test_id', filter_param: 'test_param' })).toEqual([ @@ -145,7 +145,7 @@ describe('parameter queries', () => { test('token parameter and binary operator', () => { const sql = 'SELECT id from users WHERE filter_param = token_parameters.some_param + 2'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); expect(query.getLookups(normalizeTokenParameters({ some_param: 3 }))).toEqual([['mybucket', undefined, 5n]]); @@ -153,7 +153,7 @@ describe('parameter queries', () => { test('token parameter IS NULL as filter', () => { const sql = 'SELECT id from users WHERE filter_param = (token_parameters.some_param IS NULL)'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); expect(query.getLookups(normalizeTokenParameters({ some_param: null }))).toEqual([['mybucket', undefined, 1n]]); @@ -162,7 +162,7 @@ describe('parameter queries', () => { test('direct token parameter', () => { const sql = 'SELECT FROM users WHERE token_parameters.some_param'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); expect(query.evaluateParameterRow({ id: 'user1' })).toEqual([ @@ -182,7 +182,7 @@ describe('parameter queries', () => { test('token parameter IS NULL', () => { const sql = 'SELECT FROM users WHERE token_parameters.some_param IS NULL'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); expect(query.evaluateParameterRow({ id: 'user1' })).toEqual([ @@ -202,7 +202,7 @@ describe('parameter queries', () => { test('token parameter IS NOT NULL', () => { const sql = 'SELECT FROM 
users WHERE token_parameters.some_param IS NOT NULL'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); expect(query.evaluateParameterRow({ id: 'user1' })).toEqual([ @@ -222,7 +222,7 @@ describe('parameter queries', () => { test('token parameter NOT', () => { const sql = 'SELECT FROM users WHERE NOT token_parameters.is_admin'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); expect(query.evaluateParameterRow({ id: 'user1' })).toEqual([ @@ -242,7 +242,7 @@ describe('parameter queries', () => { test('row filter and token parameter IS NULL', () => { const sql = 'SELECT FROM users WHERE users.id = token_parameters.user_id AND token_parameters.some_param IS NULL'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); expect(query.evaluateParameterRow({ id: 'user1' })).toEqual([ @@ -262,7 +262,7 @@ describe('parameter queries', () => { test('row filter and direct token parameter', () => { const sql = 'SELECT FROM users WHERE users.id = token_parameters.user_id AND token_parameters.some_param'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); expect(query.evaluateParameterRow({ id: 'user1' })).toEqual([ @@ -282,7 +282,7 @@ describe('parameter queries', () => { test('cast', () => { const sql = 'SELECT FROM users WHERE users.id = cast(token_parameters.user_id as text)'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); expect(query.getLookups(normalizeTokenParameters({ user_id: 'user1' }))).toEqual([ @@ -293,7 +293,7 @@ describe('parameter queries', () => { test('IS NULL row filter', () => { const sql = 'SELECT id FROM users WHERE role IS NULL'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); expect(query.evaluateParameterRow({ id: 'user1', role: null })).toEqual([ @@ -311,7 +311,7 @@ describe('parameter queries', () => { // Not supported: token_parameters.is_admin != false // Support could be added later. 
const sql = 'SELECT FROM users WHERE users.id = token_parameters.user_id AND token_parameters.is_admin'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); query.id = '1'; @@ -334,7 +334,7 @@ describe('parameter queries', () => { test('token filter (2)', () => { const sql = 'SELECT users.id AS user_id, token_parameters.is_admin as is_admin FROM users WHERE users.id = token_parameters.user_id AND token_parameters.is_admin'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); query.id = '1'; @@ -357,7 +357,7 @@ describe('parameter queries', () => { test('case-sensitive parameter queries (1)', () => { const sql = 'SELECT users."userId" AS user_id FROM users WHERE users."userId" = token_parameters.user_id'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); query.id = '1'; @@ -375,7 +375,7 @@ describe('parameter queries', () => { // This may change in the future - we should check against expected behavior for // Postgres and/or SQLite. const sql = 'SELECT users.userId AS user_id FROM users WHERE users.userId = token_parameters.user_id'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toMatchObject([ { message: `Unquoted identifiers are converted to lower-case. Use "userId" instead.` }, { message: `Unquoted identifiers are converted to lower-case. Use "userId" instead.` } @@ -394,7 +394,7 @@ describe('parameter queries', () => { test('case-sensitive parameter queries (3)', () => { const sql = 'SELECT user_id FROM users WHERE Users.user_id = token_parameters.user_id'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toMatchObject([ { message: `Unquoted identifiers are converted to lower-case. Use "Users" instead.` } ]); @@ -402,7 +402,7 @@ describe('parameter queries', () => { test('case-sensitive parameter queries (4)', () => { const sql = 'SELECT Users.user_id FROM users WHERE user_id = token_parameters.user_id'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toMatchObject([ { message: `Unquoted identifiers are converted to lower-case. Use "Users" instead.` } ]); @@ -410,7 +410,7 @@ describe('parameter queries', () => { test('case-sensitive parameter queries (5)', () => { const sql = 'SELECT user_id FROM Users WHERE user_id = token_parameters.user_id'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toMatchObject([ { message: `Unquoted identifiers are converted to lower-case. 
Use "Users" instead.` } ]); @@ -418,7 +418,7 @@ describe('parameter queries', () => { test('case-sensitive parameter queries (6)', () => { const sql = 'SELECT userId FROM users'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toMatchObject([ { message: `Unquoted identifiers are converted to lower-case. Use "userId" instead.` } ]); @@ -426,7 +426,7 @@ describe('parameter queries', () => { test('case-sensitive parameter queries (7)', () => { const sql = 'SELECT user_id as userId FROM users'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toMatchObject([ { message: `Unquoted identifiers are converted to lower-case. Use "userId" instead.` } ]); @@ -434,7 +434,7 @@ describe('parameter queries', () => { test('dynamic global parameter query', () => { const sql = "SELECT workspaces.id AS workspace_id FROM workspaces WHERE visibility = 'public'"; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); query.id = '1'; @@ -453,7 +453,7 @@ describe('parameter queries', () => { // This is treated as two separate lookup index values const sql = 'SELECT id from users WHERE filter_param = upper(token_parameters.user_id) AND filter_param = lower(token_parameters.user_id)'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); expect(query.evaluateParameterRow({ id: 'test_id', filter_param: 'test_param' })).toEqual([ @@ -473,7 +473,7 @@ describe('parameter queries', () => { // This is treated as the same index lookup value, can use OR with the two clauses const sql = 'SELECT id from users WHERE filter_param1 = upper(token_parameters.user_id) OR filter_param2 = upper(token_parameters.user_id)'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); expect(query.evaluateParameterRow({ id: 'test_id', filter_param1: 'test1', filter_param2: 'test2' })).toEqual([ @@ -492,8 +492,9 @@ describe('parameter queries', () => { test('request.parameters()', function () { const sql = "SELECT FROM posts WHERE category = request.parameters() ->> 'category_id'"; - const query = SqlParameterQuery.fromSql('mybucket', sql, undefined, { - accept_potentially_dangerous_queries: true + const query = SqlParameterQuery.fromSql('mybucket', sql, { + accept_potentially_dangerous_queries: true, + ...PARSE_OPTIONS }) as SqlParameterQuery; expect(query.errors).toEqual([]); query.id = '1'; @@ -508,8 +509,9 @@ describe('parameter queries', () => { test('nested request.parameters() (1)', function () { const sql = "SELECT FROM posts WHERE category = request.parameters() -> 'details' ->> 'category'"; - const query = SqlParameterQuery.fromSql('mybucket', sql, undefined, { - accept_potentially_dangerous_queries: true + const query = SqlParameterQuery.fromSql('mybucket', sql, { + accept_potentially_dangerous_queries: true, + ...PARSE_OPTIONS }) as SqlParameterQuery; 
expect(query.errors).toEqual([]); query.id = '1'; @@ -520,8 +522,9 @@ describe('parameter queries', () => { test('nested request.parameters() (2)', function () { const sql = "SELECT FROM posts WHERE category = request.parameters() ->> 'details.category'"; - const query = SqlParameterQuery.fromSql('mybucket', sql, undefined, { - accept_potentially_dangerous_queries: true + const query = SqlParameterQuery.fromSql('mybucket', sql, { + accept_potentially_dangerous_queries: true, + ...PARSE_OPTIONS }) as SqlParameterQuery; expect(query.errors).toEqual([]); query.id = '1'; @@ -533,8 +536,9 @@ describe('parameter queries', () => { test('IN request.parameters()', function () { // Can use -> or ->> here const sql = "SELECT id as region_id FROM regions WHERE name IN request.parameters() -> 'region_names'"; - const query = SqlParameterQuery.fromSql('mybucket', sql, undefined, { - accept_potentially_dangerous_queries: true + const query = SqlParameterQuery.fromSql('mybucket', sql, { + accept_potentially_dangerous_queries: true, + ...PARSE_OPTIONS }) as SqlParameterQuery; expect(query.errors).toEqual([]); query.id = '1'; @@ -565,7 +569,7 @@ describe('parameter queries', () => { test('user_parameters in SELECT', function () { const sql = 'SELECT id, user_parameters.other_id as other_id FROM users WHERE id = token_parameters.user_id'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); query.id = '1'; expect(query.evaluateParameterRow({ id: 'user1' })).toEqual([ @@ -581,7 +585,7 @@ describe('parameter queries', () => { test('request.parameters() in SELECT', function () { const sql = "SELECT id, request.parameters() ->> 'other_id' as other_id FROM users WHERE id = token_parameters.user_id"; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); query.id = '1'; expect(query.evaluateParameterRow({ id: 'user1' })).toEqual([ @@ -596,7 +600,7 @@ describe('parameter queries', () => { test('request.jwt()', function () { const sql = "SELECT FROM users WHERE id = request.jwt() ->> 'sub'"; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); const requestParams = normalizeTokenParameters({ user_id: 'user1' }); @@ -605,7 +609,7 @@ describe('parameter queries', () => { test('request.user_id()', function () { const sql = 'SELECT FROM users WHERE id = request.user_id()'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); const requestParams = normalizeTokenParameters({ user_id: 'user1' }); @@ -617,68 +621,67 @@ describe('parameter queries', () => { // into separate queries, but it's a significant change. For now, developers should do that manually. 
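The request.parameters() tests above also show the accompanying signature change: the separate trailing schema/options arguments of fromSql are folded into a single options object together with the parse options. A rough before/after sketch, assuming only the option names visible in this diff:

import { SqlParameterQuery } from '../../src/index.js';
import { PARSE_OPTIONS } from './util.js';

const sql = "SELECT FROM posts WHERE category = request.parameters() ->> 'category_id'";

// Before: SqlParameterQuery.fromSql('mybucket', sql, undefined, { accept_potentially_dangerous_queries: true })
// After: one options object carries everything. The schema used for column
// validation moves into the same object (see the 'validate columns' test below,
// which passes { ...PARSE_OPTIONS, schema }).
const query = SqlParameterQuery.fromSql('mybucket', sql, {
  ...PARSE_OPTIONS,
  accept_potentially_dangerous_queries: true
}) as SqlParameterQuery;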
const sql = "SELECT workspaces.id AS workspace_id FROM workspaces WHERE workspaces.user_id = token_parameters.user_id OR visibility = 'public'"; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors[0].message).toMatch(/must use the same parameters/); }); test('invalid OR in parameter queries (2)', () => { const sql = 'SELECT id from users WHERE filter_param = upper(token_parameters.user_id) OR filter_param = lower(token_parameters.user_id)'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors[0].message).toMatch(/must use the same parameters/); }); test('invalid parameter match clause (1)', () => { const sql = 'SELECT FROM users WHERE (id = token_parameters.user_id) = false'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors[0].message).toMatch(/Parameter match clauses cannot be used here/); }); test('invalid parameter match clause (2)', () => { const sql = 'SELECT FROM users WHERE NOT (id = token_parameters.user_id)'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors[0].message).toMatch(/Parameter match clauses cannot be used here/); }); test('invalid parameter match clause (3)', () => { // May be supported in the future const sql = 'SELECT FROM users WHERE token_parameters.start_at < users.created_at'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors[0].message).toMatch(/Cannot use table values and parameters in the same clauses/); }); test('invalid parameter match clause (4)', () => { const sql = 'SELECT FROM users WHERE json_extract(users.description, token_parameters.path)'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors[0].message).toMatch(/Cannot use table values and parameters in the same clauses/); }); test('invalid parameter match clause (5)', () => { const sql = 'SELECT (user_parameters.role = posts.roles) as r FROM posts'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors[0].message).toMatch(/Parameter match expression is not allowed here/); }); test('invalid function schema', () => { const sql = 'SELECT FROM users WHERE something.length(users.id) = 0'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors[0].message).toMatch(/Function 'something.length' is not defined/); }); test('validate columns', () => { const schema = BASIC_SCHEMA; - const q1 = SqlParameterQuery.fromSql( - 'q4', - 'SELECT id FROM assets WHERE owner_id = token_parameters.user_id', + const q1 = SqlParameterQuery.fromSql('q4', 'SELECT id FROM assets WHERE 
owner_id = token_parameters.user_id', { + ...PARSE_OPTIONS, schema - ); + }); expect(q1.errors).toMatchObject([]); const q2 = SqlParameterQuery.fromSql( 'q5', 'SELECT id as asset_id FROM assets WHERE other_id = token_parameters.user_id', - schema + { ...PARSE_OPTIONS, schema } ); expect(q2.errors).toMatchObject([ @@ -692,7 +695,7 @@ describe('parameter queries', () => { describe('dangerous queries', function () { function testDangerousQuery(sql: string) { test(sql, function () { - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toMatchObject([ { message: @@ -704,7 +707,7 @@ describe('parameter queries', () => { } function testSafeQuery(sql: string) { test(sql, function () { - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); expect(query.usesDangerousRequestParameters).toEqual(false); }); diff --git a/packages/sync-rules/test/src/static_parameter_queries.test.ts b/packages/sync-rules/test/src/static_parameter_queries.test.ts index b21533f9d..a82dd9106 100644 --- a/packages/sync-rules/test/src/static_parameter_queries.test.ts +++ b/packages/sync-rules/test/src/static_parameter_queries.test.ts @@ -1,12 +1,12 @@ import { describe, expect, test } from 'vitest'; import { RequestParameters, SqlParameterQuery } from '../../src/index.js'; import { StaticSqlParameterQuery } from '../../src/StaticSqlParameterQuery.js'; -import { normalizeTokenParameters } from './util.js'; +import { normalizeTokenParameters, PARSE_OPTIONS } from './util.js'; describe('static parameter queries', () => { test('basic query', function () { const sql = 'SELECT token_parameters.user_id'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as StaticSqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as StaticSqlParameterQuery; expect(query.errors).toEqual([]); expect(query.bucket_parameters!).toEqual(['user_id']); expect(query.getStaticBucketIds(normalizeTokenParameters({ user_id: 'user1' }))).toEqual(['mybucket["user1"]']); @@ -14,7 +14,7 @@ describe('static parameter queries', () => { test('global query', function () { const sql = 'SELECT'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as StaticSqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as StaticSqlParameterQuery; expect(query.errors).toEqual([]); expect(query.bucket_parameters!).toEqual([]); expect(query.getStaticBucketIds(normalizeTokenParameters({ user_id: 'user1' }))).toEqual(['mybucket[]']); @@ -22,7 +22,7 @@ describe('static parameter queries', () => { test('query with filter', function () { const sql = 'SELECT token_parameters.user_id WHERE token_parameters.is_admin'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as StaticSqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as StaticSqlParameterQuery; expect(query.errors).toEqual([]); expect(query.getStaticBucketIds(normalizeTokenParameters({ user_id: 'user1', is_admin: true }))).toEqual([ 'mybucket["user1"]' @@ -32,7 +32,7 @@ describe('static parameter queries', () => { test('function in select clause', function () { const sql = 'SELECT upper(token_parameters.user_id) as upper_id'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as 
StaticSqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as StaticSqlParameterQuery; expect(query.errors).toEqual([]); expect(query.getStaticBucketIds(normalizeTokenParameters({ user_id: 'user1' }))).toEqual(['mybucket["USER1"]']); expect(query.bucket_parameters!).toEqual(['upper_id']); @@ -40,7 +40,7 @@ describe('static parameter queries', () => { test('function in filter clause', function () { const sql = "SELECT WHERE upper(token_parameters.role) = 'ADMIN'"; - const query = SqlParameterQuery.fromSql('mybucket', sql) as StaticSqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as StaticSqlParameterQuery; expect(query.errors).toEqual([]); expect(query.getStaticBucketIds(normalizeTokenParameters({ role: 'admin' }))).toEqual(['mybucket[]']); expect(query.getStaticBucketIds(normalizeTokenParameters({ role: 'user' }))).toEqual([]); @@ -48,7 +48,7 @@ describe('static parameter queries', () => { test('comparison in filter clause', function () { const sql = 'SELECT WHERE token_parameters.id1 = token_parameters.id2'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as StaticSqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as StaticSqlParameterQuery; expect(query.errors).toEqual([]); expect(query.getStaticBucketIds(normalizeTokenParameters({ id1: 't1', id2: 't1' }))).toEqual(['mybucket[]']); expect(query.getStaticBucketIds(normalizeTokenParameters({ id1: 't1', id2: 't2' }))).toEqual([]); @@ -56,7 +56,8 @@ describe('static parameter queries', () => { test('request.parameters()', function () { const sql = "SELECT request.parameters() ->> 'org_id' as org_id"; - const query = SqlParameterQuery.fromSql('mybucket', sql, undefined, { + const query = SqlParameterQuery.fromSql('mybucket', sql, { + ...PARSE_OPTIONS, accept_potentially_dangerous_queries: true }) as StaticSqlParameterQuery; expect(query.errors).toEqual([]); @@ -66,7 +67,7 @@ describe('static parameter queries', () => { test('request.jwt()', function () { const sql = "SELECT request.jwt() ->> 'sub' as user_id"; - const query = SqlParameterQuery.fromSql('mybucket', sql) as StaticSqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as StaticSqlParameterQuery; expect(query.errors).toEqual([]); expect(query.bucket_parameters).toEqual(['user_id']); @@ -75,7 +76,7 @@ describe('static parameter queries', () => { test('request.user_id()', function () { const sql = 'SELECT request.user_id() as user_id'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as StaticSqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as StaticSqlParameterQuery; expect(query.errors).toEqual([]); expect(query.bucket_parameters).toEqual(['user_id']); @@ -84,28 +85,28 @@ describe('static parameter queries', () => { test('static value', function () { const sql = `SELECT WHERE 1`; - const query = SqlParameterQuery.fromSql('mybucket', sql) as StaticSqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as StaticSqlParameterQuery; expect(query.errors).toEqual([]); expect(query.getStaticBucketIds(new RequestParameters({ sub: '' }, {}))).toEqual(['mybucket[]']); }); test('static expression (1)', function () { const sql = `SELECT WHERE 1 = 1`; - const query = SqlParameterQuery.fromSql('mybucket', sql) as StaticSqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as 
StaticSqlParameterQuery; expect(query.errors).toEqual([]); expect(query.getStaticBucketIds(new RequestParameters({ sub: '' }, {}))).toEqual(['mybucket[]']); }); test('static expression (2)', function () { const sql = `SELECT WHERE 1 != 1`; - const query = SqlParameterQuery.fromSql('mybucket', sql) as StaticSqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as StaticSqlParameterQuery; expect(query.errors).toEqual([]); expect(query.getStaticBucketIds(new RequestParameters({ sub: '' }, {}))).toEqual([]); }); test('static IN expression', function () { const sql = `SELECT WHERE 'admin' IN '["admin", "superuser"]'`; - const query = SqlParameterQuery.fromSql('mybucket', sql, undefined, {}) as StaticSqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as StaticSqlParameterQuery; expect(query.errors).toEqual([]); expect(query.getStaticBucketIds(new RequestParameters({ sub: '' }, {}))).toEqual(['mybucket[]']); }); @@ -113,7 +114,7 @@ describe('static parameter queries', () => { test('IN for permissions in request.jwt() (1)', function () { // Can use -> or ->> here const sql = `SELECT 'read:users' IN (request.jwt() ->> 'permissions') as access_granted`; - const query = SqlParameterQuery.fromSql('mybucket', sql) as StaticSqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as StaticSqlParameterQuery; expect(query.errors).toEqual([]); expect( query.getStaticBucketIds(new RequestParameters({ sub: '', permissions: ['write', 'read:users'] }, {})) @@ -126,7 +127,7 @@ describe('static parameter queries', () => { test('IN for permissions in request.jwt() (2)', function () { // Can use -> or ->> here const sql = `SELECT WHERE 'read:users' IN (request.jwt() ->> 'permissions')`; - const query = SqlParameterQuery.fromSql('mybucket', sql, undefined, {}) as StaticSqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as StaticSqlParameterQuery; expect(query.errors).toEqual([]); expect( query.getStaticBucketIds(new RequestParameters({ sub: '', permissions: ['write', 'read:users'] }, {})) @@ -138,7 +139,7 @@ describe('static parameter queries', () => { test('IN for permissions in request.jwt() (3)', function () { const sql = `SELECT WHERE request.jwt() ->> 'role' IN '["admin", "superuser"]'`; - const query = SqlParameterQuery.fromSql('mybucket', sql, undefined, {}) as StaticSqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as StaticSqlParameterQuery; expect(query.errors).toEqual([]); expect(query.getStaticBucketIds(new RequestParameters({ sub: '', role: 'superuser' }, {}))).toEqual(['mybucket[]']); expect(query.getStaticBucketIds(new RequestParameters({ sub: '', role: 'superadmin' }, {}))).toEqual([]); @@ -146,7 +147,7 @@ describe('static parameter queries', () => { test('case-sensitive queries (1)', () => { const sql = 'SELECT request.user_id() as USER_ID'; - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toMatchObject([ { message: `Unquoted identifiers are converted to lower-case. 
Use "USER_ID" instead.` } ]); @@ -155,7 +156,7 @@ describe('static parameter queries', () => { describe('dangerous queries', function () { function testDangerousQuery(sql: string) { test(sql, function () { - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toMatchObject([ { message: @@ -167,7 +168,7 @@ describe('static parameter queries', () => { } function testSafeQuery(sql: string) { test(sql, function () { - const query = SqlParameterQuery.fromSql('mybucket', sql) as SqlParameterQuery; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; expect(query.errors).toEqual([]); expect(query.usesDangerousRequestParameters).toEqual(false); }); diff --git a/packages/sync-rules/test/src/sync_rules.test.ts b/packages/sync-rules/test/src/sync_rules.test.ts index e5fafd1f9..54d507440 100644 --- a/packages/sync-rules/test/src/sync_rules.test.ts +++ b/packages/sync-rules/test/src/sync_rules.test.ts @@ -1,29 +1,24 @@ import { describe, expect, test } from 'vitest'; -import { - DEFAULT_SCHEMA, - DEFAULT_TAG, - DartSchemaGenerator, - JsLegacySchemaGenerator, - SqlSyncRules, - StaticSchema, - TsSchemaGenerator -} from '../../src/index.js'; - -import { ASSETS, BASIC_SCHEMA, TestSourceTable, USERS, normalizeTokenParameters } from './util.js'; +import { SqlSyncRules } from '../../src/index.js'; + +import { ASSETS, BASIC_SCHEMA, PARSE_OPTIONS, TestSourceTable, USERS, normalizeTokenParameters } from './util.js'; describe('sync rules', () => { test('parse empty sync rules', () => { - const rules = SqlSyncRules.fromYaml('bucket_definitions: {}'); + const rules = SqlSyncRules.fromYaml('bucket_definitions: {}', PARSE_OPTIONS); expect(rules.bucket_descriptors).toEqual([]); }); test('parse global sync rules', () => { - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: data: - SELECT id, description FROM assets - `); + `, + PARSE_OPTIONS + ); const bucket = rules.bucket_descriptors[0]; expect(bucket.name).toEqual('mybucket'); expect(bucket.bucket_parameters).toEqual([]); @@ -46,12 +41,15 @@ bucket_definitions: }); test('parse global sync rules with filter', () => { - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: parameters: SELECT WHERE token_parameters.is_admin data: [] - `); + `, + PARSE_OPTIONS + ); const bucket = rules.bucket_descriptors[0]; expect(bucket.bucket_parameters).toEqual([]); const param_query = bucket.global_parameter_queries[0]; @@ -66,12 +64,15 @@ bucket_definitions: }); test('parse global sync rules with table filter', () => { - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: parameters: SELECT FROM users WHERE users.id = token_parameters.user_id AND users.is_admin data: [] - `); + `, + PARSE_OPTIONS + ); const bucket = rules.bucket_descriptors[0]; expect(bucket.bucket_parameters).toEqual([]); const param_query = bucket.parameter_queries[0]; @@ -86,13 +87,16 @@ bucket_definitions: }); test('parse bucket with parameters', () => { - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: parameters: SELECT token_parameters.user_id, user_parameters.device_id data: - SELECT id, description FROM assets WHERE assets.user_id = bucket.user_id AND assets.device_id = bucket.device_id 
AND NOT assets.archived - `); + `, + PARSE_OPTIONS + ); const bucket = rules.bucket_descriptors[0]; expect(bucket.bucket_parameters).toEqual(['user_id', 'device_id']); const param_query = bucket.global_parameter_queries[0]; @@ -129,13 +133,16 @@ bucket_definitions: }); test('parse bucket with parameters and OR condition', () => { - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: parameters: SELECT token_parameters.user_id data: - SELECT id, description FROM assets WHERE assets.user_id = bucket.user_id OR assets.owner_id = bucket.user_id - `); + `, + PARSE_OPTIONS + ); const bucket = rules.bucket_descriptors[0]; expect(bucket.bucket_parameters).toEqual(['user_id']); const param_query = bucket.global_parameter_queries[0]; @@ -182,80 +189,104 @@ bucket_definitions: test('parse bucket with parameters and invalid OR condition', () => { expect(() => { - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: parameters: SELECT token_parameters.user_id data: - SELECT id, description FROM assets WHERE assets.user_id = bucket.user_id AND (assets.user_id = bucket.foo OR assets.other_id = bucket.bar) - `); + `, + PARSE_OPTIONS + ); }).toThrowError(/must use the same parameters/); }); test('reject unsupported queries', () => { expect( - SqlSyncRules.validate(` + SqlSyncRules.validate( + ` bucket_definitions: mybucket: parameters: SELECT token_parameters.user_id LIMIT 1 data: [] - `) + `, + PARSE_OPTIONS + ) ).toMatchObject([{ message: 'LIMIT is not supported' }]); expect( - SqlSyncRules.validate(` + SqlSyncRules.validate( + ` bucket_definitions: mybucket: data: - SELECT DISTINCT id, description FROM assets - `) + `, + PARSE_OPTIONS + ) ).toMatchObject([{ message: 'DISTINCT is not supported' }]); expect( - SqlSyncRules.validate(` + SqlSyncRules.validate( + ` bucket_definitions: mybucket: parameters: SELECT token_parameters.user_id OFFSET 10 data: [] - `) + `, + PARSE_OPTIONS + ) ).toMatchObject([{ message: 'LIMIT is not supported' }]); expect(() => { - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: parameters: SELECT token_parameters.user_id FOR UPDATE SKIP LOCKED data: [] - `); + `, + PARSE_OPTIONS + ); }).toThrowError(/SKIP is not supported/); expect(() => { - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: parameters: SELECT token_parameters.user_id FOR UPDATE data: [] - `); + `, + PARSE_OPTIONS + ); }).toThrowError(/FOR is not supported/); expect(() => { - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: data: - SELECT id, description FROM assets ORDER BY id - `); + `, + PARSE_OPTIONS + ); }).toThrowError(/ORDER BY is not supported/); }); test('transforming things', () => { - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: parameters: SELECT upper(token_parameters.user_id) AS user_id data: - SELECT id, upper(description) AS description_upper FROM assets WHERE upper(assets.user_id) = bucket.user_id AND NOT assets.archived - `); + `, + PARSE_OPTIONS + ); const bucket = rules.bucket_descriptors[0]; expect(bucket.bucket_parameters).toEqual(['user_id']); expect(rules.getStaticBucketIds(normalizeTokenParameters({ user_id: 'user1' }))).toEqual(['mybucket["USER1"]']); @@ -281,13 +312,16 @@ bucket_definitions: test('transforming things with 
upper-case functions', () => { // Testing that we can use different case for the function names - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: parameters: SELECT UPPER(token_parameters.user_id) AS user_id data: - SELECT id, UPPER(description) AS description_upper FROM assets WHERE UPPER(assets.user_id) = bucket.user_id AND NOT assets.archived - `); + `, + PARSE_OPTIONS + ); const bucket = rules.bucket_descriptors[0]; expect(bucket.bucket_parameters).toEqual(['user_id']); expect(rules.getStaticBucketIds(normalizeTokenParameters({ user_id: 'user1' }))).toEqual(['mybucket["USER1"]']); @@ -312,12 +346,15 @@ bucket_definitions: }); test('transforming json', () => { - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: data: - SELECT id, data ->> 'count' AS count, data -> 'bool' AS bool1, data ->> 'bool' AS bool2, 'true' ->> '$' as bool3, json_extract(data, '$.bool') AS bool4 FROM assets - `); + `, + PARSE_OPTIONS + ); expect( rules.evaluateRow({ sourceTable: ASSETS, @@ -342,13 +379,16 @@ bucket_definitions: }); test('IN json', () => { - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: parameters: SELECT token_parameters.region_id data: - SELECT id, description FROM assets WHERE bucket.region_id IN assets.region_ids - `); + `, + PARSE_OPTIONS + ); expect( rules.evaluateRow({ @@ -384,14 +424,17 @@ bucket_definitions: }); test('direct boolean param', () => { - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: parameters: SELECT token_parameters.is_admin data: - SELECT id, description, role, 'admin' as rule FROM assets WHERE bucket.is_admin - SELECT id, description, role, 'normal' as rule FROM assets WHERE (bucket.is_admin OR bucket.is_admin = false) AND assets.role != 'admin' - `); + `, + PARSE_OPTIONS + ); expect( rules.evaluateRow({ sourceTable: ASSETS, record: { id: 'asset1', description: 'test', role: 'admin' } }) @@ -455,12 +498,15 @@ bucket_definitions: }); test('some math', () => { - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: data: - SELECT id, (5 / 2) AS int, (5 / 2.0) AS float, (CAST(5 AS real) / 2) AS float2 FROM assets - `); + `, + PARSE_OPTIONS + ); expect(rules.evaluateRow({ sourceTable: ASSETS, record: { id: 'asset1' } })).toEqual([ { @@ -479,13 +525,16 @@ bucket_definitions: }); test('bucket with static numeric parameters', () => { - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: parameters: SELECT token_parameters.int1, token_parameters.float1, token_parameters.float2 data: - SELECT id FROM assets WHERE assets.int1 = bucket.int1 AND assets.float1 = bucket.float1 AND assets.float2 = bucket.float2 - `); + `, + PARSE_OPTIONS + ); expect(rules.getStaticBucketIds(normalizeTokenParameters({ int1: 314, float1: 3.14, float2: 314 }))).toEqual([ 'mybucket[314,3.14,314]' ]); @@ -506,24 +555,30 @@ bucket_definitions: }); test('static parameter query with function on token_parameter', () => { - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: parameters: SELECT upper(token_parameters.user_id) as upper data: [] - `); + `, + PARSE_OPTIONS + ); expect(rules.errors).toEqual([]); expect(rules.getStaticBucketIds(normalizeTokenParameters({ user_id: 'test' 
}))).toEqual(['mybucket["TEST"]']); }); test('custom table and id', () => { - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: data: - SELECT client_id AS id, description FROM assets_123 as assets WHERE assets.archived = false - SELECT other_id AS id, description FROM assets_123 as assets - `); + `, + PARSE_OPTIONS + ); expect( rules.evaluateRow({ @@ -555,12 +610,15 @@ bucket_definitions: }); test('wildcard table', () => { - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: data: - SELECT client_id AS id, description, _table_suffix as suffix, * FROM "assets_%" as assets WHERE assets.archived = false AND _table_suffix > '100' - `); + `, + PARSE_OPTIONS + ); expect( rules.evaluateRow({ @@ -586,12 +644,15 @@ bucket_definitions: }); test('wildcard without alias', () => { - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: data: - SELECT *, _table_suffix as suffix, * FROM "%" WHERE archived = false - `); + `, + PARSE_OPTIONS + ); expect( rules.evaluateRow({ @@ -615,16 +676,19 @@ bucket_definitions: }); test('should filter schemas', () => { - const rules = SqlSyncRules.fromYaml(` + const rules = SqlSyncRules.fromYaml( + ` bucket_definitions: mybucket: data: - SELECT id FROM "assets" # Yes - - SELECT id FROM "public"."assets" # yes - - SELECT id FROM "default.public"."assets" # yes + - SELECT id FROM "test_schema"."assets" # yes + - SELECT id FROM "default.test_schema"."assets" # yes - SELECT id FROM "other"."assets" # no - - SELECT id FROM "other.public"."assets" # no - `); + - SELECT id FROM "other.test_schema"."assets" # no + `, + PARSE_OPTIONS + ); expect( rules.evaluateRow({ @@ -670,7 +734,7 @@ bucket_definitions: parameters: SELECT id FROM assets WHERE other_id = token_parameters.user_id data: [] `, - { schema: BASIC_SCHEMA } + { schema: BASIC_SCHEMA, ...PARSE_OPTIONS } ); expect(rules.errors).toMatchObject([ @@ -689,7 +753,7 @@ bucket_definitions: parameters: SELECT request.parameters() ->> 'project_id' as project_id data: [] `, - { schema: BASIC_SCHEMA } + { schema: BASIC_SCHEMA, ...PARSE_OPTIONS } ); expect(rules.errors).toMatchObject([ @@ -710,112 +774,9 @@ bucket_definitions: parameters: SELECT request.parameters() ->> 'project_id' as project_id data: [] `, - { schema: BASIC_SCHEMA } + { schema: BASIC_SCHEMA, ...PARSE_OPTIONS } ); expect(rules.errors).toEqual([]); }); - - test('schema generation', () => { - const schema = new StaticSchema([ - { - tag: DEFAULT_TAG, - schemas: [ - { - name: DEFAULT_SCHEMA, - tables: [ - { - name: 'assets', - columns: [ - { name: 'id', pg_type: 'uuid' }, - { name: 'name', pg_type: 'text' }, - { name: 'count', pg_type: 'int4' }, - { name: 'owner_id', pg_type: 'uuid' } - ] - } - ] - } - ] - } - ]); - - const rules = SqlSyncRules.fromYaml(` -bucket_definitions: - mybucket: - data: - - SELECT * FROM assets as assets1 - - SELECT id, name, count FROM assets as assets2 - - SELECT id, owner_id as other_id, foo FROM assets as ASSETS2 - `); - - expect(new DartSchemaGenerator().generate(rules, schema)).toEqual(`Schema([ - Table('assets1', [ - Column.text('name'), - Column.integer('count'), - Column.text('owner_id') - ]), - Table('assets2', [ - Column.text('name'), - Column.integer('count'), - Column.text('other_id'), - Column.text('foo') - ]) -]); -`); - - expect(new JsLegacySchemaGenerator().generate(rules, schema)).toEqual(`new Schema([ - new Table({ - name: 'assets1', - columns: [ - 
new Column({ name: 'name', type: ColumnType.TEXT }), - new Column({ name: 'count', type: ColumnType.INTEGER }), - new Column({ name: 'owner_id', type: ColumnType.TEXT }) - ] - }), - new Table({ - name: 'assets2', - columns: [ - new Column({ name: 'name', type: ColumnType.TEXT }), - new Column({ name: 'count', type: ColumnType.INTEGER }), - new Column({ name: 'other_id', type: ColumnType.TEXT }), - new Column({ name: 'foo', type: ColumnType.TEXT }) - ] - }) -]) -`); - - expect(new TsSchemaGenerator().generate(rules, schema)).toEqual( - `import { column, Schema, Table } from '@powersync/web'; -// OR: import { column, Schema, Table } from '@powersync/react-native'; - -const assets1 = new Table( - { - // id column (text) is automatically included - name: column.text, - count: column.integer, - owner_id: column.text - }, - { indexes: {} } -); - -const assets2 = new Table( - { - // id column (text) is automatically included - name: column.text, - count: column.integer, - other_id: column.text, - foo: column.text - }, - { indexes: {} } -); - -export const AppSchema = new Schema({ - assets1, - assets2 -}); - -export type Database = (typeof AppSchema)['types']; -` - ); - }); }); diff --git a/packages/sync-rules/test/src/table_valued_function_queries.test.ts b/packages/sync-rules/test/src/table_valued_function_queries.test.ts new file mode 100644 index 000000000..aaa33cac4 --- /dev/null +++ b/packages/sync-rules/test/src/table_valued_function_queries.test.ts @@ -0,0 +1,146 @@ +import { describe, expect, test } from 'vitest'; +import { RequestParameters, SqlParameterQuery } from '../../src/index.js'; +import { StaticSqlParameterQuery } from '../../src/StaticSqlParameterQuery.js'; +import { PARSE_OPTIONS } from './util.js'; + +describe('table-valued function queries', () => { + test('json_each(array param)', function () { + const sql = "SELECT json_each.value as v FROM json_each(request.parameters() -> 'array')"; + const query = SqlParameterQuery.fromSql('mybucket', sql, { + ...PARSE_OPTIONS, + accept_potentially_dangerous_queries: true + }) as StaticSqlParameterQuery; + expect(query.errors).toEqual([]); + expect(query.bucket_parameters).toEqual(['v']); + + expect(query.getStaticBucketIds(new RequestParameters({ sub: '' }, { array: [1, 2, 3] }))).toEqual([ + 'mybucket[1]', + 'mybucket[2]', + 'mybucket[3]' + ]); + }); + + test('json_each(static string)', function () { + const sql = `SELECT json_each.value as v FROM json_each('[1,2,3]')`; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as StaticSqlParameterQuery; + expect(query.errors).toEqual([]); + expect(query.bucket_parameters).toEqual(['v']); + + expect(query.getStaticBucketIds(new RequestParameters({ sub: '' }, {}))).toEqual([ + 'mybucket[1]', + 'mybucket[2]', + 'mybucket[3]' + ]); + }); + + test('json_each(null)', function () { + const sql = `SELECT json_each.value as v FROM json_each(null)`; + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as StaticSqlParameterQuery; + expect(query.errors).toEqual([]); + expect(query.bucket_parameters).toEqual(['v']); + + expect(query.getStaticBucketIds(new RequestParameters({ sub: '' }, {}))).toEqual([]); + }); + + test('json_each with fn alias', function () { + const sql = "SELECT e.value FROM json_each(request.parameters() -> 'array') e"; + const query = SqlParameterQuery.fromSql('mybucket', sql, { + ...PARSE_OPTIONS, + accept_potentially_dangerous_queries: true + }) as StaticSqlParameterQuery; + expect(query.errors).toEqual([]); + 
expect(query.bucket_parameters).toEqual(['value']); + + expect(query.getStaticBucketIds(new RequestParameters({ sub: '' }, { array: [1, 2, 3] }))).toEqual([ + 'mybucket[1]', + 'mybucket[2]', + 'mybucket[3]' + ]); + }); + + test('json_each with direct value', function () { + const sql = "SELECT value FROM json_each(request.parameters() -> 'array')"; + const query = SqlParameterQuery.fromSql('mybucket', sql, { + ...PARSE_OPTIONS, + accept_potentially_dangerous_queries: true + }) as StaticSqlParameterQuery; + expect(query.errors).toEqual([]); + expect(query.bucket_parameters).toEqual(['value']); + + expect(query.getStaticBucketIds(new RequestParameters({ sub: '' }, { array: [1, 2, 3] }))).toEqual([ + 'mybucket[1]', + 'mybucket[2]', + 'mybucket[3]' + ]); + }); + + test('json_each in filters (1)', function () { + const sql = "SELECT value as v FROM json_each(request.parameters() -> 'array') e WHERE e.value >= 2"; + const query = SqlParameterQuery.fromSql('mybucket', sql, { + ...PARSE_OPTIONS, + accept_potentially_dangerous_queries: true + }) as StaticSqlParameterQuery; + expect(query.errors).toEqual([]); + expect(query.bucket_parameters).toEqual(['v']); + + expect(query.getStaticBucketIds(new RequestParameters({ sub: '' }, { array: [1, 2, 3] }))).toEqual([ + 'mybucket[2]', + 'mybucket[3]' + ]); + }); + + test('json_each with nested json', function () { + const sql = + "SELECT value ->> 'id' as project_id FROM json_each(request.jwt() -> 'projects') WHERE (value ->> 'role') = 'admin'"; + const query = SqlParameterQuery.fromSql('mybucket', sql, { + ...PARSE_OPTIONS, + accept_potentially_dangerous_queries: true + }) as StaticSqlParameterQuery; + expect(query.errors).toEqual([]); + expect(query.bucket_parameters).toEqual(['project_id']); + + expect( + query.getStaticBucketIds( + new RequestParameters( + { + sub: '', + projects: [ + { id: 1, role: 'admin' }, + { id: 2, role: 'user' } + ] + }, + {} + ) + ) + ).toEqual(['mybucket[1]']); + }); + + describe('dangerous queries', function () { + function testDangerousQuery(sql: string) { + test(sql, function () { + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; + expect(query.errors).toMatchObject([ + { + message: + "Potentially dangerous query based on parameters set by the client. The client can send any value for these parameters so it's not a good place to do authorization." 
+ } + ]); + expect(query.usesDangerousRequestParameters).toEqual(true); + }); + } + function testSafeQuery(sql: string) { + test(sql, function () { + const query = SqlParameterQuery.fromSql('mybucket', sql, PARSE_OPTIONS) as SqlParameterQuery; + expect(query.errors).toEqual([]); + expect(query.usesDangerousRequestParameters).toEqual(false); + }); + } + + testSafeQuery('select value from json_each(request.user_id())'); + testDangerousQuery("select value from json_each(request.parameters() ->> 'project_ids')"); + testSafeQuery("select request.user_id() as user_id, value FROM json_each(request.parameters() ->> 'project_ids')"); + testSafeQuery( + "select request.parameters() ->> 'something' as something, value as project_id FROM json_each(request.jwt() ->> 'project_ids')" + ); + }); +}); diff --git a/packages/sync-rules/test/src/util.ts b/packages/sync-rules/test/src/util.ts index 1b4b12117..e1ed5b804 100644 --- a/packages/sync-rules/test/src/util.ts +++ b/packages/sync-rules/test/src/util.ts @@ -1,5 +1,4 @@ import { - DEFAULT_SCHEMA, DEFAULT_TAG, RequestJwtPayload, RequestParameters, @@ -9,11 +8,15 @@ import { export class TestSourceTable implements SourceTableInterface { readonly connectionTag = DEFAULT_TAG; - readonly schema = DEFAULT_SCHEMA; + readonly schema = 'test_schema'; constructor(public readonly table: string) {} } +export const PARSE_OPTIONS = { + defaultSchema: 'test_schema' +}; + export const ASSETS = new TestSourceTable('assets'); export const USERS = new TestSourceTable('users'); @@ -22,7 +25,7 @@ export const BASIC_SCHEMA = new StaticSchema([ tag: DEFAULT_TAG, schemas: [ { - name: DEFAULT_SCHEMA, + name: 'test_schema', tables: [ { name: 'assets', diff --git a/packages/types/src/config/PowerSyncConfig.ts b/packages/types/src/config/PowerSyncConfig.ts index a0bf765b7..876f1b09f 100644 --- a/packages/types/src/config/PowerSyncConfig.ts +++ b/packages/types/src/config/PowerSyncConfig.ts @@ -3,7 +3,7 @@ import * as t from 'ts-codec'; /** * Users might specify ports as strings if using YAML custom tag environment substitutions */ -const portCodec = t.codec( +export const portCodec = t.codec( 'Port', (value) => value, (value) => (typeof value == 'number' ? value : parseInt(value)) @@ -19,39 +19,39 @@ export const portParser = { }) }; -export const postgresConnection = t.object({ - type: t.literal('postgresql'), +export const DataSourceConfig = t.object({ + // Unique string identifier for the data source + type: t.string, /** Unique identifier for the connection - optional when a single connection is present. */ id: t.string.optional(), - /** Tag used as reference in sync rules. Defaults to "default". Does not have to be unique. */ + /** Additional meta tag for connection */ tag: t.string.optional(), - uri: t.string.optional(), - hostname: t.string.optional(), - port: portCodec.optional(), - username: t.string.optional(), - password: t.string.optional(), - database: t.string.optional(), - - /** Defaults to verify-full */ - sslmode: t.literal('verify-full').or(t.literal('verify-ca')).or(t.literal('disable')).optional(), - /** Required for verify-ca, optional for verify-full */ - cacert: t.string.optional(), - - client_certificate: t.string.optional(), - client_private_key: t.string.optional(), - - /** Expose database credentials */ - demo_database: t.boolean.optional(), - /** Expose "execute-sql" */ - debug_api: t.boolean.optional(), - /** - * Prefix for the slot name. 
Defaults to "powersync_" + * Allows for debug query execution */ - slot_name_prefix: t.string.optional() + debug_api: t.boolean.optional() }); -export type PostgresConnection = t.Decoded; +export type DataSourceConfig = t.Decoded; + +/** + * Resolved version of {@link DataSourceConfig} where the optional + * `id` and `tag` field is now required. + */ +export const ResolvedDataSourceConfig = DataSourceConfig.and( + t.object({ + id: t.string, + tag: t.string + }) +); + +export type ResolvedDataSourceConfig = t.Decoded; + +/** + * This essentially allows any extra fields on this type + */ +export const genericDataSourceConfig = DataSourceConfig.and(t.record(t.any)); +export type GenericDataSourceConfig = t.Decoded; export const jwkRSA = t.object({ kty: t.literal('RSA'), @@ -95,7 +95,8 @@ export type StorageConfig = t.Decoded; export const powerSyncConfig = t.object({ replication: t .object({ - connections: t.array(postgresConnection).optional() + // This uses the generic config which may have additional fields + connections: t.array(genericDataSourceConfig).optional() }) .optional(), @@ -147,7 +148,9 @@ export const powerSyncConfig = t.object({ disable_telemetry_sharing: t.boolean, internal_service_endpoint: t.string.optional() }) - .optional() + .optional(), + + parameters: t.record(t.number.or(t.string).or(t.boolean).or(t.Null)).optional() }); export type PowerSyncConfig = t.Decoded; diff --git a/packages/types/src/config/normalize.ts b/packages/types/src/config/normalize.ts index 284b3077c..e69de29bb 100644 --- a/packages/types/src/config/normalize.ts +++ b/packages/types/src/config/normalize.ts @@ -1,113 +0,0 @@ -import * as urijs from 'uri-js'; -import { PostgresConnection } from './PowerSyncConfig.js'; - -/** - * Validate and normalize connection options. - * - * Returns destructured options. - */ -export function normalizeConnection(options: PostgresConnection): NormalizedPostgresConnection { - let uri: urijs.URIComponents; - if (options.uri) { - uri = urijs.parse(options.uri); - if (uri.scheme != 'postgresql' && uri.scheme != 'postgres') { - `Invalid URI - protocol must be postgresql, got ${uri.scheme}`; - } else if (uri.scheme != 'postgresql') { - uri.scheme = 'postgresql'; - } - } else { - uri = urijs.parse('postgresql:///'); - } - - const hostname = options.hostname ?? uri.host ?? ''; - const port = validatePort(options.port ?? uri.port ?? 5432); - - const database = options.database ?? uri.path?.substring(1) ?? ''; - - const [uri_username, uri_password] = (uri.userinfo ?? '').split(':'); - - const username = options.username ?? uri_username ?? ''; - const password = options.password ?? uri_password ?? ''; - - const sslmode = options.sslmode ?? 'verify-full'; // Configuration not supported via URI - const cacert = options.cacert; - - if (sslmode == 'verify-ca' && cacert == null) { - throw new Error('Explicit cacert is required for sslmode=verify-ca'); - } - - if (hostname == '') { - throw new Error(`hostname required`); - } - - if (username == '') { - throw new Error(`username required`); - } - - if (password == '') { - throw new Error(`password required`); - } - - if (database == '') { - throw new Error(`database required`); - } - - return { - id: options.id ?? 'default', - tag: options.tag ?? 'default', - - hostname, - port, - database, - - username, - password, - sslmode, - cacert, - - client_certificate: options.client_certificate ?? undefined, - client_private_key: options.client_private_key ?? 
undefined
- };
-}
-
-export interface NormalizedPostgresConnection {
- id: string;
- tag: string;
-
- hostname: string;
- port: number;
- database: string;
-
- username: string;
- password: string;
-
- sslmode: 'verify-full' | 'verify-ca' | 'disable';
- cacert: string | undefined;
-
- client_certificate: string | undefined;
- client_private_key: string | undefined;
-}
-
-/**
- * Check whether the port is in a "safe" range.
- *
- * We do not support connecting to "privileged" ports.
- */
-export function validatePort(port: string | number): number {
- if (typeof port == 'string') {
- port = parseInt(port);
- }
- if (port < 1024) {
- throw new Error(`Port ${port} not supported`);
- }
- return port;
-}
-
-/**
- * Construct a postgres URI, without username, password or ssl options.
- *
- * Only contains hostname, port, database.
- */
-export function baseUri(options: NormalizedPostgresConnection) {
- return `postgresql://${options.hostname}:${options.port}/${options.database}`;
-}
diff --git a/packages/types/src/definitions.ts b/packages/types/src/definitions.ts
index d24aa76c9..dc3bdf879 100644
--- a/packages/types/src/definitions.ts
+++ b/packages/types/src/definitions.ts
@@ -82,27 +82,66 @@ export const ConnectionStatus = t.object({
});
export type ConnectionStatus = t.Encoded<typeof ConnectionStatus>;
-export const DatabaseSchema = t.object({
+export const ConnectionStatusV2 = t.object({
+ id: t.string,
+ uri: t.string,
+ connected: t.boolean,
+ /** Connection-level errors */
+ errors: t.array(ReplicationError)
+});
+export type ConnectionStatusV2 = t.Encoded<typeof ConnectionStatusV2>;
+
+export enum SqliteSchemaTypeText {
+ null = 'null',
+ blob = 'blob',
+ text = 'text',
+ integer = 'integer',
+ real = 'real',
+ numeric = 'numeric'
+}
+
+export const TableSchema = t.object({
name: t.string,
- tables: t.array(
+ columns: t.array(
t.object({
name: t.string,
- columns: t.array(
- t.object({
- name: t.string,
- /**
- * Full type name, e.g. "character varying(255)[]"
- */
- type: t.string,
- /**
- * Internal postgres type, e.g. "varchar[]".
- */
- pg_type: t.string
- })
- )
+
+ /**
+ * Option 1: SQLite type flags - see ExpressionType.typeFlags.
+ * Option 2: SQLite type name in lowercase - 'text' | 'integer' | 'real' | 'numeric' | 'blob' | 'null'
+ */
+ sqlite_type: t.number.or(t.Enum(SqliteSchemaTypeText)),
+
+ /**
+ * Type name from the source database, e.g. "character varying(255)[]"
+ */
+ internal_type: t.string,
+
+ /**
+ * Description for the field if available.
+ */
+ description: t.string.optional(),
+
+ /**
+ * Full type name, e.g. "character varying(255)[]"
+ * @deprecated - use internal_type
+ */
+ type: t.string,
+
+ /**
+ * Internal postgres type, e.g. "varchar[]".
+ * @deprecated - use internal_type instead
+ */
+ pg_type: t.string
})
)
});
+export type TableSchema = t.Encoded<typeof TableSchema>;
+
+export const DatabaseSchema = t.object({
+ name: t.string,
+ tables: t.array(TableSchema)
+});
export type DatabaseSchema = t.Encoded<typeof DatabaseSchema>;
export const InstanceSchema = t.object({
@@ -112,6 +151,8 @@
tag: t.string,
schemas: t.array(DatabaseSchema)
})
- )
+ ),
+ defaultConnectionTag: t.string,
+ defaultSchema: t.string
});
export type InstanceSchema = t.Encoded<typeof InstanceSchema>;
diff --git a/packages/types/src/index.ts b/packages/types/src/index.ts
index 6ce5aa153..f57c90eb3 100644
--- a/packages/types/src/index.ts
+++ b/packages/types/src/index.ts
@@ -1,5 +1,4 @@
export * as configFile from './config/PowerSyncConfig.js';
-export { PostgresConnection } from './config/PowerSyncConfig.js';
export * from './definitions.js';
export * from './config/normalize.js';
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 80def96eb..012efe2c1 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -9,29 +9,29 @@ importers:
.:
devDependencies:
'@changesets/cli':
- specifier: ^2.27.3
- version: 2.27.3
+ specifier: ^2.27.8
+ version: 2.27.8
'@types/node':
- specifier: 18.11.11
- version: 18.11.11
+ specifier: ^22.5.5
+ version: 22.5.5
async:
specifier: ^3.2.4
version: 3.2.5
bson:
specifier: ^6.6.0
- version: 6.7.0
+ version: 6.8.0
concurrently:
specifier: ^8.2.2
version: 8.2.2
inquirer:
specifier: ^9.2.7
- version: 9.2.22
+ version: 9.3.5
npm-check-updates:
- specifier: ^16.10.15
- version: 16.14.20
+ specifier: ^17.1.2
+ version: 17.1.3
prettier:
- specifier: ^2.8.8
- version: 2.8.8
+ specifier: ^3.3.3
+ version: 3.3.3
rsocket-core:
specifier: 1.0.0-alpha.3
version: 1.0.0-alpha.3
@@ -43,28 +43,34 @@
version: 7.6.2
ts-node-dev:
specifier: ^2.0.0
- version: 2.0.0(@types/node@18.11.11)(typescript@5.2.2)
+ version: 2.0.0(@types/node@22.5.5)(typescript@5.6.2)
tsc-watch:
specifier: ^6.2.0
- version: 6.2.0(typescript@5.2.2)
+ version: 6.2.0(typescript@5.6.2)
typescript:
- specifier: ~5.2.2
- version: 5.2.2
+ specifier: ^5.6.2
+ version: 5.6.2
+ vite-tsconfig-paths:
+ specifier: ^4.3.2
+ version: 4.3.2(typescript@5.6.2)(vite@5.3.3(@types/node@22.5.5))
+ vitest:
+ specifier: ^2.1.1
+ version: 2.1.1(@types/node@22.5.5)
ws:
specifier: ^8.2.3
- version: 8.2.3
+ version: 8.18.0
libs/lib-services:
dependencies:
ajv:
specifier: ^8.12.0
- version: 8.14.0
+ version: 8.16.0
better-ajv-errors:
specifier: ^1.2.0
- version: 1.2.0(ajv@8.14.0)
+ version: 1.2.0(ajv@8.16.0)
bson:
specifier: ^6.6.0
- version: 6.7.0
+ version: 6.8.0
dotenv:
specifier: ^16.4.5
version: 16.4.5
@@ -74,31 +80,163 @@
ts-codec:
specifier: ^1.2.2
version: 1.2.2
+ uuid:
+ specifier: ^9.0.1
+ version: 9.0.1
winston:
specifier: ^3.13.0
- version: 3.13.0
+ version: 3.13.1
zod:
specifier: ^3.23.8
version: 3.23.8
devDependencies:
'@types/lodash':
specifier: ^4.17.5
- version: 4.17.5
+ version: 4.17.6
+ '@types/uuid':
+ specifier: ^9.0.4
+ version: 9.0.8
vitest:
- specifier: ^0.34.6
- version: 0.34.6
+ specifier: ^2.1.1
+ version: 2.1.1(@types/node@22.5.5)
+
+ modules/module-mongodb:
+ dependencies:
+ '@powersync/lib-services-framework':
+ specifier: workspace:*
+ version: link:../../libs/lib-services
+ '@powersync/service-core':
+ specifier: workspace:*
+ version: link:../../packages/service-core
+ '@powersync/service-jsonbig':
+ specifier: workspace:*
+ version: link:../../packages/jsonbig
+ '@powersync/service-sync-rules':
+ specifier: workspace:*
+ version: link:../../packages/sync-rules
+
'@powersync/service-types': + specifier: workspace:* + version: link:../../packages/types + mongodb: + specifier: ^6.7.0 + version: 6.8.0(socks@2.8.3) + ts-codec: + specifier: ^1.2.2 + version: 1.2.2 + uri-js: + specifier: ^4.4.1 + version: 4.4.1 + uuid: + specifier: ^9.0.1 + version: 9.0.1 + devDependencies: + '@types/uuid': + specifier: ^9.0.4 + version: 9.0.8 + + modules/module-mysql: + dependencies: + '@powersync/lib-services-framework': + specifier: workspace:* + version: link:../../libs/lib-services + '@powersync/mysql-zongji': + specifier: ^0.1.0 + version: 0.1.0 + '@powersync/service-core': + specifier: workspace:* + version: link:../../packages/service-core + '@powersync/service-jsonbig': + specifier: workspace:* + version: link:../../packages/jsonbig + '@powersync/service-sync-rules': + specifier: workspace:* + version: link:../../packages/sync-rules + '@powersync/service-types': + specifier: workspace:* + version: link:../../packages/types + async: + specifier: ^3.2.4 + version: 3.2.5 + mysql2: + specifier: ^3.11.0 + version: 3.11.3 + semver: + specifier: ^7.5.4 + version: 7.6.2 + ts-codec: + specifier: ^1.2.2 + version: 1.2.2 + uri-js: + specifier: ^4.4.1 + version: 4.4.1 + uuid: + specifier: ^9.0.1 + version: 9.0.1 + devDependencies: + '@types/async': + specifier: ^3.2.24 + version: 3.2.24 + '@types/semver': + specifier: ^7.5.4 + version: 7.5.8 + '@types/uuid': + specifier: ^9.0.4 + version: 9.0.8 + + modules/module-postgres: + dependencies: + '@powersync/lib-services-framework': + specifier: workspace:* + version: link:../../libs/lib-services + '@powersync/service-core': + specifier: workspace:* + version: link:../../packages/service-core + '@powersync/service-jpgwire': + specifier: workspace:* + version: link:../../packages/jpgwire + '@powersync/service-jsonbig': + specifier: workspace:* + version: link:../../packages/jsonbig + '@powersync/service-sync-rules': + specifier: workspace:* + version: link:../../packages/sync-rules + '@powersync/service-types': + specifier: workspace:* + version: link:../../packages/types + jose: + specifier: ^4.15.1 + version: 4.15.9 + pgwire: + specifier: github:kagis/pgwire#f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87 + version: https://codeload.github.com/kagis/pgwire/tar.gz/f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87 + ts-codec: + specifier: ^1.2.2 + version: 1.2.2 + uri-js: + specifier: ^4.4.1 + version: 4.4.1 + uuid: + specifier: ^9.0.1 + version: 9.0.1 + devDependencies: + '@types/uuid': + specifier: ^9.0.4 + version: 9.0.8 packages/jpgwire: dependencies: '@powersync/service-jsonbig': specifier: workspace:^ version: link:../jsonbig + '@powersync/service-sync-rules': + specifier: workspace:^ + version: link:../sync-rules '@powersync/service-types': specifier: workspace:^ version: link:../types date-fns: - specifier: ^3.6.0 - version: 3.6.0 + specifier: ^4.1.0 + version: 4.1.0 pgwire: specifier: github:kagis/pgwire#f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87 version: https://codeload.github.com/kagis/pgwire/tar.gz/f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87 @@ -125,7 +263,7 @@ importers: version: 9.0.1 ws: specifier: ^8.17.0 - version: 8.17.0 + version: 8.18.0 devDependencies: '@types/uuid': specifier: ^9.0.4 @@ -135,16 +273,10 @@ importers: version: 8.2.3 bson: specifier: ^6.6.0 - version: 6.7.0 + version: 6.8.0 rsocket-websocket-client: specifier: 1.0.0-alpha.3 version: 1.0.0-alpha.3 - typescript: - specifier: ~5.2.2 - version: 5.2.2 - vitest: - specifier: ^0.34.6 - version: 0.34.6 packages/service-core: dependencies: @@ -162,16 +294,13 @@ importers: 
version: 0.51.1(@opentelemetry/api@1.8.0) '@opentelemetry/resources': specifier: ^1.24.1 - version: 1.24.1(@opentelemetry/api@1.8.0) + version: 1.25.1(@opentelemetry/api@1.8.0) '@opentelemetry/sdk-metrics': specifier: 1.24.1 version: 1.24.1(@opentelemetry/api@1.8.0) '@powersync/lib-services-framework': specifier: workspace:* version: link:../../libs/lib-services - '@powersync/service-jpgwire': - specifier: workspace:* - version: link:../jpgwire '@powersync/service-jsonbig': specifier: workspace:* version: link:../jsonbig @@ -192,7 +321,7 @@ importers: version: 0.5.0 bson: specifier: ^6.6.0 - version: 6.7.0 + version: 6.8.0 commander: specifier: ^12.0.0 version: 12.1.0 @@ -207,41 +336,41 @@ importers: version: 5.0.0 jose: specifier: ^4.15.1 - version: 4.15.5 + version: 4.15.9 lodash: specifier: ^4.17.21 version: 4.17.21 lru-cache: specifier: ^10.2.2 - version: 10.2.2 + version: 10.4.3 mongodb: specifier: ^6.7.0 - version: 6.7.0(socks@2.8.3) + version: 6.8.0(socks@2.8.3) node-fetch: specifier: ^3.3.2 version: 3.3.2 - pgwire: - specifier: github:kagis/pgwire#f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87 - version: https://codeload.github.com/kagis/pgwire/tar.gz/f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87 ts-codec: specifier: ^1.2.2 version: 1.2.2 + uri-js: + specifier: ^4.4.1 + version: 4.4.1 uuid: specifier: ^9.0.1 version: 9.0.1 winston: specifier: ^3.13.0 - version: 3.13.0 + version: 3.13.1 yaml: specifier: ^2.3.2 - version: 2.4.2 + version: 2.4.5 devDependencies: '@types/async': specifier: ^3.2.24 version: 3.2.24 '@types/lodash': specifier: ^4.17.5 - version: 4.17.5 + version: 4.17.6 '@types/uuid': specifier: ^9.0.4 version: 9.0.8 @@ -251,15 +380,6 @@ importers: fastify-plugin: specifier: ^4.5.1 version: 4.5.1 - typescript: - specifier: ^5.2.2 - version: 5.4.5 - vite-tsconfig-paths: - specifier: ^4.3.2 - version: 4.3.2(typescript@5.4.5)(vite@5.2.11(@types/node@18.11.11)) - vitest: - specifier: ^0.34.6 - version: 0.34.6 packages/sync-rules: dependencies: @@ -271,20 +391,20 @@ importers: version: 0.5.2 ajv: specifier: ^8.12.0 - version: 8.14.0 + version: 8.16.0 pgsql-ast-parser: specifier: ^11.1.0 version: 11.2.0 yaml: specifier: ^2.3.1 - version: 2.4.2 + version: 2.4.5 devDependencies: '@types/node': - specifier: 18.19.50 - version: 18.19.50 + specifier: ^22.5.5 + version: 22.5.5 vitest: - specifier: ^2.0.5 - version: 2.0.5(@types/node@18.19.50) + specifier: ^2.1.1 + version: 2.1.1(@types/node@22.5.5) packages/types: dependencies: @@ -321,6 +441,15 @@ importers: '@powersync/service-jsonbig': specifier: workspace:* version: link:../packages/jsonbig + '@powersync/service-module-mongodb': + specifier: workspace:* + version: link:../modules/module-mongodb + '@powersync/service-module-mysql': + specifier: workspace:* + version: link:../modules/module-mysql + '@powersync/service-module-postgres': + specifier: workspace:* + version: link:../modules/module-postgres '@powersync/service-rsocket-router': specifier: workspace:* version: link:../packages/rsocket-router @@ -332,13 +461,13 @@ importers: version: link:../packages/types '@sentry/node': specifier: ^8.9.2 - version: 8.9.2 + version: 8.17.0 async-mutex: specifier: ^0.5.0 version: 0.5.0 bson: specifier: ^6.6.0 - version: 6.7.0 + version: 6.8.0 commander: specifier: ^12.0.0 version: 12.1.0 @@ -356,13 +485,13 @@ importers: version: 5.0.0 jose: specifier: ^4.15.1 - version: 4.15.5 + version: 4.15.9 lru-cache: specifier: ^10.0.1 - version: 10.2.2 + version: 10.4.3 mongodb: specifier: ^6.7.0 - version: 6.7.0(socks@2.8.3) + version: 6.8.0(socks@2.8.3) 
node-fetch: specifier: ^3.3.2 version: 3.3.2 @@ -377,14 +506,14 @@ importers: version: 9.0.1 winston: specifier: ^3.13.0 - version: 3.13.0 + version: 3.13.1 yaml: specifier: ^2.3.2 - version: 2.4.2 + version: 2.4.5 devDependencies: '@sentry/types': specifier: ^8.9.2 - version: 8.9.2 + version: 8.17.0 '@types/uuid': specifier: ^9.0.4 version: 9.0.8 @@ -393,19 +522,13 @@ importers: version: 2.4.1 nodemon: specifier: ^3.0.1 - version: 3.1.1 + version: 3.1.4 npm-check-updates: specifier: ^16.14.4 version: 16.14.20 ts-node: specifier: ^10.9.1 - version: 10.9.2(@types/node@18.19.50)(typescript@5.2.2) - typescript: - specifier: ~5.2.2 - version: 5.2.2 - vitest: - specifier: ^0.34.6 - version: 0.34.6 + version: 10.9.2(@types/node@22.5.5)(typescript@5.6.2) test-client: dependencies: @@ -417,82 +540,78 @@ importers: version: 12.1.0 jose: specifier: ^4.15.1 - version: 4.15.5 + version: 4.15.9 yaml: specifier: ^2.5.0 version: 2.5.0 devDependencies: '@types/node': - specifier: 18.11.11 - version: 18.11.11 - typescript: - specifier: ^5.2.2 - version: 5.2.2 + specifier: ^22.5.5 + version: 22.5.5 packages: - '@ampproject/remapping@2.3.0': - resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==} - engines: {node: '>=6.0.0'} - - '@babel/code-frame@7.24.6': - resolution: {integrity: sha512-ZJhac6FkEd1yhG2AHOmfcXG4ceoLltoCVJjN5XsWN9BifBQr+cHJbWi0h68HZuSORq+3WtJ2z0hwF2NG1b5kcA==} + '@babel/code-frame@7.24.7': + resolution: {integrity: sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==} engines: {node: '>=6.9.0'} - '@babel/helper-validator-identifier@7.24.6': - resolution: {integrity: sha512-4yA7s865JHaqUdRbnaxarZREuPTHrjpDT+pXoAZ1yhyo6uFnIEpS8VMu16siFOHDpZNKYv5BObhsB//ycbICyw==} + '@babel/helper-validator-identifier@7.24.7': + resolution: {integrity: sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==} engines: {node: '>=6.9.0'} - '@babel/highlight@7.24.6': - resolution: {integrity: sha512-2YnuOp4HAk2BsBrJJvYCbItHx0zWscI1C3zgWkz+wDyD9I7GIVrfnLyrR4Y1VR+7p+chAEcrgRQYZAGIKMV7vQ==} + '@babel/highlight@7.24.7': + resolution: {integrity: sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==} engines: {node: '>=6.9.0'} - '@babel/runtime@7.24.6': - resolution: {integrity: sha512-Ja18XcETdEl5mzzACGd+DKgaGJzPTCow7EglgwTmHdwokzDFYh/MHua6lU6DV/hjF2IaOJ4oX2nqnjG7RElKOw==} + '@babel/runtime@7.24.8': + resolution: {integrity: sha512-5F7SDGs1T72ZczbRwbGO9lQi0NLjQxzl6i4lJxLxfW9U5UluCSyEJeniWvnhl3/euNiqQVbo8zruhsDfid0esA==} engines: {node: '>=6.9.0'} - '@changesets/apply-release-plan@7.0.1': - resolution: {integrity: sha512-aPdSq/R++HOyfEeBGjEe6LNG8gs0KMSyRETD/J2092OkNq8mOioAxyKjMbvVUdzgr/HTawzMOz7lfw339KnsCA==} + '@changesets/apply-release-plan@7.0.5': + resolution: {integrity: sha512-1cWCk+ZshEkSVEZrm2fSj1Gz8sYvxgUL4Q78+1ZZqeqfuevPTPk033/yUZ3df8BKMohkqqHfzj0HOOrG0KtXTw==} - '@changesets/assemble-release-plan@6.0.0': - resolution: {integrity: sha512-4QG7NuisAjisbW4hkLCmGW2lRYdPrKzro+fCtZaILX+3zdUELSvYjpL4GTv0E4aM9Mef3PuIQp89VmHJ4y2bfw==} + '@changesets/assemble-release-plan@6.0.4': + resolution: {integrity: sha512-nqICnvmrwWj4w2x0fOhVj2QEGdlUuwVAwESrUo5HLzWMI1rE5SWfsr9ln+rDqWB6RQ2ZyaMZHUcU7/IRaUJS+Q==} '@changesets/changelog-git@0.2.0': resolution: {integrity: sha512-bHOx97iFI4OClIT35Lok3sJAwM31VbUM++gnMBV16fdbtBhgYu4dxsphBF/0AZZsyAHMrnM0yFcj5gZM1py6uQ==} - '@changesets/cli@2.27.3': - resolution: {integrity: 
sha512-ve/VpWApILlSs8cr0okNx5C2LKRawI9XZgvfmf58S8sar2nhx5DPJREFXYZBahs0FeTfvH0rdVl+nGe8QF45Ig==} + '@changesets/cli@2.27.8': + resolution: {integrity: sha512-gZNyh+LdSsI82wBSHLQ3QN5J30P4uHKJ4fXgoGwQxfXwYFTJzDdvIJasZn8rYQtmKhyQuiBj4SSnLuKlxKWq4w==} hasBin: true - '@changesets/config@3.0.0': - resolution: {integrity: sha512-o/rwLNnAo/+j9Yvw9mkBQOZySDYyOr/q+wptRLcAVGlU6djOeP9v1nlalbL9MFsobuBVQbZCTp+dIzdq+CLQUA==} + '@changesets/config@3.0.3': + resolution: {integrity: sha512-vqgQZMyIcuIpw9nqFIpTSNyc/wgm/Lu1zKN5vECy74u95Qx/Wa9g27HdgO4NkVAaq+BGA8wUc/qvbvVNs93n6A==} '@changesets/errors@0.2.0': resolution: {integrity: sha512-6BLOQUscTpZeGljvyQXlWOItQyU71kCdGz7Pi8H8zdw6BI0g3m43iL4xKUVPWtG+qrrL9DTjpdn8eYuCQSRpow==} - '@changesets/get-dependents-graph@2.0.0': - resolution: {integrity: sha512-cafUXponivK4vBgZ3yLu944mTvam06XEn2IZGjjKc0antpenkYANXiiE6GExV/yKdsCnE8dXVZ25yGqLYZmScA==} + '@changesets/get-dependents-graph@2.1.2': + resolution: {integrity: sha512-sgcHRkiBY9i4zWYBwlVyAjEM9sAzs4wYVwJUdnbDLnVG3QwAaia1Mk5P8M7kraTOZN+vBET7n8KyB0YXCbFRLQ==} - '@changesets/get-release-plan@4.0.0': - resolution: {integrity: sha512-9L9xCUeD/Tb6L/oKmpm8nyzsOzhdNBBbt/ZNcjynbHC07WW4E1eX8NMGC5g5SbM5z/V+MOrYsJ4lRW41GCbg3w==} + '@changesets/get-release-plan@4.0.4': + resolution: {integrity: sha512-SicG/S67JmPTrdcc9Vpu0wSQt7IiuN0dc8iR5VScnnTVPfIaLvKmEGRvIaF0kcn8u5ZqLbormZNTO77bCEvyWw==} '@changesets/get-version-range-type@0.4.0': resolution: {integrity: sha512-hwawtob9DryoGTpixy1D3ZXbGgJu1Rhr+ySH2PvTLHvkZuQ7sRT4oQwMh0hbqZH1weAooedEjRsbrWcGLCeyVQ==} - '@changesets/git@3.0.0': - resolution: {integrity: sha512-vvhnZDHe2eiBNRFHEgMiGd2CT+164dfYyrJDhwwxTVD/OW0FUD6G7+4DIx1dNwkwjHyzisxGAU96q0sVNBns0w==} + '@changesets/git@3.0.1': + resolution: {integrity: sha512-pdgHcYBLCPcLd82aRcuO0kxCDbw/yISlOtkmwmE8Odo1L6hSiZrBOsRl84eYG7DRCab/iHnOkWqExqc4wxk2LQ==} - '@changesets/logger@0.1.0': - resolution: {integrity: sha512-pBrJm4CQm9VqFVwWnSqKEfsS2ESnwqwH+xR7jETxIErZcfd1u2zBSqrHbRHR7xjhSgep9x2PSKFKY//FAshA3g==} + '@changesets/logger@0.1.1': + resolution: {integrity: sha512-OQtR36ZlnuTxKqoW4Sv6x5YIhOmClRd5pWsjZsddYxpWs517R0HkyiefQPIytCVh4ZcC5x9XaG8KTdd5iRQUfg==} '@changesets/parse@0.4.0': resolution: {integrity: sha512-TS/9KG2CdGXS27S+QxbZXgr8uPsP4yNJYb4BC2/NeFUj80Rni3TeD2qwWmabymxmrLo7JEsytXH1FbpKTbvivw==} - '@changesets/pre@2.0.0': - resolution: {integrity: sha512-HLTNYX/A4jZxc+Sq8D1AMBsv+1qD6rmmJtjsCJa/9MSRybdxh0mjbTvE6JYZQ/ZiQ0mMlDOlGPXTm9KLTU3jyw==} + '@changesets/pre@2.0.1': + resolution: {integrity: sha512-vvBJ/If4jKM4tPz9JdY2kGOgWmCowUYOi5Ycv8dyLnEE8FgpYYUo1mgJZxcdtGGP3aG8rAQulGLyyXGSLkIMTQ==} + + '@changesets/read@0.6.1': + resolution: {integrity: sha512-jYMbyXQk3nwP25nRzQQGa1nKLY0KfoOV7VLgwucI0bUO8t8ZLCr6LZmgjXsiKuRDc+5A6doKPr9w2d+FEJ55zQ==} - '@changesets/read@0.6.0': - resolution: {integrity: sha512-ZypqX8+/im1Fm98K4YcZtmLKgjs1kDQ5zHpc2U1qdtNBmZZfo/IBiG162RoP0CUF05tvp2y4IspH11PLnPxuuw==} + '@changesets/should-skip-package@0.1.1': + resolution: {integrity: sha512-H9LjLbF6mMHLtJIc/eHR9Na+MifJ3VxtgP/Y+XLn4BF7tDTEN1HNYtH6QMcjP1uxp9sjaFYmW8xqloaCi/ckTg==} '@changesets/types@4.1.0': resolution: {integrity: sha512-LDQvVDv5Kb50ny2s25Fhm3d9QSZimsoUGBsUioj6MC3qbMUCuC8GPIvk/M6IvXx3lYhAs0lwWUQLb+VIEUCECw==} @@ -500,8 +619,8 @@ packages: '@changesets/types@6.0.0': resolution: {integrity: sha512-b1UkfNulgKoWfqyHtzKS5fOZYSJO+77adgL7DLRDr+/7jhChN+QcHnbjiQVOz/U+Ts3PGNySq7diAItzDgugfQ==} - '@changesets/write@0.3.1': - resolution: {integrity: sha512-SyGtMXzH3qFqlHKcvFY2eX+6b0NGiFcNav8AFsYwy5l8hejOeoeTDemu5Yjmke2V5jpzY+pBvM0vCCQ3gdZpfw==} + 
'@changesets/write@0.3.2': + resolution: {integrity: sha512-kDxDrPNpUgsjDbWBvUo27PzKX4gqeKOlhibaOXDJA6kuBisGqNHv/HwGJrAu8U/dSf8ZEFIeHIPtvSlZI1kULw==} '@colors/colors@1.5.0': resolution: {integrity: sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==} @@ -518,146 +637,146 @@ packages: '@dabh/diagnostics@2.0.3': resolution: {integrity: sha512-hrlQOIi7hAfzsMqlGSFyVucrx38O+j6wiGOf//H2ecvIEqYN4ADBSS2iLMh5UFyDunCNniUIPk/q3riFv45xRA==} - '@esbuild/aix-ppc64@0.20.2': - resolution: {integrity: sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g==} + '@esbuild/aix-ppc64@0.21.5': + resolution: {integrity: sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==} engines: {node: '>=12'} cpu: [ppc64] os: [aix] - '@esbuild/android-arm64@0.20.2': - resolution: {integrity: sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg==} + '@esbuild/android-arm64@0.21.5': + resolution: {integrity: sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==} engines: {node: '>=12'} cpu: [arm64] os: [android] - '@esbuild/android-arm@0.20.2': - resolution: {integrity: sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w==} + '@esbuild/android-arm@0.21.5': + resolution: {integrity: sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==} engines: {node: '>=12'} cpu: [arm] os: [android] - '@esbuild/android-x64@0.20.2': - resolution: {integrity: sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg==} + '@esbuild/android-x64@0.21.5': + resolution: {integrity: sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==} engines: {node: '>=12'} cpu: [x64] os: [android] - '@esbuild/darwin-arm64@0.20.2': - resolution: {integrity: sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA==} + '@esbuild/darwin-arm64@0.21.5': + resolution: {integrity: sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==} engines: {node: '>=12'} cpu: [arm64] os: [darwin] - '@esbuild/darwin-x64@0.20.2': - resolution: {integrity: sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA==} + '@esbuild/darwin-x64@0.21.5': + resolution: {integrity: sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==} engines: {node: '>=12'} cpu: [x64] os: [darwin] - '@esbuild/freebsd-arm64@0.20.2': - resolution: {integrity: sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw==} + '@esbuild/freebsd-arm64@0.21.5': + resolution: {integrity: sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==} engines: {node: '>=12'} cpu: [arm64] os: [freebsd] - '@esbuild/freebsd-x64@0.20.2': - resolution: {integrity: sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw==} + '@esbuild/freebsd-x64@0.21.5': + resolution: {integrity: sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==} engines: {node: '>=12'} cpu: [x64] os: [freebsd] - '@esbuild/linux-arm64@0.20.2': - resolution: {integrity: sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A==} + 
'@esbuild/linux-arm64@0.21.5': + resolution: {integrity: sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==} engines: {node: '>=12'} cpu: [arm64] os: [linux] - '@esbuild/linux-arm@0.20.2': - resolution: {integrity: sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg==} + '@esbuild/linux-arm@0.21.5': + resolution: {integrity: sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==} engines: {node: '>=12'} cpu: [arm] os: [linux] - '@esbuild/linux-ia32@0.20.2': - resolution: {integrity: sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig==} + '@esbuild/linux-ia32@0.21.5': + resolution: {integrity: sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==} engines: {node: '>=12'} cpu: [ia32] os: [linux] - '@esbuild/linux-loong64@0.20.2': - resolution: {integrity: sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ==} + '@esbuild/linux-loong64@0.21.5': + resolution: {integrity: sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==} engines: {node: '>=12'} cpu: [loong64] os: [linux] - '@esbuild/linux-mips64el@0.20.2': - resolution: {integrity: sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA==} + '@esbuild/linux-mips64el@0.21.5': + resolution: {integrity: sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==} engines: {node: '>=12'} cpu: [mips64el] os: [linux] - '@esbuild/linux-ppc64@0.20.2': - resolution: {integrity: sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg==} + '@esbuild/linux-ppc64@0.21.5': + resolution: {integrity: sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==} engines: {node: '>=12'} cpu: [ppc64] os: [linux] - '@esbuild/linux-riscv64@0.20.2': - resolution: {integrity: sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg==} + '@esbuild/linux-riscv64@0.21.5': + resolution: {integrity: sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==} engines: {node: '>=12'} cpu: [riscv64] os: [linux] - '@esbuild/linux-s390x@0.20.2': - resolution: {integrity: sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ==} + '@esbuild/linux-s390x@0.21.5': + resolution: {integrity: sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==} engines: {node: '>=12'} cpu: [s390x] os: [linux] - '@esbuild/linux-x64@0.20.2': - resolution: {integrity: sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw==} + '@esbuild/linux-x64@0.21.5': + resolution: {integrity: sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==} engines: {node: '>=12'} cpu: [x64] os: [linux] - '@esbuild/netbsd-x64@0.20.2': - resolution: {integrity: sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ==} + '@esbuild/netbsd-x64@0.21.5': + resolution: {integrity: sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==} engines: {node: '>=12'} cpu: [x64] os: [netbsd] - '@esbuild/openbsd-x64@0.20.2': - resolution: {integrity: 
sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ==} + '@esbuild/openbsd-x64@0.21.5': + resolution: {integrity: sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==} engines: {node: '>=12'} cpu: [x64] os: [openbsd] - '@esbuild/sunos-x64@0.20.2': - resolution: {integrity: sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w==} + '@esbuild/sunos-x64@0.21.5': + resolution: {integrity: sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==} engines: {node: '>=12'} cpu: [x64] os: [sunos] - '@esbuild/win32-arm64@0.20.2': - resolution: {integrity: sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ==} + '@esbuild/win32-arm64@0.21.5': + resolution: {integrity: sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==} engines: {node: '>=12'} cpu: [arm64] os: [win32] - '@esbuild/win32-ia32@0.20.2': - resolution: {integrity: sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ==} + '@esbuild/win32-ia32@0.21.5': + resolution: {integrity: sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==} engines: {node: '>=12'} cpu: [ia32] os: [win32] - '@esbuild/win32-x64@0.20.2': - resolution: {integrity: sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ==} + '@esbuild/win32-x64@0.21.5': + resolution: {integrity: sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==} engines: {node: '>=12'} cpu: [x64] os: [win32] - '@fastify/ajv-compiler@3.5.0': - resolution: {integrity: sha512-ebbEtlI7dxXF5ziNdr05mOY8NnDiPB1XvAlLHctRt/Rc+C3LCOVW5imUVX+mhvUhnNzmPBHewUkOFgGlCxgdAA==} + '@fastify/ajv-compiler@3.6.0': + resolution: {integrity: sha512-LwdXQJjmMD+GwLOkP7TVC68qa+pSSogeWWmznRJ/coyTcfe9qA05AHFSe1eZFwK6q+xVRpChnvFUkf1iYaSZsQ==} '@fastify/cors@8.4.1': resolution: {integrity: sha512-iYQJtrY3pFiDS5mo5zRaudzg2OcUdJ96PD6xfkKOOEilly5nnrFZx/W6Sce2T79xxlEn2qpU3t5+qS2phS369w==} @@ -678,35 +797,20 @@ packages: resolution: {integrity: sha512-RE815I4arJFtt+FVeU1Tgp9/Xvecacji8w/V6XtXsWWH/wz/eNkNbhb+ny/+PlVZjV0rxQpRSQKNKE3lcktHEA==} engines: {node: '>=10.10.0'} - '@inquirer/figures@1.0.2': - resolution: {integrity: sha512-4F1MBwVr3c/m4bAUef6LgkvBfSjzwH+OfldgHqcuacWwSUetFebM2wi58WfG9uk1rR98U6GwLed4asLJbwdV5w==} + '@inquirer/figures@1.0.3': + resolution: {integrity: sha512-ErXXzENMH5pJt5/ssXV0DfWUZqly8nGzf0UcBV9xTnP+KyffE2mqyxIMBrZ8ijQck2nU0TQm40EQB53YreyWHw==} engines: {node: '>=18'} '@isaacs/cliui@8.0.2': resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==} engines: {node: '>=12'} - '@jest/schemas@29.6.3': - resolution: {integrity: sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==} - engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - - '@jridgewell/gen-mapping@0.3.5': - resolution: {integrity: sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==} - engines: {node: '>=6.0.0'} - '@jridgewell/resolve-uri@3.1.2': resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} engines: {node: '>=6.0.0'} - '@jridgewell/set-array@1.2.1': - resolution: {integrity: 
sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==} - engines: {node: '>=6.0.0'} - - '@jridgewell/sourcemap-codec@1.4.15': - resolution: {integrity: sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==} - - '@jridgewell/trace-mapping@0.3.25': - resolution: {integrity: sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==} + '@jridgewell/sourcemap-codec@1.5.0': + resolution: {integrity: sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==} '@jridgewell/trace-mapping@0.3.9': resolution: {integrity: sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==} @@ -714,10 +818,6 @@ packages: '@js-sdsl/ordered-set@4.4.2': resolution: {integrity: sha512-ieYQ8WlBPKYzEo81H3q0DFbd8WtFRXXABb4+vRCF0AO3WWtJZFxYvRGdipUXGrd6tlSySmqhcPuO3J6SCodCxg==} - '@ljharb/through@2.3.13': - resolution: {integrity: sha512-/gKJun8NNiWGZJkGzI/Ragc53cOdcLNdzjLaIa+GEjguQs0ulsurx8WN0jijdK9yPqDvziX995sMRLyLt1uZMQ==} - engines: {node: '>= 0.4'} - '@manypkg/find-root@1.1.0': resolution: {integrity: sha512-mki5uBvhHzO8kYYix/WRy2WX8S3B5wdVSc9D6KcU5lQNglP2yt58/VfLuAK49glRXChosY8ap2oJ1qgma3GUVA==} @@ -777,8 +877,8 @@ packages: resolution: {integrity: sha512-E3skn949Pk1z2XtXu/lxf6QAZpawuTM/IUEXcAzpiUkTd73Hmvw26FiN3cJuTmkpM5hZzHwkomVdtrh/n/zzwA==} engines: {node: '>=14'} - '@opentelemetry/api-logs@0.52.0': - resolution: {integrity: sha512-HxjD7xH9iAE4OyhNaaSec65i1H6QZYBWSwWkowFfsc5YAcDvJG30/J1sRKXEQqdmUcKTXEAnA66UciqZha/4+Q==} + '@opentelemetry/api-logs@0.52.1': + resolution: {integrity: sha512-qnSqB2DQ9TPP96dl8cDubDvrUyWc0/sK81xHTK8eSUspzDM3bsewX903qclQFvVhgStjRWdC5bLb3kQqMkfV5A==} engines: {node: '>=14'} '@opentelemetry/api@1.6.0': @@ -793,8 +893,8 @@ packages: resolution: {integrity: sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==} engines: {node: '>=8.0.0'} - '@opentelemetry/context-async-hooks@1.25.0': - resolution: {integrity: sha512-sBW313mnMyFg0cp/40BRzrZBWG+581s2j5gIsa5fgGadswyILk4mNFATsqrCOpAx945RDuZ2B7ThQLgor9OpfA==} + '@opentelemetry/context-async-hooks@1.25.1': + resolution: {integrity: sha512-UW/ge9zjvAEmRWVapOP0qyCvPulWU6cQxGxDbWEFfGOj1VBBZAuOqTo3X6yWmDTD3Xe15ysCZChHncr2xFMIfQ==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': '>=1.0.0 <1.10.0' @@ -811,8 +911,8 @@ packages: peerDependencies: '@opentelemetry/api': '>=1.0.0 <1.9.0' - '@opentelemetry/core@1.25.0': - resolution: {integrity: sha512-n0B3s8rrqGrasTgNkXLKXzN0fXo+6IYP7M5b7AMsrZM33f/y6DS6kJ0Btd7SespASWq8bgL3taLo0oe0vB52IQ==} + '@opentelemetry/core@1.25.1': + resolution: {integrity: sha512-GeT/l6rBYWVQ4XArluLVB6WWQ8flHbdb6r2FCHC3smtdOAbrJBIv35tpV/yp9bmYUJf+xmZpu9DRTIeJVhFbEQ==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': '>=1.0.0 <1.10.0' @@ -835,110 +935,104 @@ packages: peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-connect@0.37.0': - resolution: {integrity: sha512-SeQktDIH5rNzjiEiazWiJAIXkmnLOnNV7wwHpahrqE0Ph+Z3heqMfxRtoMtbdJSIYLfcNZYO51AjxZ00IXufdw==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': ^1.3.0 - - '@opentelemetry/instrumentation-express@0.40.1': - resolution: {integrity: sha512-+RKMvVe2zw3kIXRup9c1jFu3T4d0fs5aKy015TpiMyoCKX1UMu3Z0lfgYtuyiSTANvg5hZnDbWmQmqSPj9VTvg==} + '@opentelemetry/instrumentation-connect@0.38.0': + resolution: {integrity: 
sha512-2/nRnx3pjYEmdPIaBwtgtSviTKHWnDZN3R+TkRUnhIVrvBKVcq+I5B2rtd6mr6Fe9cHlZ9Ojcuh7pkNh/xdWWg==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-fastify@0.37.0': - resolution: {integrity: sha512-WRjwzNZgupSzbEYvo9s+QuHJRqZJjVdNxSEpGBwWK8RKLlHGwGVAu0gcc2gPamJWUJsGqPGvahAPWM18ZkWj6A==} + '@opentelemetry/instrumentation-express@0.41.0': + resolution: {integrity: sha512-/B7fbMdaf3SYe5f1P973tkqd6s7XZirjpfkoJ63E7nltU30qmlgm9tY5XwZOzAFI0rHS9tbrFI2HFPAvQUFe/A==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-graphql@0.41.0': - resolution: {integrity: sha512-R/gXeljgIhaRDKquVkKYT5QHPnFouM8ooyePZEP0kqyaVAedtR1V7NfAUJbxfTG5fBQa5wdmLjvu63+tzRXZCA==} + '@opentelemetry/instrumentation-fastify@0.38.0': + resolution: {integrity: sha512-HBVLpTSYpkQZ87/Df3N0gAw7VzYZV3n28THIBrJWfuqw3Or7UqdhnjeuMIPQ04BKk3aZc0cWn2naSQObbh5vXw==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-hapi@0.39.0': - resolution: {integrity: sha512-ik2nA9Yj2s2ay+aNY+tJsKCsEx6Tsc2g/MK0iWBW5tibwrWKTy1pdVt5sB3kd5Gkimqj23UV5+FH2JFcQLeKug==} + '@opentelemetry/instrumentation-graphql@0.42.0': + resolution: {integrity: sha512-N8SOwoKL9KQSX7z3gOaw5UaTeVQcfDO1c21csVHnmnmGUoqsXbArK2B8VuwPWcv6/BC/i3io+xTo7QGRZ/z28Q==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-http@0.52.0': - resolution: {integrity: sha512-E6ywZuxTa4LnVXZGwL1oj3e2Eog1yIaNqa8KjKXoGkDNKte9/SjQnePXOmhQYI0A9nf0UyFbP9aKd+yHrkJXUA==} + '@opentelemetry/instrumentation-hapi@0.40.0': + resolution: {integrity: sha512-8U/w7Ifumtd2bSN1OLaSwAAFhb9FyqWUki3lMMB0ds+1+HdSxYBe9aspEJEgvxAqOkrQnVniAPTEGf1pGM7SOw==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-ioredis@0.41.0': - resolution: {integrity: sha512-rxiLloU8VyeJGm5j2fZS8ShVdB82n7VNP8wTwfUQqDwRfHCnkzGr+buKoxuhGD91gtwJ91RHkjHA1Eg6RqsUTg==} + '@opentelemetry/instrumentation-http@0.52.1': + resolution: {integrity: sha512-dG/aevWhaP+7OLv4BQQSEKMJv8GyeOp3Wxl31NHqE8xo9/fYMfEljiZphUHIfyg4gnZ9swMyWjfOQs5GUQe54Q==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-koa@0.41.0': - resolution: {integrity: sha512-mbPnDt7ELvpM2S0vixYUsde7122lgegLOJQxx8iJQbB8YHal/xnTh9v7IfArSVzIDo+E+080hxZyUZD4boOWkw==} + '@opentelemetry/instrumentation-ioredis@0.42.0': + resolution: {integrity: sha512-P11H168EKvBB9TUSasNDOGJCSkpT44XgoM6d3gRIWAa9ghLpYhl0uRkS8//MqPzcJVHr3h3RmfXIpiYLjyIZTw==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-mongodb@0.45.0': - resolution: {integrity: sha512-xnZP9+ayeB1JJyNE9cIiwhOJTzNEsRhXVdLgfzmrs48Chhhk026mQdM5CITfyXSCfN73FGAIB8d91+pflJEfWQ==} + '@opentelemetry/instrumentation-koa@0.42.0': + resolution: {integrity: sha512-H1BEmnMhho8o8HuNRq5zEI4+SIHDIglNB7BPKohZyWG4fWNuR7yM4GTlR01Syq21vODAS7z5omblScJD/eZdKw==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-mongoose@0.39.0': - resolution: {integrity: sha512-J1r66A7zJklPPhMtrFOO7/Ud2p0Pv5u8+r23Cd1JUH6fYPmftNJVsLp2urAt6PHK4jVqpP/YegN8wzjJ2mZNPQ==} + '@opentelemetry/instrumentation-mongodb@0.46.0': + resolution: {integrity: sha512-VF/MicZ5UOBiXrqBslzwxhN7TVqzu1/LN/QDpkskqM0Zm0aZ4CVRbUygL8d7lrjLn15x5kGIe8VsSphMfPJzlA==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-mysql2@0.39.0': - 
resolution: {integrity: sha512-Iypuq2z6TCfriAXCIZjRq8GTFCKhQv5SpXbmI+e60rYdXw8NHtMH4NXcGF0eKTuoCsC59IYSTUvDQYDKReaszA==} + '@opentelemetry/instrumentation-mongoose@0.40.0': + resolution: {integrity: sha512-niRi5ZUnkgzRhIGMOozTyoZIvJKNJyhijQI4nF4iFSb+FUx2v5fngfR+8XLmdQAO7xmsD8E5vEGdDVYVtKbZew==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-mysql@0.39.0': - resolution: {integrity: sha512-8snHPh83rhrDf31v9Kq0Nf+ts8hdr7NguuszRqZomZBHgE0+UyXZSkXHAAFZoBPPRMGyM68uaFE5hVtFl+wOcA==} + '@opentelemetry/instrumentation-mysql2@0.40.0': + resolution: {integrity: sha512-0xfS1xcqUmY7WE1uWjlmI67Xg3QsSUlNT+AcXHeA4BDUPwZtWqF4ezIwLgpVZfHOnkAEheqGfNSWd1PIu3Wnfg==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-nestjs-core@0.38.0': - resolution: {integrity: sha512-M381Df1dM8aqihZz2yK+ugvMFK5vlHG/835dc67Sx2hH4pQEQYDA2PpFPTgc9AYYOydQaj7ClFQunESimjXDgg==} + '@opentelemetry/instrumentation-mysql@0.40.0': + resolution: {integrity: sha512-d7ja8yizsOCNMYIJt5PH/fKZXjb/mS48zLROO4BzZTtDfhNCl2UM/9VIomP2qkGIFVouSJrGr/T00EzY7bPtKA==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-pg@0.42.0': - resolution: {integrity: sha512-sjgcM8CswYy8zxHgXv4RAZ09DlYhQ+9TdlourUs63Df/ek5RrB1ZbjznqW7PB6c3TyJJmX6AVtPTjAsROovEjA==} + '@opentelemetry/instrumentation-nestjs-core@0.39.0': + resolution: {integrity: sha512-mewVhEXdikyvIZoMIUry8eb8l3HUjuQjSjVbmLVTt4NQi35tkpnHQrG9bTRBrl3403LoWZ2njMPJyg4l6HfKvA==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation-redis-4@0.40.0': - resolution: {integrity: sha512-0ieQYJb6yl35kXA75LQUPhHtGjtQU9L85KlWa7d4ohBbk/iQKZ3X3CFl5jC5vNMq/GGPB3+w3IxNvALlHtrp7A==} + '@opentelemetry/instrumentation-pg@0.43.0': + resolution: {integrity: sha512-og23KLyoxdnAeFs1UWqzSonuCkePUzCX30keSYigIzJe/6WSYA8rnEI5lobcxPEzg+GcU06J7jzokuEHbjVJNw==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation@0.43.0': - resolution: {integrity: sha512-S1uHE+sxaepgp+t8lvIDuRgyjJWisAb733198kwQTUc9ZtYQ2V2gmyCtR1x21ePGVLoMiX/NWY7WA290hwkjJQ==} + '@opentelemetry/instrumentation-redis-4@0.41.0': + resolution: {integrity: sha512-H7IfGTqW2reLXqput4yzAe8YpDC0fmVNal95GHMLOrS89W+qWUKIqxolSh63hJyfmwPSFwXASzj7wpSk8Az+Dg==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation@0.51.1': - resolution: {integrity: sha512-JIrvhpgqY6437QIqToyozrUG1h5UhwHkaGK/WAX+fkrpyPtc+RO5FkRtUd9BH0MibabHHvqsnBGKfKVijbmp8w==} + '@opentelemetry/instrumentation@0.46.0': + resolution: {integrity: sha512-a9TijXZZbk0vI5TGLZl+0kxyFfrXHhX6Svtz7Pp2/VBlCSKrazuULEyoJQrOknJyFWNMEmbbJgOciHCCpQcisw==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation@0.52.0': - resolution: {integrity: sha512-LPwSIrw+60cheWaXsfGL8stBap/AppKQJFE+qqRvzYrgttXFH2ofoIMxWadeqPTq4BYOXM/C7Bdh/T+B60xnlQ==} + '@opentelemetry/instrumentation@0.52.1': + resolution: {integrity: sha512-uXJbYU/5/MBHjMp1FqrILLRuiJCs3Ofk0MeRDk8g1S1gD47U8X3JnSwcMO1rtRo1x1a7zKaQHaoYu49p/4eSKw==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': ^1.3.0 @@ -971,8 +1065,8 @@ packages: peerDependencies: '@opentelemetry/api': '>=1.0.0 <1.9.0' - '@opentelemetry/resources@1.25.0': - resolution: {integrity: sha512-iHjydPMYJ+Li1auveJCq2rp5U2h6Mhq8BidiyE0jfVlDTFyR1ny8AfJHfmFzJ/RAM8vT8L7T21kcmGybxZC7lQ==} + '@opentelemetry/resources@1.25.1': + resolution: 
{integrity: sha512-pkZT+iFYIZsVn6+GzM0kSX+u3MSLCY9md+lIJOoKl/P+gJFfxJte/60Usdp8Ce4rOs8GduUpSPNe1ddGyDT1sQ==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': '>=1.0.0 <1.10.0' @@ -1002,8 +1096,8 @@ packages: peerDependencies: '@opentelemetry/api': '>=1.0.0 <1.9.0' - '@opentelemetry/sdk-trace-base@1.25.0': - resolution: {integrity: sha512-6+g2fiRQUG39guCsKVeY8ToeuUf3YUnPkN6DXRA1qDmFLprlLvZm9cS6+chgbW70cZJ406FTtSCDnJwxDC5sGQ==} + '@opentelemetry/sdk-trace-base@1.25.1': + resolution: {integrity: sha512-C8k4hnEbc5FamuZQ92nTOp8X/diCY56XUTnMiv9UTuJitCzaNNHAVsdm5+HLCdI8SLQsLWIrG38tddMxLVoftw==} engines: {node: '>=14'} peerDependencies: '@opentelemetry/api': '>=1.0.0 <1.10.0' @@ -1016,8 +1110,8 @@ packages: resolution: {integrity: sha512-VkliWlS4/+GHLLW7J/rVBA00uXus1SWvwFvcUDxDwmFxYfg/2VI6ekwdXS28cjI8Qz2ky2BzG8OUHo+WeYIWqw==} engines: {node: '>=14'} - '@opentelemetry/semantic-conventions@1.25.0': - resolution: {integrity: sha512-M+kkXKRAIAiAP6qYyesfrC5TOmDpDVtsxuGfPcqd9B/iBrac+E14jYwrgm0yZBUIbIP2OnqC3j+UgkXLm1vxUQ==} + '@opentelemetry/semantic-conventions@1.25.1': + resolution: {integrity: sha512-ZDjMJJQRlyk8A1KZFCc+bCbsyrn1wTwdNt56F7twdfUfnHUZUq77/WfONCj8p72NZOyP7pNTdUWSTYC3GTbuuQ==} engines: {node: '>=14'} '@opentelemetry/sql-common@0.40.1': @@ -1042,113 +1136,117 @@ packages: resolution: {integrity: sha512-UA91GwWPhFExt3IizW6bOeY/pQ0BkuNwKjk9iQW9KqxluGCrg4VenZ0/L+2Y0+ZOtme72EVvg6v0zo3AMQRCeA==} engines: {node: '>=12'} - '@prisma/instrumentation@5.15.0': - resolution: {integrity: sha512-fCWOOOajTKOUEp43gRmBqwt6oN9bPJcLiloi2OG/2ED0N5z62Cuza6FDrlm3SJHQAXYlXqLE0HLdEE5WcUkOzg==} + '@powersync/mysql-zongji@0.1.0': + resolution: {integrity: sha512-2GjOxVws+wtbb+xFUJe4Ozzkp/f0Gsna0fje9art76bmz6yfLCW4K3Mf2/M310xMnAIp8eP9hsJ6DYwwZCo1RA==} + engines: {node: '>=20.0.0'} - '@rollup/rollup-android-arm-eabi@4.18.0': - resolution: {integrity: sha512-Tya6xypR10giZV1XzxmH5wr25VcZSncG0pZIjfePT0OVBvqNEurzValetGNarVrGiq66EBVAFn15iYX4w6FKgQ==} + '@prisma/instrumentation@5.16.1': + resolution: {integrity: sha512-4m5gRFWnQb8s/yTyGbMZkL7A5uJgqOWcWJxapwcAD0T0kh5sGPEVSQl/zTQvE9aduXhFAxOtC3gO+R8Hb5xO1Q==} + + '@rollup/rollup-android-arm-eabi@4.18.1': + resolution: {integrity: sha512-lncuC4aHicncmbORnx+dUaAgzee9cm/PbIqgWz1PpXuwc+sa1Ct83tnqUDy/GFKleLiN7ZIeytM6KJ4cAn1SxA==} cpu: [arm] os: [android] - '@rollup/rollup-android-arm64@4.18.0': - resolution: {integrity: sha512-avCea0RAP03lTsDhEyfy+hpfr85KfyTctMADqHVhLAF3MlIkq83CP8UfAHUssgXTYd+6er6PaAhx/QGv4L1EiA==} + '@rollup/rollup-android-arm64@4.18.1': + resolution: {integrity: sha512-F/tkdw0WSs4ojqz5Ovrw5r9odqzFjb5LIgHdHZG65dFI1lWTWRVy32KDJLKRISHgJvqUeUhdIvy43fX41znyDg==} cpu: [arm64] os: [android] - '@rollup/rollup-darwin-arm64@4.18.0': - resolution: {integrity: sha512-IWfdwU7KDSm07Ty0PuA/W2JYoZ4iTj3TUQjkVsO/6U+4I1jN5lcR71ZEvRh52sDOERdnNhhHU57UITXz5jC1/w==} + '@rollup/rollup-darwin-arm64@4.18.1': + resolution: {integrity: sha512-vk+ma8iC1ebje/ahpxpnrfVQJibTMyHdWpOGZ3JpQ7Mgn/3QNHmPq7YwjZbIE7km73dH5M1e6MRRsnEBW7v5CQ==} cpu: [arm64] os: [darwin] - '@rollup/rollup-darwin-x64@4.18.0': - resolution: {integrity: sha512-n2LMsUz7Ynu7DoQrSQkBf8iNrjOGyPLrdSg802vk6XT3FtsgX6JbE8IHRvposskFm9SNxzkLYGSq9QdpLYpRNA==} + '@rollup/rollup-darwin-x64@4.18.1': + resolution: {integrity: sha512-IgpzXKauRe1Tafcej9STjSSuG0Ghu/xGYH+qG6JwsAUxXrnkvNHcq/NL6nz1+jzvWAnQkuAJ4uIwGB48K9OCGA==} cpu: [x64] os: [darwin] - '@rollup/rollup-linux-arm-gnueabihf@4.18.0': - resolution: {integrity: sha512-C/zbRYRXFjWvz9Z4haRxcTdnkPt1BtCkz+7RtBSuNmKzMzp3ZxdM28Mpccn6pt28/UWUCTXa+b0Mx1k3g6NOMA==} + 
'@rollup/rollup-linux-arm-gnueabihf@4.18.1': + resolution: {integrity: sha512-P9bSiAUnSSM7EmyRK+e5wgpqai86QOSv8BwvkGjLwYuOpaeomiZWifEos517CwbG+aZl1T4clSE1YqqH2JRs+g==} cpu: [arm] os: [linux] - '@rollup/rollup-linux-arm-musleabihf@4.18.0': - resolution: {integrity: sha512-l3m9ewPgjQSXrUMHg93vt0hYCGnrMOcUpTz6FLtbwljo2HluS4zTXFy2571YQbisTnfTKPZ01u/ukJdQTLGh9A==} + '@rollup/rollup-linux-arm-musleabihf@4.18.1': + resolution: {integrity: sha512-5RnjpACoxtS+aWOI1dURKno11d7krfpGDEn19jI8BuWmSBbUC4ytIADfROM1FZrFhQPSoP+KEa3NlEScznBTyQ==} cpu: [arm] os: [linux] - '@rollup/rollup-linux-arm64-gnu@4.18.0': - resolution: {integrity: sha512-rJ5D47d8WD7J+7STKdCUAgmQk49xuFrRi9pZkWoRD1UeSMakbcepWXPF8ycChBoAqs1pb2wzvbY6Q33WmN2ftw==} + '@rollup/rollup-linux-arm64-gnu@4.18.1': + resolution: {integrity: sha512-8mwmGD668m8WaGbthrEYZ9CBmPug2QPGWxhJxh/vCgBjro5o96gL04WLlg5BA233OCWLqERy4YUzX3bJGXaJgQ==} cpu: [arm64] os: [linux] - '@rollup/rollup-linux-arm64-musl@4.18.0': - resolution: {integrity: sha512-be6Yx37b24ZwxQ+wOQXXLZqpq4jTckJhtGlWGZs68TgdKXJgw54lUUoFYrg6Zs/kjzAQwEwYbp8JxZVzZLRepQ==} + '@rollup/rollup-linux-arm64-musl@4.18.1': + resolution: {integrity: sha512-dJX9u4r4bqInMGOAQoGYdwDP8lQiisWb9et+T84l2WXk41yEej8v2iGKodmdKimT8cTAYt0jFb+UEBxnPkbXEQ==} cpu: [arm64] os: [linux] - '@rollup/rollup-linux-powerpc64le-gnu@4.18.0': - resolution: {integrity: sha512-hNVMQK+qrA9Todu9+wqrXOHxFiD5YmdEi3paj6vP02Kx1hjd2LLYR2eaN7DsEshg09+9uzWi2W18MJDlG0cxJA==} + '@rollup/rollup-linux-powerpc64le-gnu@4.18.1': + resolution: {integrity: sha512-V72cXdTl4EI0x6FNmho4D502sy7ed+LuVW6Ym8aI6DRQ9hQZdp5sj0a2usYOlqvFBNKQnLQGwmYnujo2HvjCxQ==} cpu: [ppc64] os: [linux] - '@rollup/rollup-linux-riscv64-gnu@4.18.0': - resolution: {integrity: sha512-ROCM7i+m1NfdrsmvwSzoxp9HFtmKGHEqu5NNDiZWQtXLA8S5HBCkVvKAxJ8U+CVctHwV2Gb5VUaK7UAkzhDjlg==} + '@rollup/rollup-linux-riscv64-gnu@4.18.1': + resolution: {integrity: sha512-f+pJih7sxoKmbjghrM2RkWo2WHUW8UbfxIQiWo5yeCaCM0TveMEuAzKJte4QskBp1TIinpnRcxkquY+4WuY/tg==} cpu: [riscv64] os: [linux] - '@rollup/rollup-linux-s390x-gnu@4.18.0': - resolution: {integrity: sha512-0UyyRHyDN42QL+NbqevXIIUnKA47A+45WyasO+y2bGJ1mhQrfrtXUpTxCOrfxCR4esV3/RLYyucGVPiUsO8xjg==} + '@rollup/rollup-linux-s390x-gnu@4.18.1': + resolution: {integrity: sha512-qb1hMMT3Fr/Qz1OKovCuUM11MUNLUuHeBC2DPPAWUYYUAOFWaxInaTwTQmc7Fl5La7DShTEpmYwgdt2hG+4TEg==} cpu: [s390x] os: [linux] - '@rollup/rollup-linux-x64-gnu@4.18.0': - resolution: {integrity: sha512-xuglR2rBVHA5UsI8h8UbX4VJ470PtGCf5Vpswh7p2ukaqBGFTnsfzxUBetoWBWymHMxbIG0Cmx7Y9qDZzr648w==} + '@rollup/rollup-linux-x64-gnu@4.18.1': + resolution: {integrity: sha512-7O5u/p6oKUFYjRbZkL2FLbwsyoJAjyeXHCU3O4ndvzg2OFO2GinFPSJFGbiwFDaCFc+k7gs9CF243PwdPQFh5g==} cpu: [x64] os: [linux] - '@rollup/rollup-linux-x64-musl@4.18.0': - resolution: {integrity: sha512-LKaqQL9osY/ir2geuLVvRRs+utWUNilzdE90TpyoX0eNqPzWjRm14oMEE+YLve4k/NAqCdPkGYDaDF5Sw+xBfg==} + '@rollup/rollup-linux-x64-musl@4.18.1': + resolution: {integrity: sha512-pDLkYITdYrH/9Cv/Vlj8HppDuLMDUBmgsM0+N+xLtFd18aXgM9Nyqupb/Uw+HeidhfYg2lD6CXvz6CjoVOaKjQ==} cpu: [x64] os: [linux] - '@rollup/rollup-win32-arm64-msvc@4.18.0': - resolution: {integrity: sha512-7J6TkZQFGo9qBKH0pk2cEVSRhJbL6MtfWxth7Y5YmZs57Pi+4x6c2dStAUvaQkHQLnEQv1jzBUW43GvZW8OFqA==} + '@rollup/rollup-win32-arm64-msvc@4.18.1': + resolution: {integrity: sha512-W2ZNI323O/8pJdBGil1oCauuCzmVd9lDmWBBqxYZcOqWD6aWqJtVBQ1dFrF4dYpZPks6F+xCZHfzG5hYlSHZ6g==} cpu: [arm64] os: [win32] - '@rollup/rollup-win32-ia32-msvc@4.18.0': - resolution: {integrity: 
sha512-Txjh+IxBPbkUB9+SXZMpv+b/vnTEtFyfWZgJ6iyCmt2tdx0OF5WhFowLmnh8ENGNpfUlUZkdI//4IEmhwPieNg==} + '@rollup/rollup-win32-ia32-msvc@4.18.1': + resolution: {integrity: sha512-ELfEX1/+eGZYMaCIbK4jqLxO1gyTSOIlZr6pbC4SRYFaSIDVKOnZNMdoZ+ON0mrFDp4+H5MhwNC1H/AhE3zQLg==} cpu: [ia32] os: [win32] - '@rollup/rollup-win32-x64-msvc@4.18.0': - resolution: {integrity: sha512-UOo5FdvOL0+eIVTgS4tIdbW+TtnBLWg1YBCcU2KWM7nuNwRz9bksDX1bekJJCpu25N1DVWaCwnT39dVQxzqS8g==} + '@rollup/rollup-win32-x64-msvc@4.18.1': + resolution: {integrity: sha512-yjk2MAkQmoaPYCSu35RLJ62+dz358nE83VfTePJRp8CG7aMg25mEJYpXFiD+NcevhX8LxD5OP5tktPXnXN7GDw==} cpu: [x64] os: [win32] - '@sentry/core@8.9.2': - resolution: {integrity: sha512-ixm8NISFlPlEo3FjSaqmq4nnd13BRHoafwJ5MG+okCz6BKGZ1SexEggP42/QpGvDprUUHnfncG6WUMgcarr1zA==} + '@sentry/core@8.17.0': + resolution: {integrity: sha512-s62O0Re6WcvaVbH1IEeAWmj/ca8UhaRoFaDnc5TR68reOycBrgnqCNq3qHxBsELOA6NJowoK+T29DDGs9QVXhQ==} engines: {node: '>=14.18'} - '@sentry/node@8.9.2': - resolution: {integrity: sha512-Q+JBpR4yx3eUyyhwgugucfRtPg65gYvzJGEmjzcnDJXJqX8ms4HPpNv9o2Om7A4014JxIibUdrQ+p5idcT7SZA==} + '@sentry/node@8.17.0': + resolution: {integrity: sha512-HJ7B/zlpGMOIN+TnLzp6gbOpOzTk3Co19N39Y17T9MrR+5Z4eHdgEKWORFyE0Wy2KYKkVRwJ5zZJbfldc0EsEA==} engines: {node: '>=14.18'} - '@sentry/opentelemetry@8.9.2': - resolution: {integrity: sha512-Q6SHDQhrsBPcMi7ejqVdNTkt6SCTIhpGsFN8QR7daH3uvM0X2O7ciCuO9gRNRTEkflEINV4SBZEjANYH7BkRAg==} + '@sentry/opentelemetry@8.17.0': + resolution: {integrity: sha512-SKHfvHECIs7kqcXVRypXC6bQ7AQ4TTILamamZS5Ro1FP+i+yT8qEIoVWljoFZUIyO4J42mAP98THa1lCPK4BXA==} engines: {node: '>=14.18'} peerDependencies: '@opentelemetry/api': ^1.9.0 - '@opentelemetry/core': ^1.25.0 - '@opentelemetry/instrumentation': ^0.52.0 - '@opentelemetry/sdk-trace-base': ^1.25.0 - '@opentelemetry/semantic-conventions': ^1.25.0 + '@opentelemetry/core': ^1.25.1 + '@opentelemetry/instrumentation': ^0.52.1 + '@opentelemetry/sdk-trace-base': ^1.25.1 + '@opentelemetry/semantic-conventions': ^1.25.1 - '@sentry/types@8.9.2': - resolution: {integrity: sha512-+LFOyQGl+zk5SZRGZD2MEURf7i5RHgP/mt3s85Rza+vz8M211WJ0YsjkIGUJFSY842nged5QLx4JysLaBlLymg==} + '@sentry/types@8.17.0': + resolution: {integrity: sha512-v0nI0+ajiGTijhF1W/ryn2+zFVFr6VPn6lao3W4qKj9MlltIHa4/uuGzTaiCFwoPw7g5bZ1Q09SStpDXVMkz2A==} engines: {node: '>=14.18'} - '@sentry/utils@8.9.2': - resolution: {integrity: sha512-A4srR9mEBFdVXwSEKjQ94msUbVkMr8JeFiEj9ouOFORw/Y/ux/WV2bWVD/ZI9wq0TcTNK8L1wBgU8UMS5lIq3A==} + '@sentry/utils@8.17.0': + resolution: {integrity: sha512-HHtAPLOlvzhwgfYzxtuPnLUoGRMtMrFvopkii74zmx/1ZD4VN4PYPB2E5KFf3c18pTovw+kxF0ux6VrGiyAHsw==} engines: {node: '>=14.18'} '@sigstore/bundle@1.1.0': @@ -1167,9 +1265,6 @@ packages: resolution: {integrity: sha512-2bRovzs0nJZFlCN3rXirE4gwxCn97JNjMmwpecqlbgV9WcxX7WRuIrgzx/X7Ib7MYRbyUTpBYE0s2x6AmZXnlg==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} - '@sinclair/typebox@0.27.8': - resolution: {integrity: sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==} - '@sindresorhus/is@5.6.0': resolution: {integrity: sha512-TV7t8GKYaJWsn00tFDqBw8+Uqmr8A0fRU1tvTQhyZzGv0sJCGRQL3JGMI3ucuKo3XIZdUP+Lx7/gh2t3lewy7g==} engines: {node: '>=14.16'} @@ -1205,71 +1300,20 @@ packages: resolution: {integrity: sha512-qaGV9ltJP0EO25YfFUPhxRVK0evXFIAGicsVXuRim4Ed9cjPxYhNnNJ49SFmbeLgtxpslIkX317IgpfcHPVj/A==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} - '@types/accepts@1.3.7': - resolution: {integrity: 
sha512-Pay9fq2lM2wXPWbteBsRAGiWH2hig4ZE2asK+mm7kUzlxRTfL961rj89I6zV/E3PcIkDqyuBEcMxFT7rccugeQ==} - '@types/async@3.2.24': resolution: {integrity: sha512-8iHVLHsCCOBKjCF2KwFe0p9Z3rfM9mL+sSP8btyR5vTjJRAqpBYD28/ZLgXPf0pjG1VxOvtCV/BgXkQbpSe8Hw==} - '@types/body-parser@1.19.5': - resolution: {integrity: sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg==} - - '@types/chai-subset@1.3.5': - resolution: {integrity: sha512-c2mPnw+xHtXDoHmdtcCXGwyLMiauiAyxWMzhGpqHC4nqI/Y5G2XhTampslK2rb59kpcuHon03UH8W6iYUzw88A==} - - '@types/chai@4.3.16': - resolution: {integrity: sha512-PatH4iOdyh3MyWtmHVFXLWCCIhUbopaltqddG9BzB+gMIzee2MJrvd+jouii9Z3wzQJruGWAm7WOMjgfG8hQlQ==} - '@types/connect@3.4.36': resolution: {integrity: sha512-P63Zd/JUGq+PdrM1lv0Wv5SBYeA2+CORvbrXbngriYY0jzLUWfQMQQxOhjONEz/wlHOAxOdY7CY65rgQdTjq2w==} - '@types/connect@3.4.38': - resolution: {integrity: sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==} - - '@types/content-disposition@0.5.8': - resolution: {integrity: sha512-QVSSvno3dE0MgO76pJhmv4Qyi/j0Yk9pBp0Y7TJ2Tlj+KCgJWY6qX7nnxCOLkZ3VYRSIk1WTxCvwUSdx6CCLdg==} - - '@types/cookies@0.9.0': - resolution: {integrity: sha512-40Zk8qR147RABiQ7NQnBzWzDcjKzNrntB5BAmeGCb2p/MIyOE+4BVvc17wumsUqUw00bJYqoXFHYygQnEFh4/Q==} - '@types/estree@1.0.5': resolution: {integrity: sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==} - '@types/express-serve-static-core@4.19.1': - resolution: {integrity: sha512-ej0phymbFLoCB26dbbq5PGScsf2JAJ4IJHjG10LalgUV36XKTmA4GdA+PVllKvRk0sEKt64X8975qFnkSi0hqA==} - - '@types/express@4.17.21': - resolution: {integrity: sha512-ejlPM315qwLpaQlQDTjPdsUFSc6ZsP4AN6AlWnogPjQ7CVi7PYF3YVz+CY3jE2pwYf7E/7HlDAN0rV2GxTG0HQ==} - - '@types/http-assert@1.5.5': - resolution: {integrity: sha512-4+tE/lwdAahgZT1g30Jkdm9PzFRde0xwxBNUyRsCitRvCQB90iuA2uJYdUnhnANRcqGXaWOGY4FEoxeElNAK2g==} - '@types/http-cache-semantics@4.0.4': resolution: {integrity: sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA==} - '@types/http-errors@2.0.4': - resolution: {integrity: sha512-D0CFMMtydbJAegzOyHjtiKPLlvnm3iTZyZRSZoLq2mRhDdmLfIWOCYPfQJ4cu2erKghU++QvjcUjp/5h7hESpA==} - - '@types/keygrip@1.0.6': - resolution: {integrity: sha512-lZuNAY9xeJt7Bx4t4dx0rYCDqGPW8RXhQZK1td7d4H6E9zYbLoOtjBvfwdTKpsyxQI/2jv+armjX/RW+ZNpXOQ==} - - '@types/koa-compose@3.2.8': - resolution: {integrity: sha512-4Olc63RY+MKvxMwVknCUDhRQX1pFQoBZ/lXcRLP69PQkEpze/0cr8LNqJQe5NFb/b19DWi2a5bTi2VAlQzhJuA==} - - '@types/koa@2.14.0': - resolution: {integrity: sha512-DTDUyznHGNHAl+wd1n0z1jxNajduyTh8R53xoewuerdBzGo6Ogj6F2299BFtrexJw4NtgjsI5SMPCmV9gZwGXA==} - - '@types/koa__router@12.0.3': - resolution: {integrity: sha512-5YUJVv6NwM1z7m6FuYpKfNLTZ932Z6EF6xy2BbtpJSyn13DKNQEkXVffFVSnJHxvwwWh2SAeumpjAYUELqgjyw==} - - '@types/lodash@4.17.5': - resolution: {integrity: sha512-MBIOHVZqVqgfro1euRDWX7OO0fBVUUMrN6Pwm8LQsz8cWhEpihlvR70ENj3f40j58TNxZaWv2ndSkInykNBBJw==} - - '@types/mime@1.3.5': - resolution: {integrity: sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==} - - '@types/minimist@1.2.5': - resolution: {integrity: sha512-hov8bUuiLiyFPGyFPE1lwWhmzYbirOXQNNo40+y3zow8aFVTeyn3VWL0VFFfdNddA8S4Vf0Tc062rzyNr7Paag==} + '@types/lodash@4.17.6': + resolution: {integrity: sha512-OpXEVoCKSS3lQqjx9GGGOapBeuW5eUboYHRlHP9urXPX25IKZ6AnP5ZRxtVf63iieUbsHxLn8NQ5Nlftc6yzAA==} '@types/mysql@2.15.22': resolution: {integrity: 
sha512-wK1pzsJVVAjYCSZWQoWHziQZbNggXFDUEIGf54g4ZM/ERuP86uGdWeKZWMYlqTPMZfHJJvLPyogXGvCOg87yLQ==} @@ -1283,14 +1327,8 @@ packages: '@types/node@15.14.9': resolution: {integrity: sha512-qjd88DrCxupx/kJD5yQgZdcYKZKSIGBVDIBE1/LTGcNm3d2Np/jxojkdePDdfnBHJc5W7vSMpbJ1aB7p/Py69A==} - '@types/node@18.11.11': - resolution: {integrity: sha512-KJ021B1nlQUBLopzZmPBVuGU9un7WJd/W4ya7Ih02B4Uwky5Nja0yGYav2EfYIk0RR2Q9oVhf60S2XR1BCWJ2g==} - - '@types/node@18.19.50': - resolution: {integrity: sha512-xonK+NRrMBRtkL1hVCc3G+uXtjh1Al4opBLjqVmipe5ZAaBYWW6cNAiBVZ1BvmkBhep698rP3UM3aRAdSALuhg==} - - '@types/normalize-package-data@2.4.4': - resolution: {integrity: sha512-37i+OaWTh9qeK4LSHPsyRC7NahnGotNuZvjLSgcPzblpHB3rrCJxAOgI5gCdKm7coonsaX1Of0ILiTcnZjbfxA==} + '@types/node@22.5.5': + resolution: {integrity: sha512-Xjs4y5UPO/CLdzpgR6GirZJx36yScjh73+2NlLlkFRSoQN8B0DpfXPdZGnvVmLRLOsqDpOfTNv7D9trgGhmOIA==} '@types/pg-pool@2.0.4': resolution: {integrity: sha512-qZAvkv1K3QbmHHFYSNRYPkRjOWRLBYrL4B9c+wG0GSVGBw0NtJwPcgx/DSddeDJvRGMHCEQ4VMEVfuJ/0gZ3XQ==} @@ -1298,26 +1336,14 @@ packages: '@types/pg@8.6.1': resolution: {integrity: sha512-1Kc4oAGzAl7uqUStZCDvaLFqZrW9qWSjXOmBfdgyBP5La7Us6Mg4GBvRlSoaZMhQF/zSj1C8CtKMBkoiT8eL8w==} - '@types/qs@6.9.15': - resolution: {integrity: sha512-uXHQKES6DQKKCLh441Xv/dwxOq1TVS3JPUMlEqoEglvlhR6Mxnlew/Xq/LRVHpLyk7iK3zODe1qYHIMltO7XGg==} - - '@types/range-parser@1.2.7': - resolution: {integrity: sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==} - '@types/semver-utils@1.1.3': resolution: {integrity: sha512-T+YwkslhsM+CeuhYUxyAjWm7mJ5am/K10UX40RuA6k6Lc7eGtq8iY2xOzy7Vq0GOqhl/xZl5l2FwURZMTPTUww==} '@types/semver@7.5.8': resolution: {integrity: sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ==} - '@types/send@0.17.4': - resolution: {integrity: sha512-x2EM6TJOybec7c52BX0ZspPodMsQUd5L6PRwOunVyVUhXiBSKf3AezDL8Dgvgt5o0UfKNfuA0eMLr2wLT4AiBA==} - - '@types/serve-static@1.15.7': - resolution: {integrity: sha512-W8Ym+h8nhuRwaKPaDw34QUkwsGi6Rc4yYqvKFo5rm2FUEhCFbzVWrxXUxuKK8TASjWsysJY0nsmNCGhCOIsrOw==} - - '@types/shimmer@1.0.5': - resolution: {integrity: sha512-9Hp0ObzwwO57DpLFF0InUjUm/II8GmKAvzbefxQTihCb7KI6yc9yzf0nLc4mVdby5N4DRCgQM2wCup9KTieeww==} + '@types/shimmer@1.2.0': + resolution: {integrity: sha512-UE7oxhQLLd9gub6JKIAhDq06T0F6FnztwMNRvYgjeQSBeMc1ZG/tA47EwfduvkuQS8apbkM/lpLpWsaCeYsXVg==} '@types/strip-bom@3.0.0': resolution: {integrity: sha512-xevGOReSYGM7g/kUBZzPqCrR/KYAo+F0yiPc85WFTJa0MSLtyFTVTU6cJu/aV4mid7IffDIWqo69THF2o4JiEQ==} @@ -1340,38 +1366,39 @@ packages: '@types/ws@8.2.3': resolution: {integrity: sha512-ahRJZquUYCdOZf/rCsWg88S0/+cb9wazUBHv6HZEe3XdYaBe2zr/slM8J28X07Hn88Pnm4ezo7N8/ofnOgrPVQ==} - '@vitest/expect@0.34.6': - resolution: {integrity: sha512-QUzKpUQRc1qC7qdGo7rMK3AkETI7w18gTCUrsNnyjjJKYiuUB9+TQK3QnR1unhCnWRC0AbKv2omLGQDF/mIjOw==} - - '@vitest/expect@2.0.5': - resolution: {integrity: sha512-yHZtwuP7JZivj65Gxoi8upUN2OzHTi3zVfjwdpu2WrvCZPLwsJ2Ey5ILIPccoW23dd/zQBlJ4/dhi7DWNyXCpA==} + '@vitest/expect@2.1.1': + resolution: {integrity: sha512-YeueunS0HiHiQxk+KEOnq/QMzlUuOzbU1Go+PgAsHvvv3tUkJPm9xWt+6ITNTlzsMXUjmgm5T+U7KBPK2qQV6w==} - '@vitest/pretty-format@2.0.5': - resolution: {integrity: sha512-h8k+1oWHfwTkyTkb9egzwNMfJAEx4veaPSnMeKbVSjp4euqGSbQlm5+6VHwTr7u4FJslVVsUG5nopCaAYdOmSQ==} - - '@vitest/runner@0.34.6': - resolution: {integrity: sha512-1CUQgtJSLF47NnhN+F9X2ycxUP0kLHQ/JWvNHbeBfwW8CzEGgeskzNnHDyv1ieKTltuR6sdIHV+nmR6kPxQqzQ==} - - '@vitest/runner@2.0.5': - resolution: {integrity: 
sha512-TfRfZa6Bkk9ky4tW0z20WKXFEwwvWhRY+84CnSEtq4+3ZvDlJyY32oNTJtM7AW9ihW90tX/1Q78cb6FjoAs+ig==} + '@vitest/mocker@2.1.1': + resolution: {integrity: sha512-LNN5VwOEdJqCmJ/2XJBywB11DLlkbY0ooDJW3uRX5cZyYCrc4PI/ePX0iQhE3BiEGiQmK4GE7Q/PqCkkaiPnrA==} + peerDependencies: + '@vitest/spy': 2.1.1 + msw: ^2.3.5 + vite: ^5.0.0 + peerDependenciesMeta: + msw: + optional: true + vite: + optional: true - '@vitest/snapshot@0.34.6': - resolution: {integrity: sha512-B3OZqYn6k4VaN011D+ve+AA4whM4QkcwcrwaKwAbyyvS/NB1hCWjFIBQxAQQSQir9/RtyAAGuq+4RJmbn2dH4w==} + '@vitest/pretty-format@2.1.1': + resolution: {integrity: sha512-SjxPFOtuINDUW8/UkElJYQSFtnWX7tMksSGW0vfjxMneFqxVr8YJ979QpMbDW7g+BIiq88RAGDjf7en6rvLPPQ==} - '@vitest/snapshot@2.0.5': - resolution: {integrity: sha512-SgCPUeDFLaM0mIUHfaArq8fD2WbaXG/zVXjRupthYfYGzc8ztbFbu6dUNOblBG7XLMR1kEhS/DNnfCZ2IhdDew==} + '@vitest/runner@2.1.1': + resolution: {integrity: sha512-uTPuY6PWOYitIkLPidaY5L3t0JJITdGTSwBtwMjKzo5O6RCOEncz9PUN+0pDidX8kTHYjO0EwUIvhlGpnGpxmA==} - '@vitest/spy@0.34.6': - resolution: {integrity: sha512-xaCvneSaeBw/cz8ySmF7ZwGvL0lBjfvqc1LpQ/vcdHEvpLn3Ff1vAvjw+CoGn0802l++5L/pxb7whwcWAw+DUQ==} + '@vitest/snapshot@2.1.1': + resolution: {integrity: sha512-BnSku1WFy7r4mm96ha2FzN99AZJgpZOWrAhtQfoxjUU5YMRpq1zmHRq7a5K9/NjqonebO7iVDla+VvZS8BOWMw==} - '@vitest/spy@2.0.5': - resolution: {integrity: sha512-c/jdthAhvJdpfVuaexSrnawxZz6pywlTPe84LUB2m/4t3rl2fTo9NFGBG4oWgaD+FTgDDV8hJ/nibT7IfH3JfA==} + '@vitest/spy@2.1.1': + resolution: {integrity: sha512-ZM39BnZ9t/xZ/nF4UwRH5il0Sw93QnZXd9NAZGRpIgj0yvVwPpLd702s/Cx955rGaMlyBQkZJ2Ir7qyY48VZ+g==} - '@vitest/utils@0.34.6': - resolution: {integrity: sha512-IG5aDD8S6zlvloDsnzHw0Ut5xczlF+kv2BOTo+iXfPr54Yhi5qbVOgGB1hZaVq4iJ4C/MZ2J0y15IlsV/ZcI0A==} + '@vitest/utils@2.1.1': + resolution: {integrity: sha512-Y6Q9TsI+qJ2CC0ZKj6VBb+T8UPz593N113nnUykqwANqhgf3QkZeHFlusgKLTqrnVHbj/XDKZcDHol+dxVT+rQ==} - '@vitest/utils@2.0.5': - resolution: {integrity: sha512-d8HKbqIcya+GR67mkZbrzhS5kKhtp8dQLcmRZLGTscGVg7yImT82cIrhtn2L8+VujWcy6KZweApgNmPsTAO/UQ==} + '@vlasky/mysql@2.18.6': + resolution: {integrity: sha512-c+qz/zzqecteQLchoje0E0rjLla935d6hHPpMKmfyQJnHlycLpR49ekS6s/zUAt8w0Um5hFglKXm4+PeJTVhaQ==} + engines: {node: '>= 0.6'} abbrev@1.1.1: resolution: {integrity: sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==} @@ -1393,12 +1420,12 @@ packages: peerDependencies: acorn: ^8 - acorn-walk@8.3.2: - resolution: {integrity: sha512-cjkyv4OtNCIeqhHrfS81QWXoCBPExR/J62oyEqepVw8WaQeSqpW2uhuLPh1m9eWhDuOo/jUXVTlifvesOWp/4A==} + acorn-walk@8.3.3: + resolution: {integrity: sha512-MxXdReSRhGO7VlFe1bRG/oI7/mdLV9B9JJT0N8vZOhF7gFRR5l3M8W9G8JxmKV+JC5mGqJ0QvqfSOLsCPa4nUw==} engines: {node: '>=0.4.0'} - acorn@8.11.3: - resolution: {integrity: sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==} + acorn@8.12.1: + resolution: {integrity: sha512-tcpGyI9zbizT9JbV6oYE477V6mTlXvvi0T0G3SNIYE2apm/G5huBa1+K89VGeovbg+jycCrfhl3ADxErOuO6Jg==} engines: {node: '>=0.4.0'} hasBin: true @@ -1430,8 +1457,8 @@ packages: ajv: optional: true - ajv@8.14.0: - resolution: {integrity: sha512-oYs1UUtO97ZO2lJ4bwnWeQW8/zvOIQLGKcvPTsWmvc2SYgBb+upuNS5NxoLaMU4h8Ju3Nbj6Cq8mD2LQoqVKFA==} + ajv@8.16.0: + resolution: {integrity: sha512-F0twR8U1ZU67JIEtekUcLkXkoO5mMMmgGD8sK/xUFzJ805jxHQl92hImFAqqXMyMYjSPOyUPAwHYhB72g5sTXw==} ansi-align@3.0.1: resolution: {integrity: sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==} @@ -1460,10 +1487,6 @@ packages: resolution: 
{integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} engines: {node: '>=8'} - ansi-styles@5.2.0: - resolution: {integrity: sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==} - engines: {node: '>=10'} - ansi-styles@6.2.1: resolution: {integrity: sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==} engines: {node: '>=12'} @@ -1489,29 +1512,10 @@ packages: argparse@2.0.1: resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} - array-buffer-byte-length@1.0.1: - resolution: {integrity: sha512-ahC5W1xgou+KTXix4sAO8Ki12Q+jf4i0+tmk3sC+zgcynshkHxzpXdImBehiUYKKKDwvfFiJl1tZt6ewscS1Mg==} - engines: {node: '>= 0.4'} - array-union@2.1.0: resolution: {integrity: sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==} engines: {node: '>=8'} - array.prototype.flat@1.3.2: - resolution: {integrity: sha512-djYB+Zx2vLewY8RWlNCUdHjDXs2XOgm602S9E7P/UpHgfeHL00cRiIF+IN/G/aUJ7kGPb6yO/ErDI5V2s8iycA==} - engines: {node: '>= 0.4'} - - arraybuffer.prototype.slice@1.0.3: - resolution: {integrity: sha512-bMxMKAjg13EBSVscxTaYA4mRc5t1UAXa2kXiGTNfZ079HIWXEkKmkgFrh/nJqamaLSrXO5H4WFFkPEaLJWbs3A==} - engines: {node: '>= 0.4'} - - arrify@1.0.1: - resolution: {integrity: sha512-3CYzex9M9FGQjCGMGyi6/31c8GJbgb0qGyrx5HWxPd0aCwh4cB2YjMb2Xf9UuoogrMrlO9cTqnB5rI5GHZTcUA==} - engines: {node: '>=0.10.0'} - - assertion-error@1.1.0: - resolution: {integrity: sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==} - assertion-error@2.0.1: resolution: {integrity: sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==} engines: {node: '>=12'} @@ -1526,13 +1530,13 @@ packages: resolution: {integrity: sha512-kNOjDqAh7px0XWNI+4QbzoiR/nTkHAWNud2uvnJquD1/x5a7EQZMJT0AczqK0Qn67oY/TTQ1LbUKajZpp3I9tQ==} engines: {node: '>=8.0.0'} - available-typed-arrays@1.0.7: - resolution: {integrity: sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==} - engines: {node: '>= 0.4'} - avvio@8.3.2: resolution: {integrity: sha512-st8e519GWHa/azv8S87mcJvZs4WsgTBjOw/Ih1CP6u+8SZvcOeAYNG6JbsIrAUUJJ7JfmrnOkR8ipDS+u9SIRQ==} + aws-ssl-profiles@1.1.2: + resolution: {integrity: sha512-NZKeq9AfyQvEeNlN0zSYAaWrmBffJh3IELMZfRpJVWgrpEbtEpnjvzqBPf+mxoI287JohRDoa+/nsfqqiZmF6g==} + engines: {node: '>= 6.0.0'} + balanced-match@1.0.2: resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} @@ -1549,6 +1553,13 @@ packages: resolution: {integrity: sha512-pbnl5XzGBdrFU/wT4jqmJVPn2B6UHPBOhzMQkY/SPUPB6QtUXtmBHBIwCbXJol93mOpGMnQyP/+BB19q04xj7g==} engines: {node: '>=4'} + big-integer@1.6.51: + resolution: {integrity: sha512-GPEid2Y9QU1Exl1rpO9B2IPJGHPSupF5GnVIP0blYvNOMer2bTvSWs1jGOUg04hTmu67nmLsQ9TBo1puaotBHg==} + engines: {node: '>=0.6'} + + bignumber.js@9.1.1: + resolution: {integrity: sha512-pHm4LsMJ6lzgNGVfZHjMoO8sdoRhOzOH4MLmY65Jg70bpxCKu5iOHNJyfF6OyvYw7t8Fpf35RuzUyqnQsj8Vig==} + binary-extensions@2.3.0: resolution: {integrity: sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==} engines: {node: '>=8'} @@ -1570,11 +1581,8 @@ packages: resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==} engines: {node: '>=8'} - breakword@1.0.6: - resolution: 
{integrity: sha512-yjxDAYyK/pBvws9H4xKYpLDpYKEH6CzrBPAuXq3x18I+c/2MkVtT3qAr7Oloi6Dss9qNhPVueAAVU1CSeNDIXw==} - - bson@6.7.0: - resolution: {integrity: sha512-w2IquM5mYzYZv6rs3uN2DZTOBe2a0zXLj53TGDqwF4l6Sz/XsISrisXOJihArF9+BZ6Cq/GjVht7Sjfmri7ytQ==} + bson@6.8.0: + resolution: {integrity: sha512-iOJg8pr7wq2tg/zSlCCHMi3hMm5JTOxLTagf3zxhcenHsFp+c6uOs6K7W5UE7A4QIJGtqh/ZovFNMP4mOPJynQ==} engines: {node: '>=16.20.1'} buffer-from@1.1.2: @@ -1606,26 +1614,10 @@ packages: resolution: {integrity: sha512-zkDT5WAF4hSSoUgyfg5tFIxz8XQK+25W/TLVojJTMKBaxevLBBtLxgqguAuVQB8PVW79FVjHcU+GJ9tVbDZ9mQ==} engines: {node: '>=14.16'} - call-bind@1.0.7: - resolution: {integrity: sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==} - engines: {node: '>= 0.4'} - - camelcase-keys@6.2.2: - resolution: {integrity: sha512-YrwaA0vEKazPBkn0ipTiMpSajYDSe+KjQfrjhcBMxJt/znbvlHd8Pw/Vamaz5EB4Wfhs3SUR3Z9mwRu/P3s3Yg==} - engines: {node: '>=8'} - - camelcase@5.3.1: - resolution: {integrity: sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==} - engines: {node: '>=6'} - camelcase@7.0.1: resolution: {integrity: sha512-xlx1yCK2Oc1APsPXDL2LdlNP6+uu8OCDdhOBSVT279M/S+y75O30C2VuD8T2ogdePBBl7PfPF4504tnLgX3zfw==} engines: {node: '>=14.16'} - chai@4.4.1: - resolution: {integrity: sha512-13sOfMv2+DWduEU+/xbun3LScLoqN17nBeTLUsmDfKdoiC1fr0n9PU4guu4AhRcOVFk/sW8LyZWHuhWtQZiF+g==} - engines: {node: '>=4'} - chai@5.1.1: resolution: {integrity: sha512-pT1ZgP8rPNqUgieVaEY+ryQr6Q4HXNg8Ei9UnLUrjN4IA7dvQC5JB+/kxVcPNDHyBcc/26CXPkbNzq3qwrOEKA==} engines: {node: '>=12'} @@ -1645,9 +1637,6 @@ packages: chardet@0.7.0: resolution: {integrity: sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==} - check-error@1.0.3: - resolution: {integrity: sha512-iKEoDYaRmd1mxM90a2OEfWhjsjPpYPuQ+lMYsoxB126+t8fw7ySEO48nmDg5COTjxDI65/Y2OWpeEHk3ZOe8zg==} - check-error@2.1.1: resolution: {integrity: sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==} engines: {node: '>= 16'} @@ -1691,9 +1680,6 @@ packages: resolution: {integrity: sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ==} engines: {node: '>= 12'} - cliui@6.0.0: - resolution: {integrity: sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==} - cliui@7.0.4: resolution: {integrity: sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==} @@ -1750,9 +1736,6 @@ packages: engines: {node: ^14.13.0 || >=16.0.0} hasBin: true - confbox@0.1.7: - resolution: {integrity: sha512-uJcB/FKZtBMCJpK8MQji6bJHgu1tixKPxRLeGkNzBoOZzpnZUJm0jm2/sBDWcuBx1dYgxV4JU+g5hmNxCyAmdA==} - config-chain@1.1.13: resolution: {integrity: sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==} @@ -1792,44 +1775,19 @@ packages: resolution: {integrity: sha512-x8dy3RnvYdlUcPOjkEHqozhiwzKNSq7GcPuXFbnyMOCHxX8V3OgIg/pYuabl2sbUPfIJaeAQB7PMOK8DFIdoRA==} engines: {node: '>=12'} - csv-generate@3.4.3: - resolution: {integrity: sha512-w/T+rqR0vwvHqWs/1ZyMDWtHHSJaN06klRqJXBEpDJaM/+dZkso0OKh1VcuuYvK3XM53KysVNq8Ko/epCK8wOw==} - - csv-parse@4.16.3: - resolution: {integrity: sha512-cO1I/zmz4w2dcKHVvpCr7JVRu8/FymG5OEpmvsZYlccYolPBLoVGKUHgNoc4ZGkFeFlWGEDmMyBM+TTqRdW/wg==} - - csv-stringify@5.6.5: - resolution: {integrity: sha512-PjiQ659aQ+fUTQqSrd1XEDnOr52jh30RBurfzkscaE2tPaFsDH5wOAHJiw8XAHphRknCwMUE9KRayc4K/NbO8A==} - - csv@5.5.3: - 
resolution: {integrity: sha512-QTaY0XjjhTQOdguARF0lGKm5/mEq9PD9/VhZZegHDIBq2tQwgNpHc3dneD4mGo2iJs+fTKv5Bp0fZ+BRuY3Z0g==} - engines: {node: '>= 0.1.90'} - data-uri-to-buffer@4.0.1: resolution: {integrity: sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==} engines: {node: '>= 12'} - data-view-buffer@1.0.1: - resolution: {integrity: sha512-0lht7OugA5x3iJLOWFhWK/5ehONdprk0ISXqVFn/NFrDu+cuc8iADFrGQz5BnRK7LLU3JmkbXSxaqX+/mXYtUA==} - engines: {node: '>= 0.4'} - - data-view-byte-length@1.0.1: - resolution: {integrity: sha512-4J7wRJD3ABAzr8wP+OcIcqq2dlUKp4DVflx++hs5h5ZKydWMI6/D/fAot+yh6g2tHh8fLFTvNOaVN357NvSrOQ==} - engines: {node: '>= 0.4'} - - data-view-byte-offset@1.0.0: - resolution: {integrity: sha512-t/Ygsytq+R995EJ5PZlD4Cu56sWa8InXySaViRzw9apusqsOO2bQP+SbYzAhR0pFKoB+43lYy8rWban9JSuXnA==} - engines: {node: '>= 0.4'} - date-fns@2.30.0: resolution: {integrity: sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==} engines: {node: '>=0.11'} - date-fns@3.6.0: - resolution: {integrity: sha512-fRHTG8g/Gif+kSh50gaGEdToemgfj74aRX3swtiouboip5JDLAyDE9F11nHMIcvOaXeOC6D7SpNhi7uFyB7Uww==} + date-fns@4.1.0: + resolution: {integrity: sha512-Ukq0owbQXxa/U3EGtsdVBkR1w7KOQ5gIBqdH2hkvknzZPYvBxb/aa6E8L7tmjFtkwZBu3UXBbjIgPo/Ez4xaNg==} - debug@4.3.4: - resolution: {integrity: sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==} + debug@4.3.5: + resolution: {integrity: sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg==} engines: {node: '>=6.0'} peerDependencies: supports-color: '*' @@ -1846,22 +1804,10 @@ packages: supports-color: optional: true - decamelize-keys@1.1.1: - resolution: {integrity: sha512-WiPxgEirIV0/eIOMcnFBA3/IJZAZqKnwAwWyvvdi4lsr1WCN22nhdf/3db3DoZcUjTV2SqfzIwNyp6y2xs3nmg==} - engines: {node: '>=0.10.0'} - - decamelize@1.2.0: - resolution: {integrity: sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==} - engines: {node: '>=0.10.0'} - decompress-response@6.0.0: resolution: {integrity: sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==} engines: {node: '>=10'} - deep-eql@4.1.3: - resolution: {integrity: sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw==} - engines: {node: '>=6'} - deep-eql@5.0.2: resolution: {integrity: sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==} engines: {node: '>=6'} @@ -1877,25 +1823,17 @@ packages: resolution: {integrity: sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==} engines: {node: '>=10'} - define-data-property@1.1.4: - resolution: {integrity: sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==} - engines: {node: '>= 0.4'} - - define-properties@1.2.1: - resolution: {integrity: sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==} - engines: {node: '>= 0.4'} - delegates@1.0.0: resolution: {integrity: sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==} + denque@2.1.0: + resolution: {integrity: sha512-HVQE3AAb/pxF8fQAoiqpvg9i3evqug3hoiwakOyZAwJm+6vZehbkYXZ0l4JxS+I3QxM97v5aaRNhj8v5oBhekw==} + engines: {node: '>=0.10'} + detect-indent@6.1.0: resolution: {integrity: 
sha512-reYkTUJAZb9gUuZ2RvVCNhVHdg62RHnJ7WJl8ftMi4diZ6NWlciOzQN88pUhSELEwflJht4oQDv0F0BMlwaYtA==} engines: {node: '>=8'} - diff-sequences@29.6.3: - resolution: {integrity: sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==} - engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - diff@4.0.2: resolution: {integrity: sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==} engines: {node: '>=0.3.1'} @@ -1947,38 +1885,8 @@ packages: err-code@2.0.3: resolution: {integrity: sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==} - error-ex@1.3.2: - resolution: {integrity: sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==} - - es-abstract@1.23.3: - resolution: {integrity: sha512-e+HfNH61Bj1X9/jLc5v1owaLYuHdeHHSQlkhCBiTK8rBvKaULl/beGMxwrMXjpYrv4pz22BlY570vVePA2ho4A==} - engines: {node: '>= 0.4'} - - es-define-property@1.0.0: - resolution: {integrity: sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==} - engines: {node: '>= 0.4'} - - es-errors@1.3.0: - resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} - engines: {node: '>= 0.4'} - - es-object-atoms@1.0.0: - resolution: {integrity: sha512-MZ4iQ6JwHOBQjahnjwaC1ZtIBH+2ohjamzAO3oaHcXYup7qxjF2fixyH+Q71voWHeOkI2q/TnJao/KfXYIZWbw==} - engines: {node: '>= 0.4'} - - es-set-tostringtag@2.0.3: - resolution: {integrity: sha512-3T8uNMC3OQTHkFUsFq8r/BwAXLHvU/9O9mE0fBc/MY5iq/8H7ncvO947LmYA6ldWw9Uh8Yhf25zu6n7nML5QWQ==} - engines: {node: '>= 0.4'} - - es-shim-unscopables@1.0.2: - resolution: {integrity: sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw==} - - es-to-primitive@1.2.1: - resolution: {integrity: sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==} - engines: {node: '>= 0.4'} - - esbuild@0.20.2: - resolution: {integrity: sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g==} + esbuild@0.21.5: + resolution: {integrity: sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==} engines: {node: '>=12'} hasBin: true @@ -2013,10 +1921,6 @@ packages: resolution: {integrity: sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==} engines: {node: '>=0.8.x'} - execa@8.0.1: - resolution: {integrity: sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==} - engines: {node: '>=16.17'} - exponential-backoff@3.1.1: resolution: {integrity: sha512-dX7e/LHVJ6W3DE1MHWi9S1EYzDESENfLrYohG2G++ovZrYOkm4Knwa0mc1cn84xJOR4KEU0WSchhLbd0UklbHw==} @@ -2040,8 +1944,8 @@ packages: resolution: {integrity: sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==} engines: {node: '>=8.6.0'} - fast-json-stringify@5.16.0: - resolution: {integrity: sha512-A4bg6E15QrkuVO3f0SwIASgzMzR6XC4qTyTqhf3hYXy0iazbAdZKwkE+ox4WgzKyzM6ygvbdq3r134UjOaaAnA==} + fast-json-stringify@5.16.1: + resolution: {integrity: sha512-KAdnLvy1yu/XrRtP+LJnxbBGrhN+xXu+gt3EUvZhYGKCr3lFHq/7UFJHHFgmJKoqlh6B40bZLEv7w46B0mqn1g==} fast-memoize@2.5.2: resolution: {integrity: sha512-Ue0LwpDYErFbmNnZSF0UH6eImUwDmogUO1jyE+JbN2gsQz/jICm1Ve7t9QT0rNSsfJt+Hs4/S3GnsDVjL4HVrw==} @@ -2053,8 +1957,8 @@ packages: resolution: {integrity: 
sha512-dwsoQlS7h9hMeYUq1W++23NDcBLV4KqONnITDV9DjfS3q1SgDGVrBdvvTLUotWtPSD7asWDV9/CmsZPy8Hf70A==} engines: {node: '>=6'} - fast-uri@2.3.0: - resolution: {integrity: sha512-eel5UKGn369gGEWOqBShmFJWfq/xSJvsgDzgLYC845GneayWvXBf0lJCBn5qTABfewy1ZDPoaR5OZCP+kssfuw==} + fast-uri@2.4.0: + resolution: {integrity: sha512-ypuAmmMKInk5q7XcepxlnUWDLWv4GFtaJqAzWKqn62IpQ3pejtr5dTVbt3vwqVaMKmkNR55sTT+CqUKIaT21BA==} fastify-plugin@4.5.1: resolution: {integrity: sha512-stRHYGeuqpEZTL1Ef0Ovr2ltazUT9g844X5z/zEBFLG8RYlpDiOCIG+ATvYEp+/zmc7sN29mcIMp8gvYplYPIQ==} @@ -2088,17 +1992,11 @@ packages: resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} engines: {node: '>=10'} - find-yarn-workspace-root2@1.2.16: - resolution: {integrity: sha512-hr6hb1w8ePMpPVUK39S4RlwJzi+xPLuVuG8XlwXU3KD5Yn3qgBWVfy3AzNlDhWvE1EORCE65/Qm26rFQt3VLVA==} - fn.name@1.1.0: resolution: {integrity: sha512-GRnmB5gPyJpAhTQdSZTSp9uaPSvl09KoYcMQtsB9rQoOmzs9dH6ffeccH+Z+cv6P68Hu5bC6JjRh4Ah/mHSNRw==} - for-each@0.3.3: - resolution: {integrity: sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==} - - foreground-child@3.1.1: - resolution: {integrity: sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==} + foreground-child@3.2.1: + resolution: {integrity: sha512-PXUUyLqrR2XCWICfv6ukppP96sdFwWbNEnfEMt7jNsISjMsvaLNinAHNDYyvkyU+SZG2BTSbT5NjG+vZslfGTA==} engines: {node: '>=14'} form-data-encoder@2.1.4: @@ -2147,18 +2045,14 @@ packages: function-bind@1.1.2: resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} - function.prototype.name@1.1.6: - resolution: {integrity: sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==} - engines: {node: '>= 0.4'} - - functions-have-names@1.2.3: - resolution: {integrity: sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==} - gauge@4.0.4: resolution: {integrity: sha512-f9m+BEN5jkg6a0fZjleidjN51VE1X+mPFQ2DJ0uv1V39oCLCbsGe6yjbBnp7eK7z/+GAon99a3nHuqbuuthyPg==} engines: {node: ^12.13.0 || ^14.15.0 || >=16.0.0} deprecated: This package is no longer supported. 
+ generate-function@2.3.1: + resolution: {integrity: sha512-eeB5GfMNeevm/GRYq20ShmsaGcmI81kIX2K9XQx5miC8KdHaC6Jm0qQ8ZNeGOi7wYB8OsdxKs+Y2oVuTFuVwKQ==} + get-caller-file@2.0.5: resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} engines: {node: 6.* || 8.* || >= 10.*} @@ -2166,10 +2060,6 @@ packages: get-func-name@2.0.2: resolution: {integrity: sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==} - get-intrinsic@1.2.4: - resolution: {integrity: sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==} - engines: {node: '>= 0.4'} - get-stdin@8.0.0: resolution: {integrity: sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg==} engines: {node: '>=10'} @@ -2178,21 +2068,12 @@ packages: resolution: {integrity: sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==} engines: {node: '>=10'} - get-stream@8.0.1: - resolution: {integrity: sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA==} - engines: {node: '>=16'} - - get-symbol-description@1.0.2: - resolution: {integrity: sha512-g0QYk1dZBxGwk+Ngc+ltRH2IBp2f7zBkBMBJZCDerh6EhlhSR6+9irMCuT/09zD6qkarHUSn529sK/yL4S27mg==} - engines: {node: '>= 0.4'} - glob-parent@5.1.2: resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} engines: {node: '>= 6'} - glob@10.4.1: - resolution: {integrity: sha512-2jelhlq3E4ho74ZyVLN03oKdAZVUa6UDZzFLVH1H7dnoax+y9qyaq8zBkfDIggjniU19z0wU18y16jMB2eyVIw==} - engines: {node: '>=16 || 14 >=14.18'} + glob@10.4.5: + resolution: {integrity: sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==} hasBin: true glob@7.2.3: @@ -2208,10 +2089,6 @@ packages: resolution: {integrity: sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA==} engines: {node: '>=10'} - globalthis@1.0.4: - resolution: {integrity: sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==} - engines: {node: '>= 0.4'} - globby@11.1.0: resolution: {integrity: sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==} engines: {node: '>=10'} @@ -2219,9 +2096,6 @@ packages: globrex@0.1.2: resolution: {integrity: sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg==} - gopd@1.0.1: - resolution: {integrity: sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==} - got@12.6.1: resolution: {integrity: sha512-mThBblvlAF1d4O5oqyvN+ZxLAYwIJK7bpMxgYqPD9okW0C3qm5FFn7k811QrcuEBwaogR3ngOFoCfs6mRv7teQ==} engines: {node: '>=14.16'} @@ -2232,16 +2106,6 @@ packages: graceful-fs@4.2.11: resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} - grapheme-splitter@1.0.4: - resolution: {integrity: sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==} - - hard-rejection@2.1.0: - resolution: {integrity: sha512-VIZB+ibDhx7ObhAe7OVtoEbuP4h/MuOTHJ+J8h/eBXotJYl0fBgR72xDFCKgIh22OJZIOVNxBMWuhAr10r8HdA==} - engines: {node: '>=6'} - - has-bigints@1.0.2: - resolution: {integrity: sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==} - has-flag@3.0.0: resolution: {integrity: 
sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==} engines: {node: '>=4'} @@ -2250,21 +2114,6 @@ packages: resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} engines: {node: '>=8'} - has-property-descriptors@1.0.2: - resolution: {integrity: sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==} - - has-proto@1.0.3: - resolution: {integrity: sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==} - engines: {node: '>= 0.4'} - - has-symbols@1.0.3: - resolution: {integrity: sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==} - engines: {node: '>= 0.4'} - - has-tostringtag@1.0.2: - resolution: {integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==} - engines: {node: '>= 0.4'} - has-unicode@2.0.1: resolution: {integrity: sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ==} @@ -2276,9 +2125,6 @@ packages: resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} engines: {node: '>= 0.4'} - hosted-git-info@2.8.9: - resolution: {integrity: sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==} - hosted-git-info@5.2.1: resolution: {integrity: sha512-xIcQYMnhcx2Nr4JTjsFmwwnr9vldugPy9uVm0o87bjqqWMv9GaqsTeT+i99wTl0mk1uLxJtHxLb8kymqTENQsw==} engines: {node: ^12.13.0 || ^14.15.0 || >=16.0.0} @@ -2305,10 +2151,6 @@ packages: human-id@1.0.2: resolution: {integrity: sha512-UNopramDEhHJD+VR+ehk8rOslwSfByxPIZyJRfV739NDhN5LF1fa1MqnzKm2lGTQRjNrjK19Q5fhkgIfjlVUKw==} - human-signals@5.0.0: - resolution: {integrity: sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==} - engines: {node: '>=16.17.0'} - humanize-ms@1.2.1: resolution: {integrity: sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==} @@ -2334,14 +2176,11 @@ packages: resolution: {integrity: sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==} engines: {node: '>= 4'} - import-in-the-middle@1.4.2: - resolution: {integrity: sha512-9WOz1Yh/cvO/p69sxRmhyQwrIGGSp7EIdcb+fFNVi7CzQGQB8U1/1XrKVSbEd/GNOAeM0peJtmi7+qphe7NvAw==} - - import-in-the-middle@1.7.4: - resolution: {integrity: sha512-Lk+qzWmiQuRPPulGQeK5qq0v32k2bHnWrRPFgqyvhw7Kkov5L6MOLOIU3pcWeujc9W4q54Cp3Q2WV16eQkc7Bg==} + import-in-the-middle@1.7.1: + resolution: {integrity: sha512-1LrZPDtW+atAxH42S6288qyDFNQ2YCty+2mxEPRtfazH6Z5QwkaBSTS2ods7hnVJioF6rkRfNoA6A/MstpFXLg==} - import-in-the-middle@1.8.0: - resolution: {integrity: sha512-/xQjze8szLNnJ5rvHSzn+dcVXqCAU6Plbk4P24U/jwPmg1wy7IIp9OjKIO5tYue8GSPhDpPDiApQjvBUmWwhsQ==} + import-in-the-middle@1.9.0: + resolution: {integrity: sha512-Ng1SJINJDBzyUEkx9Mj32XD8G0TQCUb5TMoL9V91CTn6F3wYZLygLuhNFrv0cNMBZaeptnL1zecV6XrIdHJ+xQ==} import-lazy@4.0.0: resolution: {integrity: sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==} @@ -2376,14 +2215,10 @@ packages: resolution: {integrity: sha512-X7rqawQBvfdjS10YU1y1YVreA3SsLrW9dX2CewP2EbBJM4ypVNLDkO5y04gejPwKIY9lR+7r9gn3rFPt/kmWFg==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} - inquirer@9.2.22: - resolution: {integrity: 
sha512-SqLLa/Oe5rZUagTR9z+Zd6izyatHglbmbvVofo1KzuVB54YHleWzeHNLoR7FOICGOeQSqeLh1cordb3MzhGcEw==} + inquirer@9.3.5: + resolution: {integrity: sha512-SVRCRovA7KaT6nqWB2mCNpTvU4cuZ0hOXo5KPyiyOcNNUIZwq/JKtvXuDJNaxfuJKabBYRu1ecHze0YEwDYoRQ==} engines: {node: '>=18'} - internal-slot@1.0.7: - resolution: {integrity: sha512-NGnrKwXzSms2qUUih/ILZ5JBqNTSa1+ZmP6flaIp6KmSElgE9qdndzS3cqjrDovwFdmwsGsLdeFgB6suw+1e9g==} - engines: {node: '>= 0.4'} - ip-address@9.0.5: resolution: {integrity: sha512-zHtQzGojZXTwZTHQqra+ETKd4Sn3vgi7uBmlPoXVWZqYvuKmtI0l/VZTjqGmJY9x88GGOaZ9+G9ES8hC4T4X8g==} engines: {node: '>= 12'} @@ -2396,44 +2231,19 @@ packages: resolution: {integrity: sha512-Ag3wB2o37wslZS19hZqorUnrnzSkpOVy+IiiDEiTqNubEYpYuHWIf6K4psgN2ZWKExS4xhVCrRVfb/wfW8fWJA==} engines: {node: '>= 10'} - is-array-buffer@3.0.4: - resolution: {integrity: sha512-wcjaerHw0ydZwfhiKbXJWLDY8A7yV7KhjQOpb83hGgGfId/aQa4TOvwyzn2PuswW2gPCYEL/nEAiSVpdOj1lXw==} - engines: {node: '>= 0.4'} - - is-arrayish@0.2.1: - resolution: {integrity: sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==} - is-arrayish@0.3.2: resolution: {integrity: sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==} - is-bigint@1.0.4: - resolution: {integrity: sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==} - is-binary-path@2.1.0: resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==} engines: {node: '>=8'} - is-boolean-object@1.1.2: - resolution: {integrity: sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==} - engines: {node: '>= 0.4'} - - is-callable@1.2.7: - resolution: {integrity: sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==} - engines: {node: '>= 0.4'} - is-ci@3.0.1: resolution: {integrity: sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ==} hasBin: true - is-core-module@2.13.1: - resolution: {integrity: sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==} - - is-data-view@1.0.1: - resolution: {integrity: sha512-AHkaJrsUVW6wq6JS8y3JnM/GJF/9cf+k20+iDzlSaJrinEo5+7vRiteOSwBhHRiAyQATN1AmY4hwzxJKPmYf+w==} - engines: {node: '>= 0.4'} - - is-date-object@1.0.5: - resolution: {integrity: sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==} + is-core-module@2.14.0: + resolution: {integrity: sha512-a5dFJih5ZLYlRtDc0dZWP7RiKr6xIKzmn/oAYCDvdLThadVgyJwlaoQPmRtMSpz+rk0OGAgIu+TcM9HUF0fk1A==} engines: {node: '>= 0.4'} is-extglob@2.1.1: @@ -2459,18 +2269,10 @@ packages: is-lambda@1.0.1: resolution: {integrity: sha512-z7CMFGNrENq5iFB9Bqo64Xk6Y9sg+epq1myIcdHaGnbMTYOxvzsEtdYqQUylB7LxfkvgrrjP32T6Ywciio9UIQ==} - is-negative-zero@2.0.3: - resolution: {integrity: sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==} - engines: {node: '>= 0.4'} - is-npm@6.0.0: resolution: {integrity: sha512-JEjxbSmtPSt1c8XTkVrlujcXdKV1/tvuQ7GwKcAlyiVLeYFQ2VHat8xfrDJsIkhCdF/tZ7CiIR3sy141c6+gPQ==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - is-number-object@1.0.7: - resolution: {integrity: sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==} - engines: {node: '>= 0.4'} - is-number@7.0.0: resolution: {integrity: 
sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} engines: {node: '>=0.12.0'} @@ -2483,42 +2285,17 @@ packages: resolution: {integrity: sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==} engines: {node: '>=8'} - is-plain-obj@1.1.0: - resolution: {integrity: sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg==} - engines: {node: '>=0.10.0'} - - is-regex@1.1.4: - resolution: {integrity: sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==} - engines: {node: '>= 0.4'} - - is-shared-array-buffer@1.0.3: - resolution: {integrity: sha512-nA2hv5XIhLR3uVzDDfCIknerhx8XUKnstuOERPNNIinXG7v9u+ohXF67vxm4TPTEPU6lm61ZkwP3c9PCB97rhg==} - engines: {node: '>= 0.4'} + is-property@1.0.2: + resolution: {integrity: sha512-Ks/IoX00TtClbGQr4TWXemAnktAQvYB7HzcCxDGqEZU6oCmb2INHuOoKxbtR+HFkmYWBKv/dOZtGRiAjDhj92g==} is-stream@2.0.1: resolution: {integrity: sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==} engines: {node: '>=8'} - is-stream@3.0.0: - resolution: {integrity: sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==} - engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - - is-string@1.0.7: - resolution: {integrity: sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==} - engines: {node: '>= 0.4'} - is-subdir@1.2.0: resolution: {integrity: sha512-2AT6j+gXe/1ueqbW6fLZJiIw3F8iXGJtt0yDrZaBhAZEG1raiTxKWU+IPqMCzQAXOUCKdA4UDMgacKH25XG2Cw==} engines: {node: '>=4'} - is-symbol@1.0.4: - resolution: {integrity: sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==} - engines: {node: '>= 0.4'} - - is-typed-array@1.1.13: - resolution: {integrity: sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw==} - engines: {node: '>= 0.4'} - is-typedarray@1.0.0: resolution: {integrity: sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==} @@ -2526,9 +2303,6 @@ packages: resolution: {integrity: sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==} engines: {node: '>=10'} - is-weakref@1.0.2: - resolution: {integrity: sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==} - is-windows@1.0.2: resolution: {integrity: sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==} engines: {node: '>=0.10.0'} @@ -2543,24 +2317,20 @@ packages: isarray@1.0.0: resolution: {integrity: sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==} - isarray@2.0.5: - resolution: {integrity: sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==} - isexe@2.0.0: resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} ix@5.0.0: resolution: {integrity: sha512-6LyyrHnvNrSy5pKtW/KA+KKusHrB223aBJCJlIGPN7QBfDkEEtNrAkAz9lLLShIcdJntq6BiPCHuKaCM/9wwXw==} - jackspeak@3.1.2: - resolution: {integrity: sha512-kWmLKn2tRtfYMF/BakihVVRzBKOxz4gJMiL2Rj91WnAB5TPZumSH99R/Yf1qE1u4uRimvCSJfm6hnxohXeEXjQ==} - engines: {node: '>=14'} + jackspeak@3.4.3: + resolution: {integrity: sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==} jju@1.4.0: 
resolution: {integrity: sha512-8wb9Yw966OSxApiCt0K3yNJL8pnNeIv+OEq2YMidz4FKP6nonSRoOXc80iXY4JaN2FC11B9qsNmDsm+ZOfMROA==} - jose@4.15.5: - resolution: {integrity: sha512-jc7BFxgKPKi94uOvEmzlSWFFe2+vASyXaKUpdQKatWAESU2MWjDfFf0fdfc83CDKcA5QecabZeNLyfhe3yKNkg==} + jose@4.15.9: + resolution: {integrity: sha512-1vUQX+IdDMVPj4k8kOxgUqlcK518yluMuGZwqlr44FS1ppZB/5GWh4rZG89erpOBOJjU/OBsnCVFfapsRz6nEA==} js-tokens@4.0.0: resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} @@ -2579,9 +2349,6 @@ packages: json-buffer@3.0.1: resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==} - json-parse-even-better-errors@2.3.1: - resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} - json-parse-even-better-errors@3.0.2: resolution: {integrity: sha512-fi0NG4bPjCHunUJffmLd0gxssIgkNmArMvis4iNah6Owg1MCJjWhEcDLmsK6iGkJq3tHwbDkTlce70/tmXN4cQ==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} @@ -2617,10 +2384,6 @@ packages: keyv@4.5.4: resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} - kind-of@6.0.3: - resolution: {integrity: sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==} - engines: {node: '>=0.10.0'} - kleur@4.1.5: resolution: {integrity: sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==} engines: {node: '>=6'} @@ -2639,17 +2402,6 @@ packages: light-my-request@5.13.0: resolution: {integrity: sha512-9IjUN9ZyCS9pTG+KqTDEQo68Sui2lHsYBrfMyVUTTZ3XhH8PMZq7xO94Kr+eP9dhi/kcKsx4N41p2IXEBil1pQ==} - lines-and-columns@1.2.4: - resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==} - - load-yaml-file@0.2.0: - resolution: {integrity: sha512-OfCBkGEw4nN6JLtgRidPX6QxjBQGQf72q3si2uvqyFEMbycSFFHwAZeXx6cJgFM9wmLrf9zBwCP3Ivqa+LLZPw==} - engines: {node: '>=6'} - - local-pkg@0.4.3: - resolution: {integrity: sha512-SFppqq5p42fe2qcZQqqEOiVRXl+WCP1MdT6k7BDEW1j++sp5fIY+/fdRQitvKgB5BrBcmrs5m/L0v2FrU5MY1g==} - engines: {node: '>=14'} - locate-path@5.0.0: resolution: {integrity: sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==} engines: {node: '>=8'} @@ -2671,16 +2423,16 @@ packages: resolution: {integrity: sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==} engines: {node: '>=10'} - logform@2.6.0: - resolution: {integrity: sha512-1ulHeNPp6k/LD8H91o7VYFBng5i1BDE7HoKxVbZiGFidS1Rj65qcywLxX+pVfAPoQJEjRdvKcusKwOupHCVOVQ==} + logform@2.6.1: + resolution: {integrity: sha512-CdaO738xRapbKIMVn2m4F6KTj4j7ooJ8POVnebSgKo3KBz5axNXRAL7ZdRjIV6NOr2Uf4vjtRkxrFETOioCqSA==} engines: {node: '>= 12.0.0'} + long@5.2.3: + resolution: {integrity: sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==} + lossless-json@2.0.11: resolution: {integrity: sha512-BP0vn+NGYvzDielvBZaFain/wgeJ1hTvURCqtKvhr1SCPePdaaTanmmcplrHfEJSJOUql7hk4FHwToNJjWRY3g==} - loupe@2.3.7: - resolution: {integrity: sha512-zSMINGVYkdpYSOBmLi0D1Uo7JU9nVdQKrHxC8eYlV+9YKK9WePqAlL7lSlorG/U2Fw1w0hTBmaa/jrQ3UbPHtA==} - loupe@3.1.1: resolution: {integrity: sha512-edNu/8D5MKVfGVFRhFf8aAxiTM6Wumfz5XsaatSxlD3w4R1d/WEKUTydCdPGbl9K7QG/Ca3GnDV2sIKIpXRQcw==} @@ -2688,9 +2440,8 @@ packages: resolution: {integrity: 
sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - lru-cache@10.2.2: - resolution: {integrity: sha512-9hp3Vp2/hFQUiIwKo8XCeFVnrg8Pk3TYNPIR7tJADKi5YfcF7vEaK7avFHTlSy3kOKYaJQaalfEo6YuXdceBOQ==} - engines: {node: 14 || >=16.14} + lru-cache@10.4.3: + resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==} lru-cache@4.1.5: resolution: {integrity: sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==} @@ -2699,8 +2450,12 @@ packages: resolution: {integrity: sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==} engines: {node: '>=12'} - magic-string@0.30.10: - resolution: {integrity: sha512-iIRwTIf0QKV3UAnYK4PU8uiEc4SRh5jX0mwpIwETPpHdhVM4f53RSwS/vXvN1JhGX+Cs7B8qIq3d6AH49O5fAQ==} + lru.min@1.1.1: + resolution: {integrity: sha512-FbAj6lXil6t8z4z3j0E5mfRlPzxkySotzUHwRXjlpRh10vc6AI6WN62ehZj82VG7M20rqogJ0GLwar2Xa05a8Q==} + engines: {bun: '>=1.0.0', deno: '>=1.30.0', node: '>=8.0.0'} + + magic-string@0.30.11: + resolution: {integrity: sha512-+Wri9p0QHMy+545hKww7YAu5NyzF8iomPL/RQazugQ9+Ez4Ic3mERMd8ZTX5rfK944j+560ZJi8iAwgak1Ac7A==} make-error@1.3.6: resolution: {integrity: sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==} @@ -2713,27 +2468,12 @@ packages: resolution: {integrity: sha512-rLWS7GCSTcEujjVBs2YqG7Y4643u8ucvCJeSRqiLYhesrDuzeuFIk37xREzAsfQaqzl8b9rNCE4m6J8tvX4Q8w==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} - map-obj@1.0.1: - resolution: {integrity: sha512-7N/q3lyZ+LVCp7PzuxrJr4KMbBE2hW7BT7YNia330OFxIf4d3r5zVpicP2650l7CPN6RM9zOJRl3NGpqSiw3Eg==} - engines: {node: '>=0.10.0'} - - map-obj@4.3.0: - resolution: {integrity: sha512-hdN1wVrZbb29eBGiGjJbeP8JbKjq1urkHJ/LIP/NY48MZ1QVXUsQBV1G1zvYFHn1XE06cwjBsOI2K3Ulnj1YXQ==} - engines: {node: '>=8'} - map-stream@0.1.0: resolution: {integrity: sha512-CkYQrPYZfWnu/DAmVCpTSX/xHpKZ80eKh2lAkyA6AJTef6bW+6JpbQZN5rofum7da+SyN1bi5ctTm+lTfcCW3g==} memory-pager@1.5.0: resolution: {integrity: sha512-ZS4Bp4r/Zoeq6+NLJpP+0Zzm0pR8whtGPf1XExKLJBAczGMnSi3It14OiNCStjQjM6NU1okjQGSxgEZN8eBYKg==} - meow@6.1.1: - resolution: {integrity: sha512-3YffViIt2QWgTy6Pale5QpopX/IvU3LPL03jOTqp6pGj3VjesdO/U8CuHMKpnQr4shCNCM5fd5XFFvIIl6JBHg==} - engines: {node: '>=8'} - - merge-stream@2.0.0: - resolution: {integrity: sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==} - merge2@1.4.1: resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} engines: {node: '>= 8'} @@ -2746,10 +2486,6 @@ packages: resolution: {integrity: sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==} engines: {node: '>=6'} - mimic-fn@4.0.0: - resolution: {integrity: sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==} - engines: {node: '>=12'} - mimic-response@3.1.0: resolution: {integrity: sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==} engines: {node: '>=10'} @@ -2758,10 +2494,6 @@ packages: resolution: {integrity: sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - min-indent@1.0.1: - resolution: {integrity: 
sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==} - engines: {node: '>=4'} - minimatch@3.1.2: resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} @@ -2769,14 +2501,10 @@ packages: resolution: {integrity: sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==} engines: {node: '>=10'} - minimatch@9.0.4: - resolution: {integrity: sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw==} + minimatch@9.0.5: + resolution: {integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==} engines: {node: '>=16 || 14 >=14.17'} - minimist-options@4.1.0: - resolution: {integrity: sha512-Q4r8ghd80yhO/0j1O3B2BjweX3fiHg9cdOwjJd2J76Q135c+NDxGCqdYKQ1SKBuFfgWbAUzBfvYjPUEeNgqN1A==} - engines: {node: '>= 6'} - minimist@1.2.8: resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} @@ -2823,18 +2551,11 @@ packages: resolution: {integrity: sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==} engines: {node: '>= 8'} - mixme@0.5.10: - resolution: {integrity: sha512-5H76ANWinB1H3twpJ6JY8uvAtpmFvHNArpilJAjXRKXSDDLPIMoZArw5SH0q9z+lLs8IrMw7Q2VWpWimFKFT1Q==} - engines: {node: '>= 8.0.0'} - mkdirp@1.0.4: resolution: {integrity: sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==} engines: {node: '>=10'} hasBin: true - mlly@1.7.0: - resolution: {integrity: sha512-U9SDaXGEREBYQgfejV97coK0UL1r+qnF2SyO9A3qcI8MzKnsIFKHNVEkrDyNncQTKQQumsasmeq84eNMdBfsNQ==} - mnemonist@0.39.5: resolution: {integrity: sha512-FPUtkhtJ0efmEFGpU14x7jGbTB+s18LrzRL2KgoWz9YvcY3cPomz8tih01GbHwnGk/OmkOKfqd/RAQoc8Lm7DQ==} @@ -2844,8 +2565,8 @@ packages: mongodb-connection-string-url@3.0.1: resolution: {integrity: sha512-XqMGwRX0Lgn05TDB4PyG2h2kKO/FfWJyCzYQbIhXUxz7ETt0I/FqHjUeqj37irJ+Dl1ZtU82uYyj14u2XsZKfg==} - mongodb@6.7.0: - resolution: {integrity: sha512-TMKyHdtMcO0fYBNORiYdmM25ijsHs+Njs963r4Tro4OQZzqYigAzYQouwWRg4OIaiLRUEGUh/1UAcH5lxdSLIA==} + mongodb@6.8.0: + resolution: {integrity: sha512-HGQ9NWDle5WvwMnrvUxsFYPd3JEbqD3RgABHBQRuoCEND0qzhsd0iH5ypHsf1eJ+sXmvmyKpP+FLOKY8Il7jMw==} engines: {node: '>=16.20.1'} peerDependencies: '@aws-sdk/credential-providers': ^3.188.0 @@ -2874,6 +2595,10 @@ packages: moo@0.5.2: resolution: {integrity: sha512-iSAJLHYKnX41mKcJKjqvnAN9sf0LMDTXDEvFv+ffuRR9a1MIuXLjMNL6EsnDHSkKLTWNqQQ5uo61P4EbU4NU+Q==} + mri@1.2.0: + resolution: {integrity: sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==} + engines: {node: '>=4'} + ms@2.1.2: resolution: {integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==} @@ -2884,6 +2609,14 @@ packages: resolution: {integrity: sha512-avsJQhyd+680gKXyG/sQc0nXaC6rBkPOfyHYcFb9+hdkqQkR9bdnkJ0AMZhke0oesPqIO+mFFJ+IdBc7mst4IA==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} + mysql2@3.11.3: + resolution: {integrity: sha512-Qpu2ADfbKzyLdwC/5d4W7+5Yz7yBzCU05YWt5npWzACST37wJsB23wgOSo00qi043urkiRwXtEvJc9UnuLX/MQ==} + engines: {node: '>= 8.0'} + + named-placeholders@1.1.3: + resolution: {integrity: sha512-eLoBxg6wE/rZkJPhU/xRX1WTpkFEwDJEN96oxFrTsqBdbT5ec295Q+CoHrL9IT0DipqKhmGcaZmwOt8OON5x1w==} + engines: {node: '>=12.0.0'} + nanoid@3.3.7: resolution: {integrity: 
sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==} engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} @@ -2913,8 +2646,8 @@ packages: engines: {node: ^12.13 || ^14.13 || >=16} hasBin: true - nodemon@3.1.1: - resolution: {integrity: sha512-k43xGaDtaDIcufn0Fc6fTtsdKSkV/hQzoQFigNH//GaKta28yoKVYXCnV+KXRqfT/YzsFaQU9VdeEG+HEyxr6A==} + nodemon@3.1.4: + resolution: {integrity: sha512-wjPBbFhtpJwmIeY2yP7QF+UKzPfltVGtfce1g/bB15/8vCGZj8uxD62b/b9M9/WVgme0NZudpownKN+c0plXlQ==} engines: {node: '>=10'} hasBin: true @@ -2926,9 +2659,6 @@ packages: engines: {node: ^12.13.0 || ^14.15.0 || >=16.0.0} hasBin: true - normalize-package-data@2.5.0: - resolution: {integrity: sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==} - normalize-package-data@5.0.0: resolution: {integrity: sha512-h9iPVIfrVZ9wVYQnxFgtw1ugSvGEMOlyPWWtm8BMJhnwyEL/FLbYbTY3V3PpjI/BUK67n9PEWDu6eHzu1fB15Q==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} @@ -2950,6 +2680,11 @@ packages: engines: {node: '>=14.14'} hasBin: true + npm-check-updates@17.1.3: + resolution: {integrity: sha512-4uDLBWPuDHT5KLieIJ20FoAB8yqJejmupI42wPyfObgQOBbPAikQSwT73afDwREvhuxYrRDqlRvxTMSfvO+L8A==} + engines: {node: ^18.18.0 || >=20.0.0, npm: '>=8.12.1'} + hasBin: true + npm-install-checks@6.3.0: resolution: {integrity: sha512-W29RiK/xtpCGqn6f3ixfRYGk+zRyr+Ew9F2E20BfXxT5/euLdA/Nm7fO7OeTGuAmTs30cpgInyJ0cYe708YTZw==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} @@ -2974,10 +2709,6 @@ packages: resolution: {integrity: sha512-kIDMIo4aBm6xg7jOttupWZamsZRkAqMqwqqbVXnUqstY5+tapvv6bkH/qMR76jdgV+YljEUCyWx3hRYMrJiAgA==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} - npm-run-path@5.3.0: - resolution: {integrity: sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==} - engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - npmlog@6.0.2: resolution: {integrity: sha512-/vBvz5Jfr9dT/aFWd0FIRf+T/Q2WBsLENygUaFUqstqsycmZAP/t5BvFJTK0viFmSUxiUKTUplWy5vt+rvKIxg==} engines: {node: ^12.13.0 || ^14.15.0 || >=16.0.0} @@ -2987,17 +2718,6 @@ packages: resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} engines: {node: '>=0.10.0'} - object-inspect@1.13.1: - resolution: {integrity: sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==} - - object-keys@1.1.1: - resolution: {integrity: sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==} - engines: {node: '>= 0.4'} - - object.assign@4.1.5: - resolution: {integrity: sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==} - engines: {node: '>= 0.4'} - obliterator@2.0.4: resolution: {integrity: sha512-lgHwxlxV1qIg1Eap7LgIeoBWIMFibOjbrYPIPJZcI1mmGAI2m3lNYpK12Y+GBdPQ0U1hRwSord7GIaawz962qQ==} @@ -3015,13 +2735,11 @@ packages: resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==} engines: {node: '>=6'} - onetime@6.0.0: - resolution: {integrity: sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==} - engines: {node: '>=12'} - - opentelemetry-instrumentation-fetch-node@1.2.0: - resolution: {integrity: sha512-aiSt/4ubOTyb1N5C2ZbGrBvaJOXIZhZvpRPYuUVxQJe27wJZqf/o65iPrqgLcgfeOLaQ8cS2Q+762jrYvniTrA==} + opentelemetry-instrumentation-fetch-node@1.2.3: + resolution: {integrity: 
sha512-Qb11T7KvoCevMaSeuamcLsAD+pZnavkhDnlVL0kRozfhl42dKG5Q3anUklAFKJZjY3twLR+BnRa6DlwwkIE/+A==} engines: {node: '>18.0.0'} + peerDependencies: + '@opentelemetry/api': ^1.6.0 ora@5.4.1: resolution: {integrity: sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ==} @@ -3050,10 +2768,6 @@ packages: resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} engines: {node: '>=10'} - p-limit@4.0.0: - resolution: {integrity: sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==} - engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - p-locate@4.1.0: resolution: {integrity: sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==} engines: {node: '>=8'} @@ -3074,24 +2788,26 @@ packages: resolution: {integrity: sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==} engines: {node: '>=6'} + package-json-from-dist@1.0.0: + resolution: {integrity: sha512-dATvCeZN/8wQsGywez1mzHtTlP22H8OEfPrVMLNr4/eGa+ijtLn/6M5f0dY8UKNrC2O9UCU6SSoG3qRKnt7STw==} + package-json@8.1.1: resolution: {integrity: sha512-cbH9IAIJHNj9uXi196JVsRlt7cHKak6u/e6AkL/bkRelZ7rlL3X1YKxsZwa36xipOEKAsdtmaG6aAJoM1fx2zA==} engines: {node: '>=14.16'} + package-manager-detector@0.2.0: + resolution: {integrity: sha512-E385OSk9qDcXhcM9LNSe4sdhx8a9mAPrZ4sMLW+tmxl5ZuGtPUcdFu+MPP2jbgiWAZ6Pfe5soGFMd+0Db5Vrog==} + pacote@15.2.0: resolution: {integrity: sha512-rJVZeIwHTUta23sIZgEIM62WYwbmGbThdbnkt81ravBplQv+HjyroqnLRNH2+sLJHcGZmLRmhPwACqhfTcOmnA==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} hasBin: true - parse-github-url@1.0.2: - resolution: {integrity: sha512-kgBf6avCbO3Cn6+RnzRGLkUsv4ZVqv/VfAYkRsyBcgkshNvVBkRn1FEZcW0Jb+npXQWm2vHPnnOqFteZxRRGNw==} - engines: {node: '>=0.10.0'} + parse-github-url@1.0.3: + resolution: {integrity: sha512-tfalY5/4SqGaV/GIGzWyHnFjlpTPTNpENR9Ea2lLldSJ8EWXMsvacWucqY3m3I4YPtas15IxTLQVQ5NSYXPrww==} + engines: {node: '>= 0.10'} hasBin: true - parse-json@5.2.0: - resolution: {integrity: sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==} - engines: {node: '>=8'} - path-exists@4.0.0: resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} engines: {node: '>=8'} @@ -3104,10 +2820,6 @@ packages: resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} engines: {node: '>=8'} - path-key@4.0.0: - resolution: {integrity: sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==} - engines: {node: '>=12'} - path-parse@1.0.7: resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} @@ -3122,9 +2834,6 @@ packages: pathe@1.1.2: resolution: {integrity: sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==} - pathval@1.1.1: - resolution: {integrity: sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==} - pathval@2.0.0: resolution: {integrity: sha512-vE7JKRyES09KiunauX7nd2Q9/L7lhok4smP9RZTDeD4MVs72Dp2qNFVz39Nz5a0FVEW0BJR6C0DYrq6unoziZA==} engines: {node: '>= 14.16'} @@ -3154,6 +2863,9 @@ packages: picocolors@1.0.1: resolution: {integrity: sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==} + picocolors@1.1.0: + 
resolution: {integrity: sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw==} + picomatch@2.3.1: resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} engines: {node: '>=8.6'} @@ -3172,19 +2884,8 @@ packages: resolution: {integrity: sha512-ip4qdzjkAyDDZklUaZkcRFb2iA118H9SgRh8yzTkSQK8HilsOJF7rSY8HoW5+I0M46AZgX/pxbprf2vvzQCE0Q==} hasBin: true - pkg-dir@4.2.0: - resolution: {integrity: sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==} - engines: {node: '>=8'} - - pkg-types@1.1.1: - resolution: {integrity: sha512-ko14TjmDuQJ14zsotODv7dBlwxKhUKQEhuhmbqo1uCi9BB0Z2alo/wAXg6q1dTR5TyuqYyWhjtfe/Tsh+X28jQ==} - - possible-typed-array-names@1.0.0: - resolution: {integrity: sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q==} - engines: {node: '>= 0.4'} - - postcss@8.4.38: - resolution: {integrity: sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==} + postcss@8.4.39: + resolution: {integrity: sha512-0vzE+lAiG7hZl1/9I8yzKLx3aR9Xbof3fBHKunvMfOCYAtMhrsnccJY2iTURb9EZd5+pLuiNV9/c/GZJOHsgIw==} engines: {node: ^10 || ^12 || >=14} postgres-array@2.0.0: @@ -3203,18 +2904,15 @@ packages: resolution: {integrity: sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==} engines: {node: '>=0.10.0'} - preferred-pm@3.1.3: - resolution: {integrity: sha512-MkXsENfftWSRpzCzImcp4FRsCc3y1opwB73CfCNWyzMqArju2CrlMHlqB7VexKiPEOjGMbttv1r9fSCn5S610w==} - engines: {node: '>=10'} - prettier@2.8.8: resolution: {integrity: sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q==} engines: {node: '>=10.13.0'} hasBin: true - pretty-format@29.7.0: - resolution: {integrity: sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==} - engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + prettier@3.3.3: + resolution: {integrity: sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew==} + engines: {node: '>=14'} + hasBin: true proc-log@3.0.0: resolution: {integrity: sha512-++Vn7NS4Xf9NacaU9Xq3URUuqZETPsf8L4j5/ckhaRYsfPeRyzGw+iDjFhV/Jr3uNmTvvddEJFWh5R1gRgUH8A==} @@ -3285,10 +2983,6 @@ packages: quick-format-unescaped@4.0.4: resolution: {integrity: sha512-tYC1Q1hgyRuHgloV/YXs2w15unPVh8qfu/qCTfhTYamaw7fyhumKa2yGpdSo87vY32rIclj+4fWYQXUMs9EHvg==} - quick-lru@4.0.1: - resolution: {integrity: sha512-ARhCpm70fzdcvNQfPoy49IaanKkTlRWF2JMzqhcJbhSFRZv7nPTvZJdcY7301IPmvW+/p0RgIWnQDLJxifsQ7g==} - engines: {node: '>=8'} - quick-lru@5.1.1: resolution: {integrity: sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==} engines: {node: '>=10'} @@ -3307,9 +3001,6 @@ packages: resolution: {integrity: sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==} hasBin: true - react-is@18.3.1: - resolution: {integrity: sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==} - read-package-json-fast@3.0.2: resolution: {integrity: sha512-0J+Msgym3vrLOUB3hzQCuZHII0xkNGCtz/HJH9xZshwv9DbDwkw1KaE3gx/e2J5rpEY5rtOy6cyhKOPrkP7FZw==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} @@ -3319,14 +3010,6 @@ packages: engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} deprecated: This package is no longer supported. Please use @npmcli/package-json instead. 
- read-pkg-up@7.0.1: - resolution: {integrity: sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==} - engines: {node: '>=8'} - - read-pkg@5.2.0: - resolution: {integrity: sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==} - engines: {node: '>=8'} - read-yaml-file@1.1.0: resolution: {integrity: sha512-VIMnQi/Z4HT2Fxuwg5KrY174U1VdUIASQVWXXyqtNRtxSr9IYkn1rsI6Tb6HsrHCmB7gVpNwX6JxPTHcH6IoTA==} engines: {node: '>=6'} @@ -3334,6 +3017,9 @@ packages: readable-stream@1.0.34: resolution: {integrity: sha512-ok1qVCJuRkNmvebYikljxJA/UEsKwLl2nI1OmaqAu4/UE+h0wKCHok4XkL/gvi39OacXvw59RJUOFUkDib2rHg==} + readable-stream@2.3.7: + resolution: {integrity: sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==} + readable-stream@2.3.8: resolution: {integrity: sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==} @@ -3353,17 +3039,9 @@ packages: resolution: {integrity: sha512-57frrGM/OCTLqLOAh0mhVA9VBMHd+9U7Zb2THMGdBUoZVOtGbJzjxsYGDJ3A9AYYCP4hn6y1TVbaOfzWtm5GFg==} engines: {node: '>= 12.13.0'} - redent@3.0.0: - resolution: {integrity: sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==} - engines: {node: '>=8'} - regenerator-runtime@0.14.1: resolution: {integrity: sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==} - regexp.prototype.flags@1.5.2: - resolution: {integrity: sha512-NcDiDkTLuPR+++OCKB0nWafEmhg/Da8aUPLPMQbK+bxKKCm1/S5he+AqYa4PlMCVBalb4/yxIRub6qkEx5yJbw==} - engines: {node: '>= 0.4'} - registry-auth-token@5.0.2: resolution: {integrity: sha512-o/3ikDxtXaA59BmZuZrJZDJv8NMDGSj+6j6XaeBmHw8eY1i1qd9+6H+LjVvQXx3HN6aRCGa1cUdJ9RaJZUugnQ==} engines: {node: '>=14'} @@ -3388,9 +3066,6 @@ packages: resolution: {integrity: sha512-nQFEv9gRw6SJAwWD2LrL0NmQvAcO7FBwJbwmr2ttPAacfy0xuiOjE5zt+zM4xDyuyvUaxBi/9gb2SoCyNEVJcw==} engines: {node: '>=8.6.0'} - require-main-filename@2.0.0: - resolution: {integrity: sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==} - resolve-alpn@1.2.1: resolution: {integrity: sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==} @@ -3426,8 +3101,8 @@ packages: resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==} engines: {iojs: '>=1.0.0', node: '>=0.10.0'} - rfdc@1.3.1: - resolution: {integrity: sha512-r5a3l5HzYlIC68TpmYKlxWjmOP6wiPJ1vWv2HeLhNsRZMrCkxeqxiHlQ21oXmQ4F3SiryXBHhAD7JZqvOJjFmg==} + rfdc@1.4.1: + resolution: {integrity: sha512-q1b3N5QkRUWUl7iyylaaj3kOpIT0N2i9MqIEQXP73GVsN9cw3fdx8X63cEmWhJGi2PPCF23Ijp7ktmd39rawIA==} rimraf@2.7.1: resolution: {integrity: sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==} @@ -3439,13 +3114,13 @@ packages: deprecated: Rimraf versions prior to v4 are no longer supported hasBin: true - rimraf@5.0.7: - resolution: {integrity: sha512-nV6YcJo5wbLW77m+8KjH8aB/7/rxQy9SZ0HY5shnwULfS+9nmTtVXAJET5NdZmCzA4fPI/Hm1wo/Po/4mopOdg==} - engines: {node: '>=14.18'} + rimraf@5.0.9: + resolution: {integrity: sha512-3i7b8OcswU6CpU8Ej89quJD4O98id7TtVM5U4Mybh84zQXdrFmDLouWBEEaD/QfO3gDDfH+AGFCGsR7kngzQnA==} + engines: {node: 14 >=14.20 || 16 >=16.20 || >=18} hasBin: true - rollup@4.18.0: - resolution: {integrity: sha512-QmJz14PX3rzbJCN1SG4Xe/bAAX2a6NpCP8ab2vfu2GiUr8AQcr2nCV/oEO3yneFarB67zk8ShlIyWb2LGTb3Sg==} + 
rollup@4.18.1: + resolution: {integrity: sha512-Elx2UT8lzxxOXMpy5HWQGZqkrQOtrVDDa/bm9l10+U4rQnVzbL/LgZ4NOM1MPIDyHk69W4InuYDF5dzRh4Kw1A==} engines: {node: '>=18.0.0', npm: '>=8.0.0'} hasBin: true @@ -3465,20 +3140,12 @@ packages: rxjs@7.8.1: resolution: {integrity: sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==} - safe-array-concat@1.1.2: - resolution: {integrity: sha512-vj6RsCsWBCf19jIeHEfkRMw8DPiBb+DMXklQ/1SGDHOMlHdPUkZXFQ2YdplS23zESTijAcurb1aSgJA3AgMu1Q==} - engines: {node: '>=0.4'} - safe-buffer@5.1.2: resolution: {integrity: sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==} safe-buffer@5.2.1: resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==} - safe-regex-test@1.0.3: - resolution: {integrity: sha512-CdASjNJPvRa7roO6Ra/gLYBTzYzzPyyBXxIMdGW3USQLyjWEls2RgW5UBTXaQVp+OrpeCK3bLem8smtmheoRuw==} - engines: {node: '>= 0.4'} - safe-regex2@2.0.0: resolution: {integrity: sha512-PaUSFsUaNNuKwkBijoAPHAK6/eM6VirvyPWlZ7BAQy4D+hCvh4B6lIG+nPdhbFfIbP+gTGBcrdsOaUs0F+ZBOQ==} @@ -3499,29 +3166,20 @@ packages: semver-utils@1.1.4: resolution: {integrity: sha512-EjnoLE5OGmDAVV/8YDoN5KiajNadjzIp9BAHOhYeQHt7j0UWxjmgsx4YD48wp4Ue1Qogq38F1GNUJNqF1kKKxA==} - semver@5.7.2: - resolution: {integrity: sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==} - hasBin: true - semver@7.6.2: resolution: {integrity: sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==} engines: {node: '>=10'} hasBin: true + seq-queue@0.0.5: + resolution: {integrity: sha512-hr3Wtp/GZIc/6DAGPDcV4/9WoZhjrkXsi5B/07QgX8tsdc6ilr7BFM6PM6rbdAX1kFSDYeZGLipIZZKyQP0O5Q==} + set-blocking@2.0.0: resolution: {integrity: sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==} set-cookie-parser@2.6.0: resolution: {integrity: sha512-RVnVQxTXuerk653XfuliOxBP81Sf0+qfQE73LIYKcyMYHG94AuH0kgrQpRDuTZnSmjpysHmzxJXKNfa6PjFhyQ==} - set-function-length@1.2.2: - resolution: {integrity: sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==} - engines: {node: '>= 0.4'} - - set-function-name@2.0.2: - resolution: {integrity: sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==} - engines: {node: '>= 0.4'} - shebang-command@1.2.0: resolution: {integrity: sha512-EV3L1+UQWGor21OmnvojK36mhg+TyIKDh3iFBKBohr5xeXIhNBcx8oWdgkTEEQ+BEFFYdLRuqMfd5L84N1V5Vg==} engines: {node: '>=0.10.0'} @@ -3544,10 +3202,6 @@ packages: shimmer@1.2.1: resolution: {integrity: sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw==} - side-channel@1.0.6: - resolution: {integrity: sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==} - engines: {node: '>= 0.4'} - siginfo@2.0.0: resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==} @@ -3581,11 +3235,6 @@ packages: resolution: {integrity: sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==} engines: {node: '>= 6.0.0', npm: '>= 3.0.0'} - smartwrap@2.0.2: - resolution: {integrity: sha512-vCsKNQxb7PnCNd2wY1WClWifAc2lwqsG8OaswpJkVJsvMGcnEntdTCDajZCkk93Ay1U3t/9puJmb525Rg5MZBA==} - engines: {node: '>=6'} - hasBin: true - socks-proxy-agent@7.0.0: resolution: {integrity: 
sha512-Fgl0YPZ902wEsAyiQ+idGd1A7rSFx/ayC1CQVMw5P+EQx2V0SgpGtf6OKFhVjPflPUl9YMmEOnmfjCdMUsygww==} engines: {node: '>= 10'} @@ -3646,6 +3295,10 @@ packages: sprintf-js@1.1.3: resolution: {integrity: sha512-Oo+0REFV59/rz3gfJNKQiBlwfHaSESl1pcGyABQsnnIfWOFt6JNj5gCog2U6MLZ//IGYD+nA8nI+mTShREReaA==} + sqlstring@2.3.3: + resolution: {integrity: sha512-qC9iz2FlN7DQl3+wjwn3802RTyjCx7sDvfQEXchwa6CWOx07/WVfh91gBmQ9fahw8snwGEWU3xGzOt4tFyHLxg==} + engines: {node: '>= 0.6'} + ssri@10.0.6: resolution: {integrity: sha512-MGrFH9Z4NP9Iyhqn16sDtBpRRNJ0Y2hNa6D65h736fVSaPCHr4DM4sWUNvVaSuC+0OBGhwsrydQwmgfg5LncqQ==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} @@ -3666,9 +3319,6 @@ packages: stream-combiner@0.0.4: resolution: {integrity: sha512-rT00SPnTVyRsaSz5zgSPma/aHSOic5U1prhYdRy5HS2kTZviFpmDgzilbtsJsxiroqACmayynDN/9VzIbX5DOw==} - stream-transform@2.1.3: - resolution: {integrity: sha512-9GHUiM5hMiCi6Y03jD2ARC1ettBXkQBoQAe7nJsPknnI0ow10aXjTnew8QtYQmLjzn974BnmWEAJgCY6ZP1DeQ==} - string-argv@0.3.2: resolution: {integrity: sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q==} engines: {node: '>=0.6.19'} @@ -3681,17 +3331,6 @@ packages: resolution: {integrity: sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==} engines: {node: '>=12'} - string.prototype.trim@1.2.9: - resolution: {integrity: sha512-klHuCNxiMZ8MlsOihJhJEBJAiMVqU3Z2nEXWfWnIqjN0gEFS9J9+IxKozWWtQGcgoa1WUZzLjKPTr4ZHNFTFxw==} - engines: {node: '>= 0.4'} - - string.prototype.trimend@1.0.8: - resolution: {integrity: sha512-p73uL5VCHCO2BZZ6krwwQE3kCzM7NKmis8S//xEC6fQonchbum4eP6kR4DLEjQFO3Wnj3Fuo8NM0kOSjVdHjZQ==} - - string.prototype.trimstart@1.0.8: - resolution: {integrity: sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==} - engines: {node: '>= 0.4'} - string_decoder@0.10.31: resolution: {integrity: sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ==} @@ -3713,14 +3352,6 @@ packages: resolution: {integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==} engines: {node: '>=4'} - strip-final-newline@3.0.0: - resolution: {integrity: sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==} - engines: {node: '>=12'} - - strip-indent@3.0.0: - resolution: {integrity: sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==} - engines: {node: '>=8'} - strip-json-comments@2.0.1: resolution: {integrity: sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==} engines: {node: '>=0.10.0'} @@ -3729,9 +3360,6 @@ packages: resolution: {integrity: sha512-0fk9zBqO67Nq5M/m45qHCJxylV/DhBlIOVExqgOMiCCrzrhU6tCibRXNqE3jwJLftzE9SNuZtYbpzcO+i9FiKw==} engines: {node: '>=14.16'} - strip-literal@1.3.0: - resolution: {integrity: sha512-PugKzOsyXpArk0yWmUwqOZecSO0GH0bPoctLcqNDH9J04pVW3lflYE0ujElBGTloevcxF5MofAOZ7C5l2b+wLg==} - supports-color@5.5.0: resolution: {integrity: sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==} engines: {node: '>=4'} @@ -3768,12 +3396,11 @@ packages: through@2.3.8: resolution: {integrity: sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==} - tinybench@2.8.0: - resolution: {integrity: sha512-1/eK7zUnIklz4JUUlL+658n58XO2hHLQfSk1Zf2LKieUjxidN16eKFEoDEfjHc3ohofSSqK3X5yO6VGb6iW8Lw==} + tinybench@2.9.0: + resolution: 
{integrity: sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==} - tinypool@0.7.0: - resolution: {integrity: sha512-zSYNUlYSMhJ6Zdou4cJwo/p7w5nmAH17GRfU/ui3ctvjXFErXXkruT4MWW6poDeXgCaIBlGLrfU6TbTXxyGMww==} - engines: {node: '>=14.0.0'} + tinyexec@0.3.0: + resolution: {integrity: sha512-tVGE0mVJPGb0chKhqmsoosjsS+qUnJVGJpZgsHYQcGoPlG3B51R3PouqTgEGH2Dc9jjFyOqOpix6ZHNMXp1FZg==} tinypool@1.0.1: resolution: {integrity: sha512-URZYihUbRPcGv95En+sz6MfghfIc2OJ1sv/RmhWZLouPY0/8Vo80viwPvg3dlaS9fuq7fQMEfgRRK7BBZThBEA==} @@ -3783,12 +3410,8 @@ packages: resolution: {integrity: sha512-weEDEq7Z5eTHPDh4xjX789+fHfF+P8boiFB+0vbWzpbnbsEr/GRaohi/uMKxg8RZMXnl1ItAi/IUHWMsjDV7kQ==} engines: {node: '>=14.0.0'} - tinyspy@2.2.1: - resolution: {integrity: sha512-KYad6Vy5VDWV4GH3fjpseMQ/XU2BhIYP7Vzd0LG44qRWm/Yt2WCOTicFdvmgo6gWaqooMQCawTtILVQJupKu7A==} - engines: {node: '>=14.0.0'} - - tinyspy@3.0.0: - resolution: {integrity: sha512-q5nmENpTHgiPVd1cJDDc9cVoYN5x4vCvwT3FMilvKPKneCBZAxn2YWQjDF0UMcE9k0Cay1gBiDfTMU0g+mPMQA==} + tinyspy@3.0.2: + resolution: {integrity: sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==} engines: {node: '>=14.0.0'} tmp@0.0.33: @@ -3815,10 +3438,6 @@ packages: resolution: {integrity: sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A==} hasBin: true - trim-newlines@3.0.1: - resolution: {integrity: sha512-c1PTsA3tYrIsLGkJkzHF+w9F2EyxfXGo4UyJc4pFL++FMjnq0HJS69T3M7d//gKrFKwy429bouPescbjecU+Zw==} - engines: {node: '>=8'} - triple-beam@1.4.1: resolution: {integrity: sha512-aZbgViZrg1QNcG+LULa7nhZpJTZSLm/mXnHXnbAbjmN5aSa0y7V+wvv6+4WaBtpISJzThKy+PIPxc1Nq1EJ9mg==} engines: {node: '>= 14.0.0'} @@ -3858,8 +3477,8 @@ packages: peerDependencies: typescript: '*' - tsconfck@3.0.3: - resolution: {integrity: sha512-4t0noZX9t6GcPTfBAbIbbIU4pfpCwh0ueq3S4O/5qXI1VwK1outmxhe9dOiEWqMz3MW2LKgDTpqWV+37IWuVbA==} + tsconfck@3.1.1: + resolution: {integrity: sha512-00eoI6WY57SvZEVjm13stEVE90VkEdJAFGgpFLTsZbJyW/LwFQ7uQxJHWpZ2hzSWgCPKc9AnBnNP+0X7o3hAmQ==} engines: {node: ^18 || >=20} hasBin: true peerDependencies: @@ -3871,38 +3490,17 @@ packages: tsconfig@7.0.0: resolution: {integrity: sha512-vZXmzPrL+EmC4T/4rVlT2jNVMWCi/O4DIiSj3UHg1OE5kCKbk4mfrXc6dZksLgRM/TZlKnousKH9bbTazUWRRw==} - tslib@2.6.2: - resolution: {integrity: sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==} - - tty-table@4.2.3: - resolution: {integrity: sha512-Fs15mu0vGzCrj8fmJNP7Ynxt5J7praPXqFN0leZeZBXJwkMxv9cb2D454k1ltrtUSJbZ4yH4e0CynsHLxmUfFA==} - engines: {node: '>=8.0.0'} - hasBin: true + tslib@2.6.3: + resolution: {integrity: sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==} tuf-js@1.1.7: resolution: {integrity: sha512-i3P9Kgw3ytjELUfpuKVDNBJvk4u5bXL6gskv572mcevPbSKCV3zt3djhmlEQ65yERjIbOSncy7U4cQJaB1CBCg==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} - type-detect@4.0.8: - resolution: {integrity: sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==} - engines: {node: '>=4'} - - type-fest@0.13.1: - resolution: {integrity: sha512-34R7HTnG0XIJcBSn5XhDd7nNFPRcXYRZrBB2O2jdKqYODldSzBAqzsWoZYYvduky73toYS/ESqxPvkDf/F0XMg==} - engines: {node: '>=10'} - type-fest@0.21.3: resolution: {integrity: sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==} engines: {node: '>=10'} - type-fest@0.6.0: - resolution: {integrity: 
sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==} - engines: {node: '>=8'} - - type-fest@0.8.1: - resolution: {integrity: sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==} - engines: {node: '>=8'} - type-fest@1.4.0: resolution: {integrity: sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==} engines: {node: '>=10'} @@ -3911,46 +3509,19 @@ packages: resolution: {integrity: sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==} engines: {node: '>=12.20'} - typed-array-buffer@1.0.2: - resolution: {integrity: sha512-gEymJYKZtKXzzBzM4jqa9w6Q1Jjm7x2d+sh19AdsD4wqnMPDYyvwpsIc2Q/835kHuo3BEQ7CjelGhfTsoBb2MQ==} - engines: {node: '>= 0.4'} - - typed-array-byte-length@1.0.1: - resolution: {integrity: sha512-3iMJ9q0ao7WE9tWcaYKIptkNBuOIcZCCT0d4MRvuuH88fEoEH62IuQe0OtraD3ebQEoTRk8XCBoknUNc1Y67pw==} - engines: {node: '>= 0.4'} - - typed-array-byte-offset@1.0.2: - resolution: {integrity: sha512-Ous0vodHa56FviZucS2E63zkgtgrACj7omjwd/8lTEMEPFFyjfixMZ1ZXenpgCFBBt4EC1J2XsyVS2gkG0eTFA==} - engines: {node: '>= 0.4'} - - typed-array-length@1.0.6: - resolution: {integrity: sha512-/OxDN6OtAk5KBpGb28T+HZc2M+ADtvRxXrKKbUwtsLgdoxgX13hyy7ek6bFRl5+aBs2yZzB0c4CnQfAtVypW/g==} - engines: {node: '>= 0.4'} - typedarray-to-buffer@3.1.5: resolution: {integrity: sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==} - typescript@5.2.2: - resolution: {integrity: sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==} - engines: {node: '>=14.17'} - hasBin: true - - typescript@5.4.5: - resolution: {integrity: sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==} + typescript@5.6.2: + resolution: {integrity: sha512-NW8ByodCSNCwZeghjN3o+JX5OFH0Ojg6sadjEKY4huZ52TqbJTJnDo5+Tw98lSy63NZvi4n+ez5m2u5d4PkZyw==} engines: {node: '>=14.17'} hasBin: true - ufo@1.5.3: - resolution: {integrity: sha512-Y7HYmWaFwPUmkoQCUIAYpKqkOf+SbVj/2fJJZ4RJMCfZp0rTGwRbzQD+HghfnhKOjL9E01okqz+ncJskGYfBNw==} - - unbox-primitive@1.0.2: - resolution: {integrity: sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==} - undefsafe@2.0.5: resolution: {integrity: sha512-WxONCrssBM8TSPRqN5EmsjVrsv4A8X12J4ArBiiayv3DyyG3ZlIg6yysuuSYdZsVz3TKcTg2fd//Ujd4CHV1iA==} - undici-types@5.26.5: - resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} + undici-types@6.19.8: + resolution: {integrity: sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==} unique-filename@2.0.1: resolution: {integrity: sha512-ODWHtkkdx3IAR+veKxFV+VBkUMcN+FaqzUUd7IZzt+0zhDZFPFxhlqwPF3YQvMHx1TD0tdgYl+kuPnJ8E6ql7A==} @@ -4008,13 +3579,8 @@ packages: resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==} engines: {node: '>= 0.8'} - vite-node@0.34.6: - resolution: {integrity: sha512-nlBMJ9x6n7/Amaz6F3zJ97EBwR2FkzhBRxF5e+jE6LA3yi6Wtc2lyTij1OnDMIr34v5g/tVQtsVAzhT0jc5ygA==} - engines: {node: '>=v14.18.0'} - hasBin: true - - vite-node@2.0.5: - resolution: {integrity: sha512-LdsW4pxj0Ot69FAoXZ1yTnA9bjGohr2yNBU7QKRxpz8ITSkhuDl6h3zS/tvgz4qrNjeRnvrWeXQ8ZF7Um4W00Q==} + vite-node@2.1.1: + resolution: {integrity: sha512-N/mGckI1suG/5wQI35XeR9rsMsPqKXzq1CdUndzVstBj/HvyxxGctwnK6WX43NGt5L3Z5tcRf83g4TITKJhPrA==} 
engines: {node: ^18.0.0 || >=20.0.0} hasBin: true @@ -4026,8 +3592,8 @@ packages: vite: optional: true - vite@5.2.11: - resolution: {integrity: sha512-HndV31LWW05i1BLPMUCE1B9E9GFbOu1MbenhS58FuK6owSO5qHm7GiCotrNY1YE5rMeQSFBGmT5ZaLEjFizgiQ==} + vite@5.3.3: + resolution: {integrity: sha512-NPQdeCU0Dv2z5fu+ULotpuq5yfCS1BzKUIPhNbP3YBfAMGJXbt2nS+sbTFu+qchaqWTD+H3JK++nRwr6XIcp6A==} engines: {node: ^18.0.0 || >=20.0.0} hasBin: true peerDependencies: @@ -4054,46 +3620,15 @@ packages: terser: optional: true - vitest@0.34.6: - resolution: {integrity: sha512-+5CALsOvbNKnS+ZHMXtuUC7nL8/7F1F2DnHGjSsszX8zCjWSSviphCb/NuS9Nzf4Q03KyyDRBAXhF/8lffME4Q==} - engines: {node: '>=v14.18.0'} - hasBin: true - peerDependencies: - '@edge-runtime/vm': '*' - '@vitest/browser': '*' - '@vitest/ui': '*' - happy-dom: '*' - jsdom: '*' - playwright: '*' - safaridriver: '*' - webdriverio: '*' - peerDependenciesMeta: - '@edge-runtime/vm': - optional: true - '@vitest/browser': - optional: true - '@vitest/ui': - optional: true - happy-dom: - optional: true - jsdom: - optional: true - playwright: - optional: true - safaridriver: - optional: true - webdriverio: - optional: true - - vitest@2.0.5: - resolution: {integrity: sha512-8GUxONfauuIdeSl5f9GTgVEpg5BTOlplET4WEDaeY2QBiN8wSm68vxN/tb5z405OwppfoCavnwXafiaYBC/xOA==} + vitest@2.1.1: + resolution: {integrity: sha512-97We7/VC0e9X5zBVkvt7SGQMGrRtn3KtySFQG5fpaMlS+l62eeXRQO633AYhSTC3z7IMebnPPNjGXVGNRFlxBA==} engines: {node: ^18.0.0 || >=20.0.0} hasBin: true peerDependencies: '@edge-runtime/vm': '*' '@types/node': ^18.0.0 || >=20.0.0 - '@vitest/browser': 2.0.5 - '@vitest/ui': 2.0.5 + '@vitest/browser': 2.1.1 + '@vitest/ui': 2.1.1 happy-dom: '*' jsdom: '*' peerDependenciesMeta: @@ -4125,20 +3660,6 @@ packages: resolution: {integrity: sha512-9WWbymnqj57+XEuqADHrCJ2eSXzn8WXIW/YSGaZtb2WKAInQ6CHfaUUcTyyver0p8BDg5StLQq8h1vtZuwmOig==} engines: {node: '>=16'} - which-boxed-primitive@1.0.2: - resolution: {integrity: sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==} - - which-module@2.0.1: - resolution: {integrity: sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ==} - - which-pm@2.0.0: - resolution: {integrity: sha512-Lhs9Pmyph0p5n5Z3mVnN0yWcbQYUAD7rbQUiMsQxOJ3T57k7RFe35SUwWMf7dsbDZks1uOmw4AecB/JMDj3v/w==} - engines: {node: '>=8.15'} - - which-typed-array@1.1.15: - resolution: {integrity: sha512-oV0jmFtUky6CXfkqehVvBP/LSWJ2sy4vWMioiENyJLePrBO/yKyV9OyJySfAKosh+RYkIl5zJCNZ8/4JncrpdA==} - engines: {node: '>= 0.4'} - which@1.3.1: resolution: {integrity: sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==} hasBin: true @@ -4153,11 +3674,6 @@ packages: engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} hasBin: true - why-is-node-running@2.2.2: - resolution: {integrity: sha512-6tSwToZxTOcotxHeA+qGCq1mVzKR3CwcJGmVcY+QE8SHy6TnpFnh8PAvPNHYr7EcuVeG0QSMxtYCuO1ta/G/oA==} - engines: {node: '>=8'} - hasBin: true - why-is-node-running@2.3.0: resolution: {integrity: sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==} engines: {node: '>=8'} @@ -4170,12 +3686,12 @@ packages: resolution: {integrity: sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==} engines: {node: '>=12'} - winston-transport@4.7.0: - resolution: {integrity: sha512-ajBj65K5I7denzer2IYW6+2bNIVqLGDHqDw3Ow8Ohh+vdW+rv4MZ6eiDvHoKhfJFZ2auyN8byXieDDJ96ViONg==} + winston-transport@4.7.1: + resolution: {integrity: 
sha512-wQCXXVgfv/wUPOfb2x0ruxzwkcZfxcktz6JIMUaPLmcNhO4bZTwA/WtDWK74xV3F2dKu8YadrFv0qhwYjVEwhA==} engines: {node: '>= 12.0.0'} - winston@3.13.0: - resolution: {integrity: sha512-rwidmA1w3SE4j0E5MuIufFhyJPBDG7Nu71RkZor1p2+qHvJSZ9GYDA81AyleQcZbh/+V6HjeBdfnTZJm9rSeQQ==} + winston@3.13.1: + resolution: {integrity: sha512-SvZit7VFNvXRzbqGHsv5KSmgbEYR5EiQfDAL9gxYkRqa934Hnk++zze0wANKtMHcy/gI4W/3xmSDwlhf865WGw==} engines: {node: '>= 12.0.0'} wrap-ansi@6.2.0: @@ -4196,8 +3712,8 @@ packages: write-file-atomic@3.0.3: resolution: {integrity: sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==} - ws@8.17.0: - resolution: {integrity: sha512-uJq6108EgZMAl20KagGkzCKfMEjxmKvZHG7Tlq0Z6nOky7YF7aq4mOx6xK8TJ/i1LeK4Qus7INktacctDgY8Ow==} + ws@8.18.0: + resolution: {integrity: sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==} engines: {node: '>=10.0.0'} peerDependencies: bufferutil: ^4.0.1 @@ -4208,18 +3724,6 @@ packages: utf-8-validate: optional: true - ws@8.2.3: - resolution: {integrity: sha512-wBuoj1BDpC6ZQ1B7DWQBYVLphPWkm8i9Y0/3YdHjHKHiohOJ1ws+3OccDWtH+PoC9DZD5WOTrJvNbWvjS6JWaA==} - engines: {node: '>=10.0.0'} - peerDependencies: - bufferutil: ^4.0.1 - utf-8-validate: ^5.0.2 - peerDependenciesMeta: - bufferutil: - optional: true - utf-8-validate: - optional: true - xdg-basedir@5.1.0: resolution: {integrity: sha512-GCPAHLvrIH13+c0SuacwvRYj2SxJXQ4kaVTT5xgL3kPrz56XxkF21IGhjSE1+W0aw7gpBWRGXLCPnPby6lSpmQ==} engines: {node: '>=12'} @@ -4228,9 +3732,6 @@ packages: resolution: {integrity: sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==} engines: {node: '>=0.4'} - y18n@4.0.3: - resolution: {integrity: sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==} - y18n@5.0.8: resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} engines: {node: '>=10'} @@ -4241,8 +3742,8 @@ packages: yallist@4.0.0: resolution: {integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==} - yaml@2.4.2: - resolution: {integrity: sha512-B3VqDZ+JAg1nZpaEmWtTXUlBneoGx6CPM9b0TENK6aoSu5t73dItudwdgmi6tHlIZZId4dZ9skcAQ2UbcyAeVA==} + yaml@2.4.5: + resolution: {integrity: sha512-aBx2bnqDzVOyNKfsysjA2ms5ZlnjSAW2eG3/L5G/CSujfjLJTJsEw1bGw8kCf04KodQWk1pxlGnZ56CRxiawmg==} engines: {node: '>= 14'} hasBin: true @@ -4251,10 +3752,6 @@ packages: engines: {node: '>= 14'} hasBin: true - yargs-parser@18.1.3: - resolution: {integrity: sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==} - engines: {node: '>=6'} - yargs-parser@20.2.9: resolution: {integrity: sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==} engines: {node: '>=10'} @@ -4263,10 +3760,6 @@ packages: resolution: {integrity: sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==} engines: {node: '>=12'} - yargs@15.4.1: - resolution: {integrity: sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==} - engines: {node: '>=8'} - yargs@16.2.0: resolution: {integrity: sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==} engines: {node: '>=10'} @@ -4283,44 +3776,39 @@ packages: resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} engines: 
{node: '>=10'} - yocto-queue@1.0.0: - resolution: {integrity: sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==} - engines: {node: '>=12.20'} + yoctocolors-cjs@2.1.2: + resolution: {integrity: sha512-cYVsTjKl8b+FrnidjibDWskAv7UKOfcwaVZdp/it9n1s9fU3IkgDbhdIRKCW4JDsAlECJY0ytoVPT3sK6kideA==} + engines: {node: '>=18'} zod@3.23.8: resolution: {integrity: sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g==} snapshots: - '@ampproject/remapping@2.3.0': + '@babel/code-frame@7.24.7': dependencies: - '@jridgewell/gen-mapping': 0.3.5 - '@jridgewell/trace-mapping': 0.3.25 - - '@babel/code-frame@7.24.6': - dependencies: - '@babel/highlight': 7.24.6 + '@babel/highlight': 7.24.7 picocolors: 1.0.1 - '@babel/helper-validator-identifier@7.24.6': {} + '@babel/helper-validator-identifier@7.24.7': {} - '@babel/highlight@7.24.6': + '@babel/highlight@7.24.7': dependencies: - '@babel/helper-validator-identifier': 7.24.6 + '@babel/helper-validator-identifier': 7.24.7 chalk: 2.4.2 js-tokens: 4.0.0 picocolors: 1.0.1 - '@babel/runtime@7.24.6': + '@babel/runtime@7.24.8': dependencies: regenerator-runtime: 0.14.1 - '@changesets/apply-release-plan@7.0.1': + '@changesets/apply-release-plan@7.0.5': dependencies: - '@babel/runtime': 7.24.6 - '@changesets/config': 3.0.0 + '@changesets/config': 3.0.3 '@changesets/get-version-range-type': 0.4.0 - '@changesets/git': 3.0.0 + '@changesets/git': 3.0.1 + '@changesets/should-skip-package': 0.1.1 '@changesets/types': 6.0.0 '@manypkg/get-packages': 1.1.3 detect-indent: 6.1.0 @@ -4331,11 +3819,11 @@ snapshots: resolve-from: 5.0.0 semver: 7.6.2 - '@changesets/assemble-release-plan@6.0.0': + '@changesets/assemble-release-plan@6.0.4': dependencies: - '@babel/runtime': 7.24.6 '@changesets/errors': 0.2.0 - '@changesets/get-dependents-graph': 2.0.0 + '@changesets/get-dependents-graph': 2.1.2 + '@changesets/should-skip-package': 0.1.1 '@changesets/types': 6.0.0 '@manypkg/get-packages': 1.1.3 semver: 7.6.2 @@ -4344,46 +3832,44 @@ snapshots: dependencies: '@changesets/types': 6.0.0 - '@changesets/cli@2.27.3': + '@changesets/cli@2.27.8': dependencies: - '@babel/runtime': 7.24.6 - '@changesets/apply-release-plan': 7.0.1 - '@changesets/assemble-release-plan': 6.0.0 + '@changesets/apply-release-plan': 7.0.5 + '@changesets/assemble-release-plan': 6.0.4 '@changesets/changelog-git': 0.2.0 - '@changesets/config': 3.0.0 + '@changesets/config': 3.0.3 '@changesets/errors': 0.2.0 - '@changesets/get-dependents-graph': 2.0.0 - '@changesets/get-release-plan': 4.0.0 - '@changesets/git': 3.0.0 - '@changesets/logger': 0.1.0 - '@changesets/pre': 2.0.0 - '@changesets/read': 0.6.0 + '@changesets/get-dependents-graph': 2.1.2 + '@changesets/get-release-plan': 4.0.4 + '@changesets/git': 3.0.1 + '@changesets/logger': 0.1.1 + '@changesets/pre': 2.0.1 + '@changesets/read': 0.6.1 + '@changesets/should-skip-package': 0.1.1 '@changesets/types': 6.0.0 - '@changesets/write': 0.3.1 + '@changesets/write': 0.3.2 '@manypkg/get-packages': 1.1.3 '@types/semver': 7.5.8 ansi-colors: 4.1.3 - chalk: 2.4.2 ci-info: 3.9.0 enquirer: 2.4.1 external-editor: 3.1.0 fs-extra: 7.0.1 - human-id: 1.0.2 - meow: 6.1.1 + mri: 1.2.0 outdent: 0.5.0 p-limit: 2.3.0 - preferred-pm: 3.1.3 + package-manager-detector: 0.2.0 + picocolors: 1.1.0 resolve-from: 5.0.0 semver: 7.6.2 spawndamnit: 2.0.0 term-size: 2.2.1 - tty-table: 4.2.3 - '@changesets/config@3.0.0': + '@changesets/config@3.0.3': dependencies: '@changesets/errors': 0.2.0 - '@changesets/get-dependents-graph': 
2.0.0 - '@changesets/logger': 0.1.0 + '@changesets/get-dependents-graph': 2.1.2 + '@changesets/logger': 0.1.1 '@changesets/types': 6.0.0 '@manypkg/get-packages': 1.1.3 fs-extra: 7.0.1 @@ -4393,71 +3879,69 @@ snapshots: dependencies: extendable-error: 0.1.7 - '@changesets/get-dependents-graph@2.0.0': + '@changesets/get-dependents-graph@2.1.2': dependencies: '@changesets/types': 6.0.0 '@manypkg/get-packages': 1.1.3 - chalk: 2.4.2 - fs-extra: 7.0.1 + picocolors: 1.1.0 semver: 7.6.2 - '@changesets/get-release-plan@4.0.0': + '@changesets/get-release-plan@4.0.4': dependencies: - '@babel/runtime': 7.24.6 - '@changesets/assemble-release-plan': 6.0.0 - '@changesets/config': 3.0.0 - '@changesets/pre': 2.0.0 - '@changesets/read': 0.6.0 + '@changesets/assemble-release-plan': 6.0.4 + '@changesets/config': 3.0.3 + '@changesets/pre': 2.0.1 + '@changesets/read': 0.6.1 '@changesets/types': 6.0.0 '@manypkg/get-packages': 1.1.3 '@changesets/get-version-range-type@0.4.0': {} - '@changesets/git@3.0.0': + '@changesets/git@3.0.1': dependencies: - '@babel/runtime': 7.24.6 '@changesets/errors': 0.2.0 - '@changesets/types': 6.0.0 '@manypkg/get-packages': 1.1.3 is-subdir: 1.2.0 micromatch: 4.0.7 spawndamnit: 2.0.0 - '@changesets/logger@0.1.0': + '@changesets/logger@0.1.1': dependencies: - chalk: 2.4.2 + picocolors: 1.1.0 '@changesets/parse@0.4.0': dependencies: '@changesets/types': 6.0.0 js-yaml: 3.14.1 - '@changesets/pre@2.0.0': + '@changesets/pre@2.0.1': dependencies: - '@babel/runtime': 7.24.6 '@changesets/errors': 0.2.0 '@changesets/types': 6.0.0 '@manypkg/get-packages': 1.1.3 fs-extra: 7.0.1 - '@changesets/read@0.6.0': + '@changesets/read@0.6.1': dependencies: - '@babel/runtime': 7.24.6 - '@changesets/git': 3.0.0 - '@changesets/logger': 0.1.0 + '@changesets/git': 3.0.1 + '@changesets/logger': 0.1.1 '@changesets/parse': 0.4.0 '@changesets/types': 6.0.0 - chalk: 2.4.2 fs-extra: 7.0.1 p-filter: 2.1.0 + picocolors: 1.1.0 + + '@changesets/should-skip-package@0.1.1': + dependencies: + '@changesets/types': 6.0.0 + '@manypkg/get-packages': 1.1.3 '@changesets/types@4.1.0': {} '@changesets/types@6.0.0': {} - '@changesets/write@0.3.1': + '@changesets/write@0.3.2': dependencies: - '@babel/runtime': 7.24.6 '@changesets/types': 6.0.0 fs-extra: 7.0.1 human-id: 1.0.2 @@ -4478,80 +3962,80 @@ snapshots: enabled: 2.0.0 kuler: 2.0.0 - '@esbuild/aix-ppc64@0.20.2': + '@esbuild/aix-ppc64@0.21.5': optional: true - '@esbuild/android-arm64@0.20.2': + '@esbuild/android-arm64@0.21.5': optional: true - '@esbuild/android-arm@0.20.2': + '@esbuild/android-arm@0.21.5': optional: true - '@esbuild/android-x64@0.20.2': + '@esbuild/android-x64@0.21.5': optional: true - '@esbuild/darwin-arm64@0.20.2': + '@esbuild/darwin-arm64@0.21.5': optional: true - '@esbuild/darwin-x64@0.20.2': + '@esbuild/darwin-x64@0.21.5': optional: true - '@esbuild/freebsd-arm64@0.20.2': + '@esbuild/freebsd-arm64@0.21.5': optional: true - '@esbuild/freebsd-x64@0.20.2': + '@esbuild/freebsd-x64@0.21.5': optional: true - '@esbuild/linux-arm64@0.20.2': + '@esbuild/linux-arm64@0.21.5': optional: true - '@esbuild/linux-arm@0.20.2': + '@esbuild/linux-arm@0.21.5': optional: true - '@esbuild/linux-ia32@0.20.2': + '@esbuild/linux-ia32@0.21.5': optional: true - '@esbuild/linux-loong64@0.20.2': + '@esbuild/linux-loong64@0.21.5': optional: true - '@esbuild/linux-mips64el@0.20.2': + '@esbuild/linux-mips64el@0.21.5': optional: true - '@esbuild/linux-ppc64@0.20.2': + '@esbuild/linux-ppc64@0.21.5': optional: true - '@esbuild/linux-riscv64@0.20.2': + '@esbuild/linux-riscv64@0.21.5': optional: 
true - '@esbuild/linux-s390x@0.20.2': + '@esbuild/linux-s390x@0.21.5': optional: true - '@esbuild/linux-x64@0.20.2': + '@esbuild/linux-x64@0.21.5': optional: true - '@esbuild/netbsd-x64@0.20.2': + '@esbuild/netbsd-x64@0.21.5': optional: true - '@esbuild/openbsd-x64@0.20.2': + '@esbuild/openbsd-x64@0.21.5': optional: true - '@esbuild/sunos-x64@0.20.2': + '@esbuild/sunos-x64@0.21.5': optional: true - '@esbuild/win32-arm64@0.20.2': + '@esbuild/win32-arm64@0.21.5': optional: true - '@esbuild/win32-ia32@0.20.2': + '@esbuild/win32-ia32@0.21.5': optional: true - '@esbuild/win32-x64@0.20.2': + '@esbuild/win32-x64@0.21.5': optional: true - '@fastify/ajv-compiler@3.5.0': + '@fastify/ajv-compiler@3.6.0': dependencies: - ajv: 8.14.0 - ajv-formats: 2.1.1(ajv@8.14.0) - fast-uri: 2.3.0 + ajv: 8.16.0 + ajv-formats: 2.1.1(ajv@8.16.0) + fast-uri: 2.4.0 '@fastify/cors@8.4.1': dependencies: @@ -4562,7 +4046,7 @@ snapshots: '@fastify/fast-json-stringify-compiler@4.3.0': dependencies: - fast-json-stringify: 5.16.0 + fast-json-stringify: 5.16.1 '@fastify/merge-json-schemas@0.1.1': dependencies: @@ -4572,7 +4056,7 @@ snapshots: '@humanwhocodes/momoa@2.0.4': {} - '@inquirer/figures@1.0.2': {} + '@inquirer/figures@1.0.3': {} '@isaacs/cliui@8.0.2': dependencies: @@ -4583,48 +4067,27 @@ snapshots: wrap-ansi: 8.1.0 wrap-ansi-cjs: wrap-ansi@7.0.0 - '@jest/schemas@29.6.3': - dependencies: - '@sinclair/typebox': 0.27.8 + '@jridgewell/resolve-uri@3.1.2': {} - '@jridgewell/gen-mapping@0.3.5': - dependencies: - '@jridgewell/set-array': 1.2.1 - '@jridgewell/sourcemap-codec': 1.4.15 - '@jridgewell/trace-mapping': 0.3.25 - - '@jridgewell/resolve-uri@3.1.2': {} - - '@jridgewell/set-array@1.2.1': {} - - '@jridgewell/sourcemap-codec@1.4.15': {} - - '@jridgewell/trace-mapping@0.3.25': - dependencies: - '@jridgewell/resolve-uri': 3.1.2 - '@jridgewell/sourcemap-codec': 1.4.15 + '@jridgewell/sourcemap-codec@1.5.0': {} '@jridgewell/trace-mapping@0.3.9': dependencies: '@jridgewell/resolve-uri': 3.1.2 - '@jridgewell/sourcemap-codec': 1.4.15 + '@jridgewell/sourcemap-codec': 1.5.0 '@js-sdsl/ordered-set@4.4.2': {} - '@ljharb/through@2.3.13': - dependencies: - call-bind: 1.0.7 - '@manypkg/find-root@1.1.0': dependencies: - '@babel/runtime': 7.24.6 + '@babel/runtime': 7.24.8 '@types/node': 12.20.55 find-up: 4.1.0 fs-extra: 8.1.0 '@manypkg/get-packages@1.1.3': dependencies: - '@babel/runtime': 7.24.6 + '@babel/runtime': 7.24.8 '@changesets/types': 4.1.0 '@manypkg/find-root': 1.1.0 fs-extra: 8.1.0 @@ -4700,9 +4163,9 @@ snapshots: dependencies: '@opentelemetry/api': 1.8.0 - '@opentelemetry/api-logs@0.52.0': + '@opentelemetry/api-logs@0.52.1': dependencies: - '@opentelemetry/api': 1.8.0 + '@opentelemetry/api': 1.6.0 '@opentelemetry/api@1.6.0': {} @@ -4710,7 +4173,7 @@ snapshots: '@opentelemetry/api@1.9.0': {} - '@opentelemetry/context-async-hooks@1.25.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/context-async-hooks@1.25.1(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 @@ -4734,15 +4197,15 @@ snapshots: '@opentelemetry/api': 1.9.0 '@opentelemetry/semantic-conventions': 1.24.1 - '@opentelemetry/core@1.25.0(@opentelemetry/api@1.8.0)': + '@opentelemetry/core@1.25.1(@opentelemetry/api@1.8.0)': dependencies: '@opentelemetry/api': 1.8.0 - '@opentelemetry/semantic-conventions': 1.25.0 + '@opentelemetry/semantic-conventions': 1.25.1 - '@opentelemetry/core@1.25.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/core@1.25.1(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/semantic-conventions': 
1.25.0 + '@opentelemetry/semantic-conventions': 1.25.1 '@opentelemetry/exporter-metrics-otlp-http@0.51.1(@opentelemetry/api@1.8.0)': dependencies: @@ -4767,149 +4230,147 @@ snapshots: '@opentelemetry/resources': 1.24.1(@opentelemetry/api@1.8.0) '@opentelemetry/sdk-metrics': 1.24.1(@opentelemetry/api@1.8.0) - '@opentelemetry/instrumentation-connect@0.37.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-connect@0.38.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.52.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.0 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.25.1 '@types/connect': 3.4.36 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-express@0.40.1(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-express@0.41.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.52.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.0 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.25.1 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-fastify@0.37.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-fastify@0.38.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.52.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.0 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.25.1 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-graphql@0.41.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-graphql@0.42.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/instrumentation': 0.52.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0) transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-hapi@0.39.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-hapi@0.40.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.52.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.0 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.25.1 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-http@0.52.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-http@0.52.1(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.52.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.0 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 
0.52.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.25.1 semver: 7.6.2 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-ioredis@0.41.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-ioredis@0.42.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/instrumentation': 0.52.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0) '@opentelemetry/redis-common': 0.36.2 - '@opentelemetry/semantic-conventions': 1.25.0 + '@opentelemetry/semantic-conventions': 1.25.1 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-koa@0.41.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-koa@0.42.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.52.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.0 - '@types/koa': 2.14.0 - '@types/koa__router': 12.0.3 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.25.1 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-mongodb@0.45.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-mongodb@0.46.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/instrumentation': 0.52.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0) '@opentelemetry/sdk-metrics': 1.24.1(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.0 + '@opentelemetry/semantic-conventions': 1.25.1 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-mongoose@0.39.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-mongoose@0.40.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.52.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.0 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.25.1 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-mysql2@0.39.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-mysql2@0.40.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/instrumentation': 0.52.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.0 + '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.25.1 '@opentelemetry/sql-common': 0.40.1(@opentelemetry/api@1.9.0) transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-mysql@0.39.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-mysql@0.40.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/instrumentation': 0.52.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.0 + '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.25.1 '@types/mysql': 2.15.22 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-nestjs-core@0.38.0(@opentelemetry/api@1.9.0)': + 
'@opentelemetry/instrumentation-nestjs-core@0.39.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/instrumentation': 0.52.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.0 + '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.25.1 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-pg@0.42.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-pg@0.43.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/instrumentation': 0.52.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.0 + '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.25.1 '@opentelemetry/sql-common': 0.40.1(@opentelemetry/api@1.9.0) '@types/pg': 8.6.1 '@types/pg-pool': 2.0.4 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation-redis-4@0.40.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation-redis-4@0.41.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/instrumentation': 0.52.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0) '@opentelemetry/redis-common': 0.36.2 - '@opentelemetry/semantic-conventions': 1.25.0 + '@opentelemetry/semantic-conventions': 1.25.1 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation@0.43.0(@opentelemetry/api@1.8.0)': + '@opentelemetry/instrumentation@0.46.0(@opentelemetry/api@1.9.0)': dependencies: - '@opentelemetry/api': 1.8.0 - '@types/shimmer': 1.0.5 - import-in-the-middle: 1.4.2 + '@opentelemetry/api': 1.9.0 + '@types/shimmer': 1.2.0 + import-in-the-middle: 1.7.1 require-in-the-middle: 7.3.0 semver: 7.6.2 shimmer: 1.2.1 @@ -4917,24 +4378,24 @@ snapshots: - supports-color optional: true - '@opentelemetry/instrumentation@0.51.1(@opentelemetry/api@1.8.0)': + '@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.8.0)': dependencies: '@opentelemetry/api': 1.8.0 - '@opentelemetry/api-logs': 0.51.1 - '@types/shimmer': 1.0.5 - import-in-the-middle: 1.7.4 + '@opentelemetry/api-logs': 0.52.1 + '@types/shimmer': 1.2.0 + import-in-the-middle: 1.9.0 require-in-the-middle: 7.3.0 semver: 7.6.2 shimmer: 1.2.1 transitivePeerDependencies: - supports-color - '@opentelemetry/instrumentation@0.52.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/api-logs': 0.52.0 - '@types/shimmer': 1.0.5 - import-in-the-middle: 1.8.0 + '@opentelemetry/api-logs': 0.52.1 + '@types/shimmer': 1.2.0 + import-in-the-middle: 1.9.0 require-in-the-middle: 7.3.0 semver: 7.6.2 shimmer: 1.2.1 @@ -4982,17 +4443,17 @@ snapshots: '@opentelemetry/core': 1.24.1(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.24.1 - '@opentelemetry/resources@1.25.0(@opentelemetry/api@1.8.0)': + '@opentelemetry/resources@1.25.1(@opentelemetry/api@1.8.0)': dependencies: '@opentelemetry/api': 1.8.0 - '@opentelemetry/core': 1.25.0(@opentelemetry/api@1.8.0) - '@opentelemetry/semantic-conventions': 1.25.0 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.8.0) + '@opentelemetry/semantic-conventions': 1.25.1 - '@opentelemetry/resources@1.25.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/resources@1.25.1(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 
1.25.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.0 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.25.1 '@opentelemetry/sdk-logs@0.51.1(@opentelemetry/api-logs@0.51.1)(@opentelemetry/api@1.8.0)': dependencies: @@ -5036,30 +4497,30 @@ snapshots: '@opentelemetry/resources': 1.24.1(@opentelemetry/api@1.8.0) '@opentelemetry/semantic-conventions': 1.24.1 - '@opentelemetry/sdk-trace-base@1.25.0(@opentelemetry/api@1.8.0)': + '@opentelemetry/sdk-trace-base@1.25.1(@opentelemetry/api@1.8.0)': dependencies: '@opentelemetry/api': 1.8.0 - '@opentelemetry/core': 1.25.0(@opentelemetry/api@1.8.0) - '@opentelemetry/resources': 1.25.0(@opentelemetry/api@1.8.0) - '@opentelemetry/semantic-conventions': 1.25.0 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.8.0) + '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.8.0) + '@opentelemetry/semantic-conventions': 1.25.1 - '@opentelemetry/sdk-trace-base@1.25.0(@opentelemetry/api@1.9.0)': + '@opentelemetry/sdk-trace-base@1.25.1(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.0(@opentelemetry/api@1.9.0) - '@opentelemetry/resources': 1.25.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.0 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.25.1 '@opentelemetry/semantic-conventions@1.17.0': {} '@opentelemetry/semantic-conventions@1.24.1': {} - '@opentelemetry/semantic-conventions@1.25.0': {} + '@opentelemetry/semantic-conventions@1.25.1': {} '@opentelemetry/sql-common@0.40.1(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.0(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) '@pkgjs/parseargs@0.11.0': optional: true @@ -5076,117 +4537,123 @@ snapshots: '@pnpm/network.ca-file': 1.0.2 config-chain: 1.1.13 - '@prisma/instrumentation@5.15.0': + '@powersync/mysql-zongji@0.1.0': + dependencies: + '@vlasky/mysql': 2.18.6 + big-integer: 1.6.51 + iconv-lite: 0.6.3 + + '@prisma/instrumentation@5.16.1': dependencies: '@opentelemetry/api': 1.8.0 - '@opentelemetry/instrumentation': 0.51.1(@opentelemetry/api@1.8.0) - '@opentelemetry/sdk-trace-base': 1.25.0(@opentelemetry/api@1.8.0) + '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.8.0) + '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.8.0) transitivePeerDependencies: - supports-color - '@rollup/rollup-android-arm-eabi@4.18.0': + '@rollup/rollup-android-arm-eabi@4.18.1': optional: true - '@rollup/rollup-android-arm64@4.18.0': + '@rollup/rollup-android-arm64@4.18.1': optional: true - '@rollup/rollup-darwin-arm64@4.18.0': + '@rollup/rollup-darwin-arm64@4.18.1': optional: true - '@rollup/rollup-darwin-x64@4.18.0': + '@rollup/rollup-darwin-x64@4.18.1': optional: true - '@rollup/rollup-linux-arm-gnueabihf@4.18.0': + '@rollup/rollup-linux-arm-gnueabihf@4.18.1': optional: true - '@rollup/rollup-linux-arm-musleabihf@4.18.0': + '@rollup/rollup-linux-arm-musleabihf@4.18.1': optional: true - '@rollup/rollup-linux-arm64-gnu@4.18.0': + '@rollup/rollup-linux-arm64-gnu@4.18.1': optional: true - '@rollup/rollup-linux-arm64-musl@4.18.0': + '@rollup/rollup-linux-arm64-musl@4.18.1': optional: true - '@rollup/rollup-linux-powerpc64le-gnu@4.18.0': + '@rollup/rollup-linux-powerpc64le-gnu@4.18.1': optional: true - '@rollup/rollup-linux-riscv64-gnu@4.18.0': + 
'@rollup/rollup-linux-riscv64-gnu@4.18.1': optional: true - '@rollup/rollup-linux-s390x-gnu@4.18.0': + '@rollup/rollup-linux-s390x-gnu@4.18.1': optional: true - '@rollup/rollup-linux-x64-gnu@4.18.0': + '@rollup/rollup-linux-x64-gnu@4.18.1': optional: true - '@rollup/rollup-linux-x64-musl@4.18.0': + '@rollup/rollup-linux-x64-musl@4.18.1': optional: true - '@rollup/rollup-win32-arm64-msvc@4.18.0': + '@rollup/rollup-win32-arm64-msvc@4.18.1': optional: true - '@rollup/rollup-win32-ia32-msvc@4.18.0': + '@rollup/rollup-win32-ia32-msvc@4.18.1': optional: true - '@rollup/rollup-win32-x64-msvc@4.18.0': + '@rollup/rollup-win32-x64-msvc@4.18.1': optional: true - '@sentry/core@8.9.2': + '@sentry/core@8.17.0': dependencies: - '@sentry/types': 8.9.2 - '@sentry/utils': 8.9.2 + '@sentry/types': 8.17.0 + '@sentry/utils': 8.17.0 - '@sentry/node@8.9.2': + '@sentry/node@8.17.0': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/context-async-hooks': 1.25.0(@opentelemetry/api@1.9.0) - '@opentelemetry/core': 1.25.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.52.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-connect': 0.37.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-express': 0.40.1(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-fastify': 0.37.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-graphql': 0.41.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-hapi': 0.39.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-http': 0.52.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-ioredis': 0.41.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-koa': 0.41.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-mongodb': 0.45.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-mongoose': 0.39.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-mysql': 0.39.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-mysql2': 0.39.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-nestjs-core': 0.38.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-pg': 0.42.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation-redis-4': 0.40.0(@opentelemetry/api@1.9.0) - '@opentelemetry/resources': 1.25.0(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-base': 1.25.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.0 - '@prisma/instrumentation': 5.15.0 - '@sentry/core': 8.9.2 - '@sentry/opentelemetry': 8.9.2(@opentelemetry/api@1.9.0)(@opentelemetry/core@1.25.0(@opentelemetry/api@1.9.0))(@opentelemetry/instrumentation@0.52.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.25.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.25.0) - '@sentry/types': 8.9.2 - '@sentry/utils': 8.9.2 + '@opentelemetry/context-async-hooks': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-connect': 0.38.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-express': 0.41.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-fastify': 0.38.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-graphql': 0.42.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-hapi': 0.40.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-http': 0.52.1(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-ioredis': 
0.42.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-koa': 0.42.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-mongodb': 0.46.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-mongoose': 0.40.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-mysql': 0.40.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-mysql2': 0.40.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-nestjs-core': 0.39.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-pg': 0.43.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation-redis-4': 0.41.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.25.1 + '@prisma/instrumentation': 5.16.1 + '@sentry/core': 8.17.0 + '@sentry/opentelemetry': 8.17.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@1.25.1(@opentelemetry/api@1.6.0))(@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.6.0))(@opentelemetry/sdk-trace-base@1.25.1(@opentelemetry/api@1.6.0))(@opentelemetry/semantic-conventions@1.25.1) + '@sentry/types': 8.17.0 + '@sentry/utils': 8.17.0 optionalDependencies: - opentelemetry-instrumentation-fetch-node: 1.2.0 + opentelemetry-instrumentation-fetch-node: 1.2.3(@opentelemetry/api@1.9.0) transitivePeerDependencies: - supports-color - '@sentry/opentelemetry@8.9.2(@opentelemetry/api@1.9.0)(@opentelemetry/core@1.25.0(@opentelemetry/api@1.9.0))(@opentelemetry/instrumentation@0.52.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.25.0(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.25.0)': + '@sentry/opentelemetry@8.17.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@1.25.1(@opentelemetry/api@1.6.0))(@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.6.0))(@opentelemetry/sdk-trace-base@1.25.1(@opentelemetry/api@1.6.0))(@opentelemetry/semantic-conventions@1.25.1)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.0(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.52.0(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-base': 1.25.0(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.0 - '@sentry/core': 8.9.2 - '@sentry/types': 8.9.2 - '@sentry/utils': 8.9.2 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.25.1 + '@sentry/core': 8.17.0 + '@sentry/types': 8.17.0 + '@sentry/utils': 8.17.0 - '@sentry/types@8.9.2': {} + '@sentry/types@8.17.0': {} - '@sentry/utils@8.9.2': + '@sentry/utils@8.17.0': dependencies: - '@sentry/types': 8.9.2 + '@sentry/types': 8.17.0 '@sigstore/bundle@1.1.0': dependencies: @@ -5209,8 +4676,6 @@ snapshots: transitivePeerDependencies: - supports-color - '@sinclair/typebox@0.27.8': {} - '@sindresorhus/is@5.6.0': {} '@syncpoint/wkx@0.5.2': @@ -5236,94 +4701,23 @@ snapshots: '@tufjs/models@1.0.4': dependencies: '@tufjs/canonical-json': 1.0.0 - minimatch: 9.0.4 - - '@types/accepts@1.3.7': - dependencies: - '@types/node': 18.19.50 + minimatch: 9.0.5 '@types/async@3.2.24': {} - '@types/body-parser@1.19.5': - dependencies: - '@types/connect': 3.4.38 - '@types/node': 18.19.50 - - '@types/chai-subset@1.3.5': - dependencies: - '@types/chai': 4.3.16 - - '@types/chai@4.3.16': {} - '@types/connect@3.4.36': dependencies: - '@types/node': 18.19.50 - - 
'@types/connect@3.4.38': - dependencies: - '@types/node': 18.19.50 - - '@types/content-disposition@0.5.8': {} - - '@types/cookies@0.9.0': - dependencies: - '@types/connect': 3.4.38 - '@types/express': 4.17.21 - '@types/keygrip': 1.0.6 - '@types/node': 18.19.50 + '@types/node': 22.5.5 '@types/estree@1.0.5': {} - '@types/express-serve-static-core@4.19.1': - dependencies: - '@types/node': 18.19.50 - '@types/qs': 6.9.15 - '@types/range-parser': 1.2.7 - '@types/send': 0.17.4 - - '@types/express@4.17.21': - dependencies: - '@types/body-parser': 1.19.5 - '@types/express-serve-static-core': 4.19.1 - '@types/qs': 6.9.15 - '@types/serve-static': 1.15.7 - - '@types/http-assert@1.5.5': {} - '@types/http-cache-semantics@4.0.4': {} - '@types/http-errors@2.0.4': {} - - '@types/keygrip@1.0.6': {} - - '@types/koa-compose@3.2.8': - dependencies: - '@types/koa': 2.14.0 - - '@types/koa@2.14.0': - dependencies: - '@types/accepts': 1.3.7 - '@types/content-disposition': 0.5.8 - '@types/cookies': 0.9.0 - '@types/http-assert': 1.5.5 - '@types/http-errors': 2.0.4 - '@types/keygrip': 1.0.6 - '@types/koa-compose': 3.2.8 - '@types/node': 18.19.50 - - '@types/koa__router@12.0.3': - dependencies: - '@types/koa': 2.14.0 - - '@types/lodash@4.17.5': {} - - '@types/mime@1.3.5': {} - - '@types/minimist@1.2.5': {} + '@types/lodash@4.17.6': {} '@types/mysql@2.15.22': dependencies: - '@types/node': 18.19.50 + '@types/node': 22.5.5 '@types/node@12.20.55': {} @@ -5331,13 +4725,9 @@ snapshots: '@types/node@15.14.9': {} - '@types/node@18.11.11': {} - - '@types/node@18.19.50': + '@types/node@22.5.5': dependencies: - undici-types: 5.26.5 - - '@types/normalize-package-data@2.4.4': {} + undici-types: 6.19.8 '@types/pg-pool@2.0.4': dependencies: @@ -5345,30 +4735,15 @@ snapshots: '@types/pg@8.6.1': dependencies: - '@types/node': 18.19.50 + '@types/node': 22.5.5 pg-protocol: 1.6.1 pg-types: 2.2.0 - '@types/qs@6.9.15': {} - - '@types/range-parser@1.2.7': {} - '@types/semver-utils@1.1.3': {} '@types/semver@7.5.8': {} - '@types/send@0.17.4': - dependencies: - '@types/mime': 1.3.5 - '@types/node': 18.19.50 - - '@types/serve-static@1.15.7': - dependencies: - '@types/http-errors': 2.0.4 - '@types/node': 18.19.50 - '@types/send': 0.17.4 - - '@types/shimmer@1.0.5': {} + '@types/shimmer@1.2.0': {} '@types/strip-bom@3.0.0': {} @@ -5386,69 +4761,55 @@ snapshots: '@types/ws@8.2.3': dependencies: - '@types/node': 18.11.11 - - '@vitest/expect@0.34.6': - dependencies: - '@vitest/spy': 0.34.6 - '@vitest/utils': 0.34.6 - chai: 4.4.1 + '@types/node': 22.5.5 - '@vitest/expect@2.0.5': + '@vitest/expect@2.1.1': dependencies: - '@vitest/spy': 2.0.5 - '@vitest/utils': 2.0.5 + '@vitest/spy': 2.1.1 + '@vitest/utils': 2.1.1 chai: 5.1.1 tinyrainbow: 1.2.0 - '@vitest/pretty-format@2.0.5': + '@vitest/mocker@2.1.1(@vitest/spy@2.1.1)(vite@5.3.3(@types/node@22.5.5))': dependencies: - tinyrainbow: 1.2.0 - - '@vitest/runner@0.34.6': - dependencies: - '@vitest/utils': 0.34.6 - p-limit: 4.0.0 - pathe: 1.1.2 + '@vitest/spy': 2.1.1 + estree-walker: 3.0.3 + magic-string: 0.30.11 + optionalDependencies: + vite: 5.3.3(@types/node@22.5.5) - '@vitest/runner@2.0.5': + '@vitest/pretty-format@2.1.1': dependencies: - '@vitest/utils': 2.0.5 - pathe: 1.1.2 + tinyrainbow: 1.2.0 - '@vitest/snapshot@0.34.6': + '@vitest/runner@2.1.1': dependencies: - magic-string: 0.30.10 + '@vitest/utils': 2.1.1 pathe: 1.1.2 - pretty-format: 29.7.0 - '@vitest/snapshot@2.0.5': + '@vitest/snapshot@2.1.1': dependencies: - '@vitest/pretty-format': 2.0.5 - magic-string: 0.30.10 + '@vitest/pretty-format': 2.1.1 
+ magic-string: 0.30.11 pathe: 1.1.2 - '@vitest/spy@0.34.6': - dependencies: - tinyspy: 2.2.1 - - '@vitest/spy@2.0.5': + '@vitest/spy@2.1.1': dependencies: - tinyspy: 3.0.0 + tinyspy: 3.0.2 - '@vitest/utils@0.34.6': + '@vitest/utils@2.1.1': dependencies: - diff-sequences: 29.6.3 - loupe: 2.3.7 - pretty-format: 29.7.0 - - '@vitest/utils@2.0.5': - dependencies: - '@vitest/pretty-format': 2.0.5 - estree-walker: 3.0.3 + '@vitest/pretty-format': 2.1.1 loupe: 3.1.1 tinyrainbow: 1.2.0 + '@vlasky/mysql@2.18.6': + dependencies: + bignumber.js: 9.1.1 + readable-stream: 2.3.7 + safe-buffer: 5.2.1 + sqlstring: 2.3.3 + abbrev@1.1.1: {} abort-controller@3.0.0: @@ -5457,22 +4818,24 @@ snapshots: abstract-logging@2.0.1: {} - acorn-import-assertions@1.9.0(acorn@8.11.3): + acorn-import-assertions@1.9.0(acorn@8.12.1): dependencies: - acorn: 8.11.3 + acorn: 8.12.1 optional: true - acorn-import-attributes@1.9.5(acorn@8.11.3): + acorn-import-attributes@1.9.5(acorn@8.12.1): dependencies: - acorn: 8.11.3 + acorn: 8.12.1 - acorn-walk@8.3.2: {} + acorn-walk@8.3.3: + dependencies: + acorn: 8.12.1 - acorn@8.11.3: {} + acorn@8.12.1: {} agent-base@6.0.2: dependencies: - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.7 transitivePeerDependencies: - supports-color @@ -5485,15 +4848,15 @@ snapshots: clean-stack: 2.2.0 indent-string: 4.0.0 - ajv-formats@2.1.1(ajv@8.14.0): + ajv-formats@2.1.1(ajv@8.16.0): optionalDependencies: - ajv: 8.14.0 + ajv: 8.16.0 - ajv-formats@3.0.1(ajv@8.14.0): + ajv-formats@3.0.1(ajv@8.16.0): optionalDependencies: - ajv: 8.14.0 + ajv: 8.16.0 - ajv@8.14.0: + ajv@8.16.0: dependencies: fast-deep-equal: 3.1.3 json-schema-traverse: 1.0.0 @@ -5522,8 +4885,6 @@ snapshots: dependencies: color-convert: 2.0.1 - ansi-styles@5.2.0: {} - ansi-styles@6.2.1: {} anymatch@3.1.3: @@ -5546,63 +4907,34 @@ snapshots: argparse@2.0.1: {} - array-buffer-byte-length@1.0.1: - dependencies: - call-bind: 1.0.7 - is-array-buffer: 3.0.4 - array-union@2.1.0: {} - array.prototype.flat@1.3.2: - dependencies: - call-bind: 1.0.7 - define-properties: 1.2.1 - es-abstract: 1.23.3 - es-shim-unscopables: 1.0.2 - - arraybuffer.prototype.slice@1.0.3: - dependencies: - array-buffer-byte-length: 1.0.1 - call-bind: 1.0.7 - define-properties: 1.2.1 - es-abstract: 1.23.3 - es-errors: 1.3.0 - get-intrinsic: 1.2.4 - is-array-buffer: 3.0.4 - is-shared-array-buffer: 1.0.3 - - arrify@1.0.1: {} - - assertion-error@1.1.0: {} - assertion-error@2.0.1: {} async-mutex@0.5.0: dependencies: - tslib: 2.6.2 + tslib: 2.6.3 async@3.2.5: {} atomic-sleep@1.0.0: {} - available-typed-arrays@1.0.7: - dependencies: - possible-typed-array-names: 1.0.0 - avvio@8.3.2: dependencies: '@fastify/error': 3.4.1 fastq: 1.17.1 + aws-ssl-profiles@1.1.2: {} + balanced-match@1.0.2: {} base64-js@1.5.1: {} - better-ajv-errors@1.2.0(ajv@8.14.0): + better-ajv-errors@1.2.0(ajv@8.16.0): dependencies: - '@babel/code-frame': 7.24.6 + '@babel/code-frame': 7.24.7 '@humanwhocodes/momoa': 2.0.4 - ajv: 8.14.0 + ajv: 8.16.0 chalk: 4.1.2 jsonpointer: 5.0.1 leven: 3.1.0 @@ -5611,6 +4943,10 @@ snapshots: dependencies: is-windows: 1.0.2 + big-integer@1.6.51: {} + + bignumber.js@9.1.1: {} + binary-extensions@2.3.0: {} bl@4.1.0: @@ -5643,11 +4979,7 @@ snapshots: dependencies: fill-range: 7.1.1 - breakword@1.0.6: - dependencies: - wcwidth: 1.0.1 - - bson@6.7.0: {} + bson@6.8.0: {} buffer-from@1.1.2: {} @@ -5690,7 +5022,7 @@ snapshots: dependencies: '@npmcli/fs': 3.1.1 fs-minipass: 3.0.3 - glob: 10.4.1 + glob: 10.4.5 lru-cache: 7.18.3 minipass: 7.1.2 minipass-collect: 1.0.2 @@ -5713,34 +5045,8 @@ 
snapshots: normalize-url: 8.0.1 responselike: 3.0.0 - call-bind@1.0.7: - dependencies: - es-define-property: 1.0.0 - es-errors: 1.3.0 - function-bind: 1.1.2 - get-intrinsic: 1.2.4 - set-function-length: 1.2.2 - - camelcase-keys@6.2.2: - dependencies: - camelcase: 5.3.1 - map-obj: 4.3.0 - quick-lru: 4.0.1 - - camelcase@5.3.1: {} - camelcase@7.0.1: {} - chai@4.4.1: - dependencies: - assertion-error: 1.1.0 - check-error: 1.0.3 - deep-eql: 4.1.3 - get-func-name: 2.0.2 - loupe: 2.3.7 - pathval: 1.1.1 - type-detect: 4.0.8 - chai@5.1.1: dependencies: assertion-error: 2.0.1 @@ -5764,10 +5070,6 @@ snapshots: chardet@0.7.0: {} - check-error@1.0.3: - dependencies: - get-func-name: 2.0.2 - check-error@2.1.1: {} chokidar@3.6.0: @@ -5806,12 +5108,6 @@ snapshots: cli-width@4.1.0: {} - cliui@6.0.0: - dependencies: - string-width: 4.2.3 - strip-ansi: 6.0.1 - wrap-ansi: 6.2.0 - cliui@7.0.4: dependencies: string-width: 4.2.3 @@ -5875,8 +5171,6 @@ snapshots: tree-kill: 1.2.2 yargs: 17.7.2 - confbox@0.1.7: {} - config-chain@1.1.13: dependencies: ini: 1.3.8 @@ -5929,46 +5223,15 @@ snapshots: dependencies: type-fest: 1.4.0 - csv-generate@3.4.3: {} - - csv-parse@4.16.3: {} - - csv-stringify@5.6.5: {} - - csv@5.5.3: - dependencies: - csv-generate: 3.4.3 - csv-parse: 4.16.3 - csv-stringify: 5.6.5 - stream-transform: 2.1.3 - data-uri-to-buffer@4.0.1: {} - data-view-buffer@1.0.1: - dependencies: - call-bind: 1.0.7 - es-errors: 1.3.0 - is-data-view: 1.0.1 - - data-view-byte-length@1.0.1: - dependencies: - call-bind: 1.0.7 - es-errors: 1.3.0 - is-data-view: 1.0.1 - - data-view-byte-offset@1.0.0: - dependencies: - call-bind: 1.0.7 - es-errors: 1.3.0 - is-data-view: 1.0.1 - date-fns@2.30.0: dependencies: - '@babel/runtime': 7.24.6 + '@babel/runtime': 7.24.8 - date-fns@3.6.0: {} + date-fns@4.1.0: {} - debug@4.3.4(supports-color@5.5.0): + debug@4.3.5(supports-color@5.5.0): dependencies: ms: 2.1.2 optionalDependencies: @@ -5978,21 +5241,10 @@ snapshots: dependencies: ms: 2.1.3 - decamelize-keys@1.1.1: - dependencies: - decamelize: 1.2.0 - map-obj: 1.0.1 - - decamelize@1.2.0: {} - decompress-response@6.0.0: dependencies: mimic-response: 3.1.0 - deep-eql@4.1.3: - dependencies: - type-detect: 4.0.8 - deep-eql@5.0.2: {} deep-extend@0.6.0: {} @@ -6003,23 +5255,11 @@ snapshots: defer-to-connect@2.0.1: {} - define-data-property@1.1.4: - dependencies: - es-define-property: 1.0.0 - es-errors: 1.3.0 - gopd: 1.0.1 - - define-properties@1.2.1: - dependencies: - define-data-property: 1.1.4 - has-property-descriptors: 1.0.2 - object-keys: 1.1.1 - delegates@1.0.0: {} - detect-indent@6.1.0: {} + denque@2.1.0: {} - diff-sequences@29.6.3: {} + detect-indent@6.1.0: {} diff@4.0.2: {} @@ -6063,110 +5303,31 @@ snapshots: err-code@2.0.3: {} - error-ex@1.3.2: - dependencies: - is-arrayish: 0.2.1 - - es-abstract@1.23.3: - dependencies: - array-buffer-byte-length: 1.0.1 - arraybuffer.prototype.slice: 1.0.3 - available-typed-arrays: 1.0.7 - call-bind: 1.0.7 - data-view-buffer: 1.0.1 - data-view-byte-length: 1.0.1 - data-view-byte-offset: 1.0.0 - es-define-property: 1.0.0 - es-errors: 1.3.0 - es-object-atoms: 1.0.0 - es-set-tostringtag: 2.0.3 - es-to-primitive: 1.2.1 - function.prototype.name: 1.1.6 - get-intrinsic: 1.2.4 - get-symbol-description: 1.0.2 - globalthis: 1.0.4 - gopd: 1.0.1 - has-property-descriptors: 1.0.2 - has-proto: 1.0.3 - has-symbols: 1.0.3 - hasown: 2.0.2 - internal-slot: 1.0.7 - is-array-buffer: 3.0.4 - is-callable: 1.2.7 - is-data-view: 1.0.1 - is-negative-zero: 2.0.3 - is-regex: 1.1.4 - is-shared-array-buffer: 1.0.3 - is-string: 
1.0.7 - is-typed-array: 1.1.13 - is-weakref: 1.0.2 - object-inspect: 1.13.1 - object-keys: 1.1.1 - object.assign: 4.1.5 - regexp.prototype.flags: 1.5.2 - safe-array-concat: 1.1.2 - safe-regex-test: 1.0.3 - string.prototype.trim: 1.2.9 - string.prototype.trimend: 1.0.8 - string.prototype.trimstart: 1.0.8 - typed-array-buffer: 1.0.2 - typed-array-byte-length: 1.0.1 - typed-array-byte-offset: 1.0.2 - typed-array-length: 1.0.6 - unbox-primitive: 1.0.2 - which-typed-array: 1.1.15 - - es-define-property@1.0.0: - dependencies: - get-intrinsic: 1.2.4 - - es-errors@1.3.0: {} - - es-object-atoms@1.0.0: - dependencies: - es-errors: 1.3.0 - - es-set-tostringtag@2.0.3: - dependencies: - get-intrinsic: 1.2.4 - has-tostringtag: 1.0.2 - hasown: 2.0.2 - - es-shim-unscopables@1.0.2: - dependencies: - hasown: 2.0.2 - - es-to-primitive@1.2.1: - dependencies: - is-callable: 1.2.7 - is-date-object: 1.0.5 - is-symbol: 1.0.4 - - esbuild@0.20.2: + esbuild@0.21.5: optionalDependencies: - '@esbuild/aix-ppc64': 0.20.2 - '@esbuild/android-arm': 0.20.2 - '@esbuild/android-arm64': 0.20.2 - '@esbuild/android-x64': 0.20.2 - '@esbuild/darwin-arm64': 0.20.2 - '@esbuild/darwin-x64': 0.20.2 - '@esbuild/freebsd-arm64': 0.20.2 - '@esbuild/freebsd-x64': 0.20.2 - '@esbuild/linux-arm': 0.20.2 - '@esbuild/linux-arm64': 0.20.2 - '@esbuild/linux-ia32': 0.20.2 - '@esbuild/linux-loong64': 0.20.2 - '@esbuild/linux-mips64el': 0.20.2 - '@esbuild/linux-ppc64': 0.20.2 - '@esbuild/linux-riscv64': 0.20.2 - '@esbuild/linux-s390x': 0.20.2 - '@esbuild/linux-x64': 0.20.2 - '@esbuild/netbsd-x64': 0.20.2 - '@esbuild/openbsd-x64': 0.20.2 - '@esbuild/sunos-x64': 0.20.2 - '@esbuild/win32-arm64': 0.20.2 - '@esbuild/win32-ia32': 0.20.2 - '@esbuild/win32-x64': 0.20.2 + '@esbuild/aix-ppc64': 0.21.5 + '@esbuild/android-arm': 0.21.5 + '@esbuild/android-arm64': 0.21.5 + '@esbuild/android-x64': 0.21.5 + '@esbuild/darwin-arm64': 0.21.5 + '@esbuild/darwin-x64': 0.21.5 + '@esbuild/freebsd-arm64': 0.21.5 + '@esbuild/freebsd-x64': 0.21.5 + '@esbuild/linux-arm': 0.21.5 + '@esbuild/linux-arm64': 0.21.5 + '@esbuild/linux-ia32': 0.21.5 + '@esbuild/linux-loong64': 0.21.5 + '@esbuild/linux-mips64el': 0.21.5 + '@esbuild/linux-ppc64': 0.21.5 + '@esbuild/linux-riscv64': 0.21.5 + '@esbuild/linux-s390x': 0.21.5 + '@esbuild/linux-x64': 0.21.5 + '@esbuild/netbsd-x64': 0.21.5 + '@esbuild/openbsd-x64': 0.21.5 + '@esbuild/sunos-x64': 0.21.5 + '@esbuild/win32-arm64': 0.21.5 + '@esbuild/win32-ia32': 0.21.5 + '@esbuild/win32-x64': 0.21.5 escalade@3.1.2: {} @@ -6194,18 +5355,6 @@ snapshots: events@3.3.0: {} - execa@8.0.1: - dependencies: - cross-spawn: 7.0.3 - get-stream: 8.0.1 - human-signals: 5.0.0 - is-stream: 3.0.0 - merge-stream: 2.0.0 - npm-run-path: 5.3.0 - onetime: 6.0.0 - signal-exit: 4.1.0 - strip-final-newline: 3.0.0 - exponential-backoff@3.1.1: {} extendable-error@0.1.7: {} @@ -6230,15 +5379,15 @@ snapshots: merge2: 1.4.1 micromatch: 4.0.7 - fast-json-stringify@5.16.0: + fast-json-stringify@5.16.1: dependencies: '@fastify/merge-json-schemas': 0.1.1 - ajv: 8.14.0 - ajv-formats: 3.0.1(ajv@8.14.0) + ajv: 8.16.0 + ajv-formats: 3.0.1(ajv@8.16.0) fast-deep-equal: 3.1.3 - fast-uri: 2.3.0 + fast-uri: 2.4.0 json-schema-ref-resolver: 1.0.1 - rfdc: 1.3.1 + rfdc: 1.4.1 fast-memoize@2.5.2: {} @@ -6248,25 +5397,25 @@ snapshots: fast-redact@3.5.0: {} - fast-uri@2.3.0: {} + fast-uri@2.4.0: {} fastify-plugin@4.5.1: {} fastify@4.23.2: dependencies: - '@fastify/ajv-compiler': 3.5.0 + '@fastify/ajv-compiler': 3.6.0 '@fastify/error': 3.4.1 '@fastify/fast-json-stringify-compiler': 4.3.0 
abstract-logging: 2.0.1 avvio: 8.3.2 fast-content-type-parse: 1.1.0 - fast-json-stringify: 5.16.0 + fast-json-stringify: 5.16.1 find-my-way: 7.7.0 light-my-request: 5.13.0 pino: 8.21.0 process-warning: 2.3.2 proxy-addr: 2.0.7 - rfdc: 1.3.1 + rfdc: 1.4.1 secure-json-parse: 2.7.0 semver: 7.6.2 toad-cache: 3.7.0 @@ -6302,18 +5451,9 @@ snapshots: locate-path: 6.0.0 path-exists: 4.0.0 - find-yarn-workspace-root2@1.2.16: - dependencies: - micromatch: 4.0.7 - pkg-dir: 4.2.0 - fn.name@1.1.0: {} - for-each@0.3.3: - dependencies: - is-callable: 1.2.7 - - foreground-child@3.1.1: + foreground-child@3.2.1: dependencies: cross-spawn: 7.0.3 signal-exit: 4.1.0 @@ -6357,15 +5497,6 @@ snapshots: function-bind@1.1.2: {} - function.prototype.name@1.1.6: - dependencies: - call-bind: 1.0.7 - define-properties: 1.2.1 - es-abstract: 1.23.3 - functions-have-names: 1.2.3 - - functions-have-names@1.2.3: {} - gauge@4.0.4: dependencies: aproba: 2.0.0 @@ -6377,40 +5508,29 @@ snapshots: strip-ansi: 6.0.1 wide-align: 1.1.5 + generate-function@2.3.1: + dependencies: + is-property: 1.0.2 + get-caller-file@2.0.5: {} get-func-name@2.0.2: {} - get-intrinsic@1.2.4: - dependencies: - es-errors: 1.3.0 - function-bind: 1.1.2 - has-proto: 1.0.3 - has-symbols: 1.0.3 - hasown: 2.0.2 - get-stdin@8.0.0: {} get-stream@6.0.1: {} - get-stream@8.0.1: {} - - get-symbol-description@1.0.2: - dependencies: - call-bind: 1.0.7 - es-errors: 1.3.0 - get-intrinsic: 1.2.4 - glob-parent@5.1.2: dependencies: is-glob: 4.0.3 - glob@10.4.1: + glob@10.4.5: dependencies: - foreground-child: 3.1.1 - jackspeak: 3.1.2 - minimatch: 9.0.4 + foreground-child: 3.2.1 + jackspeak: 3.4.3 + minimatch: 9.0.5 minipass: 7.1.2 + package-json-from-dist: 1.0.0 path-scurry: 1.11.1 glob@7.2.3: @@ -6434,11 +5554,6 @@ snapshots: dependencies: ini: 2.0.0 - globalthis@1.0.4: - dependencies: - define-properties: 1.2.1 - gopd: 1.0.1 - globby@11.1.0: dependencies: array-union: 2.1.0 @@ -6450,10 +5565,6 @@ snapshots: globrex@0.1.2: {} - gopd@1.0.1: - dependencies: - get-intrinsic: 1.2.4 - got@12.6.1: dependencies: '@sindresorhus/is': 5.6.0 @@ -6472,28 +5583,10 @@ snapshots: graceful-fs@4.2.11: {} - grapheme-splitter@1.0.4: {} - - hard-rejection@2.1.0: {} - - has-bigints@1.0.2: {} - has-flag@3.0.0: {} has-flag@4.0.0: {} - has-property-descriptors@1.0.2: - dependencies: - es-define-property: 1.0.0 - - has-proto@1.0.3: {} - - has-symbols@1.0.3: {} - - has-tostringtag@1.0.2: - dependencies: - has-symbols: 1.0.3 - has-unicode@2.0.1: {} has-yarn@3.0.0: {} @@ -6502,8 +5595,6 @@ snapshots: dependencies: function-bind: 1.1.2 - hosted-git-info@2.8.9: {} - hosted-git-info@5.2.1: dependencies: lru-cache: 7.18.3 @@ -6518,7 +5609,7 @@ snapshots: dependencies: '@tootallnate/once': 2.0.0 agent-base: 6.0.2 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.5(supports-color@5.5.0) transitivePeerDependencies: - supports-color @@ -6530,14 +5621,12 @@ snapshots: https-proxy-agent@5.0.1: dependencies: agent-base: 6.0.2 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.5(supports-color@5.5.0) transitivePeerDependencies: - supports-color human-id@1.0.2: {} - human-signals@5.0.0: {} - humanize-ms@1.2.1: dependencies: ms: 2.1.3 @@ -6549,7 +5638,6 @@ snapshots: iconv-lite@0.6.3: dependencies: safer-buffer: 2.1.2 - optional: true ieee754@1.2.1: {} @@ -6557,29 +5645,22 @@ snapshots: ignore-walk@6.0.5: dependencies: - minimatch: 9.0.4 + minimatch: 9.0.5 ignore@5.3.1: {} - import-in-the-middle@1.4.2: + import-in-the-middle@1.7.1: dependencies: - acorn: 8.11.3 - acorn-import-assertions: 1.9.0(acorn@8.11.3) + 
acorn: 8.12.1 + acorn-import-assertions: 1.9.0(acorn@8.12.1) cjs-module-lexer: 1.3.1 module-details-from-path: 1.0.3 optional: true - import-in-the-middle@1.7.4: + import-in-the-middle@1.9.0: dependencies: - acorn: 8.11.3 - acorn-import-attributes: 1.9.5(acorn@8.11.3) - cjs-module-lexer: 1.3.1 - module-details-from-path: 1.0.3 - - import-in-the-middle@1.8.0: - dependencies: - acorn: 8.11.3 - acorn-import-attributes: 1.9.5(acorn@8.11.3) + acorn: 8.12.1 + acorn-import-attributes: 1.9.5(acorn@8.12.1) cjs-module-lexer: 1.3.1 module-details-from-path: 1.0.3 @@ -6604,16 +5685,12 @@ snapshots: ini@4.1.3: {} - inquirer@9.2.22: + inquirer@9.3.5: dependencies: - '@inquirer/figures': 1.0.2 - '@ljharb/through': 2.3.13 + '@inquirer/figures': 1.0.3 ansi-escapes: 4.3.2 - chalk: 5.3.0 - cli-cursor: 3.1.0 cli-width: 4.1.0 external-editor: 3.1.0 - lodash: 4.17.21 mute-stream: 1.0.0 ora: 5.4.1 run-async: 3.0.0 @@ -6621,12 +5698,7 @@ snapshots: string-width: 4.2.3 strip-ansi: 6.0.1 wrap-ansi: 6.2.0 - - internal-slot@1.0.7: - dependencies: - es-errors: 1.3.0 - hasown: 2.0.2 - side-channel: 1.0.6 + yoctocolors-cjs: 2.1.2 ip-address@9.0.5: dependencies: @@ -6637,46 +5709,20 @@ snapshots: ipaddr.js@2.2.0: {} - is-array-buffer@3.0.4: - dependencies: - call-bind: 1.0.7 - get-intrinsic: 1.2.4 - - is-arrayish@0.2.1: {} - is-arrayish@0.3.2: {} - is-bigint@1.0.4: - dependencies: - has-bigints: 1.0.2 - is-binary-path@2.1.0: dependencies: binary-extensions: 2.3.0 - is-boolean-object@1.1.2: - dependencies: - call-bind: 1.0.7 - has-tostringtag: 1.0.2 - - is-callable@1.2.7: {} - is-ci@3.0.1: dependencies: ci-info: 3.9.0 - is-core-module@2.13.1: + is-core-module@2.14.0: dependencies: hasown: 2.0.2 - is-data-view@1.0.1: - dependencies: - is-typed-array: 1.1.13 - - is-date-object@1.0.5: - dependencies: - has-tostringtag: 1.0.2 - is-extglob@2.1.1: {} is-fullwidth-code-point@3.0.0: {} @@ -6694,59 +5740,26 @@ snapshots: is-lambda@1.0.1: {} - is-negative-zero@2.0.3: {} - is-npm@6.0.0: {} - is-number-object@1.0.7: - dependencies: - has-tostringtag: 1.0.2 - is-number@7.0.0: {} is-obj@2.0.0: {} is-path-inside@3.0.3: {} - is-plain-obj@1.1.0: {} - - is-regex@1.1.4: - dependencies: - call-bind: 1.0.7 - has-tostringtag: 1.0.2 - - is-shared-array-buffer@1.0.3: - dependencies: - call-bind: 1.0.7 + is-property@1.0.2: {} is-stream@2.0.1: {} - is-stream@3.0.0: {} - - is-string@1.0.7: - dependencies: - has-tostringtag: 1.0.2 - is-subdir@1.2.0: dependencies: better-path-resolve: 1.0.0 - is-symbol@1.0.4: - dependencies: - has-symbols: 1.0.3 - - is-typed-array@1.1.13: - dependencies: - which-typed-array: 1.1.15 - is-typedarray@1.0.0: {} is-unicode-supported@0.1.0: {} - is-weakref@1.0.2: - dependencies: - call-bind: 1.0.7 - is-windows@1.0.2: {} is-yarn-global@0.4.1: {} @@ -6755,16 +5768,14 @@ snapshots: isarray@1.0.0: {} - isarray@2.0.5: {} - isexe@2.0.0: {} ix@5.0.0: dependencies: '@types/node': 13.13.52 - tslib: 2.6.2 + tslib: 2.6.3 - jackspeak@3.1.2: + jackspeak@3.4.3: dependencies: '@isaacs/cliui': 8.0.2 optionalDependencies: @@ -6772,7 +5783,7 @@ snapshots: jju@1.4.0: {} - jose@4.15.5: {} + jose@4.15.9: {} js-tokens@4.0.0: {} @@ -6789,8 +5800,6 @@ snapshots: json-buffer@3.0.1: {} - json-parse-even-better-errors@2.3.1: {} - json-parse-even-better-errors@3.0.2: {} json-parse-helpfulerror@1.0.3: @@ -6819,8 +5828,6 @@ snapshots: dependencies: json-buffer: 3.0.1 - kind-of@6.0.3: {} - kleur@4.1.5: {} kuler@2.0.0: {} @@ -6837,17 +5844,6 @@ snapshots: process-warning: 3.0.0 set-cookie-parser: 2.6.0 - lines-and-columns@1.2.4: {} - - 
load-yaml-file@0.2.0: - dependencies: - graceful-fs: 4.2.11 - js-yaml: 3.14.1 - pify: 4.0.1 - strip-bom: 3.0.0 - - local-pkg@0.4.3: {} - locate-path@5.0.0: dependencies: p-locate: 4.1.0 @@ -6867,7 +5863,7 @@ snapshots: chalk: 4.1.2 is-unicode-supported: 0.1.0 - logform@2.6.0: + logform@2.6.1: dependencies: '@colors/colors': 1.6.0 '@types/triple-beam': 1.3.5 @@ -6876,11 +5872,9 @@ snapshots: safe-stable-stringify: 2.4.3 triple-beam: 1.4.1 - lossless-json@2.0.11: {} + long@5.2.3: {} - loupe@2.3.7: - dependencies: - get-func-name: 2.0.2 + lossless-json@2.0.11: {} loupe@3.1.1: dependencies: @@ -6888,7 +5882,7 @@ snapshots: lowercase-keys@3.0.0: {} - lru-cache@10.2.2: {} + lru-cache@10.4.3: {} lru-cache@4.1.5: dependencies: @@ -6897,9 +5891,11 @@ snapshots: lru-cache@7.18.3: {} - magic-string@0.30.10: + lru.min@1.1.1: {} + + magic-string@0.30.11: dependencies: - '@jridgewell/sourcemap-codec': 1.4.15 + '@jridgewell/sourcemap-codec': 1.5.0 make-error@1.3.6: {} @@ -6945,30 +5941,10 @@ snapshots: transitivePeerDependencies: - supports-color - map-obj@1.0.1: {} - - map-obj@4.3.0: {} - map-stream@0.1.0: {} memory-pager@1.5.0: {} - meow@6.1.1: - dependencies: - '@types/minimist': 1.2.5 - camelcase-keys: 6.2.2 - decamelize-keys: 1.1.1 - hard-rejection: 2.1.0 - minimist-options: 4.1.0 - normalize-package-data: 2.5.0 - read-pkg-up: 7.0.1 - redent: 3.0.0 - trim-newlines: 3.0.1 - type-fest: 0.13.1 - yargs-parser: 18.1.3 - - merge-stream@2.0.0: {} - merge2@1.4.1: {} micromatch@4.0.7: @@ -6978,14 +5954,10 @@ snapshots: mimic-fn@2.1.0: {} - mimic-fn@4.0.0: {} - mimic-response@3.1.0: {} mimic-response@4.0.0: {} - min-indent@1.0.1: {} - minimatch@3.1.2: dependencies: brace-expansion: 1.1.11 @@ -6994,16 +5966,10 @@ snapshots: dependencies: brace-expansion: 2.0.1 - minimatch@9.0.4: + minimatch@9.0.5: dependencies: brace-expansion: 2.0.1 - minimist-options@4.1.0: - dependencies: - arrify: 1.0.1 - is-plain-obj: 1.1.0 - kind-of: 6.0.3 - minimist@1.2.8: {} minipass-collect@1.0.2: @@ -7056,17 +6022,8 @@ snapshots: minipass: 3.3.6 yallist: 4.0.0 - mixme@0.5.10: {} - mkdirp@1.0.4: {} - mlly@1.7.0: - dependencies: - acorn: 8.11.3 - pathe: 1.1.2 - pkg-types: 1.1.1 - ufo: 1.5.3 - mnemonist@0.39.5: dependencies: obliterator: 2.0.4 @@ -7078,22 +6035,40 @@ snapshots: '@types/whatwg-url': 11.0.5 whatwg-url: 13.0.0 - mongodb@6.7.0(socks@2.8.3): + mongodb@6.8.0(socks@2.8.3): dependencies: '@mongodb-js/saslprep': 1.1.7 - bson: 6.7.0 + bson: 6.8.0 mongodb-connection-string-url: 3.0.1 optionalDependencies: socks: 2.8.3 moo@0.5.2: {} + mri@1.2.0: {} + ms@2.1.2: {} ms@2.1.3: {} mute-stream@1.0.0: {} + mysql2@3.11.3: + dependencies: + aws-ssl-profiles: 1.1.2 + denque: 2.1.0 + generate-function: 2.3.1 + iconv-lite: 0.6.3 + long: 5.2.3 + lru.min: 1.1.1 + named-placeholders: 1.1.3 + seq-queue: 0.0.5 + sqlstring: 2.3.3 + + named-placeholders@1.1.3: + dependencies: + lru-cache: 7.18.3 + nanoid@3.3.7: {} nearley@2.20.1: @@ -7132,10 +6107,10 @@ snapshots: - bluebird - supports-color - nodemon@3.1.1: + nodemon@3.1.4: dependencies: chokidar: 3.6.0 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.5(supports-color@5.5.0) ignore-by-default: 1.0.1 minimatch: 3.1.2 pstree.remy: 1.1.8 @@ -7154,17 +6129,10 @@ snapshots: dependencies: abbrev: 1.1.1 - normalize-package-data@2.5.0: - dependencies: - hosted-git-info: 2.8.9 - resolve: 1.22.8 - semver: 5.7.2 - validate-npm-package-license: 3.0.4 - normalize-package-data@5.0.0: dependencies: hosted-git-info: 6.1.1 - is-core-module: 2.13.1 + is-core-module: 2.14.0 semver: 7.6.2 validate-npm-package-license: 
3.0.4 @@ -7194,15 +6162,15 @@ snapshots: jsonlines: 0.1.1 lodash: 4.17.21 make-fetch-happen: 11.1.1 - minimatch: 9.0.4 + minimatch: 9.0.5 p-map: 4.0.0 pacote: 15.2.0 - parse-github-url: 1.0.2 + parse-github-url: 1.0.3 progress: 2.0.3 prompts-ncu: 3.0.0 rc-config-loader: 4.1.3 remote-git-tags: 3.0.0 - rimraf: 5.0.7 + rimraf: 5.0.9 semver: 7.6.2 semver-utils: 1.1.4 source-map-support: 0.5.21 @@ -7215,6 +6183,8 @@ snapshots: - bluebird - supports-color + npm-check-updates@17.1.3: {} + npm-install-checks@6.3.0: dependencies: semver: 7.6.2 @@ -7251,10 +6221,6 @@ snapshots: transitivePeerDependencies: - supports-color - npm-run-path@5.3.0: - dependencies: - path-key: 4.0.0 - npmlog@6.0.2: dependencies: are-we-there-yet: 3.0.1 @@ -7264,17 +6230,6 @@ snapshots: object-assign@4.1.1: {} - object-inspect@1.13.1: {} - - object-keys@1.1.1: {} - - object.assign@4.1.5: - dependencies: - call-bind: 1.0.7 - define-properties: 1.2.1 - has-symbols: 1.0.3 - object-keys: 1.1.1 - obliterator@2.0.4: {} on-exit-leak-free@2.1.2: {} @@ -7291,15 +6246,11 @@ snapshots: dependencies: mimic-fn: 2.1.0 - onetime@6.0.0: - dependencies: - mimic-fn: 4.0.0 - - opentelemetry-instrumentation-fetch-node@1.2.0: + opentelemetry-instrumentation-fetch-node@1.2.3(@opentelemetry/api@1.9.0): dependencies: - '@opentelemetry/api': 1.8.0 - '@opentelemetry/instrumentation': 0.43.0(@opentelemetry/api@1.8.0) - '@opentelemetry/semantic-conventions': 1.25.0 + '@opentelemetry/api': 1.9.0 + '@opentelemetry/instrumentation': 0.46.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.25.1 transitivePeerDependencies: - supports-color optional: true @@ -7334,10 +6285,6 @@ snapshots: dependencies: yocto-queue: 0.1.0 - p-limit@4.0.0: - dependencies: - yocto-queue: 1.0.0 - p-locate@4.1.0: dependencies: p-limit: 2.3.0 @@ -7354,6 +6301,8 @@ snapshots: p-try@2.2.0: {} + package-json-from-dist@1.0.0: {} + package-json@8.1.1: dependencies: got: 12.6.1 @@ -7361,6 +6310,8 @@ snapshots: registry-url: 6.0.1 semver: 7.6.2 + package-manager-detector@0.2.0: {} + pacote@15.2.0: dependencies: '@npmcli/git': 4.1.0 @@ -7385,14 +6336,7 @@ snapshots: - bluebird - supports-color - parse-github-url@1.0.2: {} - - parse-json@5.2.0: - dependencies: - '@babel/code-frame': 7.24.6 - error-ex: 1.3.2 - json-parse-even-better-errors: 2.3.1 - lines-and-columns: 1.2.4 + parse-github-url@1.0.3: {} path-exists@4.0.0: {} @@ -7400,21 +6344,17 @@ snapshots: path-key@3.1.1: {} - path-key@4.0.0: {} - path-parse@1.0.7: {} path-scurry@1.11.1: dependencies: - lru-cache: 10.2.2 + lru-cache: 10.4.3 minipass: 7.1.2 path-type@4.0.0: {} pathe@1.1.2: {} - pathval@1.1.1: {} - pathval@2.0.0: {} pause-stream@0.0.11: @@ -7442,6 +6382,8 @@ snapshots: picocolors@1.0.1: {} + picocolors@1.1.0: {} + picomatch@2.3.1: {} pify@4.0.1: {} @@ -7467,19 +6409,7 @@ snapshots: sonic-boom: 3.8.1 thread-stream: 2.7.0 - pkg-dir@4.2.0: - dependencies: - find-up: 4.1.0 - - pkg-types@1.1.1: - dependencies: - confbox: 0.1.7 - mlly: 1.7.0 - pathe: 1.1.2 - - possible-typed-array-names@1.0.0: {} - - postcss@8.4.38: + postcss@8.4.39: dependencies: nanoid: 3.3.7 picocolors: 1.0.1 @@ -7495,20 +6425,9 @@ snapshots: dependencies: xtend: 4.0.2 - preferred-pm@3.1.3: - dependencies: - find-up: 5.0.0 - find-yarn-workspace-root2: 1.2.16 - path-exists: 4.0.0 - which-pm: 2.0.0 - prettier@2.8.8: {} - pretty-format@29.7.0: - dependencies: - '@jest/schemas': 29.6.3 - ansi-styles: 5.2.0 - react-is: 18.3.1 + prettier@3.3.3: {} proc-log@3.0.0: {} @@ -7559,8 +6478,6 @@ snapshots: quick-format-unescaped@4.0.4: {} - 
quick-lru@4.0.1: {} - quick-lru@5.1.1: {} railroad-diagrams@1.0.0: {} @@ -7572,7 +6489,7 @@ snapshots: rc-config-loader@4.1.3: dependencies: - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.5(supports-color@5.5.0) js-yaml: 4.1.0 json5: 2.2.3 require-from-string: 2.0.2 @@ -7586,8 +6503,6 @@ snapshots: minimist: 1.2.8 strip-json-comments: 2.0.1 - react-is@18.3.1: {} - read-package-json-fast@3.0.2: dependencies: json-parse-even-better-errors: 3.0.2 @@ -7595,24 +6510,11 @@ snapshots: read-package-json@6.0.4: dependencies: - glob: 10.4.1 + glob: 10.4.5 json-parse-even-better-errors: 3.0.2 normalize-package-data: 5.0.0 npm-normalize-package-bin: 3.0.1 - read-pkg-up@7.0.1: - dependencies: - find-up: 4.1.0 - read-pkg: 5.2.0 - type-fest: 0.8.1 - - read-pkg@5.2.0: - dependencies: - '@types/normalize-package-data': 2.4.4 - normalize-package-data: 2.5.0 - parse-json: 5.2.0 - type-fest: 0.6.0 - read-yaml-file@1.1.0: dependencies: graceful-fs: 4.2.11 @@ -7627,6 +6529,16 @@ snapshots: isarray: 0.0.1 string_decoder: 0.10.31 + readable-stream@2.3.7: + dependencies: + core-util-is: 1.0.3 + inherits: 2.0.4 + isarray: 1.0.0 + process-nextick-args: 2.0.1 + safe-buffer: 5.1.2 + string_decoder: 1.1.1 + util-deprecate: 1.0.2 + readable-stream@2.3.8: dependencies: core-util-is: 1.0.3 @@ -7657,20 +6569,8 @@ snapshots: real-require@0.2.0: {} - redent@3.0.0: - dependencies: - indent-string: 4.0.0 - strip-indent: 3.0.0 - regenerator-runtime@0.14.1: {} - regexp.prototype.flags@1.5.2: - dependencies: - call-bind: 1.0.7 - define-properties: 1.2.1 - es-errors: 1.3.0 - set-function-name: 2.0.2 - registry-auth-token@5.0.2: dependencies: '@pnpm/npm-conf': 2.2.2 @@ -7687,21 +6587,19 @@ snapshots: require-in-the-middle@7.3.0: dependencies: - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.5(supports-color@5.5.0) module-details-from-path: 1.0.3 resolve: 1.22.8 transitivePeerDependencies: - supports-color - require-main-filename@2.0.0: {} - resolve-alpn@1.2.1: {} resolve-from@5.0.0: {} resolve@1.22.8: dependencies: - is-core-module: 2.13.1 + is-core-module: 2.14.0 path-parse: 1.0.7 supports-preserve-symlinks-flag: 1.0.0 @@ -7722,7 +6620,7 @@ snapshots: reusify@1.0.4: {} - rfdc@1.3.1: {} + rfdc@1.4.1: {} rimraf@2.7.1: dependencies: @@ -7732,30 +6630,30 @@ snapshots: dependencies: glob: 7.2.3 - rimraf@5.0.7: + rimraf@5.0.9: dependencies: - glob: 10.4.1 + glob: 10.4.5 - rollup@4.18.0: + rollup@4.18.1: dependencies: '@types/estree': 1.0.5 optionalDependencies: - '@rollup/rollup-android-arm-eabi': 4.18.0 - '@rollup/rollup-android-arm64': 4.18.0 - '@rollup/rollup-darwin-arm64': 4.18.0 - '@rollup/rollup-darwin-x64': 4.18.0 - '@rollup/rollup-linux-arm-gnueabihf': 4.18.0 - '@rollup/rollup-linux-arm-musleabihf': 4.18.0 - '@rollup/rollup-linux-arm64-gnu': 4.18.0 - '@rollup/rollup-linux-arm64-musl': 4.18.0 - '@rollup/rollup-linux-powerpc64le-gnu': 4.18.0 - '@rollup/rollup-linux-riscv64-gnu': 4.18.0 - '@rollup/rollup-linux-s390x-gnu': 4.18.0 - '@rollup/rollup-linux-x64-gnu': 4.18.0 - '@rollup/rollup-linux-x64-musl': 4.18.0 - '@rollup/rollup-win32-arm64-msvc': 4.18.0 - '@rollup/rollup-win32-ia32-msvc': 4.18.0 - '@rollup/rollup-win32-x64-msvc': 4.18.0 + '@rollup/rollup-android-arm-eabi': 4.18.1 + '@rollup/rollup-android-arm64': 4.18.1 + '@rollup/rollup-darwin-arm64': 4.18.1 + '@rollup/rollup-darwin-x64': 4.18.1 + '@rollup/rollup-linux-arm-gnueabihf': 4.18.1 + '@rollup/rollup-linux-arm-musleabihf': 4.18.1 + '@rollup/rollup-linux-arm64-gnu': 4.18.1 + '@rollup/rollup-linux-arm64-musl': 4.18.1 + '@rollup/rollup-linux-powerpc64le-gnu': 4.18.1 + 
'@rollup/rollup-linux-riscv64-gnu': 4.18.1 + '@rollup/rollup-linux-s390x-gnu': 4.18.1 + '@rollup/rollup-linux-x64-gnu': 4.18.1 + '@rollup/rollup-linux-x64-musl': 4.18.1 + '@rollup/rollup-win32-arm64-msvc': 4.18.1 + '@rollup/rollup-win32-ia32-msvc': 4.18.1 + '@rollup/rollup-win32-x64-msvc': 4.18.1 fsevents: 2.3.3 rsocket-core@1.0.0-alpha.3: {} @@ -7772,25 +6670,12 @@ snapshots: rxjs@7.8.1: dependencies: - tslib: 2.6.2 - - safe-array-concat@1.1.2: - dependencies: - call-bind: 1.0.7 - get-intrinsic: 1.2.4 - has-symbols: 1.0.3 - isarray: 2.0.5 + tslib: 2.6.3 safe-buffer@5.1.2: {} safe-buffer@5.2.1: {} - safe-regex-test@1.0.3: - dependencies: - call-bind: 1.0.7 - es-errors: 1.3.0 - is-regex: 1.1.4 - safe-regex2@2.0.0: dependencies: ret: 0.2.2 @@ -7807,30 +6692,14 @@ snapshots: semver-utils@1.1.4: {} - semver@5.7.2: {} - semver@7.6.2: {} + seq-queue@0.0.5: {} + set-blocking@2.0.0: {} set-cookie-parser@2.6.0: {} - set-function-length@1.2.2: - dependencies: - define-data-property: 1.1.4 - es-errors: 1.3.0 - function-bind: 1.1.2 - get-intrinsic: 1.2.4 - gopd: 1.0.1 - has-property-descriptors: 1.0.2 - - set-function-name@2.0.2: - dependencies: - define-data-property: 1.1.4 - es-errors: 1.3.0 - functions-have-names: 1.2.3 - has-property-descriptors: 1.0.2 - shebang-command@1.2.0: dependencies: shebang-regex: 1.0.0 @@ -7847,13 +6716,6 @@ snapshots: shimmer@1.2.1: {} - side-channel@1.0.6: - dependencies: - call-bind: 1.0.7 - es-errors: 1.3.0 - get-intrinsic: 1.2.4 - object-inspect: 1.13.1 - siginfo@2.0.0: {} signal-exit@3.0.7: {} @@ -7884,19 +6746,10 @@ snapshots: smart-buffer@4.2.0: {} - smartwrap@2.0.2: - dependencies: - array.prototype.flat: 1.3.2 - breakword: 1.0.6 - grapheme-splitter: 1.0.4 - strip-ansi: 6.0.1 - wcwidth: 1.0.1 - yargs: 15.4.1 - socks-proxy-agent@7.0.0: dependencies: agent-base: 6.0.2 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.5(supports-color@5.5.0) socks: 2.8.3 transitivePeerDependencies: - supports-color @@ -7958,6 +6811,8 @@ snapshots: sprintf-js@1.1.3: {} + sqlstring@2.3.3: {} + ssri@10.0.6: dependencies: minipass: 7.1.2 @@ -7976,10 +6831,6 @@ snapshots: dependencies: duplexer: 0.1.2 - stream-transform@2.1.3: - dependencies: - mixme: 0.5.10 - string-argv@0.3.2: {} string-width@4.2.3: @@ -7994,25 +6845,6 @@ snapshots: emoji-regex: 9.2.2 strip-ansi: 7.1.0 - string.prototype.trim@1.2.9: - dependencies: - call-bind: 1.0.7 - define-properties: 1.2.1 - es-abstract: 1.23.3 - es-object-atoms: 1.0.0 - - string.prototype.trimend@1.0.8: - dependencies: - call-bind: 1.0.7 - define-properties: 1.2.1 - es-object-atoms: 1.0.0 - - string.prototype.trimstart@1.0.8: - dependencies: - call-bind: 1.0.7 - define-properties: 1.2.1 - es-object-atoms: 1.0.0 - string_decoder@0.10.31: {} string_decoder@1.1.1: @@ -8033,20 +6865,10 @@ snapshots: strip-bom@3.0.0: {} - strip-final-newline@3.0.0: {} - - strip-indent@3.0.0: - dependencies: - min-indent: 1.0.1 - strip-json-comments@2.0.1: {} strip-json-comments@5.0.1: {} - strip-literal@1.3.0: - dependencies: - acorn: 8.11.3 - supports-color@5.5.0: dependencies: has-flag: 3.0.0 @@ -8085,17 +6907,15 @@ snapshots: through@2.3.8: {} - tinybench@2.8.0: {} + tinybench@2.9.0: {} - tinypool@0.7.0: {} + tinyexec@0.3.0: {} tinypool@1.0.1: {} tinyrainbow@1.2.0: {} - tinyspy@2.2.1: {} - - tinyspy@3.0.0: {} + tinyspy@3.0.2: {} tmp@0.0.33: dependencies: @@ -8115,13 +6935,11 @@ snapshots: tree-kill@1.2.2: {} - trim-newlines@3.0.1: {} - triple-beam@1.4.1: {} ts-codec@1.2.2: {} - ts-node-dev@2.0.0(@types/node@18.11.11)(typescript@5.2.2): + 
ts-node-dev@2.0.0(@types/node@22.5.5)(typescript@5.6.2): dependencies: chokidar: 3.6.0 dynamic-dedupe: 0.3.0 @@ -8131,61 +6949,43 @@ snapshots: rimraf: 2.7.1 source-map-support: 0.5.21 tree-kill: 1.2.2 - ts-node: 10.9.2(@types/node@18.11.11)(typescript@5.2.2) + ts-node: 10.9.2(@types/node@22.5.5)(typescript@5.6.2) tsconfig: 7.0.0 - typescript: 5.2.2 + typescript: 5.6.2 transitivePeerDependencies: - '@swc/core' - '@swc/wasm' - '@types/node' - ts-node@10.9.2(@types/node@18.11.11)(typescript@5.2.2): - dependencies: - '@cspotcode/source-map-support': 0.8.1 - '@tsconfig/node10': 1.0.11 - '@tsconfig/node12': 1.0.11 - '@tsconfig/node14': 1.0.3 - '@tsconfig/node16': 1.0.4 - '@types/node': 18.11.11 - acorn: 8.11.3 - acorn-walk: 8.3.2 - arg: 4.1.3 - create-require: 1.1.1 - diff: 4.0.2 - make-error: 1.3.6 - typescript: 5.2.2 - v8-compile-cache-lib: 3.0.1 - yn: 3.1.1 - - ts-node@10.9.2(@types/node@18.19.50)(typescript@5.2.2): + ts-node@10.9.2(@types/node@22.5.5)(typescript@5.6.2): dependencies: '@cspotcode/source-map-support': 0.8.1 '@tsconfig/node10': 1.0.11 '@tsconfig/node12': 1.0.11 '@tsconfig/node14': 1.0.3 '@tsconfig/node16': 1.0.4 - '@types/node': 18.19.50 - acorn: 8.11.3 - acorn-walk: 8.3.2 + '@types/node': 22.5.5 + acorn: 8.12.1 + acorn-walk: 8.3.3 arg: 4.1.3 create-require: 1.1.1 diff: 4.0.2 make-error: 1.3.6 - typescript: 5.2.2 + typescript: 5.6.2 v8-compile-cache-lib: 3.0.1 yn: 3.1.1 - tsc-watch@6.2.0(typescript@5.2.2): + tsc-watch@6.2.0(typescript@5.6.2): dependencies: cross-spawn: 7.0.3 node-cleanup: 2.1.2 ps-tree: 1.2.0 string-argv: 0.3.2 - typescript: 5.2.2 + typescript: 5.6.2 - tsconfck@3.0.3(typescript@5.4.5): + tsconfck@3.1.1(typescript@5.6.2): optionalDependencies: - typescript: 5.4.5 + typescript: 5.6.2 tsconfig@7.0.0: dependencies: @@ -8194,92 +6994,31 @@ snapshots: strip-bom: 3.0.0 strip-json-comments: 2.0.1 - tslib@2.6.2: {} - - tty-table@4.2.3: - dependencies: - chalk: 4.1.2 - csv: 5.5.3 - kleur: 4.1.5 - smartwrap: 2.0.2 - strip-ansi: 6.0.1 - wcwidth: 1.0.1 - yargs: 17.7.2 + tslib@2.6.3: {} tuf-js@1.1.7: dependencies: '@tufjs/models': 1.0.4 - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.7 make-fetch-happen: 11.1.1 transitivePeerDependencies: - supports-color - type-detect@4.0.8: {} - - type-fest@0.13.1: {} - type-fest@0.21.3: {} - type-fest@0.6.0: {} - - type-fest@0.8.1: {} - type-fest@1.4.0: {} type-fest@2.19.0: {} - typed-array-buffer@1.0.2: - dependencies: - call-bind: 1.0.7 - es-errors: 1.3.0 - is-typed-array: 1.1.13 - - typed-array-byte-length@1.0.1: - dependencies: - call-bind: 1.0.7 - for-each: 0.3.3 - gopd: 1.0.1 - has-proto: 1.0.3 - is-typed-array: 1.1.13 - - typed-array-byte-offset@1.0.2: - dependencies: - available-typed-arrays: 1.0.7 - call-bind: 1.0.7 - for-each: 0.3.3 - gopd: 1.0.1 - has-proto: 1.0.3 - is-typed-array: 1.1.13 - - typed-array-length@1.0.6: - dependencies: - call-bind: 1.0.7 - for-each: 0.3.3 - gopd: 1.0.1 - has-proto: 1.0.3 - is-typed-array: 1.1.13 - possible-typed-array-names: 1.0.0 - typedarray-to-buffer@3.1.5: dependencies: is-typedarray: 1.0.0 - typescript@5.2.2: {} - - typescript@5.4.5: {} - - ufo@1.5.3: {} - - unbox-primitive@1.0.2: - dependencies: - call-bind: 1.0.7 - has-bigints: 1.0.2 - has-symbols: 1.0.3 - which-boxed-primitive: 1.0.2 + typescript@5.6.2: {} undefsafe@2.0.5: {} - undici-types@5.26.5: {} + undici-types@6.19.8: {} unique-filename@2.0.1: dependencies: @@ -8341,31 +7080,12 @@ snapshots: vary@1.1.2: {} - vite-node@0.34.6(@types/node@18.11.11): - dependencies: - cac: 6.7.14 - debug: 4.3.4(supports-color@5.5.0) - mlly: 1.7.0 - 
pathe: 1.1.2 - picocolors: 1.0.1 - vite: 5.2.11(@types/node@18.11.11) - transitivePeerDependencies: - - '@types/node' - - less - - lightningcss - - sass - - stylus - - sugarss - - supports-color - - terser - - vite-node@2.0.5(@types/node@18.19.50): + vite-node@2.1.1(@types/node@22.5.5): dependencies: cac: 6.7.14 debug: 4.3.7 pathe: 1.1.2 - tinyrainbow: 1.2.0 - vite: 5.2.11(@types/node@18.19.50) + vite: 5.3.3(@types/node@22.5.5) transitivePeerDependencies: - '@types/node' - less @@ -8376,96 +7096,53 @@ snapshots: - supports-color - terser - vite-tsconfig-paths@4.3.2(typescript@5.4.5)(vite@5.2.11(@types/node@18.11.11)): + vite-tsconfig-paths@4.3.2(typescript@5.6.2)(vite@5.3.3(@types/node@22.5.5)): dependencies: - debug: 4.3.4(supports-color@5.5.0) + debug: 4.3.7 globrex: 0.1.2 - tsconfck: 3.0.3(typescript@5.4.5) + tsconfck: 3.1.1(typescript@5.6.2) optionalDependencies: - vite: 5.2.11(@types/node@18.11.11) + vite: 5.3.3(@types/node@22.5.5) transitivePeerDependencies: - supports-color - typescript - vite@5.2.11(@types/node@18.11.11): + vite@5.3.3(@types/node@22.5.5): dependencies: - esbuild: 0.20.2 - postcss: 8.4.38 - rollup: 4.18.0 + esbuild: 0.21.5 + postcss: 8.4.39 + rollup: 4.18.1 optionalDependencies: - '@types/node': 18.11.11 + '@types/node': 22.5.5 fsevents: 2.3.3 - vite@5.2.11(@types/node@18.19.50): - dependencies: - esbuild: 0.20.2 - postcss: 8.4.38 - rollup: 4.18.0 - optionalDependencies: - '@types/node': 18.19.50 - fsevents: 2.3.3 - - vitest@0.34.6: - dependencies: - '@types/chai': 4.3.16 - '@types/chai-subset': 1.3.5 - '@types/node': 18.11.11 - '@vitest/expect': 0.34.6 - '@vitest/runner': 0.34.6 - '@vitest/snapshot': 0.34.6 - '@vitest/spy': 0.34.6 - '@vitest/utils': 0.34.6 - acorn: 8.11.3 - acorn-walk: 8.3.2 - cac: 6.7.14 - chai: 4.4.1 - debug: 4.3.4(supports-color@5.5.0) - local-pkg: 0.4.3 - magic-string: 0.30.10 - pathe: 1.1.2 - picocolors: 1.0.1 - std-env: 3.7.0 - strip-literal: 1.3.0 - tinybench: 2.8.0 - tinypool: 0.7.0 - vite: 5.2.11(@types/node@18.11.11) - vite-node: 0.34.6(@types/node@18.11.11) - why-is-node-running: 2.2.2 - transitivePeerDependencies: - - less - - lightningcss - - sass - - stylus - - sugarss - - supports-color - - terser - - vitest@2.0.5(@types/node@18.19.50): + vitest@2.1.1(@types/node@22.5.5): dependencies: - '@ampproject/remapping': 2.3.0 - '@vitest/expect': 2.0.5 - '@vitest/pretty-format': 2.0.5 - '@vitest/runner': 2.0.5 - '@vitest/snapshot': 2.0.5 - '@vitest/spy': 2.0.5 - '@vitest/utils': 2.0.5 + '@vitest/expect': 2.1.1 + '@vitest/mocker': 2.1.1(@vitest/spy@2.1.1)(vite@5.3.3(@types/node@22.5.5)) + '@vitest/pretty-format': 2.1.1 + '@vitest/runner': 2.1.1 + '@vitest/snapshot': 2.1.1 + '@vitest/spy': 2.1.1 + '@vitest/utils': 2.1.1 chai: 5.1.1 debug: 4.3.7 - execa: 8.0.1 - magic-string: 0.30.10 + magic-string: 0.30.11 pathe: 1.1.2 std-env: 3.7.0 - tinybench: 2.8.0 + tinybench: 2.9.0 + tinyexec: 0.3.0 tinypool: 1.0.1 tinyrainbow: 1.2.0 - vite: 5.2.11(@types/node@18.19.50) - vite-node: 2.0.5(@types/node@18.19.50) + vite: 5.3.3(@types/node@22.5.5) + vite-node: 2.1.1(@types/node@22.5.5) why-is-node-running: 2.3.0 optionalDependencies: - '@types/node': 18.19.50 + '@types/node': 22.5.5 transitivePeerDependencies: - less - lightningcss + - msw - sass - stylus - sugarss @@ -8485,29 +7162,6 @@ snapshots: tr46: 4.1.1 webidl-conversions: 7.0.0 - which-boxed-primitive@1.0.2: - dependencies: - is-bigint: 1.0.4 - is-boolean-object: 1.1.2 - is-number-object: 1.0.7 - is-string: 1.0.7 - is-symbol: 1.0.4 - - which-module@2.0.1: {} - - which-pm@2.0.0: - dependencies: - 
load-yaml-file: 0.2.0 - path-exists: 4.0.0 - - which-typed-array@1.1.15: - dependencies: - available-typed-arrays: 1.0.7 - call-bind: 1.0.7 - for-each: 0.3.3 - gopd: 1.0.1 - has-tostringtag: 1.0.2 - which@1.3.1: dependencies: isexe: 2.0.0 @@ -8520,11 +7174,6 @@ snapshots: dependencies: isexe: 2.0.0 - why-is-node-running@2.2.2: - dependencies: - siginfo: 2.0.0 - stackback: 0.0.2 - why-is-node-running@2.3.0: dependencies: siginfo: 2.0.0 @@ -8538,25 +7187,25 @@ snapshots: dependencies: string-width: 5.1.2 - winston-transport@4.7.0: + winston-transport@4.7.1: dependencies: - logform: 2.6.0 + logform: 2.6.1 readable-stream: 3.6.2 triple-beam: 1.4.1 - winston@3.13.0: + winston@3.13.1: dependencies: '@colors/colors': 1.6.0 '@dabh/diagnostics': 2.0.3 async: 3.2.5 is-stream: 2.0.1 - logform: 2.6.0 + logform: 2.6.1 one-time: 1.0.0 readable-stream: 3.6.2 safe-stable-stringify: 2.4.3 stack-trace: 0.0.10 triple-beam: 1.4.1 - winston-transport: 4.7.0 + winston-transport: 4.7.1 wrap-ansi@6.2.0: dependencies: @@ -8585,49 +7234,26 @@ snapshots: signal-exit: 3.0.7 typedarray-to-buffer: 3.1.5 - ws@8.17.0: {} - - ws@8.2.3: {} + ws@8.18.0: {} xdg-basedir@5.1.0: {} xtend@4.0.2: {} - y18n@4.0.3: {} - y18n@5.0.8: {} yallist@2.1.2: {} yallist@4.0.0: {} - yaml@2.4.2: {} + yaml@2.4.5: {} yaml@2.5.0: {} - yargs-parser@18.1.3: - dependencies: - camelcase: 5.3.1 - decamelize: 1.2.0 - yargs-parser@20.2.9: {} yargs-parser@21.1.1: {} - yargs@15.4.1: - dependencies: - cliui: 6.0.0 - decamelize: 1.2.0 - find-up: 4.1.0 - get-caller-file: 2.0.5 - require-directory: 2.1.1 - require-main-filename: 2.0.0 - set-blocking: 2.0.0 - string-width: 4.2.3 - which-module: 2.0.1 - y18n: 4.0.3 - yargs-parser: 18.1.3 - yargs@16.2.0: dependencies: cliui: 7.0.4 @@ -8652,6 +7278,6 @@ snapshots: yocto-queue@0.1.0: {} - yocto-queue@1.0.0: {} + yoctocolors-cjs@2.1.2: {} zod@3.23.8: {} diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml index 25f0b6fc1..85bd6839d 100644 --- a/pnpm-workspace.yaml +++ b/pnpm-workspace.yaml @@ -1,6 +1,7 @@ packages: - 'packages/*' - 'libs/*' + - 'modules/*' - 'service' - 'test-client' # exclude packages that are inside test directories diff --git a/service/Dockerfile b/service/Dockerfile index 7351662af..98a893320 100644 --- a/service/Dockerfile +++ b/service/Dockerfile @@ -17,6 +17,10 @@ COPY packages/types/package.json packages/types/tsconfig.json packages/types/ COPY libs/lib-services/package.json libs/lib-services/tsconfig.json libs/lib-services/ +COPY modules/module-postgres/package.json modules/module-postgres/tsconfig.json modules/module-postgres/ +COPY modules/module-mongodb/package.json modules/module-mongodb/tsconfig.json modules/module-mongodb/ +COPY modules/module-mysql/package.json modules/module-mysql/tsconfig.json modules/module-mysql/ + RUN pnpm install --frozen-lockfile COPY service/src service/src/ @@ -26,11 +30,16 @@ COPY packages/jpgwire/src packages/jpgwire/src/ COPY packages/jpgwire/ca packages/jpgwire/ca/ COPY packages/jsonbig/src packages/jsonbig/src/ COPY packages/sync-rules/src packages/sync-rules/src/ +COPY packages/sync-rules/scripts packages/sync-rules/scripts/ COPY packages/rsocket-router/src packages/rsocket-router/src/ COPY packages/types/src packages/types/src/ COPY libs/lib-services/src libs/lib-services/src/ +COPY modules/module-postgres/src modules/module-postgres/src/ +COPY modules/module-mongodb/src modules/module-mongodb/src/ +COPY modules/module-mysql/src modules/module-mysql/src/ + RUN pnpm build:production && \ rm -rf node_modules **/node_modules && \ pnpm install 
--frozen-lockfile --prod --ignore-scripts diff --git a/service/README.md b/service/README.md index f884271c3..668d4575d 100644 --- a/service/README.md +++ b/service/README.md @@ -2,7 +2,7 @@

-*[PowerSync](https://www.powersync.com) is a sync engine for building local-first apps with instantly-responsive UI/UX and simplified state transfer. Syncs between SQLite on the client-side and Postgres, MongoDB or MySQL on the server-side.* +_[PowerSync](https://www.powersync.com) is a sync engine for building local-first apps with instantly-responsive UI/UX and simplified state transfer. Syncs between SQLite on the client-side and Postgres, MongoDB or MySQL on the server-side._ # Quick reference @@ -48,4 +48,4 @@ View [license information](https://github.com/powersync-ja/powersync-service/blo As with all Docker images, these likely also contain other software which may be under other licenses (such as Bash, etc from the base distribution, along with any direct or indirect dependencies of the primary software being contained). -As for any pre-built image usage, it is the image user's responsibility to ensure that any use of this image complies with any relevant licenses for all software contained within. \ No newline at end of file +As for any pre-built image usage, it is the image user's responsibility to ensure that any use of this image complies with any relevant licenses for all software contained within. diff --git a/service/package.json b/service/package.json index c44fcad18..10f636faa 100644 --- a/service/package.json +++ b/service/package.json @@ -16,6 +16,9 @@ "@opentelemetry/sdk-metrics": "^1.17.0", "@powersync/service-core": "workspace:*", "@powersync/lib-services-framework": "workspace:*", + "@powersync/service-module-postgres": "workspace:*", + "@powersync/service-module-mongodb": "workspace:*", + "@powersync/service-module-mysql": "workspace:*", "@powersync/service-jpgwire": "workspace:*", "@powersync/service-jsonbig": "workspace:*", "@powersync/service-rsocket-router": "workspace:*", @@ -45,8 +48,6 @@ "copyfiles": "^2.4.1", "nodemon": "^3.0.1", "npm-check-updates": "^16.14.4", - "ts-node": "^10.9.1", - "typescript": "~5.2.2", - "vitest": "^0.34.6" + "ts-node": "^10.9.1" } } diff --git a/service/src/entry.ts b/service/src/entry.ts index ae57cccd7..b4add3850 100644 --- a/service/src/entry.ts +++ b/service/src/entry.ts @@ -1,20 +1,32 @@ -import { entry, utils } from '@powersync/service-core'; -import { startServer } from './runners/server.js'; -import { startStreamWorker } from './runners/stream-worker.js'; import { container, ContainerImplementation } from '@powersync/lib-services-framework'; +import * as core from '@powersync/service-core'; + +import { startServer } from './runners/server.js'; +import { startStreamRunner } from './runners/stream-worker.js'; +import { startUnifiedRunner } from './runners/unified-runner.js'; import { createSentryReporter } from './util/alerting.js'; +import { PostgresModule } from '@powersync/service-module-postgres'; +import { MySQLModule } from '@powersync/service-module-mysql'; +import { MongoModule } from '@powersync/service-module-mongodb'; +// Initialize framework components container.registerDefaults(); container.register(ContainerImplementation.REPORTER, createSentryReporter()); +const moduleManager = new core.modules.ModuleManager(); +moduleManager.register([new PostgresModule(), new MySQLModule(), new MongoModule()]); +// This is a bit of a hack. 
Commands such as the teardown command or even migrations might +// want access to the ModuleManager in order to use modules +container.register(core.ModuleManager, moduleManager); + +// This is nice to have to avoid passing it around +container.register(core.utils.CompoundConfigCollector, new core.utils.CompoundConfigCollector()); + // Generate Commander CLI entry point program -const { execute } = entry.generateEntryProgram({ - [utils.ServiceRunner.API]: startServer, - [utils.ServiceRunner.SYNC]: startStreamWorker, - [utils.ServiceRunner.UNIFIED]: async (config: utils.RunnerConfig) => { - await startServer(config); - await startStreamWorker(config); - } +const { execute } = core.entry.generateEntryProgram({ + [core.utils.ServiceRunner.API]: startServer, + [core.utils.ServiceRunner.SYNC]: startStreamRunner, + [core.utils.ServiceRunner.UNIFIED]: startUnifiedRunner }); /** diff --git a/service/src/metrics.ts b/service/src/metrics.ts new file mode 100644 index 000000000..57ef04db0 --- /dev/null +++ b/service/src/metrics.ts @@ -0,0 +1,40 @@ +import * as core from '@powersync/service-core'; + +export enum MetricModes { + API = 'api', + REPLICATION = 'replication' +} + +export type MetricsRegistrationOptions = { + service_context: core.system.ServiceContextContainer; + modes: MetricModes[]; +}; + +export const registerMetrics = async (options: MetricsRegistrationOptions) => { + const { service_context, modes } = options; + + // This requires an instantiated bucket storage, which is only created when the lifecycle starts + service_context.lifeCycleEngine.withLifecycle(null, { + start: async () => { + const instanceId = await service_context.storageEngine.activeBucketStorage.getPowerSyncInstanceId(); + await core.metrics.Metrics.initialise({ + powersync_instance_id: instanceId, + disable_telemetry_sharing: service_context.configuration.telemetry.disable_telemetry_sharing, + internal_metrics_endpoint: service_context.configuration.telemetry.internal_service_endpoint + }); + + // TODO remove singleton + const instance = core.Metrics.getInstance(); + service_context.register(core.metrics.Metrics, instance); + + if (modes.includes(MetricModes.API)) { + instance.configureApiMetrics(); + } + + if (modes.includes(MetricModes.REPLICATION)) { + instance.configureReplicationMetrics(service_context.storageEngine.activeBucketStorage); + } + }, + stop: () => service_context.metrics!.shutdown() + }); +}; diff --git a/service/src/runners/server.ts b/service/src/runners/server.ts index 1c32da9e4..fef5cfea6 100644 --- a/service/src/runners/server.ts +++ b/service/src/runners/server.ts @@ -1,54 +1,87 @@ import cors from '@fastify/cors'; +import fastify from 'fastify'; + import { container, logger } from '@powersync/lib-services-framework'; import * as core from '@powersync/service-core'; -import fastify from 'fastify'; +import { MetricModes, registerMetrics } from '../metrics.js'; import { SocketRouter } from '../routes/router.js'; -import { PowerSyncSystem } from '../system/PowerSyncSystem.js'; + /** - * Starts an API server + * Configures the server portion on a {@link ServiceContext} */ -export async function startServer(runnerConfig: core.utils.RunnerConfig) { - logger.info('Booting'); +export function registerServerServices(serviceContext: core.system.ServiceContextContainer) { + serviceContext.register(core.routes.RouterEngine, new core.routes.RouterEngine()); + serviceContext.lifeCycleEngine.withLifecycle(serviceContext.routerEngine!, { + start: async (routerEngine) => { + await routerEngine!.start(async 
(routes) => { + const server = fastify.fastify(); - const config = await core.utils.loadConfig(runnerConfig); - const system = new PowerSyncSystem(config); + server.register(cors, { + origin: '*', + allowedHeaders: ['Content-Type', 'Authorization', 'User-Agent', 'X-User-Agent'], + exposedHeaders: ['Content-Type'], + // Cache time for preflight response + maxAge: 3600 + }); - const server = fastify.fastify(); + core.routes.configureFastifyServer(server, { + service_context: serviceContext, + routes: { api: { routes: routes.api_routes }, sync_stream: { routes: routes.stream_routes } } + }); - server.register(cors, { - origin: '*', - allowedHeaders: ['Content-Type', 'Authorization', 'User-Agent', 'X-User-Agent'], - exposedHeaders: ['Content-Type'], - // Cache time for preflight response - maxAge: 3600 - }); + core.routes.configureRSocket(SocketRouter, { + server: server.server, + service_context: serviceContext, + route_generators: routes.socket_routes + }); - core.routes.configureFastifyServer(server, { system }); - core.routes.configureRSocket(SocketRouter, { server: server.server, system }); + const { port } = serviceContext.configuration; - logger.info('Starting system'); - await system.start(); - logger.info('System started'); + await server.listen({ + host: '0.0.0.0', + port + }); - core.Metrics.getInstance().configureApiMetrics(); + logger.info(`Running on port ${port}`); - await server.listen({ - host: '0.0.0.0', - port: system.config.port + return { + onShutdown: async () => { + logger.info('Shutting down HTTP server...'); + await server.close(); + logger.info('HTTP server stopped'); + } + }; + }); + }, + stop: (routerEngine) => routerEngine!.shutDown() }); +} - container.terminationHandler.handleTerminationSignal(async () => { - logger.info('Shutting down HTTP server...'); - await server.close(); - logger.info('HTTP server stopped'); +/** + * Starts an API server + */ +export async function startServer(runnerConfig: core.utils.RunnerConfig) { + logger.info('Booting'); + + const config = await core.utils.loadConfig(runnerConfig); + const serviceContext = new core.system.ServiceContextContainer(config); + + registerServerServices(serviceContext); + + await registerMetrics({ + service_context: serviceContext, + modes: [MetricModes.API] }); - // MUST be after adding the termination handler above. 
- // This is so that the handler is run before the server's handler, allowing streams to be interrupted on exit - system.addTerminationHandler(); + const moduleManager = container.getImplementation(core.modules.ModuleManager); + await moduleManager.initialize(serviceContext); + + logger.info('Starting service...'); + + await serviceContext.lifeCycleEngine.start(); + logger.info('Service started.'); - logger.info(`Running on port ${system.config.port}`); await container.probes.ready(); // Enable in development to track memory usage: diff --git a/service/src/runners/stream-worker.ts b/service/src/runners/stream-worker.ts index 96efef381..b3c69d2a8 100644 --- a/service/src/runners/stream-worker.ts +++ b/service/src/runners/stream-worker.ts @@ -1,39 +1,47 @@ -import { migrations, replication, utils, Metrics } from '@powersync/service-core'; import { container, logger } from '@powersync/lib-services-framework'; +import * as core from '@powersync/service-core'; +import { MetricModes, registerMetrics } from '../metrics.js'; + +/** + * Configures the replication portion on a {@link serviceContext} + */ +export const registerReplicationServices = (serviceContext: core.system.ServiceContextContainer) => { + // Needs to be executed after shared registrations + const replication = new core.replication.ReplicationEngine(); + + serviceContext.register(core.replication.ReplicationEngine, replication); + serviceContext.lifeCycleEngine.withLifecycle(replication, { + start: (replication) => replication.start(), + stop: (replication) => replication.shutDown() + }); +}; -import { PowerSyncSystem } from '../system/PowerSyncSystem.js'; - -export async function startStreamWorker(runnerConfig: utils.RunnerConfig) { +export const startStreamRunner = async (runnerConfig: core.utils.RunnerConfig) => { logger.info('Booting'); - const config = await utils.loadConfig(runnerConfig); - - // Self hosted version allows for automatic migrations - if (!config.migrations?.disable_auto_migration) { - await migrations.migrate({ - direction: migrations.Direction.Up, - runner_config: runnerConfig - }); - } + const config = await core.utils.loadConfig(runnerConfig); - const system = new PowerSyncSystem(config); + await core.migrations.ensureAutomaticMigrations({ + config, + runner_config: runnerConfig + }); - logger.info('Starting system'); - await system.start(); - logger.info('System started'); + // Self hosted version allows for automatic migrations + const serviceContext = new core.system.ServiceContextContainer(config); - Metrics.getInstance().configureReplicationMetrics(system); + registerReplicationServices(serviceContext); - const mngr = new replication.WalStreamManager(system); - mngr.start(); + await registerMetrics({ + service_context: serviceContext, + modes: [MetricModes.REPLICATION] + }); - // MUST be after startServer. 
- // This is so that the handler is run before the server's handler, allowing streams to be interrupted on exit - system.addTerminationHandler(); + const moduleManager = container.getImplementation(core.modules.ModuleManager); + await moduleManager.initialize(serviceContext); - container.terminationHandler.handleTerminationSignal(async () => { - await mngr.stop(); - }); + logger.info('Starting system'); + await serviceContext.lifeCycleEngine.start(); + logger.info('System started'); await container.probes.ready(); -} +}; diff --git a/service/src/runners/unified-runner.ts b/service/src/runners/unified-runner.ts new file mode 100644 index 000000000..4997d8933 --- /dev/null +++ b/service/src/runners/unified-runner.ts @@ -0,0 +1,42 @@ +import { container, logger } from '@powersync/lib-services-framework'; +import * as core from '@powersync/service-core'; + +import { MetricModes, registerMetrics } from '../metrics.js'; +import { registerServerServices } from './server.js'; +import { registerReplicationServices } from './stream-worker.js'; + +/** + * Starts an API server + */ +export const startUnifiedRunner = async (runnerConfig: core.utils.RunnerConfig) => { + logger.info('Booting'); + + const config = await core.utils.loadConfig(runnerConfig); + + await core.migrations.ensureAutomaticMigrations({ + config, + runner_config: runnerConfig + }); + + const serviceContext = new core.system.ServiceContextContainer(config); + + registerServerServices(serviceContext); + registerReplicationServices(serviceContext); + + await registerMetrics({ + service_context: serviceContext, + modes: [MetricModes.API, MetricModes.REPLICATION] + }); + + const moduleManager = container.getImplementation(core.modules.ModuleManager); + await moduleManager.initialize(serviceContext); + + logger.info('Starting service...'); + await serviceContext.lifeCycleEngine.start(); + logger.info('Service started'); + + await container.probes.ready(); + + // Enable in development to track memory usage: + // trackMemoryUsage(); +}; diff --git a/service/src/system/PowerSyncSystem.ts b/service/src/system/PowerSyncSystem.ts deleted file mode 100644 index 93024dca7..000000000 --- a/service/src/system/PowerSyncSystem.ts +++ /dev/null @@ -1,55 +0,0 @@ -import { db, system, utils, storage, Metrics } from '@powersync/service-core'; -import * as pgwire from '@powersync/service-jpgwire'; - -export class PowerSyncSystem extends system.CorePowerSyncSystem { - storage: storage.BucketStorageFactory; - pgwire_pool?: pgwire.PgClient; - - constructor(public config: utils.ResolvedPowerSyncConfig) { - super(config); - - utils.setTags(config.metadata); - - const pgOptions = config.connection; - if (pgOptions != null) { - const pool = pgwire.connectPgWirePool(pgOptions, { - idleTimeout: 30_000 - }); - this.pgwire_pool = this.withLifecycle(pool, { - async start(pool) {}, - async stop(pool) { - await pool.end(); - } - }); - } - - if (config.storage.type == 'mongodb') { - const client = this.withLifecycle(db.mongo.createMongoClient(config.storage), { - async start(client) {}, - async stop(client) { - await client.close(); - } - }); - const database = new storage.PowerSyncMongo(client, { database: config.storage.database }); - this.storage = new storage.MongoBucketStorage(database, { - slot_name_prefix: config.slot_name_prefix - }); - } else { - throw new Error('No storage configured'); - } - - this.withLifecycle(this.storage, { - async start(storage) { - const instanceId = await storage.getPowerSyncInstanceId(); - await Metrics.initialise({ - 
powersync_instance_id: instanceId, - disable_telemetry_sharing: config.telemetry.disable_telemetry_sharing, - internal_metrics_endpoint: config.telemetry.internal_service_endpoint - }); - }, - async stop() { - await Metrics.getInstance().shutdown(); - } - }); - } -} diff --git a/service/tsconfig.json b/service/tsconfig.json index 22d065e42..1576a67cc 100644 --- a/service/tsconfig.json +++ b/service/tsconfig.json @@ -21,14 +21,23 @@ { "path": "../packages/service-core" }, + { + "path": "../packages/sync-rules" + }, + { + "path": "../packages/types" + }, { "path": "../libs/lib-services" }, { - "path": "../packages/sync-rules" + "path": "../modules/module-postgres" }, { - "path": "../packages/types" + "path": "../modules/module-mongodb" + }, + { + "path": "../modules/module-mysql" } ] } diff --git a/test-client/package.json b/test-client/package.json index 9bba6da39..4d5f3d9d4 100644 --- a/test-client/package.json +++ b/test-client/package.json @@ -20,7 +20,6 @@ "yaml": "^2.5.0" }, "devDependencies": { - "@types/node": "18.11.11", - "typescript": "^5.2.2" + "@types/node": "^22.5.5" } } diff --git a/tsconfig.json b/tsconfig.json index bce764b1d..542ecf4b2 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -16,6 +16,15 @@ { "path": "./packages/sync-rules" }, + { + "path": "./modules/module-postgres" + }, + { + "path": "./modules/module-mysql" + }, + { + "path": "./modules/module-mongodb" + }, { "path": "./libs/lib-services" },