From dc02d0bac67c6a86fc68d219f69a43c5b1a04ee6 Mon Sep 17 00:00:00 2001 From: CJ Brewer Date: Wed, 13 May 2026 16:05:13 -0600 Subject: [PATCH 1/4] feat: release prisma next extension --- .changeset/add-prisma-next-integration.md | 24 + examples/prisma/.cipherstash/context.json | 17 + examples/prisma/.env.example | 25 + examples/prisma/README.md | 92 + examples/prisma/docker-compose.yml | 33 + .../20260513T1735_initial/end-contract.d.ts | 486 ++ .../20260513T1735_initial/end-contract.json | 467 ++ .../app/20260513T1735_initial/migration.json | 493 ++ .../app/20260513T1735_initial/migration.ts | 104 + .../app/20260513T1735_initial/ops.json | 221 + .../contract.json | 1 + .../migration.json | 120 + .../20260601T0000_install_eql_bundle/ops.json | 28 + .../migrations/cipherstash/contract.d.ts | 10 + .../migrations/cipherstash/contract.json | 1 + .../migrations/cipherstash/refs/head.json | 1 + examples/prisma/package.json | 36 + examples/prisma/prisma-next.config.ts | 31 + examples/prisma/prisma/schema.prisma | 44 + examples/prisma/src/db.ts | 27 + examples/prisma/src/index.ts | 214 + examples/prisma/src/prisma/contract.d.ts | 486 ++ examples/prisma/src/prisma/contract.json | 467 ++ examples/prisma/tsconfig.json | 18 + package.json | 29 +- packages/cli/src/bin/stash.ts | 2 + packages/cli/src/commands/db/detect.ts | 40 + packages/cli/src/commands/init/index.ts | 2 + .../providers/__tests__/prisma-next.test.ts | 39 + .../commands/init/providers/prisma-next.ts | 29 + .../src/commands/init/steps/build-schema.ts | 41 +- .../src/commands/init/steps/install-deps.ts | 24 +- .../src/commands/init/steps/install-eql.ts | 14 + packages/cli/src/commands/init/types.ts | 4 +- packages/prisma-next/DEVELOPING.md | 341 + packages/prisma-next/README.md | 126 + .../end-contract.d.ts | 149 + .../end-contract.json | 102 + .../migration.json | 120 + .../migration.ts | 71 + .../20260601T0000_install_eql_bundle/ops.json | 28 + .../prisma-next/migrations/refs/head.json | 4 + 
packages/prisma-next/package.json | 109 + packages/prisma-next/prisma-next.config.ts | 36 + .../prisma-next/src/contract-authoring.ts | 200 + packages/prisma-next/src/contract.d.ts | 149 + packages/prisma-next/src/contract.json | 104 + packages/prisma-next/src/contract.prisma | 46 + packages/prisma-next/src/execution/abort.ts | 143 + .../src/execution/cell-codec-factory.ts | 291 + .../src/execution/codec-runtime.ts | 107 + .../prisma-next/src/execution/decrypt-all.ts | 244 + .../src/execution/envelope-base.ts | 308 + .../src/execution/envelope-bigint.ts | 103 + .../src/execution/envelope-boolean.ts | 45 + .../src/execution/envelope-date.ts | 108 + .../src/execution/envelope-double.ts | 57 + .../src/execution/envelope-json.ts | 53 + .../src/execution/envelope-string.ts | 78 + packages/prisma-next/src/execution/helpers.ts | 223 + .../src/execution/middleware-registry.ts | 25 + .../prisma-next/src/execution/operators.ts | 595 ++ .../src/execution/parameterized.ts | 239 + packages/prisma-next/src/execution/routing.ts | 101 + packages/prisma-next/src/execution/sdk.ts | 74 + .../prisma-next/src/exports/codec-types.ts | 10 + .../prisma-next/src/exports/column-types.ts | 240 + .../src/exports/contract-space-typing.ts | 86 + packages/prisma-next/src/exports/control.ts | 111 + .../prisma-next/src/exports/middleware.ts | 24 + packages/prisma-next/src/exports/migration.ts | 43 + .../src/exports/operation-types.ts | 16 + packages/prisma-next/src/exports/pack.ts | 13 + packages/prisma-next/src/exports/runtime.ts | 171 + packages/prisma-next/src/exports/stack.ts | 36 + .../src/extension-metadata/codec-metadata.ts | 121 + .../src/extension-metadata/constants.ts | 235 + .../src/extension-metadata/descriptor-meta.ts | 164 + .../src/middleware/bulk-encrypt.ts | 233 + .../prisma-next/src/migration/call-classes.ts | 359 + .../src/migration/cipherstash-codec.ts | 125 + .../src/migration/codec-hooks-factory.ts | 164 + .../prisma-next/src/migration/eql-bundle.ts | 29 + 
.../src/migration/eql-install.generated.ts | 5751 +++++++++++++++++ .../prisma-next/src/stack/derive-schemas.ts | 142 + packages/prisma-next/src/stack/from-stack.ts | 189 + packages/prisma-next/src/stack/sdk-adapter.ts | 214 + packages/prisma-next/src/types/codec-types.ts | 94 + .../prisma-next/src/types/operation-types.ts | 175 + packages/prisma-next/test/abort.test.ts | 397 ++ packages/prisma-next/test/authoring.test.ts | 234 + .../test/bulk-encrypt-middleware.test.ts | 533 ++ .../test/bundling-isolation.test.ts | 236 + .../prisma-next/test/call-classes.test.ts | 188 + .../test/call-classes.types.test-d.ts | 50 + .../test/cipherstash-codec-numeric.test.ts | 133 + .../cipherstash-codec-other-codecs.test.ts | 132 + .../test/cipherstash-codec-string.test.ts | 318 + .../test/cipherstash-codec.test.ts | 153 + .../prisma-next/test/codec-runtime.test.ts | 496 ++ .../prisma-next/test/column-types.test.ts | 228 + packages/prisma-next/test/decrypt-all.test.ts | 564 ++ .../prisma-next/test/derive-schemas.test.ts | 178 + packages/prisma-next/test/descriptor.test.ts | 106 + .../prisma-next/test/envelope-bigint.test.ts | 182 + .../prisma-next/test/envelope-boolean.test.ts | 105 + .../prisma-next/test/envelope-date.test.ts | 185 + .../prisma-next/test/envelope-double.test.ts | 123 + .../prisma-next/test/envelope-json.test.ts | 116 + .../prisma-next/test/envelope-string.test.ts | 223 + .../prisma-next/test/envelope.types.test-d.ts | 42 + .../test/equality-trait-removal.test.ts | 121 + .../test/from-stack-divergence.test.ts | 112 + packages/prisma-next/test/helpers.test.ts | 317 + .../prisma-next/test/helpers.types.test-d.ts | 70 + .../test/operation-types.types.test-d.ts | 253 + .../test/operator-lowering-equality.test.ts | 165 + .../operator-lowering-order-range.test.ts | 96 + .../operator-lowering-text-search.test.ts | 69 + .../test/operator-lowering.helpers.ts | 216 + .../test/operator-lowering.test.ts | 253 + .../test/psl-interpretation-numeric.test.ts | 175 + 
.../psl-interpretation-other-types.test.ts | 188 + .../test/psl-interpretation.test.ts | 309 + packages/prisma-next/test/routing.test.ts | 107 + .../test/runtime-descriptor.test.ts | 123 + packages/prisma-next/test/sdk-adapter.test.ts | 244 + packages/prisma-next/test/sdk.types.test-d.ts | 77 + packages/prisma-next/tsconfig.json | 23 + packages/prisma-next/tsup.config.ts | 21 + packages/prisma-next/vitest.config.ts | 23 + pnpm-lock.yaml | 1175 +++- pnpm-workspace.yaml | 6 + 133 files changed, 25837 insertions(+), 22 deletions(-) create mode 100644 .changeset/add-prisma-next-integration.md create mode 100644 examples/prisma/.cipherstash/context.json create mode 100644 examples/prisma/.env.example create mode 100644 examples/prisma/README.md create mode 100644 examples/prisma/docker-compose.yml create mode 100644 examples/prisma/migrations/app/20260513T1735_initial/end-contract.d.ts create mode 100644 examples/prisma/migrations/app/20260513T1735_initial/end-contract.json create mode 100644 examples/prisma/migrations/app/20260513T1735_initial/migration.json create mode 100755 examples/prisma/migrations/app/20260513T1735_initial/migration.ts create mode 100644 examples/prisma/migrations/app/20260513T1735_initial/ops.json create mode 100644 examples/prisma/migrations/cipherstash/20260601T0000_install_eql_bundle/contract.json create mode 100644 examples/prisma/migrations/cipherstash/20260601T0000_install_eql_bundle/migration.json create mode 100644 examples/prisma/migrations/cipherstash/20260601T0000_install_eql_bundle/ops.json create mode 100644 examples/prisma/migrations/cipherstash/contract.d.ts create mode 100644 examples/prisma/migrations/cipherstash/contract.json create mode 100644 examples/prisma/migrations/cipherstash/refs/head.json create mode 100644 examples/prisma/package.json create mode 100644 examples/prisma/prisma-next.config.ts create mode 100644 examples/prisma/prisma/schema.prisma create mode 100644 examples/prisma/src/db.ts create mode 100644 
examples/prisma/src/index.ts create mode 100644 examples/prisma/src/prisma/contract.d.ts create mode 100644 examples/prisma/src/prisma/contract.json create mode 100644 examples/prisma/tsconfig.json create mode 100644 packages/cli/src/commands/init/providers/__tests__/prisma-next.test.ts create mode 100644 packages/cli/src/commands/init/providers/prisma-next.ts create mode 100644 packages/prisma-next/DEVELOPING.md create mode 100644 packages/prisma-next/README.md create mode 100644 packages/prisma-next/migrations/20260601T0000_install_eql_bundle/end-contract.d.ts create mode 100644 packages/prisma-next/migrations/20260601T0000_install_eql_bundle/end-contract.json create mode 100644 packages/prisma-next/migrations/20260601T0000_install_eql_bundle/migration.json create mode 100755 packages/prisma-next/migrations/20260601T0000_install_eql_bundle/migration.ts create mode 100644 packages/prisma-next/migrations/20260601T0000_install_eql_bundle/ops.json create mode 100644 packages/prisma-next/migrations/refs/head.json create mode 100644 packages/prisma-next/package.json create mode 100644 packages/prisma-next/prisma-next.config.ts create mode 100644 packages/prisma-next/src/contract-authoring.ts create mode 100644 packages/prisma-next/src/contract.d.ts create mode 100644 packages/prisma-next/src/contract.json create mode 100644 packages/prisma-next/src/contract.prisma create mode 100644 packages/prisma-next/src/execution/abort.ts create mode 100644 packages/prisma-next/src/execution/cell-codec-factory.ts create mode 100644 packages/prisma-next/src/execution/codec-runtime.ts create mode 100644 packages/prisma-next/src/execution/decrypt-all.ts create mode 100644 packages/prisma-next/src/execution/envelope-base.ts create mode 100644 packages/prisma-next/src/execution/envelope-bigint.ts create mode 100644 packages/prisma-next/src/execution/envelope-boolean.ts create mode 100644 packages/prisma-next/src/execution/envelope-date.ts create mode 100644 
packages/prisma-next/src/execution/envelope-double.ts create mode 100644 packages/prisma-next/src/execution/envelope-json.ts create mode 100644 packages/prisma-next/src/execution/envelope-string.ts create mode 100644 packages/prisma-next/src/execution/helpers.ts create mode 100644 packages/prisma-next/src/execution/middleware-registry.ts create mode 100644 packages/prisma-next/src/execution/operators.ts create mode 100644 packages/prisma-next/src/execution/parameterized.ts create mode 100644 packages/prisma-next/src/execution/routing.ts create mode 100644 packages/prisma-next/src/execution/sdk.ts create mode 100644 packages/prisma-next/src/exports/codec-types.ts create mode 100644 packages/prisma-next/src/exports/column-types.ts create mode 100644 packages/prisma-next/src/exports/contract-space-typing.ts create mode 100644 packages/prisma-next/src/exports/control.ts create mode 100644 packages/prisma-next/src/exports/middleware.ts create mode 100644 packages/prisma-next/src/exports/migration.ts create mode 100644 packages/prisma-next/src/exports/operation-types.ts create mode 100644 packages/prisma-next/src/exports/pack.ts create mode 100644 packages/prisma-next/src/exports/runtime.ts create mode 100644 packages/prisma-next/src/exports/stack.ts create mode 100644 packages/prisma-next/src/extension-metadata/codec-metadata.ts create mode 100644 packages/prisma-next/src/extension-metadata/constants.ts create mode 100644 packages/prisma-next/src/extension-metadata/descriptor-meta.ts create mode 100644 packages/prisma-next/src/middleware/bulk-encrypt.ts create mode 100644 packages/prisma-next/src/migration/call-classes.ts create mode 100644 packages/prisma-next/src/migration/cipherstash-codec.ts create mode 100644 packages/prisma-next/src/migration/codec-hooks-factory.ts create mode 100644 packages/prisma-next/src/migration/eql-bundle.ts create mode 100644 packages/prisma-next/src/migration/eql-install.generated.ts create mode 100644 
packages/prisma-next/src/stack/derive-schemas.ts create mode 100644 packages/prisma-next/src/stack/from-stack.ts create mode 100644 packages/prisma-next/src/stack/sdk-adapter.ts create mode 100644 packages/prisma-next/src/types/codec-types.ts create mode 100644 packages/prisma-next/src/types/operation-types.ts create mode 100644 packages/prisma-next/test/abort.test.ts create mode 100644 packages/prisma-next/test/authoring.test.ts create mode 100644 packages/prisma-next/test/bulk-encrypt-middleware.test.ts create mode 100644 packages/prisma-next/test/bundling-isolation.test.ts create mode 100644 packages/prisma-next/test/call-classes.test.ts create mode 100644 packages/prisma-next/test/call-classes.types.test-d.ts create mode 100644 packages/prisma-next/test/cipherstash-codec-numeric.test.ts create mode 100644 packages/prisma-next/test/cipherstash-codec-other-codecs.test.ts create mode 100644 packages/prisma-next/test/cipherstash-codec-string.test.ts create mode 100644 packages/prisma-next/test/cipherstash-codec.test.ts create mode 100644 packages/prisma-next/test/codec-runtime.test.ts create mode 100644 packages/prisma-next/test/column-types.test.ts create mode 100644 packages/prisma-next/test/decrypt-all.test.ts create mode 100644 packages/prisma-next/test/derive-schemas.test.ts create mode 100644 packages/prisma-next/test/descriptor.test.ts create mode 100644 packages/prisma-next/test/envelope-bigint.test.ts create mode 100644 packages/prisma-next/test/envelope-boolean.test.ts create mode 100644 packages/prisma-next/test/envelope-date.test.ts create mode 100644 packages/prisma-next/test/envelope-double.test.ts create mode 100644 packages/prisma-next/test/envelope-json.test.ts create mode 100644 packages/prisma-next/test/envelope-string.test.ts create mode 100644 packages/prisma-next/test/envelope.types.test-d.ts create mode 100644 packages/prisma-next/test/equality-trait-removal.test.ts create mode 100644 packages/prisma-next/test/from-stack-divergence.test.ts 
create mode 100644 packages/prisma-next/test/helpers.test.ts create mode 100644 packages/prisma-next/test/helpers.types.test-d.ts create mode 100644 packages/prisma-next/test/operation-types.types.test-d.ts create mode 100644 packages/prisma-next/test/operator-lowering-equality.test.ts create mode 100644 packages/prisma-next/test/operator-lowering-order-range.test.ts create mode 100644 packages/prisma-next/test/operator-lowering-text-search.test.ts create mode 100644 packages/prisma-next/test/operator-lowering.helpers.ts create mode 100644 packages/prisma-next/test/operator-lowering.test.ts create mode 100644 packages/prisma-next/test/psl-interpretation-numeric.test.ts create mode 100644 packages/prisma-next/test/psl-interpretation-other-types.test.ts create mode 100644 packages/prisma-next/test/psl-interpretation.test.ts create mode 100644 packages/prisma-next/test/routing.test.ts create mode 100644 packages/prisma-next/test/runtime-descriptor.test.ts create mode 100644 packages/prisma-next/test/sdk-adapter.test.ts create mode 100644 packages/prisma-next/test/sdk.types.test-d.ts create mode 100644 packages/prisma-next/tsconfig.json create mode 100644 packages/prisma-next/tsup.config.ts create mode 100644 packages/prisma-next/vitest.config.ts diff --git a/.changeset/add-prisma-next-integration.md b/.changeset/add-prisma-next-integration.md new file mode 100644 index 00000000..1a0cec41 --- /dev/null +++ b/.changeset/add-prisma-next-integration.md @@ -0,0 +1,24 @@ +--- +"@cipherstash/prisma-next": minor +"stash": minor +--- + +Add `@cipherstash/prisma-next` — searchable application-layer encryption for Postgres with Prisma Next. The framework's migration system installs the EQL bundle in the same `prisma-next migration apply` sweep that creates the application schema; no separate `stash db install` step. 
+ +**`@cipherstash/prisma-next` (new package, initial release)** + +- **Six encrypted column types** — `EncryptedString`, `EncryptedDouble`, `EncryptedBigInt`, `EncryptedDate`, `EncryptedBoolean`, `EncryptedJson` — declared via PSL constructors (`cipherstash.Encrypted*()`) or TS factories (`encryptedString()`, etc.). +- **17 query operators** — 13 predicate operators surfaced as column methods (`cipherstashEq`, `cipherstashIlike`, `cipherstashGt`, `cipherstashBetween`, `cipherstashInArray`, `cipherstashJsonbPathExists`, …) and 4 free-standing helpers (`cipherstashAsc`, `cipherstashDesc`, `cipherstashJsonbPathQueryFirst`, `cipherstashJsonbGet`). +- **Per-codec search-mode flags** (`equality`, `freeTextSearch`, `orderAndRange`, `searchableJson`) drive the EQL search-config indices the codec lifecycle hook emits at migration time. Defaults to `true` across the board. +- **One-call setup** via `cipherstashFromStack({ contractJson })` from `@cipherstash/prisma-next/stack` — derives the stack `encryptedTable` / `encryptedColumn` schemas from `contract.json` (single source of truth, no duplicate hand-written declarations), constructs the `@cipherstash/stack` `EncryptionClient`, builds the framework-native `CipherstashSdk` adapter, and returns ready-to-spread `{ extensions, middleware, encryptionClient }` for `postgres({...})`. +- **Layered API** — `deriveStackSchemas(contractJson)` and `createCipherstashSdk(client, schemas)` exposed as primitives for advanced users (custom keysets, multi-tenant routing, non-stack KMS). +- **Bulk-encrypt middleware** (`bulkEncryptMiddleware(sdk)`) coalesces every plaintext placeholder across a query into one `bulkEncrypt` SDK round-trip per `(table, column)` group. `decryptAll(rows)` does the symmetric coalescing on the read side. 
+- **Misconfig diagnostic** — if the user constructs the runtime descriptor but forgets to register `bulkEncryptMiddleware(sdk)` against the same SDK, the codec's encode throws a `RUNTIME.ENCODE_FAILED` envelope with a copy-pasteable wiring snippet at the first encrypted write. +- **Subpath exports** — `./stack`, `./control`, `./runtime`, `./middleware`, `./pack`, `./column-types`; tree-shakable along the control / runtime / middleware seams. +- **Contributes an EQL contract space** — installs the `eql_v2` schema, `eql_v2_encrypted` composite type, `ore_*` types, EQL functions / operators / casts via the cipherstash extension's baseline migration. Runs in the same control-plane sweep as the application schema. +- **Full docs**: https://cipherstash.com/docs/stack/cipherstash/encryption/prisma-next. + +**`stash` (new feature)** + +- **`stash init --prisma-next`** — new init provider for Prisma Next projects. Reuses `authenticate` + `resolve-database` + `install-deps` (additionally installs `@cipherstash/prisma-next`), skips `install-eql` (the framework handles it via `prisma-next migration apply`) and `build-schema` (`cipherstashFromStack` derives schemas from the contract — no hand-written encryption client file). Detected automatically when a `prisma-next.config.*` or `@cipherstash/prisma-next` dependency is present in the project. +- **`detectPrismaNext(cwd)`** — new export from `commands/db/detect.ts` mirroring the existing `detectDrizzle` / `detectSupabase` helpers. 
diff --git a/examples/prisma/.cipherstash/context.json b/examples/prisma/.cipherstash/context.json new file mode 100644 index 00000000..c3d1261b --- /dev/null +++ b/examples/prisma/.cipherstash/context.json @@ -0,0 +1,17 @@ +{ + "cliVersion": "0.14.0", + "integration": "prisma-next", + "encryptionClientPath": "./src/encryption/index.ts", + "packageManager": "npm", + "installCommand": "npm install @cipherstash/stack", + "envKeys": [ + "CS_CLIENT_ACCESS_KEY", + "CS_CLIENT_ID", + "CS_CLIENT_KEY", + "CS_WORKSPACE_CRN", + "DATABASE_URL" + ], + "schemas": [], + "installedSkills": [], + "generatedAt": "2026-05-13T18:16:34.922Z" +} diff --git a/examples/prisma/.env.example b/examples/prisma/.env.example new file mode 100644 index 00000000..e21c34f9 --- /dev/null +++ b/examples/prisma/.env.example @@ -0,0 +1,25 @@ +# Postgres connection. The database must have the EQL bundle +# installed; `pnpm migration:apply` (after `pnpm migration:plan`) +# installs it for you alongside the application schema. +# +# Defaults match the bundled `docker-compose.yml`. Run +# `docker compose up -d` from this directory to start a Postgres on +# port 5544 with these credentials. +DATABASE_URL=postgres://postgres:postgres@localhost:5544/cipherstash_prisma_example + +# CipherStash workspace credentials — **deployment only**. +# +# For local development, run `stash auth login` once. The PKCE flow +# stores per-developer credentials in your OS keychain, and the +# `@cipherstash/stack` `EncryptionClient` picks them up automatically. +# No CS_* env vars needed. +# +# Set the four values below only when you're deploying — production +# servers and CI runners are machine accounts with no human at the +# keyboard, so they use static credentials provisioned via the +# CipherStash dashboard (Settings → Access Keys). 
+# +# CS_WORKSPACE_CRN= +# CS_CLIENT_ID= +# CS_CLIENT_KEY= +# CS_CLIENT_ACCESS_KEY= diff --git a/examples/prisma/README.md b/examples/prisma/README.md new file mode 100644 index 00000000..4dfdffa7 --- /dev/null +++ b/examples/prisma/README.md @@ -0,0 +1,92 @@ +# @cipherstash/prisma-next example + +End-to-end demo of [`@cipherstash/prisma-next`](../../packages/prisma-next/README.md): searchable application-layer encryption for Postgres with [Prisma Next](https://www.npmjs.com/package/@prisma-next/cli), using [`@cipherstash/stack`](../../packages/stack/README.md) as the encryption SDK. + +A single `User` model with one column per cipherstash codec (string, double, bigint, date, boolean, JSON), exercised end-to-end: insert, equality, free-text search, range, between, in-array, sort, and `decryptAll`-amortised read. + +📖 See the [Prisma Next encryption docs](https://cipherstash.com/docs/stack/cipherstash/encryption/prisma-next) for the full operator reference, security model, and known limitations. + +## Layout + +| Path | Purpose | +| -------------------------- | --------------------------------------------------------------------------------------------- | +| `docker-compose.yml` | Local Postgres 16 on port 5544. | +| `prisma/schema.prisma` | Application schema (one `User` model exercising all six cipherstash codecs). | +| `prisma-next.config.ts` | Wires `cipherstash` into `extensionPacks`. | +| `src/db.ts` | One-call setup via `cipherstashFromStack({ contractJson })`. | +| `src/index.ts` | The demo flow. | +| `src/prisma/contract.*` | Emitted by `pnpm emit`. | +| `migrations/` | Emitted by `pnpm migration:plan`. | + +## Prerequisites + +1. **Docker** for the bundled Postgres on port 5544 (or any Postgres 16+). +2. **A CipherStash workspace** — sign up at [cipherstash.com](https://cipherstash.com), then run `stash auth login` (PKCE; caches credentials in your OS keychain — no `CS_*` env vars needed in local dev). 
+ +## Run it + +```bash +cp .env.example .env # DATABASE_URL points at the bundled Postgres +stash auth login # one-time, per developer + +docker compose up -d +pnpm install +pnpm emit # PSL → contract.{json,d.ts} +pnpm migration:plan --name initial +pnpm migration:apply # installs EQL bundle + your app schema in one sweep +pnpm start # runs the demo +``` + +Teardown: + +```bash +docker compose down -v +``` + +Or, to just verify the example typechecks and emits a valid contract (no database, no workspace): + +```bash +pnpm install && pnpm emit && pnpm typecheck +``` + +## Expected output + +```text +--- Insert (mixed-codec round-trip) --- +Inserted 4 rows across six cipherstash codecs. + +--- cipherstashEq (string equality) --- +Found 1 row(s) for alice@example.com. + user-0: alice@example.com + +--- cipherstashIlike (string free-text-search) --- +Found 3 row(s) matching %@example.com. + user-0: alice@example.com + user-1: bob@example.com + user-2: carol@example.com + +--- cipherstashGt (double order-and-range) --- +Found 2 user(s) with salary > 100,000. + user-1: salary=110000 + user-3: salary=145000 + +--- cipherstashBetween (date order-and-range) --- +Found 3 user(s) born between 1985 and 1995. + +--- cipherstashInArray (bigint equality) --- +Found 2 user(s) whose accountId is in the supplied array. + +--- cipherstashInArray (boolean equality-only) --- +Found 3 user(s) with emailVerified = true. + +--- cipherstashAsc (bare-column ORDER BY) --- + user-0: email=alice@example.com + user-1: email=bob@example.com + user-2: email=carol@example.com + user-3: email=dave@otherorg.test +``` + +## References + +- 📖 [Prisma Next encryption docs](https://cipherstash.com/docs/stack/cipherstash/encryption/prisma-next) — the canonical reference. +- [`@cipherstash/prisma-next` package README](../../packages/prisma-next/README.md) — install, subpath exports, quick start. 
diff --git a/examples/prisma/docker-compose.yml b/examples/prisma/docker-compose.yml new file mode 100644 index 00000000..49b0dd3b --- /dev/null +++ b/examples/prisma/docker-compose.yml @@ -0,0 +1,33 @@ +# Local Postgres for the @cipherstash/prisma-next example. +# +# Usage: +# docker compose up -d # start +# docker compose down -v # stop + delete volume (fresh state) +# +# The DATABASE_URL in .env.example matches the values below: +# postgres://postgres:postgres@localhost:5544/cipherstash_prisma_example +# +# Port 5544 (not 5432) is used to avoid colliding with any host-side +# Postgres / other example containers. + +services: + postgres: + image: postgres:16 + container_name: cipherstash-prisma-example-pg + restart: unless-stopped + environment: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + POSTGRES_DB: cipherstash_prisma_example + ports: + - "5544:5432" + volumes: + - cipherstash-prisma-example-pg-data:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres -d cipherstash_prisma_example"] + interval: 2s + timeout: 5s + retries: 30 + +volumes: + cipherstash-prisma-example-pg-data: diff --git a/examples/prisma/migrations/app/20260513T1735_initial/end-contract.d.ts b/examples/prisma/migrations/app/20260513T1735_initial/end-contract.d.ts new file mode 100644 index 00000000..6de5bc0a --- /dev/null +++ b/examples/prisma/migrations/app/20260513T1735_initial/end-contract.d.ts @@ -0,0 +1,486 @@ +// ⚠️ GENERATED FILE - DO NOT EDIT +// This file is automatically generated by 'prisma-next contract emit'. 
+// To regenerate, run: prisma-next contract emit +import type { CodecTypes as PgTypes } from '@prisma-next/target-postgres/codec-types'; +import type { JsonValue } from '@prisma-next/target-postgres/codec-types'; +import type { Char } from '@prisma-next/target-postgres/codec-types'; +import type { Varchar } from '@prisma-next/target-postgres/codec-types'; +import type { Numeric } from '@prisma-next/target-postgres/codec-types'; +import type { Bit } from '@prisma-next/target-postgres/codec-types'; +import type { VarBit } from '@prisma-next/target-postgres/codec-types'; +import type { Timestamp } from '@prisma-next/target-postgres/codec-types'; +import type { Timestamptz } from '@prisma-next/target-postgres/codec-types'; +import type { Time } from '@prisma-next/target-postgres/codec-types'; +import type { Timetz } from '@prisma-next/target-postgres/codec-types'; +import type { Interval } from '@prisma-next/target-postgres/codec-types'; +import type { CodecTypes as CipherstashTypes } from '@prisma-next/extension-cipherstash/codec-types'; +import type { EncryptedString } from '@prisma-next/extension-cipherstash/runtime'; +import type { EncryptedDouble } from '@prisma-next/extension-cipherstash/runtime'; +import type { EncryptedBigInt } from '@prisma-next/extension-cipherstash/runtime'; +import type { EncryptedDate } from '@prisma-next/extension-cipherstash/runtime'; +import type { EncryptedBoolean } from '@prisma-next/extension-cipherstash/runtime'; +import type { EncryptedJson } from '@prisma-next/extension-cipherstash/runtime'; +import type { QueryOperationTypes as PgAdapterQueryOps } from '@prisma-next/adapter-postgres/operation-types'; +import type { QueryOperationTypes as CipherstashQueryOperationTypes } from '@prisma-next/extension-cipherstash/operation-types'; + +import type { + ContractWithTypeMaps, + TypeMaps as TypeMapsType, +} from '@prisma-next/sql-contract/types'; +import type { + Contract as ContractType, + ExecutionHashBase, + ProfileHashBase, + 
StorageHashBase, +} from '@prisma-next/contract/types'; + +export type StorageHash = + StorageHashBase<'sha256:7475191ce0d78258ce5586265bcdfd12202f5daf90690b902890e58eb7508373'>; +export type ExecutionHash = ExecutionHashBase; +export type ProfileHash = + ProfileHashBase<'sha256:1a8dbe044289f30a1de958fe800cc5a8378b285d2e126a8c44b58864bac2c18e'>; + +export type CodecTypes = PgTypes & CipherstashTypes; +export type LaneCodecTypes = CodecTypes; +export type QueryOperationTypes = PgAdapterQueryOps & + CipherstashQueryOperationTypes; +type DefaultLiteralValue = CodecId extends keyof CodecTypes + ? CodecTypes[CodecId]['output'] + : _Encoded; + +export type FieldOutputTypes = { + readonly User: { + readonly id: CodecTypes['pg/text@1']['output']; + readonly email: CodecTypes['cipherstash/string@1']['output']; + readonly salary: CodecTypes['cipherstash/double@1']['output']; + readonly accountId: CodecTypes['cipherstash/bigint@1']['output']; + readonly birthday: CodecTypes['cipherstash/date@1']['output']; + readonly emailVerified: CodecTypes['cipherstash/boolean@1']['output']; + readonly preferences: CodecTypes['cipherstash/json@1']['output']; + }; +}; +export type FieldInputTypes = { + readonly User: { + readonly id: CodecTypes['pg/text@1']['input']; + readonly email: CodecTypes['cipherstash/string@1']['input']; + readonly salary: CodecTypes['cipherstash/double@1']['input']; + readonly accountId: CodecTypes['cipherstash/bigint@1']['input']; + readonly birthday: CodecTypes['cipherstash/date@1']['input']; + readonly emailVerified: CodecTypes['cipherstash/boolean@1']['input']; + readonly preferences: CodecTypes['cipherstash/json@1']['input']; + }; +}; +export type TypeMaps = TypeMapsType< + CodecTypes, + QueryOperationTypes, + FieldOutputTypes, + FieldInputTypes +>; + +type ContractBase = ContractType< + { + readonly tables: { + readonly users: { + columns: { + readonly id: { + readonly nativeType: 'text'; + readonly codecId: 'pg/text@1'; + readonly nullable: false; + }; + 
readonly email: { + readonly nativeType: 'eql_v2_encrypted'; + readonly codecId: 'cipherstash/string@1'; + readonly nullable: false; + readonly typeParams: { + readonly equality: true; + readonly freeTextSearch: true; + readonly orderAndRange: true; + }; + }; + readonly salary: { + readonly nativeType: 'eql_v2_encrypted'; + readonly codecId: 'cipherstash/double@1'; + readonly nullable: false; + readonly typeParams: { readonly equality: true; readonly orderAndRange: true }; + }; + readonly accountid: { + readonly nativeType: 'eql_v2_encrypted'; + readonly codecId: 'cipherstash/bigint@1'; + readonly nullable: false; + readonly typeParams: { readonly equality: true; readonly orderAndRange: true }; + }; + readonly birthday: { + readonly nativeType: 'eql_v2_encrypted'; + readonly codecId: 'cipherstash/date@1'; + readonly nullable: false; + readonly typeParams: { readonly equality: true; readonly orderAndRange: true }; + }; + readonly emailverified: { + readonly nativeType: 'eql_v2_encrypted'; + readonly codecId: 'cipherstash/boolean@1'; + readonly nullable: false; + readonly typeParams: { readonly equality: true }; + }; + readonly preferences: { + readonly nativeType: 'eql_v2_encrypted'; + readonly codecId: 'cipherstash/json@1'; + readonly nullable: false; + readonly typeParams: { readonly searchableJson: true }; + }; + }; + primaryKey: { readonly columns: readonly ['id'] }; + uniques: readonly []; + indexes: readonly []; + foreignKeys: readonly []; + }; + }; + readonly types: Record; + readonly storageHash: StorageHash; + }, + { + readonly User: { + readonly fields: { + readonly id: { + readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/text@1' }; + }; + readonly email: { + readonly nullable: false; + readonly type: { + readonly kind: 'scalar'; + readonly codecId: 'cipherstash/string@1'; + readonly typeParams: { + readonly equality: true; + readonly freeTextSearch: true; + readonly orderAndRange: true; + }; + }; + }; + readonly 
salary: { + readonly nullable: false; + readonly type: { + readonly kind: 'scalar'; + readonly codecId: 'cipherstash/double@1'; + readonly typeParams: { readonly equality: true; readonly orderAndRange: true }; + }; + }; + readonly accountId: { + readonly nullable: false; + readonly type: { + readonly kind: 'scalar'; + readonly codecId: 'cipherstash/bigint@1'; + readonly typeParams: { readonly equality: true; readonly orderAndRange: true }; + }; + }; + readonly birthday: { + readonly nullable: false; + readonly type: { + readonly kind: 'scalar'; + readonly codecId: 'cipherstash/date@1'; + readonly typeParams: { readonly equality: true; readonly orderAndRange: true }; + }; + }; + readonly emailVerified: { + readonly nullable: false; + readonly type: { + readonly kind: 'scalar'; + readonly codecId: 'cipherstash/boolean@1'; + readonly typeParams: { readonly equality: true }; + }; + }; + readonly preferences: { + readonly nullable: false; + readonly type: { + readonly kind: 'scalar'; + readonly codecId: 'cipherstash/json@1'; + readonly typeParams: { readonly searchableJson: true }; + }; + }; + }; + readonly relations: Record; + readonly storage: { + readonly table: 'users'; + readonly fields: { + readonly id: { readonly column: 'id' }; + readonly email: { readonly column: 'email' }; + readonly salary: { readonly column: 'salary' }; + readonly accountId: { readonly column: 'accountid' }; + readonly birthday: { readonly column: 'birthday' }; + readonly emailVerified: { readonly column: 'emailverified' }; + readonly preferences: { readonly column: 'preferences' }; + }; + }; + }; + } +> & { + readonly target: 'postgres'; + readonly targetFamily: 'sql'; + readonly roots: { readonly users: 'User' }; + readonly capabilities: { + readonly postgres: { + readonly jsonAgg: true; + readonly lateral: true; + readonly limit: true; + readonly orderBy: true; + readonly returning: true; + }; + readonly sql: { + readonly defaultInInsert: true; + readonly enums: true; + readonly 
returning: true; + }; + }; + readonly extensionPacks: { + readonly cipherstash: { + readonly familyId: 'sql'; + readonly id: 'cipherstash'; + readonly kind: 'extension'; + readonly targetId: 'postgres'; + readonly types: { + readonly codecTypes: { + readonly codecInstances: readonly [ + { + readonly descriptor: { + readonly codecId: 'cipherstash/string@1'; + readonly factory: unknown; + readonly isParameterized: false; + readonly meta: { + readonly db: { + readonly sql: { + readonly postgres: { readonly nativeType: 'eql_v2_encrypted' }; + }; + }; + }; + readonly paramsSchema: { + readonly '~standard': { + readonly validate: unknown; + readonly vendor: 'cipherstash'; + readonly version: 1; + }; + }; + readonly renderOutputType: unknown; + readonly targetTypes: readonly ['eql_v2_encrypted']; + readonly traits: readonly [ + 'cipherstash:equality', + 'cipherstash:free-text-search', + 'cipherstash:order-and-range', + ]; + }; + }, + { + readonly descriptor: { + readonly codecId: 'cipherstash/double@1'; + readonly factory: unknown; + readonly isParameterized: false; + readonly meta: { + readonly db: { + readonly sql: { + readonly postgres: { readonly nativeType: 'eql_v2_encrypted' }; + }; + }; + }; + readonly paramsSchema: { + readonly '~standard': { + readonly validate: unknown; + readonly vendor: 'cipherstash'; + readonly version: 1; + }; + }; + readonly renderOutputType: unknown; + readonly targetTypes: readonly ['eql_v2_encrypted']; + readonly traits: readonly ['cipherstash:equality', 'cipherstash:order-and-range']; + }; + }, + { + readonly descriptor: { + readonly codecId: 'cipherstash/bigint@1'; + readonly factory: unknown; + readonly isParameterized: false; + readonly meta: { + readonly db: { + readonly sql: { + readonly postgres: { readonly nativeType: 'eql_v2_encrypted' }; + }; + }; + }; + readonly paramsSchema: { + readonly '~standard': { + readonly validate: unknown; + readonly vendor: 'cipherstash'; + readonly version: 1; + }; + }; + readonly renderOutputType: 
unknown; + readonly targetTypes: readonly ['eql_v2_encrypted']; + readonly traits: readonly ['cipherstash:equality', 'cipherstash:order-and-range']; + }; + }, + { + readonly descriptor: { + readonly codecId: 'cipherstash/date@1'; + readonly factory: unknown; + readonly isParameterized: false; + readonly meta: { + readonly db: { + readonly sql: { + readonly postgres: { readonly nativeType: 'eql_v2_encrypted' }; + }; + }; + }; + readonly paramsSchema: { + readonly '~standard': { + readonly validate: unknown; + readonly vendor: 'cipherstash'; + readonly version: 1; + }; + }; + readonly renderOutputType: unknown; + readonly targetTypes: readonly ['eql_v2_encrypted']; + readonly traits: readonly ['cipherstash:equality', 'cipherstash:order-and-range']; + }; + }, + { + readonly descriptor: { + readonly codecId: 'cipherstash/boolean@1'; + readonly factory: unknown; + readonly isParameterized: false; + readonly meta: { + readonly db: { + readonly sql: { + readonly postgres: { readonly nativeType: 'eql_v2_encrypted' }; + }; + }; + }; + readonly paramsSchema: { + readonly '~standard': { + readonly validate: unknown; + readonly vendor: 'cipherstash'; + readonly version: 1; + }; + }; + readonly renderOutputType: unknown; + readonly targetTypes: readonly ['eql_v2_encrypted']; + readonly traits: readonly ['cipherstash:equality']; + }; + }, + { + readonly descriptor: { + readonly codecId: 'cipherstash/json@1'; + readonly factory: unknown; + readonly isParameterized: false; + readonly meta: { + readonly db: { + readonly sql: { + readonly postgres: { readonly nativeType: 'eql_v2_encrypted' }; + }; + }; + }; + readonly paramsSchema: { + readonly '~standard': { + readonly validate: unknown; + readonly vendor: 'cipherstash'; + readonly version: 1; + }; + }; + readonly renderOutputType: unknown; + readonly targetTypes: readonly ['eql_v2_encrypted']; + readonly traits: readonly ['cipherstash:searchable-json']; + }; + }, + ]; + readonly import: { + readonly alias: 'CipherstashTypes'; + 
readonly named: 'CodecTypes'; + readonly package: '@prisma-next/extension-cipherstash/codec-types'; + }; + readonly typeImports: readonly [ + { + readonly alias: 'EncryptedString'; + readonly named: 'EncryptedString'; + readonly package: '@prisma-next/extension-cipherstash/runtime'; + }, + { + readonly alias: 'EncryptedDouble'; + readonly named: 'EncryptedDouble'; + readonly package: '@prisma-next/extension-cipherstash/runtime'; + }, + { + readonly alias: 'EncryptedBigInt'; + readonly named: 'EncryptedBigInt'; + readonly package: '@prisma-next/extension-cipherstash/runtime'; + }, + { + readonly alias: 'EncryptedDate'; + readonly named: 'EncryptedDate'; + readonly package: '@prisma-next/extension-cipherstash/runtime'; + }, + { + readonly alias: 'EncryptedBoolean'; + readonly named: 'EncryptedBoolean'; + readonly package: '@prisma-next/extension-cipherstash/runtime'; + }, + { + readonly alias: 'EncryptedJson'; + readonly named: 'EncryptedJson'; + readonly package: '@prisma-next/extension-cipherstash/runtime'; + }, + ]; + }; + readonly queryOperationTypes: { + readonly import: { + readonly alias: 'CipherstashQueryOperationTypes'; + readonly named: 'QueryOperationTypes'; + readonly package: '@prisma-next/extension-cipherstash/operation-types'; + }; + }; + readonly storage: readonly [ + { + readonly familyId: 'sql'; + readonly nativeType: 'eql_v2_encrypted'; + readonly targetId: 'postgres'; + readonly typeId: 'cipherstash/string@1'; + }, + { + readonly familyId: 'sql'; + readonly nativeType: 'eql_v2_encrypted'; + readonly targetId: 'postgres'; + readonly typeId: 'cipherstash/double@1'; + }, + { + readonly familyId: 'sql'; + readonly nativeType: 'eql_v2_encrypted'; + readonly targetId: 'postgres'; + readonly typeId: 'cipherstash/bigint@1'; + }, + { + readonly familyId: 'sql'; + readonly nativeType: 'eql_v2_encrypted'; + readonly targetId: 'postgres'; + readonly typeId: 'cipherstash/date@1'; + }, + { + readonly familyId: 'sql'; + readonly nativeType: 'eql_v2_encrypted'; + 
readonly targetId: 'postgres'; + readonly typeId: 'cipherstash/boolean@1'; + }, + { + readonly familyId: 'sql'; + readonly nativeType: 'eql_v2_encrypted'; + readonly targetId: 'postgres'; + readonly typeId: 'cipherstash/json@1'; + }, + ]; + }; + readonly version: '0.0.1'; + }; + }; + readonly meta: {}; + + readonly profileHash: ProfileHash; +}; + +export type Contract = ContractWithTypeMaps; + +export type Tables = Contract['storage']['tables']; +export type Models = Contract['models']; diff --git a/examples/prisma/migrations/app/20260513T1735_initial/end-contract.json b/examples/prisma/migrations/app/20260513T1735_initial/end-contract.json new file mode 100644 index 00000000..d71e5d28 --- /dev/null +++ b/examples/prisma/migrations/app/20260513T1735_initial/end-contract.json @@ -0,0 +1,467 @@ +{ + "schemaVersion": "1", + "targetFamily": "sql", + "target": "postgres", + "profileHash": "sha256:1a8dbe044289f30a1de958fe800cc5a8378b285d2e126a8c44b58864bac2c18e", + "roots": { + "users": "User" + }, + "models": { + "User": { + "fields": { + "accountId": { + "nullable": false, + "type": { + "codecId": "cipherstash/bigint@1", + "kind": "scalar", + "typeParams": { + "equality": true, + "orderAndRange": true + } + } + }, + "birthday": { + "nullable": false, + "type": { + "codecId": "cipherstash/date@1", + "kind": "scalar", + "typeParams": { + "equality": true, + "orderAndRange": true + } + } + }, + "email": { + "nullable": false, + "type": { + "codecId": "cipherstash/string@1", + "kind": "scalar", + "typeParams": { + "equality": true, + "freeTextSearch": true, + "orderAndRange": true + } + } + }, + "emailVerified": { + "nullable": false, + "type": { + "codecId": "cipherstash/boolean@1", + "kind": "scalar", + "typeParams": { + "equality": true + } + } + }, + "id": { + "nullable": false, + "type": { + "codecId": "pg/text@1", + "kind": "scalar" + } + }, + "preferences": { + "nullable": false, + "type": { + "codecId": "cipherstash/json@1", + "kind": "scalar", + "typeParams": { + 
"searchableJson": true + } + } + }, + "salary": { + "nullable": false, + "type": { + "codecId": "cipherstash/double@1", + "kind": "scalar", + "typeParams": { + "equality": true, + "orderAndRange": true + } + } + } + }, + "relations": {}, + "storage": { + "fields": { + "accountId": { + "column": "accountid" + }, + "birthday": { + "column": "birthday" + }, + "email": { + "column": "email" + }, + "emailVerified": { + "column": "emailverified" + }, + "id": { + "column": "id" + }, + "preferences": { + "column": "preferences" + }, + "salary": { + "column": "salary" + } + }, + "table": "users" + } + } + }, + "storage": { + "storageHash": "sha256:7475191ce0d78258ce5586265bcdfd12202f5daf90690b902890e58eb7508373", + "tables": { + "users": { + "columns": { + "accountid": { + "codecId": "cipherstash/bigint@1", + "nativeType": "eql_v2_encrypted", + "nullable": false, + "typeParams": { + "equality": true, + "orderAndRange": true + } + }, + "birthday": { + "codecId": "cipherstash/date@1", + "nativeType": "eql_v2_encrypted", + "nullable": false, + "typeParams": { + "equality": true, + "orderAndRange": true + } + }, + "email": { + "codecId": "cipherstash/string@1", + "nativeType": "eql_v2_encrypted", + "nullable": false, + "typeParams": { + "equality": true, + "freeTextSearch": true, + "orderAndRange": true + } + }, + "emailverified": { + "codecId": "cipherstash/boolean@1", + "nativeType": "eql_v2_encrypted", + "nullable": false, + "typeParams": { + "equality": true + } + }, + "id": { + "codecId": "pg/text@1", + "nativeType": "text", + "nullable": false + }, + "preferences": { + "codecId": "cipherstash/json@1", + "nativeType": "eql_v2_encrypted", + "nullable": false, + "typeParams": { + "searchableJson": true + } + }, + "salary": { + "codecId": "cipherstash/double@1", + "nativeType": "eql_v2_encrypted", + "nullable": false, + "typeParams": { + "equality": true, + "orderAndRange": true + } + } + }, + "foreignKeys": [], + "indexes": [], + "primaryKey": { + "columns": [ + "id" + ] + 
}, + "uniques": [] + } + } + }, + "capabilities": { + "postgres": { + "jsonAgg": true, + "lateral": true, + "limit": true, + "orderBy": true, + "returning": true + }, + "sql": { + "defaultInInsert": true, + "enums": true, + "returning": true + } + }, + "extensionPacks": { + "cipherstash": { + "familyId": "sql", + "id": "cipherstash", + "kind": "extension", + "targetId": "postgres", + "types": { + "codecTypes": { + "codecInstances": [ + { + "descriptor": { + "codecId": "cipherstash/string@1", + "meta": { + "db": { + "sql": { + "postgres": { + "nativeType": "eql_v2_encrypted" + } + } + } + }, + "paramsSchema": { + "~standard": { + "vendor": "cipherstash", + "version": 1 + } + }, + "targetTypes": [ + "eql_v2_encrypted" + ], + "traits": [ + "cipherstash:equality", + "cipherstash:free-text-search", + "cipherstash:order-and-range" + ] + } + }, + { + "descriptor": { + "codecId": "cipherstash/double@1", + "meta": { + "db": { + "sql": { + "postgres": { + "nativeType": "eql_v2_encrypted" + } + } + } + }, + "paramsSchema": { + "~standard": { + "vendor": "cipherstash", + "version": 1 + } + }, + "targetTypes": [ + "eql_v2_encrypted" + ], + "traits": [ + "cipherstash:equality", + "cipherstash:order-and-range" + ] + } + }, + { + "descriptor": { + "codecId": "cipherstash/bigint@1", + "meta": { + "db": { + "sql": { + "postgres": { + "nativeType": "eql_v2_encrypted" + } + } + } + }, + "paramsSchema": { + "~standard": { + "vendor": "cipherstash", + "version": 1 + } + }, + "targetTypes": [ + "eql_v2_encrypted" + ], + "traits": [ + "cipherstash:equality", + "cipherstash:order-and-range" + ] + } + }, + { + "descriptor": { + "codecId": "cipherstash/date@1", + "meta": { + "db": { + "sql": { + "postgres": { + "nativeType": "eql_v2_encrypted" + } + } + } + }, + "paramsSchema": { + "~standard": { + "vendor": "cipherstash", + "version": 1 + } + }, + "targetTypes": [ + "eql_v2_encrypted" + ], + "traits": [ + "cipherstash:equality", + "cipherstash:order-and-range" + ] + } + }, + { + 
"descriptor": { + "codecId": "cipherstash/boolean@1", + "meta": { + "db": { + "sql": { + "postgres": { + "nativeType": "eql_v2_encrypted" + } + } + } + }, + "paramsSchema": { + "~standard": { + "vendor": "cipherstash", + "version": 1 + } + }, + "targetTypes": [ + "eql_v2_encrypted" + ], + "traits": [ + "cipherstash:equality" + ] + } + }, + { + "descriptor": { + "codecId": "cipherstash/json@1", + "meta": { + "db": { + "sql": { + "postgres": { + "nativeType": "eql_v2_encrypted" + } + } + } + }, + "paramsSchema": { + "~standard": { + "vendor": "cipherstash", + "version": 1 + } + }, + "targetTypes": [ + "eql_v2_encrypted" + ], + "traits": [ + "cipherstash:searchable-json" + ] + } + } + ], + "import": { + "alias": "CipherstashTypes", + "named": "CodecTypes", + "package": "@prisma-next/extension-cipherstash/codec-types" + }, + "typeImports": [ + { + "alias": "EncryptedString", + "named": "EncryptedString", + "package": "@prisma-next/extension-cipherstash/runtime" + }, + { + "alias": "EncryptedDouble", + "named": "EncryptedDouble", + "package": "@prisma-next/extension-cipherstash/runtime" + }, + { + "alias": "EncryptedBigInt", + "named": "EncryptedBigInt", + "package": "@prisma-next/extension-cipherstash/runtime" + }, + { + "alias": "EncryptedDate", + "named": "EncryptedDate", + "package": "@prisma-next/extension-cipherstash/runtime" + }, + { + "alias": "EncryptedBoolean", + "named": "EncryptedBoolean", + "package": "@prisma-next/extension-cipherstash/runtime" + }, + { + "alias": "EncryptedJson", + "named": "EncryptedJson", + "package": "@prisma-next/extension-cipherstash/runtime" + } + ] + }, + "queryOperationTypes": { + "import": { + "alias": "CipherstashQueryOperationTypes", + "named": "QueryOperationTypes", + "package": "@prisma-next/extension-cipherstash/operation-types" + } + }, + "storage": [ + { + "familyId": "sql", + "nativeType": "eql_v2_encrypted", + "targetId": "postgres", + "typeId": "cipherstash/string@1" + }, + { + "familyId": "sql", + "nativeType": 
"eql_v2_encrypted", + "targetId": "postgres", + "typeId": "cipherstash/double@1" + }, + { + "familyId": "sql", + "nativeType": "eql_v2_encrypted", + "targetId": "postgres", + "typeId": "cipherstash/bigint@1" + }, + { + "familyId": "sql", + "nativeType": "eql_v2_encrypted", + "targetId": "postgres", + "typeId": "cipherstash/date@1" + }, + { + "familyId": "sql", + "nativeType": "eql_v2_encrypted", + "targetId": "postgres", + "typeId": "cipherstash/boolean@1" + }, + { + "familyId": "sql", + "nativeType": "eql_v2_encrypted", + "targetId": "postgres", + "typeId": "cipherstash/json@1" + } + ] + }, + "version": "0.0.1" + } + }, + "meta": {}, + "_generated": { + "warning": "⚠️ GENERATED FILE - DO NOT EDIT", + "message": "This file is automatically generated by \"prisma-next contract emit\".", + "regenerate": "To regenerate, run: prisma-next contract emit" + } +} \ No newline at end of file diff --git a/examples/prisma/migrations/app/20260513T1735_initial/migration.json b/examples/prisma/migrations/app/20260513T1735_initial/migration.json new file mode 100644 index 00000000..0e892cf4 --- /dev/null +++ b/examples/prisma/migrations/app/20260513T1735_initial/migration.json @@ -0,0 +1,493 @@ +{ + "from": null, + "to": "sha256:7475191ce0d78258ce5586265bcdfd12202f5daf90690b902890e58eb7508373", + "fromContract": null, + "toContract": { + "schemaVersion": "1", + "targetFamily": "sql", + "target": "postgres", + "profileHash": "sha256:1a8dbe044289f30a1de958fe800cc5a8378b285d2e126a8c44b58864bac2c18e", + "roots": { + "users": "User" + }, + "models": { + "User": { + "fields": { + "accountId": { + "nullable": false, + "type": { + "codecId": "cipherstash/bigint@1", + "kind": "scalar", + "typeParams": { + "equality": true, + "orderAndRange": true + } + } + }, + "birthday": { + "nullable": false, + "type": { + "codecId": "cipherstash/date@1", + "kind": "scalar", + "typeParams": { + "equality": true, + "orderAndRange": true + } + } + }, + "email": { + "nullable": false, + "type": { + 
"codecId": "cipherstash/string@1", + "kind": "scalar", + "typeParams": { + "equality": true, + "freeTextSearch": true, + "orderAndRange": true + } + } + }, + "emailVerified": { + "nullable": false, + "type": { + "codecId": "cipherstash/boolean@1", + "kind": "scalar", + "typeParams": { + "equality": true + } + } + }, + "id": { + "nullable": false, + "type": { + "codecId": "pg/text@1", + "kind": "scalar" + } + }, + "preferences": { + "nullable": false, + "type": { + "codecId": "cipherstash/json@1", + "kind": "scalar", + "typeParams": { + "searchableJson": true + } + } + }, + "salary": { + "nullable": false, + "type": { + "codecId": "cipherstash/double@1", + "kind": "scalar", + "typeParams": { + "equality": true, + "orderAndRange": true + } + } + } + }, + "relations": {}, + "storage": { + "fields": { + "accountId": { + "column": "accountid" + }, + "birthday": { + "column": "birthday" + }, + "email": { + "column": "email" + }, + "emailVerified": { + "column": "emailverified" + }, + "id": { + "column": "id" + }, + "preferences": { + "column": "preferences" + }, + "salary": { + "column": "salary" + } + }, + "table": "users" + } + } + }, + "storage": { + "storageHash": "sha256:7475191ce0d78258ce5586265bcdfd12202f5daf90690b902890e58eb7508373", + "tables": { + "users": { + "columns": { + "accountid": { + "codecId": "cipherstash/bigint@1", + "nativeType": "eql_v2_encrypted", + "nullable": false, + "typeParams": { + "equality": true, + "orderAndRange": true + } + }, + "birthday": { + "codecId": "cipherstash/date@1", + "nativeType": "eql_v2_encrypted", + "nullable": false, + "typeParams": { + "equality": true, + "orderAndRange": true + } + }, + "email": { + "codecId": "cipherstash/string@1", + "nativeType": "eql_v2_encrypted", + "nullable": false, + "typeParams": { + "equality": true, + "freeTextSearch": true, + "orderAndRange": true + } + }, + "emailverified": { + "codecId": "cipherstash/boolean@1", + "nativeType": "eql_v2_encrypted", + "nullable": false, + "typeParams": { + 
"equality": true + } + }, + "id": { + "codecId": "pg/text@1", + "nativeType": "text", + "nullable": false + }, + "preferences": { + "codecId": "cipherstash/json@1", + "nativeType": "eql_v2_encrypted", + "nullable": false, + "typeParams": { + "searchableJson": true + } + }, + "salary": { + "codecId": "cipherstash/double@1", + "nativeType": "eql_v2_encrypted", + "nullable": false, + "typeParams": { + "equality": true, + "orderAndRange": true + } + } + }, + "foreignKeys": [], + "indexes": [], + "primaryKey": { + "columns": [ + "id" + ] + }, + "uniques": [] + } + } + }, + "capabilities": { + "postgres": { + "jsonAgg": true, + "lateral": true, + "limit": true, + "orderBy": true, + "returning": true + }, + "sql": { + "defaultInInsert": true, + "enums": true, + "returning": true + } + }, + "extensionPacks": { + "cipherstash": { + "familyId": "sql", + "id": "cipherstash", + "kind": "extension", + "targetId": "postgres", + "types": { + "codecTypes": { + "codecInstances": [ + { + "descriptor": { + "codecId": "cipherstash/string@1", + "meta": { + "db": { + "sql": { + "postgres": { + "nativeType": "eql_v2_encrypted" + } + } + } + }, + "paramsSchema": { + "~standard": { + "vendor": "cipherstash", + "version": 1 + } + }, + "targetTypes": [ + "eql_v2_encrypted" + ], + "traits": [ + "cipherstash:equality", + "cipherstash:free-text-search", + "cipherstash:order-and-range" + ] + } + }, + { + "descriptor": { + "codecId": "cipherstash/double@1", + "meta": { + "db": { + "sql": { + "postgres": { + "nativeType": "eql_v2_encrypted" + } + } + } + }, + "paramsSchema": { + "~standard": { + "vendor": "cipherstash", + "version": 1 + } + }, + "targetTypes": [ + "eql_v2_encrypted" + ], + "traits": [ + "cipherstash:equality", + "cipherstash:order-and-range" + ] + } + }, + { + "descriptor": { + "codecId": "cipherstash/bigint@1", + "meta": { + "db": { + "sql": { + "postgres": { + "nativeType": "eql_v2_encrypted" + } + } + } + }, + "paramsSchema": { + "~standard": { + "vendor": "cipherstash", + 
"version": 1 + } + }, + "targetTypes": [ + "eql_v2_encrypted" + ], + "traits": [ + "cipherstash:equality", + "cipherstash:order-and-range" + ] + } + }, + { + "descriptor": { + "codecId": "cipherstash/date@1", + "meta": { + "db": { + "sql": { + "postgres": { + "nativeType": "eql_v2_encrypted" + } + } + } + }, + "paramsSchema": { + "~standard": { + "vendor": "cipherstash", + "version": 1 + } + }, + "targetTypes": [ + "eql_v2_encrypted" + ], + "traits": [ + "cipherstash:equality", + "cipherstash:order-and-range" + ] + } + }, + { + "descriptor": { + "codecId": "cipherstash/boolean@1", + "meta": { + "db": { + "sql": { + "postgres": { + "nativeType": "eql_v2_encrypted" + } + } + } + }, + "paramsSchema": { + "~standard": { + "vendor": "cipherstash", + "version": 1 + } + }, + "targetTypes": [ + "eql_v2_encrypted" + ], + "traits": [ + "cipherstash:equality" + ] + } + }, + { + "descriptor": { + "codecId": "cipherstash/json@1", + "meta": { + "db": { + "sql": { + "postgres": { + "nativeType": "eql_v2_encrypted" + } + } + } + }, + "paramsSchema": { + "~standard": { + "vendor": "cipherstash", + "version": 1 + } + }, + "targetTypes": [ + "eql_v2_encrypted" + ], + "traits": [ + "cipherstash:searchable-json" + ] + } + } + ], + "import": { + "alias": "CipherstashTypes", + "named": "CodecTypes", + "package": "@prisma-next/extension-cipherstash/codec-types" + }, + "typeImports": [ + { + "alias": "EncryptedString", + "named": "EncryptedString", + "package": "@prisma-next/extension-cipherstash/runtime" + }, + { + "alias": "EncryptedDouble", + "named": "EncryptedDouble", + "package": "@prisma-next/extension-cipherstash/runtime" + }, + { + "alias": "EncryptedBigInt", + "named": "EncryptedBigInt", + "package": "@prisma-next/extension-cipherstash/runtime" + }, + { + "alias": "EncryptedDate", + "named": "EncryptedDate", + "package": "@prisma-next/extension-cipherstash/runtime" + }, + { + "alias": "EncryptedBoolean", + "named": "EncryptedBoolean", + "package": 
"@prisma-next/extension-cipherstash/runtime" + }, + { + "alias": "EncryptedJson", + "named": "EncryptedJson", + "package": "@prisma-next/extension-cipherstash/runtime" + } + ] + }, + "queryOperationTypes": { + "import": { + "alias": "CipherstashQueryOperationTypes", + "named": "QueryOperationTypes", + "package": "@prisma-next/extension-cipherstash/operation-types" + } + }, + "storage": [ + { + "familyId": "sql", + "nativeType": "eql_v2_encrypted", + "targetId": "postgres", + "typeId": "cipherstash/string@1" + }, + { + "familyId": "sql", + "nativeType": "eql_v2_encrypted", + "targetId": "postgres", + "typeId": "cipherstash/double@1" + }, + { + "familyId": "sql", + "nativeType": "eql_v2_encrypted", + "targetId": "postgres", + "typeId": "cipherstash/bigint@1" + }, + { + "familyId": "sql", + "nativeType": "eql_v2_encrypted", + "targetId": "postgres", + "typeId": "cipherstash/date@1" + }, + { + "familyId": "sql", + "nativeType": "eql_v2_encrypted", + "targetId": "postgres", + "typeId": "cipherstash/boolean@1" + }, + { + "familyId": "sql", + "nativeType": "eql_v2_encrypted", + "targetId": "postgres", + "typeId": "cipherstash/json@1" + } + ] + }, + "version": "0.0.1" + } + }, + "meta": {}, + "_generated": { + "warning": "⚠️ GENERATED FILE - DO NOT EDIT", + "message": "This file is automatically generated by \"prisma-next contract emit\".", + "regenerate": "To regenerate, run: prisma-next contract emit" + } + }, + "hints": { + "used": [], + "applied": [], + "plannerVersion": "2.0.0" + }, + "labels": [], + "createdAt": "2026-05-13T17:35:47.440Z", + "providedInvariants": [ + "cipherstash-codec:users.accountid:add-search-config:ore@v1", + "cipherstash-codec:users.accountid:add-search-config:unique@v1", + "cipherstash-codec:users.birthday:add-search-config:ore@v1", + "cipherstash-codec:users.birthday:add-search-config:unique@v1", + "cipherstash-codec:users.email:add-search-config:match@v1", + "cipherstash-codec:users.email:add-search-config:ore@v1", + 
"cipherstash-codec:users.email:add-search-config:unique@v1", + "cipherstash-codec:users.emailverified:add-search-config:unique@v1", + "cipherstash-codec:users.preferences:add-search-config:ste_vec@v1", + "cipherstash-codec:users.salary:add-search-config:ore@v1", + "cipherstash-codec:users.salary:add-search-config:unique@v1" + ], + "migrationHash": "sha256:9ea9b8e790665ce11265339be522ed4baba54d446036386c80fa589196d5f645" +} \ No newline at end of file diff --git a/examples/prisma/migrations/app/20260513T1735_initial/migration.ts b/examples/prisma/migrations/app/20260513T1735_initial/migration.ts new file mode 100755 index 00000000..7ff5588f --- /dev/null +++ b/examples/prisma/migrations/app/20260513T1735_initial/migration.ts @@ -0,0 +1,104 @@ +#!/usr/bin/env -S node +import { cipherstashAddSearchConfig } from '@prisma-next/extension-cipherstash/migration'; +import { Migration, MigrationCLI, createTable } from '@prisma-next/target-postgres/migration'; + +export default class M extends Migration { + override describe() { + return { + from: null, + to: 'sha256:7475191ce0d78258ce5586265bcdfd12202f5daf90690b902890e58eb7508373', + }; + } + + override get operations() { + return [ + createTable( + 'public', + 'users', + [ + { + name: 'accountid', + typeSql: 'eql_v2_encrypted', + defaultSql: '', + nullable: false, + }, + { + name: 'birthday', + typeSql: 'eql_v2_encrypted', + defaultSql: '', + nullable: false, + }, + { name: 'email', typeSql: 'eql_v2_encrypted', defaultSql: '', nullable: false }, + { + name: 'emailverified', + typeSql: 'eql_v2_encrypted', + defaultSql: '', + nullable: false, + }, + { name: 'id', typeSql: 'text', defaultSql: '', nullable: false }, + { + name: 'preferences', + typeSql: 'eql_v2_encrypted', + defaultSql: '', + nullable: false, + }, + { name: 'salary', typeSql: 'eql_v2_encrypted', defaultSql: '', nullable: false }, + ], + { columns: ['id'] }, + ), + cipherstashAddSearchConfig({ + table: 'users', + column: 'accountid', + index: 'unique', + castAs: 
'big_int', + }), + cipherstashAddSearchConfig({ + table: 'users', + column: 'accountid', + index: 'ore', + castAs: 'big_int', + }), + cipherstashAddSearchConfig({ + table: 'users', + column: 'birthday', + index: 'unique', + castAs: 'date', + }), + cipherstashAddSearchConfig({ + table: 'users', + column: 'birthday', + index: 'ore', + castAs: 'date', + }), + cipherstashAddSearchConfig({ table: 'users', column: 'email', index: 'unique' }), + cipherstashAddSearchConfig({ table: 'users', column: 'email', index: 'match' }), + cipherstashAddSearchConfig({ table: 'users', column: 'email', index: 'ore' }), + cipherstashAddSearchConfig({ + table: 'users', + column: 'emailverified', + index: 'unique', + castAs: 'boolean', + }), + cipherstashAddSearchConfig({ + table: 'users', + column: 'preferences', + index: 'ste_vec', + castAs: 'jsonb', + }), + cipherstashAddSearchConfig({ + table: 'users', + column: 'salary', + index: 'unique', + castAs: 'double', + }), + cipherstashAddSearchConfig({ + table: 'users', + column: 'salary', + index: 'ore', + castAs: 'double', + }), + ]; + } +} + +MigrationCLI.run(import.meta.url, M); diff --git a/examples/prisma/migrations/app/20260513T1735_initial/ops.json b/examples/prisma/migrations/app/20260513T1735_initial/ops.json new file mode 100644 index 00000000..26ba7f09 --- /dev/null +++ b/examples/prisma/migrations/app/20260513T1735_initial/ops.json @@ -0,0 +1,221 @@ +[ + { + "id": "table.users", + "label": "Create table \"users\"", + "summary": "Creates table \"users\"", + "operationClass": "additive", + "target": { + "id": "postgres", + "details": { + "schema": "public", + "objectType": "table", + "name": "users" + } + }, + "precheck": [ + { + "description": "ensure table \"users\" does not exist", + "sql": "SELECT to_regclass('\"public\".\"users\"') IS NULL" + } + ], + "execute": [ + { + "description": "create table \"users\"", + "sql": "CREATE TABLE \"public\".\"users\" (\n \"accountid\" eql_v2_encrypted NOT NULL,\n \"birthday\" 
eql_v2_encrypted NOT NULL,\n \"email\" eql_v2_encrypted NOT NULL,\n \"emailverified\" eql_v2_encrypted NOT NULL,\n \"id\" text NOT NULL,\n \"preferences\" eql_v2_encrypted NOT NULL,\n \"salary\" eql_v2_encrypted NOT NULL,\n PRIMARY KEY (\"id\")\n)" + } + ], + "postcheck": [ + { + "description": "verify table \"users\" exists", + "sql": "SELECT to_regclass('\"public\".\"users\"') IS NOT NULL" + } + ] + }, + { + "id": "cipherstash-codec.users.accountid.add-search-config.unique", + "label": "Enable cipherstash search on users.accountid", + "operationClass": "additive", + "invariantId": "cipherstash-codec:users.accountid:add-search-config:unique@v1", + "target": { + "id": "postgres" + }, + "precheck": [], + "execute": [ + { + "description": "Register cipherstash unique search config for users.accountid", + "sql": "SELECT eql_v2.add_search_config('users', 'accountid', 'unique', 'big_int');" + } + ], + "postcheck": [] + }, + { + "id": "cipherstash-codec.users.accountid.add-search-config.ore", + "label": "Enable cipherstash search on users.accountid", + "operationClass": "additive", + "invariantId": "cipherstash-codec:users.accountid:add-search-config:ore@v1", + "target": { + "id": "postgres" + }, + "precheck": [], + "execute": [ + { + "description": "Register cipherstash ore search config for users.accountid", + "sql": "SELECT eql_v2.add_search_config('users', 'accountid', 'ore', 'big_int');" + } + ], + "postcheck": [] + }, + { + "id": "cipherstash-codec.users.birthday.add-search-config.unique", + "label": "Enable cipherstash search on users.birthday", + "operationClass": "additive", + "invariantId": "cipherstash-codec:users.birthday:add-search-config:unique@v1", + "target": { + "id": "postgres" + }, + "precheck": [], + "execute": [ + { + "description": "Register cipherstash unique search config for users.birthday", + "sql": "SELECT eql_v2.add_search_config('users', 'birthday', 'unique', 'date');" + } + ], + "postcheck": [] + }, + { + "id": 
"cipherstash-codec.users.birthday.add-search-config.ore", + "label": "Enable cipherstash search on users.birthday", + "operationClass": "additive", + "invariantId": "cipherstash-codec:users.birthday:add-search-config:ore@v1", + "target": { + "id": "postgres" + }, + "precheck": [], + "execute": [ + { + "description": "Register cipherstash ore search config for users.birthday", + "sql": "SELECT eql_v2.add_search_config('users', 'birthday', 'ore', 'date');" + } + ], + "postcheck": [] + }, + { + "id": "cipherstash-codec.users.email.add-search-config.unique", + "label": "Enable cipherstash search on users.email", + "operationClass": "additive", + "invariantId": "cipherstash-codec:users.email:add-search-config:unique@v1", + "target": { + "id": "postgres" + }, + "precheck": [], + "execute": [ + { + "description": "Register cipherstash unique search config for users.email", + "sql": "SELECT eql_v2.add_search_config('users', 'email', 'unique', 'text');" + } + ], + "postcheck": [] + }, + { + "id": "cipherstash-codec.users.email.add-search-config.match", + "label": "Enable cipherstash search on users.email", + "operationClass": "additive", + "invariantId": "cipherstash-codec:users.email:add-search-config:match@v1", + "target": { + "id": "postgres" + }, + "precheck": [], + "execute": [ + { + "description": "Register cipherstash match search config for users.email", + "sql": "SELECT eql_v2.add_search_config('users', 'email', 'match', 'text');" + } + ], + "postcheck": [] + }, + { + "id": "cipherstash-codec.users.email.add-search-config.ore", + "label": "Enable cipherstash search on users.email", + "operationClass": "additive", + "invariantId": "cipherstash-codec:users.email:add-search-config:ore@v1", + "target": { + "id": "postgres" + }, + "precheck": [], + "execute": [ + { + "description": "Register cipherstash ore search config for users.email", + "sql": "SELECT eql_v2.add_search_config('users', 'email', 'ore', 'text');" + } + ], + "postcheck": [] + }, + { + "id": 
"cipherstash-codec.users.emailverified.add-search-config.unique", + "label": "Enable cipherstash search on users.emailverified", + "operationClass": "additive", + "invariantId": "cipherstash-codec:users.emailverified:add-search-config:unique@v1", + "target": { + "id": "postgres" + }, + "precheck": [], + "execute": [ + { + "description": "Register cipherstash unique search config for users.emailverified", + "sql": "SELECT eql_v2.add_search_config('users', 'emailverified', 'unique', 'boolean');" + } + ], + "postcheck": [] + }, + { + "id": "cipherstash-codec.users.preferences.add-search-config.ste_vec", + "label": "Enable cipherstash search on users.preferences", + "operationClass": "additive", + "invariantId": "cipherstash-codec:users.preferences:add-search-config:ste_vec@v1", + "target": { + "id": "postgres" + }, + "precheck": [], + "execute": [ + { + "description": "Register cipherstash ste_vec search config for users.preferences", + "sql": "SELECT eql_v2.add_search_config('users', 'preferences', 'ste_vec', 'jsonb');" + } + ], + "postcheck": [] + }, + { + "id": "cipherstash-codec.users.salary.add-search-config.unique", + "label": "Enable cipherstash search on users.salary", + "operationClass": "additive", + "invariantId": "cipherstash-codec:users.salary:add-search-config:unique@v1", + "target": { + "id": "postgres" + }, + "precheck": [], + "execute": [ + { + "description": "Register cipherstash unique search config for users.salary", + "sql": "SELECT eql_v2.add_search_config('users', 'salary', 'unique', 'double');" + } + ], + "postcheck": [] + }, + { + "id": "cipherstash-codec.users.salary.add-search-config.ore", + "label": "Enable cipherstash search on users.salary", + "operationClass": "additive", + "invariantId": "cipherstash-codec:users.salary:add-search-config:ore@v1", + "target": { + "id": "postgres" + }, + "precheck": [], + "execute": [ + { + "description": "Register cipherstash ore search config for users.salary", + "sql": "SELECT 
eql_v2.add_search_config('users', 'salary', 'ore', 'double');" + } + ], + "postcheck": [] + } +] \ No newline at end of file diff --git a/examples/prisma/migrations/cipherstash/20260601T0000_install_eql_bundle/contract.json b/examples/prisma/migrations/cipherstash/20260601T0000_install_eql_bundle/contract.json new file mode 100644 index 00000000..9c4939d7 --- /dev/null +++ b/examples/prisma/migrations/cipherstash/20260601T0000_install_eql_bundle/contract.json @@ -0,0 +1 @@ +{"_generated":{"message":"This file is automatically generated by \"prisma-next contract emit\".","regenerate":"To regenerate, run: prisma-next contract emit","warning":"⚠️ GENERATED FILE - DO NOT EDIT"},"capabilities":{"postgres":{"jsonAgg":true,"lateral":true,"limit":true,"orderBy":true,"returning":true},"sql":{"defaultInInsert":true,"enums":true,"returning":true}},"extensionPacks":{},"meta":{},"models":{"EqlV2Configuration":{"fields":{"data":{"nullable":false,"type":{"codecId":"pg/jsonb@1","kind":"scalar"}},"id":{"nullable":false,"type":{"codecId":"pg/text@1","kind":"scalar"}},"state":{"nullable":false,"type":{"codecId":"pg/text@1","kind":"scalar"}}},"relations":{},"storage":{"fields":{"data":{"column":"data"},"id":{"column":"id"},"state":{"column":"state"}},"table":"eql_v2_configuration"}}},"profileHash":"sha256:1a8dbe044289f30a1de958fe800cc5a8378b285d2e126a8c44b58864bac2c18e","roots":{"eql_v2_configuration":"EqlV2Configuration"},"schemaVersion":"1","storage":{"storageHash":"sha256:efa685171bebbb8f078f08d12be3578bb5d96b71669dccc6cc9e4be96af8cdb4","tables":{"eql_v2_configuration":{"columns":{"data":{"codecId":"pg/jsonb@1","nativeType":"jsonb","nullable":false},"id":{"codecId":"pg/text@1","nativeType":"text","nullable":false},"state":{"codecId":"pg/text@1","nativeType":"text","nullable":false}},"foreignKeys":[],"indexes":[],"primaryKey":{"columns":["id"]},"uniques":[]}}},"target":"postgres","targetFamily":"sql"} diff --git 
a/examples/prisma/migrations/cipherstash/20260601T0000_install_eql_bundle/migration.json b/examples/prisma/migrations/cipherstash/20260601T0000_install_eql_bundle/migration.json new file mode 100644 index 00000000..cf0c8195 --- /dev/null +++ b/examples/prisma/migrations/cipherstash/20260601T0000_install_eql_bundle/migration.json @@ -0,0 +1,120 @@ +{ + "from": null, + "to": "sha256:efa685171bebbb8f078f08d12be3578bb5d96b71669dccc6cc9e4be96af8cdb4", + "labels": [], + "providedInvariants": [ + "cipherstash:install-eql-bundle-v1" + ], + "createdAt": "2026-05-09T03:42:56.902Z", + "fromContract": null, + "toContract": { + "schemaVersion": "1", + "targetFamily": "sql", + "target": "postgres", + "profileHash": "sha256:1a8dbe044289f30a1de958fe800cc5a8378b285d2e126a8c44b58864bac2c18e", + "roots": { + "eql_v2_configuration": "EqlV2Configuration" + }, + "models": { + "EqlV2Configuration": { + "fields": { + "data": { + "nullable": false, + "type": { + "codecId": "pg/jsonb@1", + "kind": "scalar" + } + }, + "id": { + "nullable": false, + "type": { + "codecId": "pg/text@1", + "kind": "scalar" + } + }, + "state": { + "nullable": false, + "type": { + "codecId": "pg/text@1", + "kind": "scalar" + } + } + }, + "relations": {}, + "storage": { + "fields": { + "data": { + "column": "data" + }, + "id": { + "column": "id" + }, + "state": { + "column": "state" + } + }, + "table": "eql_v2_configuration" + } + } + }, + "storage": { + "storageHash": "sha256:efa685171bebbb8f078f08d12be3578bb5d96b71669dccc6cc9e4be96af8cdb4", + "tables": { + "eql_v2_configuration": { + "columns": { + "data": { + "codecId": "pg/jsonb@1", + "nativeType": "jsonb", + "nullable": false + }, + "id": { + "codecId": "pg/text@1", + "nativeType": "text", + "nullable": false + }, + "state": { + "codecId": "pg/text@1", + "nativeType": "text", + "nullable": false + } + }, + "foreignKeys": [], + "indexes": [], + "primaryKey": { + "columns": [ + "id" + ] + }, + "uniques": [] + } + } + }, + "capabilities": { + "postgres": { + 
"jsonAgg": true, + "lateral": true, + "limit": true, + "orderBy": true, + "returning": true + }, + "sql": { + "defaultInInsert": true, + "enums": true, + "returning": true + } + }, + "extensionPacks": {}, + "meta": {}, + "_generated": { + "warning": "⚠️ GENERATED FILE - DO NOT EDIT", + "message": "This file is automatically generated by \"prisma-next contract emit\".", + "regenerate": "To regenerate, run: prisma-next contract emit" + } + }, + "hints": { + "used": [], + "applied": [], + "plannerVersion": "2.0.0" + }, + "migrationHash": "sha256:9b44ccc4d0753b364e546297857dcd8dd1ea0c16d2d09579ddb3c8d0e5fc3115" +} \ No newline at end of file diff --git a/examples/prisma/migrations/cipherstash/20260601T0000_install_eql_bundle/ops.json b/examples/prisma/migrations/cipherstash/20260601T0000_install_eql_bundle/ops.json new file mode 100644 index 00000000..9af1a5d5 --- /dev/null +++ b/examples/prisma/migrations/cipherstash/20260601T0000_install_eql_bundle/ops.json @@ -0,0 +1,28 @@ +[ + { + "id": "cipherstash.install-eql-bundle", + "label": "Install EQL bundle (functions, operators, casts, op classes, schema, types)", + "operationClass": "additive", + "invariantId": "cipherstash:install-eql-bundle-v1", + "target": { + "id": "postgres" + }, + "precheck": [], + "execute": [ + { + "description": "Install EQL bundle (functions, operators, casts, op classes, schema, types)", + "sql": "--! @file schema.sql\n--! @brief EQL v2 schema creation\n--!\n--! Creates the eql_v2 schema which contains all Encrypt Query Language\n--! functions, types, and tables. Drops existing schema if present to\n--! support clean reinstallation.\n--!\n--! @warning DROP SCHEMA CASCADE will remove all objects in the schema\n--! @note All EQL objects (functions, types, tables) reside in eql_v2 schema\n\n--! @brief Drop existing EQL v2 schema\n--! @warning CASCADE will drop all dependent objects\nDROP SCHEMA IF EXISTS eql_v2 CASCADE;\n\n--! @brief Create EQL v2 schema\n--! 
@note All EQL functions and types will be created in this schema\nCREATE SCHEMA eql_v2;\n\n--! @brief Composite type for encrypted column data\n--!\n--! Core type used for all encrypted columns in EQL. Stores encrypted data as JSONB\n--! with the following structure:\n--! - `c`: ciphertext (base64-encoded encrypted value)\n--! - `i`: index terms (searchable metadata for encrypted searches)\n--! - `k`: key ID (identifier for encryption key)\n--! - `m`: metadata (additional encryption metadata)\n--!\n--! Created in public schema to persist independently of eql_v2 schema lifecycle.\n--! Customer data columns use this type, so it must not be dropped if data exists.\n--!\n--! @note DO NOT DROP this type unless absolutely certain no encrypted data uses it\n--! @see eql_v2.ciphertext\n--! @see eql_v2.meta_data\n--! @see eql_v2.add_column\nDO $$\n BEGIN\n IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'eql_v2_encrypted') THEN\n CREATE TYPE public.eql_v2_encrypted AS (\n data jsonb\n );\n END IF;\n END\n$$;\n\n\n\n\n\n\n\n\n\n\n--! @brief Bloom filter index term type\n--!\n--! Domain type representing Bloom filter bit arrays stored as smallint arrays.\n--! Used for pattern-match encrypted searches via the 'match' index type.\n--! The filter is stored in the 'bf' field of encrypted data payloads.\n--!\n--! @see eql_v2.add_search_config\n--! @see eql_v2.\"~~\"\n--! @note This is a transient type used only during query execution\nCREATE DOMAIN eql_v2.bloom_filter AS smallint[];\n\n\n\n--! @brief ORE block term type for Order-Revealing Encryption\n--!\n--! Composite type representing a single ORE (Order-Revealing Encryption) block term.\n--! Stores encrypted data as bytea that enables range comparisons without decryption.\n--!\n--! @see eql_v2.ore_block_u64_8_256\n--! @see eql_v2.compare_ore_block_u64_8_256_term\nCREATE TYPE eql_v2.ore_block_u64_8_256_term AS (\n bytes bytea\n);\n\n\n--! @brief ORE block index term type for range queries\n--!\n--! 
Composite type containing an array of ORE block terms. Used for encrypted\n--! range queries via the 'ore' index type. The array is stored in the 'ob' field\n--! of encrypted data payloads.\n--!\n--! @see eql_v2.add_search_config\n--! @see eql_v2.compare_ore_block_u64_8_256_terms\n--! @note This is a transient type used only during query execution\nCREATE TYPE eql_v2.ore_block_u64_8_256 AS (\n terms eql_v2.ore_block_u64_8_256_term[]\n);\n\n--! @brief HMAC-SHA256 index term type\n--!\n--! Domain type representing HMAC-SHA256 hash values.\n--! Used for exact-match encrypted searches via the 'unique' index type.\n--! The hash is stored in the 'hm' field of encrypted data payloads.\n--!\n--! @see eql_v2.add_search_config\n--! @note This is a transient type used only during query execution\nCREATE DOMAIN eql_v2.hmac_256 AS text;\n-- AUTOMATICALLY GENERATED FILE\n\n--! @file common.sql\n--! @brief Common utility functions\n--!\n--! Provides general-purpose utility functions used across EQL:\n--! - Constant-time bytea comparison for security\n--! - JSONB to bytea array conversion\n--! - Logging helpers for debugging and testing\n\n\n--! @brief Constant-time comparison of bytea values\n--! @internal\n--!\n--! Compares two bytea values in constant time to prevent timing attacks.\n--! Always checks all bytes even after finding differences, maintaining\n--! consistent execution time regardless of where differences occur.\n--!\n--! @param a bytea First value to compare\n--! @param b bytea Second value to compare\n--! @return boolean True if values are equal\n--!\n--! @note Returns false immediately if lengths differ (length is not secret)\n--! 
@note Used for secure comparison of cryptographic values\nCREATE FUNCTION eql_v2.bytea_eq(a bytea, b bytea) RETURNS boolean AS $$\nDECLARE\n result boolean;\n differing bytea;\nBEGIN\n\n -- Check if the bytea values are the same length\n IF LENGTH(a) != LENGTH(b) THEN\n RETURN false;\n END IF;\n\n -- Compare each byte in the bytea values\n result := true;\n FOR i IN 1..LENGTH(a) LOOP\n IF SUBSTRING(a FROM i FOR 1) != SUBSTRING(b FROM i FOR 1) THEN\n result := result AND false;\n END IF;\n END LOOP;\n\n RETURN result;\nEND;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Convert JSONB hex array to bytea array\n--! @internal\n--!\n--! Converts a JSONB array of hex-encoded strings into a PostgreSQL bytea array.\n--! Used for deserializing binary data (like ORE terms) from JSONB storage.\n--!\n--! @param jsonb JSONB array of hex-encoded strings\n--! @return bytea[] Array of decoded binary values\n--!\n--! @note Returns NULL if input is JSON null\n--! @note Each array element is hex-decoded to bytea\nCREATE FUNCTION eql_v2.jsonb_array_to_bytea_array(val jsonb)\nRETURNS bytea[] AS $$\nDECLARE\n terms_arr bytea[];\nBEGIN\n IF jsonb_typeof(val) = 'null' THEN\n RETURN NULL;\n END IF;\n\n SELECT array_agg(decode(value::text, 'hex')::bytea)\n INTO terms_arr\n FROM jsonb_array_elements_text(val) AS value;\n\n RETURN terms_arr;\nEND;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Log message for debugging\n--!\n--! Convenience function to emit log messages during testing and debugging.\n--! Uses RAISE NOTICE to output messages to PostgreSQL logs.\n--!\n--! @param text Message to log\n--!\n--! @note Primarily used in tests and development\n--! @see eql_v2.log(text, text) for contextual logging\nCREATE FUNCTION eql_v2.log(s text)\n RETURNS void\nAS $$\n BEGIN\n RAISE NOTICE '[LOG] %', s;\nEND;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Log message with context\n--!\n--! Overload of log function that includes context label for better\n--! log organization during testing.\n--!\n--! 
@param ctx text Context label (e.g., test name, module name)\n--! @param s text Message to log\n--!\n--! @note Format: \"[LOG] {ctx} {message}\"\n--! @see eql_v2.log(text)\nCREATE FUNCTION eql_v2.log(ctx text, s text)\n RETURNS void\nAS $$\n BEGIN\n RAISE NOTICE '[LOG] % %', ctx, s;\nEND;\n$$ LANGUAGE plpgsql;\n\n--! @brief CLLW ORE index term type for range queries\n--!\n--! Composite type for CLLW (Copyless Logarithmic Width) Order-Revealing Encryption.\n--! Each output block is 8-bits. Used for encrypted range queries via the 'ore' index type.\n--! The ciphertext is stored in the 'ocf' field of encrypted data payloads.\n--!\n--! @see eql_v2.add_search_config\n--! @see eql_v2.compare_ore_cllw_u64_8\n--! @note This is a transient type used only during query execution\nCREATE TYPE eql_v2.ore_cllw_u64_8 AS (\n bytes bytea\n);\n\n--! @file crypto.sql\n--! @brief PostgreSQL pgcrypto extension enablement\n--!\n--! Enables the pgcrypto extension which provides cryptographic functions\n--! used by EQL for hashing and other cryptographic operations.\n--!\n--! @note pgcrypto provides functions like digest(), hmac(), gen_random_bytes()\n--! @note IF NOT EXISTS prevents errors if extension already enabled\n\n--! @brief Enable pgcrypto extension\n--! @note Provides cryptographic functions for hashing and random number generation\nCREATE EXTENSION IF NOT EXISTS pgcrypto;\n\n\n--! @brief Extract ciphertext from encrypted JSONB value\n--!\n--! Extracts the ciphertext (c field) from a raw JSONB encrypted value.\n--! The ciphertext is the base64-encoded encrypted data.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return Text Base64-encoded ciphertext string\n--! @throws Exception if 'c' field is not present in JSONB\n--!\n--! @example\n--! -- Extract ciphertext from JSONB literal\n--! SELECT eql_v2.ciphertext('{\"c\":\"AQIDBA==\",\"i\":{\"unique\":\"...\"}}'::jsonb);\n--!\n--! @see eql_v2.ciphertext(eql_v2_encrypted)\n--! 
@see eql_v2.meta_data\nCREATE FUNCTION eql_v2.ciphertext(val jsonb)\n RETURNS text\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n IF val ? 'c' THEN\n RETURN val->>'c';\n END IF;\n RAISE 'Expected a ciphertext (c) value in json: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Extract ciphertext from encrypted column value\n--!\n--! Extracts the ciphertext from an encrypted column value. Convenience\n--! overload that unwraps eql_v2_encrypted type and delegates to JSONB version.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return Text Base64-encoded ciphertext string\n--! @throws Exception if encrypted value is malformed\n--!\n--! @example\n--! -- Extract ciphertext from encrypted column\n--! SELECT eql_v2.ciphertext(encrypted_email) FROM users;\n--!\n--! @see eql_v2.ciphertext(jsonb)\n--! @see eql_v2.meta_data\nCREATE FUNCTION eql_v2.ciphertext(val eql_v2_encrypted)\n RETURNS text\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.ciphertext(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief State transition function for grouped_value aggregate\n--! @internal\n--!\n--! Returns the first non-null value encountered. Used as state function\n--! for the grouped_value aggregate to select first value in each group.\n--!\n--! @param $1 JSONB Accumulated state (first non-null value found)\n--! @param $2 JSONB New value from current row\n--! @return JSONB First non-null value (state or new value)\n--!\n--! @see eql_v2.grouped_value\nCREATE FUNCTION eql_v2._first_grouped_value(jsonb, jsonb)\nRETURNS jsonb AS $$\n SELECT COALESCE($1, $2);\n$$ LANGUAGE sql IMMUTABLE;\n\n--! @brief Return first non-null encrypted value in a group\n--!\n--! Aggregate function that returns the first non-null encrypted value\n--! encountered within a GROUP BY clause. Useful for deduplication or\n--! selecting representative values from grouped encrypted data.\n--!\n--! @param input JSONB Encrypted values to aggregate\n--! 
@return JSONB First non-null encrypted value in group\n--!\n--! @example\n--! -- Get first email per user group\n--! SELECT user_id, eql_v2.grouped_value(encrypted_email)\n--! FROM user_emails\n--! GROUP BY user_id;\n--!\n--! -- Deduplicate encrypted values\n--! SELECT DISTINCT ON (user_id)\n--! user_id,\n--! eql_v2.grouped_value(encrypted_ssn) as primary_ssn\n--! FROM user_records\n--! GROUP BY user_id;\n--!\n--! @see eql_v2._first_grouped_value\nCREATE AGGREGATE eql_v2.grouped_value(jsonb) (\n SFUNC = eql_v2._first_grouped_value,\n STYPE = jsonb\n);\n\n--! @brief Add validation constraint to encrypted column\n--!\n--! Adds a CHECK constraint to ensure column values conform to encrypted data\n--! structure. Constraint uses eql_v2.check_encrypted to validate format.\n--! Called automatically by eql_v2.add_column.\n--!\n--! @param table_name TEXT Name of table containing the column\n--! @param column_name TEXT Name of column to constrain\n--! @return Void\n--!\n--! @example\n--! -- Manually add constraint (normally done by add_column)\n--! SELECT eql_v2.add_encrypted_constraint('users', 'encrypted_email');\n--!\n--! -- Resulting constraint:\n--! -- ALTER TABLE users ADD CONSTRAINT eql_v2_encrypted_check_encrypted_email\n--! -- CHECK (eql_v2.check_encrypted(encrypted_email));\n--!\n--! @see eql_v2.add_column\n--! @see eql_v2.remove_encrypted_constraint\nCREATE FUNCTION eql_v2.add_encrypted_constraint(table_name TEXT, column_name TEXT)\n RETURNS void\nAS $$\n\tBEGIN\n EXECUTE format('ALTER TABLE %I ADD CONSTRAINT eql_v2_encrypted_constraint_%I_%I CHECK (eql_v2.check_encrypted(%I))', table_name, table_name, column_name, column_name);\n EXCEPTION\n WHEN duplicate_table THEN\n WHEN duplicate_object THEN\n RAISE NOTICE 'Constraint `eql_v2_encrypted_constraint_%_%` already exists, skipping', table_name, column_name;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Remove validation constraint from encrypted column\n--!\n--! 
Removes the CHECK constraint that validates encrypted data structure.\n--! Called automatically by eql_v2.remove_column. Uses IF EXISTS to avoid\n--! errors if constraint doesn't exist.\n--!\n--! @param table_name TEXT Name of table containing the column\n--! @param column_name TEXT Name of column to unconstrain\n--! @return Void\n--!\n--! @example\n--! -- Manually remove constraint (normally done by remove_column)\n--! SELECT eql_v2.remove_encrypted_constraint('users', 'encrypted_email');\n--!\n--! @see eql_v2.remove_column\n--! @see eql_v2.add_encrypted_constraint\nCREATE FUNCTION eql_v2.remove_encrypted_constraint(table_name TEXT, column_name TEXT)\n RETURNS void\nAS $$\n\tBEGIN\n\t\tEXECUTE format('ALTER TABLE %I DROP CONSTRAINT IF EXISTS eql_v2_encrypted_constraint_%I_%I', table_name, table_name, column_name);\n\tEND;\n$$ LANGUAGE plpgsql;\n\n--! @brief Extract metadata from encrypted JSONB value\n--!\n--! Extracts index terms (i) and version (v) from a raw JSONB encrypted value.\n--! Returns metadata object containing searchable index terms without ciphertext.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return JSONB Metadata object with 'i' (index terms) and 'v' (version) fields\n--!\n--! @example\n--! -- Extract metadata to inspect index terms\n--! SELECT eql_v2.meta_data('{\"c\":\"...\",\"i\":{\"unique\":\"abc123\"},\"v\":1}'::jsonb);\n--! -- Returns: {\"i\":{\"unique\":\"abc123\"},\"v\":1}\n--!\n--! @see eql_v2.meta_data(eql_v2_encrypted)\n--! @see eql_v2.ciphertext\nCREATE FUNCTION eql_v2.meta_data(val jsonb)\n RETURNS jsonb\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN jsonb_build_object(\n 'i', val->'i',\n 'v', val->'v'\n );\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Extract metadata from encrypted column value\n--!\n--! Extracts index terms and version from an encrypted column value.\n--! Convenience overload that unwraps eql_v2_encrypted type and\n--! delegates to JSONB version.\n--!\n--! 
@param eql_v2_encrypted Encrypted column value\n--! @return JSONB Metadata object with 'i' (index terms) and 'v' (version) fields\n--!\n--! @example\n--! -- Inspect index terms for encrypted column\n--! SELECT user_id, eql_v2.meta_data(encrypted_email) as email_metadata\n--! FROM users;\n--!\n--! @see eql_v2.meta_data(jsonb)\n--! @see eql_v2.ciphertext\nCREATE FUNCTION eql_v2.meta_data(val eql_v2_encrypted)\n RETURNS jsonb\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN eql_v2.meta_data(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Variable-width CLLW ORE index term type for range queries\n--!\n--! Composite type for variable-width CLLW (Copyless Logarithmic Width) Order-Revealing Encryption.\n--! Each output block is 8-bits. Unlike ore_cllw_u64_8, supports variable-length ciphertexts.\n--! Used for encrypted range queries via the 'ore' index type.\n--! The ciphertext is stored in the 'ocv' field of encrypted data payloads.\n--!\n--! @see eql_v2.add_search_config\n--! @see eql_v2.compare_ore_cllw_var_8\n--! @note This is a transient type used only during query execution\nCREATE TYPE eql_v2.ore_cllw_var_8 AS (\n bytes bytea\n);\n\n\n--! @brief Extract CLLW ORE index term from JSONB payload\n--!\n--! Extracts the CLLW ORE ciphertext from the 'ocf' field of an encrypted\n--! data payload. Used internally for range query comparisons.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return eql_v2.ore_cllw_u64_8 CLLW ORE ciphertext\n--! @throws Exception if 'ocf' field is missing when ore index is expected\n--!\n--! @see eql_v2.has_ore_cllw_u64_8\n--! 
@see eql_v2.compare_ore_cllw_u64_8\nCREATE FUNCTION eql_v2.ore_cllw_u64_8(val jsonb)\n RETURNS eql_v2.ore_cllw_u64_8\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n IF val IS NULL THEN\n RETURN NULL;\n END IF;\n\n IF NOT (eql_v2.has_ore_cllw_u64_8(val)) THEN\n RAISE 'Expected a ore_cllw_u64_8 index (ocf) value in json: %', val;\n END IF;\n\n RETURN ROW(decode(val->>'ocf', 'hex'));\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract CLLW ORE index term from encrypted column value\n--!\n--! Extracts the CLLW ORE ciphertext from an encrypted column value by accessing\n--! its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return eql_v2.ore_cllw_u64_8 CLLW ORE ciphertext\n--!\n--! @see eql_v2.ore_cllw_u64_8(jsonb)\nCREATE FUNCTION eql_v2.ore_cllw_u64_8(val eql_v2_encrypted)\n RETURNS eql_v2.ore_cllw_u64_8\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (SELECT eql_v2.ore_cllw_u64_8(val.data));\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if JSONB payload contains CLLW ORE index term\n--!\n--! Tests whether the encrypted data payload includes an 'ocf' field,\n--! indicating a CLLW ORE ciphertext is available for range queries.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return Boolean True if 'ocf' field is present and non-null\n--!\n--! @see eql_v2.ore_cllw_u64_8\nCREATE FUNCTION eql_v2.has_ore_cllw_u64_8(val jsonb)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN val ->> 'ocf' IS NOT NULL;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if encrypted column value contains CLLW ORE index term\n--!\n--! Tests whether an encrypted column value includes a CLLW ORE ciphertext\n--! by checking its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return Boolean True if CLLW ORE ciphertext is present\n--!\n--! 
@see eql_v2.has_ore_cllw_u64_8(jsonb)\nCREATE FUNCTION eql_v2.has_ore_cllw_u64_8(val eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.has_ore_cllw_u64_8(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n--! @brief Compare CLLW ORE ciphertext bytes\n--! @internal\n--!\n--! Byte-by-byte comparison of CLLW ORE ciphertexts implementing the CLLW\n--! comparison algorithm. Used by both fixed-width (ore_cllw_u64_8) and\n--! variable-width (ore_cllw_var_8) ORE variants.\n--!\n--! @param a Bytea First CLLW ORE ciphertext\n--! @param b Bytea Second CLLW ORE ciphertext\n--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b\n--! @throws Exception if ciphertexts are different lengths\n--!\n--! @note Shared comparison logic for multiple ORE CLLW schemes\n--! @see eql_v2.compare_ore_cllw_u64_8\nCREATE FUNCTION eql_v2.compare_ore_cllw_term_bytes(a bytea, b bytea)\nRETURNS int AS $$\nDECLARE\n len_a INT;\n len_b INT;\n x BYTEA;\n y BYTEA;\n i INT;\n differing boolean;\nBEGIN\n\n -- Check if the lengths of the two bytea arguments are the same\n len_a := LENGTH(a);\n len_b := LENGTH(b);\n\n IF len_a != len_b THEN\n RAISE EXCEPTION 'ore_cllw index terms are not the same length';\n END IF;\n\n -- Iterate over each byte and compare them\n FOR i IN 1..len_a LOOP\n x := SUBSTRING(a FROM i FOR 1);\n y := SUBSTRING(b FROM i FOR 1);\n\n -- Check if there's a difference\n IF x != y THEN\n differing := true;\n EXIT;\n END IF;\n END LOOP;\n\n -- If a difference is found, compare the bytes as in Rust logic\n IF differing THEN\n IF (get_byte(y, 0) + 1) % 256 = get_byte(x, 0) THEN\n RETURN 1;\n ELSE\n RETURN -1;\n END IF;\n ELSE\n RETURN 0;\n END IF;\nEND;\n$$ LANGUAGE plpgsql;\n\n\n\n--! @brief Blake3 hash index term type\n--!\n--! Domain type representing Blake3 cryptographic hash values.\n--! Used for exact-match encrypted searches via the 'unique' index type.\n--! The hash is stored in the 'b3' field of encrypted data payloads.\n--!\n--! 
@see eql_v2.add_search_config\n--! @note This is a transient type used only during query execution\nCREATE DOMAIN eql_v2.blake3 AS text;\n\n--! @brief Extract Blake3 hash index term from JSONB payload\n--!\n--! Extracts the Blake3 hash value from the 'b3' field of an encrypted\n--! data payload. Used internally for exact-match comparisons.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return eql_v2.blake3 Blake3 hash value, or NULL if not present\n--! @throws Exception if 'b3' field is missing when blake3 index is expected\n--!\n--! @see eql_v2.has_blake3\n--! @see eql_v2.compare_blake3\nCREATE FUNCTION eql_v2.blake3(val jsonb)\n RETURNS eql_v2.blake3\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n IF val IS NULL THEN\n RETURN NULL;\n END IF;\n\n IF NOT eql_v2.has_blake3(val) THEN\n RAISE 'Expected a blake3 index (b3) value in json: %', val;\n END IF;\n\n IF val->>'b3' IS NULL THEN\n RETURN NULL;\n END IF;\n\n RETURN val->>'b3';\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract Blake3 hash index term from encrypted column value\n--!\n--! Extracts the Blake3 hash from an encrypted column value by accessing\n--! its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return eql_v2.blake3 Blake3 hash value, or NULL if not present\n--!\n--! @see eql_v2.blake3(jsonb)\nCREATE FUNCTION eql_v2.blake3(val eql_v2_encrypted)\n RETURNS eql_v2.blake3\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (SELECT eql_v2.blake3(val.data));\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if JSONB payload contains Blake3 index term\n--!\n--! Tests whether the encrypted data payload includes a 'b3' field,\n--! indicating a Blake3 hash is available for exact-match queries.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return Boolean True if 'b3' field is present and non-null\n--!\n--! 
@see eql_v2.blake3\nCREATE FUNCTION eql_v2.has_blake3(val jsonb)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN val ->> 'b3' IS NOT NULL;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if encrypted column value contains Blake3 index term\n--!\n--! Tests whether an encrypted column value includes a Blake3 hash\n--! by checking its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return Boolean True if Blake3 hash is present\n--!\n--! @see eql_v2.has_blake3(jsonb)\nCREATE FUNCTION eql_v2.has_blake3(val eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.has_blake3(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract HMAC-SHA256 index term from JSONB payload\n--!\n--! Extracts the HMAC-SHA256 hash value from the 'hm' field of an encrypted\n--! data payload. Used internally for exact-match comparisons.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return eql_v2.hmac_256 HMAC-SHA256 hash value\n--! @throws Exception if 'hm' field is missing when hmac_256 index is expected\n--!\n--! @see eql_v2.has_hmac_256\n--! @see eql_v2.compare_hmac_256\nCREATE FUNCTION eql_v2.hmac_256(val jsonb)\n RETURNS eql_v2.hmac_256\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n IF val IS NULL THEN\n RETURN NULL;\n END IF;\n\n IF eql_v2.has_hmac_256(val) THEN\n RETURN val->>'hm';\n END IF;\n RAISE 'Expected a hmac_256 index (hm) value in json: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if JSONB payload contains HMAC-SHA256 index term\n--!\n--! Tests whether the encrypted data payload includes an 'hm' field,\n--! indicating an HMAC-SHA256 hash is available for exact-match queries.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return Boolean True if 'hm' field is present and non-null\n--!\n--! 
@see eql_v2.hmac_256\nCREATE FUNCTION eql_v2.has_hmac_256(val jsonb)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN val ->> 'hm' IS NOT NULL;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if encrypted column value contains HMAC-SHA256 index term\n--!\n--! Tests whether an encrypted column value includes an HMAC-SHA256 hash\n--! by checking its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return Boolean True if HMAC-SHA256 hash is present\n--!\n--! @see eql_v2.has_hmac_256(jsonb)\nCREATE FUNCTION eql_v2.has_hmac_256(val eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.has_hmac_256(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n--! @brief Extract HMAC-SHA256 index term from encrypted column value\n--!\n--! Extracts the HMAC-SHA256 hash from an encrypted column value by accessing\n--! its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return eql_v2.hmac_256 HMAC-SHA256 hash value\n--!\n--! @see eql_v2.hmac_256(jsonb)\nCREATE FUNCTION eql_v2.hmac_256(val eql_v2_encrypted)\n RETURNS eql_v2.hmac_256\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (SELECT eql_v2.hmac_256(val.data));\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n\n--! @brief Convert JSONB array to ORE block composite type\n--! @internal\n--!\n--! Converts a JSONB array of hex-encoded ORE terms from the CipherStash Proxy\n--! payload into the PostgreSQL composite type used for ORE operations.\n--!\n--! @param val JSONB Array of hex-encoded ORE block terms\n--! @return eql_v2.ore_block_u64_8_256 ORE block composite type, or NULL if input is null\n--!\n--! 
@see eql_v2.ore_block_u64_8_256(jsonb)\nCREATE FUNCTION eql_v2.jsonb_array_to_ore_block_u64_8_256(val jsonb)\nRETURNS eql_v2.ore_block_u64_8_256 AS $$\nDECLARE\n terms eql_v2.ore_block_u64_8_256_term[];\nBEGIN\n IF jsonb_typeof(val) = 'null' THEN\n RETURN NULL;\n END IF;\n\n SELECT array_agg(ROW(b)::eql_v2.ore_block_u64_8_256_term)\n INTO terms\n FROM unnest(eql_v2.jsonb_array_to_bytea_array(val)) AS b;\n\n RETURN ROW(terms)::eql_v2.ore_block_u64_8_256;\nEND;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract ORE block index term from JSONB payload\n--!\n--! Extracts the ORE block array from the 'ob' field of an encrypted\n--! data payload. Used internally for range query comparisons.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return eql_v2.ore_block_u64_8_256 ORE block index term\n--! @throws Exception if 'ob' field is missing when ore index is expected\n--!\n--! @see eql_v2.has_ore_block_u64_8_256\n--! @see eql_v2.compare_ore_block_u64_8_256\nCREATE FUNCTION eql_v2.ore_block_u64_8_256(val jsonb)\n RETURNS eql_v2.ore_block_u64_8_256\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n IF val IS NULL THEN\n RETURN NULL;\n END IF;\n\n IF eql_v2.has_ore_block_u64_8_256(val) THEN\n RETURN eql_v2.jsonb_array_to_ore_block_u64_8_256(val->'ob');\n END IF;\n RAISE 'Expected an ore index (ob) value in json: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract ORE block index term from encrypted column value\n--!\n--! Extracts the ORE block from an encrypted column value by accessing\n--! its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return eql_v2.ore_block_u64_8_256 ORE block index term\n--!\n--! @see eql_v2.ore_block_u64_8_256(jsonb)\nCREATE FUNCTION eql_v2.ore_block_u64_8_256(val eql_v2_encrypted)\n RETURNS eql_v2.ore_block_u64_8_256\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.ore_block_u64_8_256(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! 
@brief Check if JSONB payload contains ORE block index term\n--!\n--! Tests whether the encrypted data payload includes an 'ob' field,\n--! indicating an ORE block is available for range queries.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return Boolean True if 'ob' field is present and non-null\n--!\n--! @see eql_v2.ore_block_u64_8_256\nCREATE FUNCTION eql_v2.has_ore_block_u64_8_256(val jsonb)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN val ->> 'ob' IS NOT NULL;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if encrypted column value contains ORE block index term\n--!\n--! Tests whether an encrypted column value includes an ORE block\n--! by checking its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return Boolean True if ORE block is present\n--!\n--! @see eql_v2.has_ore_block_u64_8_256(jsonb)\nCREATE FUNCTION eql_v2.has_ore_block_u64_8_256(val eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.has_ore_block_u64_8_256(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n--! @brief Compare two ORE block terms using cryptographic comparison\n--! @internal\n--!\n--! Performs a three-way comparison (returns -1/0/1) of individual ORE block terms\n--! using the ORE cryptographic protocol. Compares PRP and PRF blocks to determine\n--! ordering without decryption.\n--!\n--! @param a eql_v2.ore_block_u64_8_256_term First ORE term to compare\n--! @param b eql_v2.ore_block_u64_8_256_term Second ORE term to compare\n--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b\n--! @throws Exception if ciphertexts are different lengths\n--!\n--! @note Uses AES-ECB encryption for bit comparisons per ORE protocol\n--! 
@see eql_v2.compare_ore_block_u64_8_256_terms\nCREATE FUNCTION eql_v2.compare_ore_block_u64_8_256_term(a eql_v2.ore_block_u64_8_256_term, b eql_v2.ore_block_u64_8_256_term)\n RETURNS integer\nAS $$\n DECLARE\n eq boolean := true;\n unequal_block smallint := 0;\n hash_key bytea;\n data_block bytea;\n encrypt_block bytea;\n target_block bytea;\n\n left_block_size CONSTANT smallint := 16;\n right_block_size CONSTANT smallint := 32;\n right_offset CONSTANT smallint := 136; -- 8 * 17\n\n indicator smallint := 0;\n BEGIN\n IF a IS NULL AND b IS NULL THEN\n RETURN 0;\n END IF;\n\n IF a IS NULL THEN\n RETURN -1;\n END IF;\n\n IF b IS NULL THEN\n RETURN 1;\n END IF;\n\n IF bit_length(a.bytes) != bit_length(b.bytes) THEN\n RAISE EXCEPTION 'Ciphertexts are different lengths';\n END IF;\n\n FOR block IN 0..7 LOOP\n -- Compare each PRP (byte from the first 8 bytes) and PRF block (8 byte\n -- chunks of the rest of the value).\n -- NOTE:\n -- * Substr is ordinally indexed (hence 1 and not 0, and 9 and not 8).\n -- * We are not worrying about timing attacks here; don't fret about\n -- the OR or !=.\n IF\n substr(a.bytes, 1 + block, 1) != substr(b.bytes, 1 + block, 1)\n OR substr(a.bytes, 9 + left_block_size * block, left_block_size) != substr(b.bytes, 9 + left_block_size * BLOCK, left_block_size)\n THEN\n -- set the first unequal block we find\n IF eq THEN\n unequal_block := block;\n END IF;\n eq = false;\n END IF;\n END LOOP;\n\n IF eq THEN\n RETURN 0::integer;\n END IF;\n\n -- Hash key is the IV from the right CT of b\n hash_key := substr(b.bytes, right_offset + 1, 16);\n\n -- first right block is at right offset + nonce_size (ordinally indexed)\n target_block := substr(b.bytes, right_offset + 17 + (unequal_block * right_block_size), right_block_size);\n\n data_block := substr(a.bytes, 9 + (left_block_size * unequal_block), left_block_size);\n\n encrypt_block := public.encrypt(data_block::bytea, hash_key::bytea, 'aes-ecb');\n\n indicator := (\n get_bit(\n encrypt_block,\n 0\n ) 
+ get_bit(target_block, get_byte(a.bytes, unequal_block))) % 2;\n\n IF indicator = 1 THEN\n RETURN 1::integer;\n ELSE\n RETURN -1::integer;\n END IF;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Compare arrays of ORE block terms recursively\n--! @internal\n--!\n--! Recursively compares arrays of ORE block terms element-by-element.\n--! Empty arrays are considered less than non-empty arrays. If the first elements\n--! are equal, recursively compares remaining elements.\n--!\n--! @param a eql_v2.ore_block_u64_8_256_term[] First array of ORE terms\n--! @param b eql_v2.ore_block_u64_8_256_term[] Second array of ORE terms\n--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b, NULL if either array is NULL\n--!\n--! @note Empty arrays sort before non-empty arrays\n--! @see eql_v2.compare_ore_block_u64_8_256_term\nCREATE FUNCTION eql_v2.compare_ore_block_u64_8_256_terms(a eql_v2.ore_block_u64_8_256_term[], b eql_v2.ore_block_u64_8_256_term[])\nRETURNS integer AS $$\n DECLARE\n cmp_result integer;\n BEGIN\n\n -- NULLs are NULL\n IF a IS NULL OR b IS NULL THEN\n RETURN NULL;\n END IF;\n\n -- empty a and b\n IF cardinality(a) = 0 AND cardinality(b) = 0 THEN\n RETURN 0;\n END IF;\n\n -- empty a and some b\n IF (cardinality(a) = 0) AND cardinality(b) > 0 THEN\n RETURN -1;\n END IF;\n\n -- some a and empty b\n IF cardinality(a) > 0 AND (cardinality(b) = 0) THEN\n RETURN 1;\n END IF;\n\n cmp_result := eql_v2.compare_ore_block_u64_8_256_term(a[1], b[1]);\n\n IF cmp_result = 0 THEN\n -- Removes the first element in the array, and calls this fn again to compare the next element/s in the array.\n RETURN eql_v2.compare_ore_block_u64_8_256_terms(a[2:array_length(a,1)], b[2:array_length(b,1)]);\n END IF;\n\n RETURN cmp_result;\n END\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Compare ORE block composite types\n--! @internal\n--!\n--! Wrapper function that extracts term arrays from ORE block composite types\n--! and delegates to the array comparison function.\n--!\n--! 
@param a eql_v2.ore_block_u64_8_256 First ORE block\n--! @param b eql_v2.ore_block_u64_8_256 Second ORE block\n--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b\n--!\n--! @see eql_v2.compare_ore_block_u64_8_256_terms(eql_v2.ore_block_u64_8_256_term[], eql_v2.ore_block_u64_8_256_term[])\nCREATE FUNCTION eql_v2.compare_ore_block_u64_8_256_terms(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256)\nRETURNS integer AS $$\n BEGIN\n RETURN eql_v2.compare_ore_block_u64_8_256_terms(a.terms, b.terms);\n END\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract variable-width CLLW ORE index term from JSONB payload\n--!\n--! Extracts the variable-width CLLW ORE ciphertext from the 'ocv' field of an encrypted\n--! data payload. Used internally for range query comparisons.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return eql_v2.ore_cllw_var_8 Variable-width CLLW ORE ciphertext\n--! @throws Exception if 'ocv' field is missing when ore index is expected\n--!\n--! @see eql_v2.has_ore_cllw_var_8\n--! @see eql_v2.compare_ore_cllw_var_8\nCREATE FUNCTION eql_v2.ore_cllw_var_8(val jsonb)\n RETURNS eql_v2.ore_cllw_var_8\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n\n IF val IS NULL THEN\n RETURN NULL;\n END IF;\n\n IF NOT (eql_v2.has_ore_cllw_var_8(val)) THEN\n RAISE 'Expected a ore_cllw_var_8 index (ocv) value in json: %', val;\n END IF;\n\n RETURN ROW(decode(val->>'ocv', 'hex'));\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract variable-width CLLW ORE index term from encrypted column value\n--!\n--! Extracts the variable-width CLLW ORE ciphertext from an encrypted column value by accessing\n--! its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return eql_v2.ore_cllw_var_8 Variable-width CLLW ORE ciphertext\n--!\n--! 
@see eql_v2.ore_cllw_var_8(jsonb)\nCREATE FUNCTION eql_v2.ore_cllw_var_8(val eql_v2_encrypted)\n RETURNS eql_v2.ore_cllw_var_8\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (SELECT eql_v2.ore_cllw_var_8(val.data));\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if JSONB payload contains variable-width CLLW ORE index term\n--!\n--! Tests whether the encrypted data payload includes an 'ocv' field,\n--! indicating a variable-width CLLW ORE ciphertext is available for range queries.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return Boolean True if 'ocv' field is present and non-null\n--!\n--! @see eql_v2.ore_cllw_var_8\nCREATE FUNCTION eql_v2.has_ore_cllw_var_8(val jsonb)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN val ->> 'ocv' IS NOT NULL;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if encrypted column value contains variable-width CLLW ORE index term\n--!\n--! Tests whether an encrypted column value includes a variable-width CLLW ORE ciphertext\n--! by checking its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return Boolean True if variable-width CLLW ORE ciphertext is present\n--!\n--! @see eql_v2.has_ore_cllw_var_8(jsonb)\nCREATE FUNCTION eql_v2.has_ore_cllw_var_8(val eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.has_ore_cllw_var_8(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Compare variable-width CLLW ORE ciphertext terms\n--! @internal\n--!\n--! Three-way comparison of variable-width CLLW ORE ciphertexts. Compares the common\n--! prefix using byte-by-byte CLLW comparison, then falls back to length comparison\n--! if the common prefix is equal. Used by compare_ore_cllw_var_8 for range queries.\n--!\n--! @param a eql_v2.ore_cllw_var_8 First variable-width CLLW ORE ciphertext\n--! @param b eql_v2.ore_cllw_var_8 Second variable-width CLLW ORE ciphertext\n--! 
@return Integer -1 if a < b, 0 if a = b, 1 if a > b\n--!\n--! @note Handles variable-length ciphertexts by comparing common prefix first\n--! @note Returns NULL if either input is NULL\n--!\n--! @see eql_v2.compare_ore_cllw_term_bytes\n--! @see eql_v2.compare_ore_cllw_var_8\nCREATE FUNCTION eql_v2.compare_ore_cllw_var_8_term(a eql_v2.ore_cllw_var_8, b eql_v2.ore_cllw_var_8)\nRETURNS int AS $$\nDECLARE\n len_a INT;\n len_b INT;\n -- length of the common part of the two bytea values\n common_len INT;\n cmp_result INT;\nBEGIN\n IF a IS NULL OR b IS NULL THEN\n RETURN NULL;\n END IF;\n\n -- Get the lengths of both bytea inputs\n len_a := LENGTH(a.bytes);\n len_b := LENGTH(b.bytes);\n\n -- Handle empty cases\n IF len_a = 0 AND len_b = 0 THEN\n RETURN 0;\n ELSIF len_a = 0 THEN\n RETURN -1;\n ELSIF len_b = 0 THEN\n RETURN 1;\n END IF;\n\n -- Find the length of the shorter bytea\n IF len_a < len_b THEN\n common_len := len_a;\n ELSE\n common_len := len_b;\n END IF;\n\n -- Use the compare_ore_cllw_term function to compare byte by byte\n cmp_result := eql_v2.compare_ore_cllw_term_bytes(\n SUBSTRING(a.bytes FROM 1 FOR common_len),\n SUBSTRING(b.bytes FROM 1 FOR common_len)\n );\n\n -- If the comparison returns 'less' or 'greater', return that result\n IF cmp_result = -1 THEN\n RETURN -1;\n ELSIF cmp_result = 1 THEN\n RETURN 1;\n END IF;\n\n -- If the bytea comparison is 'equal', compare lengths\n IF len_a < len_b THEN\n RETURN -1;\n ELSIF len_a > len_b THEN\n RETURN 1;\n ELSE\n RETURN 0;\n END IF;\nEND;\n$$ LANGUAGE plpgsql;\n\n\n\n\n\n\n--! @brief Core comparison function for encrypted values\n--!\n--! Compares two encrypted values using their index terms without decryption.\n--! This function implements all comparison operators required for btree indexing\n--! (<, <=, =, >=, >).\n--!\n--! Index terms are checked in the following priority order:\n--! 1. ore_block_u64_8_256 (Order-Revealing Encryption)\n--! 2. ore_cllw_u64_8 (Order-Revealing Encryption)\n--! 3. 
ore_cllw_var_8 (Order-Revealing Encryption)\n--! 4. hmac_256 (Hash-based equality)\n--! 5. blake3 (Hash-based equality)\n--!\n--! The first index term type present in both values is used for comparison.\n--! If no matching index terms are found, falls back to JSONB literal comparison\n--! to ensure consistent ordering (required for btree correctness).\n--!\n--! @param a eql_v2_encrypted First encrypted value\n--! @param b eql_v2_encrypted Second encrypted value\n--! @return integer -1 if a < b, 0 if a = b, 1 if a > b\n--!\n--! @note Literal fallback prevents \"lock BufferContent is not held\" errors\n--! @see eql_v2.compare_ore_block_u64_8_256\n--! @see eql_v2.compare_blake3\n--! @see eql_v2.compare_hmac_256\nCREATE FUNCTION eql_v2.compare(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS integer\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n\n IF a IS NULL AND b IS NULL THEN\n RETURN 0;\n END IF;\n\n IF a IS NULL THEN\n RETURN -1;\n END IF;\n\n IF b IS NULL THEN\n RETURN 1;\n END IF;\n\n a := eql_v2.to_ste_vec_value(a);\n b := eql_v2.to_ste_vec_value(b);\n\n IF eql_v2.has_ore_block_u64_8_256(a) AND eql_v2.has_ore_block_u64_8_256(b) THEN\n RETURN eql_v2.compare_ore_block_u64_8_256(a, b);\n END IF;\n\n IF eql_v2.has_ore_cllw_u64_8(a) AND eql_v2.has_ore_cllw_u64_8(b) THEN\n RETURN eql_v2.compare_ore_cllw_u64_8(a, b);\n END IF;\n\n IF eql_v2.has_ore_cllw_var_8(a) AND eql_v2.has_ore_cllw_var_8(b) THEN\n RETURN eql_v2.compare_ore_cllw_var_8(a, b);\n END IF;\n\n IF eql_v2.has_hmac_256(a) AND eql_v2.has_hmac_256(b) THEN\n RETURN eql_v2.compare_hmac_256(a, b);\n END IF;\n\n IF eql_v2.has_blake3(a) AND eql_v2.has_blake3(b) THEN\n RETURN eql_v2.compare_blake3(a, b);\n END IF;\n\n -- Fallback to literal comparison of the encrypted data\n -- Compare must have consistent ordering for a given state\n -- Without this text fallback, database errors with \"lock BufferContent is not held\"\n RETURN eql_v2.compare_literal(a, b);\n\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n--! 
@brief Convert JSONB to encrypted type\n--!\n--! Wraps a JSONB encrypted payload into the eql_v2_encrypted composite type.\n--! Used internally for type conversions and operator implementations.\n--!\n--! @param jsonb JSONB encrypted payload with structure: {\"c\": \"...\", \"i\": {...}, \"k\": \"...\", \"v\": \"2\"}\n--! @return eql_v2_encrypted Encrypted value wrapped in composite type\n--!\n--! @note This is primarily used for implicit casts in operator expressions\n--! @see eql_v2.to_jsonb\nCREATE FUNCTION eql_v2.to_encrypted(data jsonb)\n RETURNS public.eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\nBEGIN\n IF data IS NULL THEN\n RETURN NULL;\n END IF;\n\n RETURN ROW(data)::public.eql_v2_encrypted;\nEND;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Implicit cast from JSONB to encrypted type\n--!\n--! Enables PostgreSQL to automatically convert JSONB values to eql_v2_encrypted\n--! in assignment contexts and comparison operations.\n--!\n--! @see eql_v2.to_encrypted(jsonb)\nCREATE CAST (jsonb AS public.eql_v2_encrypted)\n\tWITH FUNCTION eql_v2.to_encrypted(jsonb) AS ASSIGNMENT;\n\n\n--! @brief Convert text to encrypted type\n--!\n--! Parses a text representation of encrypted JSONB payload and wraps it\n--! in the eql_v2_encrypted composite type.\n--!\n--! @param text Text representation of JSONB encrypted payload\n--! @return eql_v2_encrypted Encrypted value wrapped in composite type\n--!\n--! @note Delegates to eql_v2.to_encrypted(jsonb) after parsing text as JSON\n--! @see eql_v2.to_encrypted(jsonb)\nCREATE FUNCTION eql_v2.to_encrypted(data text)\n RETURNS public.eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\nBEGIN\n IF data IS NULL THEN\n RETURN NULL;\n END IF;\n\n RETURN eql_v2.to_encrypted(data::jsonb);\nEND;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Implicit cast from text to encrypted type\n--!\n--! Enables PostgreSQL to automatically convert text JSON strings to eql_v2_encrypted\n--! in assignment contexts.\n--!\n--! 
@see eql_v2.to_encrypted(text)\nCREATE CAST (text AS public.eql_v2_encrypted)\n\tWITH FUNCTION eql_v2.to_encrypted(text) AS ASSIGNMENT;\n\n\n\n--! @brief Convert encrypted type to JSONB\n--!\n--! Extracts the underlying JSONB payload from an eql_v2_encrypted composite type.\n--! Useful for debugging or when raw encrypted payload access is needed.\n--!\n--! @param e eql_v2_encrypted Encrypted value to unwrap\n--! @return jsonb Raw JSONB encrypted payload\n--!\n--! @note Returns the raw encrypted structure including ciphertext and index terms\n--! @see eql_v2.to_encrypted(jsonb)\nCREATE FUNCTION eql_v2.to_jsonb(e public.eql_v2_encrypted)\n RETURNS jsonb\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\nBEGIN\n IF e IS NULL THEN\n RETURN NULL;\n END IF;\n\n RETURN e.data;\nEND;\n$$ LANGUAGE plpgsql;\n\n--! @brief Implicit cast from encrypted type to JSONB\n--!\n--! Enables PostgreSQL to automatically extract the JSONB payload from\n--! eql_v2_encrypted values in assignment contexts.\n--!\n--! @see eql_v2.to_jsonb(eql_v2_encrypted)\nCREATE CAST (public.eql_v2_encrypted AS jsonb)\n\tWITH FUNCTION eql_v2.to_jsonb(public.eql_v2_encrypted) AS ASSIGNMENT;\n\n\n\n--! @file config/types.sql\n--! @brief Configuration state type definition\n--!\n--! Defines the ENUM type for tracking encryption configuration lifecycle states.\n--! The configuration table uses this type to manage transitions between states\n--! during setup, activation, and encryption operations.\n--!\n--! @note CREATE TYPE does not support IF NOT EXISTS, so wrapped in DO block\n--! @note Configuration data stored as JSONB directly, not as DOMAIN\n--! @see config/tables.sql\n\n\n--! @brief Configuration lifecycle state\n--!\n--! Defines valid states for encryption configurations in the eql_v2_configuration table.\n--! Configurations transition through these states during setup and activation.\n--!\n--! @note Only one configuration can be in 'active', 'pending', or 'encrypting' state at once\n--! 
@see config/indexes.sql for uniqueness enforcement\n--! @see config/tables.sql for usage in eql_v2_configuration table\nDO $$\n BEGIN\n IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'eql_v2_configuration_state') THEN\n CREATE TYPE public.eql_v2_configuration_state AS ENUM ('active', 'inactive', 'encrypting', 'pending');\n END IF;\n END\n$$;\n\n\n\n--! @brief Extract Bloom filter index term from JSONB payload\n--!\n--! Extracts the Bloom filter array from the 'bf' field of an encrypted\n--! data payload. Used internally for pattern-match queries (LIKE operator).\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return eql_v2.bloom_filter Bloom filter as smallint array\n--! @throws Exception if 'bf' field is missing when bloom_filter index is expected\n--!\n--! @see eql_v2.has_bloom_filter\n--! @see eql_v2.\"~~\"\nCREATE FUNCTION eql_v2.bloom_filter(val jsonb)\n RETURNS eql_v2.bloom_filter\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n IF val IS NULL THEN\n RETURN NULL;\n END IF;\n\n IF eql_v2.has_bloom_filter(val) THEN\n RETURN ARRAY(SELECT jsonb_array_elements(val->'bf'))::eql_v2.bloom_filter;\n END IF;\n\n RAISE 'Expected a match index (bf) value in json: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract Bloom filter index term from encrypted column value\n--!\n--! Extracts the Bloom filter from an encrypted column value by accessing\n--! its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return eql_v2.bloom_filter Bloom filter as smallint array\n--!\n--! @see eql_v2.bloom_filter(jsonb)\nCREATE FUNCTION eql_v2.bloom_filter(val eql_v2_encrypted)\n RETURNS eql_v2.bloom_filter\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (SELECT eql_v2.bloom_filter(val.data));\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if JSONB payload contains Bloom filter index term\n--!\n--! Tests whether the encrypted data payload includes a 'bf' field,\n--! 
indicating a Bloom filter is available for pattern-match queries.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return Boolean True if 'bf' field is present and non-null\n--!\n--! @see eql_v2.bloom_filter\nCREATE FUNCTION eql_v2.has_bloom_filter(val jsonb)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN val ->> 'bf' IS NOT NULL;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if encrypted column value contains Bloom filter index term\n--!\n--! Tests whether an encrypted column value includes a Bloom filter\n--! by checking its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return Boolean True if Bloom filter is present\n--!\n--! @see eql_v2.has_bloom_filter(jsonb)\nCREATE FUNCTION eql_v2.has_bloom_filter(val eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.has_bloom_filter(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Fallback literal comparison for encrypted values\n--! @internal\n--!\n--! Compares two encrypted values by their raw JSONB representation when no\n--! suitable index terms are available. This ensures consistent ordering required\n--! for btree correctness and prevents \"lock BufferContent is not held\" errors.\n--!\n--! Used as a last resort fallback in eql_v2.compare() when encrypted values\n--! lack matching index terms (blake3, hmac_256, ore).\n--!\n--! @param a eql_v2_encrypted First encrypted value\n--! @param b eql_v2_encrypted Second encrypted value\n--! @return integer -1 if a < b, 0 if a = b, 1 if a > b\n--!\n--! @note This compares the encrypted payloads directly, not the plaintext values\n--! @note Ordering is consistent but not meaningful for range queries\n--! 
@see eql_v2.compare\nCREATE FUNCTION eql_v2.compare_literal(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS integer\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n a_data jsonb;\n b_data jsonb;\n BEGIN\n\n IF a IS NULL AND b IS NULL THEN\n RETURN 0;\n END IF;\n\n IF a IS NULL THEN\n RETURN -1;\n END IF;\n\n IF b IS NULL THEN\n RETURN 1;\n END IF;\n\n a_data := a.data;\n b_data := b.data;\n\n IF a_data < b_data THEN\n RETURN -1;\n END IF;\n\n IF a_data > b_data THEN\n RETURN 1;\n END IF;\n\n RETURN 0;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Less-than comparison helper for encrypted values\n--! @internal\n--!\n--! Internal helper that delegates to eql_v2.compare for less-than testing.\n--! Returns true if first value is less than second using ORE comparison.\n--!\n--! @param a eql_v2_encrypted First encrypted value\n--! @param b eql_v2_encrypted Second encrypted value\n--! @return Boolean True if a < b (compare result = -1)\n--!\n--! @see eql_v2.compare\n--! @see eql_v2.\"<\"\nCREATE FUNCTION eql_v2.lt(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.compare(a, b) = -1;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Less-than operator for encrypted values\n--!\n--! Implements the < operator for comparing two encrypted values using Order-Revealing\n--! Encryption (ORE) index terms. Enables range queries and sorting without decryption.\n--! Requires 'ore' index configuration on the column.\n--!\n--! @param a eql_v2_encrypted Left operand\n--! @param b eql_v2_encrypted Right operand\n--! @return Boolean True if a is less than b\n--!\n--! @example\n--! -- Range query on encrypted timestamps\n--! SELECT * FROM events\n--! WHERE encrypted_timestamp < '2024-01-01'::timestamp::text::eql_v2_encrypted;\n--!\n--! -- Compare encrypted numeric columns\n--! SELECT * FROM products WHERE encrypted_price < encrypted_discount_price;\n--!\n--! @see eql_v2.compare\n--! 
@see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.\"<\"(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.lt(a, b);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR <(\n FUNCTION=eql_v2.\"<\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=eql_v2_encrypted,\n COMMUTATOR = >,\n NEGATOR = >=,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n--! @brief Less-than operator for encrypted value and JSONB\n--!\n--! Overload of < operator accepting JSONB on the right side. Automatically\n--! casts JSONB to eql_v2_encrypted for ORE comparison.\n--!\n--! @param eql_v2_encrypted Left operand (encrypted value)\n--! @param b JSONB Right operand (will be cast to eql_v2_encrypted)\n--! @return Boolean True if a < b\n--!\n--! @example\n--! SELECT * FROM events WHERE encrypted_age < '18'::int::text::jsonb;\n--!\n--! @see eql_v2.\"<\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\"<\"(a eql_v2_encrypted, b jsonb)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.lt(a, b::eql_v2_encrypted);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR <(\n FUNCTION=eql_v2.\"<\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=jsonb,\n COMMUTATOR = >,\n NEGATOR = >=,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n--! @brief Less-than operator for JSONB and encrypted value\n--!\n--! Overload of < operator accepting JSONB on the left side. Automatically\n--! casts JSONB to eql_v2_encrypted for ORE comparison.\n--!\n--! @param a JSONB Left operand (will be cast to eql_v2_encrypted)\n--! @param eql_v2_encrypted Right operand (encrypted value)\n--! @return Boolean True if a < b\n--!\n--! @example\n--! SELECT * FROM events WHERE '2023-01-01'::date::text::jsonb < encrypted_date;\n--!\n--! 
@see eql_v2.\"<\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\"<\"(a jsonb, b eql_v2_encrypted)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.lt(a::eql_v2_encrypted, b);\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR <(\n FUNCTION=eql_v2.\"<\",\n LEFTARG=jsonb,\n RIGHTARG=eql_v2_encrypted,\n COMMUTATOR = >,\n NEGATOR = >=,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n\n\n--! @brief Less-than-or-equal comparison helper for encrypted values\n--! @internal\n--!\n--! Internal helper that delegates to eql_v2.compare for <= testing.\n--! Returns true if first value is less than or equal to second using ORE comparison.\n--!\n--! @param a eql_v2_encrypted First encrypted value\n--! @param b eql_v2_encrypted Second encrypted value\n--! @return Boolean True if a <= b (compare result <= 0)\n--!\n--! @see eql_v2.compare\n--! @see eql_v2.\"<=\"\nCREATE FUNCTION eql_v2.lte(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.compare(a, b) <= 0;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Less-than-or-equal operator for encrypted values\n--!\n--! Implements the <= operator for comparing encrypted values using ORE index terms.\n--! Enables range queries with inclusive lower bounds without decryption.\n--!\n--! @param a eql_v2_encrypted Left operand\n--! @param b eql_v2_encrypted Right operand\n--! @return Boolean True if a <= b\n--!\n--! @example\n--! -- Find records with encrypted age 18 or under\n--! SELECT * FROM users WHERE encrypted_age <= '18'::int::text::eql_v2_encrypted;\n--!\n--! @see eql_v2.compare\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.\"<=\"(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.lte(a, b);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR <=(\n FUNCTION = eql_v2.\"<=\",\n LEFTARG = eql_v2_encrypted,\n RIGHTARG = eql_v2_encrypted,\n COMMUTATOR = >=,\n NEGATOR = >,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n--! 
@brief <= operator for encrypted value and JSONB\n--! @see eql_v2.\"<=\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\"<=\"(a eql_v2_encrypted, b jsonb)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.lte(a, b::eql_v2_encrypted);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR <=(\n FUNCTION = eql_v2.\"<=\",\n LEFTARG = eql_v2_encrypted,\n RIGHTARG = jsonb,\n COMMUTATOR = >=,\n NEGATOR = >,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n--! @brief <= operator for JSONB and encrypted value\n--! @see eql_v2.\"<=\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\"<=\"(a jsonb, b eql_v2_encrypted)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.lte(a::eql_v2_encrypted, b);\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR <=(\n FUNCTION = eql_v2.\"<=\",\n LEFTARG = jsonb,\n RIGHTARG = eql_v2_encrypted,\n COMMUTATOR = >=,\n NEGATOR = >,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n\n\n--! @brief Equality comparison helper for encrypted values\n--! @internal\n--!\n--! Internal helper that delegates to eql_v2.compare for equality testing.\n--! Returns true if encrypted values are equal via encrypted index comparison.\n--!\n--! @param a eql_v2_encrypted First encrypted value\n--! @param b eql_v2_encrypted Second encrypted value\n--! @return Boolean True if values are equal (compare result = 0)\n--!\n--! @see eql_v2.compare\n--! @see eql_v2.\"=\"\nCREATE FUNCTION eql_v2.eq(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN eql_v2.compare(a, b) = 0;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Equality operator for encrypted values\n--!\n--! Implements the = operator for comparing two encrypted values using their\n--! encrypted index terms (unique/blake3). Enables WHERE clause comparisons\n--! without decryption.\n--!\n--! @param a eql_v2_encrypted Left operand\n--! @param b eql_v2_encrypted Right operand\n--! 
@return Boolean True if encrypted values are equal\n--!\n--! @example\n--! -- Compare encrypted columns\n--! SELECT * FROM users WHERE encrypted_email = other_encrypted_email;\n--!\n--! -- Search using encrypted literal\n--! SELECT * FROM users\n--! WHERE encrypted_email = '{\"c\":\"...\",\"i\":{\"unique\":\"...\"}}'::eql_v2_encrypted;\n--!\n--! @see eql_v2.compare\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.\"=\"(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN eql_v2.eq(a, b);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR = (\n FUNCTION=eql_v2.\"=\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=eql_v2_encrypted,\n NEGATOR = <>,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n--! @brief Equality operator for encrypted value and JSONB\n--!\n--! Overload of = operator accepting JSONB on the right side. Automatically\n--! casts JSONB to eql_v2_encrypted for comparison. Useful for comparing\n--! against JSONB literals or columns.\n--!\n--! @param eql_v2_encrypted Left operand (encrypted value)\n--! @param b JSONB Right operand (will be cast to eql_v2_encrypted)\n--! @return Boolean True if values are equal\n--!\n--! @example\n--! -- Compare encrypted column to JSONB literal\n--! SELECT * FROM users\n--! WHERE encrypted_email = '{\"c\":\"...\",\"i\":{\"unique\":\"...\"}}'::jsonb;\n--!\n--! @see eql_v2.\"=\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\"=\"(a eql_v2_encrypted, b jsonb)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN eql_v2.eq(a, b::eql_v2_encrypted);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR = (\n FUNCTION=eql_v2.\"=\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=jsonb,\n NEGATOR = <>,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n--! @brief Equality operator for JSONB and encrypted value\n--!\n--! Overload of = operator accepting JSONB on the left side. Automatically\n--! 
casts JSONB to eql_v2_encrypted for comparison. Enables commutative\n--! equality comparisons.\n--!\n--! @param a JSONB Left operand (will be cast to eql_v2_encrypted)\n--! @param eql_v2_encrypted Right operand (encrypted value)\n--! @return Boolean True if values are equal\n--!\n--! @example\n--! -- Compare JSONB literal to encrypted column\n--! SELECT * FROM users\n--! WHERE '{\"c\":\"...\",\"i\":{\"unique\":\"...\"}}'::jsonb = encrypted_email;\n--!\n--! @see eql_v2.\"=\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\"=\"(a jsonb, b eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN eql_v2.eq(a::eql_v2_encrypted, b);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR = (\n FUNCTION=eql_v2.\"=\",\n LEFTARG=jsonb,\n RIGHTARG=eql_v2_encrypted,\n NEGATOR = <>,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n\n--! @brief Greater-than-or-equal comparison helper for encrypted values\n--! @internal\n--!\n--! Internal helper that delegates to eql_v2.compare for >= testing.\n--! Returns true if first value is greater than or equal to second using ORE comparison.\n--!\n--! @param a eql_v2_encrypted First encrypted value\n--! @param b eql_v2_encrypted Second encrypted value\n--! @return Boolean True if a >= b (compare result >= 0)\n--!\n--! @see eql_v2.compare\n--! @see eql_v2.\">=\"\nCREATE FUNCTION eql_v2.gte(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.compare(a, b) >= 0;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Greater-than-or-equal operator for encrypted values\n--!\n--! Implements the >= operator for comparing encrypted values using ORE index terms.\n--! Enables range queries with inclusive upper bounds without decryption.\n--!\n--! @param a eql_v2_encrypted Left operand\n--! @param b eql_v2_encrypted Right operand\n--! @return Boolean True if a >= b\n--!\n--! @example\n--! -- Find records with age 18 or over\n--! 
SELECT * FROM users WHERE encrypted_age >= '18'::int::text::eql_v2_encrypted;\n--!\n--! @see eql_v2.compare\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.\">=\"(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.gte(a, b);\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR >=(\n FUNCTION = eql_v2.\">=\",\n LEFTARG = eql_v2_encrypted,\n RIGHTARG = eql_v2_encrypted,\n COMMUTATOR = <=,\n NEGATOR = <,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n--! @brief >= operator for encrypted value and JSONB\n--! @see eql_v2.\">=\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\">=\"(a eql_v2_encrypted, b jsonb)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.gte(a, b::eql_v2_encrypted);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR >=(\n FUNCTION = eql_v2.\">=\",\n LEFTARG = eql_v2_encrypted,\n RIGHTARG=jsonb,\n COMMUTATOR = <=,\n NEGATOR = <,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n--! @brief >= operator for JSONB and encrypted value\n--! @see eql_v2.\">=\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\">=\"(a jsonb, b eql_v2_encrypted)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.gte(a::eql_v2_encrypted, b);\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR >=(\n FUNCTION = eql_v2.\">=\",\n LEFTARG = jsonb,\n RIGHTARG =eql_v2_encrypted,\n COMMUTATOR = <=,\n NEGATOR = <,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n\n\n--! @brief Greater-than comparison helper for encrypted values\n--! @internal\n--!\n--! Internal helper that delegates to eql_v2.compare for greater-than testing.\n--! Returns true if first value is greater than second using ORE comparison.\n--!\n--! @param a eql_v2_encrypted First encrypted value\n--! @param b eql_v2_encrypted Second encrypted value\n--! @return Boolean True if a > b (compare result = 1)\n--!\n--! @see eql_v2.compare\n--! 
@see eql_v2.\">\"\nCREATE FUNCTION eql_v2.gt(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.compare(a, b) = 1;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Greater-than operator for encrypted values\n--!\n--! Implements the > operator for comparing encrypted values using ORE index terms.\n--! Enables range queries and sorting without decryption. Requires 'ore' index\n--! configuration on the column.\n--!\n--! @param a eql_v2_encrypted Left operand\n--! @param b eql_v2_encrypted Right operand\n--! @return Boolean True if a is greater than b\n--!\n--! @example\n--! -- Find records above threshold\n--! SELECT * FROM events\n--! WHERE encrypted_value > '100'::int::text::eql_v2_encrypted;\n--!\n--! @see eql_v2.compare\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.\">\"(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.gt(a, b);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR >(\n FUNCTION=eql_v2.\">\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=eql_v2_encrypted,\n COMMUTATOR = <,\n NEGATOR = <=,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n--! @brief > operator for encrypted value and JSONB\n--! @see eql_v2.\">\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\">\"(a eql_v2_encrypted, b jsonb)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.gt(a, b::eql_v2_encrypted);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR >(\n FUNCTION = eql_v2.\">\",\n LEFTARG = eql_v2_encrypted,\n RIGHTARG = jsonb,\n COMMUTATOR = <,\n NEGATOR = <=,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n--! @brief > operator for JSONB and encrypted value\n--! 
@see eql_v2.\">\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\">\"(a jsonb, b eql_v2_encrypted)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.gt(a::eql_v2_encrypted, b);\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR >(\n FUNCTION = eql_v2.\">\",\n LEFTARG = jsonb,\n RIGHTARG = eql_v2_encrypted,\n COMMUTATOR = <,\n NEGATOR = <=,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n\n\n\n--! @brief Extract STE vector index from JSONB payload\n--!\n--! Extracts the STE (Searchable Symmetric Encryption) vector from the 'sv' field\n--! of an encrypted data payload. Returns an array of encrypted values used for\n--! containment queries (@>, <@). If no 'sv' field exists, wraps the entire payload\n--! as a single-element array.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return eql_v2_encrypted[] Array of encrypted STE vector elements\n--!\n--! @see eql_v2.ste_vec(eql_v2_encrypted)\n--! @see eql_v2.ste_vec_contains\nCREATE FUNCTION eql_v2.ste_vec(val jsonb)\n RETURNS public.eql_v2_encrypted[]\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n sv jsonb;\n ary public.eql_v2_encrypted[];\n\tBEGIN\n\n IF val ? 'sv' THEN\n sv := val->'sv';\n ELSE\n sv := jsonb_build_array(val);\n END IF;\n\n SELECT array_agg(eql_v2.to_encrypted(elem))\n INTO ary\n FROM jsonb_array_elements(sv) AS elem;\n\n RETURN ary;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract STE vector index from encrypted column value\n--!\n--! Extracts the STE vector from an encrypted column value by accessing its\n--! underlying JSONB data field. Used for containment query operations.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return eql_v2_encrypted[] Array of encrypted STE vector elements\n--!\n--! @see eql_v2.ste_vec(jsonb)\nCREATE FUNCTION eql_v2.ste_vec(val eql_v2_encrypted)\n RETURNS public.eql_v2_encrypted[]\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (SELECT eql_v2.ste_vec(val.data));\n END;\n$$ LANGUAGE plpgsql;\n\n--! 
@brief Check if JSONB payload is a single-element STE vector\n--!\n--! Tests whether the encrypted data payload contains an 'sv' field with exactly\n--! one element. Single-element STE vectors can be treated as regular encrypted values.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return Boolean True if 'sv' field exists with exactly one element\n--!\n--! @see eql_v2.to_ste_vec_value\nCREATE FUNCTION eql_v2.is_ste_vec_value(val jsonb)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n IF val ? 'sv' THEN\n RETURN jsonb_array_length(val->'sv') = 1;\n END IF;\n\n RETURN false;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Check if encrypted column value is a single-element STE vector\n--!\n--! Tests whether an encrypted column value is a single-element STE vector\n--! by checking its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return Boolean True if value is a single-element STE vector\n--!\n--! @see eql_v2.is_ste_vec_value(jsonb)\nCREATE FUNCTION eql_v2.is_ste_vec_value(val eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.is_ste_vec_value(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Convert single-element STE vector to regular encrypted value\n--!\n--! Extracts the single element from a single-element STE vector and returns it\n--! as a regular encrypted value, preserving metadata. If the input is not a\n--! single-element STE vector, returns it unchanged.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return eql_v2_encrypted Regular encrypted value (unwrapped if single-element STE vector)\n--!\n--! 
@see eql_v2.is_ste_vec_value\nCREATE FUNCTION eql_v2.to_ste_vec_value(val jsonb)\n RETURNS eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n meta jsonb;\n sv jsonb;\n\tBEGIN\n\n IF val IS NULL THEN\n RETURN NULL;\n END IF;\n\n IF eql_v2.is_ste_vec_value(val) THEN\n meta := eql_v2.meta_data(val);\n sv := val->'sv';\n sv := sv[0];\n\n RETURN eql_v2.to_encrypted(meta || sv);\n END IF;\n\n RETURN eql_v2.to_encrypted(val);\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Convert single-element STE vector to regular encrypted value (encrypted type)\n--!\n--! Converts an encrypted column value to a regular encrypted value by unwrapping\n--! if it's a single-element STE vector.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return eql_v2_encrypted Regular encrypted value (unwrapped if single-element STE vector)\n--!\n--! @see eql_v2.to_ste_vec_value(jsonb)\nCREATE FUNCTION eql_v2.to_ste_vec_value(val eql_v2_encrypted)\n RETURNS eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.to_ste_vec_value(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Extract selector value from JSONB payload\n--!\n--! Extracts the selector ('s') field from an encrypted data payload.\n--! Selectors are used to match STE vector elements during containment queries.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return Text The selector value\n--! @throws Exception if 's' field is missing\n--!\n--! @see eql_v2.ste_vec_contains\nCREATE FUNCTION eql_v2.selector(val jsonb)\n RETURNS text\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n IF val IS NULL THEN\n RETURN NULL;\n END IF;\n\n IF val ? 's' THEN\n RETURN val->>'s';\n END IF;\n RAISE 'Expected a selector index (s) value in json: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract selector value from encrypted column value\n--!\n--! Extracts the selector from an encrypted column value by accessing its\n--! underlying JSONB data field.\n--!\n--! 
@param eql_v2_encrypted Encrypted column value\n--! @return Text The selector value\n--!\n--! @see eql_v2.selector(jsonb)\nCREATE FUNCTION eql_v2.selector(val eql_v2_encrypted)\n RETURNS text\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (SELECT eql_v2.selector(val.data));\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n--! @brief Check if JSONB payload is marked as an STE vector array\n--!\n--! Tests whether the encrypted data payload has the 'a' (array) flag set to true,\n--! indicating it represents an array for STE vector operations.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return Boolean True if 'a' field is present and true\n--!\n--! @see eql_v2.ste_vec\nCREATE FUNCTION eql_v2.is_ste_vec_array(val jsonb)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n IF val ? 'a' THEN\n RETURN (val->>'a')::boolean;\n END IF;\n\n RETURN false;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if encrypted column value is marked as an STE vector array\n--!\n--! Tests whether an encrypted column value has the array flag set by checking\n--! its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return Boolean True if value is marked as an STE vector array\n--!\n--! @see eql_v2.is_ste_vec_array(jsonb)\nCREATE FUNCTION eql_v2.is_ste_vec_array(val eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (SELECT eql_v2.is_ste_vec_array(val.data));\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n--! @brief Extract full encrypted JSONB elements as array\n--!\n--! Extracts all JSONB elements from the STE vector including non-deterministic fields.\n--! Use jsonb_array() instead for GIN indexing and containment queries.\n--!\n--! @param val jsonb containing encrypted EQL payload\n--! @return jsonb[] Array of full JSONB elements\n--!\n--! 
@see eql_v2.jsonb_array\nCREATE FUNCTION eql_v2.jsonb_array_from_array_elements(val jsonb)\nRETURNS jsonb[]\nIMMUTABLE STRICT PARALLEL SAFE\nLANGUAGE SQL\nAS $$\n SELECT CASE\n WHEN val ? 'sv' THEN\n ARRAY(SELECT elem FROM jsonb_array_elements(val->'sv') AS elem)\n ELSE\n ARRAY[val]\n END;\n$$;\n\n\n--! @brief Extract full encrypted JSONB elements as array from encrypted column\n--!\n--! @param val eql_v2_encrypted Encrypted column value\n--! @return jsonb[] Array of full JSONB elements\n--!\n--! @see eql_v2.jsonb_array_from_array_elements(jsonb)\nCREATE FUNCTION eql_v2.jsonb_array_from_array_elements(val eql_v2_encrypted)\nRETURNS jsonb[]\nIMMUTABLE STRICT PARALLEL SAFE\nLANGUAGE SQL\nAS $$\n SELECT eql_v2.jsonb_array_from_array_elements(val.data);\n$$;\n\n\n--! @brief Extract deterministic fields as array for GIN indexing\n--!\n--! Extracts only deterministic search term fields (s, b3, hm, ocv, ocf) from each\n--! STE vector element. Excludes non-deterministic ciphertext for correct containment\n--! comparison using PostgreSQL's native @> operator.\n--!\n--! @param val jsonb containing encrypted EQL payload\n--! @return jsonb[] Array of JSONB elements with only deterministic fields\n--!\n--! @note Use this for GIN indexes and containment queries\n--! @see eql_v2.jsonb_contains\nCREATE FUNCTION eql_v2.jsonb_array(val jsonb)\nRETURNS jsonb[]\nIMMUTABLE STRICT PARALLEL SAFE\nLANGUAGE SQL\nAS $$\n SELECT ARRAY(\n SELECT jsonb_object_agg(kv.key, kv.value)\n FROM jsonb_array_elements(\n CASE WHEN val ? 'sv' THEN val->'sv' ELSE jsonb_build_array(val) END\n ) AS elem,\n LATERAL jsonb_each(elem) AS kv(key, value)\n WHERE kv.key IN ('s', 'b3', 'hm', 'ocv', 'ocf')\n GROUP BY elem\n );\n$$;\n\n\n--! @brief Extract deterministic fields as array from encrypted column\n--!\n--! @param val eql_v2_encrypted Encrypted column value\n--! @return jsonb[] Array of JSONB elements with only deterministic fields\n--!\n--! 
@see eql_v2.jsonb_array(jsonb)\nCREATE FUNCTION eql_v2.jsonb_array(val eql_v2_encrypted)\nRETURNS jsonb[]\nIMMUTABLE STRICT PARALLEL SAFE\nLANGUAGE SQL\nAS $$\n SELECT eql_v2.jsonb_array(val.data);\n$$;\n\n\n--! @brief GIN-indexable JSONB containment check\n--!\n--! Checks if encrypted value 'a' contains all JSONB elements from 'b'.\n--! Uses jsonb[] arrays internally for native PostgreSQL GIN index support.\n--!\n--! This function is designed for use with a GIN index on jsonb_array(column).\n--! When combined with such an index, PostgreSQL can efficiently search large tables.\n--!\n--! @param a eql_v2_encrypted Container value (typically a table column)\n--! @param b eql_v2_encrypted Value to search for\n--! @return Boolean True if a contains all elements of b\n--!\n--! @example\n--! -- Create GIN index for efficient containment queries\n--! CREATE INDEX idx ON mytable USING GIN (eql_v2.jsonb_array(encrypted_col));\n--!\n--! -- Query using the helper function\n--! SELECT * FROM mytable WHERE eql_v2.jsonb_contains(encrypted_col, search_value);\n--!\n--! @see eql_v2.jsonb_array\nCREATE FUNCTION eql_v2.jsonb_contains(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean\nIMMUTABLE STRICT PARALLEL SAFE\nLANGUAGE SQL\nAS $$\n SELECT eql_v2.jsonb_array(a) @> eql_v2.jsonb_array(b);\n$$;\n\n\n--! @brief GIN-indexable JSONB containment check (encrypted, jsonb)\n--!\n--! Checks if encrypted value 'a' contains all JSONB elements from jsonb value 'b'.\n--! Uses jsonb[] arrays internally for native PostgreSQL GIN index support.\n--!\n--! @param a eql_v2_encrypted Container value (typically a table column)\n--! @param b jsonb JSONB value to search for\n--! @return Boolean True if a contains all elements of b\n--!\n--! @see eql_v2.jsonb_array\n--! 
@see eql_v2.jsonb_contains(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.jsonb_contains(a eql_v2_encrypted, b jsonb)\nRETURNS boolean\nIMMUTABLE STRICT PARALLEL SAFE\nLANGUAGE SQL\nAS $$\n SELECT eql_v2.jsonb_array(a) @> eql_v2.jsonb_array(b);\n$$;\n\n\n--! @brief GIN-indexable JSONB containment check (jsonb, encrypted)\n--!\n--! Checks if jsonb value 'a' contains all JSONB elements from encrypted value 'b'.\n--! Uses jsonb[] arrays internally for native PostgreSQL GIN index support.\n--!\n--! @param a jsonb Container JSONB value\n--! @param b eql_v2_encrypted Encrypted value to search for\n--! @return Boolean True if a contains all elements of b\n--!\n--! @see eql_v2.jsonb_array\n--! @see eql_v2.jsonb_contains(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.jsonb_contains(a jsonb, b eql_v2_encrypted)\nRETURNS boolean\nIMMUTABLE STRICT PARALLEL SAFE\nLANGUAGE SQL\nAS $$\n SELECT eql_v2.jsonb_array(a) @> eql_v2.jsonb_array(b);\n$$;\n\n\n--! @brief GIN-indexable JSONB \"is contained by\" check\n--!\n--! Checks if all JSONB elements from 'a' are contained in 'b'.\n--! Uses jsonb[] arrays internally for native PostgreSQL GIN index support.\n--!\n--! @param a eql_v2_encrypted Value to check (typically a table column)\n--! @param b eql_v2_encrypted Container value\n--! @return Boolean True if all elements of a are contained in b\n--!\n--! @see eql_v2.jsonb_array\n--! @see eql_v2.jsonb_contains\nCREATE FUNCTION eql_v2.jsonb_contained_by(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean\nIMMUTABLE STRICT PARALLEL SAFE\nLANGUAGE SQL\nAS $$\n SELECT eql_v2.jsonb_array(a) <@ eql_v2.jsonb_array(b);\n$$;\n\n\n--! @brief GIN-indexable JSONB \"is contained by\" check (encrypted, jsonb)\n--!\n--! Checks if all JSONB elements from encrypted value 'a' are contained in jsonb value 'b'.\n--! Uses jsonb[] arrays internally for native PostgreSQL GIN index support.\n--!\n--! @param a eql_v2_encrypted Value to check (typically a table column)\n--! 
@param b jsonb Container JSONB value\n--! @return Boolean True if all elements of a are contained in b\n--!\n--! @see eql_v2.jsonb_array\n--! @see eql_v2.jsonb_contained_by(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.jsonb_contained_by(a eql_v2_encrypted, b jsonb)\nRETURNS boolean\nIMMUTABLE STRICT PARALLEL SAFE\nLANGUAGE SQL\nAS $$\n SELECT eql_v2.jsonb_array(a) <@ eql_v2.jsonb_array(b);\n$$;\n\n\n--! @brief GIN-indexable JSONB \"is contained by\" check (jsonb, encrypted)\n--!\n--! Checks if all JSONB elements from jsonb value 'a' are contained in encrypted value 'b'.\n--! Uses jsonb[] arrays internally for native PostgreSQL GIN index support.\n--!\n--! @param a jsonb Value to check\n--! @param b eql_v2_encrypted Container encrypted value\n--! @return Boolean True if all elements of a are contained in b\n--!\n--! @see eql_v2.jsonb_array\n--! @see eql_v2.jsonb_contained_by(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.jsonb_contained_by(a jsonb, b eql_v2_encrypted)\nRETURNS boolean\nIMMUTABLE STRICT PARALLEL SAFE\nLANGUAGE SQL\nAS $$\n SELECT eql_v2.jsonb_array(a) <@ eql_v2.jsonb_array(b);\n$$;\n\n\n--! @brief Check if STE vector array contains a specific encrypted element\n--!\n--! Tests whether any element in the STE vector array 'a' contains the encrypted value 'b'.\n--! Matching requires both the selector and encrypted value to be equal.\n--! Used internally by ste_vec_contains(encrypted, encrypted) for array containment checks.\n--!\n--! @param eql_v2_encrypted[] STE vector array to search within\n--! @param eql_v2_encrypted Encrypted element to search for\n--! @return Boolean True if b is found in any element of a\n--!\n--! @note Compares both selector and encrypted value for match\n--!\n--! @see eql_v2.selector\n--! 
@see eql_v2.ste_vec_contains(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.ste_vec_contains(a public.eql_v2_encrypted[], b eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n result boolean;\n _a public.eql_v2_encrypted;\n BEGIN\n\n result := false;\n\n FOR idx IN 1..array_length(a, 1) LOOP\n _a := a[idx];\n result := result OR (eql_v2.selector(_a) = eql_v2.selector(b) AND _a = b);\n END LOOP;\n\n RETURN result;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if encrypted value 'a' contains all elements of encrypted value 'b'\n--!\n--! Performs STE vector containment comparison between two encrypted values.\n--! Returns true if all elements in b's STE vector are found in a's STE vector.\n--! Used internally by the @> containment operator for searchable encryption.\n--!\n--! @param a eql_v2_encrypted First encrypted value (container)\n--! @param b eql_v2_encrypted Second encrypted value (elements to find)\n--! @return Boolean True if all elements of b are contained in a\n--!\n--! @note Empty b is always contained in any a\n--! @note Each element of b must match both selector and value in a\n--!\n--! @see eql_v2.ste_vec\n--! @see eql_v2.ste_vec_contains(eql_v2_encrypted[], eql_v2_encrypted)\n--! 
@see eql_v2.\"@>\"\nCREATE FUNCTION eql_v2.ste_vec_contains(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n result boolean;\n sv_a public.eql_v2_encrypted[];\n sv_b public.eql_v2_encrypted[];\n _b public.eql_v2_encrypted;\n BEGIN\n\n -- jsonb arrays of ste_vec encrypted values\n sv_a := eql_v2.ste_vec(a);\n sv_b := eql_v2.ste_vec(b);\n\n -- an empty b is always contained in a\n IF array_length(sv_b, 1) IS NULL THEN\n RETURN true;\n END IF;\n\n IF array_length(sv_a, 1) IS NULL THEN\n RETURN false;\n END IF;\n\n result := true;\n\n -- for each element of b check if it is in a\n FOR idx IN 1..array_length(sv_b, 1) LOOP\n _b := sv_b[idx];\n result := result AND eql_v2.ste_vec_contains(sv_a, _b);\n END LOOP;\n\n RETURN result;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @file config/tables.sql\n--! @brief Encryption configuration storage table\n--!\n--! Defines the main table for storing EQL v2 encryption configurations.\n--! Each row represents a configuration specifying which tables/columns to encrypt\n--! and what index types to use. Configurations progress through lifecycle states.\n--!\n--! @see config/types.sql for state ENUM definition\n--! @see config/indexes.sql for state uniqueness constraints\n--! @see config/constraints.sql for data validation\n\n\n--! @brief Encryption configuration table\n--!\n--! Stores encryption configurations with their state and metadata.\n--! The 'data' JSONB column contains the full configuration structure including\n--! table/column mappings, index types, and casting rules.\n--!\n--! @note Only one configuration can be 'active', 'pending', or 'encrypting' at once\n--! @note 'id' is auto-generated identity column\n--! @note 'state' defaults to 'pending' for new configurations\n--! 
@note 'data' validated by CHECK constraint (see config/constraints.sql)\nCREATE TABLE IF NOT EXISTS public.eql_v2_configuration\n(\n id bigint GENERATED ALWAYS AS IDENTITY,\n state eql_v2_configuration_state NOT NULL DEFAULT 'pending',\n data jsonb,\n created_at timestamptz not null default current_timestamp,\n PRIMARY KEY(id)\n);\n\n\n--! @brief Initialize default configuration structure\n--! @internal\n--!\n--! Creates a default configuration object if input is NULL. Used internally\n--! by public configuration functions to ensure consistent structure.\n--!\n--! @param config JSONB Existing configuration or NULL\n--! @return JSONB Configuration with default structure (version 1, empty tables)\nCREATE FUNCTION eql_v2.config_default(config jsonb)\n RETURNS jsonb\n IMMUTABLE PARALLEL SAFE\nAS $$\n BEGIN\n IF config IS NULL THEN\n SELECT jsonb_build_object('v', 1, 'tables', jsonb_build_object()) INTO config;\n END IF;\n RETURN config;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Add table to configuration if not present\n--! @internal\n--!\n--! Ensures the specified table exists in the configuration structure.\n--! Creates empty table entry if needed. Idempotent operation.\n--!\n--! @param table_name Text Name of table to add\n--! @param config JSONB Configuration object\n--! @return JSONB Updated configuration with table entry\nCREATE FUNCTION eql_v2.config_add_table(table_name text, config jsonb)\n RETURNS jsonb\n IMMUTABLE PARALLEL SAFE\nAS $$\n DECLARE\n tbl jsonb;\n BEGIN\n IF NOT config #> array['tables'] ? table_name THEN\n SELECT jsonb_insert(config, array['tables', table_name], jsonb_build_object()) INTO config;\n END IF;\n RETURN config;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Add column to table configuration if not present\n--! @internal\n--!\n--! Ensures the specified column exists in the table's configuration structure.\n--! Creates empty column entry with indexes object if needed. Idempotent operation.\n--!\n--! 
@param table_name Text Name of parent table\n--! @param column_name Text Name of column to add\n--! @param config JSONB Configuration object\n--! @return JSONB Updated configuration with column entry\nCREATE FUNCTION eql_v2.config_add_column(table_name text, column_name text, config jsonb)\n RETURNS jsonb\n IMMUTABLE PARALLEL SAFE\nAS $$\n DECLARE\n col jsonb;\n BEGIN\n IF NOT config #> array['tables', table_name] ? column_name THEN\n SELECT jsonb_build_object('indexes', jsonb_build_object()) into col;\n SELECT jsonb_set(config, array['tables', table_name, column_name], col) INTO config;\n END IF;\n RETURN config;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Set cast type for column in configuration\n--! @internal\n--!\n--! Updates the cast_as field for a column, specifying the PostgreSQL type\n--! that decrypted values should be cast to.\n--!\n--! @param table_name Text Name of parent table\n--! @param column_name Text Name of column\n--! @param cast_as Text PostgreSQL type for casting (e.g., 'text', 'int', 'jsonb')\n--! @param config JSONB Configuration object\n--! @return JSONB Updated configuration with cast_as set\nCREATE FUNCTION eql_v2.config_add_cast(table_name text, column_name text, cast_as text, config jsonb)\n RETURNS jsonb\n IMMUTABLE PARALLEL SAFE\nAS $$\n BEGIN\n SELECT jsonb_set(config, array['tables', table_name, column_name, 'cast_as'], to_jsonb(cast_as)) INTO config;\n RETURN config;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Add search index to column configuration\n--! @internal\n--!\n--! Inserts a search index entry (unique, match, ore, ste_vec) with its options\n--! into the column's indexes object.\n--!\n--! @param table_name Text Name of parent table\n--! @param column_name Text Name of column\n--! @param index_name Text Type of index to add\n--! @param opts JSONB Index-specific options\n--! @param config JSONB Configuration object\n--! 
@return JSONB Updated configuration with index added\nCREATE FUNCTION eql_v2.config_add_index(table_name text, column_name text, index_name text, opts jsonb, config jsonb)\n RETURNS jsonb\n IMMUTABLE PARALLEL SAFE\nAS $$\n BEGIN\n SELECT jsonb_insert(config, array['tables', table_name, column_name, 'indexes', index_name], opts) INTO config;\n RETURN config;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Generate default options for match index\n--! @internal\n--!\n--! Returns default configuration for match (LIKE) indexes: k=6, bf=2048,\n--! ngram tokenizer with token_length=3, downcase filter, include_original=true.\n--!\n--! @return JSONB Default match index options\nCREATE FUNCTION eql_v2.config_match_default()\n RETURNS jsonb\nLANGUAGE sql STRICT PARALLEL SAFE\nBEGIN ATOMIC\n SELECT jsonb_build_object(\n 'k', 6,\n 'bf', 2048,\n 'include_original', true,\n 'tokenizer', json_build_object('kind', 'ngram', 'token_length', 3),\n 'token_filters', json_build_array(json_build_object('kind', 'downcase')));\nEND;\n-- AUTOMATICALLY GENERATED FILE\n-- Source is version-template.sql\n\nDROP FUNCTION IF EXISTS eql_v2.version();\n\n--! @file version.sql\n--! @brief EQL version reporting\n--!\n--! This file is auto-generated from version.template during build.\n--! The version string placeholder is replaced with the actual release version.\n\n--! @brief Get EQL library version string\n--!\n--! Returns the version string for the installed EQL library.\n--! This value is set at build time from the project version.\n--!\n--! @return text Version string (e.g., \"2.1.0\" or \"DEV\" for development builds)\n--!\n--! @note Auto-generated during build from version.template\n--!\n--! @example\n--! -- Check installed EQL version\n--! SELECT eql_v2.version();\n--! -- Returns: '2.1.0'\nCREATE FUNCTION eql_v2.version()\n RETURNS text\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n SELECT 'eql-2.2.1';\n$$ LANGUAGE SQL;\n\n\n\n--! 
@brief Compare two encrypted values using variable-width CLLW ORE index terms\n--!\n--! Performs a three-way comparison (returns -1/0/1) of encrypted values using\n--! their variable-width CLLW ORE ciphertext index terms. Used internally by range operators\n--! (<, <=, >, >=) for order-revealing comparisons without decryption.\n--!\n--! @param a eql_v2_encrypted First encrypted value to compare\n--! @param b eql_v2_encrypted Second encrypted value to compare\n--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b\n--!\n--! @note NULL values are sorted before non-NULL values\n--! @note Uses variable-width CLLW ORE cryptographic protocol for secure comparisons\n--!\n--! @see eql_v2.ore_cllw_var_8\n--! @see eql_v2.has_ore_cllw_var_8\n--! @see eql_v2.compare_ore_cllw_var_8_term\n--! @see eql_v2.\"<\"\n--! @see eql_v2.\">\"\nCREATE FUNCTION eql_v2.compare_ore_cllw_var_8(a eql_v2_encrypted, b eql_v2_encrypted)\n  RETURNS integer\n  IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n  DECLARE\n    a_term eql_v2.ore_cllw_var_8;\n    b_term eql_v2.ore_cllw_var_8;\n  BEGIN\n\n    -- PERFORM eql_v2.log('eql_v2.compare_ore_cllw_var_8');\n    -- PERFORM eql_v2.log('a', a::text);\n    -- PERFORM eql_v2.log('b', b::text);\n\n    IF a IS NULL AND b IS NULL THEN\n      RETURN 0;\n    END IF;\n\n    IF a IS NULL THEN\n      RETURN -1;\n    END IF;\n\n    IF b IS NULL THEN\n      RETURN 1;\n    END IF;\n\n    IF eql_v2.has_ore_cllw_var_8(a) THEN\n      a_term := eql_v2.ore_cllw_var_8(a);\n    END IF;\n\n    -- NOTE(fix): guard on b (was a) before extracting b's index term\n    IF eql_v2.has_ore_cllw_var_8(b) THEN\n      b_term := eql_v2.ore_cllw_var_8(b);\n    END IF;\n\n    IF a_term IS NULL AND b_term IS NULL THEN\n      RETURN 0;\n    END IF;\n\n    IF a_term IS NULL THEN\n      RETURN -1;\n    END IF;\n\n    IF b_term IS NULL THEN\n      RETURN 1;\n    END IF;\n\n    RETURN eql_v2.compare_ore_cllw_var_8_term(a_term, b_term);\n  END;\n$$ LANGUAGE plpgsql;\n\n\n\n--! @brief Compare two encrypted values using CLLW ORE index terms\n--!\n--! Performs a three-way comparison (returns -1/0/1) of encrypted values using\n--! their CLLW ORE ciphertext index terms. 
Used internally by range operators\n--! (<, <=, >, >=) for order-revealing comparisons without decryption.\n--!\n--! @param a eql_v2_encrypted First encrypted value to compare\n--! @param b eql_v2_encrypted Second encrypted value to compare\n--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b\n--!\n--! @note NULL values are sorted before non-NULL values\n--! @note Uses CLLW ORE cryptographic protocol for secure comparisons\n--!\n--! @see eql_v2.ore_cllw_u64_8\n--! @see eql_v2.has_ore_cllw_u64_8\n--! @see eql_v2.compare_ore_cllw_term_bytes\n--! @see eql_v2.\"<\"\n--! @see eql_v2.\">\"\nCREATE FUNCTION eql_v2.compare_ore_cllw_u64_8(a eql_v2_encrypted, b eql_v2_encrypted)\n  RETURNS integer\n  IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n  DECLARE\n    a_term eql_v2.ore_cllw_u64_8;\n    b_term eql_v2.ore_cllw_u64_8;\n  BEGIN\n\n    -- PERFORM eql_v2.log('eql_v2.compare_ore_cllw_u64_8');\n    -- PERFORM eql_v2.log('a', a::text);\n    -- PERFORM eql_v2.log('b', b::text);\n\n    IF a IS NULL AND b IS NULL THEN\n      RETURN 0;\n    END IF;\n\n    IF a IS NULL THEN\n      RETURN -1;\n    END IF;\n\n    IF b IS NULL THEN\n      RETURN 1;\n    END IF;\n\n    IF eql_v2.has_ore_cllw_u64_8(a) THEN\n      a_term := eql_v2.ore_cllw_u64_8(a);\n    END IF;\n\n    -- NOTE(fix): guard on b (was a) before extracting b's index term\n    IF eql_v2.has_ore_cllw_u64_8(b) THEN\n      b_term := eql_v2.ore_cllw_u64_8(b);\n    END IF;\n\n    IF a_term IS NULL AND b_term IS NULL THEN\n      RETURN 0;\n    END IF;\n\n    IF a_term IS NULL THEN\n      RETURN -1;\n    END IF;\n\n    IF b_term IS NULL THEN\n      RETURN 1;\n    END IF;\n\n    RETURN eql_v2.compare_ore_cllw_term_bytes(a_term.bytes, b_term.bytes);\n  END;\n$$ LANGUAGE plpgsql;\n\n-- NOTE FILE IS DISABLED\n\n\n--! @brief Equality operator for ORE block types\n--! @internal\n--!\n--! Implements the = operator for direct ORE block comparisons.\n--!\n--! @param a eql_v2.ore_block_u64_8_256 Left operand\n--! @param b eql_v2.ore_block_u64_8_256 Right operand\n--! @return Boolean True if ORE blocks are equal\n--!\n--! @note FILE IS DISABLED - Not included in build\n--! 
@see eql_v2.compare_ore_block_u64_8_256_terms\nCREATE FUNCTION eql_v2.ore_block_u64_8_256_eq(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256)\nRETURNS boolean AS $$\n SELECT eql_v2.compare_ore_block_u64_8_256_terms(a, b) = 0\n$$ LANGUAGE SQL;\n\n\n\n--! @brief Not equal operator for ORE block types\n--! @internal\n--!\n--! Implements the <> operator for direct ORE block comparisons.\n--!\n--! @param a eql_v2.ore_block_u64_8_256 Left operand\n--! @param b eql_v2.ore_block_u64_8_256 Right operand\n--! @return Boolean True if ORE blocks are not equal\n--!\n--! @note FILE IS DISABLED - Not included in build\n--! @see eql_v2.compare_ore_block_u64_8_256_terms\nCREATE FUNCTION eql_v2.ore_block_u64_8_256_neq(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256)\nRETURNS boolean AS $$\n SELECT eql_v2.compare_ore_block_u64_8_256_terms(a, b) <> 0\n$$ LANGUAGE SQL;\n\n\n\n--! @brief Less than operator for ORE block types\n--! @internal\n--!\n--! Implements the < operator for direct ORE block comparisons.\n--!\n--! @param a eql_v2.ore_block_u64_8_256 Left operand\n--! @param b eql_v2.ore_block_u64_8_256 Right operand\n--! @return Boolean True if left operand is less than right operand\n--!\n--! @note FILE IS DISABLED - Not included in build\n--! @see eql_v2.compare_ore_block_u64_8_256_terms\nCREATE FUNCTION eql_v2.ore_block_u64_8_256_lt(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256)\nRETURNS boolean AS $$\n SELECT eql_v2.compare_ore_block_u64_8_256_terms(a, b) = -1\n$$ LANGUAGE SQL;\n\n\n\n--! @brief Less than or equal operator for ORE block types\n--! @internal\n--!\n--! Implements the <= operator for direct ORE block comparisons.\n--!\n--! @param a eql_v2.ore_block_u64_8_256 Left operand\n--! @param b eql_v2.ore_block_u64_8_256 Right operand\n--! @return Boolean True if left operand is less than or equal to right operand\n--!\n--! @note FILE IS DISABLED - Not included in build\n--! 
@see eql_v2.compare_ore_block_u64_8_256_terms\nCREATE FUNCTION eql_v2.ore_block_u64_8_256_lte(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256)\nRETURNS boolean AS $$\n SELECT eql_v2.compare_ore_block_u64_8_256_terms(a, b) != 1\n$$ LANGUAGE SQL;\n\n\n\n--! @brief Greater than operator for ORE block types\n--! @internal\n--!\n--! Implements the > operator for direct ORE block comparisons.\n--!\n--! @param a eql_v2.ore_block_u64_8_256 Left operand\n--! @param b eql_v2.ore_block_u64_8_256 Right operand\n--! @return Boolean True if left operand is greater than right operand\n--!\n--! @note FILE IS DISABLED - Not included in build\n--! @see eql_v2.compare_ore_block_u64_8_256_terms\nCREATE FUNCTION eql_v2.ore_block_u64_8_256_gt(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256)\nRETURNS boolean AS $$\n SELECT eql_v2.compare_ore_block_u64_8_256_terms(a, b) = 1\n$$ LANGUAGE SQL;\n\n\n\n--! @brief Greater than or equal operator for ORE block types\n--! @internal\n--!\n--! Implements the >= operator for direct ORE block comparisons.\n--!\n--! @param a eql_v2.ore_block_u64_8_256 Left operand\n--! @param b eql_v2.ore_block_u64_8_256 Right operand\n--! @return Boolean True if left operand is greater than or equal to right operand\n--!\n--! @note FILE IS DISABLED - Not included in build\n--! @see eql_v2.compare_ore_block_u64_8_256_terms\nCREATE FUNCTION eql_v2.ore_block_u64_8_256_gte(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256)\nRETURNS boolean AS $$\n SELECT eql_v2.compare_ore_block_u64_8_256_terms(a, b) != -1\n$$ LANGUAGE SQL;\n\n\n\n--! @brief = operator for ORE block types\n--! @note FILE IS DISABLED - Not included in build\nCREATE OPERATOR = (\n FUNCTION=eql_v2.ore_block_u64_8_256_eq,\n LEFTARG=eql_v2.ore_block_u64_8_256,\n RIGHTARG=eql_v2.ore_block_u64_8_256,\n NEGATOR = <>,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n\n\n--! @brief <> operator for ORE block types\n--! 
@note FILE IS DISABLED - Not included in build\nCREATE OPERATOR <> (\n FUNCTION=eql_v2.ore_block_u64_8_256_neq,\n LEFTARG=eql_v2.ore_block_u64_8_256,\n RIGHTARG=eql_v2.ore_block_u64_8_256,\n NEGATOR = =,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n\n--! @brief > operator for ORE block types\n--! @note FILE IS DISABLED - Not included in build\nCREATE OPERATOR > (\n FUNCTION=eql_v2.ore_block_u64_8_256_gt,\n LEFTARG=eql_v2.ore_block_u64_8_256,\n RIGHTARG=eql_v2.ore_block_u64_8_256,\n COMMUTATOR = <,\n NEGATOR = <=,\n RESTRICT = scalargtsel,\n JOIN = scalargtjoinsel\n);\n\n\n\n--! @brief < operator for ORE block types\n--! @note FILE IS DISABLED - Not included in build\nCREATE OPERATOR < (\n FUNCTION=eql_v2.ore_block_u64_8_256_lt,\n LEFTARG=eql_v2.ore_block_u64_8_256,\n RIGHTARG=eql_v2.ore_block_u64_8_256,\n COMMUTATOR = >,\n NEGATOR = >=,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n\n\n--! @brief <= operator for ORE block types\n--! @note FILE IS DISABLED - Not included in build\nCREATE OPERATOR <= (\n FUNCTION=eql_v2.ore_block_u64_8_256_lte,\n LEFTARG=eql_v2.ore_block_u64_8_256,\n RIGHTARG=eql_v2.ore_block_u64_8_256,\n COMMUTATOR = >=,\n NEGATOR = >,\n RESTRICT = scalarlesel,\n JOIN = scalarlejoinsel\n);\n\n\n\n--! @brief >= operator for ORE block types\n--! @note FILE IS DISABLED - Not included in build\nCREATE OPERATOR >= (\n FUNCTION=eql_v2.ore_block_u64_8_256_gte,\n LEFTARG=eql_v2.ore_block_u64_8_256,\n RIGHTARG=eql_v2.ore_block_u64_8_256,\n COMMUTATOR = <=,\n NEGATOR = <,\n RESTRICT = scalarlesel,\n JOIN = scalarlejoinsel\n);\n-- NOTE FILE IS DISABLED\n\n\n\n--! @brief B-tree operator family for ORE block types\n--!\n--! Defines the operator family for creating B-tree indexes on ORE block types.\n--!\n--! @note FILE IS DISABLED - Not included in build\n--! @see eql_v2.ore_block_u64_8_256_operator_class\nCREATE OPERATOR FAMILY eql_v2.ore_block_u64_8_256_operator_family USING btree;\n\n--! 
@brief B-tree operator class for ORE block encrypted values\n--!\n--! Defines the operator class required for creating B-tree indexes on columns\n--! using the ore_block_u64_8_256 type. Enables range queries and ORDER BY on\n--! ORE-encrypted data without decryption.\n--!\n--! Supports operators: <, <=, =, >=, >\n--! Uses comparison function: compare_ore_block_u64_8_256_terms\n--!\n--! @note FILE IS DISABLED - Not included in build\n--!\n--! @example\n--! -- Would be used like (if enabled):\n--! CREATE INDEX ON events USING btree (\n--! (encrypted_timestamp::jsonb->'ob')::eql_v2.ore_block_u64_8_256\n--! );\n--!\n--! @see CREATE OPERATOR CLASS in PostgreSQL documentation\n--! @see eql_v2.compare_ore_block_u64_8_256_terms\nCREATE OPERATOR CLASS eql_v2.ore_block_u64_8_256_operator_class DEFAULT FOR TYPE eql_v2.ore_block_u64_8_256 USING btree FAMILY eql_v2.ore_block_u64_8_256_operator_family AS\n OPERATOR 1 <,\n OPERATOR 2 <=,\n OPERATOR 3 =,\n OPERATOR 4 >=,\n OPERATOR 5 >,\n FUNCTION 1 eql_v2.compare_ore_block_u64_8_256_terms(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256);\n\n\n--! @brief Compare two encrypted values using ORE block index terms\n--!\n--! Performs a three-way comparison (returns -1/0/1) of encrypted values using\n--! their ORE block index terms. Used internally by range operators (<, <=, >, >=)\n--! for order-revealing comparisons without decryption.\n--!\n--! @param a eql_v2_encrypted First encrypted value to compare\n--! @param b eql_v2_encrypted Second encrypted value to compare\n--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b\n--!\n--! @note NULL values are sorted before non-NULL values\n--! @note Uses ORE cryptographic protocol for secure comparisons\n--!\n--! @see eql_v2.ore_block_u64_8_256\n--! @see eql_v2.has_ore_block_u64_8_256\n--! @see eql_v2.\"<\"\n--! 
@see eql_v2.\">\"\nCREATE FUNCTION eql_v2.compare_ore_block_u64_8_256(a eql_v2_encrypted, b eql_v2_encrypted)\n  RETURNS integer\n  IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n  DECLARE\n    a_term eql_v2.ore_block_u64_8_256;\n    b_term eql_v2.ore_block_u64_8_256;\n  BEGIN\n\n    IF a IS NULL AND b IS NULL THEN\n      RETURN 0;\n    END IF;\n\n    IF a IS NULL THEN\n      RETURN -1;\n    END IF;\n\n    IF b IS NULL THEN\n      RETURN 1;\n    END IF;\n\n    IF eql_v2.has_ore_block_u64_8_256(a) THEN\n      a_term := eql_v2.ore_block_u64_8_256(a);\n    END IF;\n\n    -- NOTE(fix): guard on b (was a) before extracting b's index term\n    IF eql_v2.has_ore_block_u64_8_256(b) THEN\n      b_term := eql_v2.ore_block_u64_8_256(b);\n    END IF;\n\n    IF a_term IS NULL AND b_term IS NULL THEN\n      RETURN 0;\n    END IF;\n\n    IF a_term IS NULL THEN\n      RETURN -1;\n    END IF;\n\n    IF b_term IS NULL THEN\n      RETURN 1;\n    END IF;\n\n    RETURN eql_v2.compare_ore_block_u64_8_256_terms(a_term.terms, b_term.terms);\n  END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Cast text to ORE block term\n--! @internal\n--!\n--! Converts text to bytea and wraps in ore_block_u64_8_256_term type.\n--! Used internally for ORE block extraction and manipulation.\n--!\n--! @param t Text Text value to convert\n--! @return eql_v2.ore_block_u64_8_256_term ORE term containing bytea representation\n--!\n--! @see eql_v2.ore_block_u64_8_256_term\nCREATE FUNCTION eql_v2.text_to_ore_block_u64_8_256_term(t text)\n  RETURNS eql_v2.ore_block_u64_8_256_term\n  LANGUAGE sql IMMUTABLE STRICT PARALLEL SAFE\nBEGIN ATOMIC\n\tRETURN t::bytea;\nEND;\n\n--! @brief Implicit cast from text to ORE block term\n--!\n--! Defines an implicit cast allowing automatic conversion of text values\n--! to ore_block_u64_8_256_term type for ORE operations.\n--!\n--! @see eql_v2.text_to_ore_block_u64_8_256_term\nCREATE CAST (text AS eql_v2.ore_block_u64_8_256_term)\n\tWITH FUNCTION eql_v2.text_to_ore_block_u64_8_256_term(text) AS IMPLICIT;\n\n--! @brief Pattern matching helper using bloom filters\n--! @internal\n--!\n--! Internal helper for LIKE-style pattern matching on encrypted values.\n--! 
Uses bloom filter index terms to test substring containment without decryption.\n--! Requires 'match' index configuration on the column.\n--!\n--! @param a eql_v2_encrypted Haystack (value to search in)\n--! @param b eql_v2_encrypted Needle (pattern to search for)\n--! @return Boolean True if bloom filter of a contains bloom filter of b\n--!\n--! @see eql_v2.\"~~\"\n--! @see eql_v2.bloom_filter\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.like(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean AS $$\n SELECT eql_v2.bloom_filter(a) @> eql_v2.bloom_filter(b);\n$$ LANGUAGE SQL;\n\n--! @brief Case-insensitive pattern matching helper\n--! @internal\n--!\n--! Internal helper for ILIKE-style case-insensitive pattern matching.\n--! Case sensitivity is controlled by index configuration (token_filters with downcase).\n--! This function has same implementation as like() - actual case handling is in index terms.\n--!\n--! @param a eql_v2_encrypted Haystack (value to search in)\n--! @param b eql_v2_encrypted Needle (pattern to search for)\n--! @return Boolean True if bloom filter of a contains bloom filter of b\n--!\n--! @note Case sensitivity depends on match index token_filters configuration\n--! @see eql_v2.\"~~\"\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.ilike(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean AS $$\n SELECT eql_v2.bloom_filter(a) @> eql_v2.bloom_filter(b);\n$$ LANGUAGE SQL;\n\n--! @brief LIKE operator for encrypted values (pattern matching)\n--!\n--! Implements the ~~ (LIKE) operator for substring/pattern matching on encrypted\n--! text using bloom filter index terms. Enables WHERE col LIKE '%pattern%' queries\n--! without decryption. Requires 'match' index configuration on the column.\n--!\n--! Pattern matching uses n-gram tokenization configured in match index. Token length\n--! and filters affect matching behavior.\n--!\n--! @param a eql_v2_encrypted Haystack (encrypted text to search in)\n--! 
@param b eql_v2_encrypted Needle (encrypted pattern to search for)\n--! @return Boolean True if a contains b as substring\n--!\n--! @example\n--! -- Search for substring in encrypted email\n--! SELECT * FROM users\n--! WHERE encrypted_email ~~ '%@example.com%'::text::eql_v2_encrypted;\n--!\n--! -- Pattern matching on encrypted names\n--! SELECT * FROM customers\n--! WHERE encrypted_name ~~ 'John%'::text::eql_v2_encrypted;\n--!\n--! @brief SQL LIKE operator (~~ operator) for encrypted text pattern matching\n--!\n--! @param a eql_v2_encrypted Left operand (encrypted value)\n--! @param b eql_v2_encrypted Right operand (encrypted pattern)\n--! @return boolean True if pattern matches\n--!\n--! @note Requires match index: eql_v2.add_search_config(table, column, 'match')\n--! @see eql_v2.like\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.\"~~\"(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.like(a, b);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR ~~(\n FUNCTION=eql_v2.\"~~\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=eql_v2_encrypted,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n--! @brief Case-insensitive LIKE operator (~~*)\n--!\n--! Implements ~~* (ILIKE) operator for case-insensitive pattern matching.\n--! Case handling depends on match index token_filters configuration (use downcase filter).\n--! Same implementation as ~~, with case sensitivity controlled by index configuration.\n--!\n--! @param a eql_v2_encrypted Haystack\n--! @param b eql_v2_encrypted Needle\n--! @return Boolean True if a contains b (case-insensitive)\n--!\n--! @note Configure match index with downcase token filter for case-insensitivity\n--! @see eql_v2.\"~~\"\nCREATE OPERATOR ~~*(\n FUNCTION=eql_v2.\"~~\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=eql_v2_encrypted,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n--! @brief LIKE operator for encrypted value and JSONB\n--!\n--! 
Overload of ~~ operator accepting JSONB on the right side. Automatically\n--! casts JSONB to eql_v2_encrypted for bloom filter pattern matching.\n--!\n--! @param eql_v2_encrypted Haystack (encrypted value)\n--! @param b JSONB Needle (will be cast to eql_v2_encrypted)\n--! @return Boolean True if a contains b as substring\n--!\n--! @example\n--! SELECT * FROM users WHERE encrypted_email ~~ '%gmail%'::jsonb;\n--!\n--! @see eql_v2.\"~~\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\"~~\"(a eql_v2_encrypted, b jsonb)\n RETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.like(a, b::eql_v2_encrypted);\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR ~~(\n FUNCTION=eql_v2.\"~~\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=jsonb,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\nCREATE OPERATOR ~~*(\n FUNCTION=eql_v2.\"~~\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=jsonb,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n--! @brief LIKE operator for JSONB and encrypted value\n--!\n--! Overload of ~~ operator accepting JSONB on the left side. Automatically\n--! casts JSONB to eql_v2_encrypted for bloom filter pattern matching.\n--!\n--! @param a JSONB Haystack (will be cast to eql_v2_encrypted)\n--! @param eql_v2_encrypted Needle (encrypted pattern)\n--! @return Boolean True if a contains b as substring\n--!\n--! @example\n--! SELECT * FROM users WHERE 'test@example.com'::jsonb ~~ encrypted_pattern;\n--!\n--! 
@see eql_v2.\"~~\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\"~~\"(a jsonb, b eql_v2_encrypted)\n RETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.like(a::eql_v2_encrypted, b);\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR ~~(\n FUNCTION=eql_v2.\"~~\",\n LEFTARG=jsonb,\n RIGHTARG=eql_v2_encrypted,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\nCREATE OPERATOR ~~*(\n FUNCTION=eql_v2.\"~~\",\n LEFTARG=jsonb,\n RIGHTARG=eql_v2_encrypted,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n\n-- -----------------------------------------------------------------------------\n\n--! @brief Extract ORE index term for ordering encrypted values\n--!\n--! Helper function that extracts the ore_block_u64_8_256 index term from an encrypted value\n--! for use in ORDER BY clauses when comparison operators are not appropriate or available.\n--!\n--! @param eql_v2_encrypted Encrypted value to extract order term from\n--! @return eql_v2.ore_block_u64_8_256 ORE index term for ordering\n--!\n--! @example\n--! -- Order encrypted values without using comparison operators\n--! SELECT * FROM users ORDER BY eql_v2.order_by(encrypted_age);\n--!\n--! @note Requires 'ore' index configuration on the column\n--! @see eql_v2.ore_block_u64_8_256\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.order_by(a eql_v2_encrypted)\n RETURNS eql_v2.ore_block_u64_8_256\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN eql_v2.ore_block_u64_8_256(a);\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n\n--! @brief PostgreSQL operator class definitions for encrypted value indexing\n--!\n--! Defines the operator family and operator class required for btree indexing\n--! of encrypted values. This enables PostgreSQL to use encrypted columns in:\n--! - CREATE INDEX statements\n--! - ORDER BY clauses\n--! - Range queries\n--! - Primary key constraints\n--!\n--! The operator class maps the five comparison operators (<, <=, =, >=, >)\n--! 
to the eql_v2.compare() support function for btree index operations.\n--!\n--! @note This is the default operator class for eql_v2_encrypted type\n--! @see eql_v2.compare\n--! @see PostgreSQL documentation on operator classes\n\n--------------------\n\nCREATE OPERATOR FAMILY eql_v2.encrypted_operator_family USING btree;\n\nCREATE OPERATOR CLASS eql_v2.encrypted_operator_class DEFAULT FOR TYPE eql_v2_encrypted USING btree FAMILY eql_v2.encrypted_operator_family AS\n OPERATOR 1 <,\n OPERATOR 2 <=,\n OPERATOR 3 =,\n OPERATOR 4 >=,\n OPERATOR 5 >,\n FUNCTION 1 eql_v2.compare(a eql_v2_encrypted, b eql_v2_encrypted);\n\n\n--------------------\n\n-- CREATE OPERATOR FAMILY eql_v2.encrypted_operator_ordered USING btree;\n\n-- CREATE OPERATOR CLASS eql_v2.encrypted_operator_ordered FOR TYPE eql_v2_encrypted USING btree FAMILY eql_v2.encrypted_operator_ordered AS\n-- OPERATOR 1 <,\n-- OPERATOR 2 <=,\n-- OPERATOR 3 =,\n-- OPERATOR 4 >=,\n-- OPERATOR 5 >,\n-- FUNCTION 1 eql_v2.compare_ore_block_u64_8_256(a eql_v2_encrypted, b eql_v2_encrypted);\n\n--------------------\n\n-- CREATE OPERATOR FAMILY eql_v2.encrypted_hmac_256_operator USING btree;\n\n-- CREATE OPERATOR CLASS eql_v2.encrypted_hmac_256_operator FOR TYPE eql_v2_encrypted USING btree FAMILY eql_v2.encrypted_hmac_256_operator AS\n-- OPERATOR 1 <,\n-- OPERATOR 2 <=,\n-- OPERATOR 3 =,\n-- OPERATOR 4 >=,\n-- OPERATOR 5 >,\n-- FUNCTION 1 eql_v2.compare_hmac(a eql_v2_encrypted, b eql_v2_encrypted);\n\n\n--! @brief Contains operator for encrypted values (@>)\n--!\n--! Implements the @> (contains) operator for testing if left encrypted value\n--! contains the right encrypted value. Uses ste_vec (secure tree encoding vector)\n--! index terms for containment testing without decryption.\n--!\n--! Primarily used for encrypted array or set containment queries.\n--!\n--! @param a eql_v2_encrypted Left operand (container)\n--! @param b eql_v2_encrypted Right operand (contained value)\n--! 
@return Boolean True if a contains b\n--!\n--! @example\n--! -- Check if encrypted array contains value\n--! SELECT * FROM documents\n--! WHERE encrypted_tags @> '[\"security\"]'::jsonb::eql_v2_encrypted;\n--!\n--! @note Requires ste_vec index configuration\n--! @see eql_v2.ste_vec_contains\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.\"@>\"(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean AS $$\n SELECT eql_v2.ste_vec_contains(a, b)\n$$ LANGUAGE SQL;\n\nCREATE OPERATOR @>(\n FUNCTION=eql_v2.\"@>\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=eql_v2_encrypted\n);\n\n--! @brief Contained-by operator for encrypted values (<@)\n--!\n--! Implements the <@ (contained-by) operator for testing if left encrypted value\n--! is contained by the right encrypted value. Uses ste_vec (secure tree encoding vector)\n--! index terms for containment testing without decryption. Reverse of @> operator.\n--!\n--! Primarily used for encrypted array or set containment queries.\n--!\n--! @param a eql_v2_encrypted Left operand (contained value)\n--! @param b eql_v2_encrypted Right operand (container)\n--! @return Boolean True if a is contained by b\n--!\n--! @example\n--! -- Check if value is contained in encrypted array\n--! SELECT * FROM documents\n--! WHERE '[\"security\"]'::jsonb::eql_v2_encrypted <@ encrypted_tags;\n--!\n--! @note Requires ste_vec index configuration\n--! @see eql_v2.ste_vec_contains\n--! @see eql_v2.\\\"@>\\\"\n--! @see eql_v2.add_search_config\n\nCREATE FUNCTION eql_v2.\"<@\"(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean AS $$\n -- Contains with reversed arguments\n SELECT eql_v2.ste_vec_contains(b, a)\n$$ LANGUAGE SQL;\n\nCREATE OPERATOR <@(\n FUNCTION=eql_v2.\"<@\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=eql_v2_encrypted\n);\n\n--! @brief Not-equal comparison helper for encrypted values\n--! @internal\n--!\n--! Internal helper that delegates to eql_v2.compare for inequality testing.\n--! 
Returns true if encrypted values are not equal via encrypted index comparison.\n--!\n--! @param a eql_v2_encrypted First encrypted value\n--! @param b eql_v2_encrypted Second encrypted value\n--! @return Boolean True if values are not equal (compare result <> 0)\n--!\n--! @see eql_v2.compare\n--! @see eql_v2.\"<>\"\nCREATE FUNCTION eql_v2.neq(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN eql_v2.compare(a, b) <> 0;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Not-equal operator for encrypted values\n--!\n--! Implements the <> (not equal) operator for comparing encrypted values using their\n--! encrypted index terms. Enables WHERE clause inequality comparisons without decryption.\n--!\n--! @param a eql_v2_encrypted Left operand\n--! @param b eql_v2_encrypted Right operand\n--! @return Boolean True if encrypted values are not equal\n--!\n--! @example\n--! -- Find records with non-matching values\n--! SELECT * FROM users\n--! WHERE encrypted_email <> 'admin@example.com'::text::eql_v2_encrypted;\n--!\n--! @see eql_v2.compare\n--! @see eql_v2.\"=\"\nCREATE FUNCTION eql_v2.\"<>\"(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN eql_v2.neq(a, b );\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR <> (\n FUNCTION=eql_v2.\"<>\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=eql_v2_encrypted,\n NEGATOR = =,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n--! @brief <> operator for encrypted value and JSONB\n--! 
@see eql_v2.\"<>\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\"<>\"(a eql_v2_encrypted, b jsonb)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN eql_v2.neq(a, b::eql_v2_encrypted);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR <> (\n FUNCTION=eql_v2.\"<>\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=jsonb,\n NEGATOR = =,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n--! @brief <> operator for JSONB and encrypted value\n--!\n--! @param jsonb Plain JSONB value\n--! @param eql_v2_encrypted Encrypted value\n--! @return boolean True if values are not equal\n--!\n--! @see eql_v2.\"<>\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\"<>\"(a jsonb, b eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN eql_v2.neq(a::eql_v2_encrypted, b);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR <> (\n FUNCTION=eql_v2.\"<>\",\n LEFTARG=jsonb,\n RIGHTARG=eql_v2_encrypted,\n NEGATOR = =,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n\n\n\n\n--! @brief JSONB field accessor operator alias (->>)\n--!\n--! Implements the ->> operator as an alias of -> for encrypted JSONB data. This mirrors\n--! PostgreSQL semantics where ->> returns text via implicit casts. The underlying\n--! implementation delegates to eql_v2.\"->\" and allows PostgreSQL to coerce the result.\n--!\n--! Provides two overloads:\n--! - (eql_v2_encrypted, text) - Field name selector\n--! - (eql_v2_encrypted, eql_v2_encrypted) - Encrypted selector\n--!\n--! @see eql_v2.\"->\"\n--! @see eql_v2.selector\n\n--! @brief ->> operator with text selector\n--! @param eql_v2_encrypted Encrypted JSONB data\n--! @param text Field name to extract\n--! @return text Encrypted value at selector, implicitly cast from eql_v2_encrypted\n--! @example\n--! 
SELECT encrypted_json ->> 'field_name' FROM table;\nCREATE FUNCTION eql_v2.\"->>\"(e eql_v2_encrypted, selector text)\n RETURNS text\nIMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n found eql_v2_encrypted;\n\tBEGIN\n -- found = eql_v2.\"->\"(e, selector);\n -- RETURN eql_v2.ciphertext(found);\n RETURN eql_v2.\"->\"(e, selector);\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR ->> (\n FUNCTION=eql_v2.\"->>\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=text\n);\n\n\n\n---------------------------------------------------\n\n--! @brief ->> operator with encrypted selector\n--! @param e eql_v2_encrypted Encrypted JSONB data\n--! @param selector eql_v2_encrypted Encrypted field selector\n--! @return text Encrypted value at selector, implicitly cast from eql_v2_encrypted\n--! @see eql_v2.\"->>\"(eql_v2_encrypted, text)\nCREATE FUNCTION eql_v2.\"->>\"(e eql_v2_encrypted, selector eql_v2_encrypted)\n RETURNS text\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.\"->>\"(e, eql_v2.selector(selector));\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR ->> (\n FUNCTION=eql_v2.\"->>\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=eql_v2_encrypted\n);\n\n--! @brief JSONB field accessor operator for encrypted values (->)\n--!\n--! Implements the -> operator to access fields/elements from encrypted JSONB data.\n--! Returns encrypted value matching the provided selector without decryption.\n--!\n--! Encrypted JSON is represented as an array of eql_v2_encrypted values in the ste_vec format.\n--! Each element has a selector, ciphertext, and index terms:\n--! {\"sv\": [{\"c\": \"\", \"s\": \"\", \"b3\": \"\"}]}\n--!\n--! Provides three overloads:\n--! - (eql_v2_encrypted, text) - Field name selector\n--! - (eql_v2_encrypted, eql_v2_encrypted) - Encrypted selector\n--! - (eql_v2_encrypted, integer) - Array index selector (0-based)\n--!\n--! @note Operator resolution: Assignment casts are considered (PostgreSQL standard behavior).\n--! 
To use text selector, parameter may need explicit cast to text.\n--!\n--! @see eql_v2.ste_vec\n--! @see eql_v2.selector\n--! @see eql_v2.\"->>\"\n\n--! @brief -> operator with text selector\n--! @param eql_v2_encrypted Encrypted JSONB data\n--! @param text Field name to extract\n--! @return eql_v2_encrypted Encrypted value at selector\n--! @example\n--! SELECT encrypted_json -> 'field_name' FROM table;\nCREATE FUNCTION eql_v2.\"->\"(e eql_v2_encrypted, selector text)\n RETURNS eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n meta jsonb;\n sv eql_v2_encrypted[];\n found jsonb;\n\tBEGIN\n\n IF e IS NULL THEN\n RETURN NULL;\n END IF;\n\n -- Column identifier and version\n meta := eql_v2.meta_data(e);\n\n sv := eql_v2.ste_vec(e);\n\n FOR idx IN 1..array_length(sv, 1) LOOP\n if eql_v2.selector(sv[idx]) = selector THEN\n found := sv[idx];\n END IF;\n END LOOP;\n\n RETURN (meta || found)::eql_v2_encrypted;\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR ->(\n FUNCTION=eql_v2.\"->\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=text\n);\n\n---------------------------------------------------\n\n--! @brief -> operator with encrypted selector\n--! @param e eql_v2_encrypted Encrypted JSONB data\n--! @param selector eql_v2_encrypted Encrypted field selector\n--! @return eql_v2_encrypted Encrypted value at selector\n--! @see eql_v2.\"->\"(eql_v2_encrypted, text)\nCREATE FUNCTION eql_v2.\"->\"(e eql_v2_encrypted, selector eql_v2_encrypted)\n RETURNS eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.\"->\"(e, eql_v2.selector(selector));\n END;\n$$ LANGUAGE plpgsql;\n\n\n\nCREATE OPERATOR ->(\n FUNCTION=eql_v2.\"->\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=eql_v2_encrypted\n);\n\n\n---------------------------------------------------\n\n--! @brief -> operator with integer array index\n--! @param eql_v2_encrypted Encrypted array data\n--! @param integer Array index (0-based, JSONB convention)\n--! 
@return eql_v2_encrypted Encrypted value at array index\n--! @note Array index is 0-based (JSONB standard) despite PostgreSQL arrays being 1-based\n--! @example\n--! SELECT encrypted_array -> 0 FROM table;\n--! @see eql_v2.is_ste_vec_array\nCREATE FUNCTION eql_v2.\"->\"(e eql_v2_encrypted, selector integer)\n RETURNS eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n sv eql_v2_encrypted[];\n found eql_v2_encrypted;\n\tBEGIN\n IF NOT eql_v2.is_ste_vec_array(e) THEN\n RETURN NULL;\n END IF;\n\n sv := eql_v2.ste_vec(e);\n\n -- PostgreSQL arrays are 1-based\n -- JSONB arrays are 0-based and so the selector is 0-based\n FOR idx IN 1..array_length(sv, 1) LOOP\n if (idx-1) = selector THEN\n found := sv[idx];\n END IF;\n END LOOP;\n\n RETURN found;\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n\n\nCREATE OPERATOR ->(\n FUNCTION=eql_v2.\"->\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=integer\n);\n\n\n--! @file jsonb/functions.sql\n--! @brief JSONB path query and array manipulation functions for encrypted data\n--!\n--! These functions provide PostgreSQL-compatible operations on encrypted JSONB values\n--! using Structured Transparent Encryption (STE). They support:\n--! - Path-based queries to extract nested encrypted values\n--! - Existence checks for encrypted fields\n--! - Array operations (length, elements extraction)\n--!\n--! @note STE stores encrypted JSONB as a vector of encrypted elements ('sv') with selectors\n--! @note Functions suppress errors for missing fields, type mismatches (similar to PostgreSQL jsonpath)\n\n\n--! @brief Query encrypted JSONB for elements matching selector\n--!\n--! Searches the Structured Transparent Encryption (STE) vector for elements matching\n--! the given selector path. Returns all matching encrypted elements. If multiple\n--! matches form an array, they are wrapped with array metadata.\n--!\n--! @param jsonb Encrypted JSONB payload containing STE vector ('sv')\n--! 
@param text Path selector to match against encrypted elements\n--! @return SETOF eql_v2_encrypted Matching encrypted elements (may return multiple rows)\n--!\n--! @note Returns empty set if selector is not found (does not throw exception)\n--! @note Array elements use same selector; multiple matches wrapped with 'a' flag\n--! @note Returns a set containing NULL if val is NULL; returns empty set if no matches found\n--! @see eql_v2.jsonb_path_query_first\n--! @see eql_v2.jsonb_path_exists\nCREATE FUNCTION eql_v2.jsonb_path_query(val jsonb, selector text)\n RETURNS SETOF eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n sv eql_v2_encrypted[];\n found jsonb[];\n e jsonb;\n meta jsonb;\n ary boolean;\n BEGIN\n\n IF val IS NULL THEN\n RETURN NEXT NULL;\n END IF;\n\n -- Column identifier and version\n meta := eql_v2.meta_data(val);\n\n sv := eql_v2.ste_vec(val);\n\n FOR idx IN 1..array_length(sv, 1) LOOP\n e := sv[idx];\n\n IF eql_v2.selector(e) = selector THEN\n found := array_append(found, e);\n IF eql_v2.is_ste_vec_array(e) THEN\n ary := true;\n END IF;\n\n END IF;\n END LOOP;\n\n IF found IS NOT NULL THEN\n\n IF ary THEN\n -- Wrap found array elements as eql_v2_encrypted\n\n RETURN NEXT (meta || jsonb_build_object(\n 'sv', found,\n 'a', 1\n ))::eql_v2_encrypted;\n\n ELSE\n RETURN NEXT (meta || found[1])::eql_v2_encrypted;\n END IF;\n\n END IF;\n\n RETURN;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Query encrypted JSONB with encrypted selector\n--!\n--! Overload that accepts encrypted selector and extracts its plaintext value\n--! before delegating to main jsonb_path_query implementation.\n--!\n--! @param val eql_v2_encrypted Encrypted JSONB value to query\n--! @param selector eql_v2_encrypted Encrypted selector to match against\n--! @return SETOF eql_v2_encrypted Matching encrypted elements\n--!\n--! 
@see eql_v2.jsonb_path_query(jsonb, text)\nCREATE FUNCTION eql_v2.jsonb_path_query(val eql_v2_encrypted, selector eql_v2_encrypted)\n RETURNS SETOF eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN QUERY\n SELECT * FROM eql_v2.jsonb_path_query(val.data, eql_v2.selector(selector));\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Query encrypted JSONB with text selector\n--!\n--! Overload that accepts encrypted JSONB value and text selector,\n--! extracting the JSONB payload before querying.\n--!\n--! @param eql_v2_encrypted Encrypted JSONB value to query\n--! @param text Path selector to match against\n--! @return SETOF eql_v2_encrypted Matching encrypted elements\n--!\n--! @example\n--! -- Query encrypted JSONB for specific field\n--! SELECT * FROM eql_v2.jsonb_path_query(encrypted_document, '$.address.city');\n--!\n--! @see eql_v2.jsonb_path_query(jsonb, text)\nCREATE FUNCTION eql_v2.jsonb_path_query(val eql_v2_encrypted, selector text)\n RETURNS SETOF eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN QUERY\n SELECT * FROM eql_v2.jsonb_path_query(val.data, selector);\n END;\n$$ LANGUAGE plpgsql;\n\n\n------------------------------------------------------------------------------------\n\n\n--! @brief Check if selector path exists in encrypted JSONB\n--!\n--! Tests whether any encrypted elements match the given selector path.\n--! More efficient than jsonb_path_query when only existence check is needed.\n--!\n--! @param jsonb Encrypted JSONB payload to check\n--! @param text Path selector to test\n--! @return boolean True if matching element exists, false otherwise\n--!\n--! @see eql_v2.jsonb_path_query(jsonb, text)\nCREATE FUNCTION eql_v2.jsonb_path_exists(val jsonb, selector text)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN EXISTS (\n SELECT eql_v2.jsonb_path_query(val, selector)\n );\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check existence with encrypted selector\n--!\n--! 
Overload that accepts encrypted selector and extracts its value\n--! before checking existence.\n--!\n--! @param val eql_v2_encrypted Encrypted JSONB value to check\n--! @param selector eql_v2_encrypted Encrypted selector to test\n--! @return boolean True if path exists\n--!\n--! @see eql_v2.jsonb_path_exists(jsonb, text)\nCREATE FUNCTION eql_v2.jsonb_path_exists(val eql_v2_encrypted, selector eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN EXISTS (\n SELECT eql_v2.jsonb_path_query(val, eql_v2.selector(selector))\n );\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check existence with text selector\n--!\n--! Overload that accepts encrypted JSONB value and text selector.\n--!\n--! @param eql_v2_encrypted Encrypted JSONB value to check\n--! @param text Path selector to test\n--! @return boolean True if path exists\n--!\n--! @example\n--! -- Check if encrypted document has address field\n--! SELECT eql_v2.jsonb_path_exists(encrypted_document, '$.address');\n--!\n--! @see eql_v2.jsonb_path_exists(jsonb, text)\nCREATE FUNCTION eql_v2.jsonb_path_exists(val eql_v2_encrypted, selector text)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN EXISTS (\n SELECT eql_v2.jsonb_path_query(val, selector)\n );\n END;\n$$ LANGUAGE plpgsql;\n\n\n------------------------------------------------------------------------------------\n\n\n--! @brief Get first element matching selector\n--!\n--! Returns only the first encrypted element matching the selector path,\n--! or NULL if no match found. More efficient than jsonb_path_query when\n--! only one result is needed.\n--!\n--! @param jsonb Encrypted JSONB payload to query\n--! @param text Path selector to match\n--! @return eql_v2_encrypted First matching element or NULL\n--!\n--! @note Uses LIMIT 1 internally for efficiency\n--! 
@see eql_v2.jsonb_path_query(jsonb, text)\nCREATE FUNCTION eql_v2.jsonb_path_query_first(val jsonb, selector text)\n RETURNS eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (\n SELECT e\n FROM eql_v2.jsonb_path_query(val, selector) AS e\n LIMIT 1\n );\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Get first element with encrypted selector\n--!\n--! Overload that accepts encrypted selector and extracts its value\n--! before querying for first match.\n--!\n--! @param val eql_v2_encrypted Encrypted JSONB value to query\n--! @param selector eql_v2_encrypted Encrypted selector to match\n--! @return eql_v2_encrypted First matching element or NULL\n--!\n--! @see eql_v2.jsonb_path_query_first(jsonb, text)\nCREATE FUNCTION eql_v2.jsonb_path_query_first(val eql_v2_encrypted, selector eql_v2_encrypted)\n RETURNS eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (\n SELECT e\n FROM eql_v2.jsonb_path_query(val.data, eql_v2.selector(selector)) AS e\n LIMIT 1\n );\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Get first element with text selector\n--!\n--! Overload that accepts encrypted JSONB value and text selector.\n--!\n--! @param eql_v2_encrypted Encrypted JSONB value to query\n--! @param text Path selector to match\n--! @return eql_v2_encrypted First matching element or NULL\n--!\n--! @example\n--! -- Get first matching address from encrypted document\n--! SELECT eql_v2.jsonb_path_query_first(encrypted_document, '$.addresses[*]');\n--!\n--! @see eql_v2.jsonb_path_query_first(jsonb, text)\nCREATE FUNCTION eql_v2.jsonb_path_query_first(val eql_v2_encrypted, selector text)\n RETURNS eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (\n SELECT e\n FROM eql_v2.jsonb_path_query(val.data, selector) AS e\n LIMIT 1\n );\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n------------------------------------------------------------------------------------\n\n\n--! @brief Get length of encrypted JSONB array\n--!\n--! 
Returns the number of elements in an encrypted JSONB array by counting\n--! elements in the STE vector ('sv'). The encrypted value must have the\n--! array flag ('a') set to true.\n--!\n--! @param jsonb Encrypted JSONB payload representing an array\n--! @return integer Number of elements in the array\n--! @throws Exception 'cannot get array length of a non-array' if 'a' flag is missing or not true\n--!\n--! @note Array flag 'a' must be present and set to true value\n--! @see eql_v2.jsonb_array_elements\nCREATE FUNCTION eql_v2.jsonb_array_length(val jsonb)\n RETURNS integer\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n sv eql_v2_encrypted[];\n found eql_v2_encrypted[];\n BEGIN\n\n IF val IS NULL THEN\n RETURN NULL;\n END IF;\n\n IF eql_v2.is_ste_vec_array(val) THEN\n sv := eql_v2.ste_vec(val);\n RETURN array_length(sv, 1);\n END IF;\n\n RAISE 'cannot get array length of a non-array';\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Get array length from encrypted type\n--!\n--! Overload that accepts encrypted composite type and extracts the\n--! JSONB payload before computing array length.\n--!\n--! @param eql_v2_encrypted Encrypted array value\n--! @return integer Number of elements in the array\n--! @throws Exception if value is not an array\n--!\n--! @example\n--! -- Get length of encrypted array\n--! SELECT eql_v2.jsonb_array_length(encrypted_tags);\n--!\n--! @see eql_v2.jsonb_array_length(jsonb)\nCREATE FUNCTION eql_v2.jsonb_array_length(val eql_v2_encrypted)\n RETURNS integer\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (\n SELECT eql_v2.jsonb_array_length(val.data)\n );\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n\n--! @brief Extract elements from encrypted JSONB array\n--!\n--! Returns each element of an encrypted JSONB array as a separate row.\n--! Each element is returned as an eql_v2_encrypted value with metadata\n--! preserved from the parent array.\n--!\n--! @param jsonb Encrypted JSONB payload representing an array\n--! 
@return SETOF eql_v2_encrypted One row per array element\n--! @throws Exception if value is not an array (missing 'a' flag)\n--!\n--! @note Each element inherits metadata (version, ident) from parent\n--! @see eql_v2.jsonb_array_length\n--! @see eql_v2.jsonb_array_elements_text\nCREATE FUNCTION eql_v2.jsonb_array_elements(val jsonb)\n RETURNS SETOF eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n sv eql_v2_encrypted[];\n meta jsonb;\n item jsonb;\n BEGIN\n\n IF NOT eql_v2.is_ste_vec_array(val) THEN\n RAISE 'cannot extract elements from non-array';\n END IF;\n\n -- Column identifier and version\n meta := eql_v2.meta_data(val);\n\n sv := eql_v2.ste_vec(val);\n\n FOR idx IN 1..array_length(sv, 1) LOOP\n item = sv[idx];\n RETURN NEXT (meta || item)::eql_v2_encrypted;\n END LOOP;\n\n RETURN;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract elements from encrypted array type\n--!\n--! Overload that accepts encrypted composite type and extracts each\n--! array element as a separate row.\n--!\n--! @param eql_v2_encrypted Encrypted array value\n--! @return SETOF eql_v2_encrypted One row per array element\n--! @throws Exception if value is not an array\n--!\n--! @example\n--! -- Expand encrypted array into rows\n--! SELECT * FROM eql_v2.jsonb_array_elements(encrypted_tags);\n--!\n--! @see eql_v2.jsonb_array_elements(jsonb)\nCREATE FUNCTION eql_v2.jsonb_array_elements(val eql_v2_encrypted)\n RETURNS SETOF eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN QUERY\n SELECT * FROM eql_v2.jsonb_array_elements(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n--! @brief Extract encrypted array elements as ciphertext\n--!\n--! Returns each element of an encrypted JSONB array as its raw ciphertext\n--! value (text representation). Unlike jsonb_array_elements, this returns\n--! only the ciphertext 'c' field without metadata.\n--!\n--! @param jsonb Encrypted JSONB payload representing an array\n--! 
@return SETOF text One ciphertext string per array element\n--! @throws Exception if value is not an array (missing 'a' flag)\n--!\n--! @note Returns ciphertext only, not full encrypted structure\n--! @see eql_v2.jsonb_array_elements\nCREATE FUNCTION eql_v2.jsonb_array_elements_text(val jsonb)\n RETURNS SETOF text\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n sv eql_v2_encrypted[];\n found eql_v2_encrypted[];\n BEGIN\n IF NOT eql_v2.is_ste_vec_array(val) THEN\n RAISE 'cannot extract elements from non-array';\n END IF;\n\n sv := eql_v2.ste_vec(val);\n\n FOR idx IN 1..array_length(sv, 1) LOOP\n RETURN NEXT eql_v2.ciphertext(sv[idx]);\n END LOOP;\n\n RETURN;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract array elements as ciphertext from encrypted type\n--!\n--! Overload that accepts encrypted composite type and extracts each\n--! array element's ciphertext as text.\n--!\n--! @param eql_v2_encrypted Encrypted array value\n--! @return SETOF text One ciphertext string per array element\n--! @throws Exception if value is not an array\n--!\n--! @example\n--! -- Get ciphertext of each array element\n--! SELECT * FROM eql_v2.jsonb_array_elements_text(encrypted_tags);\n--!\n--! @see eql_v2.jsonb_array_elements_text(jsonb)\nCREATE FUNCTION eql_v2.jsonb_array_elements_text(val eql_v2_encrypted)\n RETURNS SETOF text\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN QUERY\n SELECT * FROM eql_v2.jsonb_array_elements_text(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Compare two encrypted values using HMAC-SHA256 index terms\n--!\n--! Performs a three-way comparison (returns -1/0/1) of encrypted values using\n--! their HMAC-SHA256 hash index terms. Used internally by the equality operator (=)\n--! for exact-match queries without decryption.\n--!\n--! @param a eql_v2_encrypted First encrypted value to compare\n--! @param b eql_v2_encrypted Second encrypted value to compare\n--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b\n--!\n--! 
@note NULL values are sorted before non-NULL values\n--! @note Comparison uses underlying text type ordering of HMAC-SHA256 hashes\n--!\n--! @see eql_v2.hmac_256\n--! @see eql_v2.has_hmac_256\n--! @see eql_v2.\"=\"\nCREATE FUNCTION eql_v2.compare_hmac_256(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS integer\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n a_term eql_v2.hmac_256;\n b_term eql_v2.hmac_256;\n BEGIN\n\n IF a IS NULL AND b IS NULL THEN\n RETURN 0;\n END IF;\n\n IF a IS NULL THEN\n RETURN -1;\n END IF;\n\n IF b IS NULL THEN\n RETURN 1;\n END IF;\n\n IF eql_v2.has_hmac_256(a) THEN\n a_term = eql_v2.hmac_256(a);\n END IF;\n\n IF eql_v2.has_hmac_256(b) THEN\n b_term = eql_v2.hmac_256(b);\n END IF;\n\n IF a_term IS NULL AND b_term IS NULL THEN\n RETURN 0;\n END IF;\n\n IF a_term IS NULL THEN\n RETURN -1;\n END IF;\n\n IF b_term IS NULL THEN\n RETURN 1;\n END IF;\n\n -- Using the underlying text type comparison\n IF a_term = b_term THEN\n RETURN 0;\n END IF;\n\n IF a_term < b_term THEN\n RETURN -1;\n END IF;\n\n IF a_term > b_term THEN\n RETURN 1;\n END IF;\n\n END;\n$$ LANGUAGE plpgsql;\n--! @file encryptindex/functions.sql\n--! @brief Configuration lifecycle and column encryption management\n--!\n--! Provides functions for managing encryption configuration transitions:\n--! - Comparing configurations to identify changes\n--! - Identifying columns needing encryption\n--! - Creating and renaming encrypted columns during initial setup\n--! - Tracking encryption progress\n--!\n--! These functions support the workflow of activating a pending configuration\n--! and performing the initial encryption of plaintext columns.\n\n\n--! @brief Compare two configurations and find differences\n--! @internal\n--!\n--! Returns table/column pairs where configuration differs between two configs.\n--! Used to identify which columns need encryption when activating a pending config.\n--!\n--! @param a jsonb First configuration to compare\n--! 
@param b jsonb Second configuration to compare\n--! @return TABLE(table_name text, column_name text) Columns with differing configuration\n--!\n--! @note Compares configuration structure, not just presence/absence\n--! @see eql_v2.select_pending_columns\nCREATE FUNCTION eql_v2.diff_config(a JSONB, b JSONB)\n\tRETURNS TABLE(table_name TEXT, column_name TEXT)\nIMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN QUERY\n WITH table_keys AS (\n SELECT jsonb_object_keys(a->'tables') AS key\n UNION\n SELECT jsonb_object_keys(b->'tables') AS key\n ),\n column_keys AS (\n SELECT tk.key AS table_key, jsonb_object_keys(a->'tables'->tk.key) AS column_key\n FROM table_keys tk\n UNION\n SELECT tk.key AS table_key, jsonb_object_keys(b->'tables'->tk.key) AS column_key\n FROM table_keys tk\n )\n SELECT\n ck.table_key AS table_name,\n ck.column_key AS column_name\n FROM\n column_keys ck\n WHERE\n (a->'tables'->ck.table_key->ck.column_key IS DISTINCT FROM b->'tables'->ck.table_key->ck.column_key);\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Get columns with pending configuration changes\n--!\n--! Compares 'pending' and 'active' configurations to identify columns that need\n--! encryption or re-encryption. Returns columns where configuration differs.\n--!\n--! @return TABLE(table_name text, column_name text) Columns needing encryption\n--! @throws Exception if no pending configuration exists\n--!\n--! @note Treats missing active config as empty config\n--! @see eql_v2.diff_config\n--! 
@see eql_v2.select_target_columns\nCREATE FUNCTION eql_v2.select_pending_columns()\n\tRETURNS TABLE(table_name TEXT, column_name TEXT)\nAS $$\n\tDECLARE\n\t\tactive JSONB;\n\t\tpending JSONB;\n\t\tconfig_id BIGINT;\n\tBEGIN\n\t\tSELECT data INTO active FROM eql_v2_configuration WHERE state = 'active';\n\n\t\t-- set default config\n IF active IS NULL THEN\n active := '{}';\n END IF;\n\n\t\tSELECT id, data INTO config_id, pending FROM eql_v2_configuration WHERE state = 'pending';\n\n\t\t-- set default config\n\t\tIF config_id IS NULL THEN\n\t\t\tRAISE EXCEPTION 'No pending configuration exists to encrypt';\n\t\tEND IF;\n\n\t\tRETURN QUERY\n\t\tSELECT d.table_name, d.column_name FROM eql_v2.diff_config(active, pending) as d;\n\tEND;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Map pending columns to their encrypted target columns\n--!\n--! For each column with pending configuration, identifies the corresponding\n--! encrypted column. During initial encryption, target is '{column_name}_encrypted'.\n--! Returns NULL for target_column if encrypted column doesn't exist yet.\n--!\n--! @return TABLE(table_name text, column_name text, target_column text) Column mappings\n--!\n--! @note Target column is NULL if no column exists matching either 'column_name' or 'column_name_encrypted' with type eql_v2_encrypted\n--! @note The LEFT JOIN checks both original and '_encrypted' suffix variations with type verification\n--! @see eql_v2.select_pending_columns\n--! 
@see eql_v2.create_encrypted_columns\nCREATE FUNCTION eql_v2.select_target_columns()\n\tRETURNS TABLE(table_name TEXT, column_name TEXT, target_column TEXT)\n\tSTABLE STRICT PARALLEL SAFE\nAS $$\n SELECT\n c.table_name,\n c.column_name,\n s.column_name as target_column\n FROM\n eql_v2.select_pending_columns() c\n LEFT JOIN information_schema.columns s ON\n s.table_name = c.table_name AND\n (s.column_name = c.column_name OR s.column_name = c.column_name || '_encrypted') AND\n s.udt_name = 'eql_v2_encrypted';\n$$ LANGUAGE sql;\n\n\n--! @brief Check if database is ready for encryption\n--!\n--! Verifies that all columns with pending configuration have corresponding\n--! encrypted target columns created. Returns true if encryption can proceed.\n--!\n--! @return boolean True if all pending columns have target encrypted columns\n--!\n--! @note Returns false if any pending column lacks encrypted column\n--! @see eql_v2.select_target_columns\n--! @see eql_v2.create_encrypted_columns\nCREATE FUNCTION eql_v2.ready_for_encryption()\n\tRETURNS BOOLEAN\n\tSTABLE STRICT PARALLEL SAFE\nAS $$\n\tSELECT EXISTS (\n\t SELECT *\n\t FROM eql_v2.select_target_columns() AS c\n\t WHERE c.target_column IS NOT NULL);\n$$ LANGUAGE sql;\n\n\n--! @brief Create encrypted columns for initial encryption\n--!\n--! For each plaintext column with pending configuration that lacks an encrypted\n--! target column, creates a new column '{column_name}_encrypted' of type\n--! eql_v2_encrypted. This prepares the database schema for initial encryption.\n--!\n--! @return TABLE(table_name text, column_name text) Created encrypted columns\n--!\n--! @warning Executes dynamic DDL (ALTER TABLE ADD COLUMN) - modifies database schema\n--! @note Only creates columns that don't already exist\n--! @see eql_v2.select_target_columns\n--! 
@see eql_v2.rename_encrypted_columns\nCREATE FUNCTION eql_v2.create_encrypted_columns()\n\tRETURNS TABLE(table_name TEXT, column_name TEXT)\nAS $$\n\tBEGIN\n FOR table_name, column_name IN\n SELECT c.table_name, (c.column_name || '_encrypted') FROM eql_v2.select_target_columns() AS c WHERE c.target_column IS NULL\n LOOP\n\t\t EXECUTE format('ALTER TABLE %I ADD column %I eql_v2_encrypted;', table_name, column_name);\n RETURN NEXT;\n END LOOP;\n\tEND;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Finalize initial encryption by renaming columns\n--!\n--! After initial encryption completes, renames columns to complete the transition:\n--! - Plaintext column '{column_name}' → '{column_name}_plaintext'\n--! - Encrypted column '{column_name}_encrypted' → '{column_name}'\n--!\n--! This makes the encrypted column the primary column with the original name.\n--!\n--! @return TABLE(table_name text, column_name text, target_column text) Renamed columns\n--!\n--! @warning Executes dynamic DDL (ALTER TABLE RENAME COLUMN) - modifies database schema\n--! @note Only renames columns where target is '{column_name}_encrypted'\n--! @see eql_v2.create_encrypted_columns\nCREATE FUNCTION eql_v2.rename_encrypted_columns()\n\tRETURNS TABLE(table_name TEXT, column_name TEXT, target_column TEXT)\nAS $$\n\tBEGIN\n FOR table_name, column_name, target_column IN\n SELECT * FROM eql_v2.select_target_columns() as c WHERE c.target_column = c.column_name || '_encrypted'\n LOOP\n\t\t EXECUTE format('ALTER TABLE %I RENAME %I TO %I;', table_name, column_name, column_name || '_plaintext');\n\t\t EXECUTE format('ALTER TABLE %I RENAME %I TO %I;', table_name, target_column, column_name);\n RETURN NEXT;\n END LOOP;\n\tEND;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Count rows encrypted with active configuration\n--! @internal\n--!\n--! Counts rows in a table where the encrypted column was encrypted using\n--! the currently active configuration. Used to track encryption progress.\n--!\n--! 
@param table_name text Name of table to check\n--! @param column_name text Name of encrypted column to check\n--! @return bigint Count of rows encrypted with active configuration\n--!\n--! @note The 'v' field in encrypted payloads stores the payload version (\"2\"), not the configuration ID\n--! @note Configuration tracking mechanism is implementation-specific\nCREATE FUNCTION eql_v2.count_encrypted_with_active_config(table_name TEXT, column_name TEXT)\n RETURNS BIGINT\nAS $$\nDECLARE\n result BIGINT;\nBEGIN\n\tEXECUTE format(\n 'SELECT COUNT(%I) FROM %s t WHERE %I->>%L = (SELECT id::TEXT FROM eql_v2_configuration WHERE state = %L)',\n column_name, table_name, column_name, 'v', 'active'\n )\n\tINTO result;\n \tRETURN result;\nEND;\n$$ LANGUAGE plpgsql;\n\n\n\n--! @brief Validate presence of ident field in encrypted payload\n--! @internal\n--!\n--! Checks that the encrypted JSONB payload contains the required 'i' (ident) field.\n--! The ident field tracks which table and column the encrypted value belongs to.\n--!\n--! @param jsonb Encrypted payload to validate\n--! @return Boolean True if 'i' field is present\n--! @throws Exception if 'i' field is missing\n--!\n--! @note Used in CHECK constraints to ensure payload structure\n--! @see eql_v2.check_encrypted\nCREATE FUNCTION eql_v2._encrypted_check_i(val jsonb)\n RETURNS boolean\nAS $$\n\tBEGIN\n IF val ? 'i' THEN\n RETURN true;\n END IF;\n RAISE 'Encrypted column missing ident (i) field: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Validate table and column fields in ident\n--! @internal\n--!\n--! Checks that the 'i' (ident) field contains both 't' (table) and 'c' (column)\n--! subfields, which identify the origin of the encrypted value.\n--!\n--! @param jsonb Encrypted payload to validate\n--! @return Boolean True if both 't' and 'c' subfields are present\n--! @throws Exception if 't' or 'c' subfields are missing\n--!\n--! @note Used in CHECK constraints to ensure payload structure\n--! 
@see eql_v2.check_encrypted\nCREATE FUNCTION eql_v2._encrypted_check_i_ct(val jsonb)\n RETURNS boolean\nAS $$\n\tBEGIN\n IF (val->'i' ?& array['t', 'c']) THEN\n RETURN true;\n END IF;\n RAISE 'Encrypted column ident (i) missing table (t) or column (c) fields: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Validate version field in encrypted payload\n--! @internal\n--!\n--! Checks that the encrypted payload has version field 'v' set to '2',\n--! the current EQL v2 payload version.\n--!\n--! @param jsonb Encrypted payload to validate\n--! @return Boolean True if 'v' field is present and equals '2'\n--! @throws Exception if 'v' field is missing or not '2'\n--!\n--! @note Used in CHECK constraints to ensure payload structure\n--! @see eql_v2.check_encrypted\nCREATE FUNCTION eql_v2._encrypted_check_v(val jsonb)\n RETURNS boolean\nAS $$\n\tBEGIN\n IF (val ? 'v') THEN\n\n IF val->>'v' <> '2' THEN\n RAISE 'Expected encrypted column version (v) 2';\n RETURN false;\n END IF;\n\n RETURN true;\n END IF;\n RAISE 'Encrypted column missing version (v) field: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Validate ciphertext field in encrypted payload\n--! @internal\n--!\n--! Checks that the encrypted payload contains the required 'c' (ciphertext) field\n--! which stores the encrypted data.\n--!\n--! @param jsonb Encrypted payload to validate\n--! @return Boolean True if 'c' field is present\n--! @throws Exception if 'c' field is missing\n--!\n--! @note Used in CHECK constraints to ensure payload structure\n--! @see eql_v2.check_encrypted\nCREATE FUNCTION eql_v2._encrypted_check_c(val jsonb)\n RETURNS boolean\nAS $$\n\tBEGIN\n IF (val ? 'c') THEN\n RETURN true;\n END IF;\n RAISE 'Encrypted column missing ciphertext (c) field: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Validate complete encrypted payload structure\n--!\n--! Comprehensive validation function that checks all required fields in an\n--! 
encrypted JSONB payload: version ('v'), ciphertext ('c'), ident ('i'),\n--! and ident subfields ('t', 'c').\n--!\n--! This function is used in CHECK constraints to ensure encrypted column\n--! data integrity at the database level.\n--!\n--! @param jsonb Encrypted payload to validate\n--! @return Boolean True if all structure checks pass\n--! @throws Exception if any required field is missing or invalid\n--!\n--! @example\n--! -- Add validation constraint to encrypted column\n--! ALTER TABLE users ADD CONSTRAINT check_email_encrypted\n--! CHECK (eql_v2.check_encrypted(encrypted_email::jsonb));\n--!\n--! @see eql_v2._encrypted_check_v\n--! @see eql_v2._encrypted_check_c\n--! @see eql_v2._encrypted_check_i\n--! @see eql_v2._encrypted_check_i_ct\nCREATE FUNCTION eql_v2.check_encrypted(val jsonb)\n RETURNS BOOLEAN\nLANGUAGE sql IMMUTABLE STRICT PARALLEL SAFE\nBEGIN ATOMIC\n RETURN (\n eql_v2._encrypted_check_v(val) AND\n eql_v2._encrypted_check_c(val) AND\n eql_v2._encrypted_check_i(val) AND\n eql_v2._encrypted_check_i_ct(val)\n );\nEND;\n\n\n--! @brief Validate encrypted composite type structure\n--!\n--! Validates an eql_v2_encrypted composite type by checking its underlying\n--! JSONB payload. Delegates to eql_v2.check_encrypted(jsonb).\n--!\n--! @param eql_v2_encrypted Encrypted value to validate\n--! @return Boolean True if structure is valid\n--! @throws Exception if any required field is missing or invalid\n--!\n--! @see eql_v2.check_encrypted(jsonb)\nCREATE FUNCTION eql_v2.check_encrypted(val eql_v2_encrypted)\n RETURNS BOOLEAN\nLANGUAGE sql IMMUTABLE STRICT PARALLEL SAFE\nBEGIN ATOMIC\n RETURN eql_v2.check_encrypted(val.data);\nEND;\n\n\n-- Aggregate functions for ORE\n\n--! @brief State transition function for min aggregate\n--! @internal\n--!\n--! Returns the smaller of two encrypted values for use in MIN aggregate.\n--! Comparison uses ORE index terms without decryption.\n--!\n--! @param a eql_v2_encrypted First encrypted value\n--! 
@param b eql_v2_encrypted Second encrypted value\n--! @return eql_v2_encrypted The smaller of the two values\n--!\n--! @see eql_v2.min(eql_v2_encrypted)\nCREATE FUNCTION eql_v2.min(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS eql_v2_encrypted\nSTRICT\nAS $$\n BEGIN\n IF a < b THEN\n RETURN a;\n ELSE\n RETURN b;\n END IF;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Find minimum encrypted value in a group\n--!\n--! Aggregate function that returns the minimum encrypted value in a group\n--! using ORE index term comparisons without decryption.\n--!\n--! @param input eql_v2_encrypted Encrypted values to aggregate\n--! @return eql_v2_encrypted Minimum value in the group\n--!\n--! @example\n--! -- Find minimum age per department\n--! SELECT department, eql_v2.min(encrypted_age)\n--! FROM employees\n--! GROUP BY department;\n--!\n--! @note Requires 'ore' index configuration on the column\n--! @see eql_v2.min(eql_v2_encrypted, eql_v2_encrypted)\nCREATE AGGREGATE eql_v2.min(eql_v2_encrypted)\n(\n sfunc = eql_v2.min,\n stype = eql_v2_encrypted\n);\n\n\n--! @brief State transition function for max aggregate\n--! @internal\n--!\n--! Returns the larger of two encrypted values for use in MAX aggregate.\n--! Comparison uses ORE index terms without decryption.\n--!\n--! @param a eql_v2_encrypted First encrypted value\n--! @param b eql_v2_encrypted Second encrypted value\n--! @return eql_v2_encrypted The larger of the two values\n--!\n--! @see eql_v2.max(eql_v2_encrypted)\nCREATE FUNCTION eql_v2.max(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS eql_v2_encrypted\nSTRICT\nAS $$\n BEGIN\n IF a > b THEN\n RETURN a;\n ELSE\n RETURN b;\n END IF;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Find maximum encrypted value in a group\n--!\n--! Aggregate function that returns the maximum encrypted value in a group\n--! using ORE index term comparisons without decryption.\n--!\n--! @param input eql_v2_encrypted Encrypted values to aggregate\n--! 
@return eql_v2_encrypted Maximum value in the group\n--!\n--! @example\n--! -- Find maximum salary per department\n--! SELECT department, eql_v2.max(encrypted_salary)\n--! FROM employees\n--! GROUP BY department;\n--!\n--! @note Requires 'ore' index configuration on the column\n--! @see eql_v2.max(eql_v2_encrypted, eql_v2_encrypted)\nCREATE AGGREGATE eql_v2.max(eql_v2_encrypted)\n(\n sfunc = eql_v2.max,\n stype = eql_v2_encrypted\n);\n\n\n--! @file config/indexes.sql\n--! @brief Configuration state uniqueness indexes\n--!\n--! Creates partial unique indexes to enforce that only one configuration\n--! can be in 'active', 'pending', or 'encrypting' state at any time.\n--! Multiple 'inactive' configurations are allowed.\n--!\n--! @note Uses partial indexes (WHERE clauses) for efficiency\n--! @note Prevents conflicting configurations from being active simultaneously\n--! @see config/types.sql for state definitions\n\n\n--! @brief Unique active configuration constraint\n--! @note Only one configuration can be 'active' at once\nCREATE UNIQUE INDEX ON public.eql_v2_configuration (state) WHERE state = 'active';\n\n--! @brief Unique pending configuration constraint\n--! @note Only one configuration can be 'pending' at once\nCREATE UNIQUE INDEX ON public.eql_v2_configuration (state) WHERE state = 'pending';\n\n--! @brief Unique encrypting configuration constraint\n--! @note Only one configuration can be 'encrypting' at once\nCREATE UNIQUE INDEX ON public.eql_v2_configuration (state) WHERE state = 'encrypting';\n\n\n--! @brief Add a search index configuration for an encrypted column\n--!\n--! Configures a searchable encryption index (unique, match, ore, or ste_vec) on an\n--! encrypted column. Creates or updates the pending configuration, then migrates\n--! and activates it unless migrating flag is set.\n--!\n--! @param table_name Text Name of the table containing the column\n--! @param column_name Text Name of the column to configure\n--! 
@param index_name Text Type of index ('unique', 'match', 'ore', 'ste_vec')\n--! @param cast_as Text PostgreSQL type for decrypted values (default: 'text')\n--! @param opts JSONB Index-specific options (default: '{}')\n--! @param migrating Boolean Skip auto-migration if true (default: false)\n--! @return JSONB Updated configuration object\n--! @throws Exception if index already exists for this column\n--! @throws Exception if cast_as is not a valid type\n--!\n--! @example\n--! -- Add unique index for exact-match searches\n--! SELECT eql_v2.add_search_config('users', 'email', 'unique');\n--!\n--! -- Add match index for LIKE searches with custom token length\n--! SELECT eql_v2.add_search_config('posts', 'content', 'match', 'text',\n--! '{\"token_filters\": [{\"kind\": \"downcase\"}], \"tokenizer\": {\"kind\": \"ngram\", \"token_length\": 3}}'\n--! );\n--!\n--! @see eql_v2.add_column\n--! @see eql_v2.remove_search_config\nCREATE FUNCTION eql_v2.add_search_config(table_name text, column_name text, index_name text, cast_as text DEFAULT 'text', opts jsonb DEFAULT '{}', migrating boolean DEFAULT false)\n RETURNS jsonb\n\nAS $$\n DECLARE\n o jsonb;\n _config jsonb;\n BEGIN\n\n -- set the active config\n SELECT data INTO _config FROM public.eql_v2_configuration WHERE state = 'active' OR state = 'pending' ORDER BY state DESC;\n\n -- if index exists\n IF _config #> array['tables', table_name, column_name, 'indexes'] ? 
index_name THEN\n RAISE EXCEPTION '% index exists for column: % %', index_name, table_name, column_name;\n END IF;\n\n IF NOT cast_as = ANY('{text, int, small_int, big_int, real, double, boolean, date, jsonb}') THEN\n RAISE EXCEPTION '% is not a valid cast type', cast_as;\n END IF;\n\n -- set default config\n SELECT eql_v2.config_default(_config) INTO _config;\n\n SELECT eql_v2.config_add_table(table_name, _config) INTO _config;\n\n SELECT eql_v2.config_add_column(table_name, column_name, _config) INTO _config;\n\n SELECT eql_v2.config_add_cast(table_name, column_name, cast_as, _config) INTO _config;\n\n -- set default options for index if opts empty\n IF index_name = 'match' AND opts = '{}' THEN\n SELECT eql_v2.config_match_default() INTO opts;\n END IF;\n\n SELECT eql_v2.config_add_index(table_name, column_name, index_name, opts, _config) INTO _config;\n\n -- create a new pending record if we don't have one\n INSERT INTO public.eql_v2_configuration (state, data) VALUES ('pending', _config)\n ON CONFLICT (state)\n WHERE state = 'pending'\n DO UPDATE\n SET data = _config;\n\n IF NOT migrating THEN\n PERFORM eql_v2.migrate_config();\n PERFORM eql_v2.activate_config();\n END IF;\n\n PERFORM eql_v2.add_encrypted_constraint(table_name, column_name);\n\n -- exeunt\n RETURN _config;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Remove a search index configuration from an encrypted column\n--!\n--! Removes a previously configured search index from an encrypted column.\n--! Updates the pending configuration, then migrates and activates it\n--! unless migrating flag is set.\n--!\n--! @param table_name Text Name of the table containing the column\n--! @param column_name Text Name of the column\n--! @param index_name Text Type of index to remove\n--! @param migrating Boolean Skip auto-migration if true (default: false)\n--! @return JSONB Updated configuration object\n--! @throws Exception if no active or pending configuration exists\n--! 
@throws Exception if table is not configured\n--! @throws Exception if column is not configured\n--!\n--! @example\n--! -- Remove match index from column\n--! SELECT eql_v2.remove_search_config('posts', 'content', 'match');\n--!\n--! @see eql_v2.add_search_config\n--! @see eql_v2.modify_search_config\nCREATE FUNCTION eql_v2.remove_search_config(table_name text, column_name text, index_name text, migrating boolean DEFAULT false)\n RETURNS jsonb\nAS $$\n DECLARE\n _config jsonb;\n BEGIN\n\n -- set the active config\n SELECT data INTO _config FROM public.eql_v2_configuration WHERE state = 'active' OR state = 'pending' ORDER BY state DESC;\n\n -- if no config\n IF _config IS NULL THEN\n RAISE EXCEPTION 'No active or pending configuration exists';\n END IF;\n\n -- if the table doesn't exist\n IF NOT _config #> array['tables'] ? table_name THEN\n RAISE EXCEPTION 'No configuration exists for table: %', table_name;\n END IF;\n\n -- if the index does not exist\n -- IF NOT _config->key ? index_name THEN\n IF NOT _config #> array['tables', table_name] ? column_name THEN\n RAISE EXCEPTION 'No % index exists for column: % %', index_name, table_name, column_name;\n END IF;\n\n -- create a new pending record if we don't have one\n INSERT INTO public.eql_v2_configuration (state, data) VALUES ('pending', _config)\n ON CONFLICT (state)\n WHERE state = 'pending'\n DO NOTHING;\n\n -- remove the index\n SELECT _config #- array['tables', table_name, column_name, 'indexes', index_name] INTO _config;\n\n -- update the config and migrate (even if empty)\n UPDATE public.eql_v2_configuration SET data = _config WHERE state = 'pending';\n\n IF NOT migrating THEN\n PERFORM eql_v2.migrate_config();\n PERFORM eql_v2.activate_config();\n END IF;\n\n -- exeunt\n RETURN _config;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Modify a search index configuration for an encrypted column\n--!\n--! Updates an existing search index configuration by removing and re-adding it\n--! with new options. 
Convenience function that combines remove and add operations.\n--! If index does not exist, it is added.\n--!\n--! @param table_name Text Name of the table containing the column\n--! @param column_name Text Name of the column\n--! @param index_name Text Type of index to modify\n--! @param cast_as Text PostgreSQL type for decrypted values (default: 'text')\n--! @param opts JSONB New index-specific options (default: '{}')\n--! @param migrating Boolean Skip auto-migration if true (default: false)\n--! @return JSONB Updated configuration object\n--!\n--! @example\n--! -- Change match index tokenizer settings\n--! SELECT eql_v2.modify_search_config('posts', 'content', 'match', 'text',\n--! '{\"tokenizer\": {\"kind\": \"ngram\", \"token_length\": 4}}'\n--! );\n--!\n--! @see eql_v2.add_search_config\n--! @see eql_v2.remove_search_config\nCREATE FUNCTION eql_v2.modify_search_config(table_name text, column_name text, index_name text, cast_as text DEFAULT 'text', opts jsonb DEFAULT '{}', migrating boolean DEFAULT false)\n RETURNS jsonb\nAS $$\n BEGIN\n PERFORM eql_v2.remove_search_config(table_name, column_name, index_name, migrating);\n RETURN eql_v2.add_search_config(table_name, column_name, index_name, cast_as, opts, migrating);\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Migrate pending configuration to encrypting state\n--!\n--! Transitions the pending configuration to encrypting state, validating that\n--! all configured columns have encrypted target columns ready. This is part of\n--! the configuration lifecycle: pending → encrypting → active.\n--!\n--! @return Boolean True if migration succeeds\n--! @throws Exception if encryption already in progress\n--! @throws Exception if no pending configuration exists\n--! @throws Exception if configured columns lack encrypted targets\n--!\n--! @example\n--! -- Manually migrate configuration (normally done automatically)\n--! SELECT eql_v2.migrate_config();\n--!\n--! @see eql_v2.activate_config\n--! 
@see eql_v2.add_column\nCREATE FUNCTION eql_v2.migrate_config()\n RETURNS boolean\nAS $$\n\tBEGIN\n\n IF EXISTS (SELECT FROM public.eql_v2_configuration c WHERE c.state = 'encrypting') THEN\n RAISE EXCEPTION 'An encryption is already in progress';\n END IF;\n\n\t\tIF NOT EXISTS (SELECT FROM public.eql_v2_configuration c WHERE c.state = 'pending') THEN\n\t\t\tRAISE EXCEPTION 'No pending configuration exists to encrypt';\n\t\tEND IF;\n\n IF NOT eql_v2.ready_for_encryption() THEN\n RAISE EXCEPTION 'Some pending columns do not have an encrypted target';\n END IF;\n\n UPDATE public.eql_v2_configuration SET state = 'encrypting' WHERE state = 'pending';\n\t\tRETURN true;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Activate encrypting configuration\n--!\n--! Transitions the encrypting configuration to active state, making it the\n--! current operational configuration. Marks previous active configuration as\n--! inactive. Final step in configuration lifecycle: pending → encrypting → active.\n--!\n--! @return Boolean True if activation succeeds\n--! @throws Exception if no encrypting configuration exists to activate\n--!\n--! @example\n--! -- Manually activate configuration (normally done automatically)\n--! SELECT eql_v2.activate_config();\n--!\n--! @see eql_v2.migrate_config\n--! @see eql_v2.add_column\nCREATE FUNCTION eql_v2.activate_config()\n RETURNS boolean\nAS $$\n\tBEGIN\n\n\t IF EXISTS (SELECT FROM public.eql_v2_configuration c WHERE c.state = 'encrypting') THEN\n\t \tUPDATE public.eql_v2_configuration SET state = 'inactive' WHERE state = 'active';\n\t\t\tUPDATE public.eql_v2_configuration SET state = 'active' WHERE state = 'encrypting';\n\t\t\tRETURN true;\n\t\tELSE\n\t\t\tRAISE EXCEPTION 'No encrypting configuration exists to activate';\n\t\tEND IF;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Discard pending configuration\n--!\n--! Deletes the pending configuration without applying changes. Use this to\n--! 
abandon configuration changes before they are migrated and activated.\n--!\n--! @return Boolean True if discard succeeds\n--! @throws Exception if no pending configuration exists to discard\n--!\n--! @example\n--! -- Discard uncommitted configuration changes\n--! SELECT eql_v2.discard();\n--!\n--! @see eql_v2.add_column\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.discard()\n RETURNS boolean\nAS $$\n BEGIN\n IF EXISTS (SELECT FROM public.eql_v2_configuration c WHERE c.state = 'pending') THEN\n DELETE FROM public.eql_v2_configuration WHERE state = 'pending';\n RETURN true;\n ELSE\n RAISE EXCEPTION 'No pending configuration exists to discard';\n END IF;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Configure a column for encryption\n--!\n--! Adds a column to the encryption configuration, making it eligible for\n--! encrypted storage and search indexes. Creates or updates pending configuration,\n--! adds encrypted constraint, then migrates and activates unless migrating flag is set.\n--!\n--! @param table_name Text Name of the table containing the column\n--! @param column_name Text Name of the column to encrypt\n--! @param cast_as Text PostgreSQL type to cast decrypted values (default: 'text')\n--! @param migrating Boolean Skip auto-migration if true (default: false)\n--! @return JSONB Updated configuration object\n--! @throws Exception if column already configured for encryption\n--!\n--! @example\n--! -- Configure email column for encryption\n--! SELECT eql_v2.add_column('users', 'email', 'text');\n--!\n--! -- Configure age column with integer casting\n--! SELECT eql_v2.add_column('users', 'age', 'int');\n--!\n--! @see eql_v2.add_search_config\n--! 
@see eql_v2.remove_column\nCREATE FUNCTION eql_v2.add_column(table_name text, column_name text, cast_as text DEFAULT 'text', migrating boolean DEFAULT false)\n RETURNS jsonb\nAS $$\n DECLARE\n key text;\n _config jsonb;\n BEGIN\n -- set the active config\n SELECT data INTO _config FROM public.eql_v2_configuration WHERE state = 'active' OR state = 'pending' ORDER BY state DESC;\n\n -- set default config\n SELECT eql_v2.config_default(_config) INTO _config;\n\n -- if index exists\n IF _config #> array['tables', table_name] ? column_name THEN\n RAISE EXCEPTION 'Config exists for column: % %', table_name, column_name;\n END IF;\n\n SELECT eql_v2.config_add_table(table_name, _config) INTO _config;\n\n SELECT eql_v2.config_add_column(table_name, column_name, _config) INTO _config;\n\n SELECT eql_v2.config_add_cast(table_name, column_name, cast_as, _config) INTO _config;\n\n -- create a new pending record if we don't have one\n INSERT INTO public.eql_v2_configuration (state, data) VALUES ('pending', _config)\n ON CONFLICT (state)\n WHERE state = 'pending'\n DO UPDATE\n SET data = _config;\n\n IF NOT migrating THEN\n PERFORM eql_v2.migrate_config();\n PERFORM eql_v2.activate_config();\n END IF;\n\n PERFORM eql_v2.add_encrypted_constraint(table_name, column_name);\n\n -- exeunt\n RETURN _config;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Remove a column from encryption configuration\n--!\n--! Removes a column from the encryption configuration, including all associated\n--! search indexes. Removes encrypted constraint, updates pending configuration,\n--! then migrates and activates unless migrating flag is set.\n--!\n--! @param table_name Text Name of the table containing the column\n--! @param column_name Text Name of the column to remove\n--! @param migrating Boolean Skip auto-migration if true (default: false)\n--! @return JSONB Updated configuration object\n--! @throws Exception if no active or pending configuration exists\n--! 
@throws Exception if table is not configured\n--! @throws Exception if column is not configured\n--!\n--! @example\n--! -- Remove email column from encryption\n--! SELECT eql_v2.remove_column('users', 'email');\n--!\n--! @see eql_v2.add_column\n--! @see eql_v2.remove_search_config\nCREATE FUNCTION eql_v2.remove_column(table_name text, column_name text, migrating boolean DEFAULT false)\n RETURNS jsonb\nAS $$\n DECLARE\n key text;\n _config jsonb;\n BEGIN\n -- set the active config\n SELECT data INTO _config FROM public.eql_v2_configuration WHERE state = 'active' OR state = 'pending' ORDER BY state DESC;\n\n -- if no config\n IF _config IS NULL THEN\n RAISE EXCEPTION 'No active or pending configuration exists';\n END IF;\n\n -- if the table doesn't exist\n IF NOT _config #> array['tables'] ? table_name THEN\n RAISE EXCEPTION 'No configuration exists for table: %', table_name;\n END IF;\n\n -- if the column does not exist\n IF NOT _config #> array['tables', table_name] ? column_name THEN\n RAISE EXCEPTION 'No configuration exists for column: % %', table_name, column_name;\n END IF;\n\n -- create a new pending record if we don't have one\n INSERT INTO public.eql_v2_configuration (state, data) VALUES ('pending', _config)\n ON CONFLICT (state)\n WHERE state = 'pending'\n DO NOTHING;\n\n -- remove the column\n SELECT _config #- array['tables', table_name, column_name] INTO _config;\n\n -- if table is now empty, remove the table\n IF _config #> array['tables', table_name] = '{}' THEN\n SELECT _config #- array['tables', table_name] INTO _config;\n END IF;\n\n PERFORM eql_v2.remove_encrypted_constraint(table_name, column_name);\n\n -- update the config (even if empty) and activate\n UPDATE public.eql_v2_configuration SET data = _config WHERE state = 'pending';\n\n IF NOT migrating THEN\n -- For empty configs, skip migration validation and directly activate\n IF _config #> array['tables'] = '{}' THEN\n UPDATE public.eql_v2_configuration SET state = 'inactive' WHERE state = 
'active';\n UPDATE public.eql_v2_configuration SET state = 'active' WHERE state = 'pending';\n ELSE\n PERFORM eql_v2.migrate_config();\n PERFORM eql_v2.activate_config();\n END IF;\n END IF;\n\n -- exeunt\n RETURN _config;\n\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Reload configuration from CipherStash Proxy\n--!\n--! Placeholder function for reloading configuration from the CipherStash Proxy.\n--! Currently returns NULL without side effects.\n--!\n--! @return Void\n--!\n--! @note This function may be used for configuration synchronization in future versions\nCREATE FUNCTION eql_v2.reload_config()\n RETURNS void\nLANGUAGE sql STRICT PARALLEL SAFE\nBEGIN ATOMIC\n RETURN NULL;\nEND;\n\n--! @brief Query encryption configuration in tabular format\n--!\n--! Returns the active encryption configuration as a table for easier querying\n--! and filtering. Shows all configured tables, columns, cast types, and indexes.\n--!\n--! @return TABLE Contains configuration state, relation name, column name, cast type, and indexes\n--!\n--! @example\n--! -- View all encrypted columns\n--! SELECT * FROM eql_v2.config();\n--!\n--! -- Find all columns with match indexes\n--! SELECT relation, col_name FROM eql_v2.config()\n--! WHERE indexes ? 'match';\n--!\n--! @see eql_v2.add_column\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.config() RETURNS TABLE (\n state eql_v2_configuration_state,\n relation text,\n col_name text,\n decrypts_as text,\n indexes jsonb\n)\nAS $$\nBEGIN\n RETURN QUERY\n WITH tables AS (\n SELECT config.state, tables.key AS table, tables.value AS config\n FROM public.eql_v2_configuration config, jsonb_each(data->'tables') tables\n WHERE config.data->>'v' = '1'\n )\n SELECT\n tables.state,\n tables.table,\n column_config.key,\n column_config.value->>'cast_as',\n column_config.value->'indexes'\n FROM tables, jsonb_each(tables.config) column_config;\nEND;\n$$ LANGUAGE plpgsql;\n\n--! @file config/constraints.sql\n--! 
@brief Configuration validation functions and constraints\n--!\n--! Provides CHECK constraint functions to validate encryption configuration structure.\n--! Ensures configurations have required fields (version, tables) and valid values\n--! for index types and cast types before being stored.\n--!\n--! @see config/tables.sql where constraints are applied\n\n\n--! @brief Extract index type names from configuration\n--! @internal\n--!\n--! Helper function that extracts all index type names from the configuration's\n--! 'indexes' sections across all tables and columns.\n--!\n--! @param jsonb Configuration data to extract from\n--! @return SETOF text Index type names (e.g., 'match', 'ore', 'unique', 'ste_vec')\n--!\n--! @note Used by config_check_indexes for validation\n--! @see eql_v2.config_check_indexes\nCREATE FUNCTION eql_v2.config_get_indexes(val jsonb)\n RETURNS SETOF text\n LANGUAGE sql IMMUTABLE STRICT PARALLEL SAFE\nBEGIN ATOMIC\n\tSELECT jsonb_object_keys(jsonb_path_query(val,'$.tables.*.*.indexes'));\nEND;\n\n\n--! @brief Validate index types in configuration\n--! @internal\n--!\n--! Checks that all index types specified in the configuration are valid.\n--! Valid index types are: match, ore, unique, ste_vec.\n--!\n--! @param jsonb Configuration data to validate\n--! @return boolean True if all index types are valid\n--! @throws Exception if any invalid index type found\n--!\n--! @note Used in CHECK constraint on eql_v2_configuration table\n--! @see eql_v2.config_get_indexes\nCREATE FUNCTION eql_v2.config_check_indexes(val jsonb)\n RETURNS BOOLEAN\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n\n IF (SELECT EXISTS (SELECT eql_v2.config_get_indexes(val))) THEN\n IF (SELECT bool_and(index = ANY('{match, ore, unique, ste_vec}')) FROM eql_v2.config_get_indexes(val) AS index) THEN\n RETURN true;\n END IF;\n RAISE 'Configuration has an invalid index (%). 
Index should be one of {match, ore, unique, ste_vec}', val;\n END IF;\n RETURN true;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Validate cast types in configuration\n--! @internal\n--!\n--! Checks that all 'cast_as' types specified in the configuration are valid.\n--! Valid cast types are: text, int, small_int, big_int, real, double, boolean, date, jsonb.\n--!\n--! @param jsonb Configuration data to validate\n--! @return boolean True if all cast types are valid or no cast types specified\n--! @throws Exception if any invalid cast type found\n--!\n--! @note Used in CHECK constraint on eql_v2_configuration table\n--! @note Empty configurations (no cast_as fields) are valid\n--! @note Cast type names are EQL's internal representations, not PostgreSQL native types\nCREATE FUNCTION eql_v2.config_check_cast(val jsonb)\n RETURNS BOOLEAN\nAS $$\n\tBEGIN\n -- If there are cast_as fields, validate them\n IF EXISTS (SELECT jsonb_array_elements_text(jsonb_path_query_array(val, '$.tables.*.*.cast_as'))) THEN\n IF (SELECT bool_and(cast_as = ANY('{text, int, small_int, big_int, real, double, boolean, date, jsonb}')) \n FROM (SELECT jsonb_array_elements_text(jsonb_path_query_array(val, '$.tables.*.*.cast_as')) AS cast_as) casts) THEN\n RETURN true;\n END IF;\n RAISE 'Configuration has an invalid cast_as (%). Cast should be one of {text, int, small_int, big_int, real, double, boolean, date, jsonb}', val;\n END IF;\n -- If no cast_as fields exist (empty config), that's valid\n RETURN true;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Validate tables field presence\n--! @internal\n--!\n--! Ensures the configuration has a 'tables' field, which is required\n--! to specify which database tables contain encrypted columns.\n--!\n--! @param jsonb Configuration data to validate\n--! @return boolean True if 'tables' field exists\n--! @throws Exception if 'tables' field is missing\n--!\n--! 
@note Used in CHECK constraint on eql_v2_configuration table\nCREATE FUNCTION eql_v2.config_check_tables(val jsonb)\n RETURNS boolean\nAS $$\n\tBEGIN\n IF (val ? 'tables') THEN\n RETURN true;\n END IF;\n RAISE 'Configuration missing tables (tables) field: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Validate version field presence\n--! @internal\n--!\n--! Ensures the configuration has a 'v' (version) field, which tracks\n--! the configuration format version.\n--!\n--! @param jsonb Configuration data to validate\n--! @return boolean True if 'v' field exists\n--! @throws Exception if 'v' field is missing\n--!\n--! @note Used in CHECK constraint on eql_v2_configuration table\nCREATE FUNCTION eql_v2.config_check_version(val jsonb)\n RETURNS boolean\nAS $$\n\tBEGIN\n IF (val ? 'v') THEN\n RETURN true;\n END IF;\n RAISE 'Configuration missing version (v) field: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Drop existing data validation constraint if present\n--! @note Allows constraint to be recreated during upgrades\nALTER TABLE public.eql_v2_configuration DROP CONSTRAINT IF EXISTS eql_v2_configuration_data_check;\n\n\n--! @brief Comprehensive configuration data validation\n--!\n--! CHECK constraint that validates all aspects of configuration data:\n--! - Version field presence\n--! - Tables field presence\n--! - Valid cast_as types\n--! - Valid index types\n--!\n--! @note Combines all config_check_* validation functions\n--! @see eql_v2.config_check_version\n--! @see eql_v2.config_check_tables\n--! @see eql_v2.config_check_cast\n--! @see eql_v2.config_check_indexes\nALTER TABLE public.eql_v2_configuration\n ADD CONSTRAINT eql_v2_configuration_data_check CHECK (\n eql_v2.config_check_version(data) AND\n eql_v2.config_check_tables(data) AND\n eql_v2.config_check_cast(data) AND\n eql_v2.config_check_indexes(data)\n);\n\n\n\n\n--! @brief Compare two encrypted values using Blake3 hash index terms\n--!\n--! 
Performs a three-way comparison (returns -1/0/1) of encrypted values using\n--! their Blake3 hash index terms. Used internally by the equality operator (=)\n--! for exact-match queries without decryption.\n--!\n--! @param a eql_v2_encrypted First encrypted value to compare\n--! @param b eql_v2_encrypted Second encrypted value to compare\n--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b\n--!\n--! @note NULL values are sorted before non-NULL values\n--! @note Comparison uses underlying text type ordering of Blake3 hashes\n--!\n--! @see eql_v2.blake3\n--! @see eql_v2.has_blake3\n--! @see eql_v2.\"=\"\nCREATE FUNCTION eql_v2.compare_blake3(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS integer\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n a_term eql_v2.blake3;\n b_term eql_v2.blake3;\n BEGIN\n\n IF a IS NULL AND b IS NULL THEN\n RETURN 0;\n END IF;\n\n IF a IS NULL THEN\n RETURN -1;\n END IF;\n\n IF b IS NULL THEN\n RETURN 1;\n END IF;\n\n IF eql_v2.has_blake3(a) THEN\n a_term = eql_v2.blake3(a);\n END IF;\n\n IF eql_v2.has_blake3(b) THEN\n b_term = eql_v2.blake3(b);\n END IF;\n\n IF a_term IS NULL AND b_term IS NULL THEN\n RETURN 0;\n END IF;\n\n IF a_term IS NULL THEN\n RETURN -1;\n END IF;\n\n IF b_term IS NULL THEN\n RETURN 1;\n END IF;\n\n -- Using the underlying text type comparison\n IF a_term = b_term THEN\n RETURN 0;\n END IF;\n\n IF a_term < b_term THEN\n RETURN -1;\n END IF;\n\n IF a_term > b_term THEN\n RETURN 1;\n END IF;\n\n END;\n$$ LANGUAGE plpgsql;\n" + } + ], + "postcheck": [ + { + "description": "verify \"eql_v2\" schema exists", + "sql": "SELECT EXISTS (SELECT 1 FROM pg_namespace WHERE nspname = 'eql_v2')" + }, + { + "description": "verify \"public.eql_v2_encrypted\" composite type exists", + "sql": "SELECT EXISTS (SELECT 1 FROM pg_type t JOIN pg_namespace n ON n.oid = t.typnamespace WHERE n.nspname = 'public' AND t.typname = 'eql_v2_encrypted')" + } + ] + } +] \ No newline at end of file diff --git 
a/examples/prisma/migrations/cipherstash/contract.d.ts b/examples/prisma/migrations/cipherstash/contract.d.ts new file mode 100644 index 00000000..588635ae --- /dev/null +++ b/examples/prisma/migrations/cipherstash/contract.d.ts @@ -0,0 +1,10 @@ +/** + * Placeholder `.d.ts` for extension space "cipherstash". + * + * The framework re-emits this file on every `migration plan` run + * alongside `contract.json` and `refs/head.json`. A typed `.d.ts` + * rendering pass for extension contracts is tracked separately; + * until that ships, consumers should import `contract.json` + * directly with `validateContract<…>(…)`. + */ +export {}; diff --git a/examples/prisma/migrations/cipherstash/contract.json b/examples/prisma/migrations/cipherstash/contract.json new file mode 100644 index 00000000..9c4939d7 --- /dev/null +++ b/examples/prisma/migrations/cipherstash/contract.json @@ -0,0 +1 @@ +{"_generated":{"message":"This file is automatically generated by \"prisma-next contract emit\".","regenerate":"To regenerate, run: prisma-next contract emit","warning":"⚠️ GENERATED FILE - DO NOT 
EDIT"},"capabilities":{"postgres":{"jsonAgg":true,"lateral":true,"limit":true,"orderBy":true,"returning":true},"sql":{"defaultInInsert":true,"enums":true,"returning":true}},"extensionPacks":{},"meta":{},"models":{"EqlV2Configuration":{"fields":{"data":{"nullable":false,"type":{"codecId":"pg/jsonb@1","kind":"scalar"}},"id":{"nullable":false,"type":{"codecId":"pg/text@1","kind":"scalar"}},"state":{"nullable":false,"type":{"codecId":"pg/text@1","kind":"scalar"}}},"relations":{},"storage":{"fields":{"data":{"column":"data"},"id":{"column":"id"},"state":{"column":"state"}},"table":"eql_v2_configuration"}}},"profileHash":"sha256:1a8dbe044289f30a1de958fe800cc5a8378b285d2e126a8c44b58864bac2c18e","roots":{"eql_v2_configuration":"EqlV2Configuration"},"schemaVersion":"1","storage":{"storageHash":"sha256:efa685171bebbb8f078f08d12be3578bb5d96b71669dccc6cc9e4be96af8cdb4","tables":{"eql_v2_configuration":{"columns":{"data":{"codecId":"pg/jsonb@1","nativeType":"jsonb","nullable":false},"id":{"codecId":"pg/text@1","nativeType":"text","nullable":false},"state":{"codecId":"pg/text@1","nativeType":"text","nullable":false}},"foreignKeys":[],"indexes":[],"primaryKey":{"columns":["id"]},"uniques":[]}}},"target":"postgres","targetFamily":"sql"} diff --git a/examples/prisma/migrations/cipherstash/refs/head.json b/examples/prisma/migrations/cipherstash/refs/head.json new file mode 100644 index 00000000..7dc7fb9e --- /dev/null +++ b/examples/prisma/migrations/cipherstash/refs/head.json @@ -0,0 +1 @@ +{"hash":"sha256:efa685171bebbb8f078f08d12be3578bb5d96b71669dccc6cc9e4be96af8cdb4","invariants":["cipherstash:install-eql-bundle-v1"]} diff --git a/examples/prisma/package.json b/examples/prisma/package.json new file mode 100644 index 00000000..98421f0f --- /dev/null +++ b/examples/prisma/package.json @@ -0,0 +1,36 @@ +{ + "name": "@cipherstash/prisma-next-example", + "private": true, + "version": "0.0.0", + "description": "End-to-end example of @cipherstash/prisma-next: searchable 
application-layer encryption for Postgres with Prisma Next, using @cipherstash/stack as the SDK.", + "type": "module", + "scripts": { + "emit": "prisma-next contract emit", + "migration:plan": "prisma-next migration plan", + "migration:apply": "prisma-next migration apply", + "start": "tsx src/index.ts", + "typecheck": "tsc --project tsconfig.json --noEmit" + }, + "dependencies": { + "@cipherstash/prisma-next": "workspace:*", + "@cipherstash/stack": "workspace:*", + "@prisma-next/adapter-postgres": "0.6.0-dev.8", + "@prisma-next/contract": "0.6.0-dev.8", + "@prisma-next/driver-postgres": "0.6.0-dev.8", + "@prisma-next/family-sql": "0.6.0-dev.8", + "@prisma-next/framework-components": "0.6.0-dev.8", + "@prisma-next/postgres": "0.6.0-dev.8", + "@prisma-next/sql-contract": "0.6.0-dev.8", + "@prisma-next/sql-contract-psl": "0.6.0-dev.8", + "@prisma-next/sql-orm-client": "0.6.0-dev.8", + "@prisma-next/sql-runtime": "0.6.0-dev.8", + "@prisma-next/target-postgres": "0.6.0-dev.8", + "dotenv": "^16.4.5" + }, + "devDependencies": { + "@prisma-next/cli": "0.6.0-dev.8", + "@types/node": "^22.15.12", + "tsx": "catalog:repo", + "typescript": "catalog:repo" + } +} diff --git a/examples/prisma/prisma-next.config.ts b/examples/prisma/prisma-next.config.ts new file mode 100644 index 00000000..9a6fbf72 --- /dev/null +++ b/examples/prisma/prisma-next.config.ts @@ -0,0 +1,31 @@ +import 'dotenv/config' +import postgresAdapter from '@prisma-next/adapter-postgres/control' +import { defineConfig } from '@prisma-next/cli/config-types' +import postgresDriver from '@prisma-next/driver-postgres/control' +import sql from '@prisma-next/family-sql/control' +import { prismaContract } from '@prisma-next/sql-contract-psl/provider' +import postgres from '@prisma-next/target-postgres/control' +import cipherstash from '@cipherstash/prisma-next/control' + +const databaseUrl = process.env['DATABASE_URL'] + +export default defineConfig({ + family: sql, + target: postgres, + driver: postgresDriver, + 
adapter: postgresAdapter, + extensionPacks: [cipherstash], + contract: prismaContract('./prisma/schema.prisma', { + output: 'src/prisma/contract.json', + target: postgres, + }), + migrations: { + dir: 'migrations', + }, + // `contract emit` does not need a database connection; only + // `migration apply` does. We pass `connection` through when + // `DATABASE_URL` is set so the same config supports every CLI + // subcommand, and let `migration apply` error explicitly if the + // connection is missing. + ...(databaseUrl ? { db: { connection: databaseUrl } } : {}), +}) diff --git a/examples/prisma/prisma/schema.prisma b/examples/prisma/prisma/schema.prisma new file mode 100644 index 00000000..f6c3746b --- /dev/null +++ b/examples/prisma/prisma/schema.prisma @@ -0,0 +1,44 @@ +// @cipherstash/prisma-next example schema. +// +// One User model exercising every cipherstash codec the extension +// ships — string, double, bigint, date, boolean, and json. The +// no-args constructors default every per-codec search-mode flag to +// `true`, opting each column into the maximal index surface its +// codec supports. +// +// Operator visibility per column follows from the codec's search-mode +// flag set (see the package README's operator surface table): +// +// - email (string) — cipherstashEq / Ilike / NotIlike / +// Gt / Gte / Lt / Lte / Between / +// NotBetween / InArray / NotInArray. +// - salary (double) — equality + order-and-range. +// - accountId (bigint) — equality + order-and-range. +// - birthday (date) — equality + order-and-range. +// - emailVerified (boolean) — equality only. +// - preferences (json) — cipherstashJsonbPathQueryFirst / +// cipherstashJsonbGet SELECT-expr +// helpers; cipherstashJsonbPathExists +// predicate is currently no-op against +// the live EQL bundle — see the +// "Known limitations" section of the +// package README. 
+ +model User { + id String @id + email cipherstash.EncryptedString() + salary cipherstash.EncryptedDouble() + accountId cipherstash.EncryptedBigInt() @map("accountid") + birthday cipherstash.EncryptedDate() + emailVerified cipherstash.EncryptedBoolean() @map("emailverified") + preferences cipherstash.EncryptedJson() + + // The CipherStash EQL bundle hits a SQL-injection-style bug in + // `eql_v2.add_encrypted_constraint` when the table or column name + // needs quoting (reserved words, mixed case, etc.). `user` is + // reserved in Postgres, so we map the table to `users`; the + // mixed-case columns are mapped to lowercase column names so the + // bundle's `%I` interpolation renders them unquoted. Drop these + // overrides once the upstream bundle bug is fixed. + @@map("users") +} diff --git a/examples/prisma/src/db.ts b/examples/prisma/src/db.ts new file mode 100644 index 00000000..b9f31e68 --- /dev/null +++ b/examples/prisma/src/db.ts @@ -0,0 +1,27 @@ +/** + * Wire the Prisma Next Postgres runtime with the cipherstash extension + * in one call. + * + * `cipherstashFromStack({ contractJson })` derives the encryption + * schemas from the contract, constructs the `@cipherstash/stack` + * `EncryptionClient` against your `CS_*` env vars, builds the SDK + * adapter, and returns ready-to-spread arrays for `extensions` and + * `middleware`. Override `schemas` only if you have additional tables + * the contract does not model. 
+ */ + +import 'dotenv/config' + +import { cipherstashFromStack } from '@cipherstash/prisma-next/stack' +import postgres from '@prisma-next/postgres/runtime' + +import type { Contract } from './prisma/contract.d' +import contractJson from './prisma/contract.json' with { type: 'json' } + +const cipherstash = await cipherstashFromStack({ contractJson }) + +export const db = postgres({ + contractJson, + extensions: cipherstash.extensions, + middleware: cipherstash.middleware, +}) diff --git a/examples/prisma/src/index.ts b/examples/prisma/src/index.ts new file mode 100644 index 00000000..35cc1023 --- /dev/null +++ b/examples/prisma/src/index.ts @@ -0,0 +1,214 @@ +/** + * @cipherstash/prisma-next example — end-to-end demo. + * + * Exercises every cipherstash codec the extension ships, plus the + * trait-dispatched predicate operators and the sort helpers, against + * a real Postgres + EQL database. + * + * The bulk-encrypt middleware groups every plaintext placeholder + * (row payloads + search terms) into a single `bulkEncrypt` SDK + * round-trip per `(table, column)` group before each query executes; + * `decryptAll(rows)` mirrors the same coalescing on the read side + * with one `bulkDecrypt` call per `(table, column)` group across + * every envelope on every column of the result set. + * + * Prerequisites: + * + * 1. A Postgres database with the EQL bundle installed. The + * extension contributes its own contract space at + * `migrations/cipherstash/` which installs the EQL composite + * types, configuration table, and bundle SQL alongside the + * application schema — `pnpm migration:apply` runs it for you. + * 2. A CipherStash workspace + ZeroKMS credentials. Populate + * `CS_WORKSPACE_CRN`, `CS_CLIENT_ID`, `CS_CLIENT_KEY`, and + * `CS_CLIENT_ACCESS_KEY` in `.env` (see `.env.example`). + * 3. `DATABASE_URL` in `.env` pointing at the database from (1). 
+ */ + +import 'dotenv/config' + +import { + cipherstashAsc, + decryptAll, + EncryptedBigInt, + EncryptedBoolean, + EncryptedDate, + EncryptedDouble, + EncryptedJson, + EncryptedString, +} from '@cipherstash/prisma-next/runtime' + +import { db } from './db' + +interface UserSeed { + readonly id: string + readonly email: string + readonly salary: number + readonly accountId: bigint + readonly birthday: Date + readonly emailVerified: boolean + readonly preferences: { + readonly theme: string + readonly notifications: boolean + } +} + +const SEED_USERS: readonly UserSeed[] = [ + { + id: 'user-0', + email: 'alice@example.com', + salary: 95_000, + accountId: 100_000_000_001n, + birthday: new Date('1990-04-12'), + emailVerified: true, + preferences: { theme: 'dark', notifications: true }, + }, + { + id: 'user-1', + email: 'bob@example.com', + salary: 110_000, + accountId: 100_000_000_002n, + birthday: new Date('1985-09-23'), + emailVerified: true, + preferences: { theme: 'light', notifications: false }, + }, + { + id: 'user-2', + email: 'carol@example.com', + salary: 75_000, + accountId: 100_000_000_003n, + birthday: new Date('1995-01-07'), + emailVerified: false, + preferences: { theme: 'dark', notifications: true }, + }, + { + id: 'user-3', + email: 'dave@otherorg.test', + salary: 145_000, + accountId: 100_000_000_004n, + birthday: new Date('1978-11-30'), + emailVerified: true, + preferences: { theme: 'light', notifications: true }, + }, +] + +async function main() { + const url = process.env['DATABASE_URL'] + if (!url) { + console.error( + 'Set DATABASE_URL in your environment (e.g. 
.env) before running this demo.', + ) + process.exit(1) + } + + const runtime = await db.connect({ url }) + try { + await insertUsers() + await searchByEq() + await searchByIlikeAndDecrypt() + await rangeQueryOnSalary() + await betweenQueryOnBirthday() + await inArrayQueryOnAccountId() + await equalityQueryOnEmailVerified() + await sortByEmailAsc() + } finally { + await runtime.close() + } +} + +async function insertUsers(): Promise { + console.log('--- Insert (mixed-codec round-trip) ---') + await Promise.all( + SEED_USERS.map((seed) => + db.orm.User.create({ + id: seed.id, + email: EncryptedString.from(seed.email), + salary: EncryptedDouble.from(seed.salary), + accountId: EncryptedBigInt.from(seed.accountId), + birthday: EncryptedDate.from(seed.birthday), + emailVerified: EncryptedBoolean.from(seed.emailVerified), + preferences: EncryptedJson.from(seed.preferences), + }), + ), + ) + console.log( + `Inserted ${SEED_USERS.length} rows across six cipherstash codecs.`, + ) +} + +async function searchByEq(): Promise { + console.log('\n--- cipherstashEq (string equality) ---') + const rows = await db.orm.User.where((u) => + u.email.cipherstashEq('alice@example.com'), + ).all() + console.log(`Found ${rows.length} row(s) for alice@example.com.`) + await decryptAll(rows) + for (const row of rows) { + console.log(` ${row.id}: ${await row.email.decrypt()}`) + } +} + +async function searchByIlikeAndDecrypt(): Promise { + console.log('\n--- cipherstashIlike (string free-text-search) ---') + const rows = await db.orm.User.where((u) => + u.email.cipherstashIlike('%@example.com'), + ).all() + console.log(`Found ${rows.length} row(s) matching %@example.com.`) + await decryptAll(rows) + for (const row of rows) { + console.log(` ${row.id}: ${await row.email.decrypt()}`) + } +} + +async function rangeQueryOnSalary(): Promise { + console.log('\n--- cipherstashGt (double order-and-range) ---') + const rows = await db.orm.User.where((u) => + u.salary.cipherstashGt(100_000), + ).all() + 
console.log(`Found ${rows.length} user(s) with salary > 100,000.`) + await decryptAll(rows) + for (const row of rows) { + console.log(` ${row.id}: salary=${await row.salary.decrypt()}`) + } +} + +async function betweenQueryOnBirthday(): Promise { + console.log('\n--- cipherstashBetween (date order-and-range) ---') + const lower = new Date('1985-01-01') + const upper = new Date('1995-12-31') + const rows = await db.orm.User.where((u) => + u.birthday.cipherstashBetween(lower, upper), + ).all() + console.log(`Found ${rows.length} user(s) born between 1985 and 1995.`) +} + +async function inArrayQueryOnAccountId(): Promise { + console.log('\n--- cipherstashInArray (bigint equality) ---') + const rows = await db.orm.User.where((u) => + u.accountId.cipherstashInArray([100_000_000_001n, 100_000_000_004n]), + ).all() + console.log( + `Found ${rows.length} user(s) whose accountId is in the supplied array.`, + ) +} + +async function equalityQueryOnEmailVerified(): Promise { + console.log('\n--- cipherstashInArray (boolean equality-only) ---') + // Booleans surface only the equality-trait operators; a single-element + // array is the canonical equality form on non-string codecs. 
+ const rows = await db.orm.User.where((u) => + u.emailVerified.cipherstashInArray([true]), + ).all() + console.log(`Found ${rows.length} user(s) with emailVerified = true.`) +} + +async function sortByEmailAsc(): Promise { + console.log('\n--- cipherstashAsc (bare-column ORDER BY) ---') + const rows = await db.orm.User.orderBy((u) => cipherstashAsc(u.email)).all() + await decryptAll(rows) + for (const row of rows) { + console.log(` ${row.id}: email=${await row.email.decrypt()}`) + } +} + +await main() diff --git a/examples/prisma/src/prisma/contract.d.ts b/examples/prisma/src/prisma/contract.d.ts new file mode 100644 index 00000000..6de5bc0a --- /dev/null +++ b/examples/prisma/src/prisma/contract.d.ts @@ -0,0 +1,486 @@ +// ⚠️ GENERATED FILE - DO NOT EDIT +// This file is automatically generated by 'prisma-next contract emit'. +// To regenerate, run: prisma-next contract emit +import type { CodecTypes as PgTypes } from '@prisma-next/target-postgres/codec-types'; +import type { JsonValue } from '@prisma-next/target-postgres/codec-types'; +import type { Char } from '@prisma-next/target-postgres/codec-types'; +import type { Varchar } from '@prisma-next/target-postgres/codec-types'; +import type { Numeric } from '@prisma-next/target-postgres/codec-types'; +import type { Bit } from '@prisma-next/target-postgres/codec-types'; +import type { VarBit } from '@prisma-next/target-postgres/codec-types'; +import type { Timestamp } from '@prisma-next/target-postgres/codec-types'; +import type { Timestamptz } from '@prisma-next/target-postgres/codec-types'; +import type { Time } from '@prisma-next/target-postgres/codec-types'; +import type { Timetz } from '@prisma-next/target-postgres/codec-types'; +import type { Interval } from '@prisma-next/target-postgres/codec-types'; +import type { CodecTypes as CipherstashTypes } from '@prisma-next/extension-cipherstash/codec-types'; +import type { EncryptedString } from '@prisma-next/extension-cipherstash/runtime'; +import type { 
EncryptedDouble } from '@prisma-next/extension-cipherstash/runtime'; +import type { EncryptedBigInt } from '@prisma-next/extension-cipherstash/runtime'; +import type { EncryptedDate } from '@prisma-next/extension-cipherstash/runtime'; +import type { EncryptedBoolean } from '@prisma-next/extension-cipherstash/runtime'; +import type { EncryptedJson } from '@prisma-next/extension-cipherstash/runtime'; +import type { QueryOperationTypes as PgAdapterQueryOps } from '@prisma-next/adapter-postgres/operation-types'; +import type { QueryOperationTypes as CipherstashQueryOperationTypes } from '@prisma-next/extension-cipherstash/operation-types'; + +import type { + ContractWithTypeMaps, + TypeMaps as TypeMapsType, +} from '@prisma-next/sql-contract/types'; +import type { + Contract as ContractType, + ExecutionHashBase, + ProfileHashBase, + StorageHashBase, +} from '@prisma-next/contract/types'; + +export type StorageHash = + StorageHashBase<'sha256:7475191ce0d78258ce5586265bcdfd12202f5daf90690b902890e58eb7508373'>; +export type ExecutionHash = ExecutionHashBase; +export type ProfileHash = + ProfileHashBase<'sha256:1a8dbe044289f30a1de958fe800cc5a8378b285d2e126a8c44b58864bac2c18e'>; + +export type CodecTypes = PgTypes & CipherstashTypes; +export type LaneCodecTypes = CodecTypes; +export type QueryOperationTypes = PgAdapterQueryOps & + CipherstashQueryOperationTypes; +type DefaultLiteralValue = CodecId extends keyof CodecTypes + ? 
CodecTypes[CodecId]['output'] + : _Encoded; + +export type FieldOutputTypes = { + readonly User: { + readonly id: CodecTypes['pg/text@1']['output']; + readonly email: CodecTypes['cipherstash/string@1']['output']; + readonly salary: CodecTypes['cipherstash/double@1']['output']; + readonly accountId: CodecTypes['cipherstash/bigint@1']['output']; + readonly birthday: CodecTypes['cipherstash/date@1']['output']; + readonly emailVerified: CodecTypes['cipherstash/boolean@1']['output']; + readonly preferences: CodecTypes['cipherstash/json@1']['output']; + }; +}; +export type FieldInputTypes = { + readonly User: { + readonly id: CodecTypes['pg/text@1']['input']; + readonly email: CodecTypes['cipherstash/string@1']['input']; + readonly salary: CodecTypes['cipherstash/double@1']['input']; + readonly accountId: CodecTypes['cipherstash/bigint@1']['input']; + readonly birthday: CodecTypes['cipherstash/date@1']['input']; + readonly emailVerified: CodecTypes['cipherstash/boolean@1']['input']; + readonly preferences: CodecTypes['cipherstash/json@1']['input']; + }; +}; +export type TypeMaps = TypeMapsType< + CodecTypes, + QueryOperationTypes, + FieldOutputTypes, + FieldInputTypes +>; + +type ContractBase = ContractType< + { + readonly tables: { + readonly users: { + columns: { + readonly id: { + readonly nativeType: 'text'; + readonly codecId: 'pg/text@1'; + readonly nullable: false; + }; + readonly email: { + readonly nativeType: 'eql_v2_encrypted'; + readonly codecId: 'cipherstash/string@1'; + readonly nullable: false; + readonly typeParams: { + readonly equality: true; + readonly freeTextSearch: true; + readonly orderAndRange: true; + }; + }; + readonly salary: { + readonly nativeType: 'eql_v2_encrypted'; + readonly codecId: 'cipherstash/double@1'; + readonly nullable: false; + readonly typeParams: { readonly equality: true; readonly orderAndRange: true }; + }; + readonly accountid: { + readonly nativeType: 'eql_v2_encrypted'; + readonly codecId: 'cipherstash/bigint@1'; + 
readonly nullable: false; + readonly typeParams: { readonly equality: true; readonly orderAndRange: true }; + }; + readonly birthday: { + readonly nativeType: 'eql_v2_encrypted'; + readonly codecId: 'cipherstash/date@1'; + readonly nullable: false; + readonly typeParams: { readonly equality: true; readonly orderAndRange: true }; + }; + readonly emailverified: { + readonly nativeType: 'eql_v2_encrypted'; + readonly codecId: 'cipherstash/boolean@1'; + readonly nullable: false; + readonly typeParams: { readonly equality: true }; + }; + readonly preferences: { + readonly nativeType: 'eql_v2_encrypted'; + readonly codecId: 'cipherstash/json@1'; + readonly nullable: false; + readonly typeParams: { readonly searchableJson: true }; + }; + }; + primaryKey: { readonly columns: readonly ['id'] }; + uniques: readonly []; + indexes: readonly []; + foreignKeys: readonly []; + }; + }; + readonly types: Record; + readonly storageHash: StorageHash; + }, + { + readonly User: { + readonly fields: { + readonly id: { + readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/text@1' }; + }; + readonly email: { + readonly nullable: false; + readonly type: { + readonly kind: 'scalar'; + readonly codecId: 'cipherstash/string@1'; + readonly typeParams: { + readonly equality: true; + readonly freeTextSearch: true; + readonly orderAndRange: true; + }; + }; + }; + readonly salary: { + readonly nullable: false; + readonly type: { + readonly kind: 'scalar'; + readonly codecId: 'cipherstash/double@1'; + readonly typeParams: { readonly equality: true; readonly orderAndRange: true }; + }; + }; + readonly accountId: { + readonly nullable: false; + readonly type: { + readonly kind: 'scalar'; + readonly codecId: 'cipherstash/bigint@1'; + readonly typeParams: { readonly equality: true; readonly orderAndRange: true }; + }; + }; + readonly birthday: { + readonly nullable: false; + readonly type: { + readonly kind: 'scalar'; + readonly codecId: 'cipherstash/date@1'; + 
readonly typeParams: { readonly equality: true; readonly orderAndRange: true }; + }; + }; + readonly emailVerified: { + readonly nullable: false; + readonly type: { + readonly kind: 'scalar'; + readonly codecId: 'cipherstash/boolean@1'; + readonly typeParams: { readonly equality: true }; + }; + }; + readonly preferences: { + readonly nullable: false; + readonly type: { + readonly kind: 'scalar'; + readonly codecId: 'cipherstash/json@1'; + readonly typeParams: { readonly searchableJson: true }; + }; + }; + }; + readonly relations: Record; + readonly storage: { + readonly table: 'users'; + readonly fields: { + readonly id: { readonly column: 'id' }; + readonly email: { readonly column: 'email' }; + readonly salary: { readonly column: 'salary' }; + readonly accountId: { readonly column: 'accountid' }; + readonly birthday: { readonly column: 'birthday' }; + readonly emailVerified: { readonly column: 'emailverified' }; + readonly preferences: { readonly column: 'preferences' }; + }; + }; + }; + } +> & { + readonly target: 'postgres'; + readonly targetFamily: 'sql'; + readonly roots: { readonly users: 'User' }; + readonly capabilities: { + readonly postgres: { + readonly jsonAgg: true; + readonly lateral: true; + readonly limit: true; + readonly orderBy: true; + readonly returning: true; + }; + readonly sql: { + readonly defaultInInsert: true; + readonly enums: true; + readonly returning: true; + }; + }; + readonly extensionPacks: { + readonly cipherstash: { + readonly familyId: 'sql'; + readonly id: 'cipherstash'; + readonly kind: 'extension'; + readonly targetId: 'postgres'; + readonly types: { + readonly codecTypes: { + readonly codecInstances: readonly [ + { + readonly descriptor: { + readonly codecId: 'cipherstash/string@1'; + readonly factory: unknown; + readonly isParameterized: false; + readonly meta: { + readonly db: { + readonly sql: { + readonly postgres: { readonly nativeType: 'eql_v2_encrypted' }; + }; + }; + }; + readonly paramsSchema: { + readonly 
'~standard': { + readonly validate: unknown; + readonly vendor: 'cipherstash'; + readonly version: 1; + }; + }; + readonly renderOutputType: unknown; + readonly targetTypes: readonly ['eql_v2_encrypted']; + readonly traits: readonly [ + 'cipherstash:equality', + 'cipherstash:free-text-search', + 'cipherstash:order-and-range', + ]; + }; + }, + { + readonly descriptor: { + readonly codecId: 'cipherstash/double@1'; + readonly factory: unknown; + readonly isParameterized: false; + readonly meta: { + readonly db: { + readonly sql: { + readonly postgres: { readonly nativeType: 'eql_v2_encrypted' }; + }; + }; + }; + readonly paramsSchema: { + readonly '~standard': { + readonly validate: unknown; + readonly vendor: 'cipherstash'; + readonly version: 1; + }; + }; + readonly renderOutputType: unknown; + readonly targetTypes: readonly ['eql_v2_encrypted']; + readonly traits: readonly ['cipherstash:equality', 'cipherstash:order-and-range']; + }; + }, + { + readonly descriptor: { + readonly codecId: 'cipherstash/bigint@1'; + readonly factory: unknown; + readonly isParameterized: false; + readonly meta: { + readonly db: { + readonly sql: { + readonly postgres: { readonly nativeType: 'eql_v2_encrypted' }; + }; + }; + }; + readonly paramsSchema: { + readonly '~standard': { + readonly validate: unknown; + readonly vendor: 'cipherstash'; + readonly version: 1; + }; + }; + readonly renderOutputType: unknown; + readonly targetTypes: readonly ['eql_v2_encrypted']; + readonly traits: readonly ['cipherstash:equality', 'cipherstash:order-and-range']; + }; + }, + { + readonly descriptor: { + readonly codecId: 'cipherstash/date@1'; + readonly factory: unknown; + readonly isParameterized: false; + readonly meta: { + readonly db: { + readonly sql: { + readonly postgres: { readonly nativeType: 'eql_v2_encrypted' }; + }; + }; + }; + readonly paramsSchema: { + readonly '~standard': { + readonly validate: unknown; + readonly vendor: 'cipherstash'; + readonly version: 1; + }; + }; + readonly 
renderOutputType: unknown; + readonly targetTypes: readonly ['eql_v2_encrypted']; + readonly traits: readonly ['cipherstash:equality', 'cipherstash:order-and-range']; + }; + }, + { + readonly descriptor: { + readonly codecId: 'cipherstash/boolean@1'; + readonly factory: unknown; + readonly isParameterized: false; + readonly meta: { + readonly db: { + readonly sql: { + readonly postgres: { readonly nativeType: 'eql_v2_encrypted' }; + }; + }; + }; + readonly paramsSchema: { + readonly '~standard': { + readonly validate: unknown; + readonly vendor: 'cipherstash'; + readonly version: 1; + }; + }; + readonly renderOutputType: unknown; + readonly targetTypes: readonly ['eql_v2_encrypted']; + readonly traits: readonly ['cipherstash:equality']; + }; + }, + { + readonly descriptor: { + readonly codecId: 'cipherstash/json@1'; + readonly factory: unknown; + readonly isParameterized: false; + readonly meta: { + readonly db: { + readonly sql: { + readonly postgres: { readonly nativeType: 'eql_v2_encrypted' }; + }; + }; + }; + readonly paramsSchema: { + readonly '~standard': { + readonly validate: unknown; + readonly vendor: 'cipherstash'; + readonly version: 1; + }; + }; + readonly renderOutputType: unknown; + readonly targetTypes: readonly ['eql_v2_encrypted']; + readonly traits: readonly ['cipherstash:searchable-json']; + }; + }, + ]; + readonly import: { + readonly alias: 'CipherstashTypes'; + readonly named: 'CodecTypes'; + readonly package: '@prisma-next/extension-cipherstash/codec-types'; + }; + readonly typeImports: readonly [ + { + readonly alias: 'EncryptedString'; + readonly named: 'EncryptedString'; + readonly package: '@prisma-next/extension-cipherstash/runtime'; + }, + { + readonly alias: 'EncryptedDouble'; + readonly named: 'EncryptedDouble'; + readonly package: '@prisma-next/extension-cipherstash/runtime'; + }, + { + readonly alias: 'EncryptedBigInt'; + readonly named: 'EncryptedBigInt'; + readonly package: '@prisma-next/extension-cipherstash/runtime'; + }, + { + 
readonly alias: 'EncryptedDate'; + readonly named: 'EncryptedDate'; + readonly package: '@prisma-next/extension-cipherstash/runtime'; + }, + { + readonly alias: 'EncryptedBoolean'; + readonly named: 'EncryptedBoolean'; + readonly package: '@prisma-next/extension-cipherstash/runtime'; + }, + { + readonly alias: 'EncryptedJson'; + readonly named: 'EncryptedJson'; + readonly package: '@prisma-next/extension-cipherstash/runtime'; + }, + ]; + }; + readonly queryOperationTypes: { + readonly import: { + readonly alias: 'CipherstashQueryOperationTypes'; + readonly named: 'QueryOperationTypes'; + readonly package: '@prisma-next/extension-cipherstash/operation-types'; + }; + }; + readonly storage: readonly [ + { + readonly familyId: 'sql'; + readonly nativeType: 'eql_v2_encrypted'; + readonly targetId: 'postgres'; + readonly typeId: 'cipherstash/string@1'; + }, + { + readonly familyId: 'sql'; + readonly nativeType: 'eql_v2_encrypted'; + readonly targetId: 'postgres'; + readonly typeId: 'cipherstash/double@1'; + }, + { + readonly familyId: 'sql'; + readonly nativeType: 'eql_v2_encrypted'; + readonly targetId: 'postgres'; + readonly typeId: 'cipherstash/bigint@1'; + }, + { + readonly familyId: 'sql'; + readonly nativeType: 'eql_v2_encrypted'; + readonly targetId: 'postgres'; + readonly typeId: 'cipherstash/date@1'; + }, + { + readonly familyId: 'sql'; + readonly nativeType: 'eql_v2_encrypted'; + readonly targetId: 'postgres'; + readonly typeId: 'cipherstash/boolean@1'; + }, + { + readonly familyId: 'sql'; + readonly nativeType: 'eql_v2_encrypted'; + readonly targetId: 'postgres'; + readonly typeId: 'cipherstash/json@1'; + }, + ]; + }; + readonly version: '0.0.1'; + }; + }; + readonly meta: {}; + + readonly profileHash: ProfileHash; +}; + +export type Contract = ContractWithTypeMaps; + +export type Tables = Contract['storage']['tables']; +export type Models = Contract['models']; diff --git a/examples/prisma/src/prisma/contract.json b/examples/prisma/src/prisma/contract.json new 
file mode 100644 index 00000000..d71e5d28 --- /dev/null +++ b/examples/prisma/src/prisma/contract.json @@ -0,0 +1,467 @@ +{ + "schemaVersion": "1", + "targetFamily": "sql", + "target": "postgres", + "profileHash": "sha256:1a8dbe044289f30a1de958fe800cc5a8378b285d2e126a8c44b58864bac2c18e", + "roots": { + "users": "User" + }, + "models": { + "User": { + "fields": { + "accountId": { + "nullable": false, + "type": { + "codecId": "cipherstash/bigint@1", + "kind": "scalar", + "typeParams": { + "equality": true, + "orderAndRange": true + } + } + }, + "birthday": { + "nullable": false, + "type": { + "codecId": "cipherstash/date@1", + "kind": "scalar", + "typeParams": { + "equality": true, + "orderAndRange": true + } + } + }, + "email": { + "nullable": false, + "type": { + "codecId": "cipherstash/string@1", + "kind": "scalar", + "typeParams": { + "equality": true, + "freeTextSearch": true, + "orderAndRange": true + } + } + }, + "emailVerified": { + "nullable": false, + "type": { + "codecId": "cipherstash/boolean@1", + "kind": "scalar", + "typeParams": { + "equality": true + } + } + }, + "id": { + "nullable": false, + "type": { + "codecId": "pg/text@1", + "kind": "scalar" + } + }, + "preferences": { + "nullable": false, + "type": { + "codecId": "cipherstash/json@1", + "kind": "scalar", + "typeParams": { + "searchableJson": true + } + } + }, + "salary": { + "nullable": false, + "type": { + "codecId": "cipherstash/double@1", + "kind": "scalar", + "typeParams": { + "equality": true, + "orderAndRange": true + } + } + } + }, + "relations": {}, + "storage": { + "fields": { + "accountId": { + "column": "accountid" + }, + "birthday": { + "column": "birthday" + }, + "email": { + "column": "email" + }, + "emailVerified": { + "column": "emailverified" + }, + "id": { + "column": "id" + }, + "preferences": { + "column": "preferences" + }, + "salary": { + "column": "salary" + } + }, + "table": "users" + } + } + }, + "storage": { + "storageHash": 
"sha256:7475191ce0d78258ce5586265bcdfd12202f5daf90690b902890e58eb7508373", + "tables": { + "users": { + "columns": { + "accountid": { + "codecId": "cipherstash/bigint@1", + "nativeType": "eql_v2_encrypted", + "nullable": false, + "typeParams": { + "equality": true, + "orderAndRange": true + } + }, + "birthday": { + "codecId": "cipherstash/date@1", + "nativeType": "eql_v2_encrypted", + "nullable": false, + "typeParams": { + "equality": true, + "orderAndRange": true + } + }, + "email": { + "codecId": "cipherstash/string@1", + "nativeType": "eql_v2_encrypted", + "nullable": false, + "typeParams": { + "equality": true, + "freeTextSearch": true, + "orderAndRange": true + } + }, + "emailverified": { + "codecId": "cipherstash/boolean@1", + "nativeType": "eql_v2_encrypted", + "nullable": false, + "typeParams": { + "equality": true + } + }, + "id": { + "codecId": "pg/text@1", + "nativeType": "text", + "nullable": false + }, + "preferences": { + "codecId": "cipherstash/json@1", + "nativeType": "eql_v2_encrypted", + "nullable": false, + "typeParams": { + "searchableJson": true + } + }, + "salary": { + "codecId": "cipherstash/double@1", + "nativeType": "eql_v2_encrypted", + "nullable": false, + "typeParams": { + "equality": true, + "orderAndRange": true + } + } + }, + "foreignKeys": [], + "indexes": [], + "primaryKey": { + "columns": [ + "id" + ] + }, + "uniques": [] + } + } + }, + "capabilities": { + "postgres": { + "jsonAgg": true, + "lateral": true, + "limit": true, + "orderBy": true, + "returning": true + }, + "sql": { + "defaultInInsert": true, + "enums": true, + "returning": true + } + }, + "extensionPacks": { + "cipherstash": { + "familyId": "sql", + "id": "cipherstash", + "kind": "extension", + "targetId": "postgres", + "types": { + "codecTypes": { + "codecInstances": [ + { + "descriptor": { + "codecId": "cipherstash/string@1", + "meta": { + "db": { + "sql": { + "postgres": { + "nativeType": "eql_v2_encrypted" + } + } + } + }, + "paramsSchema": { + "~standard": { + 
"vendor": "cipherstash", + "version": 1 + } + }, + "targetTypes": [ + "eql_v2_encrypted" + ], + "traits": [ + "cipherstash:equality", + "cipherstash:free-text-search", + "cipherstash:order-and-range" + ] + } + }, + { + "descriptor": { + "codecId": "cipherstash/double@1", + "meta": { + "db": { + "sql": { + "postgres": { + "nativeType": "eql_v2_encrypted" + } + } + } + }, + "paramsSchema": { + "~standard": { + "vendor": "cipherstash", + "version": 1 + } + }, + "targetTypes": [ + "eql_v2_encrypted" + ], + "traits": [ + "cipherstash:equality", + "cipherstash:order-and-range" + ] + } + }, + { + "descriptor": { + "codecId": "cipherstash/bigint@1", + "meta": { + "db": { + "sql": { + "postgres": { + "nativeType": "eql_v2_encrypted" + } + } + } + }, + "paramsSchema": { + "~standard": { + "vendor": "cipherstash", + "version": 1 + } + }, + "targetTypes": [ + "eql_v2_encrypted" + ], + "traits": [ + "cipherstash:equality", + "cipherstash:order-and-range" + ] + } + }, + { + "descriptor": { + "codecId": "cipherstash/date@1", + "meta": { + "db": { + "sql": { + "postgres": { + "nativeType": "eql_v2_encrypted" + } + } + } + }, + "paramsSchema": { + "~standard": { + "vendor": "cipherstash", + "version": 1 + } + }, + "targetTypes": [ + "eql_v2_encrypted" + ], + "traits": [ + "cipherstash:equality", + "cipherstash:order-and-range" + ] + } + }, + { + "descriptor": { + "codecId": "cipherstash/boolean@1", + "meta": { + "db": { + "sql": { + "postgres": { + "nativeType": "eql_v2_encrypted" + } + } + } + }, + "paramsSchema": { + "~standard": { + "vendor": "cipherstash", + "version": 1 + } + }, + "targetTypes": [ + "eql_v2_encrypted" + ], + "traits": [ + "cipherstash:equality" + ] + } + }, + { + "descriptor": { + "codecId": "cipherstash/json@1", + "meta": { + "db": { + "sql": { + "postgres": { + "nativeType": "eql_v2_encrypted" + } + } + } + }, + "paramsSchema": { + "~standard": { + "vendor": "cipherstash", + "version": 1 + } + }, + "targetTypes": [ + "eql_v2_encrypted" + ], + "traits": [ + 
"cipherstash:searchable-json" + ] + } + } + ], + "import": { + "alias": "CipherstashTypes", + "named": "CodecTypes", + "package": "@prisma-next/extension-cipherstash/codec-types" + }, + "typeImports": [ + { + "alias": "EncryptedString", + "named": "EncryptedString", + "package": "@prisma-next/extension-cipherstash/runtime" + }, + { + "alias": "EncryptedDouble", + "named": "EncryptedDouble", + "package": "@prisma-next/extension-cipherstash/runtime" + }, + { + "alias": "EncryptedBigInt", + "named": "EncryptedBigInt", + "package": "@prisma-next/extension-cipherstash/runtime" + }, + { + "alias": "EncryptedDate", + "named": "EncryptedDate", + "package": "@prisma-next/extension-cipherstash/runtime" + }, + { + "alias": "EncryptedBoolean", + "named": "EncryptedBoolean", + "package": "@prisma-next/extension-cipherstash/runtime" + }, + { + "alias": "EncryptedJson", + "named": "EncryptedJson", + "package": "@prisma-next/extension-cipherstash/runtime" + } + ] + }, + "queryOperationTypes": { + "import": { + "alias": "CipherstashQueryOperationTypes", + "named": "QueryOperationTypes", + "package": "@prisma-next/extension-cipherstash/operation-types" + } + }, + "storage": [ + { + "familyId": "sql", + "nativeType": "eql_v2_encrypted", + "targetId": "postgres", + "typeId": "cipherstash/string@1" + }, + { + "familyId": "sql", + "nativeType": "eql_v2_encrypted", + "targetId": "postgres", + "typeId": "cipherstash/double@1" + }, + { + "familyId": "sql", + "nativeType": "eql_v2_encrypted", + "targetId": "postgres", + "typeId": "cipherstash/bigint@1" + }, + { + "familyId": "sql", + "nativeType": "eql_v2_encrypted", + "targetId": "postgres", + "typeId": "cipherstash/date@1" + }, + { + "familyId": "sql", + "nativeType": "eql_v2_encrypted", + "targetId": "postgres", + "typeId": "cipherstash/boolean@1" + }, + { + "familyId": "sql", + "nativeType": "eql_v2_encrypted", + "targetId": "postgres", + "typeId": "cipherstash/json@1" + } + ] + }, + "version": "0.0.1" + } + }, + "meta": {}, + 
"_generated": { + "warning": "⚠️ GENERATED FILE - DO NOT EDIT", + "message": "This file is automatically generated by \"prisma-next contract emit\".", + "regenerate": "To regenerate, run: prisma-next contract emit" + } +} \ No newline at end of file diff --git a/examples/prisma/tsconfig.json b/examples/prisma/tsconfig.json new file mode 100644 index 00000000..cf5c1d54 --- /dev/null +++ b/examples/prisma/tsconfig.json @@ -0,0 +1,18 @@ +{ + "compilerOptions": { + "lib": ["ES2022"], + "target": "ES2022", + "module": "ESNext", + "moduleResolution": "bundler", + "esModuleInterop": true, + "resolveJsonModule": true, + "strict": true, + "skipLibCheck": true, + "noEmit": true, + "verbatimModuleSyntax": false, + "noFallthroughCasesInSwitch": true, + "outDir": "dist" + }, + "include": ["src/**/*.ts", "prisma-next.config.ts"], + "exclude": ["dist", "migrations", "node_modules"] +} diff --git a/package.json b/package.json index 3b0dc431..92a9d961 100644 --- a/package.json +++ b/package.json @@ -105,6 +105,33 @@ "drizzle-orm": ">=0.45.2", "postcss": ">=8.5.10", "hono": ">=4.12.14", - "@hono/node-server": ">=1.19.13" + "@hono/node-server": ">=1.19.13", + "@prisma-next/adapter-postgres": "0.6.0-dev.8", + "@prisma-next/cli": "0.6.0-dev.8", + "@prisma-next/config": "0.6.0-dev.8", + "@prisma-next/contract": "0.6.0-dev.8", + "@prisma-next/contract-authoring": "0.6.0-dev.8", + "@prisma-next/driver-postgres": "0.6.0-dev.8", + "@prisma-next/emitter": "0.6.0-dev.8", + "@prisma-next/errors": "0.6.0-dev.8", + "@prisma-next/family-sql": "0.6.0-dev.8", + "@prisma-next/framework-components": "0.6.0-dev.8", + "@prisma-next/ids": "0.6.0-dev.8", + "@prisma-next/migration-tools": "0.6.0-dev.8", + "@prisma-next/operations": "0.6.0-dev.8", + "@prisma-next/psl-parser": "0.6.0-dev.8", + "@prisma-next/psl-printer": "0.6.0-dev.8", + "@prisma-next/sql-contract": "0.6.0-dev.8", + "@prisma-next/sql-contract-emitter": "0.6.0-dev.8", + "@prisma-next/sql-contract-psl": "0.6.0-dev.8", + 
"@prisma-next/sql-contract-ts": "0.6.0-dev.8", + "@prisma-next/sql-errors": "0.6.0-dev.8", + "@prisma-next/sql-operations": "0.6.0-dev.8", + "@prisma-next/sql-relational-core": "0.6.0-dev.8", + "@prisma-next/sql-runtime": "0.6.0-dev.8", + "@prisma-next/sql-schema-ir": "0.6.0-dev.8", + "@prisma-next/target-postgres": "0.6.0-dev.8", + "@prisma-next/ts-render": "0.6.0-dev.8", + "@prisma-next/utils": "0.6.0-dev.8" } } diff --git a/packages/cli/src/bin/stash.ts b/packages/cli/src/bin/stash.ts index e8169ca5..edecae3f 100644 --- a/packages/cli/src/bin/stash.ts +++ b/packages/cli/src/bin/stash.ts @@ -109,6 +109,7 @@ Options: Init Flags: --supabase Use Supabase-specific setup flow --drizzle Use Drizzle-specific setup flow + --prisma-next Use Prisma Next-specific setup flow (EQL bundle installed via prisma-next migration apply) Plan Flags: --complete-rollout Plan the entire encryption lifecycle (schema-add through drop) @@ -143,6 +144,7 @@ DB Flags: Examples: ${STASH} init ${STASH} init --supabase + ${STASH} init --prisma-next ${STASH} plan ${STASH} impl ${STASH} impl --continue-without-plan diff --git a/packages/cli/src/commands/db/detect.ts b/packages/cli/src/commands/db/detect.ts index 61558deb..5900a0a8 100644 --- a/packages/cli/src/commands/db/detect.ts +++ b/packages/cli/src/commands/db/detect.ts @@ -125,3 +125,43 @@ export function detectDrizzle(cwd: string): boolean { return false } } + +/** + * Return true when the project uses Prisma Next. + * + * Detected via a `prisma-next.config.*` at the cwd (fast path) or + * a `@prisma-next/cli` / `@cipherstash/prisma-next` entry in + * package.json. Either signal alone is enough. 
+ */ +export function detectPrismaNext(cwd: string): boolean { + const configCandidates = [ + 'prisma-next.config.ts', + 'prisma-next.config.js', + 'prisma-next.config.mjs', + 'prisma-next.config.cjs', + ] + for (const candidate of configCandidates) { + if (existsSync(resolve(cwd, candidate))) return true + } + + const pkgPath = resolve(cwd, 'package.json') + if (!existsSync(pkgPath)) return false + + try { + const pkg = JSON.parse(readFileSync(pkgPath, 'utf-8')) as { + dependencies?: Record<string, string> + devDependencies?: Record<string, string> + peerDependencies?: Record<string, string> + optionalDependencies?: Record<string, string> + } + const deps = { + ...pkg.dependencies, + ...pkg.devDependencies, + ...pkg.peerDependencies, + ...pkg.optionalDependencies, + } + return '@prisma-next/cli' in deps || '@cipherstash/prisma-next' in deps + } catch { + return false + } +} diff --git a/packages/cli/src/commands/init/index.ts b/packages/cli/src/commands/init/index.ts index ee15b3cf..12d8a5c9 100644 --- a/packages/cli/src/commands/init/index.ts +++ b/packages/cli/src/commands/init/index.ts @@ -2,6 +2,7 @@ import * as p from '@clack/prompts' import { planCommand } from '../plan/index.js' import { createBaseProvider } from './providers/base.js' import { createDrizzleProvider } from './providers/drizzle.js' +import { createPrismaNextProvider } from './providers/prisma-next.js' import { createSupabaseProvider } from './providers/supabase.js' import { authenticateStep } from './steps/authenticate.js' import { buildSchemaStep } from './steps/build-schema.js' @@ -16,6 +17,7 @@ import { detectPackageManager, runnerCommand } from './utils.js' const PROVIDER_MAP: Record<string, () => InitProvider> = { supabase: createSupabaseProvider, drizzle: createDrizzleProvider, + 'prisma-next': createPrismaNextProvider, } /** diff --git a/packages/cli/src/commands/init/providers/__tests__/prisma-next.test.ts b/packages/cli/src/commands/init/providers/__tests__/prisma-next.test.ts new file mode 100644 index 00000000..196e07e9 --- /dev/null +++
b/packages/cli/src/commands/init/providers/__tests__/prisma-next.test.ts @@ -0,0 +1,39 @@ +import { describe, expect, it } from 'vitest' +import { createPrismaNextProvider } from '../prisma-next.js' + +describe('createPrismaNextProvider getNextSteps', () => { + const provider = createPrismaNextProvider() + + it('points at prisma-next migration plan + apply rather than stash db install', () => { + const steps = provider.getNextSteps({}, 'pnpm') + // The whole story hinges on this: Prisma Next users never run + // `stash db install` — the framework handles the EQL bundle. + for (const step of steps) { + expect(step).not.toMatch(/stash db install/) + } + const planApply = steps.find((s) => s.includes('migration plan')) + expect(planApply).toBeDefined() + expect(planApply).toContain('migration apply') + }) + + it('uses pnpm dlx for invocations when the package manager is pnpm', () => { + const steps = provider.getNextSteps({}, 'pnpm') + expect(steps.some((s) => s.includes('pnpm dlx prisma-next'))).toBe(true) + }) + + it('uses npx for invocations when the package manager is npm', () => { + const steps = provider.getNextSteps({}, 'npm') + expect(steps.some((s) => s.includes('npx prisma-next'))).toBe(true) + for (const step of steps) { + expect(step).not.toMatch(/\bbunx\b/) + } + }) + + it('mentions cipherstashFromStack rather than a hand-written encryption client', () => { + const steps = provider.getNextSteps({}, 'pnpm') + expect(steps.some((s) => s.includes('cipherstashFromStack'))).toBe(true) + for (const step of steps) { + expect(step).not.toMatch(/encryption\/index\.ts/) + } + }) +}) diff --git a/packages/cli/src/commands/init/providers/prisma-next.ts b/packages/cli/src/commands/init/providers/prisma-next.ts new file mode 100644 index 00000000..74807757 --- /dev/null +++ b/packages/cli/src/commands/init/providers/prisma-next.ts @@ -0,0 +1,29 @@ +import type { InitProvider } from '../types.js' +import { type PackageManager, runnerCommand } from '../utils.js' + 
+export function createPrismaNextProvider(): InitProvider { + return { + name: 'prisma-next', + introMessage: 'Setting up CipherStash for your Prisma Next project...', + // Note: Prisma Next absorbs the EQL bundle install and schema + // scaffold steps via its migration framework. The next-steps list + // below therefore points at `prisma-next migration plan|apply` + // instead of `stash db install`, and at `cipherstashFromStack` + // instead of an `encryption/index.ts` placeholder. + getNextSteps(_state, pm: PackageManager): string[] { + const stash = runnerCommand(pm, 'stash') + const prismaNext = runnerCommand(pm, 'prisma-next') + return [ + 'Declare encrypted columns in prisma/schema.prisma using cipherstash.Encrypted*()', + 'Register the extension: add `cipherstash` to `extensionPacks` in prisma-next.config.ts', + `Generate the contract: ${prismaNext} contract emit`, + `Plan + apply (installs the EQL bundle alongside your app schema): ${prismaNext} migration plan && ${prismaNext} migration apply`, + 'Wire the runtime: cipherstashFromStack({ contractJson }) — see @cipherstash/prisma-next/stack', + `Customize your schema: ${stash} wizard (AI-guided, automated)`, + 'Prisma Next guide: https://cipherstash.com/docs/stack/cipherstash/encryption/prisma-next', + 'Dashboard: https://dashboard.cipherstash.com/workspaces', + 'Need help? 
Discord or support@cipherstash.com', + ] + }, + } +} diff --git a/packages/cli/src/commands/init/steps/build-schema.ts b/packages/cli/src/commands/init/steps/build-schema.ts index 1c60aa75..f23e4782 100644 --- a/packages/cli/src/commands/init/steps/build-schema.ts +++ b/packages/cli/src/commands/init/steps/build-schema.ts @@ -1,7 +1,11 @@ import { existsSync, mkdirSync, writeFileSync } from 'node:fs' import { dirname, resolve } from 'node:path' import * as p from '@clack/prompts' -import { detectDrizzle, detectSupabase } from '../../db/detect.js' +import { + detectDrizzle, + detectPrismaNext, + detectSupabase, +} from '../../db/detect.js' import { readEnvKeyNames } from '../lib/env-keys.js' import { writeBaselineContextFile } from '../lib/write-context.js' import type { @@ -25,6 +29,11 @@ function detectIntegration( cwd: string, databaseUrl: string | undefined, ): Integration { + // Prisma Next is checked first: a project can use Prisma Next on top + // of a Supabase-hosted database, in which case both signals fire but + // the migration framework belongs to Prisma Next and that's what + // drives the install path. + if (detectPrismaNext(cwd)) return 'prisma-next' if (detectDrizzle(cwd)) return 'drizzle' if (detectSupabase(databaseUrl)) return 'supabase' return 'postgresql' @@ -49,12 +58,38 @@ function detectIntegration( export const buildSchemaStep: InitStep = { id: 'build-schema', name: 'Generate encryption client', - async run(state: InitState, _provider: InitProvider): Promise { + async run(state: InitState, provider: InitProvider): Promise { const cwd = process.cwd() - const integration = detectIntegration(cwd, state.databaseUrl) + const integration = + provider.name === 'prisma-next' + ? 
'prisma-next' + : detectIntegration(cwd, state.databaseUrl) const clientFilePath = DEFAULT_CLIENT_PATH const resolvedPath = resolve(cwd, clientFilePath) + // Prisma Next derives the stack-side schema from `contract.json` + // via `cipherstashFromStack({ contractJson })` at runtime — there + // is no hand-written `src/encryption/index.ts` to scaffold. Skip + // the placeholder step and let the framework drive the schema + // surface. + if (integration === 'prisma-next') { + p.log.success( + 'Skipping encryption-client scaffold — Prisma Next derives schemas from contract.json via `cipherstashFromStack({ contractJson })`.', + ) + + const envKeys = readEnvKeyNames(cwd) + const nextState: InitState = { + ...state, + schemaGenerated: false, + integration, + schemas: [], + schemaFromIntrospection: false, + envKeys, + } + writeBaselineContextFile(nextState, cwd, envKeys) + return nextState + } + // Existing-file branch: silent overwrite is bad. Ask once. let keepExisting = false if (existsSync(resolvedPath)) { diff --git a/packages/cli/src/commands/init/steps/install-deps.ts b/packages/cli/src/commands/init/steps/install-deps.ts index 4c7bb105..1677dd03 100644 --- a/packages/cli/src/commands/init/steps/install-deps.ts +++ b/packages/cli/src/commands/init/steps/install-deps.ts @@ -10,6 +10,7 @@ import { const STACK_PACKAGE = '@cipherstash/stack' const CLI_PACKAGE = 'stash' +const PRISMA_NEXT_PACKAGE = '@cipherstash/prisma-next' /** * Install the runtime + dev npm packages the user needs to run encryption: @@ -26,20 +27,29 @@ const CLI_PACKAGE = 'stash' export const installDepsStep: InitStep = { id: 'install-deps', name: 'Install dependencies', - async run(state: InitState, _provider: InitProvider): Promise { + async run(state: InitState, provider: InitProvider): Promise { + const wantPrismaNext = + state.integration === 'prisma-next' || provider.name === 'prisma-next' const stackPresent = isPackageInstalled(STACK_PACKAGE) const cliPresent = isPackageInstalled(CLI_PACKAGE) + 
const prismaNextPresent = wantPrismaNext + ? isPackageInstalled(PRISMA_NEXT_PACKAGE) + : true - // Both already there — silent success, no prompts. - if (stackPresent && cliPresent) { - p.log.success( - `${STACK_PACKAGE} and ${CLI_PACKAGE} are already installed.`, - ) + // Everything already there — silent success, no prompts. + if (stackPresent && cliPresent && prismaNextPresent) { + const installed = wantPrismaNext + ? `${STACK_PACKAGE}, ${PRISMA_NEXT_PACKAGE} and ${CLI_PACKAGE}` + : `${STACK_PACKAGE} and ${CLI_PACKAGE}` + p.log.success(`${installed} are already installed.`) return { ...state, stackInstalled: true, cliInstalled: true } } const pm = detectPackageManager() - const prodPackages = stackPresent ? [] : [STACK_PACKAGE] + const prodPackages: string[] = [] + if (!stackPresent) prodPackages.push(STACK_PACKAGE) + if (wantPrismaNext && !prismaNextPresent) + prodPackages.push(PRISMA_NEXT_PACKAGE) const devPackages = cliPresent ? [] : [CLI_PACKAGE] const commands = combinedInstallCommands(pm, prodPackages, devPackages) diff --git a/packages/cli/src/commands/init/steps/install-eql.ts b/packages/cli/src/commands/init/steps/install-eql.ts index e1bf3cd7..594fce0b 100644 --- a/packages/cli/src/commands/init/steps/install-eql.ts +++ b/packages/cli/src/commands/init/steps/install-eql.ts @@ -27,6 +27,20 @@ export const installEqlStep: InitStep = { name: 'Install EQL extension', async run(state: InitState, provider: InitProvider): Promise { const integration = state.integration ?? 'postgresql' + + // Prisma Next ships the EQL bundle as a baseline migration inside + // `@cipherstash/prisma-next`. `prisma-next migration apply` runs + // it in the same control-plane sweep as the user's application + // migrations — running `stash db install` here would be a + // duplicate install and would race with the framework's + // migration journal. Skip with guidance instead. 
+ if (integration === 'prisma-next' || provider.name === 'prisma-next') { + p.log.success( + 'Skipping `stash db install` — Prisma Next installs the EQL bundle via `prisma-next migration apply` (runs alongside your app migrations).', + ) + return { ...state, eqlInstalled: false } + } + const supabase = integration === 'supabase' || provider.name === 'supabase' const drizzle = integration === 'drizzle' || provider.name === 'drizzle' diff --git a/packages/cli/src/commands/init/types.ts b/packages/cli/src/commands/init/types.ts index e365a857..ed8b69e1 100644 --- a/packages/cli/src/commands/init/types.ts +++ b/packages/cli/src/commands/init/types.ts @@ -1,8 +1,8 @@ -import type { PlanStep } from './lib/parse-plan.js' import type { AgentEnvironment } from './detect-agents.js' +import type { PlanStep } from './lib/parse-plan.js' import type { PackageManager } from './utils.js' -export type Integration = 'drizzle' | 'supabase' | 'postgresql' +export type Integration = 'drizzle' | 'supabase' | 'prisma-next' | 'postgresql' export type DataType = 'string' | 'number' | 'boolean' | 'date' | 'json' diff --git a/packages/prisma-next/DEVELOPING.md b/packages/prisma-next/DEVELOPING.md new file mode 100644 index 00000000..9fb45199 --- /dev/null +++ b/packages/prisma-next/DEVELOPING.md @@ -0,0 +1,341 @@ +# Developing `@cipherstash/prisma-next` + +Contributor-facing notes for the cipherstash extension. The user-facing +surface lives in [`README.md`](./README.md); this file collects the +internal layout, the substrate architecture, the per-codec wiring +template, and the design choices a contributor needs to know when +extending the package. 
+ +## Source layout + +```text +packages/3-extensions/cipherstash/ +├── contract.{json,d.ts} emitted contract-space artefacts +├── migrations/cipherstash/ emitted on-disk migrations +├── refs/head.json hand-pinned contract-space head ref +└── src/ + ├── contract/ + │ ├── authoring.ts cipherstash.Encrypted() PSL constructors (six) + │ └── contract.d.ts contract-space declaration + ├── execution/ + │ ├── envelope-base.ts EncryptedEnvelopeBase shared substrate + │ ├── envelope-string.ts EncryptedString (extends base) + │ ├── envelope-double.ts EncryptedDouble + │ ├── envelope-bigint.ts EncryptedBigInt + parseDecryptedValue override + │ ├── envelope-date.ts EncryptedDate + parseDecryptedValue override + │ ├── envelope-boolean.ts EncryptedBoolean + │ ├── envelope-json.ts EncryptedJson + │ ├── cell-codec-factory.ts makeCipherstashCellCodec({...}) factory + │ ├── codec-runtime.ts createCipherstashStringCodec(sdk) string-only constructor + │ ├── parameterized.ts RuntimeParameterizedCodecDescriptor for all six + │ ├── operators.ts 13 predicate operators + asEncryptedParam dispatch + │ ├── helpers.ts 4 free-standing helpers (Asc / Desc / JsonbPath…) + │ ├── decrypt-all.ts opt-in read-side bulk-decrypt walker + │ ├── routing.ts physical-column-name routing-key helpers + │ ├── sdk.ts CipherstashSdk interface (framework-native) + │ └── abort.ts RUNTIME.ABORTED envelope wrappers + ├── extension-metadata/ + │ ├── constants.ts codec ids, EQL native type, CIPHERSTASH_CODEC_IDS tuple, + │ │ isCipherstashCodecId guard, namespaced-trait casts + │ ├── codec-metadata.ts SDK-free codec instances for pack-meta authoring + │ └── descriptor-meta.ts cipherstashPackMeta + authoring + storage + codec instances + ├── middleware/ + │ └── bulk-encrypt.ts bulkEncryptMiddleware(sdk) + stampRoutingKeysFromAst + ├── migration/ + │ ├── codec-hooks-factory.ts makeCipherstashCodecHooks({...}) factory (per codec) + │ ├── cipherstash-codec.ts cipherstashStringCodecHooks string-only hook bundle + │ ├── 
call-classes.ts CipherstashAddSearchConfigCall / RemoveSearchConfigCall + │ ├── eql-bundle.ts EQL install SQL (vendored byte-for-byte) + │ └── eql-install.generated.ts generated EQL install op definitions + ├── types/ + │ ├── codec-types.ts CipherstashCodecTypes interface (decode return types) + │ └── operation-types.ts QueryOperationTypes augmentation (column-method surface) + └── exports/ + ├── control.ts SqlControlExtensionDescriptor (control-plane entry) + ├── runtime.ts Envelope classes + SDK + codec runtime + decryptAll + + │ 4 free-standing helpers (runtime entry) + ├── middleware.ts bulkEncryptMiddleware (runtime middleware entry) + ├── migration.ts call-classes re-export + ├── pack.ts cipherstashPackMeta default export (TS contract authoring) + ├── column-types.ts 6 TS contract factories (encryptedString / Double / …) + ├── codec-types.ts codec-types augmentation re-export + ├── operation-types.ts operation-types augmentation re-export + └── contract-space-typing.ts helper types for contract-space consumers +``` + +## Substrate architecture + +The package centres on a shared substrate that lets every cipherstash codec be one factory call away from the same shape. Three substrate factories carry the load: + +### `EncryptedEnvelopeBase` — shared envelope superclass + +`packages/3-extensions/cipherstash/src/execution/envelope-base.ts` exports an abstract `EncryptedEnvelopeBase` class that holds the `#`-prefixed `EncryptedHandle` slot and ships the five redaction overrides (`toJSON`, `toString`, `valueOf`, `Symbol.toPrimitive`, `Symbol.for('nodejs.util.inspect.custom')`), `expose()`, `decrypt({ signal? })`, and the post-decrypt plaintext cache. + +Each concrete subclass: + +- Holds nothing of its own beyond a static `from(plaintext: T): Self` and `fromInternal(args): Self`. +- May override `parseDecryptedValue(plaintext: unknown): T` when the SDK round-trips through a JS type that differs from the envelope's `T`. 
`EncryptedBigInt` overrides this to coerce SDK `number | string` → `bigint`; `EncryptedDate` overrides it to coerce ISO strings → `Date`. + +The base class also stamps the redacted JSON placeholder per subclass (`{ "$encryptedString": "<redacted>" }` vs `{ "$encryptedBigInt": "<redacted>" }`) so accidental `JSON.stringify` paths reveal the *type* but not the *value*. + +### `makeCipherstashCellCodec({...})` — runtime cell-codec factory + +`src/execution/cell-codec-factory.ts` exports a single factory that builds a `cipherstash/<type>@1` `CellCodec` given: + +- The `codecId` to register under. +- The envelope subclass's `fromInternal({ ciphertext, table, column, sdk })` constructor — picked up by reference, not by ID. +- The Postgres native type (`eql_v2_encrypted` for every cipherstash codec). +- The static `traits: []` declaration (the wrong-SQL-footgun protection — see [ADR 214](../../../docs/architecture%20docs/adrs/ADR%20214%20-%20Extension%20operator%20surface%20namespaced%20replacement%20operators.md)). + +Per-codec `encode(envelope, ctx)` and `decode(wire, ctx)` bodies are the same shape across all six codecs: encode reads the envelope handle's ciphertext (already populated by the bulk-encrypt middleware) and wraps it in the `eql_v2_encrypted` composite text format; decode constructs the right envelope subclass via the captured `fromInternal` constructor. + +### `makeCipherstashCodecHooks({ flagToIndex, castAs })` — codec lifecycle hook factory + +`src/migration/codec-hooks-factory.ts` exports the factory that builds a `CodecControlHooks` instance given: + +- A `flagToIndex` map from the codec's search-mode flags to EQL search-config index names (e.g. `{ equality: 'unique', orderAndRange: 'ore' }`). +- The EQL `cast_as` value (`'text'`, `'double'`, `'big_int'`, `'date'`, `'boolean'`, `'jsonb'`).
+ +The factory's returned hook reads `typeParams` off the column (the validated cipherstash search-mode flags) and emits one `cipherstashAddSearchConfig(table, column, index)` op per enabled flag at field-added events, and the corresponding `cipherstashRemoveSearchConfig(...)` at field-dropped events. Flag flips (`true → false` between contract versions) emit a removal at the field-altered event. The framework's destructive-op classification surfaces removals via the standard planner mechanisms — no cipherstash-specific warning policy. + +## Per-codec wiring template + +Adding a new cipherstash codec (e.g. a hypothetical `cipherstashInt` for non-bigint integer support) touches the following files **in this order**. Each step is one or two lines; the substrate factories carry the variable shape. + +1. **Constants** (`src/extension-metadata/constants.ts`). Add the codec id (`'cipherstash/int@1'`), append to the `CIPHERSTASH_CODEC_IDS` stable-order tuple, and the closed-union `CipherstashCodecId` widens automatically. The `isCipherstashCodecId` guard picks up the new entry through the constant tuple. + +2. **Envelope class** (`src/execution/envelope-<type>.ts`). New file extending `EncryptedEnvelopeBase<T>` where `T` is the new codec's JS plaintext type. Add a `parseDecryptedValue` override only if the SDK round-trip introduces a type mismatch (e.g. `EncryptedBigInt`'s `number | string → bigint` coercion). Re-export from `src/exports/runtime.ts`. + +3. **Cell-codec factory call** (`src/execution/parameterized.ts`). One factory invocation: `makeCipherstashCellCodec({ codecId, fromInternal: EncryptedInt.fromInternal, ... })`. The parameterized-descriptor registration in the same file picks it up. + +4. **Codec lifecycle hooks** (`src/migration/codec-hooks-factory.ts` consumer; new constant in the same file or co-located). One factory invocation: `cipherstashIntCodecHooks = makeCipherstashCodecHooks({ flagToIndex: { equality: 'unique', orderAndRange: 'ore' }, castAs: 'int' })`.
Add it to the hook export. + +5. **PSL constructor** (`src/contract/authoring.ts`). Add a `cipherstash.EncryptedInt` constructor descriptor mirroring the others. The arktype params schema (validating the codec's search-mode flags) goes alongside. + +6. **TS factory** (`src/exports/column-types.ts`). Add `encryptedInt({...})` mirroring `encryptedBigInt`. Defaults map every search-mode flag to `true`. + +7. **Parameterized codec descriptor** (`src/execution/parameterized.ts`). Add the new codec to `createParameterizedCodecDescriptors(sdk)` so the per-tenant SDK binding reaches it. + +8. **Operator type-visibility** (`src/types/operation-types.ts`). Add the new codec id to whichever `QueryOperationTypes` entries the new codec should surface predicates from. Trait-keyed entries (the multi-codec predicates: `cipherstashEq`, `cipherstashGt`, etc.) pick it up automatically through the `cipherstash:`-namespaced trait dispatch. + +9. **Codec-types augmentation** (`src/types/codec-types.ts`). Add an entry mapping the new codec id to the envelope class's decode-side TypeScript type (used by the framework's decode-result typing). + +10. **Pack-meta authoring** (`src/extension-metadata/descriptor-meta.ts`). Append the new authoring entry + storage entry + codec instance to `cipherstashPackMeta`. + +11. **Parity fixture** (`test/integration/test/authoring/parity/cipherstash-encrypted-<type>/`). New PSL + TS contract pair authoring the same column under the new codec; pinned by the shared parity harness. + +12. **Codec-specific tests** (`test/envelope-<type>.test.ts`, `test/operator-lowering.test.ts` extension). Cover the envelope's redaction overrides + `parseDecryptedValue` if present, and the per-codec predicate lowerings. + +The order is mechanical; the substrate factories are the leverage that makes adding a new codec a ~20-line change across these files.
+ +## The operator surface — predicate vs helper + +The 17 cipherstash operators decompose along the framework's predicate / non-predicate split per [ADR 214 — Extension operator surface](../../../docs/architecture%20docs/adrs/ADR%20214%20-%20Extension%20operator%20surface%20namespaced%20replacement%20operators.md). + +### Predicate operators — column-method surface + +Return `Expression<{codecId: 'pg/bool@1', nullable: ...}>`. Surface as column methods through the operation registry; the model accessor synthesises them onto columns whose codec carries the required `cipherstash:*` trait. + +- **Source**: `src/execution/operators.ts` (one factory per predicate, registered through the framework's `OperationRegistry` SPI). +- **Dispatch**: trait- or codec-id-keyed entries in `QueryOperationTypes` (`src/types/operation-types.ts`). Multi-codec predicates (`cipherstashEq`, `cipherstashGt`, `cipherstashLt`, etc.) key off `cipherstash:equality` / `cipherstash:order-and-range` so a new codec advertising those traits picks up the predicates automatically. +- **Encoded-arg path**: `asEncryptedParam(plaintext, columnRef)` dispatches on the column's codec id to construct the right envelope subclass; the dispatch table is typed `Readonly>` over the closed-union `CipherstashCodecId` so a new codec id without a matching dispatch entry is a TS error. The envelope's handle carries the column's `(table, column)` routing key from the `ParamRef.of({ refs: { table, column } })` call site so the bulk-encrypt middleware can group it correctly. + +### Free-standing helpers — non-predicate surface + +Return non-boolean shapes: `OrderByItem` for sort, `Expression` for SELECT-expression accessors. + +- **Source**: `src/execution/helpers.ts`. Each helper is a pure function exported from `@cipherstash/prisma-next/runtime`. +- **Dispatch**: none. The helpers are typed at their function-declaration site; there is no registry participation. 
Calls like `cipherstashAsc(u.salary)` validate the column's codec id at runtime via `getCodecId(col, helperName)` and throw a descriptive `TypeError` on mismatch. +- **AST primitives**: sort helpers return `OrderByItem.asc/desc(col.buildAst())` directly (bare-column form; EQL's native operator overloads on `eql_v2_encrypted` handle the comparison at the Postgres level). JSON helpers construct an `Expression`-shaped `OperationExpr` via `buildOperation({ method, args, returns, lowering })` — the same framework primitive that powers the predicate registrations. +- **No `QueryOperationTypes` entry** — by design. The split is documented in `src/execution/helpers.ts`'s top-of-file docblock and the per-helper JSDoc. + +## Cipherstash-namespaced traits + +The cipherstash codecs use `cipherstash:`-prefixed traits exclusively — `cipherstash:equality`, `cipherstash:order-and-range`, `cipherstash:free-text-search`, `cipherstash:searchable-json`. These sit *outside* the framework's closed `CodecTrait` union ([ADR 202](../../../docs/architecture%20docs/adrs/ADR%20202%20-%20Codec%20trait%20system.md)) deliberately: + +- The framework union is closed for the built-in trait set so trait-gated synthesis can reason exhaustively. A cipherstash codec advertising the framework's `equality` trait would mean the built-in `m.col.eq(...)` synthesises on cipherstash columns and lowers to SQL `=` against a randomised EQL ciphertext — the wrong-SQL footgun this design closes. +- Extension traits are open-ended — they're per-extension capability declarations the framework does not need to recognise. The cipherstash operator registry consumes them; the framework's `eq`-synthesis path does not. 
+ +The cast from extension trait names to the framework-internal `CodecTrait` array shape is localised to one site at `src/extension-metadata/constants.ts` with a rationale comment citing the framework type, the model-accessor's `readonly string[]` widening at the dispatch site, and the wrong-SQL-`eq` footgun rationale. This is the only `as unknown as ...` cast in the package; all other type discipline is explicit. + +A pinned regression test at `test/equality-trait-removal.test.ts` asserts every cipherstash codec's `traits` array contains only `cipherstash:`-namespaced strings — catches a regression where someone re-introduces the framework `equality` trait by accident. + +## The `parseDecryptedValue` hook contract + +`EncryptedEnvelopeBase` exposes a protected `parseDecryptedValue(plaintext: unknown): T` hook that subclasses override when the SDK round-trips through a JS type that differs from the envelope's `T`. + +Used by: + +- The single-cell `decrypt({ signal? })` path on the envelope itself. +- The `decryptAll(rows)` walker — every `(sdk, table, column)` group's `bulkDecrypt` returns `ReadonlyArray<JsPlaintext>`; the walker invokes `envelope.parseDecryptedValue(plaintexts[i])` per entry before caching the result on the envelope's handle. + +The hook defaults to an identity cast (`plaintext as T`) so the common-case envelopes (`EncryptedString` for `string`, `EncryptedDouble` for `number`, `EncryptedBoolean` for `boolean`) need no override. + +Subclasses that override: + +- **`EncryptedBigInt`** — the `@cipherstash/stack` SDK's `JsPlaintext` union does not include `bigint`. The example app's SDK adapter converts `bigint → Number` with a `Number.MAX_SAFE_INTEGER` bounds check on the encrypt side; `EncryptedBigInt.parseDecryptedValue` coerces back via `BigInt(plaintext)` and accepts either `number` or `string` per the SDK's polymorphic return shape. +- **`EncryptedDate`** — accepts ISO-8601 strings from the SDK round-trip and returns a `Date` instance.
+- **`EncryptedJson`** — defaults to identity (the SDK returns the parsed JSON value as-is). + +## Runtime-side gotchas + +### Physical column-name routing keys + +The framework lowers the user's PSL field names through any `@map(...)` directives before middleware sees `ParamRef`s. The cipherstash bulk-encrypt middleware therefore receives **physical column names** (e.g. `accountid` rather than the PSL `accountId`), and the SDK's `bulkEncrypt(routingKey: { table, column })` round-trip is keyed on the physical name. The example app's SDK adapter at `examples/cipherstash-integration/src/sdk.ts` keeps its `tableRegistry` keyed by physical names to match. + +This is structural — the routing key has to agree between the cipherstash bulk-encrypt middleware (which sees the lowered SQL) and the SDK's per-column EQL index lookup (which reads the schema-time physical name). The decoded envelope's `(table, column)` slot likewise carries the physical name. + +### `bigint` SDK boundary + +`@cipherstash/stack`'s SDK and ZeroKMS only accept `JsPlaintext = string | number | boolean | object | array` for plaintexts (no `bigint`). For `EncryptedBigInt`: + +- **Encrypt side** (example app's SDK adapter, `examples/cipherstash-integration/src/sdk.ts`): converts `bigint → Number` with an eager `Number.MAX_SAFE_INTEGER` bounds check (throws on overflow). Values beyond the safe-integer range cannot be encrypted today. +- **Decrypt side** (envelope subclass, `src/execution/envelope-bigint.ts`): `parseDecryptedValue` accepts either `number` or `string` from the SDK and coerces back to `bigint` via the `BigInt(plaintext)` constructor. + +This is a known limitation — lifting requires upstream SDK / ZeroKMS work. + +### Polymorphic `CipherstashSdk.decrypt` return type + +`CipherstashSdk.bulkDecrypt(...)` returns `Promise<ReadonlyArray<JsPlaintext>>`.
The polymorphic return type is deliberate — the SDK round-trips a heterogeneous mix of plaintext shapes (`string | number | boolean | object | array`) and the example app's adapter mirrors that. + +One small follow-up: the single-cell `CipherstashSdk.decrypt(...)` return type is currently typed `Promise<string>` from the original string-only contract. A widening to `Promise<JsPlaintext>` would match the bulk shape and remove a runtime narrowing cast in `EncryptedEnvelopeBase.decrypt`. Filed as a one-line interface follow-up; tracked under the cipherstash-integration umbrella. + +## Write-path lifecycle — two-pass codec encode + middleware rewrite + +The SQL family runtime calls `lower` (which calls `encodeParams`, which calls each codec's `encode`) **before** running the `beforeExecute` middleware chain. The cipherstash codec therefore *cannot* read `handle.ciphertext` during `encode` on the write path — the bulk-encrypt middleware hasn't run yet and the envelope only carries plaintext at this point. The package handles this as a deliberate two-pass design: + +1. **First pass — `CipherstashCellCodec#encode`** (in `lower`/`encodeParams`). Reached with `handle.ciphertext === undefined`. The codec returns the envelope itself as a sentinel; the SQL runtime treats it as the param's current value but doesn't unwrap it further. If the codec's SDK has no `bulkEncryptMiddleware(sdk)` registered for it (see `src/execution/middleware-registry.ts`), encode throws a `RUNTIME.ENCODE_FAILED` envelope here with a copy-pasteable wiring snippet — that's the loud-failure mode for a misconfigured runtime. + +2. **Second pass — `bulkEncryptMiddleware#beforeExecute`**.
Iterates the param entries, collects every cipherstash envelope, groups by `(table, column)`, issues one `sdk.bulkEncrypt(...)` per group, stamps each returned ciphertext onto the envelope's handle (so synchronous `envelope.decrypt()` still works), and calls `params.replaceValues(...)` with the **wire-format string** (the `eql_v2_encrypted` composite-text literal produced by `encodeEqlV2EncryptedWire(ciphertext)`). The driver reads `currentParams()` after the middleware chain, so it sees a string. + +The read path degenerates to a single pass — when `handle.ciphertext` is already set (the envelope was just decoded from a `SELECT`, or it was carried across queries), `encode` returns the wire format directly without ever consulting the middleware. + +Why the codec returns the envelope rather than a placeholder string: the bulk-encrypt middleware needs the envelope instance to read the plaintext slot, look up routing metadata, and stamp the ciphertext back. Returning an opaque placeholder would force a second registry to map param positions to envelopes. + +Why the middleware writes the wire-format string into `params.replaceValues` rather than the envelope: the pg driver only serialises primitives / arrays / Buffers. Handing it the envelope (an `EncryptedEnvelopeBase` instance) would surface an opaque driver error. + +Tests pin both halves: `test/codec-runtime.test.ts` covers the encode-returns-envelope contract and the missing-middleware diagnostic; `test/bulk-encrypt-middleware.test.ts` covers the wire-format `replaceValues` payload. + +## Other design choices worth knowing + +### Handle storage — SecretBox-style `#` field with redacting overrides + +Every `EncryptedEnvelopeBase` instance holds the `EncryptedHandle` on a single `#`-prefixed class field. The plaintext and ciphertext are reachable through an explicit `envelope.expose()` accessor — that's the deliberate seam for callers who genuinely want the inner values. 
Every implicit serialization / coercion path (`toJSON`, `toString`, `valueOf`, `Symbol.toPrimitive`, `Symbol.for('nodejs.util.inspect.custom')`) returns a `[REDACTED]` placeholder (or, for `toJSON`, a typed `{ "$encrypted": "" }` placeholder) so accidental `console.log`, `JSON.stringify`, template-literal interpolation, error string construction, and `util.inspect` paths cannot leak plaintext. + +The encapsulation is deliberately not airtight (we do not use a closure-scoped `WeakMap` to hide the storage entirely) — the goal is to make plaintext access **explicit** at the call site, not **impossible**. Callers who need to round-trip envelopes across a network boundary can opt in via `envelope.expose()`. + +### Plaintext is retained post-encrypt + +The bulk-encrypt middleware populates the handle's ciphertext slot but does **not** zero the plaintext slot. Zeroing in JS is best-effort (strings are immutable) and the GC-driven lifecycle is sufficient. As a side effect, a write-side envelope's `decrypt()` returns the original plaintext synchronously without an SDK round-trip. + +### Codec is constructed per SDK binding + +The factory `createParameterizedCodecDescriptors(sdk)` is called per tenant — the codec's `decode` body captures the SDK so the read-side envelope can issue `decrypt({ signal? })` against it. This differs from pgvector (whose codec is fully stateless and *can* be a module singleton) but aligns with multi-tenant deployments constructing one extension descriptor per tenant. The seam is tracked at [TML-2388 — Codec-SDK binding refactor](https://linear.app/prisma-company/issue/TML-2388). + +### SDK-free metadata codec for pack-meta + +`src/extension-metadata/codec-metadata.ts` ships an SDK-free codec used in `cipherstashPackMeta.types.codecTypes.codecInstances`. Pack-meta consumers only read codec metadata (`typeId`, `targetTypes`, `traits`, `renderOutputType`) at contract emit time — they never call `encode`/`decode`. 
Keeping the metadata codec separate from the SDK-bound runtime codec preserves the control vs runtime split: control-plane consumers (`exports/control.ts`, `exports/pack.ts`) pull this file but never the envelope subclasses, the SDK interface, or the codec runtime. + +### `CipherstashSdk` is framework-native, not the upstream SDK shape + +The interface declares three async methods (`decrypt`, `bulkEncrypt`, `bulkDecrypt`), each accepting an optional `AbortSignal`. The values are typed polymorphically (`unknown` for the bulk paths). This is deliberately smaller than CipherStash's upstream `EncryptionClient` (rich `EncryptOperation` / `LockContext` / lazy-init machinery) so real-world usage wraps the upstream client behind a thin adapter satisfying `CipherstashSdk`. Keeps the framework-side surface free of upstream-specific types. + +### `decryptAll(rows, opts?)` — opt-in read-side amortisation + +The cell codec's `decode` returns envelope subclasses that defer their SDK round-trip until `envelope.decrypt(...)` is awaited; this keeps SELECT plans cheap when consumers only need a subset of encrypted columns or when consumers want to forward envelopes to a downstream service without ever reading the plaintext. + +`decryptAll(rows)` is the read-side amortisation for the case where the consumer DOES want plaintexts: it walks the result-set graph (arrays, plain objects, nested envelopes; cycle-safe; skips already-decrypted envelopes; passes over exotic containers like `Date` / `Map` / `Set` / `Uint8Array`), partitions the discovered envelopes by `(sdk identity, table, column)`, and issues one `bulkDecrypt` SDK call per partition. The resolved plaintexts pass through each envelope's `parseDecryptedValue(...)` hook and cache back onto each envelope's handle so subsequent `envelope.decrypt()` calls return synchronously. 
Already-decrypted envelopes (write-side envelopes from `Encrypted.from(plaintext)`, or read-side envelopes that already cached a plaintext) are not re-decrypted — a re-run of `decryptAll` over a previously-decrypted result set is a no-op. + +The walker is intentionally narrow: traversing arbitrary graphs (JS-side `Map` / `Set` / `Date` containers) is out of scope and loud-skipped — embedding an envelope inside a `Map` value will not be discovered by the walker. Consumers needing such shapes should call `envelope.decrypt(...)` directly. + +### Control vs runtime tree-shaking architecture + +The package publishes three runtime-relevant subpath entries — `./control` (contract-space authoring + the codec lifecycle hooks), `./runtime` (envelope subclasses + SDK + codec runtime + `decryptAll` + free-standing helpers), and `./middleware` (bulk-encrypt middleware) — and each composes tree-shakably so a consumer pulling `./runtime` does not drag in the EQL bundle SQL or the codec lifecycle hooks (which would defeat the runtime-bundle size budget and leak control-plane behaviour into runtime call paths) and a consumer pulling `./control` does not drag in the runtime envelopes, the SDK interface, the codec runtime, or the bulk-encrypt middleware. + +The split lives in the source layout: `src/exports/control.ts` only imports from `src/contract/`, `src/migration/`, `src/extension-metadata/`, and never from `src/execution/{envelope*, codec-runtime, parameterized, decrypt-all, helpers, operators}` nor from `src/middleware/`. `src/exports/runtime.ts` / `src/exports/middleware.ts` only import from the runtime-side source modules (and the shared `extension-metadata/constants.ts`). The `test/bundling-isolation.test.ts` guard pins this byte-level — asserting the entry `.mjs` files don't carry forbidden symbols and that the transitively-reached chunk-file sets are disjoint modulo the shared `constants-*.mjs` chunk. 
+ +The shared `constants-*.mjs` chunk is structurally permitted to live in both planes — it carries pure literal constants (codec ids, native types, invariant ids, the `CIPHERSTASH_CODEC_IDS` tuple, the `isCipherstashCodecId` guard) and no executable behaviour. + +The cross-package convention (source-level discipline + bundling-isolation test, with rationale and assertion strategies) is documented in the extension-packs reference doc at [Extension-Packs-Naming-and-Layout § Tree-shakability between control and runtime planes](../../../docs/reference/Extension-Packs-Naming-and-Layout.md); this package is the worked example for that section. + +## Tracked follow-ups + +| Linear ticket | Surface | +| --- | --- | +| [TML-2388](https://linear.app/prisma-company/issue/TML-2388) | Codec-SDK binding refactor — pull the per-tenant SDK binding out of the codec factory closure into the descriptor seam so multi-tenant deployments don't re-author the codec per tenant. | +| Polymorphic `CipherstashSdk.decrypt` return type | One-line interface widening from `Promise<string>` to `Promise<unknown>` to mirror the bulk shape; removes a narrowing cast in `EncryptedEnvelopeBase.decrypt`. | +| [TML-2504 — Cipherstash JSONB path-exists predicate: STE-VEC selector hashing](https://linear.app/prisma-company/issue/TML-2504) | `cipherstashJsonbPathExists` against the live EQL bundle expects a hashed STE-VEC selector computed via the CipherStash SDK's `selector(...)` API; the framework currently binds the JSONpath as a plain `pg/text@1` `ParamRef`. Round-trip and the two SELECT-expression helpers (`cipherstashJsonbPathQueryFirst`, `cipherstashJsonbGet`) work; the predicate clause returns zero rows. Resolution requires either a client-side path-hashing middleware or an EQL-side plaintext-path overload. 
| + +## Behavioural invariants pinned by tests + +The following user-facing behaviours are pinned by on-disk tests in `test/` (package-level) and `test/integration/test/authoring/parity/` (cross-package parity harness). This section is the canonical statement of what the package guarantees; if you find yourself loosening one of these, that's the signal to add a regression test alongside. + +### Envelope substrate + +- `EncryptedEnvelopeBase` ships the `#`-prefixed handle slot + five redaction overrides + `expose()` + `decrypt({ signal? })` + `parseDecryptedValue` hook. Every concrete envelope subclass extends it. +- `Encrypted.from(plaintext)` returns a write-side envelope carrying the plaintext on its handle whose ciphertext slot is unfilled until the bulk-encrypt middleware runs. +- `envelope.decrypt({ signal? })` returns plaintext via the SDK's single-cell `decrypt`; `signal` is forwarded by identity (the slot is omitted when `signal` is undefined, preserving `exactOptionalPropertyTypes`). +- After `decryptAll(...)` returns, every touched envelope's `decrypt()` returns the cached plaintext synchronously without consulting the SDK. +- The handle has no public TypeScript surface; pinned per-subclass by `test/envelope-*.test.ts` runtime tests asserting `Object.keys(envelope) === []` and `JSON.stringify(envelope)` produces the documented redacted placeholder. + +### Codec runtime + +- Six codecs (`cipherstash/string@1`, `cipherstash/double@1`, `cipherstash/bigint@1`, `cipherstash/date@1`, `cipherstash/boolean@1`, `cipherstash/json@1`) all registered with target type `eql_v2_encrypted` and `traits: []`. +- `decode(wire, ctx)` builds the right envelope subclass whose handle carries `(table, column)` from `ctx.column`. 
+- `encode(envelope, ctx)` runs in two-pass mode (see "Write-path lifecycle" above): returns the envelope itself when `handle.ciphertext` is unset (the middleware will rewrite the param slot to the wire-format string), wraps the ciphertext in the `eql_v2_encrypted` composite-text literal when `handle.ciphertext` is set. +- When `handle.ciphertext` is unset AND no `bulkEncryptMiddleware(sdk)` has been constructed against the codec's SDK, encode throws a `RUNTIME.ENCODE_FAILED` envelope with a copy-pasteable wiring snippet. +- `renderOutputType` returns the codec's envelope class name. +- `RuntimeParameterizedCodecDescriptor` per codec, each with its own arktype `paramsSchema` validating that codec's search-mode flags. + +### Bulk-encrypt middleware + +- For N rows × 1 cipherstash column sharing one routing key, exactly one `bulkEncrypt` SDK call per `(table, column)` group. +- For M cipherstash columns across rows, exactly M `bulkEncrypt` calls. +- The middleware writes the encoded **wire-format string** (`encodeEqlV2EncryptedWire(ciphertext)`) into each ParamRef's value slot via `params.replaceValues(...)`. The envelope's ciphertext slot is **also** stamped via `setHandleCiphertext` so any follow-on read off the same envelope skips a re-encrypt round-trip; the plaintext slot is retained. +- Constructing `bulkEncryptMiddleware(sdk)` marks the SDK as registered in the per-process `WeakSet` at `src/execution/middleware-registry.ts`, which the codec's misconfig diagnostic consults. +- `ctx.signal` forwarded by identity to `bulkEncrypt`; cancellation observable downstream. + +### Operator lowering + +- 13 predicate operators (`cipherstashEq` / `Ne` / `InArray` / `NotInArray` / `Ilike` / `NotIlike` / `Gt` / `Gte` / `Lt` / `Lte` / `Between` / `NotBetween` / `JsonbPathExists`) lower to the corresponding `eql_v2.*` function calls. Each is trait- or codec-id-gated. 
+- 4 free-standing helpers (`cipherstashAsc` / `Desc` / `JsonbPathQueryFirst` / `JsonbGet`) return `OrderByItem` / `Expression`. Sort uses the bare-column form (no `eql_v2.order_by_(col)` wrapping); JSON helpers construct `Expression`-shaped `OperationExpr` via `buildOperation({...})`. +- `m.col.isNull()` / `m.col.isNotNull()` lower to `m.col IS NULL` / `IS NOT NULL` directly via the framework's `NullCheckExpr`; no EQL involvement, no parameter binding. The operator registry is not consulted. +- `m.col.eq(...)` is unreachable on cipherstash columns at the model accessor (compile-time + runtime) — codec declares zero of the framework's built-in traits at all three sites (`codec-runtime.ts` / `codec-metadata.ts` / `parameterized.ts`). Pinned by `test/equality-trait-removal.test.ts`. + +### `decryptAll` walker + +- Walks recursively (objects, arrays, nested envelopes) and decrypts every cipherstash envelope it finds. Skips already-cached envelopes; passes over exotic containers (`Date`, `Map`, `Set`, `Uint8Array`); cycle-safe. +- For K envelopes across distinct routing keys, exactly one `bulkDecrypt` per `(sdk, table, column)` group. +- After return, every touched envelope's `decrypt()` returns the cached plaintext synchronously without consulting the SDK. +- `opts.signal` forwarded by identity to the SDK on every `bulkDecrypt` call. The slot is omitted from the SDK call when `opts.signal` is undefined. + +### Cancellation envelope + +- `RUNTIME.ABORTED` envelope wrapping at every cipherstash-internal phase (`bulk-encrypt`, `decrypt`, `decrypt-all`). Mirrors the framework's `runtimeError(RUNTIME_ABORTED, ...)` envelope shape exactly; only the legal `details.phase` string set widens (the cipherstash phase strings are not added to the framework's `RuntimeAbortedPhase` union). Codec encode/decode are intentionally left unwrapped — the framework's `encodeParams` / `decodeRow` per-cell race already raises `RUNTIME.ABORTED { phase: 'encode' | 'decode' }` per ADR 207. 
+ +### Authoring parity + +- TS contract authoring (`encrypted({...})`) produces a `contract.json` byte-identical to the PSL version (`cipherstash.Encrypted({...})`) for every codec. Pinned by the parity fixtures at `test/integration/test/authoring/parity/cipherstash-encrypted-{string,double,bigint,date,boolean,json}/`. + +### Live e2e umbrella round-trips + +- One `*.e2e.test.ts` per codec under `examples/cipherstash-integration/test/e2e/` exercising the insert → `cipherstash` → optional `cipherstashAsc/Desc` → `decryptAll` round-trip against a live Postgres + EQL + ZeroKMS environment. +- A mixed-codec round-trip exercises four codecs (string + double + bigint + date) in one query, asserting the bulk-encrypt middleware coalesces one SDK call per `(table, column)` group. +- A `*.e2e.json.e2e.test.ts` covers the JSON codec's round-trip and the two SELECT-expression helpers; the `JsonbPathExists` predicate clause is skipped pending the STE-VEC selector hashing follow-up (see Tracked follow-ups). + +### Layering + bundling + +- `pnpm lint:deps` clean for the package's subtree. +- Strict `dbInit` preserved — no `strictVerification: false` anywhere in the cipherstash subtree. +- Tree-shakable control vs runtime / middleware planes pinned by `test/bundling-isolation.test.ts` (entry-body forbidden-substring check + chunk-graph disjointness modulo the shared `constants-*.mjs` chunk). + +## References + +- [pgvector extension](../pgvector/README.md) — the structural precedent for codec, parameterized descriptor, and pack-meta layout. +- [ADR 202 — Codec trait system](../../../docs/architecture%20docs/adrs/ADR%20202%20-%20Codec%20trait%20system.md). +- [ADR 207 — Codec call context per-query AbortSignal and column metadata](../../../docs/architecture%20docs/adrs/ADR%20207%20-%20Codec%20call%20context%20per-query%20AbortSignal%20and%20column%20metadata.md). 
+- [ADR 208 — Higher-order codecs for parameterized types](../../../docs/architecture%20docs/adrs/ADR%20208%20-%20Higher-order%20codecs%20for%20parameterized%20types.md). +- [ADR 212 — Contract spaces](../../../docs/architecture%20docs/adrs/ADR%20212%20-%20Contract%20spaces.md). +- [ADR 213 — Codec lifecycle hooks](../../../docs/architecture%20docs/adrs/ADR%20213%20-%20Codec%20lifecycle%20hooks.md). +- [ADR 214 — Extension operator surface: namespaced replacement operators and the predicate/helper split](../../../docs/architecture%20docs/adrs/ADR%20214%20-%20Extension%20operator%20surface%20namespaced%20replacement%20operators.md). +- [ADR 215 — Runtime middleware lifecycle: `beforeExecute` fires before `encodeParams`](../../../docs/architecture%20docs/adrs/ADR%20215%20-%20Runtime%20middleware%20lifecycle%20beforeExecute%20before%20encodeParams.md). diff --git a/packages/prisma-next/README.md b/packages/prisma-next/README.md new file mode 100644 index 00000000..c3484b80 --- /dev/null +++ b/packages/prisma-next/README.md @@ -0,0 +1,126 @@ +# @cipherstash/prisma-next + +**Searchable field-level encryption for Postgres with [Prisma Next](https://www.npmjs.com/package/@prisma-next/cli)** — via the [EQL bundle](https://cipherstash.com/docs/stack/platform/eql). + +Declare encrypted columns directly in `schema.prisma`, and the framework's migration system installs the EQL bundle in the same control-plane sweep that creates your tables. No separate "install EQL" step. 
+ +📖 **[Full documentation →](https://cipherstash.com/docs/stack/cipherstash/encryption/prisma-next)** + +## Features + +- 🔒 Six encrypted column types — string, double, bigint, date, boolean, JSON +- 🔍 Searchable encryption — equality, free-text search (ILIKE), range, order, JSON path +- 🎯 17 type-safe query operators (`cipherstashEq`, `cipherstashIlike`, `cipherstashGt`, `cipherstashAsc`, …) +- ⚡ Bulk encrypt / bulk decrypt coalescing — one SDK round-trip per `(table, column)` group per query +- 🧩 One-call setup via `cipherstashFromStack({ contractJson })` — no duplicate stack schema to maintain +- 🛡️ Plaintext redaction on every implicit serialisation path (`toJSON`, `toString`, `util.inspect`, …) + +## Installation + +```bash +npm install @cipherstash/stack @cipherstash/prisma-next +``` + +## Quick start + +```prisma +// prisma/schema.prisma +model User { + id String @id + email cipherstash.EncryptedString() + salary cipherstash.EncryptedDouble() + birthday cipherstash.EncryptedDate() + preferences cipherstash.EncryptedJson() +} +``` + +```typescript +// prisma-next.config.ts +import cipherstash from "@cipherstash/prisma-next/control" +// ... other imports +export default defineConfig({ + // ... 
family, target, adapter, contract + extensionPacks: [cipherstash], +}) +``` + +```typescript +// src/db.ts +import "dotenv/config" +import { cipherstashFromStack } from "@cipherstash/prisma-next/stack" +import postgres from "@prisma-next/postgres/runtime" +import type { Contract } from "./prisma/contract.d" +import contractJson from "./prisma/contract.json" with { type: "json" } + +const cipherstash = await cipherstashFromStack({ contractJson }) + +export const db = postgres({ + contractJson, + extensions: cipherstash.extensions, + middleware: cipherstash.middleware, +}) +``` + +```bash +stash auth login # one-time, per developer +npx prisma-next contract emit +npx prisma-next migration plan --name initial +npx prisma-next migration apply # installs EQL bundle + your schema +``` + +```typescript +import { EncryptedString, decryptAll } from "@cipherstash/prisma-next/runtime" + +await db.orm.User.create({ + id: "user-0", + email: EncryptedString.from("alice@example.com"), + // ... +}) + +const rows = await db.orm.User + .where((u) => u.email.cipherstashIlike("%@example.com")) + .all() + +await decryptAll(rows) +console.log(await rows[0]?.email.decrypt()) +``` + +See the [full documentation](https://cipherstash.com/docs/stack/cipherstash/encryption/prisma-next) for the complete encrypted column reference, all 17 query operators, the override surface, security model, and known limitations. 
+ +## Subpath exports + +| Subpath | Purpose | +| ---------------- | ------------------------------------------------------------------------------------------------------ | +| `./stack` | One-call setup against `@cipherstash/stack`: `cipherstashFromStack`, `deriveStackSchemas`, `createCipherstashSdk` | +| `./control` | `SqlControlExtensionDescriptor` (contract space + pack meta + codec lifecycle hooks) | +| `./runtime` | Six envelope classes + `CipherstashSdk` + codec runtime + `decryptAll` + four free-standing helpers | +| `./middleware` | `bulkEncryptMiddleware(sdk)` | +| `./pack` | `cipherstashPackMeta` for TS contract authoring | +| `./column-types` | Six TS factories: `encryptedString` / `encryptedDouble` / `encryptedBigInt` / `encryptedDate` / `encryptedBoolean` / `encryptedJson` | + +`./control`, `./runtime`, and `./middleware` are tree-shakable. `./stack` sits on top of `./runtime` + `./middleware` and additionally pulls in `@cipherstash/stack`; consumers who implement `CipherstashSdk` against a different KMS skip `./stack` and pay no `@cipherstash/stack` bundle cost. + +## Authentication + +`stash auth login` runs a PKCE flow and caches credentials in your OS keychain — each developer ends up with their own identity for every encrypt / decrypt against the workspace. No `CS_*` env vars in local development. + +The four `CS_*` env vars (`CS_WORKSPACE_CRN`, `CS_CLIENT_ID`, `CS_CLIENT_KEY`, `CS_CLIENT_ACCESS_KEY`) are reserved for production deployments and CI runners. See the [authentication docs](https://cipherstash.com/docs/stack/cipherstash/encryption/prisma-next#authentication) for the full identity story. + +## Example + +A runnable end-to-end example lives at [`examples/prisma/`](../../examples/prisma/) — bundles a docker-compose Postgres, a six-codec `User` schema, and a flow that exercises every operator category against a live ZeroKMS workspace. 
+ +## Contributing + +See [`DEVELOPING.md`](./DEVELOPING.md) for the source layout, two-pass codec encode + middleware rewrite lifecycle, physical-column-name routing, the `bigint → Number` SDK boundary, and other runtime-side details. + +## References + +- 📖 [**Full docs**](https://cipherstash.com/docs/stack/cipherstash/encryption/prisma-next) — column types, operator reference, security model, known limitations. +- [CipherStash EQL reference](https://cipherstash.com/docs/stack/platform/eql) — encrypted operator semantics and search-config index types. +- [`@cipherstash/stack`](../stack/README.md) — encryption SDK and schema DSL. +- [Prisma Next CLI](https://www.npmjs.com/package/@prisma-next/cli) — the framework this extension plugs into. + +## License + +MIT diff --git a/packages/prisma-next/migrations/20260601T0000_install_eql_bundle/end-contract.d.ts b/packages/prisma-next/migrations/20260601T0000_install_eql_bundle/end-contract.d.ts new file mode 100644 index 00000000..76d0217b --- /dev/null +++ b/packages/prisma-next/migrations/20260601T0000_install_eql_bundle/end-contract.d.ts @@ -0,0 +1,149 @@ +// ⚠️ GENERATED FILE - DO NOT EDIT +// This file is automatically generated by 'prisma-next contract emit'. 
+// To regenerate, run: prisma-next contract emit +import type { CodecTypes as PgTypes } from '@prisma-next/target-postgres/codec-types'; +import type { JsonValue } from '@prisma-next/target-postgres/codec-types'; +import type { Char } from '@prisma-next/target-postgres/codec-types'; +import type { Varchar } from '@prisma-next/target-postgres/codec-types'; +import type { Numeric } from '@prisma-next/target-postgres/codec-types'; +import type { Bit } from '@prisma-next/target-postgres/codec-types'; +import type { VarBit } from '@prisma-next/target-postgres/codec-types'; +import type { Timestamp } from '@prisma-next/target-postgres/codec-types'; +import type { Timestamptz } from '@prisma-next/target-postgres/codec-types'; +import type { Time } from '@prisma-next/target-postgres/codec-types'; +import type { Timetz } from '@prisma-next/target-postgres/codec-types'; +import type { Interval } from '@prisma-next/target-postgres/codec-types'; +import type { QueryOperationTypes as PgAdapterQueryOps } from '@prisma-next/adapter-postgres/operation-types'; + +import type { + ContractWithTypeMaps, + TypeMaps as TypeMapsType, +} from '@prisma-next/sql-contract/types'; +import type { + Contract as ContractType, + ExecutionHashBase, + ProfileHashBase, + StorageHashBase, +} from '@prisma-next/contract/types'; + +export type StorageHash = + StorageHashBase<'sha256:efa685171bebbb8f078f08d12be3578bb5d96b71669dccc6cc9e4be96af8cdb4'>; +export type ExecutionHash = ExecutionHashBase; +export type ProfileHash = + ProfileHashBase<'sha256:1a8dbe044289f30a1de958fe800cc5a8378b285d2e126a8c44b58864bac2c18e'>; + +export type CodecTypes = PgTypes; +export type OperationTypes = Record; +export type LaneCodecTypes = CodecTypes; +export type QueryOperationTypes = PgAdapterQueryOps; +type DefaultLiteralValue = CodecId extends keyof CodecTypes + ? 
CodecTypes[CodecId]['output'] + : _Encoded; + +export type FieldOutputTypes = { + readonly EqlV2Configuration: { + readonly id: CodecTypes['pg/text@1']['output']; + readonly state: CodecTypes['pg/text@1']['output']; + readonly data: CodecTypes['pg/jsonb@1']['output']; + }; +}; +export type FieldInputTypes = { + readonly EqlV2Configuration: { + readonly id: CodecTypes['pg/text@1']['input']; + readonly state: CodecTypes['pg/text@1']['input']; + readonly data: CodecTypes['pg/jsonb@1']['input']; + }; +}; +export type TypeMaps = TypeMapsType< + CodecTypes, + OperationTypes, + QueryOperationTypes, + FieldOutputTypes, + FieldInputTypes +>; + +type ContractBase = ContractType< + { + readonly tables: { + readonly eql_v2_configuration: { + columns: { + readonly id: { + readonly nativeType: 'text'; + readonly codecId: 'pg/text@1'; + readonly nullable: false; + }; + readonly state: { + readonly nativeType: 'text'; + readonly codecId: 'pg/text@1'; + readonly nullable: false; + }; + readonly data: { + readonly nativeType: 'jsonb'; + readonly codecId: 'pg/jsonb@1'; + readonly nullable: false; + }; + }; + primaryKey: { readonly columns: readonly ['id'] }; + uniques: readonly []; + indexes: readonly []; + foreignKeys: readonly []; + }; + }; + readonly types: Record; + readonly storageHash: StorageHash; + }, + { + readonly EqlV2Configuration: { + readonly fields: { + readonly id: { + readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/text@1' }; + }; + readonly state: { + readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/text@1' }; + }; + readonly data: { + readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/jsonb@1' }; + }; + }; + readonly relations: Record; + readonly storage: { + readonly table: 'eql_v2_configuration'; + readonly fields: { + readonly id: { readonly column: 'id' }; + readonly state: { readonly column: 'state' }; + readonly data: { readonly 
column: 'data' }; + }; + }; + }; + } +> & { + readonly target: 'postgres'; + readonly targetFamily: 'sql'; + readonly roots: { readonly eql_v2_configuration: 'EqlV2Configuration' }; + readonly capabilities: { + readonly postgres: { + readonly jsonAgg: true; + readonly lateral: true; + readonly limit: true; + readonly orderBy: true; + readonly returning: true; + }; + readonly sql: { + readonly defaultInInsert: true; + readonly enums: true; + readonly returning: true; + }; + }; + readonly extensionPacks: {}; + readonly meta: {}; + + readonly profileHash: ProfileHash; +}; + +export type Contract = ContractWithTypeMaps; + +export type Tables = Contract['storage']['tables']; +export type Models = Contract['models']; diff --git a/packages/prisma-next/migrations/20260601T0000_install_eql_bundle/end-contract.json b/packages/prisma-next/migrations/20260601T0000_install_eql_bundle/end-contract.json new file mode 100644 index 00000000..4b32ae17 --- /dev/null +++ b/packages/prisma-next/migrations/20260601T0000_install_eql_bundle/end-contract.json @@ -0,0 +1,102 @@ +{ + "schemaVersion": "1", + "targetFamily": "sql", + "target": "postgres", + "profileHash": "sha256:1a8dbe044289f30a1de958fe800cc5a8378b285d2e126a8c44b58864bac2c18e", + "roots": { + "eql_v2_configuration": "EqlV2Configuration" + }, + "models": { + "EqlV2Configuration": { + "fields": { + "data": { + "nullable": false, + "type": { + "codecId": "pg/jsonb@1", + "kind": "scalar" + } + }, + "id": { + "nullable": false, + "type": { + "codecId": "pg/text@1", + "kind": "scalar" + } + }, + "state": { + "nullable": false, + "type": { + "codecId": "pg/text@1", + "kind": "scalar" + } + } + }, + "relations": {}, + "storage": { + "fields": { + "data": { + "column": "data" + }, + "id": { + "column": "id" + }, + "state": { + "column": "state" + } + }, + "table": "eql_v2_configuration" + } + } + }, + "storage": { + "storageHash": "sha256:efa685171bebbb8f078f08d12be3578bb5d96b71669dccc6cc9e4be96af8cdb4", + "tables": { + 
"eql_v2_configuration": { + "columns": { + "data": { + "codecId": "pg/jsonb@1", + "nativeType": "jsonb", + "nullable": false + }, + "id": { + "codecId": "pg/text@1", + "nativeType": "text", + "nullable": false + }, + "state": { + "codecId": "pg/text@1", + "nativeType": "text", + "nullable": false + } + }, + "foreignKeys": [], + "indexes": [], + "primaryKey": { + "columns": ["id"] + }, + "uniques": [] + } + } + }, + "capabilities": { + "postgres": { + "jsonAgg": true, + "lateral": true, + "limit": true, + "orderBy": true, + "returning": true + }, + "sql": { + "defaultInInsert": true, + "enums": true, + "returning": true + } + }, + "extensionPacks": {}, + "meta": {}, + "_generated": { + "warning": "⚠️ GENERATED FILE - DO NOT EDIT", + "message": "This file is automatically generated by \"prisma-next contract emit\".", + "regenerate": "To regenerate, run: prisma-next contract emit" + } +} diff --git a/packages/prisma-next/migrations/20260601T0000_install_eql_bundle/migration.json b/packages/prisma-next/migrations/20260601T0000_install_eql_bundle/migration.json new file mode 100644 index 00000000..cf0c8195 --- /dev/null +++ b/packages/prisma-next/migrations/20260601T0000_install_eql_bundle/migration.json @@ -0,0 +1,120 @@ +{ + "from": null, + "to": "sha256:efa685171bebbb8f078f08d12be3578bb5d96b71669dccc6cc9e4be96af8cdb4", + "labels": [], + "providedInvariants": [ + "cipherstash:install-eql-bundle-v1" + ], + "createdAt": "2026-05-09T03:42:56.902Z", + "fromContract": null, + "toContract": { + "schemaVersion": "1", + "targetFamily": "sql", + "target": "postgres", + "profileHash": "sha256:1a8dbe044289f30a1de958fe800cc5a8378b285d2e126a8c44b58864bac2c18e", + "roots": { + "eql_v2_configuration": "EqlV2Configuration" + }, + "models": { + "EqlV2Configuration": { + "fields": { + "data": { + "nullable": false, + "type": { + "codecId": "pg/jsonb@1", + "kind": "scalar" + } + }, + "id": { + "nullable": false, + "type": { + "codecId": "pg/text@1", + "kind": "scalar" + } + }, + 
"state": { + "nullable": false, + "type": { + "codecId": "pg/text@1", + "kind": "scalar" + } + } + }, + "relations": {}, + "storage": { + "fields": { + "data": { + "column": "data" + }, + "id": { + "column": "id" + }, + "state": { + "column": "state" + } + }, + "table": "eql_v2_configuration" + } + } + }, + "storage": { + "storageHash": "sha256:efa685171bebbb8f078f08d12be3578bb5d96b71669dccc6cc9e4be96af8cdb4", + "tables": { + "eql_v2_configuration": { + "columns": { + "data": { + "codecId": "pg/jsonb@1", + "nativeType": "jsonb", + "nullable": false + }, + "id": { + "codecId": "pg/text@1", + "nativeType": "text", + "nullable": false + }, + "state": { + "codecId": "pg/text@1", + "nativeType": "text", + "nullable": false + } + }, + "foreignKeys": [], + "indexes": [], + "primaryKey": { + "columns": [ + "id" + ] + }, + "uniques": [] + } + } + }, + "capabilities": { + "postgres": { + "jsonAgg": true, + "lateral": true, + "limit": true, + "orderBy": true, + "returning": true + }, + "sql": { + "defaultInInsert": true, + "enums": true, + "returning": true + } + }, + "extensionPacks": {}, + "meta": {}, + "_generated": { + "warning": "⚠️ GENERATED FILE - DO NOT EDIT", + "message": "This file is automatically generated by \"prisma-next contract emit\".", + "regenerate": "To regenerate, run: prisma-next contract emit" + } + }, + "hints": { + "used": [], + "applied": [], + "plannerVersion": "2.0.0" + }, + "migrationHash": "sha256:9b44ccc4d0753b364e546297857dcd8dd1ea0c16d2d09579ddb3c8d0e5fc3115" +} \ No newline at end of file diff --git a/packages/prisma-next/migrations/20260601T0000_install_eql_bundle/migration.ts b/packages/prisma-next/migrations/20260601T0000_install_eql_bundle/migration.ts new file mode 100755 index 00000000..18b22829 --- /dev/null +++ b/packages/prisma-next/migrations/20260601T0000_install_eql_bundle/migration.ts @@ -0,0 +1,71 @@ +#!/usr/bin/env -S node +/** + * CipherStash baseline migration — install the vendored EQL bundle. 
+ *
+ * The contract IR (see `/contract.json`) declares the
+ * `eql_v2_configuration` table only — the only typed object that
+ * today's `SqlStorage` IR can model. The actual database state — the
+ * `eql_v2` schema, the `eql_v2_configuration_state` enum, the
+ * `eql_v2_encrypted` composite, the `eql_v2.bloom_filter` /
+ * `hmac_256` / `blake3` domains, plus the ORE composites — is created
+ * by the vendored EQL bundle SQL (see `../../src/migration/eql-bundle.ts`,
+ * which re-exports the bundle from `eql-install.generated.ts`
+ * byte-for-byte). The bundle also creates the `eql_v2_configuration`
+ * table itself, so a planner-emitted `createTable` op would conflict
+ * with the bundle's `CREATE TABLE`; it is therefore intentionally
+ * dropped from this migration's `operations` getter.
+ *
+ * Authoring loop: this file is hand-edited (see
+ * `docs/architecture docs/adrs/ADR 212 - Contract spaces.md`'s
+ * contract-space package layout section). Re-emit `ops.json` /
+ * `migration.json` after edits via `node migration.ts`.
+ */ +import { Migration, MigrationCLI, rawSql } from '@prisma-next/target-postgres/migration'; +import { CIPHERSTASH_INVARIANTS } from '../../src/extension-metadata/constants'; +import { EQL_BUNDLE_SQL } from '../../src/migration/eql-bundle'; + +const INSTALL_LABEL = 'Install EQL bundle (functions, operators, casts, op classes, schema, types)'; + +export default class M extends Migration { + override describe() { + return { + from: null, + to: 'sha256:efa685171bebbb8f078f08d12be3578bb5d96b71669dccc6cc9e4be96af8cdb4', + }; + } + + override get operations() { + return [ + rawSql({ + id: 'cipherstash.install-eql-bundle', + label: INSTALL_LABEL, + operationClass: 'additive', + invariantId: CIPHERSTASH_INVARIANTS.installBundle, + target: { id: 'postgres' }, + precheck: [], + execute: [{ description: INSTALL_LABEL, sql: EQL_BUNDLE_SQL }], + postcheck: [ + { + description: 'verify "eql_v2" schema exists', + sql: "SELECT EXISTS (SELECT 1 FROM pg_namespace WHERE nspname = 'eql_v2')", + }, + { + // The composite type is created in the `public` schema + // (not `eql_v2`) — by design. Customer data columns + // declared as `eql_v2_encrypted` are pinned to the type's + // OID and must survive a `DROP SCHEMA eql_v2 CASCADE` + // re-install of the bundle without losing the columns. + // Placing the composite outside the `eql_v2` namespace + // decouples the type's lifecycle from the bundle's + // functions / operators / casts. 
+ description: 'verify "public.eql_v2_encrypted" composite type exists', + sql: "SELECT EXISTS (SELECT 1 FROM pg_type t JOIN pg_namespace n ON n.oid = t.typnamespace WHERE n.nspname = 'public' AND t.typname = 'eql_v2_encrypted')", + }, + ], + }), + ]; + } +} + +MigrationCLI.run(import.meta.url, M); diff --git a/packages/prisma-next/migrations/20260601T0000_install_eql_bundle/ops.json b/packages/prisma-next/migrations/20260601T0000_install_eql_bundle/ops.json new file mode 100644 index 00000000..9af1a5d5 --- /dev/null +++ b/packages/prisma-next/migrations/20260601T0000_install_eql_bundle/ops.json @@ -0,0 +1,28 @@ +[ + { + "id": "cipherstash.install-eql-bundle", + "label": "Install EQL bundle (functions, operators, casts, op classes, schema, types)", + "operationClass": "additive", + "invariantId": "cipherstash:install-eql-bundle-v1", + "target": { + "id": "postgres" + }, + "precheck": [], + "execute": [ + { + "description": "Install EQL bundle (functions, operators, casts, op classes, schema, types)", + "sql": "--! @file schema.sql\n--! @brief EQL v2 schema creation\n--!\n--! Creates the eql_v2 schema which contains all Encrypt Query Language\n--! functions, types, and tables. Drops existing schema if present to\n--! support clean reinstallation.\n--!\n--! @warning DROP SCHEMA CASCADE will remove all objects in the schema\n--! @note All EQL objects (functions, types, tables) reside in eql_v2 schema\n\n--! @brief Drop existing EQL v2 schema\n--! @warning CASCADE will drop all dependent objects\nDROP SCHEMA IF EXISTS eql_v2 CASCADE;\n\n--! @brief Create EQL v2 schema\n--! @note All EQL functions and types will be created in this schema\nCREATE SCHEMA eql_v2;\n\n--! @brief Composite type for encrypted column data\n--!\n--! Core type used for all encrypted columns in EQL. Stores encrypted data as JSONB\n--! with the following structure:\n--! - `c`: ciphertext (base64-encoded encrypted value)\n--! - `i`: index terms (searchable metadata for encrypted searches)\n--! 
- `k`: key ID (identifier for encryption key)\n--! - `m`: metadata (additional encryption metadata)\n--!\n--! Created in public schema to persist independently of eql_v2 schema lifecycle.\n--! Customer data columns use this type, so it must not be dropped if data exists.\n--!\n--! @note DO NOT DROP this type unless absolutely certain no encrypted data uses it\n--! @see eql_v2.ciphertext\n--! @see eql_v2.meta_data\n--! @see eql_v2.add_column\nDO $$\n BEGIN\n IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'eql_v2_encrypted') THEN\n CREATE TYPE public.eql_v2_encrypted AS (\n data jsonb\n );\n END IF;\n END\n$$;\n\n\n\n\n\n\n\n\n\n\n--! @brief Bloom filter index term type\n--!\n--! Domain type representing Bloom filter bit arrays stored as smallint arrays.\n--! Used for pattern-match encrypted searches via the 'match' index type.\n--! The filter is stored in the 'bf' field of encrypted data payloads.\n--!\n--! @see eql_v2.add_search_config\n--! @see eql_v2.\"~~\"\n--! @note This is a transient type used only during query execution\nCREATE DOMAIN eql_v2.bloom_filter AS smallint[];\n\n\n\n--! @brief ORE block term type for Order-Revealing Encryption\n--!\n--! Composite type representing a single ORE (Order-Revealing Encryption) block term.\n--! Stores encrypted data as bytea that enables range comparisons without decryption.\n--!\n--! @see eql_v2.ore_block_u64_8_256\n--! @see eql_v2.compare_ore_block_u64_8_256_term\nCREATE TYPE eql_v2.ore_block_u64_8_256_term AS (\n bytes bytea\n);\n\n\n--! @brief ORE block index term type for range queries\n--!\n--! Composite type containing an array of ORE block terms. Used for encrypted\n--! range queries via the 'ore' index type. The array is stored in the 'ob' field\n--! of encrypted data payloads.\n--!\n--! @see eql_v2.add_search_config\n--! @see eql_v2.compare_ore_block_u64_8_256_terms\n--! 
@note This is a transient type used only during query execution\nCREATE TYPE eql_v2.ore_block_u64_8_256 AS (\n terms eql_v2.ore_block_u64_8_256_term[]\n);\n\n--! @brief HMAC-SHA256 index term type\n--!\n--! Domain type representing HMAC-SHA256 hash values.\n--! Used for exact-match encrypted searches via the 'unique' index type.\n--! The hash is stored in the 'hm' field of encrypted data payloads.\n--!\n--! @see eql_v2.add_search_config\n--! @note This is a transient type used only during query execution\nCREATE DOMAIN eql_v2.hmac_256 AS text;\n-- AUTOMATICALLY GENERATED FILE\n\n--! @file common.sql\n--! @brief Common utility functions\n--!\n--! Provides general-purpose utility functions used across EQL:\n--! - Constant-time bytea comparison for security\n--! - JSONB to bytea array conversion\n--! - Logging helpers for debugging and testing\n\n\n--! @brief Constant-time comparison of bytea values\n--! @internal\n--!\n--! Compares two bytea values in constant time to prevent timing attacks.\n--! Always checks all bytes even after finding differences, maintaining\n--! consistent execution time regardless of where differences occur.\n--!\n--! @param a bytea First value to compare\n--! @param b bytea Second value to compare\n--! @return boolean True if values are equal\n--!\n--! @note Returns false immediately if lengths differ (length is not secret)\n--! @note Used for secure comparison of cryptographic values\nCREATE FUNCTION eql_v2.bytea_eq(a bytea, b bytea) RETURNS boolean AS $$\nDECLARE\n result boolean;\n differing bytea;\nBEGIN\n\n -- Check if the bytea values are the same length\n IF LENGTH(a) != LENGTH(b) THEN\n RETURN false;\n END IF;\n\n -- Compare each byte in the bytea values\n result := true;\n FOR i IN 1..LENGTH(a) LOOP\n IF SUBSTRING(a FROM i FOR 1) != SUBSTRING(b FROM i FOR 1) THEN\n result := result AND false;\n END IF;\n END LOOP;\n\n RETURN result;\nEND;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Convert JSONB hex array to bytea array\n--! 
@internal\n--!\n--! Converts a JSONB array of hex-encoded strings into a PostgreSQL bytea array.\n--! Used for deserializing binary data (like ORE terms) from JSONB storage.\n--!\n--! @param jsonb JSONB array of hex-encoded strings\n--! @return bytea[] Array of decoded binary values\n--!\n--! @note Returns NULL if input is JSON null\n--! @note Each array element is hex-decoded to bytea\nCREATE FUNCTION eql_v2.jsonb_array_to_bytea_array(val jsonb)\nRETURNS bytea[] AS $$\nDECLARE\n terms_arr bytea[];\nBEGIN\n IF jsonb_typeof(val) = 'null' THEN\n RETURN NULL;\n END IF;\n\n SELECT array_agg(decode(value::text, 'hex')::bytea)\n INTO terms_arr\n FROM jsonb_array_elements_text(val) AS value;\n\n RETURN terms_arr;\nEND;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Log message for debugging\n--!\n--! Convenience function to emit log messages during testing and debugging.\n--! Uses RAISE NOTICE to output messages to PostgreSQL logs.\n--!\n--! @param text Message to log\n--!\n--! @note Primarily used in tests and development\n--! @see eql_v2.log(text, text) for contextual logging\nCREATE FUNCTION eql_v2.log(s text)\n RETURNS void\nAS $$\n BEGIN\n RAISE NOTICE '[LOG] %', s;\nEND;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Log message with context\n--!\n--! Overload of log function that includes context label for better\n--! log organization during testing.\n--!\n--! @param ctx text Context label (e.g., test name, module name)\n--! @param s text Message to log\n--!\n--! @note Format: \"[LOG] {ctx} {message}\"\n--! @see eql_v2.log(text)\nCREATE FUNCTION eql_v2.log(ctx text, s text)\n RETURNS void\nAS $$\n BEGIN\n RAISE NOTICE '[LOG] % %', ctx, s;\nEND;\n$$ LANGUAGE plpgsql;\n\n--! @brief CLLW ORE index term type for range queries\n--!\n--! Composite type for CLLW (Copyless Logarithmic Width) Order-Revealing Encryption.\n--! Each output block is 8-bits. Used for encrypted range queries via the 'ore' index type.\n--! 
The ciphertext is stored in the 'ocf' field of encrypted data payloads.\n--!\n--! @see eql_v2.add_search_config\n--! @see eql_v2.compare_ore_cllw_u64_8\n--! @note This is a transient type used only during query execution\nCREATE TYPE eql_v2.ore_cllw_u64_8 AS (\n bytes bytea\n);\n\n--! @file crypto.sql\n--! @brief PostgreSQL pgcrypto extension enablement\n--!\n--! Enables the pgcrypto extension which provides cryptographic functions\n--! used by EQL for hashing and other cryptographic operations.\n--!\n--! @note pgcrypto provides functions like digest(), hmac(), gen_random_bytes()\n--! @note IF NOT EXISTS prevents errors if extension already enabled\n\n--! @brief Enable pgcrypto extension\n--! @note Provides cryptographic functions for hashing and random number generation\nCREATE EXTENSION IF NOT EXISTS pgcrypto;\n\n\n--! @brief Extract ciphertext from encrypted JSONB value\n--!\n--! Extracts the ciphertext (c field) from a raw JSONB encrypted value.\n--! The ciphertext is the base64-encoded encrypted data.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return Text Base64-encoded ciphertext string\n--! @throws Exception if 'c' field is not present in JSONB\n--!\n--! @example\n--! -- Extract ciphertext from JSONB literal\n--! SELECT eql_v2.ciphertext('{\"c\":\"AQIDBA==\",\"i\":{\"unique\":\"...\"}}'::jsonb);\n--!\n--! @see eql_v2.ciphertext(eql_v2_encrypted)\n--! @see eql_v2.meta_data\nCREATE FUNCTION eql_v2.ciphertext(val jsonb)\n RETURNS text\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n IF val ? 'c' THEN\n RETURN val->>'c';\n END IF;\n RAISE 'Expected a ciphertext (c) value in json: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Extract ciphertext from encrypted column value\n--!\n--! Extracts the ciphertext from an encrypted column value. Convenience\n--! overload that unwraps eql_v2_encrypted type and delegates to JSONB version.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! 
@return Text Base64-encoded ciphertext string\n--! @throws Exception if encrypted value is malformed\n--!\n--! @example\n--! -- Extract ciphertext from encrypted column\n--! SELECT eql_v2.ciphertext(encrypted_email) FROM users;\n--!\n--! @see eql_v2.ciphertext(jsonb)\n--! @see eql_v2.meta_data\nCREATE FUNCTION eql_v2.ciphertext(val eql_v2_encrypted)\n RETURNS text\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.ciphertext(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief State transition function for grouped_value aggregate\n--! @internal\n--!\n--! Returns the first non-null value encountered. Used as state function\n--! for the grouped_value aggregate to select first value in each group.\n--!\n--! @param $1 JSONB Accumulated state (first non-null value found)\n--! @param $2 JSONB New value from current row\n--! @return JSONB First non-null value (state or new value)\n--!\n--! @see eql_v2.grouped_value\nCREATE FUNCTION eql_v2._first_grouped_value(jsonb, jsonb)\nRETURNS jsonb AS $$\n SELECT COALESCE($1, $2);\n$$ LANGUAGE sql IMMUTABLE;\n\n--! @brief Return first non-null encrypted value in a group\n--!\n--! Aggregate function that returns the first non-null encrypted value\n--! encountered within a GROUP BY clause. Useful for deduplication or\n--! selecting representative values from grouped encrypted data.\n--!\n--! @param input JSONB Encrypted values to aggregate\n--! @return JSONB First non-null encrypted value in group\n--!\n--! @example\n--! -- Get first email per user group\n--! SELECT user_id, eql_v2.grouped_value(encrypted_email)\n--! FROM user_emails\n--! GROUP BY user_id;\n--!\n--! -- Deduplicate encrypted values\n--! SELECT DISTINCT ON (user_id)\n--! user_id,\n--! eql_v2.grouped_value(encrypted_ssn) as primary_ssn\n--! FROM user_records\n--! GROUP BY user_id;\n--!\n--! @see eql_v2._first_grouped_value\nCREATE AGGREGATE eql_v2.grouped_value(jsonb) (\n SFUNC = eql_v2._first_grouped_value,\n STYPE = jsonb\n);\n\n--! 
@brief Add validation constraint to encrypted column\n--!\n--! Adds a CHECK constraint to ensure column values conform to encrypted data\n--! structure. Constraint uses eql_v2.check_encrypted to validate format.\n--! Called automatically by eql_v2.add_column.\n--!\n--! @param table_name TEXT Name of table containing the column\n--! @param column_name TEXT Name of column to constrain\n--! @return Void\n--!\n--! @example\n--! -- Manually add constraint (normally done by add_column)\n--! SELECT eql_v2.add_encrypted_constraint('users', 'encrypted_email');\n--!\n--! -- Resulting constraint:\n--! -- ALTER TABLE users ADD CONSTRAINT eql_v2_encrypted_check_encrypted_email\n--! -- CHECK (eql_v2.check_encrypted(encrypted_email));\n--!\n--! @see eql_v2.add_column\n--! @see eql_v2.remove_encrypted_constraint\nCREATE FUNCTION eql_v2.add_encrypted_constraint(table_name TEXT, column_name TEXT)\n RETURNS void\nAS $$\n\tBEGIN\n EXECUTE format('ALTER TABLE %I ADD CONSTRAINT eql_v2_encrypted_constraint_%I_%I CHECK (eql_v2.check_encrypted(%I))', table_name, table_name, column_name, column_name);\n EXCEPTION\n WHEN duplicate_table THEN\n WHEN duplicate_object THEN\n RAISE NOTICE 'Constraint `eql_v2_encrypted_constraint_%_%` already exists, skipping', table_name, column_name;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Remove validation constraint from encrypted column\n--!\n--! Removes the CHECK constraint that validates encrypted data structure.\n--! Called automatically by eql_v2.remove_column. Uses IF EXISTS to avoid\n--! errors if constraint doesn't exist.\n--!\n--! @param table_name TEXT Name of table containing the column\n--! @param column_name TEXT Name of column to unconstrain\n--! @return Void\n--!\n--! @example\n--! -- Manually remove constraint (normally done by remove_column)\n--! SELECT eql_v2.remove_encrypted_constraint('users', 'encrypted_email');\n--!\n--! @see eql_v2.remove_column\n--! 
@see eql_v2.add_encrypted_constraint\nCREATE FUNCTION eql_v2.remove_encrypted_constraint(table_name TEXT, column_name TEXT)\n RETURNS void\nAS $$\n\tBEGIN\n\t\tEXECUTE format('ALTER TABLE %I DROP CONSTRAINT IF EXISTS eql_v2_encrypted_constraint_%I_%I', table_name, table_name, column_name);\n\tEND;\n$$ LANGUAGE plpgsql;\n\n--! @brief Extract metadata from encrypted JSONB value\n--!\n--! Extracts index terms (i) and version (v) from a raw JSONB encrypted value.\n--! Returns metadata object containing searchable index terms without ciphertext.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return JSONB Metadata object with 'i' (index terms) and 'v' (version) fields\n--!\n--! @example\n--! -- Extract metadata to inspect index terms\n--! SELECT eql_v2.meta_data('{\"c\":\"...\",\"i\":{\"unique\":\"abc123\"},\"v\":1}'::jsonb);\n--! -- Returns: {\"i\":{\"unique\":\"abc123\"},\"v\":1}\n--!\n--! @see eql_v2.meta_data(eql_v2_encrypted)\n--! @see eql_v2.ciphertext\nCREATE FUNCTION eql_v2.meta_data(val jsonb)\n RETURNS jsonb\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN jsonb_build_object(\n 'i', val->'i',\n 'v', val->'v'\n );\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Extract metadata from encrypted column value\n--!\n--! Extracts index terms and version from an encrypted column value.\n--! Convenience overload that unwraps eql_v2_encrypted type and\n--! delegates to JSONB version.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return JSONB Metadata object with 'i' (index terms) and 'v' (version) fields\n--!\n--! @example\n--! -- Inspect index terms for encrypted column\n--! SELECT user_id, eql_v2.meta_data(encrypted_email) as email_metadata\n--! FROM users;\n--!\n--! @see eql_v2.meta_data(jsonb)\n--! @see eql_v2.ciphertext\nCREATE FUNCTION eql_v2.meta_data(val eql_v2_encrypted)\n RETURNS jsonb\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN eql_v2.meta_data(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! 
@brief Variable-width CLLW ORE index term type for range queries\n--!\n--! Composite type for variable-width CLLW (Copyless Logarithmic Width) Order-Revealing Encryption.\n--! Each output block is 8-bits. Unlike ore_cllw_u64_8, supports variable-length ciphertexts.\n--! Used for encrypted range queries via the 'ore' index type.\n--! The ciphertext is stored in the 'ocv' field of encrypted data payloads.\n--!\n--! @see eql_v2.add_search_config\n--! @see eql_v2.compare_ore_cllw_var_8\n--! @note This is a transient type used only during query execution\nCREATE TYPE eql_v2.ore_cllw_var_8 AS (\n bytes bytea\n);\n\n\n--! @brief Extract CLLW ORE index term from JSONB payload\n--!\n--! Extracts the CLLW ORE ciphertext from the 'ocf' field of an encrypted\n--! data payload. Used internally for range query comparisons.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return eql_v2.ore_cllw_u64_8 CLLW ORE ciphertext\n--! @throws Exception if 'ocf' field is missing when ore index is expected\n--!\n--! @see eql_v2.has_ore_cllw_u64_8\n--! @see eql_v2.compare_ore_cllw_u64_8\nCREATE FUNCTION eql_v2.ore_cllw_u64_8(val jsonb)\n RETURNS eql_v2.ore_cllw_u64_8\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n IF val IS NULL THEN\n RETURN NULL;\n END IF;\n\n IF NOT (eql_v2.has_ore_cllw_u64_8(val)) THEN\n RAISE 'Expected a ore_cllw_u64_8 index (ocf) value in json: %', val;\n END IF;\n\n RETURN ROW(decode(val->>'ocf', 'hex'));\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract CLLW ORE index term from encrypted column value\n--!\n--! Extracts the CLLW ORE ciphertext from an encrypted column value by accessing\n--! its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return eql_v2.ore_cllw_u64_8 CLLW ORE ciphertext\n--!\n--! 
@see eql_v2.ore_cllw_u64_8(jsonb)\nCREATE FUNCTION eql_v2.ore_cllw_u64_8(val eql_v2_encrypted)\n RETURNS eql_v2.ore_cllw_u64_8\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (SELECT eql_v2.ore_cllw_u64_8(val.data));\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if JSONB payload contains CLLW ORE index term\n--!\n--! Tests whether the encrypted data payload includes an 'ocf' field,\n--! indicating a CLLW ORE ciphertext is available for range queries.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return Boolean True if 'ocf' field is present and non-null\n--!\n--! @see eql_v2.ore_cllw_u64_8\nCREATE FUNCTION eql_v2.has_ore_cllw_u64_8(val jsonb)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN val ->> 'ocf' IS NOT NULL;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if encrypted column value contains CLLW ORE index term\n--!\n--! Tests whether an encrypted column value includes a CLLW ORE ciphertext\n--! by checking its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return Boolean True if CLLW ORE ciphertext is present\n--!\n--! @see eql_v2.has_ore_cllw_u64_8(jsonb)\nCREATE FUNCTION eql_v2.has_ore_cllw_u64_8(val eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.has_ore_cllw_u64_8(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n--! @brief Compare CLLW ORE ciphertext bytes\n--! @internal\n--!\n--! Byte-by-byte comparison of CLLW ORE ciphertexts implementing the CLLW\n--! comparison algorithm. Used by both fixed-width (ore_cllw_u64_8) and\n--! variable-width (ore_cllw_var_8) ORE variants.\n--!\n--! @param a Bytea First CLLW ORE ciphertext\n--! @param b Bytea Second CLLW ORE ciphertext\n--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b\n--! @throws Exception if ciphertexts are different lengths\n--!\n--! @note Shared comparison logic for multiple ORE CLLW schemes\n--! 
@see eql_v2.compare_ore_cllw_u64_8\nCREATE FUNCTION eql_v2.compare_ore_cllw_term_bytes(a bytea, b bytea)\nRETURNS int AS $$\nDECLARE\n len_a INT;\n len_b INT;\n x BYTEA;\n y BYTEA;\n i INT;\n differing boolean;\nBEGIN\n\n -- Check if the lengths of the two bytea arguments are the same\n len_a := LENGTH(a);\n len_b := LENGTH(b);\n\n IF len_a != len_b THEN\n RAISE EXCEPTION 'ore_cllw index terms are not the same length';\n END IF;\n\n -- Iterate over each byte and compare them\n FOR i IN 1..len_a LOOP\n x := SUBSTRING(a FROM i FOR 1);\n y := SUBSTRING(b FROM i FOR 1);\n\n -- Check if there's a difference\n IF x != y THEN\n differing := true;\n EXIT;\n END IF;\n END LOOP;\n\n -- If a difference is found, compare the bytes as in Rust logic\n IF differing THEN\n IF (get_byte(y, 0) + 1) % 256 = get_byte(x, 0) THEN\n RETURN 1;\n ELSE\n RETURN -1;\n END IF;\n ELSE\n RETURN 0;\n END IF;\nEND;\n$$ LANGUAGE plpgsql;\n\n\n\n--! @brief Blake3 hash index term type\n--!\n--! Domain type representing Blake3 cryptographic hash values.\n--! Used for exact-match encrypted searches via the 'unique' index type.\n--! The hash is stored in the 'b3' field of encrypted data payloads.\n--!\n--! @see eql_v2.add_search_config\n--! @note This is a transient type used only during query execution\nCREATE DOMAIN eql_v2.blake3 AS text;\n\n--! @brief Extract Blake3 hash index term from JSONB payload\n--!\n--! Extracts the Blake3 hash value from the 'b3' field of an encrypted\n--! data payload. Used internally for exact-match comparisons.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return eql_v2.blake3 Blake3 hash value, or NULL if not present\n--! @throws Exception if 'b3' field is missing when blake3 index is expected\n--!\n--! @see eql_v2.has_blake3\n--! 
@see eql_v2.compare_blake3\nCREATE FUNCTION eql_v2.blake3(val jsonb)\n RETURNS eql_v2.blake3\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n IF val IS NULL THEN\n RETURN NULL;\n END IF;\n\n IF NOT eql_v2.has_blake3(val) THEN\n RAISE 'Expected a blake3 index (b3) value in json: %', val;\n END IF;\n\n IF val->>'b3' IS NULL THEN\n RETURN NULL;\n END IF;\n\n RETURN val->>'b3';\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract Blake3 hash index term from encrypted column value\n--!\n--! Extracts the Blake3 hash from an encrypted column value by accessing\n--! its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return eql_v2.blake3 Blake3 hash value, or NULL if not present\n--!\n--! @see eql_v2.blake3(jsonb)\nCREATE FUNCTION eql_v2.blake3(val eql_v2_encrypted)\n RETURNS eql_v2.blake3\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (SELECT eql_v2.blake3(val.data));\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if JSONB payload contains Blake3 index term\n--!\n--! Tests whether the encrypted data payload includes a 'b3' field,\n--! indicating a Blake3 hash is available for exact-match queries.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return Boolean True if 'b3' field is present and non-null\n--!\n--! @see eql_v2.blake3\nCREATE FUNCTION eql_v2.has_blake3(val jsonb)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN val ->> 'b3' IS NOT NULL;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if encrypted column value contains Blake3 index term\n--!\n--! Tests whether an encrypted column value includes a Blake3 hash\n--! by checking its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return Boolean True if Blake3 hash is present\n--!\n--! 
@see eql_v2.has_blake3(jsonb)\nCREATE FUNCTION eql_v2.has_blake3(val eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.has_blake3(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract HMAC-SHA256 index term from JSONB payload\n--!\n--! Extracts the HMAC-SHA256 hash value from the 'hm' field of an encrypted\n--! data payload. Used internally for exact-match comparisons.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return eql_v2.hmac_256 HMAC-SHA256 hash value\n--! @throws Exception if 'hm' field is missing when hmac_256 index is expected\n--!\n--! @see eql_v2.has_hmac_256\n--! @see eql_v2.compare_hmac_256\nCREATE FUNCTION eql_v2.hmac_256(val jsonb)\n RETURNS eql_v2.hmac_256\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n IF val IS NULL THEN\n RETURN NULL;\n END IF;\n\n IF eql_v2.has_hmac_256(val) THEN\n RETURN val->>'hm';\n END IF;\n RAISE 'Expected a hmac_256 index (hm) value in json: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if JSONB payload contains HMAC-SHA256 index term\n--!\n--! Tests whether the encrypted data payload includes an 'hm' field,\n--! indicating an HMAC-SHA256 hash is available for exact-match queries.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return Boolean True if 'hm' field is present and non-null\n--!\n--! @see eql_v2.hmac_256\nCREATE FUNCTION eql_v2.has_hmac_256(val jsonb)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN val ->> 'hm' IS NOT NULL;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if encrypted column value contains HMAC-SHA256 index term\n--!\n--! Tests whether an encrypted column value includes an HMAC-SHA256 hash\n--! by checking its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return Boolean True if HMAC-SHA256 hash is present\n--!\n--! 
@see eql_v2.has_hmac_256(jsonb)\nCREATE FUNCTION eql_v2.has_hmac_256(val eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.has_hmac_256(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n--! @brief Extract HMAC-SHA256 index term from encrypted column value\n--!\n--! Extracts the HMAC-SHA256 hash from an encrypted column value by accessing\n--! its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return eql_v2.hmac_256 HMAC-SHA256 hash value\n--!\n--! @see eql_v2.hmac_256(jsonb)\nCREATE FUNCTION eql_v2.hmac_256(val eql_v2_encrypted)\n RETURNS eql_v2.hmac_256\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (SELECT eql_v2.hmac_256(val.data));\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n\n--! @brief Convert JSONB array to ORE block composite type\n--! @internal\n--!\n--! Converts a JSONB array of hex-encoded ORE terms from the CipherStash Proxy\n--! payload into the PostgreSQL composite type used for ORE operations.\n--!\n--! @param val JSONB Array of hex-encoded ORE block terms\n--! @return eql_v2.ore_block_u64_8_256 ORE block composite type, or NULL if input is null\n--!\n--! @see eql_v2.ore_block_u64_8_256(jsonb)\nCREATE FUNCTION eql_v2.jsonb_array_to_ore_block_u64_8_256(val jsonb)\nRETURNS eql_v2.ore_block_u64_8_256 AS $$\nDECLARE\n terms eql_v2.ore_block_u64_8_256_term[];\nBEGIN\n IF jsonb_typeof(val) = 'null' THEN\n RETURN NULL;\n END IF;\n\n SELECT array_agg(ROW(b)::eql_v2.ore_block_u64_8_256_term)\n INTO terms\n FROM unnest(eql_v2.jsonb_array_to_bytea_array(val)) AS b;\n\n RETURN ROW(terms)::eql_v2.ore_block_u64_8_256;\nEND;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract ORE block index term from JSONB payload\n--!\n--! Extracts the ORE block array from the 'ob' field of an encrypted\n--! data payload. Used internally for range query comparisons.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return eql_v2.ore_block_u64_8_256 ORE block index term\n--! 
@throws Exception if 'ob' field is missing when ore index is expected\n--!\n--! @see eql_v2.has_ore_block_u64_8_256\n--! @see eql_v2.compare_ore_block_u64_8_256\nCREATE FUNCTION eql_v2.ore_block_u64_8_256(val jsonb)\n RETURNS eql_v2.ore_block_u64_8_256\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n IF val IS NULL THEN\n RETURN NULL;\n END IF;\n\n IF eql_v2.has_ore_block_u64_8_256(val) THEN\n RETURN eql_v2.jsonb_array_to_ore_block_u64_8_256(val->'ob');\n END IF;\n RAISE 'Expected an ore index (ob) value in json: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract ORE block index term from encrypted column value\n--!\n--! Extracts the ORE block from an encrypted column value by accessing\n--! its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return eql_v2.ore_block_u64_8_256 ORE block index term\n--!\n--! @see eql_v2.ore_block_u64_8_256(jsonb)\nCREATE FUNCTION eql_v2.ore_block_u64_8_256(val eql_v2_encrypted)\n RETURNS eql_v2.ore_block_u64_8_256\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.ore_block_u64_8_256(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if JSONB payload contains ORE block index term\n--!\n--! Tests whether the encrypted data payload includes an 'ob' field,\n--! indicating an ORE block is available for range queries.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return Boolean True if 'ob' field is present and non-null\n--!\n--! @see eql_v2.ore_block_u64_8_256\nCREATE FUNCTION eql_v2.has_ore_block_u64_8_256(val jsonb)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN val ->> 'ob' IS NOT NULL;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if encrypted column value contains ORE block index term\n--!\n--! Tests whether an encrypted column value includes an ORE block\n--! by checking its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! 
@return Boolean True if ORE block is present\n--!\n--! @see eql_v2.has_ore_block_u64_8_256(jsonb)\nCREATE FUNCTION eql_v2.has_ore_block_u64_8_256(val eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.has_ore_block_u64_8_256(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n--! @brief Compare two ORE block terms using cryptographic comparison\n--! @internal\n--!\n--! Performs a three-way comparison (returns -1/0/1) of individual ORE block terms\n--! using the ORE cryptographic protocol. Compares PRP and PRF blocks to determine\n--! ordering without decryption.\n--!\n--! @param a eql_v2.ore_block_u64_8_256_term First ORE term to compare\n--! @param b eql_v2.ore_block_u64_8_256_term Second ORE term to compare\n--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b\n--! @throws Exception if ciphertexts are different lengths\n--!\n--! @note Uses AES-ECB encryption for bit comparisons per ORE protocol\n--! @see eql_v2.compare_ore_block_u64_8_256_terms\nCREATE FUNCTION eql_v2.compare_ore_block_u64_8_256_term(a eql_v2.ore_block_u64_8_256_term, b eql_v2.ore_block_u64_8_256_term)\n RETURNS integer\nAS $$\n DECLARE\n eq boolean := true;\n unequal_block smallint := 0;\n hash_key bytea;\n data_block bytea;\n encrypt_block bytea;\n target_block bytea;\n\n left_block_size CONSTANT smallint := 16;\n right_block_size CONSTANT smallint := 32;\n right_offset CONSTANT smallint := 136; -- 8 * 17\n\n indicator smallint := 0;\n BEGIN\n IF a IS NULL AND b IS NULL THEN\n RETURN 0;\n END IF;\n\n IF a IS NULL THEN\n RETURN -1;\n END IF;\n\n IF b IS NULL THEN\n RETURN 1;\n END IF;\n\n IF bit_length(a.bytes) != bit_length(b.bytes) THEN\n RAISE EXCEPTION 'Ciphertexts are different lengths';\n END IF;\n\n FOR block IN 0..7 LOOP\n -- Compare each PRP (byte from the first 8 bytes) and PRF block (8 byte\n -- chunks of the rest of the value).\n -- NOTE:\n -- * Substr is ordinally indexed (hence 1 and not 0, and 9 and not 8).\n -- * We are not worrying 
about timing attacks here; don't fret about\n -- the OR or !=.\n IF\n substr(a.bytes, 1 + block, 1) != substr(b.bytes, 1 + block, 1)\n OR substr(a.bytes, 9 + left_block_size * block, left_block_size) != substr(b.bytes, 9 + left_block_size * BLOCK, left_block_size)\n THEN\n -- set the first unequal block we find\n IF eq THEN\n unequal_block := block;\n END IF;\n eq = false;\n END IF;\n END LOOP;\n\n IF eq THEN\n RETURN 0::integer;\n END IF;\n\n -- Hash key is the IV from the right CT of b\n hash_key := substr(b.bytes, right_offset + 1, 16);\n\n -- first right block is at right offset + nonce_size (ordinally indexed)\n target_block := substr(b.bytes, right_offset + 17 + (unequal_block * right_block_size), right_block_size);\n\n data_block := substr(a.bytes, 9 + (left_block_size * unequal_block), left_block_size);\n\n encrypt_block := public.encrypt(data_block::bytea, hash_key::bytea, 'aes-ecb');\n\n indicator := (\n get_bit(\n encrypt_block,\n 0\n ) + get_bit(target_block, get_byte(a.bytes, unequal_block))) % 2;\n\n IF indicator = 1 THEN\n RETURN 1::integer;\n ELSE\n RETURN -1::integer;\n END IF;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Compare arrays of ORE block terms recursively\n--! @internal\n--!\n--! Recursively compares arrays of ORE block terms element-by-element.\n--! Empty arrays are considered less than non-empty arrays. If the first elements\n--! are equal, recursively compares remaining elements.\n--!\n--! @param a eql_v2.ore_block_u64_8_256_term[] First array of ORE terms\n--! @param b eql_v2.ore_block_u64_8_256_term[] Second array of ORE terms\n--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b, NULL if either array is NULL\n--!\n--! @note Empty arrays sort before non-empty arrays\n--! 
@see eql_v2.compare_ore_block_u64_8_256_term\nCREATE FUNCTION eql_v2.compare_ore_block_u64_8_256_terms(a eql_v2.ore_block_u64_8_256_term[], b eql_v2.ore_block_u64_8_256_term[])\nRETURNS integer AS $$\n DECLARE\n cmp_result integer;\n BEGIN\n\n -- NULLs are NULL\n IF a IS NULL OR b IS NULL THEN\n RETURN NULL;\n END IF;\n\n -- empty a and b\n IF cardinality(a) = 0 AND cardinality(b) = 0 THEN\n RETURN 0;\n END IF;\n\n -- empty a and some b\n IF (cardinality(a) = 0) AND cardinality(b) > 0 THEN\n RETURN -1;\n END IF;\n\n -- some a and empty b\n IF cardinality(a) > 0 AND (cardinality(b) = 0) THEN\n RETURN 1;\n END IF;\n\n cmp_result := eql_v2.compare_ore_block_u64_8_256_term(a[1], b[1]);\n\n IF cmp_result = 0 THEN\n -- Removes the first element in the array, and calls this fn again to compare the next element/s in the array.\n RETURN eql_v2.compare_ore_block_u64_8_256_terms(a[2:array_length(a,1)], b[2:array_length(b,1)]);\n END IF;\n\n RETURN cmp_result;\n END\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Compare ORE block composite types\n--! @internal\n--!\n--! Wrapper function that extracts term arrays from ORE block composite types\n--! and delegates to the array comparison function.\n--!\n--! @param a eql_v2.ore_block_u64_8_256 First ORE block\n--! @param b eql_v2.ore_block_u64_8_256 Second ORE block\n--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b\n--!\n--! @see eql_v2.compare_ore_block_u64_8_256_terms(eql_v2.ore_block_u64_8_256_term[], eql_v2.ore_block_u64_8_256_term[])\nCREATE FUNCTION eql_v2.compare_ore_block_u64_8_256_terms(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256)\nRETURNS integer AS $$\n BEGIN\n RETURN eql_v2.compare_ore_block_u64_8_256_terms(a.terms, b.terms);\n END\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract variable-width CLLW ORE index term from JSONB payload\n--!\n--! Extracts the variable-width CLLW ORE ciphertext from the 'ocv' field of an encrypted\n--! data payload. Used internally for range query comparisons.\n--!\n--! 
@param jsonb containing encrypted EQL payload\n--! @return eql_v2.ore_cllw_var_8 Variable-width CLLW ORE ciphertext\n--! @throws Exception if 'ocv' field is missing when ore index is expected\n--!\n--! @see eql_v2.has_ore_cllw_var_8\n--! @see eql_v2.compare_ore_cllw_var_8\nCREATE FUNCTION eql_v2.ore_cllw_var_8(val jsonb)\n RETURNS eql_v2.ore_cllw_var_8\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n\n IF val IS NULL THEN\n RETURN NULL;\n END IF;\n\n IF NOT (eql_v2.has_ore_cllw_var_8(val)) THEN\n RAISE 'Expected a ore_cllw_var_8 index (ocv) value in json: %', val;\n END IF;\n\n RETURN ROW(decode(val->>'ocv', 'hex'));\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract variable-width CLLW ORE index term from encrypted column value\n--!\n--! Extracts the variable-width CLLW ORE ciphertext from an encrypted column value by accessing\n--! its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return eql_v2.ore_cllw_var_8 Variable-width CLLW ORE ciphertext\n--!\n--! @see eql_v2.ore_cllw_var_8(jsonb)\nCREATE FUNCTION eql_v2.ore_cllw_var_8(val eql_v2_encrypted)\n RETURNS eql_v2.ore_cllw_var_8\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (SELECT eql_v2.ore_cllw_var_8(val.data));\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if JSONB payload contains variable-width CLLW ORE index term\n--!\n--! Tests whether the encrypted data payload includes an 'ocv' field,\n--! indicating a variable-width CLLW ORE ciphertext is available for range queries.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return Boolean True if 'ocv' field is present and non-null\n--!\n--! @see eql_v2.ore_cllw_var_8\nCREATE FUNCTION eql_v2.has_ore_cllw_var_8(val jsonb)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN val ->> 'ocv' IS NOT NULL;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if encrypted column value contains variable-width CLLW ORE index term\n--!\n--! 
Tests whether an encrypted column value includes a variable-width CLLW ORE ciphertext\n--! by checking its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return Boolean True if variable-width CLLW ORE ciphertext is present\n--!\n--! @see eql_v2.has_ore_cllw_var_8(jsonb)\nCREATE FUNCTION eql_v2.has_ore_cllw_var_8(val eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.has_ore_cllw_var_8(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Compare variable-width CLLW ORE ciphertext terms\n--! @internal\n--!\n--! Three-way comparison of variable-width CLLW ORE ciphertexts. Compares the common\n--! prefix using byte-by-byte CLLW comparison, then falls back to length comparison\n--! if the common prefix is equal. Used by compare_ore_cllw_var_8 for range queries.\n--!\n--! @param a eql_v2.ore_cllw_var_8 First variable-width CLLW ORE ciphertext\n--! @param b eql_v2.ore_cllw_var_8 Second variable-width CLLW ORE ciphertext\n--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b\n--!\n--! @note Handles variable-length ciphertexts by comparing common prefix first\n--! @note Returns NULL if either input is NULL\n--!\n--! @see eql_v2.compare_ore_cllw_term_bytes\n--! 
@see eql_v2.compare_ore_cllw_var_8\nCREATE FUNCTION eql_v2.compare_ore_cllw_var_8_term(a eql_v2.ore_cllw_var_8, b eql_v2.ore_cllw_var_8)\nRETURNS int AS $$\nDECLARE\n len_a INT;\n len_b INT;\n -- length of the common part of the two bytea values\n common_len INT;\n cmp_result INT;\nBEGIN\n IF a IS NULL OR b IS NULL THEN\n RETURN NULL;\n END IF;\n\n -- Get the lengths of both bytea inputs\n len_a := LENGTH(a.bytes);\n len_b := LENGTH(b.bytes);\n\n -- Handle empty cases\n IF len_a = 0 AND len_b = 0 THEN\n RETURN 0;\n ELSIF len_a = 0 THEN\n RETURN -1;\n ELSIF len_b = 0 THEN\n RETURN 1;\n END IF;\n\n -- Find the length of the shorter bytea\n IF len_a < len_b THEN\n common_len := len_a;\n ELSE\n common_len := len_b;\n END IF;\n\n -- Use the compare_ore_cllw_term function to compare byte by byte\n cmp_result := eql_v2.compare_ore_cllw_term_bytes(\n SUBSTRING(a.bytes FROM 1 FOR common_len),\n SUBSTRING(b.bytes FROM 1 FOR common_len)\n );\n\n -- If the comparison returns 'less' or 'greater', return that result\n IF cmp_result = -1 THEN\n RETURN -1;\n ELSIF cmp_result = 1 THEN\n RETURN 1;\n END IF;\n\n -- If the bytea comparison is 'equal', compare lengths\n IF len_a < len_b THEN\n RETURN -1;\n ELSIF len_a > len_b THEN\n RETURN 1;\n ELSE\n RETURN 0;\n END IF;\nEND;\n$$ LANGUAGE plpgsql;\n\n\n\n\n\n\n--! @brief Core comparison function for encrypted values\n--!\n--! Compares two encrypted values using their index terms without decryption.\n--! This function implements all comparison operators required for btree indexing\n--! (<, <=, =, >=, >).\n--!\n--! Index terms are checked in the following priority order:\n--! 1. ore_block_u64_8_256 (Order-Revealing Encryption)\n--! 2. ore_cllw_u64_8 (Order-Revealing Encryption)\n--! 3. ore_cllw_var_8 (Order-Revealing Encryption)\n--! 4. hmac_256 (Hash-based equality)\n--! 5. blake3 (Hash-based equality)\n--!\n--! The first index term type present in both values is used for comparison.\n--! 
If no matching index terms are found, falls back to JSONB literal comparison\n--! to ensure consistent ordering (required for btree correctness).\n--!\n--! @param a eql_v2_encrypted First encrypted value\n--! @param b eql_v2_encrypted Second encrypted value\n--! @return integer -1 if a < b, 0 if a = b, 1 if a > b\n--!\n--! @note Literal fallback prevents \"lock BufferContent is not held\" errors\n--! @see eql_v2.compare_ore_block_u64_8_256\n--! @see eql_v2.compare_blake3\n--! @see eql_v2.compare_hmac_256\nCREATE FUNCTION eql_v2.compare(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS integer\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n\n IF a IS NULL AND b IS NULL THEN\n RETURN 0;\n END IF;\n\n IF a IS NULL THEN\n RETURN -1;\n END IF;\n\n IF b IS NULL THEN\n RETURN 1;\n END IF;\n\n a := eql_v2.to_ste_vec_value(a);\n b := eql_v2.to_ste_vec_value(b);\n\n IF eql_v2.has_ore_block_u64_8_256(a) AND eql_v2.has_ore_block_u64_8_256(b) THEN\n RETURN eql_v2.compare_ore_block_u64_8_256(a, b);\n END IF;\n\n IF eql_v2.has_ore_cllw_u64_8(a) AND eql_v2.has_ore_cllw_u64_8(b) THEN\n RETURN eql_v2.compare_ore_cllw_u64_8(a, b);\n END IF;\n\n IF eql_v2.has_ore_cllw_var_8(a) AND eql_v2.has_ore_cllw_var_8(b) THEN\n RETURN eql_v2.compare_ore_cllw_var_8(a, b);\n END IF;\n\n IF eql_v2.has_hmac_256(a) AND eql_v2.has_hmac_256(b) THEN\n RETURN eql_v2.compare_hmac_256(a, b);\n END IF;\n\n IF eql_v2.has_blake3(a) AND eql_v2.has_blake3(b) THEN\n RETURN eql_v2.compare_blake3(a, b);\n END IF;\n\n -- Fallback to literal comparison of the encrypted data\n -- Compare must have consistent ordering for a given state\n -- Without this text fallback, database errors with \"lock BufferContent is not held\"\n RETURN eql_v2.compare_literal(a, b);\n\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n--! @brief Convert JSONB to encrypted type\n--!\n--! Wraps a JSONB encrypted payload into the eql_v2_encrypted composite type.\n--! Used internally for type conversions and operator implementations.\n--!\n--! 
@param jsonb JSONB encrypted payload with structure: {\"c\": \"...\", \"i\": {...}, \"k\": \"...\", \"v\": \"2\"}\n--! @return eql_v2_encrypted Encrypted value wrapped in composite type\n--!\n--! @note This is primarily used for implicit casts in operator expressions\n--! @see eql_v2.to_jsonb\nCREATE FUNCTION eql_v2.to_encrypted(data jsonb)\n RETURNS public.eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\nBEGIN\n IF data IS NULL THEN\n RETURN NULL;\n END IF;\n\n RETURN ROW(data)::public.eql_v2_encrypted;\nEND;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Implicit cast from JSONB to encrypted type\n--!\n--! Enables PostgreSQL to automatically convert JSONB values to eql_v2_encrypted\n--! in assignment contexts and comparison operations.\n--!\n--! @see eql_v2.to_encrypted(jsonb)\nCREATE CAST (jsonb AS public.eql_v2_encrypted)\n\tWITH FUNCTION eql_v2.to_encrypted(jsonb) AS ASSIGNMENT;\n\n\n--! @brief Convert text to encrypted type\n--!\n--! Parses a text representation of encrypted JSONB payload and wraps it\n--! in the eql_v2_encrypted composite type.\n--!\n--! @param text Text representation of JSONB encrypted payload\n--! @return eql_v2_encrypted Encrypted value wrapped in composite type\n--!\n--! @note Delegates to eql_v2.to_encrypted(jsonb) after parsing text as JSON\n--! @see eql_v2.to_encrypted(jsonb)\nCREATE FUNCTION eql_v2.to_encrypted(data text)\n RETURNS public.eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\nBEGIN\n IF data IS NULL THEN\n RETURN NULL;\n END IF;\n\n RETURN eql_v2.to_encrypted(data::jsonb);\nEND;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Implicit cast from text to encrypted type\n--!\n--! Enables PostgreSQL to automatically convert text JSON strings to eql_v2_encrypted\n--! in assignment contexts.\n--!\n--! @see eql_v2.to_encrypted(text)\nCREATE CAST (text AS public.eql_v2_encrypted)\n\tWITH FUNCTION eql_v2.to_encrypted(text) AS ASSIGNMENT;\n\n\n\n--! @brief Convert encrypted type to JSONB\n--!\n--! 
Extracts the underlying JSONB payload from an eql_v2_encrypted composite type.\n--! Useful for debugging or when raw encrypted payload access is needed.\n--!\n--! @param e eql_v2_encrypted Encrypted value to unwrap\n--! @return jsonb Raw JSONB encrypted payload\n--!\n--! @note Returns the raw encrypted structure including ciphertext and index terms\n--! @see eql_v2.to_encrypted(jsonb)\nCREATE FUNCTION eql_v2.to_jsonb(e public.eql_v2_encrypted)\n RETURNS jsonb\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\nBEGIN\n IF e IS NULL THEN\n RETURN NULL;\n END IF;\n\n RETURN e.data;\nEND;\n$$ LANGUAGE plpgsql;\n\n--! @brief Implicit cast from encrypted type to JSONB\n--!\n--! Enables PostgreSQL to automatically extract the JSONB payload from\n--! eql_v2_encrypted values in assignment contexts.\n--!\n--! @see eql_v2.to_jsonb(eql_v2_encrypted)\nCREATE CAST (public.eql_v2_encrypted AS jsonb)\n\tWITH FUNCTION eql_v2.to_jsonb(public.eql_v2_encrypted) AS ASSIGNMENT;\n\n\n\n--! @file config/types.sql\n--! @brief Configuration state type definition\n--!\n--! Defines the ENUM type for tracking encryption configuration lifecycle states.\n--! The configuration table uses this type to manage transitions between states\n--! during setup, activation, and encryption operations.\n--!\n--! @note CREATE TYPE does not support IF NOT EXISTS, so wrapped in DO block\n--! @note Configuration data stored as JSONB directly, not as DOMAIN\n--! @see config/tables.sql\n\n\n--! @brief Configuration lifecycle state\n--!\n--! Defines valid states for encryption configurations in the eql_v2_configuration table.\n--! Configurations transition through these states during setup and activation.\n--!\n--! @note Only one configuration can be in 'active', 'pending', or 'encrypting' state at once\n--! @see config/indexes.sql for uniqueness enforcement\n--! 
@see config/tables.sql for usage in eql_v2_configuration table\nDO $$\n BEGIN\n IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'eql_v2_configuration_state') THEN\n CREATE TYPE public.eql_v2_configuration_state AS ENUM ('active', 'inactive', 'encrypting', 'pending');\n END IF;\n END\n$$;\n\n\n\n--! @brief Extract Bloom filter index term from JSONB payload\n--!\n--! Extracts the Bloom filter array from the 'bf' field of an encrypted\n--! data payload. Used internally for pattern-match queries (LIKE operator).\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return eql_v2.bloom_filter Bloom filter as smallint array\n--! @throws Exception if 'bf' field is missing when bloom_filter index is expected\n--!\n--! @see eql_v2.has_bloom_filter\n--! @see eql_v2.\"~~\"\nCREATE FUNCTION eql_v2.bloom_filter(val jsonb)\n RETURNS eql_v2.bloom_filter\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n IF val IS NULL THEN\n RETURN NULL;\n END IF;\n\n IF eql_v2.has_bloom_filter(val) THEN\n RETURN ARRAY(SELECT jsonb_array_elements(val->'bf'))::eql_v2.bloom_filter;\n END IF;\n\n RAISE 'Expected a match index (bf) value in json: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract Bloom filter index term from encrypted column value\n--!\n--! Extracts the Bloom filter from an encrypted column value by accessing\n--! its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return eql_v2.bloom_filter Bloom filter as smallint array\n--!\n--! @see eql_v2.bloom_filter(jsonb)\nCREATE FUNCTION eql_v2.bloom_filter(val eql_v2_encrypted)\n RETURNS eql_v2.bloom_filter\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (SELECT eql_v2.bloom_filter(val.data));\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if JSONB payload contains Bloom filter index term\n--!\n--! Tests whether the encrypted data payload includes a 'bf' field,\n--! indicating a Bloom filter is available for pattern-match queries.\n--!\n--! 
@param jsonb containing encrypted EQL payload\n--! @return Boolean True if 'bf' field is present and non-null\n--!\n--! @see eql_v2.bloom_filter\nCREATE FUNCTION eql_v2.has_bloom_filter(val jsonb)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN val ->> 'bf' IS NOT NULL;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if encrypted column value contains Bloom filter index term\n--!\n--! Tests whether an encrypted column value includes a Bloom filter\n--! by checking its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return Boolean True if Bloom filter is present\n--!\n--! @see eql_v2.has_bloom_filter(jsonb)\nCREATE FUNCTION eql_v2.has_bloom_filter(val eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.has_bloom_filter(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Fallback literal comparison for encrypted values\n--! @internal\n--!\n--! Compares two encrypted values by their raw JSONB representation when no\n--! suitable index terms are available. This ensures consistent ordering required\n--! for btree correctness and prevents \"lock BufferContent is not held\" errors.\n--!\n--! Used as a last resort fallback in eql_v2.compare() when encrypted values\n--! lack matching index terms (blake3, hmac_256, ore).\n--!\n--! @param a eql_v2_encrypted First encrypted value\n--! @param b eql_v2_encrypted Second encrypted value\n--! @return integer -1 if a < b, 0 if a = b, 1 if a > b\n--!\n--! @note This compares the encrypted payloads directly, not the plaintext values\n--! @note Ordering is consistent but not meaningful for range queries\n--! 
@see eql_v2.compare\nCREATE FUNCTION eql_v2.compare_literal(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS integer\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n a_data jsonb;\n b_data jsonb;\n BEGIN\n\n IF a IS NULL AND b IS NULL THEN\n RETURN 0;\n END IF;\n\n IF a IS NULL THEN\n RETURN -1;\n END IF;\n\n IF b IS NULL THEN\n RETURN 1;\n END IF;\n\n a_data := a.data;\n b_data := b.data;\n\n IF a_data < b_data THEN\n RETURN -1;\n END IF;\n\n IF a_data > b_data THEN\n RETURN 1;\n END IF;\n\n RETURN 0;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Less-than comparison helper for encrypted values\n--! @internal\n--!\n--! Internal helper that delegates to eql_v2.compare for less-than testing.\n--! Returns true if first value is less than second using ORE comparison.\n--!\n--! @param a eql_v2_encrypted First encrypted value\n--! @param b eql_v2_encrypted Second encrypted value\n--! @return Boolean True if a < b (compare result = -1)\n--!\n--! @see eql_v2.compare\n--! @see eql_v2.\"<\"\nCREATE FUNCTION eql_v2.lt(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.compare(a, b) = -1;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Less-than operator for encrypted values\n--!\n--! Implements the < operator for comparing two encrypted values using Order-Revealing\n--! Encryption (ORE) index terms. Enables range queries and sorting without decryption.\n--! Requires 'ore' index configuration on the column.\n--!\n--! @param a eql_v2_encrypted Left operand\n--! @param b eql_v2_encrypted Right operand\n--! @return Boolean True if a is less than b\n--!\n--! @example\n--! -- Range query on encrypted timestamps\n--! SELECT * FROM events\n--! WHERE encrypted_timestamp < '2024-01-01'::timestamp::text::eql_v2_encrypted;\n--!\n--! -- Compare encrypted numeric columns\n--! SELECT * FROM products WHERE encrypted_price < encrypted_discount_price;\n--!\n--! @see eql_v2.compare\n--! 
@see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.\"<\"(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.lt(a, b);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR <(\n FUNCTION=eql_v2.\"<\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=eql_v2_encrypted,\n COMMUTATOR = >,\n NEGATOR = >=,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n--! @brief Less-than operator for encrypted value and JSONB\n--!\n--! Overload of < operator accepting JSONB on the right side. Automatically\n--! casts JSONB to eql_v2_encrypted for ORE comparison.\n--!\n--! @param eql_v2_encrypted Left operand (encrypted value)\n--! @param b JSONB Right operand (will be cast to eql_v2_encrypted)\n--! @return Boolean True if a < b\n--!\n--! @example\n--! SELECT * FROM events WHERE encrypted_age < '18'::int::text::jsonb;\n--!\n--! @see eql_v2.\"<\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\"<\"(a eql_v2_encrypted, b jsonb)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.lt(a, b::eql_v2_encrypted);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR <(\n FUNCTION=eql_v2.\"<\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=jsonb,\n COMMUTATOR = >,\n NEGATOR = >=,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n--! @brief Less-than operator for JSONB and encrypted value\n--!\n--! Overload of < operator accepting JSONB on the left side. Automatically\n--! casts JSONB to eql_v2_encrypted for ORE comparison.\n--!\n--! @param a JSONB Left operand (will be cast to eql_v2_encrypted)\n--! @param eql_v2_encrypted Right operand (encrypted value)\n--! @return Boolean True if a < b\n--!\n--! @example\n--! SELECT * FROM events WHERE '2023-01-01'::date::text::jsonb < encrypted_date;\n--!\n--! 
@see eql_v2.\"<\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\"<\"(a jsonb, b eql_v2_encrypted)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.lt(a::eql_v2_encrypted, b);\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR <(\n FUNCTION=eql_v2.\"<\",\n LEFTARG=jsonb,\n RIGHTARG=eql_v2_encrypted,\n COMMUTATOR = >,\n NEGATOR = >=,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n\n\n--! @brief Less-than-or-equal comparison helper for encrypted values\n--! @internal\n--!\n--! Internal helper that delegates to eql_v2.compare for <= testing.\n--! Returns true if first value is less than or equal to second using ORE comparison.\n--!\n--! @param a eql_v2_encrypted First encrypted value\n--! @param b eql_v2_encrypted Second encrypted value\n--! @return Boolean True if a <= b (compare result <= 0)\n--!\n--! @see eql_v2.compare\n--! @see eql_v2.\"<=\"\nCREATE FUNCTION eql_v2.lte(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.compare(a, b) <= 0;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Less-than-or-equal operator for encrypted values\n--!\n--! Implements the <= operator for comparing encrypted values using ORE index terms.\n--! Enables range queries with inclusive lower bounds without decryption.\n--!\n--! @param a eql_v2_encrypted Left operand\n--! @param b eql_v2_encrypted Right operand\n--! @return Boolean True if a <= b\n--!\n--! @example\n--! -- Find records with encrypted age 18 or under\n--! SELECT * FROM users WHERE encrypted_age <= '18'::int::text::eql_v2_encrypted;\n--!\n--! @see eql_v2.compare\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.\"<=\"(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.lte(a, b);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR <=(\n FUNCTION = eql_v2.\"<=\",\n LEFTARG = eql_v2_encrypted,\n RIGHTARG = eql_v2_encrypted,\n COMMUTATOR = >=,\n NEGATOR = >,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n--! 
@brief <= operator for encrypted value and JSONB\n--! @see eql_v2.\"<=\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\"<=\"(a eql_v2_encrypted, b jsonb)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.lte(a, b::eql_v2_encrypted);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR <=(\n FUNCTION = eql_v2.\"<=\",\n LEFTARG = eql_v2_encrypted,\n RIGHTARG = jsonb,\n COMMUTATOR = >=,\n NEGATOR = >,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n--! @brief <= operator for JSONB and encrypted value\n--! @see eql_v2.\"<=\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\"<=\"(a jsonb, b eql_v2_encrypted)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.lte(a::eql_v2_encrypted, b);\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR <=(\n FUNCTION = eql_v2.\"<=\",\n LEFTARG = jsonb,\n RIGHTARG = eql_v2_encrypted,\n COMMUTATOR = >=,\n NEGATOR = >,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n\n\n--! @brief Equality comparison helper for encrypted values\n--! @internal\n--!\n--! Internal helper that delegates to eql_v2.compare for equality testing.\n--! Returns true if encrypted values are equal via encrypted index comparison.\n--!\n--! @param a eql_v2_encrypted First encrypted value\n--! @param b eql_v2_encrypted Second encrypted value\n--! @return Boolean True if values are equal (compare result = 0)\n--!\n--! @see eql_v2.compare\n--! @see eql_v2.\"=\"\nCREATE FUNCTION eql_v2.eq(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN eql_v2.compare(a, b) = 0;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Equality operator for encrypted values\n--!\n--! Implements the = operator for comparing two encrypted values using their\n--! encrypted index terms (unique/blake3). Enables WHERE clause comparisons\n--! without decryption.\n--!\n--! @param a eql_v2_encrypted Left operand\n--! @param b eql_v2_encrypted Right operand\n--! 
@return Boolean True if encrypted values are equal\n--!\n--! @example\n--! -- Compare encrypted columns\n--! SELECT * FROM users WHERE encrypted_email = other_encrypted_email;\n--!\n--! -- Search using encrypted literal\n--! SELECT * FROM users\n--! WHERE encrypted_email = '{\"c\":\"...\",\"i\":{\"unique\":\"...\"}}'::eql_v2_encrypted;\n--!\n--! @see eql_v2.compare\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.\"=\"(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN eql_v2.eq(a, b);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR = (\n FUNCTION=eql_v2.\"=\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=eql_v2_encrypted,\n NEGATOR = <>,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n--! @brief Equality operator for encrypted value and JSONB\n--!\n--! Overload of = operator accepting JSONB on the right side. Automatically\n--! casts JSONB to eql_v2_encrypted for comparison. Useful for comparing\n--! against JSONB literals or columns.\n--!\n--! @param eql_v2_encrypted Left operand (encrypted value)\n--! @param b JSONB Right operand (will be cast to eql_v2_encrypted)\n--! @return Boolean True if values are equal\n--!\n--! @example\n--! -- Compare encrypted column to JSONB literal\n--! SELECT * FROM users\n--! WHERE encrypted_email = '{\"c\":\"...\",\"i\":{\"unique\":\"...\"}}'::jsonb;\n--!\n--! @see eql_v2.\"=\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\"=\"(a eql_v2_encrypted, b jsonb)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN eql_v2.eq(a, b::eql_v2_encrypted);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR = (\n FUNCTION=eql_v2.\"=\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=jsonb,\n NEGATOR = <>,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n--! @brief Equality operator for JSONB and encrypted value\n--!\n--! Overload of = operator accepting JSONB on the left side. Automatically\n--! 
casts JSONB to eql_v2_encrypted for comparison. Enables commutative\n--! equality comparisons.\n--!\n--! @param a JSONB Left operand (will be cast to eql_v2_encrypted)\n--! @param eql_v2_encrypted Right operand (encrypted value)\n--! @return Boolean True if values are equal\n--!\n--! @example\n--! -- Compare JSONB literal to encrypted column\n--! SELECT * FROM users\n--! WHERE '{\"c\":\"...\",\"i\":{\"unique\":\"...\"}}'::jsonb = encrypted_email;\n--!\n--! @see eql_v2.\"=\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\"=\"(a jsonb, b eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN eql_v2.eq(a::eql_v2_encrypted, b);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR = (\n FUNCTION=eql_v2.\"=\",\n LEFTARG=jsonb,\n RIGHTARG=eql_v2_encrypted,\n NEGATOR = <>,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n\n--! @brief Greater-than-or-equal comparison helper for encrypted values\n--! @internal\n--!\n--! Internal helper that delegates to eql_v2.compare for >= testing.\n--! Returns true if first value is greater than or equal to second using ORE comparison.\n--!\n--! @param a eql_v2_encrypted First encrypted value\n--! @param b eql_v2_encrypted Second encrypted value\n--! @return Boolean True if a >= b (compare result >= 0)\n--!\n--! @see eql_v2.compare\n--! @see eql_v2.\">=\"\nCREATE FUNCTION eql_v2.gte(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.compare(a, b) >= 0;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Greater-than-or-equal operator for encrypted values\n--!\n--! Implements the >= operator for comparing encrypted values using ORE index terms.\n--! Enables range queries with inclusive upper bounds without decryption.\n--!\n--! @param a eql_v2_encrypted Left operand\n--! @param b eql_v2_encrypted Right operand\n--! @return Boolean True if a >= b\n--!\n--! @example\n--! -- Find records with age 18 or over\n--! 
SELECT * FROM users WHERE encrypted_age >= '18'::int::text::eql_v2_encrypted;\n--!\n--! @see eql_v2.compare\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.\">=\"(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.gte(a, b);\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR >=(\n FUNCTION = eql_v2.\">=\",\n LEFTARG = eql_v2_encrypted,\n RIGHTARG = eql_v2_encrypted,\n COMMUTATOR = <=,\n NEGATOR = <,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n--! @brief >= operator for encrypted value and JSONB\n--! @see eql_v2.\">=\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\">=\"(a eql_v2_encrypted, b jsonb)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.gte(a, b::eql_v2_encrypted);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR >=(\n FUNCTION = eql_v2.\">=\",\n LEFTARG = eql_v2_encrypted,\n RIGHTARG=jsonb,\n COMMUTATOR = <=,\n NEGATOR = <,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n--! @brief >= operator for JSONB and encrypted value\n--! @see eql_v2.\">=\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\">=\"(a jsonb, b eql_v2_encrypted)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.gte(a::eql_v2_encrypted, b);\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR >=(\n FUNCTION = eql_v2.\">=\",\n LEFTARG = jsonb,\n RIGHTARG =eql_v2_encrypted,\n COMMUTATOR = <=,\n NEGATOR = <,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n\n\n--! @brief Greater-than comparison helper for encrypted values\n--! @internal\n--!\n--! Internal helper that delegates to eql_v2.compare for greater-than testing.\n--! Returns true if first value is greater than second using ORE comparison.\n--!\n--! @param a eql_v2_encrypted First encrypted value\n--! @param b eql_v2_encrypted Second encrypted value\n--! @return Boolean True if a > b (compare result = 1)\n--!\n--! @see eql_v2.compare\n--! 
@see eql_v2.\">\"\nCREATE FUNCTION eql_v2.gt(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.compare(a, b) = 1;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Greater-than operator for encrypted values\n--!\n--! Implements the > operator for comparing encrypted values using ORE index terms.\n--! Enables range queries and sorting without decryption. Requires 'ore' index\n--! configuration on the column.\n--!\n--! @param a eql_v2_encrypted Left operand\n--! @param b eql_v2_encrypted Right operand\n--! @return Boolean True if a is greater than b\n--!\n--! @example\n--! -- Find records above threshold\n--! SELECT * FROM events\n--! WHERE encrypted_value > '100'::int::text::eql_v2_encrypted;\n--!\n--! @see eql_v2.compare\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.\">\"(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.gt(a, b);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR >(\n FUNCTION=eql_v2.\">\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=eql_v2_encrypted,\n COMMUTATOR = <,\n NEGATOR = <=,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n--! @brief > operator for encrypted value and JSONB\n--! @see eql_v2.\">\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\">\"(a eql_v2_encrypted, b jsonb)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.gt(a, b::eql_v2_encrypted);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR >(\n FUNCTION = eql_v2.\">\",\n LEFTARG = eql_v2_encrypted,\n RIGHTARG = jsonb,\n COMMUTATOR = <,\n NEGATOR = <=,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n--! @brief > operator for JSONB and encrypted value\n--! 
@see eql_v2.\">\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\">\"(a jsonb, b eql_v2_encrypted)\nRETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.gt(a::eql_v2_encrypted, b);\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR >(\n FUNCTION = eql_v2.\">\",\n LEFTARG = jsonb,\n RIGHTARG = eql_v2_encrypted,\n COMMUTATOR = <,\n NEGATOR = <=,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n\n\n\n--! @brief Extract STE vector index from JSONB payload\n--!\n--! Extracts the STE (Searchable Symmetric Encryption) vector from the 'sv' field\n--! of an encrypted data payload. Returns an array of encrypted values used for\n--! containment queries (@>, <@). If no 'sv' field exists, wraps the entire payload\n--! as a single-element array.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return eql_v2_encrypted[] Array of encrypted STE vector elements\n--!\n--! @see eql_v2.ste_vec(eql_v2_encrypted)\n--! @see eql_v2.ste_vec_contains\nCREATE FUNCTION eql_v2.ste_vec(val jsonb)\n RETURNS public.eql_v2_encrypted[]\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n sv jsonb;\n ary public.eql_v2_encrypted[];\n\tBEGIN\n\n IF val ? 'sv' THEN\n sv := val->'sv';\n ELSE\n sv := jsonb_build_array(val);\n END IF;\n\n SELECT array_agg(eql_v2.to_encrypted(elem))\n INTO ary\n FROM jsonb_array_elements(sv) AS elem;\n\n RETURN ary;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract STE vector index from encrypted column value\n--!\n--! Extracts the STE vector from an encrypted column value by accessing its\n--! underlying JSONB data field. Used for containment query operations.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return eql_v2_encrypted[] Array of encrypted STE vector elements\n--!\n--! @see eql_v2.ste_vec(jsonb)\nCREATE FUNCTION eql_v2.ste_vec(val eql_v2_encrypted)\n RETURNS public.eql_v2_encrypted[]\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (SELECT eql_v2.ste_vec(val.data));\n END;\n$$ LANGUAGE plpgsql;\n\n--! 
@brief Check if JSONB payload is a single-element STE vector\n--!\n--! Tests whether the encrypted data payload contains an 'sv' field with exactly\n--! one element. Single-element STE vectors can be treated as regular encrypted values.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return Boolean True if 'sv' field exists with exactly one element\n--!\n--! @see eql_v2.to_ste_vec_value\nCREATE FUNCTION eql_v2.is_ste_vec_value(val jsonb)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n IF val ? 'sv' THEN\n RETURN jsonb_array_length(val->'sv') = 1;\n END IF;\n\n RETURN false;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Check if encrypted column value is a single-element STE vector\n--!\n--! Tests whether an encrypted column value is a single-element STE vector\n--! by checking its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return Boolean True if value is a single-element STE vector\n--!\n--! @see eql_v2.is_ste_vec_value(jsonb)\nCREATE FUNCTION eql_v2.is_ste_vec_value(val eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.is_ste_vec_value(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Convert single-element STE vector to regular encrypted value\n--!\n--! Extracts the single element from a single-element STE vector and returns it\n--! as a regular encrypted value, preserving metadata. If the input is not a\n--! single-element STE vector, returns it unchanged.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return eql_v2_encrypted Regular encrypted value (unwrapped if single-element STE vector)\n--!\n--! 
@see eql_v2.is_ste_vec_value\nCREATE FUNCTION eql_v2.to_ste_vec_value(val jsonb)\n RETURNS eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n meta jsonb;\n sv jsonb;\n\tBEGIN\n\n IF val IS NULL THEN\n RETURN NULL;\n END IF;\n\n IF eql_v2.is_ste_vec_value(val) THEN\n meta := eql_v2.meta_data(val);\n sv := val->'sv';\n sv := sv[0];\n\n RETURN eql_v2.to_encrypted(meta || sv);\n END IF;\n\n RETURN eql_v2.to_encrypted(val);\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Convert single-element STE vector to regular encrypted value (encrypted type)\n--!\n--! Converts an encrypted column value to a regular encrypted value by unwrapping\n--! if it's a single-element STE vector.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return eql_v2_encrypted Regular encrypted value (unwrapped if single-element STE vector)\n--!\n--! @see eql_v2.to_ste_vec_value(jsonb)\nCREATE FUNCTION eql_v2.to_ste_vec_value(val eql_v2_encrypted)\n RETURNS eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.to_ste_vec_value(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Extract selector value from JSONB payload\n--!\n--! Extracts the selector ('s') field from an encrypted data payload.\n--! Selectors are used to match STE vector elements during containment queries.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return Text The selector value\n--! @throws Exception if 's' field is missing\n--!\n--! @see eql_v2.ste_vec_contains\nCREATE FUNCTION eql_v2.selector(val jsonb)\n RETURNS text\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n IF val IS NULL THEN\n RETURN NULL;\n END IF;\n\n IF val ? 's' THEN\n RETURN val->>'s';\n END IF;\n RAISE 'Expected a selector index (s) value in json: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract selector value from encrypted column value\n--!\n--! Extracts the selector from an encrypted column value by accessing its\n--! underlying JSONB data field.\n--!\n--! 
@param eql_v2_encrypted Encrypted column value\n--! @return Text The selector value\n--!\n--! @see eql_v2.selector(jsonb)\nCREATE FUNCTION eql_v2.selector(val eql_v2_encrypted)\n RETURNS text\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (SELECT eql_v2.selector(val.data));\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n--! @brief Check if JSONB payload is marked as an STE vector array\n--!\n--! Tests whether the encrypted data payload has the 'a' (array) flag set to true,\n--! indicating it represents an array for STE vector operations.\n--!\n--! @param jsonb containing encrypted EQL payload\n--! @return Boolean True if 'a' field is present and true\n--!\n--! @see eql_v2.ste_vec\nCREATE FUNCTION eql_v2.is_ste_vec_array(val jsonb)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n IF val ? 'a' THEN\n RETURN (val->>'a')::boolean;\n END IF;\n\n RETURN false;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if encrypted column value is marked as an STE vector array\n--!\n--! Tests whether an encrypted column value has the array flag set by checking\n--! its underlying JSONB data field.\n--!\n--! @param eql_v2_encrypted Encrypted column value\n--! @return Boolean True if value is marked as an STE vector array\n--!\n--! @see eql_v2.is_ste_vec_array(jsonb)\nCREATE FUNCTION eql_v2.is_ste_vec_array(val eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (SELECT eql_v2.is_ste_vec_array(val.data));\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n--! @brief Extract full encrypted JSONB elements as array\n--!\n--! Extracts all JSONB elements from the STE vector including non-deterministic fields.\n--! Use jsonb_array() instead for GIN indexing and containment queries.\n--!\n--! @param val jsonb containing encrypted EQL payload\n--! @return jsonb[] Array of full JSONB elements\n--!\n--! 
@see eql_v2.jsonb_array\nCREATE FUNCTION eql_v2.jsonb_array_from_array_elements(val jsonb)\nRETURNS jsonb[]\nIMMUTABLE STRICT PARALLEL SAFE\nLANGUAGE SQL\nAS $$\n SELECT CASE\n WHEN val ? 'sv' THEN\n ARRAY(SELECT elem FROM jsonb_array_elements(val->'sv') AS elem)\n ELSE\n ARRAY[val]\n END;\n$$;\n\n\n--! @brief Extract full encrypted JSONB elements as array from encrypted column\n--!\n--! @param val eql_v2_encrypted Encrypted column value\n--! @return jsonb[] Array of full JSONB elements\n--!\n--! @see eql_v2.jsonb_array_from_array_elements(jsonb)\nCREATE FUNCTION eql_v2.jsonb_array_from_array_elements(val eql_v2_encrypted)\nRETURNS jsonb[]\nIMMUTABLE STRICT PARALLEL SAFE\nLANGUAGE SQL\nAS $$\n SELECT eql_v2.jsonb_array_from_array_elements(val.data);\n$$;\n\n\n--! @brief Extract deterministic fields as array for GIN indexing\n--!\n--! Extracts only deterministic search term fields (s, b3, hm, ocv, ocf) from each\n--! STE vector element. Excludes non-deterministic ciphertext for correct containment\n--! comparison using PostgreSQL's native @> operator.\n--!\n--! @param val jsonb containing encrypted EQL payload\n--! @return jsonb[] Array of JSONB elements with only deterministic fields\n--!\n--! @note Use this for GIN indexes and containment queries\n--! @see eql_v2.jsonb_contains\nCREATE FUNCTION eql_v2.jsonb_array(val jsonb)\nRETURNS jsonb[]\nIMMUTABLE STRICT PARALLEL SAFE\nLANGUAGE SQL\nAS $$\n SELECT ARRAY(\n SELECT jsonb_object_agg(kv.key, kv.value)\n FROM jsonb_array_elements(\n CASE WHEN val ? 'sv' THEN val->'sv' ELSE jsonb_build_array(val) END\n ) AS elem,\n LATERAL jsonb_each(elem) AS kv(key, value)\n WHERE kv.key IN ('s', 'b3', 'hm', 'ocv', 'ocf')\n GROUP BY elem\n );\n$$;\n\n\n--! @brief Extract deterministic fields as array from encrypted column\n--!\n--! @param val eql_v2_encrypted Encrypted column value\n--! @return jsonb[] Array of JSONB elements with only deterministic fields\n--!\n--! 
@see eql_v2.jsonb_array(jsonb)\nCREATE FUNCTION eql_v2.jsonb_array(val eql_v2_encrypted)\nRETURNS jsonb[]\nIMMUTABLE STRICT PARALLEL SAFE\nLANGUAGE SQL\nAS $$\n SELECT eql_v2.jsonb_array(val.data);\n$$;\n\n\n--! @brief GIN-indexable JSONB containment check\n--!\n--! Checks if encrypted value 'a' contains all JSONB elements from 'b'.\n--! Uses jsonb[] arrays internally for native PostgreSQL GIN index support.\n--!\n--! This function is designed for use with a GIN index on jsonb_array(column).\n--! When combined with such an index, PostgreSQL can efficiently search large tables.\n--!\n--! @param a eql_v2_encrypted Container value (typically a table column)\n--! @param b eql_v2_encrypted Value to search for\n--! @return Boolean True if a contains all elements of b\n--!\n--! @example\n--! -- Create GIN index for efficient containment queries\n--! CREATE INDEX idx ON mytable USING GIN (eql_v2.jsonb_array(encrypted_col));\n--!\n--! -- Query using the helper function\n--! SELECT * FROM mytable WHERE eql_v2.jsonb_contains(encrypted_col, search_value);\n--!\n--! @see eql_v2.jsonb_array\nCREATE FUNCTION eql_v2.jsonb_contains(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean\nIMMUTABLE STRICT PARALLEL SAFE\nLANGUAGE SQL\nAS $$\n SELECT eql_v2.jsonb_array(a) @> eql_v2.jsonb_array(b);\n$$;\n\n\n--! @brief GIN-indexable JSONB containment check (encrypted, jsonb)\n--!\n--! Checks if encrypted value 'a' contains all JSONB elements from jsonb value 'b'.\n--! Uses jsonb[] arrays internally for native PostgreSQL GIN index support.\n--!\n--! @param a eql_v2_encrypted Container value (typically a table column)\n--! @param b jsonb JSONB value to search for\n--! @return Boolean True if a contains all elements of b\n--!\n--! @see eql_v2.jsonb_array\n--! 
@see eql_v2.jsonb_contains(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.jsonb_contains(a eql_v2_encrypted, b jsonb)\nRETURNS boolean\nIMMUTABLE STRICT PARALLEL SAFE\nLANGUAGE SQL\nAS $$\n SELECT eql_v2.jsonb_array(a) @> eql_v2.jsonb_array(b);\n$$;\n\n\n--! @brief GIN-indexable JSONB containment check (jsonb, encrypted)\n--!\n--! Checks if jsonb value 'a' contains all JSONB elements from encrypted value 'b'.\n--! Uses jsonb[] arrays internally for native PostgreSQL GIN index support.\n--!\n--! @param a jsonb Container JSONB value\n--! @param b eql_v2_encrypted Encrypted value to search for\n--! @return Boolean True if a contains all elements of b\n--!\n--! @see eql_v2.jsonb_array\n--! @see eql_v2.jsonb_contains(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.jsonb_contains(a jsonb, b eql_v2_encrypted)\nRETURNS boolean\nIMMUTABLE STRICT PARALLEL SAFE\nLANGUAGE SQL\nAS $$\n SELECT eql_v2.jsonb_array(a) @> eql_v2.jsonb_array(b);\n$$;\n\n\n--! @brief GIN-indexable JSONB \"is contained by\" check\n--!\n--! Checks if all JSONB elements from 'a' are contained in 'b'.\n--! Uses jsonb[] arrays internally for native PostgreSQL GIN index support.\n--!\n--! @param a eql_v2_encrypted Value to check (typically a table column)\n--! @param b eql_v2_encrypted Container value\n--! @return Boolean True if all elements of a are contained in b\n--!\n--! @see eql_v2.jsonb_array\n--! @see eql_v2.jsonb_contains\nCREATE FUNCTION eql_v2.jsonb_contained_by(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean\nIMMUTABLE STRICT PARALLEL SAFE\nLANGUAGE SQL\nAS $$\n SELECT eql_v2.jsonb_array(a) <@ eql_v2.jsonb_array(b);\n$$;\n\n\n--! @brief GIN-indexable JSONB \"is contained by\" check (encrypted, jsonb)\n--!\n--! Checks if all JSONB elements from encrypted value 'a' are contained in jsonb value 'b'.\n--! Uses jsonb[] arrays internally for native PostgreSQL GIN index support.\n--!\n--! @param a eql_v2_encrypted Value to check (typically a table column)\n--! 
@param b jsonb Container JSONB value\n--! @return Boolean True if all elements of a are contained in b\n--!\n--! @see eql_v2.jsonb_array\n--! @see eql_v2.jsonb_contained_by(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.jsonb_contained_by(a eql_v2_encrypted, b jsonb)\nRETURNS boolean\nIMMUTABLE STRICT PARALLEL SAFE\nLANGUAGE SQL\nAS $$\n SELECT eql_v2.jsonb_array(a) <@ eql_v2.jsonb_array(b);\n$$;\n\n\n--! @brief GIN-indexable JSONB \"is contained by\" check (jsonb, encrypted)\n--!\n--! Checks if all JSONB elements from jsonb value 'a' are contained in encrypted value 'b'.\n--! Uses jsonb[] arrays internally for native PostgreSQL GIN index support.\n--!\n--! @param a jsonb Value to check\n--! @param b eql_v2_encrypted Container encrypted value\n--! @return Boolean True if all elements of a are contained in b\n--!\n--! @see eql_v2.jsonb_array\n--! @see eql_v2.jsonb_contained_by(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.jsonb_contained_by(a jsonb, b eql_v2_encrypted)\nRETURNS boolean\nIMMUTABLE STRICT PARALLEL SAFE\nLANGUAGE SQL\nAS $$\n SELECT eql_v2.jsonb_array(a) <@ eql_v2.jsonb_array(b);\n$$;\n\n\n--! @brief Check if STE vector array contains a specific encrypted element\n--!\n--! Tests whether any element in the STE vector array 'a' contains the encrypted value 'b'.\n--! Matching requires both the selector and encrypted value to be equal.\n--! Used internally by ste_vec_contains(encrypted, encrypted) for array containment checks.\n--!\n--! @param eql_v2_encrypted[] STE vector array to search within\n--! @param eql_v2_encrypted Encrypted element to search for\n--! @return Boolean True if b is found in any element of a\n--!\n--! @note Compares both selector and encrypted value for match\n--!\n--! @see eql_v2.selector\n--! 
@see eql_v2.ste_vec_contains(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.ste_vec_contains(a public.eql_v2_encrypted[], b eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n result boolean;\n _a public.eql_v2_encrypted;\n BEGIN\n\n result := false;\n\n FOR idx IN 1..array_length(a, 1) LOOP\n _a := a[idx];\n result := result OR (eql_v2.selector(_a) = eql_v2.selector(b) AND _a = b);\n END LOOP;\n\n RETURN result;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check if encrypted value 'a' contains all elements of encrypted value 'b'\n--!\n--! Performs STE vector containment comparison between two encrypted values.\n--! Returns true if all elements in b's STE vector are found in a's STE vector.\n--! Used internally by the @> containment operator for searchable encryption.\n--!\n--! @param a eql_v2_encrypted First encrypted value (container)\n--! @param b eql_v2_encrypted Second encrypted value (elements to find)\n--! @return Boolean True if all elements of b are contained in a\n--!\n--! @note Empty b is always contained in any a\n--! @note Each element of b must match both selector and value in a\n--!\n--! @see eql_v2.ste_vec\n--! @see eql_v2.ste_vec_contains(eql_v2_encrypted[], eql_v2_encrypted)\n--! 
@see eql_v2.\"@>\"\nCREATE FUNCTION eql_v2.ste_vec_contains(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n result boolean;\n sv_a public.eql_v2_encrypted[];\n sv_b public.eql_v2_encrypted[];\n _b public.eql_v2_encrypted;\n BEGIN\n\n -- jsonb arrays of ste_vec encrypted values\n sv_a := eql_v2.ste_vec(a);\n sv_b := eql_v2.ste_vec(b);\n\n -- an empty b is always contained in a\n IF array_length(sv_b, 1) IS NULL THEN\n RETURN true;\n END IF;\n\n IF array_length(sv_a, 1) IS NULL THEN\n RETURN false;\n END IF;\n\n result := true;\n\n -- for each element of b check if it is in a\n FOR idx IN 1..array_length(sv_b, 1) LOOP\n _b := sv_b[idx];\n result := result AND eql_v2.ste_vec_contains(sv_a, _b);\n END LOOP;\n\n RETURN result;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @file config/tables.sql\n--! @brief Encryption configuration storage table\n--!\n--! Defines the main table for storing EQL v2 encryption configurations.\n--! Each row represents a configuration specifying which tables/columns to encrypt\n--! and what index types to use. Configurations progress through lifecycle states.\n--!\n--! @see config/types.sql for state ENUM definition\n--! @see config/indexes.sql for state uniqueness constraints\n--! @see config/constraints.sql for data validation\n\n\n--! @brief Encryption configuration table\n--!\n--! Stores encryption configurations with their state and metadata.\n--! The 'data' JSONB column contains the full configuration structure including\n--! table/column mappings, index types, and casting rules.\n--!\n--! @note Only one configuration can be 'active', 'pending', or 'encrypting' at once\n--! @note 'id' is auto-generated identity column\n--! @note 'state' defaults to 'pending' for new configurations\n--! 
@note 'data' validated by CHECK constraint (see config/constraints.sql)\nCREATE TABLE IF NOT EXISTS public.eql_v2_configuration\n(\n id bigint GENERATED ALWAYS AS IDENTITY,\n state eql_v2_configuration_state NOT NULL DEFAULT 'pending',\n data jsonb,\n created_at timestamptz not null default current_timestamp,\n PRIMARY KEY(id)\n);\n\n\n--! @brief Initialize default configuration structure\n--! @internal\n--!\n--! Creates a default configuration object if input is NULL. Used internally\n--! by public configuration functions to ensure consistent structure.\n--!\n--! @param config JSONB Existing configuration or NULL\n--! @return JSONB Configuration with default structure (version 1, empty tables)\nCREATE FUNCTION eql_v2.config_default(config jsonb)\n RETURNS jsonb\n IMMUTABLE PARALLEL SAFE\nAS $$\n BEGIN\n IF config IS NULL THEN\n SELECT jsonb_build_object('v', 1, 'tables', jsonb_build_object()) INTO config;\n END IF;\n RETURN config;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Add table to configuration if not present\n--! @internal\n--!\n--! Ensures the specified table exists in the configuration structure.\n--! Creates empty table entry if needed. Idempotent operation.\n--!\n--! @param table_name Text Name of table to add\n--! @param config JSONB Configuration object\n--! @return JSONB Updated configuration with table entry\nCREATE FUNCTION eql_v2.config_add_table(table_name text, config jsonb)\n RETURNS jsonb\n IMMUTABLE PARALLEL SAFE\nAS $$\n DECLARE\n tbl jsonb;\n BEGIN\n IF NOT config #> array['tables'] ? table_name THEN\n SELECT jsonb_insert(config, array['tables', table_name], jsonb_build_object()) INTO config;\n END IF;\n RETURN config;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Add column to table configuration if not present\n--! @internal\n--!\n--! Ensures the specified column exists in the table's configuration structure.\n--! Creates empty column entry with indexes object if needed. Idempotent operation.\n--!\n--! 
@param table_name Text Name of parent table\n--! @param column_name Text Name of column to add\n--! @param config JSONB Configuration object\n--! @return JSONB Updated configuration with column entry\nCREATE FUNCTION eql_v2.config_add_column(table_name text, column_name text, config jsonb)\n RETURNS jsonb\n IMMUTABLE PARALLEL SAFE\nAS $$\n DECLARE\n col jsonb;\n BEGIN\n IF NOT config #> array['tables', table_name] ? column_name THEN\n SELECT jsonb_build_object('indexes', jsonb_build_object()) into col;\n SELECT jsonb_set(config, array['tables', table_name, column_name], col) INTO config;\n END IF;\n RETURN config;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Set cast type for column in configuration\n--! @internal\n--!\n--! Updates the cast_as field for a column, specifying the PostgreSQL type\n--! that decrypted values should be cast to.\n--!\n--! @param table_name Text Name of parent table\n--! @param column_name Text Name of column\n--! @param cast_as Text PostgreSQL type for casting (e.g., 'text', 'int', 'jsonb')\n--! @param config JSONB Configuration object\n--! @return JSONB Updated configuration with cast_as set\nCREATE FUNCTION eql_v2.config_add_cast(table_name text, column_name text, cast_as text, config jsonb)\n RETURNS jsonb\n IMMUTABLE PARALLEL SAFE\nAS $$\n BEGIN\n SELECT jsonb_set(config, array['tables', table_name, column_name, 'cast_as'], to_jsonb(cast_as)) INTO config;\n RETURN config;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Add search index to column configuration\n--! @internal\n--!\n--! Inserts a search index entry (unique, match, ore, ste_vec) with its options\n--! into the column's indexes object.\n--!\n--! @param table_name Text Name of parent table\n--! @param column_name Text Name of column\n--! @param index_name Text Type of index to add\n--! @param opts JSONB Index-specific options\n--! @param config JSONB Configuration object\n--! 
@return JSONB Updated configuration with index added\nCREATE FUNCTION eql_v2.config_add_index(table_name text, column_name text, index_name text, opts jsonb, config jsonb)\n RETURNS jsonb\n IMMUTABLE PARALLEL SAFE\nAS $$\n BEGIN\n SELECT jsonb_insert(config, array['tables', table_name, column_name, 'indexes', index_name], opts) INTO config;\n RETURN config;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Generate default options for match index\n--! @internal\n--!\n--! Returns default configuration for match (LIKE) indexes: k=6, bf=2048,\n--! ngram tokenizer with token_length=3, downcase filter, include_original=true.\n--!\n--! @return JSONB Default match index options\nCREATE FUNCTION eql_v2.config_match_default()\n RETURNS jsonb\nLANGUAGE sql STRICT PARALLEL SAFE\nBEGIN ATOMIC\n SELECT jsonb_build_object(\n 'k', 6,\n 'bf', 2048,\n 'include_original', true,\n 'tokenizer', json_build_object('kind', 'ngram', 'token_length', 3),\n 'token_filters', json_build_array(json_build_object('kind', 'downcase')));\nEND;\n-- AUTOMATICALLY GENERATED FILE\n-- Source is version-template.sql\n\nDROP FUNCTION IF EXISTS eql_v2.version();\n\n--! @file version.sql\n--! @brief EQL version reporting\n--!\n--! This file is auto-generated from version.template during build.\n--! The version string placeholder is replaced with the actual release version.\n\n--! @brief Get EQL library version string\n--!\n--! Returns the version string for the installed EQL library.\n--! This value is set at build time from the project version.\n--!\n--! @return text Version string (e.g., \"2.1.0\" or \"DEV\" for development builds)\n--!\n--! @note Auto-generated during build from version.template\n--!\n--! @example\n--! -- Check installed EQL version\n--! SELECT eql_v2.version();\n--! -- Returns: '2.1.0'\nCREATE FUNCTION eql_v2.version()\n RETURNS text\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n SELECT 'eql-2.2.1';\n$$ LANGUAGE SQL;\n\n\n\n--! 
@brief Compare two encrypted values using variable-width CLLW ORE index terms\n--!\n--! Performs a three-way comparison (returns -1/0/1) of encrypted values using\n--! their variable-width CLLW ORE ciphertext index terms. Used internally by range operators\n--! (<, <=, >, >=) for order-revealing comparisons without decryption.\n--!\n--! @param a eql_v2_encrypted First encrypted value to compare\n--! @param b eql_v2_encrypted Second encrypted value to compare\n--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b\n--!\n--! @note NULL values are sorted before non-NULL values\n--! @note Uses variable-width CLLW ORE cryptographic protocol for secure comparisons\n--!\n--! @see eql_v2.ore_cllw_var_8\n--! @see eql_v2.has_ore_cllw_var_8\n--! @see eql_v2.compare_ore_cllw_var_8_term\n--! @see eql_v2.\"<\"\n--! @see eql_v2.\">\"\nCREATE FUNCTION eql_v2.compare_ore_cllw_var_8(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS integer\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n a_term eql_v2.ore_cllw_var_8;\n b_term eql_v2.ore_cllw_var_8;\n BEGIN\n\n -- PERFORM eql_v2.log('eql_v2.compare_ore_cllw_var_8');\n -- PERFORM eql_v2.log('a', a::text);\n -- PERFORM eql_v2.log('b', b::text);\n\n IF a IS NULL AND b IS NULL THEN\n RETURN 0;\n END IF;\n\n IF a IS NULL THEN\n RETURN -1;\n END IF;\n\n IF b IS NULL THEN\n RETURN 1;\n END IF;\n\n IF eql_v2.has_ore_cllw_var_8(a) THEN\n a_term := eql_v2.ore_cllw_var_8(a);\n END IF;\n\n IF eql_v2.has_ore_cllw_var_8(a) THEN\n b_term := eql_v2.ore_cllw_var_8(b);\n END IF;\n\n IF a_term IS NULL AND b_term IS NULL THEN\n RETURN 0;\n END IF;\n\n IF a_term IS NULL THEN\n RETURN -1;\n END IF;\n\n IF b_term IS NULL THEN\n RETURN 1;\n END IF;\n\n RETURN eql_v2.compare_ore_cllw_var_8_term(a_term, b_term);\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n--! @brief Compare two encrypted values using CLLW ORE index terms\n--!\n--! Performs a three-way comparison (returns -1/0/1) of encrypted values using\n--! their CLLW ORE ciphertext index terms. 
Used internally by range operators\n--! (<, <=, >, >=) for order-revealing comparisons without decryption.\n--!\n--! @param a eql_v2_encrypted First encrypted value to compare\n--! @param b eql_v2_encrypted Second encrypted value to compare\n--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b\n--!\n--! @note NULL values are sorted before non-NULL values\n--! @note Uses CLLW ORE cryptographic protocol for secure comparisons\n--!\n--! @see eql_v2.ore_cllw_u64_8\n--! @see eql_v2.has_ore_cllw_u64_8\n--! @see eql_v2.compare_ore_cllw_term_bytes\n--! @see eql_v2.\"<\"\n--! @see eql_v2.\">\"\nCREATE FUNCTION eql_v2.compare_ore_cllw_u64_8(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS integer\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n a_term eql_v2.ore_cllw_u64_8;\n b_term eql_v2.ore_cllw_u64_8;\n BEGIN\n\n -- PERFORM eql_v2.log('eql_v2.compare_ore_cllw_u64_8');\n -- PERFORM eql_v2.log('a', a::text);\n -- PERFORM eql_v2.log('b', b::text);\n\n IF a IS NULL AND b IS NULL THEN\n RETURN 0;\n END IF;\n\n IF a IS NULL THEN\n RETURN -1;\n END IF;\n\n IF b IS NULL THEN\n RETURN 1;\n END IF;\n\n IF eql_v2.has_ore_cllw_u64_8(a) THEN\n a_term := eql_v2.ore_cllw_u64_8(a);\n END IF;\n\n IF eql_v2.has_ore_cllw_u64_8(a) THEN\n b_term := eql_v2.ore_cllw_u64_8(b);\n END IF;\n\n IF a_term IS NULL AND b_term IS NULL THEN\n RETURN 0;\n END IF;\n\n IF a_term IS NULL THEN\n RETURN -1;\n END IF;\n\n IF b_term IS NULL THEN\n RETURN 1;\n END IF;\n\n RETURN eql_v2.compare_ore_cllw_term_bytes(a_term.bytes, b_term.bytes);\n END;\n$$ LANGUAGE plpgsql;\n\n-- NOTE FILE IS DISABLED\n\n\n--! @brief Equality operator for ORE block types\n--! @internal\n--!\n--! Implements the = operator for direct ORE block comparisons.\n--!\n--! @param a eql_v2.ore_block_u64_8_256 Left operand\n--! @param b eql_v2.ore_block_u64_8_256 Right operand\n--! @return Boolean True if ORE blocks are equal\n--!\n--! @note FILE IS DISABLED - Not included in build\n--! 
@see eql_v2.compare_ore_block_u64_8_256_terms\nCREATE FUNCTION eql_v2.ore_block_u64_8_256_eq(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256)\nRETURNS boolean AS $$\n SELECT eql_v2.compare_ore_block_u64_8_256_terms(a, b) = 0\n$$ LANGUAGE SQL;\n\n\n\n--! @brief Not equal operator for ORE block types\n--! @internal\n--!\n--! Implements the <> operator for direct ORE block comparisons.\n--!\n--! @param a eql_v2.ore_block_u64_8_256 Left operand\n--! @param b eql_v2.ore_block_u64_8_256 Right operand\n--! @return Boolean True if ORE blocks are not equal\n--!\n--! @note FILE IS DISABLED - Not included in build\n--! @see eql_v2.compare_ore_block_u64_8_256_terms\nCREATE FUNCTION eql_v2.ore_block_u64_8_256_neq(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256)\nRETURNS boolean AS $$\n SELECT eql_v2.compare_ore_block_u64_8_256_terms(a, b) <> 0\n$$ LANGUAGE SQL;\n\n\n\n--! @brief Less than operator for ORE block types\n--! @internal\n--!\n--! Implements the < operator for direct ORE block comparisons.\n--!\n--! @param a eql_v2.ore_block_u64_8_256 Left operand\n--! @param b eql_v2.ore_block_u64_8_256 Right operand\n--! @return Boolean True if left operand is less than right operand\n--!\n--! @note FILE IS DISABLED - Not included in build\n--! @see eql_v2.compare_ore_block_u64_8_256_terms\nCREATE FUNCTION eql_v2.ore_block_u64_8_256_lt(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256)\nRETURNS boolean AS $$\n SELECT eql_v2.compare_ore_block_u64_8_256_terms(a, b) = -1\n$$ LANGUAGE SQL;\n\n\n\n--! @brief Less than or equal operator for ORE block types\n--! @internal\n--!\n--! Implements the <= operator for direct ORE block comparisons.\n--!\n--! @param a eql_v2.ore_block_u64_8_256 Left operand\n--! @param b eql_v2.ore_block_u64_8_256 Right operand\n--! @return Boolean True if left operand is less than or equal to right operand\n--!\n--! @note FILE IS DISABLED - Not included in build\n--! 
@see eql_v2.compare_ore_block_u64_8_256_terms\nCREATE FUNCTION eql_v2.ore_block_u64_8_256_lte(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256)\nRETURNS boolean AS $$\n SELECT eql_v2.compare_ore_block_u64_8_256_terms(a, b) != 1\n$$ LANGUAGE SQL;\n\n\n\n--! @brief Greater than operator for ORE block types\n--! @internal\n--!\n--! Implements the > operator for direct ORE block comparisons.\n--!\n--! @param a eql_v2.ore_block_u64_8_256 Left operand\n--! @param b eql_v2.ore_block_u64_8_256 Right operand\n--! @return Boolean True if left operand is greater than right operand\n--!\n--! @note FILE IS DISABLED - Not included in build\n--! @see eql_v2.compare_ore_block_u64_8_256_terms\nCREATE FUNCTION eql_v2.ore_block_u64_8_256_gt(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256)\nRETURNS boolean AS $$\n SELECT eql_v2.compare_ore_block_u64_8_256_terms(a, b) = 1\n$$ LANGUAGE SQL;\n\n\n\n--! @brief Greater than or equal operator for ORE block types\n--! @internal\n--!\n--! Implements the >= operator for direct ORE block comparisons.\n--!\n--! @param a eql_v2.ore_block_u64_8_256 Left operand\n--! @param b eql_v2.ore_block_u64_8_256 Right operand\n--! @return Boolean True if left operand is greater than or equal to right operand\n--!\n--! @note FILE IS DISABLED - Not included in build\n--! @see eql_v2.compare_ore_block_u64_8_256_terms\nCREATE FUNCTION eql_v2.ore_block_u64_8_256_gte(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256)\nRETURNS boolean AS $$\n SELECT eql_v2.compare_ore_block_u64_8_256_terms(a, b) != -1\n$$ LANGUAGE SQL;\n\n\n\n--! @brief = operator for ORE block types\n--! @note FILE IS DISABLED - Not included in build\nCREATE OPERATOR = (\n FUNCTION=eql_v2.ore_block_u64_8_256_eq,\n LEFTARG=eql_v2.ore_block_u64_8_256,\n RIGHTARG=eql_v2.ore_block_u64_8_256,\n NEGATOR = <>,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n\n\n--! @brief <> operator for ORE block types\n--! 
@note FILE IS DISABLED - Not included in build\nCREATE OPERATOR <> (\n FUNCTION=eql_v2.ore_block_u64_8_256_neq,\n LEFTARG=eql_v2.ore_block_u64_8_256,\n RIGHTARG=eql_v2.ore_block_u64_8_256,\n NEGATOR = =,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n\n--! @brief > operator for ORE block types\n--! @note FILE IS DISABLED - Not included in build\nCREATE OPERATOR > (\n FUNCTION=eql_v2.ore_block_u64_8_256_gt,\n LEFTARG=eql_v2.ore_block_u64_8_256,\n RIGHTARG=eql_v2.ore_block_u64_8_256,\n COMMUTATOR = <,\n NEGATOR = <=,\n RESTRICT = scalargtsel,\n JOIN = scalargtjoinsel\n);\n\n\n\n--! @brief < operator for ORE block types\n--! @note FILE IS DISABLED - Not included in build\nCREATE OPERATOR < (\n FUNCTION=eql_v2.ore_block_u64_8_256_lt,\n LEFTARG=eql_v2.ore_block_u64_8_256,\n RIGHTARG=eql_v2.ore_block_u64_8_256,\n COMMUTATOR = >,\n NEGATOR = >=,\n RESTRICT = scalarltsel,\n JOIN = scalarltjoinsel\n);\n\n\n\n--! @brief <= operator for ORE block types\n--! @note FILE IS DISABLED - Not included in build\nCREATE OPERATOR <= (\n FUNCTION=eql_v2.ore_block_u64_8_256_lte,\n LEFTARG=eql_v2.ore_block_u64_8_256,\n RIGHTARG=eql_v2.ore_block_u64_8_256,\n COMMUTATOR = >=,\n NEGATOR = >,\n RESTRICT = scalarlesel,\n JOIN = scalarlejoinsel\n);\n\n\n\n--! @brief >= operator for ORE block types\n--! @note FILE IS DISABLED - Not included in build\nCREATE OPERATOR >= (\n FUNCTION=eql_v2.ore_block_u64_8_256_gte,\n LEFTARG=eql_v2.ore_block_u64_8_256,\n RIGHTARG=eql_v2.ore_block_u64_8_256,\n COMMUTATOR = <=,\n NEGATOR = <,\n RESTRICT = scalarlesel,\n JOIN = scalarlejoinsel\n);\n-- NOTE FILE IS DISABLED\n\n\n\n--! @brief B-tree operator family for ORE block types\n--!\n--! Defines the operator family for creating B-tree indexes on ORE block types.\n--!\n--! @note FILE IS DISABLED - Not included in build\n--! @see eql_v2.ore_block_u64_8_256_operator_class\nCREATE OPERATOR FAMILY eql_v2.ore_block_u64_8_256_operator_family USING btree;\n\n--! 
@brief B-tree operator class for ORE block encrypted values\n--!\n--! Defines the operator class required for creating B-tree indexes on columns\n--! using the ore_block_u64_8_256 type. Enables range queries and ORDER BY on\n--! ORE-encrypted data without decryption.\n--!\n--! Supports operators: <, <=, =, >=, >\n--! Uses comparison function: compare_ore_block_u64_8_256_terms\n--!\n--! @note FILE IS DISABLED - Not included in build\n--!\n--! @example\n--! -- Would be used like (if enabled):\n--! CREATE INDEX ON events USING btree (\n--! (encrypted_timestamp::jsonb->'ob')::eql_v2.ore_block_u64_8_256\n--! );\n--!\n--! @see CREATE OPERATOR CLASS in PostgreSQL documentation\n--! @see eql_v2.compare_ore_block_u64_8_256_terms\nCREATE OPERATOR CLASS eql_v2.ore_block_u64_8_256_operator_class DEFAULT FOR TYPE eql_v2.ore_block_u64_8_256 USING btree FAMILY eql_v2.ore_block_u64_8_256_operator_family AS\n OPERATOR 1 <,\n OPERATOR 2 <=,\n OPERATOR 3 =,\n OPERATOR 4 >=,\n OPERATOR 5 >,\n FUNCTION 1 eql_v2.compare_ore_block_u64_8_256_terms(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256);\n\n\n--! @brief Compare two encrypted values using ORE block index terms\n--!\n--! Performs a three-way comparison (returns -1/0/1) of encrypted values using\n--! their ORE block index terms. Used internally by range operators (<, <=, >, >=)\n--! for order-revealing comparisons without decryption.\n--!\n--! @param a eql_v2_encrypted First encrypted value to compare\n--! @param b eql_v2_encrypted Second encrypted value to compare\n--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b\n--!\n--! @note NULL values are sorted before non-NULL values\n--! @note Uses ORE cryptographic protocol for secure comparisons\n--!\n--! @see eql_v2.ore_block_u64_8_256\n--! @see eql_v2.has_ore_block_u64_8_256\n--! @see eql_v2.\"<\"\n--! 
@see eql_v2.\">\"\nCREATE FUNCTION eql_v2.compare_ore_block_u64_8_256(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS integer\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n a_term eql_v2.ore_block_u64_8_256;\n b_term eql_v2.ore_block_u64_8_256;\n BEGIN\n\n IF a IS NULL AND b IS NULL THEN\n RETURN 0;\n END IF;\n\n IF a IS NULL THEN\n RETURN -1;\n END IF;\n\n IF b IS NULL THEN\n RETURN 1;\n END IF;\n\n IF eql_v2.has_ore_block_u64_8_256(a) THEN\n a_term := eql_v2.ore_block_u64_8_256(a);\n END IF;\n\n IF eql_v2.has_ore_block_u64_8_256(b) THEN\n b_term := eql_v2.ore_block_u64_8_256(b);\n END IF;\n\n IF a_term IS NULL AND b_term IS NULL THEN\n RETURN 0;\n END IF;\n\n IF a_term IS NULL THEN\n RETURN -1;\n END IF;\n\n IF b_term IS NULL THEN\n RETURN 1;\n END IF;\n\n RETURN eql_v2.compare_ore_block_u64_8_256_terms(a_term.terms, b_term.terms);\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Cast text to ORE block term\n--! @internal\n--!\n--! Converts text to bytea and wraps in ore_block_u64_8_256_term type.\n--! Used internally for ORE block extraction and manipulation.\n--!\n--! @param t Text Text value to convert\n--! @return eql_v2.ore_block_u64_8_256_term ORE term containing bytea representation\n--!\n--! @see eql_v2.ore_block_u64_8_256_term\nCREATE FUNCTION eql_v2.text_to_ore_block_u64_8_256_term(t text)\n RETURNS eql_v2.ore_block_u64_8_256_term\n LANGUAGE sql IMMUTABLE STRICT PARALLEL SAFE\nBEGIN ATOMIC\n\tRETURN t::bytea;\nEND;\n\n--! @brief Implicit cast from text to ORE block term\n--!\n--! Defines an implicit cast allowing automatic conversion of text values\n--! to ore_block_u64_8_256_term type for ORE operations.\n--!\n--! @see eql_v2.text_to_ore_block_u64_8_256_term\nCREATE CAST (text AS eql_v2.ore_block_u64_8_256_term)\n\tWITH FUNCTION eql_v2.text_to_ore_block_u64_8_256_term(text) AS IMPLICIT;\n\n--! @brief Pattern matching helper using bloom filters\n--! @internal\n--!\n--! Internal helper for LIKE-style pattern matching on encrypted values.\n--! 
Uses bloom filter index terms to test substring containment without decryption.\n--! Requires 'match' index configuration on the column.\n--!\n--! @param a eql_v2_encrypted Haystack (value to search in)\n--! @param b eql_v2_encrypted Needle (pattern to search for)\n--! @return Boolean True if bloom filter of a contains bloom filter of b\n--!\n--! @see eql_v2.\"~~\"\n--! @see eql_v2.bloom_filter\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.like(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean AS $$\n SELECT eql_v2.bloom_filter(a) @> eql_v2.bloom_filter(b);\n$$ LANGUAGE SQL;\n\n--! @brief Case-insensitive pattern matching helper\n--! @internal\n--!\n--! Internal helper for ILIKE-style case-insensitive pattern matching.\n--! Case sensitivity is controlled by index configuration (token_filters with downcase).\n--! This function has same implementation as like() - actual case handling is in index terms.\n--!\n--! @param a eql_v2_encrypted Haystack (value to search in)\n--! @param b eql_v2_encrypted Needle (pattern to search for)\n--! @return Boolean True if bloom filter of a contains bloom filter of b\n--!\n--! @note Case sensitivity depends on match index token_filters configuration\n--! @see eql_v2.\"~~\"\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.ilike(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean AS $$\n SELECT eql_v2.bloom_filter(a) @> eql_v2.bloom_filter(b);\n$$ LANGUAGE SQL;\n\n--! @brief LIKE operator for encrypted values (pattern matching)\n--!\n--! Implements the ~~ (LIKE) operator for substring/pattern matching on encrypted\n--! text using bloom filter index terms. Enables WHERE col LIKE '%pattern%' queries\n--! without decryption. Requires 'match' index configuration on the column.\n--!\n--! Pattern matching uses n-gram tokenization configured in match index. Token length\n--! and filters affect matching behavior.\n--!\n--! @param a eql_v2_encrypted Haystack (encrypted text to search in)\n--! 
@param b eql_v2_encrypted Needle (encrypted pattern to search for)\n--! @return Boolean True if a contains b as substring\n--!\n--! @example\n--! -- Search for substring in encrypted email\n--! SELECT * FROM users\n--! WHERE encrypted_email ~~ '%@example.com%'::text::eql_v2_encrypted;\n--!\n--! -- Pattern matching on encrypted names\n--! SELECT * FROM customers\n--! WHERE encrypted_name ~~ 'John%'::text::eql_v2_encrypted;\n--!\n--! @brief SQL LIKE operator (~~ operator) for encrypted text pattern matching\n--!\n--! @param a eql_v2_encrypted Left operand (encrypted value)\n--! @param b eql_v2_encrypted Right operand (encrypted pattern)\n--! @return boolean True if pattern matches\n--!\n--! @note Requires match index: eql_v2.add_search_config(table, column, 'match')\n--! @see eql_v2.like\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.\"~~\"(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.like(a, b);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR ~~(\n FUNCTION=eql_v2.\"~~\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=eql_v2_encrypted,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n--! @brief Case-insensitive LIKE operator (~~*)\n--!\n--! Implements ~~* (ILIKE) operator for case-insensitive pattern matching.\n--! Case handling depends on match index token_filters configuration (use downcase filter).\n--! Same implementation as ~~, with case sensitivity controlled by index configuration.\n--!\n--! @param a eql_v2_encrypted Haystack\n--! @param b eql_v2_encrypted Needle\n--! @return Boolean True if a contains b (case-insensitive)\n--!\n--! @note Configure match index with downcase token filter for case-insensitivity\n--! @see eql_v2.\"~~\"\nCREATE OPERATOR ~~*(\n FUNCTION=eql_v2.\"~~\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=eql_v2_encrypted,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n--! @brief LIKE operator for encrypted value and JSONB\n--!\n--! 
Overload of ~~ operator accepting JSONB on the right side. Automatically\n--! casts JSONB to eql_v2_encrypted for bloom filter pattern matching.\n--!\n--! @param eql_v2_encrypted Haystack (encrypted value)\n--! @param b JSONB Needle (will be cast to eql_v2_encrypted)\n--! @return Boolean True if a contains b as substring\n--!\n--! @example\n--! SELECT * FROM users WHERE encrypted_email ~~ '%gmail%'::jsonb;\n--!\n--! @see eql_v2.\"~~\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\"~~\"(a eql_v2_encrypted, b jsonb)\n RETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.like(a, b::eql_v2_encrypted);\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR ~~(\n FUNCTION=eql_v2.\"~~\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=jsonb,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\nCREATE OPERATOR ~~*(\n FUNCTION=eql_v2.\"~~\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=jsonb,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n--! @brief LIKE operator for JSONB and encrypted value\n--!\n--! Overload of ~~ operator accepting JSONB on the left side. Automatically\n--! casts JSONB to eql_v2_encrypted for bloom filter pattern matching.\n--!\n--! @param a JSONB Haystack (will be cast to eql_v2_encrypted)\n--! @param eql_v2_encrypted Needle (encrypted pattern)\n--! @return Boolean True if a contains b as substring\n--!\n--! @example\n--! SELECT * FROM users WHERE 'test@example.com'::jsonb ~~ encrypted_pattern;\n--!\n--! 
@see eql_v2.\"~~\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\"~~\"(a jsonb, b eql_v2_encrypted)\n RETURNS boolean\nAS $$\n BEGIN\n RETURN eql_v2.like(a::eql_v2_encrypted, b);\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR ~~(\n FUNCTION=eql_v2.\"~~\",\n LEFTARG=jsonb,\n RIGHTARG=eql_v2_encrypted,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\nCREATE OPERATOR ~~*(\n FUNCTION=eql_v2.\"~~\",\n LEFTARG=jsonb,\n RIGHTARG=eql_v2_encrypted,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n\n-- -----------------------------------------------------------------------------\n\n--! @brief Extract ORE index term for ordering encrypted values\n--!\n--! Helper function that extracts the ore_block_u64_8_256 index term from an encrypted value\n--! for use in ORDER BY clauses when comparison operators are not appropriate or available.\n--!\n--! @param eql_v2_encrypted Encrypted value to extract order term from\n--! @return eql_v2.ore_block_u64_8_256 ORE index term for ordering\n--!\n--! @example\n--! -- Order encrypted values without using comparison operators\n--! SELECT * FROM users ORDER BY eql_v2.order_by(encrypted_age);\n--!\n--! @note Requires 'ore' index configuration on the column\n--! @see eql_v2.ore_block_u64_8_256\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.order_by(a eql_v2_encrypted)\n RETURNS eql_v2.ore_block_u64_8_256\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN eql_v2.ore_block_u64_8_256(a);\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n\n--! @brief PostgreSQL operator class definitions for encrypted value indexing\n--!\n--! Defines the operator family and operator class required for btree indexing\n--! of encrypted values. This enables PostgreSQL to use encrypted columns in:\n--! - CREATE INDEX statements\n--! - ORDER BY clauses\n--! - Range queries\n--! - Primary key constraints\n--!\n--! The operator class maps the five comparison operators (<, <=, =, >=, >)\n--! 
to the eql_v2.compare() support function for btree index operations.\n--!\n--! @note This is the default operator class for eql_v2_encrypted type\n--! @see eql_v2.compare\n--! @see PostgreSQL documentation on operator classes\n\n--------------------\n\nCREATE OPERATOR FAMILY eql_v2.encrypted_operator_family USING btree;\n\nCREATE OPERATOR CLASS eql_v2.encrypted_operator_class DEFAULT FOR TYPE eql_v2_encrypted USING btree FAMILY eql_v2.encrypted_operator_family AS\n OPERATOR 1 <,\n OPERATOR 2 <=,\n OPERATOR 3 =,\n OPERATOR 4 >=,\n OPERATOR 5 >,\n FUNCTION 1 eql_v2.compare(a eql_v2_encrypted, b eql_v2_encrypted);\n\n\n--------------------\n\n-- CREATE OPERATOR FAMILY eql_v2.encrypted_operator_ordered USING btree;\n\n-- CREATE OPERATOR CLASS eql_v2.encrypted_operator_ordered FOR TYPE eql_v2_encrypted USING btree FAMILY eql_v2.encrypted_operator_ordered AS\n-- OPERATOR 1 <,\n-- OPERATOR 2 <=,\n-- OPERATOR 3 =,\n-- OPERATOR 4 >=,\n-- OPERATOR 5 >,\n-- FUNCTION 1 eql_v2.compare_ore_block_u64_8_256(a eql_v2_encrypted, b eql_v2_encrypted);\n\n--------------------\n\n-- CREATE OPERATOR FAMILY eql_v2.encrypted_hmac_256_operator USING btree;\n\n-- CREATE OPERATOR CLASS eql_v2.encrypted_hmac_256_operator FOR TYPE eql_v2_encrypted USING btree FAMILY eql_v2.encrypted_hmac_256_operator AS\n-- OPERATOR 1 <,\n-- OPERATOR 2 <=,\n-- OPERATOR 3 =,\n-- OPERATOR 4 >=,\n-- OPERATOR 5 >,\n-- FUNCTION 1 eql_v2.compare_hmac(a eql_v2_encrypted, b eql_v2_encrypted);\n\n\n--! @brief Contains operator for encrypted values (@>)\n--!\n--! Implements the @> (contains) operator for testing if left encrypted value\n--! contains the right encrypted value. Uses ste_vec (secure tree encoding vector)\n--! index terms for containment testing without decryption.\n--!\n--! Primarily used for encrypted array or set containment queries.\n--!\n--! @param a eql_v2_encrypted Left operand (container)\n--! @param b eql_v2_encrypted Right operand (contained value)\n--! 
@return Boolean True if a contains b\n--!\n--! @example\n--! -- Check if encrypted array contains value\n--! SELECT * FROM documents\n--! WHERE encrypted_tags @> '[\"security\"]'::jsonb::eql_v2_encrypted;\n--!\n--! @note Requires ste_vec index configuration\n--! @see eql_v2.ste_vec_contains\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.\"@>\"(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean AS $$\n SELECT eql_v2.ste_vec_contains(a, b)\n$$ LANGUAGE SQL;\n\nCREATE OPERATOR @>(\n FUNCTION=eql_v2.\"@>\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=eql_v2_encrypted\n);\n\n--! @brief Contained-by operator for encrypted values (<@)\n--!\n--! Implements the <@ (contained-by) operator for testing if left encrypted value\n--! is contained by the right encrypted value. Uses ste_vec (secure tree encoding vector)\n--! index terms for containment testing without decryption. Reverse of @> operator.\n--!\n--! Primarily used for encrypted array or set containment queries.\n--!\n--! @param a eql_v2_encrypted Left operand (contained value)\n--! @param b eql_v2_encrypted Right operand (container)\n--! @return Boolean True if a is contained by b\n--!\n--! @example\n--! -- Check if value is contained in encrypted array\n--! SELECT * FROM documents\n--! WHERE '[\"security\"]'::jsonb::eql_v2_encrypted <@ encrypted_tags;\n--!\n--! @note Requires ste_vec index configuration\n--! @see eql_v2.ste_vec_contains\n--! @see eql_v2.\"@>\"\n--! @see eql_v2.add_search_config\n\nCREATE FUNCTION eql_v2.\"<@\"(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS boolean AS $$\n -- Contains with reversed arguments\n SELECT eql_v2.ste_vec_contains(b, a)\n$$ LANGUAGE SQL;\n\nCREATE OPERATOR <@(\n FUNCTION=eql_v2.\"<@\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=eql_v2_encrypted\n);\n\n--! @brief Not-equal comparison helper for encrypted values\n--! @internal\n--!\n--! Internal helper that delegates to eql_v2.compare for inequality testing.\n--! 
Returns true if encrypted values are not equal via encrypted index comparison.\n--!\n--! @param a eql_v2_encrypted First encrypted value\n--! @param b eql_v2_encrypted Second encrypted value\n--! @return Boolean True if values are not equal (compare result <> 0)\n--!\n--! @see eql_v2.compare\n--! @see eql_v2.\"<>\"\nCREATE FUNCTION eql_v2.neq(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN eql_v2.compare(a, b) <> 0;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Not-equal operator for encrypted values\n--!\n--! Implements the <> (not equal) operator for comparing encrypted values using their\n--! encrypted index terms. Enables WHERE clause inequality comparisons without decryption.\n--!\n--! @param a eql_v2_encrypted Left operand\n--! @param b eql_v2_encrypted Right operand\n--! @return Boolean True if encrypted values are not equal\n--!\n--! @example\n--! -- Find records with non-matching values\n--! SELECT * FROM users\n--! WHERE encrypted_email <> 'admin@example.com'::text::eql_v2_encrypted;\n--!\n--! @see eql_v2.compare\n--! @see eql_v2.\"=\"\nCREATE FUNCTION eql_v2.\"<>\"(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN eql_v2.neq(a, b );\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR <> (\n FUNCTION=eql_v2.\"<>\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=eql_v2_encrypted,\n NEGATOR = =,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n--! @brief <> operator for encrypted value and JSONB\n--! 
@see eql_v2.\"<>\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\"<>\"(a eql_v2_encrypted, b jsonb)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN eql_v2.neq(a, b::eql_v2_encrypted);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR <> (\n FUNCTION=eql_v2.\"<>\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=jsonb,\n NEGATOR = =,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n--! @brief <> operator for JSONB and encrypted value\n--!\n--! @param jsonb Plain JSONB value\n--! @param eql_v2_encrypted Encrypted value\n--! @return boolean True if values are not equal\n--!\n--! @see eql_v2.\"<>\"(eql_v2_encrypted, eql_v2_encrypted)\nCREATE FUNCTION eql_v2.\"<>\"(a jsonb, b eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN eql_v2.neq(a::eql_v2_encrypted, b);\n END;\n$$ LANGUAGE plpgsql;\n\nCREATE OPERATOR <> (\n FUNCTION=eql_v2.\"<>\",\n LEFTARG=jsonb,\n RIGHTARG=eql_v2_encrypted,\n NEGATOR = =,\n RESTRICT = eqsel,\n JOIN = eqjoinsel,\n HASHES,\n MERGES\n);\n\n\n\n\n\n--! @brief JSONB field accessor operator alias (->>)\n--!\n--! Implements the ->> operator as an alias of -> for encrypted JSONB data. This mirrors\n--! PostgreSQL semantics where ->> returns text via implicit casts. The underlying\n--! implementation delegates to eql_v2.\"->\" and allows PostgreSQL to coerce the result.\n--!\n--! Provides two overloads:\n--! - (eql_v2_encrypted, text) - Field name selector\n--! - (eql_v2_encrypted, eql_v2_encrypted) - Encrypted selector\n--!\n--! @see eql_v2.\"->\"\n--! @see eql_v2.selector\n\n--! @brief ->> operator with text selector\n--! @param eql_v2_encrypted Encrypted JSONB data\n--! @param text Field name to extract\n--! @return text Encrypted value at selector, implicitly cast from eql_v2_encrypted\n--! @example\n--! 
SELECT encrypted_json ->> 'field_name' FROM table;\nCREATE FUNCTION eql_v2.\"->>\"(e eql_v2_encrypted, selector text)\n RETURNS text\nIMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n found eql_v2_encrypted;\n\tBEGIN\n -- found = eql_v2.\"->\"(e, selector);\n -- RETURN eql_v2.ciphertext(found);\n RETURN eql_v2.\"->\"(e, selector);\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR ->> (\n FUNCTION=eql_v2.\"->>\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=text\n);\n\n\n\n---------------------------------------------------\n\n--! @brief ->> operator with encrypted selector\n--! @param e eql_v2_encrypted Encrypted JSONB data\n--! @param selector eql_v2_encrypted Encrypted field selector\n--! @return text Encrypted value at selector, implicitly cast from eql_v2_encrypted\n--! @see eql_v2.\"->>\"(eql_v2_encrypted, text)\nCREATE FUNCTION eql_v2.\"->>\"(e eql_v2_encrypted, selector eql_v2_encrypted)\n RETURNS text\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.\"->>\"(e, eql_v2.selector(selector));\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR ->> (\n FUNCTION=eql_v2.\"->>\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=eql_v2_encrypted\n);\n\n--! @brief JSONB field accessor operator for encrypted values (->)\n--!\n--! Implements the -> operator to access fields/elements from encrypted JSONB data.\n--! Returns encrypted value matching the provided selector without decryption.\n--!\n--! Encrypted JSON is represented as an array of eql_v2_encrypted values in the ste_vec format.\n--! Each element has a selector, ciphertext, and index terms:\n--! {\"sv\": [{\"c\": \"\", \"s\": \"\", \"b3\": \"\"}]}\n--!\n--! Provides three overloads:\n--! - (eql_v2_encrypted, text) - Field name selector\n--! - (eql_v2_encrypted, eql_v2_encrypted) - Encrypted selector\n--! - (eql_v2_encrypted, integer) - Array index selector (0-based)\n--!\n--! @note Operator resolution: Assignment casts are considered (PostgreSQL standard behavior).\n--! 
To use text selector, parameter may need explicit cast to text.\n--!\n--! @see eql_v2.ste_vec\n--! @see eql_v2.selector\n--! @see eql_v2.\"->>\"\n\n--! @brief -> operator with text selector\n--! @param eql_v2_encrypted Encrypted JSONB data\n--! @param text Field name to extract\n--! @return eql_v2_encrypted Encrypted value at selector\n--! @example\n--! SELECT encrypted_json -> 'field_name' FROM table;\nCREATE FUNCTION eql_v2.\"->\"(e eql_v2_encrypted, selector text)\n RETURNS eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n meta jsonb;\n sv eql_v2_encrypted[];\n found jsonb;\n\tBEGIN\n\n IF e IS NULL THEN\n RETURN NULL;\n END IF;\n\n -- Column identifier and version\n meta := eql_v2.meta_data(e);\n\n sv := eql_v2.ste_vec(e);\n\n FOR idx IN 1..array_length(sv, 1) LOOP\n if eql_v2.selector(sv[idx]) = selector THEN\n found := sv[idx];\n END IF;\n END LOOP;\n\n RETURN (meta || found)::eql_v2_encrypted;\n END;\n$$ LANGUAGE plpgsql;\n\n\nCREATE OPERATOR ->(\n FUNCTION=eql_v2.\"->\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=text\n);\n\n---------------------------------------------------\n\n--! @brief -> operator with encrypted selector\n--! @param e eql_v2_encrypted Encrypted JSONB data\n--! @param selector eql_v2_encrypted Encrypted field selector\n--! @return eql_v2_encrypted Encrypted value at selector\n--! @see eql_v2.\"->\"(eql_v2_encrypted, text)\nCREATE FUNCTION eql_v2.\"->\"(e eql_v2_encrypted, selector eql_v2_encrypted)\n RETURNS eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n RETURN eql_v2.\"->\"(e, eql_v2.selector(selector));\n END;\n$$ LANGUAGE plpgsql;\n\n\n\nCREATE OPERATOR ->(\n FUNCTION=eql_v2.\"->\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=eql_v2_encrypted\n);\n\n\n---------------------------------------------------\n\n--! @brief -> operator with integer array index\n--! @param eql_v2_encrypted Encrypted array data\n--! @param integer Array index (0-based, JSONB convention)\n--! 
@return eql_v2_encrypted Encrypted value at array index\n--! @note Array index is 0-based (JSONB standard) despite PostgreSQL arrays being 1-based\n--! @example\n--! SELECT encrypted_array -> 0 FROM table;\n--! @see eql_v2.is_ste_vec_array\nCREATE FUNCTION eql_v2.\"->\"(e eql_v2_encrypted, selector integer)\n RETURNS eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n sv eql_v2_encrypted[];\n found eql_v2_encrypted;\n\tBEGIN\n IF NOT eql_v2.is_ste_vec_array(e) THEN\n RETURN NULL;\n END IF;\n\n sv := eql_v2.ste_vec(e);\n\n -- PostgreSQL arrays are 1-based\n -- JSONB arrays are 0-based and so the selector is 0-based\n FOR idx IN 1..array_length(sv, 1) LOOP\n if (idx-1) = selector THEN\n found := sv[idx];\n END IF;\n END LOOP;\n\n RETURN found;\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n\n\nCREATE OPERATOR ->(\n FUNCTION=eql_v2.\"->\",\n LEFTARG=eql_v2_encrypted,\n RIGHTARG=integer\n);\n\n\n--! @file jsonb/functions.sql\n--! @brief JSONB path query and array manipulation functions for encrypted data\n--!\n--! These functions provide PostgreSQL-compatible operations on encrypted JSONB values\n--! using Structured Transparent Encryption (STE). They support:\n--! - Path-based queries to extract nested encrypted values\n--! - Existence checks for encrypted fields\n--! - Array operations (length, elements extraction)\n--!\n--! @note STE stores encrypted JSONB as a vector of encrypted elements ('sv') with selectors\n--! @note Functions suppress errors for missing fields, type mismatches (similar to PostgreSQL jsonpath)\n\n\n--! @brief Query encrypted JSONB for elements matching selector\n--!\n--! Searches the Structured Transparent Encryption (STE) vector for elements matching\n--! the given selector path. Returns all matching encrypted elements. If multiple\n--! matches form an array, they are wrapped with array metadata.\n--!\n--! @param jsonb Encrypted JSONB payload containing STE vector ('sv')\n--! 
@param text Path selector to match against encrypted elements\n--! @return SETOF eql_v2_encrypted Matching encrypted elements (may return multiple rows)\n--!\n--! @note Returns empty set if selector is not found (does not throw exception)\n--! @note Array elements use same selector; multiple matches wrapped with 'a' flag\n--! @note Returns a set containing NULL if val is NULL; returns empty set if no matches found\n--! @see eql_v2.jsonb_path_query_first\n--! @see eql_v2.jsonb_path_exists\nCREATE FUNCTION eql_v2.jsonb_path_query(val jsonb, selector text)\n RETURNS SETOF eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n sv eql_v2_encrypted[];\n found jsonb[];\n e jsonb;\n meta jsonb;\n ary boolean;\n BEGIN\n\n IF val IS NULL THEN\n RETURN NEXT NULL;\n END IF;\n\n -- Column identifier and version\n meta := eql_v2.meta_data(val);\n\n sv := eql_v2.ste_vec(val);\n\n FOR idx IN 1..array_length(sv, 1) LOOP\n e := sv[idx];\n\n IF eql_v2.selector(e) = selector THEN\n found := array_append(found, e);\n IF eql_v2.is_ste_vec_array(e) THEN\n ary := true;\n END IF;\n\n END IF;\n END LOOP;\n\n IF found IS NOT NULL THEN\n\n IF ary THEN\n -- Wrap found array elements as eql_v2_encrypted\n\n RETURN NEXT (meta || jsonb_build_object(\n 'sv', found,\n 'a', 1\n ))::eql_v2_encrypted;\n\n ELSE\n RETURN NEXT (meta || found[1])::eql_v2_encrypted;\n END IF;\n\n END IF;\n\n RETURN;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Query encrypted JSONB with encrypted selector\n--!\n--! Overload that accepts encrypted selector and extracts its plaintext value\n--! before delegating to main jsonb_path_query implementation.\n--!\n--! @param val eql_v2_encrypted Encrypted JSONB value to query\n--! @param selector eql_v2_encrypted Encrypted selector to match against\n--! @return SETOF eql_v2_encrypted Matching encrypted elements\n--!\n--! 
@see eql_v2.jsonb_path_query(jsonb, text)\nCREATE FUNCTION eql_v2.jsonb_path_query(val eql_v2_encrypted, selector eql_v2_encrypted)\n RETURNS SETOF eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN QUERY\n SELECT * FROM eql_v2.jsonb_path_query(val.data, eql_v2.selector(selector));\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Query encrypted JSONB with text selector\n--!\n--! Overload that accepts encrypted JSONB value and text selector,\n--! extracting the JSONB payload before querying.\n--!\n--! @param eql_v2_encrypted Encrypted JSONB value to query\n--! @param text Path selector to match against\n--! @return SETOF eql_v2_encrypted Matching encrypted elements\n--!\n--! @example\n--! -- Query encrypted JSONB for specific field\n--! SELECT * FROM eql_v2.jsonb_path_query(encrypted_document, '$.address.city');\n--!\n--! @see eql_v2.jsonb_path_query(jsonb, text)\nCREATE FUNCTION eql_v2.jsonb_path_query(val eql_v2_encrypted, selector text)\n RETURNS SETOF eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN QUERY\n SELECT * FROM eql_v2.jsonb_path_query(val.data, selector);\n END;\n$$ LANGUAGE plpgsql;\n\n\n------------------------------------------------------------------------------------\n\n\n--! @brief Check if selector path exists in encrypted JSONB\n--!\n--! Tests whether any encrypted elements match the given selector path.\n--! More efficient than jsonb_path_query when only existence check is needed.\n--!\n--! @param jsonb Encrypted JSONB payload to check\n--! @param text Path selector to test\n--! @return boolean True if matching element exists, false otherwise\n--!\n--! @see eql_v2.jsonb_path_query(jsonb, text)\nCREATE FUNCTION eql_v2.jsonb_path_exists(val jsonb, selector text)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN EXISTS (\n SELECT eql_v2.jsonb_path_query(val, selector)\n );\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check existence with encrypted selector\n--!\n--! 
Overload that accepts encrypted selector and extracts its value\n--! before checking existence.\n--!\n--! @param val eql_v2_encrypted Encrypted JSONB value to check\n--! @param selector eql_v2_encrypted Encrypted selector to test\n--! @return boolean True if path exists\n--!\n--! @see eql_v2.jsonb_path_exists(jsonb, text)\nCREATE FUNCTION eql_v2.jsonb_path_exists(val eql_v2_encrypted, selector eql_v2_encrypted)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN EXISTS (\n SELECT eql_v2.jsonb_path_query(val, eql_v2.selector(selector))\n );\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Check existence with text selector\n--!\n--! Overload that accepts encrypted JSONB value and text selector.\n--!\n--! @param eql_v2_encrypted Encrypted JSONB value to check\n--! @param text Path selector to test\n--! @return boolean True if path exists\n--!\n--! @example\n--! -- Check if encrypted document has address field\n--! SELECT eql_v2.jsonb_path_exists(encrypted_document, '$.address');\n--!\n--! @see eql_v2.jsonb_path_exists(jsonb, text)\nCREATE FUNCTION eql_v2.jsonb_path_exists(val eql_v2_encrypted, selector text)\n RETURNS boolean\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN EXISTS (\n SELECT eql_v2.jsonb_path_query(val, selector)\n );\n END;\n$$ LANGUAGE plpgsql;\n\n\n------------------------------------------------------------------------------------\n\n\n--! @brief Get first element matching selector\n--!\n--! Returns only the first encrypted element matching the selector path,\n--! or NULL if no match found. More efficient than jsonb_path_query when\n--! only one result is needed.\n--!\n--! @param jsonb Encrypted JSONB payload to query\n--! @param text Path selector to match\n--! @return eql_v2_encrypted First matching element or NULL\n--!\n--! @note Uses LIMIT 1 internally for efficiency\n--! 
@see eql_v2.jsonb_path_query(jsonb, text)\nCREATE FUNCTION eql_v2.jsonb_path_query_first(val jsonb, selector text)\n RETURNS eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (\n SELECT e\n FROM eql_v2.jsonb_path_query(val, selector) AS e\n LIMIT 1\n );\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Get first element with encrypted selector\n--!\n--! Overload that accepts encrypted selector and extracts its value\n--! before querying for first match.\n--!\n--! @param val eql_v2_encrypted Encrypted JSONB value to query\n--! @param selector eql_v2_encrypted Encrypted selector to match\n--! @return eql_v2_encrypted First matching element or NULL\n--!\n--! @see eql_v2.jsonb_path_query_first(jsonb, text)\nCREATE FUNCTION eql_v2.jsonb_path_query_first(val eql_v2_encrypted, selector eql_v2_encrypted)\n RETURNS eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (\n SELECT e\n FROM eql_v2.jsonb_path_query(val.data, eql_v2.selector(selector)) AS e\n LIMIT 1\n );\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Get first element with text selector\n--!\n--! Overload that accepts encrypted JSONB value and text selector.\n--!\n--! @param eql_v2_encrypted Encrypted JSONB value to query\n--! @param text Path selector to match\n--! @return eql_v2_encrypted First matching element or NULL\n--!\n--! @example\n--! -- Get first matching address from encrypted document\n--! SELECT eql_v2.jsonb_path_query_first(encrypted_document, '$.addresses[*]');\n--!\n--! @see eql_v2.jsonb_path_query_first(jsonb, text)\nCREATE FUNCTION eql_v2.jsonb_path_query_first(val eql_v2_encrypted, selector text)\n RETURNS eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (\n SELECT e\n FROM eql_v2.jsonb_path_query(val.data, selector) AS e\n LIMIT 1\n );\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n------------------------------------------------------------------------------------\n\n\n--! @brief Get length of encrypted JSONB array\n--!\n--! 
Returns the number of elements in an encrypted JSONB array by counting\n--! elements in the STE vector ('sv'). The encrypted value must have the\n--! array flag ('a') set to true.\n--!\n--! @param jsonb Encrypted JSONB payload representing an array\n--! @return integer Number of elements in the array\n--! @throws Exception 'cannot get array length of a non-array' if 'a' flag is missing or not true\n--!\n--! @note Array flag 'a' must be present and set to true value\n--! @see eql_v2.jsonb_array_elements\nCREATE FUNCTION eql_v2.jsonb_array_length(val jsonb)\n RETURNS integer\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n sv eql_v2_encrypted[];\n found eql_v2_encrypted[];\n BEGIN\n\n IF val IS NULL THEN\n RETURN NULL;\n END IF;\n\n IF eql_v2.is_ste_vec_array(val) THEN\n sv := eql_v2.ste_vec(val);\n RETURN array_length(sv, 1);\n END IF;\n\n RAISE 'cannot get array length of a non-array';\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Get array length from encrypted type\n--!\n--! Overload that accepts encrypted composite type and extracts the\n--! JSONB payload before computing array length.\n--!\n--! @param eql_v2_encrypted Encrypted array value\n--! @return integer Number of elements in the array\n--! @throws Exception if value is not an array\n--!\n--! @example\n--! -- Get length of encrypted array\n--! SELECT eql_v2.jsonb_array_length(encrypted_tags);\n--!\n--! @see eql_v2.jsonb_array_length(jsonb)\nCREATE FUNCTION eql_v2.jsonb_array_length(val eql_v2_encrypted)\n RETURNS integer\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN (\n SELECT eql_v2.jsonb_array_length(val.data)\n );\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n\n--! @brief Extract elements from encrypted JSONB array\n--!\n--! Returns each element of an encrypted JSONB array as a separate row.\n--! Each element is returned as an eql_v2_encrypted value with metadata\n--! preserved from the parent array.\n--!\n--! @param jsonb Encrypted JSONB payload representing an array\n--! 
@return SETOF eql_v2_encrypted One row per array element\n--! @throws Exception if value is not an array (missing 'a' flag)\n--!\n--! @note Each element inherits metadata (version, ident) from parent\n--! @see eql_v2.jsonb_array_length\n--! @see eql_v2.jsonb_array_elements_text\nCREATE FUNCTION eql_v2.jsonb_array_elements(val jsonb)\n RETURNS SETOF eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n sv eql_v2_encrypted[];\n meta jsonb;\n item jsonb;\n BEGIN\n\n IF NOT eql_v2.is_ste_vec_array(val) THEN\n RAISE 'cannot extract elements from non-array';\n END IF;\n\n -- Column identifier and version\n meta := eql_v2.meta_data(val);\n\n sv := eql_v2.ste_vec(val);\n\n FOR idx IN 1..array_length(sv, 1) LOOP\n item = sv[idx];\n RETURN NEXT (meta || item)::eql_v2_encrypted;\n END LOOP;\n\n RETURN;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract elements from encrypted array type\n--!\n--! Overload that accepts encrypted composite type and extracts each\n--! array element as a separate row.\n--!\n--! @param eql_v2_encrypted Encrypted array value\n--! @return SETOF eql_v2_encrypted One row per array element\n--! @throws Exception if value is not an array\n--!\n--! @example\n--! -- Expand encrypted array into rows\n--! SELECT * FROM eql_v2.jsonb_array_elements(encrypted_tags);\n--!\n--! @see eql_v2.jsonb_array_elements(jsonb)\nCREATE FUNCTION eql_v2.jsonb_array_elements(val eql_v2_encrypted)\n RETURNS SETOF eql_v2_encrypted\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN QUERY\n SELECT * FROM eql_v2.jsonb_array_elements(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n\n\n--! @brief Extract encrypted array elements as ciphertext\n--!\n--! Returns each element of an encrypted JSONB array as its raw ciphertext\n--! value (text representation). Unlike jsonb_array_elements, this returns\n--! only the ciphertext 'c' field without metadata.\n--!\n--! @param jsonb Encrypted JSONB payload representing an array\n--! 
@return SETOF text One ciphertext string per array element\n--! @throws Exception if value is not an array (missing 'a' flag)\n--!\n--! @note Returns ciphertext only, not full encrypted structure\n--! @see eql_v2.jsonb_array_elements\nCREATE FUNCTION eql_v2.jsonb_array_elements_text(val jsonb)\n RETURNS SETOF text\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n sv eql_v2_encrypted[];\n found eql_v2_encrypted[];\n BEGIN\n IF NOT eql_v2.is_ste_vec_array(val) THEN\n RAISE 'cannot extract elements from non-array';\n END IF;\n\n sv := eql_v2.ste_vec(val);\n\n FOR idx IN 1..array_length(sv, 1) LOOP\n RETURN NEXT eql_v2.ciphertext(sv[idx]);\n END LOOP;\n\n RETURN;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Extract array elements as ciphertext from encrypted type\n--!\n--! Overload that accepts encrypted composite type and extracts each\n--! array element's ciphertext as text.\n--!\n--! @param eql_v2_encrypted Encrypted array value\n--! @return SETOF text One ciphertext string per array element\n--! @throws Exception if value is not an array\n--!\n--! @example\n--! -- Get ciphertext of each array element\n--! SELECT * FROM eql_v2.jsonb_array_elements_text(encrypted_tags);\n--!\n--! @see eql_v2.jsonb_array_elements_text(jsonb)\nCREATE FUNCTION eql_v2.jsonb_array_elements_text(val eql_v2_encrypted)\n RETURNS SETOF text\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN QUERY\n SELECT * FROM eql_v2.jsonb_array_elements_text(val.data);\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Compare two encrypted values using HMAC-SHA256 index terms\n--!\n--! Performs a three-way comparison (returns -1/0/1) of encrypted values using\n--! their HMAC-SHA256 hash index terms. Used internally by the equality operator (=)\n--! for exact-match queries without decryption.\n--!\n--! @param a eql_v2_encrypted First encrypted value to compare\n--! @param b eql_v2_encrypted Second encrypted value to compare\n--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b\n--!\n--! 
@note NULL values are sorted before non-NULL values\n--! @note Comparison uses underlying text type ordering of HMAC-SHA256 hashes\n--!\n--! @see eql_v2.hmac_256\n--! @see eql_v2.has_hmac_256\n--! @see eql_v2.\"=\"\nCREATE FUNCTION eql_v2.compare_hmac_256(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS integer\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n a_term eql_v2.hmac_256;\n b_term eql_v2.hmac_256;\n BEGIN\n\n IF a IS NULL AND b IS NULL THEN\n RETURN 0;\n END IF;\n\n IF a IS NULL THEN\n RETURN -1;\n END IF;\n\n IF b IS NULL THEN\n RETURN 1;\n END IF;\n\n IF eql_v2.has_hmac_256(a) THEN\n a_term = eql_v2.hmac_256(a);\n END IF;\n\n IF eql_v2.has_hmac_256(b) THEN\n b_term = eql_v2.hmac_256(b);\n END IF;\n\n IF a_term IS NULL AND b_term IS NULL THEN\n RETURN 0;\n END IF;\n\n IF a_term IS NULL THEN\n RETURN -1;\n END IF;\n\n IF b_term IS NULL THEN\n RETURN 1;\n END IF;\n\n -- Using the underlying text type comparison\n IF a_term = b_term THEN\n RETURN 0;\n END IF;\n\n IF a_term < b_term THEN\n RETURN -1;\n END IF;\n\n IF a_term > b_term THEN\n RETURN 1;\n END IF;\n\n END;\n$$ LANGUAGE plpgsql;\n--! @file encryptindex/functions.sql\n--! @brief Configuration lifecycle and column encryption management\n--!\n--! Provides functions for managing encryption configuration transitions:\n--! - Comparing configurations to identify changes\n--! - Identifying columns needing encryption\n--! - Creating and renaming encrypted columns during initial setup\n--! - Tracking encryption progress\n--!\n--! These functions support the workflow of activating a pending configuration\n--! and performing the initial encryption of plaintext columns.\n\n\n--! @brief Compare two configurations and find differences\n--! @internal\n--!\n--! Returns table/column pairs where configuration differs between two configs.\n--! Used to identify which columns need encryption when activating a pending config.\n--!\n--! @param a jsonb First configuration to compare\n--! 
@param b jsonb Second configuration to compare\n--! @return TABLE(table_name text, column_name text) Columns with differing configuration\n--!\n--! @note Compares configuration structure, not just presence/absence\n--! @see eql_v2.select_pending_columns\nCREATE FUNCTION eql_v2.diff_config(a JSONB, b JSONB)\n\tRETURNS TABLE(table_name TEXT, column_name TEXT)\nIMMUTABLE STRICT PARALLEL SAFE\nAS $$\n BEGIN\n RETURN QUERY\n WITH table_keys AS (\n SELECT jsonb_object_keys(a->'tables') AS key\n UNION\n SELECT jsonb_object_keys(b->'tables') AS key\n ),\n column_keys AS (\n SELECT tk.key AS table_key, jsonb_object_keys(a->'tables'->tk.key) AS column_key\n FROM table_keys tk\n UNION\n SELECT tk.key AS table_key, jsonb_object_keys(b->'tables'->tk.key) AS column_key\n FROM table_keys tk\n )\n SELECT\n ck.table_key AS table_name,\n ck.column_key AS column_name\n FROM\n column_keys ck\n WHERE\n (a->'tables'->ck.table_key->ck.column_key IS DISTINCT FROM b->'tables'->ck.table_key->ck.column_key);\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Get columns with pending configuration changes\n--!\n--! Compares 'pending' and 'active' configurations to identify columns that need\n--! encryption or re-encryption. Returns columns where configuration differs.\n--!\n--! @return TABLE(table_name text, column_name text) Columns needing encryption\n--! @throws Exception if no pending configuration exists\n--!\n--! @note Treats missing active config as empty config\n--! @see eql_v2.diff_config\n--! 
@see eql_v2.select_target_columns\nCREATE FUNCTION eql_v2.select_pending_columns()\n\tRETURNS TABLE(table_name TEXT, column_name TEXT)\nAS $$\n\tDECLARE\n\t\tactive JSONB;\n\t\tpending JSONB;\n\t\tconfig_id BIGINT;\n\tBEGIN\n\t\tSELECT data INTO active FROM eql_v2_configuration WHERE state = 'active';\n\n\t\t-- set default config\n IF active IS NULL THEN\n active := '{}';\n END IF;\n\n\t\tSELECT id, data INTO config_id, pending FROM eql_v2_configuration WHERE state = 'pending';\n\n\t\t-- set default config\n\t\tIF config_id IS NULL THEN\n\t\t\tRAISE EXCEPTION 'No pending configuration exists to encrypt';\n\t\tEND IF;\n\n\t\tRETURN QUERY\n\t\tSELECT d.table_name, d.column_name FROM eql_v2.diff_config(active, pending) as d;\n\tEND;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Map pending columns to their encrypted target columns\n--!\n--! For each column with pending configuration, identifies the corresponding\n--! encrypted column. During initial encryption, target is '{column_name}_encrypted'.\n--! Returns NULL for target_column if encrypted column doesn't exist yet.\n--!\n--! @return TABLE(table_name text, column_name text, target_column text) Column mappings\n--!\n--! @note Target column is NULL if no column exists matching either 'column_name' or 'column_name_encrypted' with type eql_v2_encrypted\n--! @note The LEFT JOIN checks both original and '_encrypted' suffix variations with type verification\n--! @see eql_v2.select_pending_columns\n--! 
@see eql_v2.create_encrypted_columns\nCREATE FUNCTION eql_v2.select_target_columns()\n\tRETURNS TABLE(table_name TEXT, column_name TEXT, target_column TEXT)\n\tSTABLE STRICT PARALLEL SAFE\nAS $$\n SELECT\n c.table_name,\n c.column_name,\n s.column_name as target_column\n FROM\n eql_v2.select_pending_columns() c\n LEFT JOIN information_schema.columns s ON\n s.table_name = c.table_name AND\n (s.column_name = c.column_name OR s.column_name = c.column_name || '_encrypted') AND\n s.udt_name = 'eql_v2_encrypted';\n$$ LANGUAGE sql;\n\n\n--! @brief Check if database is ready for encryption\n--!\n--! Verifies that all columns with pending configuration have corresponding\n--! encrypted target columns created. Returns true if encryption can proceed.\n--!\n--! @return boolean True if all pending columns have target encrypted columns\n--!\n--! @note Returns false if any pending column lacks encrypted column\n--! @see eql_v2.select_target_columns\n--! @see eql_v2.create_encrypted_columns\nCREATE FUNCTION eql_v2.ready_for_encryption()\n\tRETURNS BOOLEAN\n\tSTABLE STRICT PARALLEL SAFE\nAS $$\n\tSELECT EXISTS (\n\t SELECT *\n\t FROM eql_v2.select_target_columns() AS c\n\t WHERE c.target_column IS NOT NULL);\n$$ LANGUAGE sql;\n\n\n--! @brief Create encrypted columns for initial encryption\n--!\n--! For each plaintext column with pending configuration that lacks an encrypted\n--! target column, creates a new column '{column_name}_encrypted' of type\n--! eql_v2_encrypted. This prepares the database schema for initial encryption.\n--!\n--! @return TABLE(table_name text, column_name text) Created encrypted columns\n--!\n--! @warning Executes dynamic DDL (ALTER TABLE ADD COLUMN) - modifies database schema\n--! @note Only creates columns that don't already exist\n--! @see eql_v2.select_target_columns\n--! 
@see eql_v2.rename_encrypted_columns\nCREATE FUNCTION eql_v2.create_encrypted_columns()\n\tRETURNS TABLE(table_name TEXT, column_name TEXT)\nAS $$\n\tBEGIN\n FOR table_name, column_name IN\n SELECT c.table_name, (c.column_name || '_encrypted') FROM eql_v2.select_target_columns() AS c WHERE c.target_column IS NULL\n LOOP\n\t\t EXECUTE format('ALTER TABLE %I ADD column %I eql_v2_encrypted;', table_name, column_name);\n RETURN NEXT;\n END LOOP;\n\tEND;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Finalize initial encryption by renaming columns\n--!\n--! After initial encryption completes, renames columns to complete the transition:\n--! - Plaintext column '{column_name}' → '{column_name}_plaintext'\n--! - Encrypted column '{column_name}_encrypted' → '{column_name}'\n--!\n--! This makes the encrypted column the primary column with the original name.\n--!\n--! @return TABLE(table_name text, column_name text, target_column text) Renamed columns\n--!\n--! @warning Executes dynamic DDL (ALTER TABLE RENAME COLUMN) - modifies database schema\n--! @note Only renames columns where target is '{column_name}_encrypted'\n--! @see eql_v2.create_encrypted_columns\nCREATE FUNCTION eql_v2.rename_encrypted_columns()\n\tRETURNS TABLE(table_name TEXT, column_name TEXT, target_column TEXT)\nAS $$\n\tBEGIN\n FOR table_name, column_name, target_column IN\n SELECT * FROM eql_v2.select_target_columns() as c WHERE c.target_column = c.column_name || '_encrypted'\n LOOP\n\t\t EXECUTE format('ALTER TABLE %I RENAME %I TO %I;', table_name, column_name, column_name || '_plaintext');\n\t\t EXECUTE format('ALTER TABLE %I RENAME %I TO %I;', table_name, target_column, column_name);\n RETURN NEXT;\n END LOOP;\n\tEND;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Count rows encrypted with active configuration\n--! @internal\n--!\n--! Counts rows in a table where the encrypted column was encrypted using\n--! the currently active configuration. Used to track encryption progress.\n--!\n--! 
@param table_name text Name of table to check\n--! @param column_name text Name of encrypted column to check\n--! @return bigint Count of rows encrypted with active configuration\n--!\n--! @note The 'v' field in encrypted payloads stores the payload version (\"2\"), not the configuration ID\n--! @note Configuration tracking mechanism is implementation-specific\nCREATE FUNCTION eql_v2.count_encrypted_with_active_config(table_name TEXT, column_name TEXT)\n RETURNS BIGINT\nAS $$\nDECLARE\n result BIGINT;\nBEGIN\n\tEXECUTE format(\n 'SELECT COUNT(%I) FROM %s t WHERE %I->>%L = (SELECT id::TEXT FROM eql_v2_configuration WHERE state = %L)',\n column_name, table_name, column_name, 'v', 'active'\n )\n\tINTO result;\n \tRETURN result;\nEND;\n$$ LANGUAGE plpgsql;\n\n\n\n--! @brief Validate presence of ident field in encrypted payload\n--! @internal\n--!\n--! Checks that the encrypted JSONB payload contains the required 'i' (ident) field.\n--! The ident field tracks which table and column the encrypted value belongs to.\n--!\n--! @param jsonb Encrypted payload to validate\n--! @return Boolean True if 'i' field is present\n--! @throws Exception if 'i' field is missing\n--!\n--! @note Used in CHECK constraints to ensure payload structure\n--! @see eql_v2.check_encrypted\nCREATE FUNCTION eql_v2._encrypted_check_i(val jsonb)\n RETURNS boolean\nAS $$\n\tBEGIN\n IF val ? 'i' THEN\n RETURN true;\n END IF;\n RAISE 'Encrypted column missing ident (i) field: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Validate table and column fields in ident\n--! @internal\n--!\n--! Checks that the 'i' (ident) field contains both 't' (table) and 'c' (column)\n--! subfields, which identify the origin of the encrypted value.\n--!\n--! @param jsonb Encrypted payload to validate\n--! @return Boolean True if both 't' and 'c' subfields are present\n--! @throws Exception if 't' or 'c' subfields are missing\n--!\n--! @note Used in CHECK constraints to ensure payload structure\n--! 
@see eql_v2.check_encrypted\nCREATE FUNCTION eql_v2._encrypted_check_i_ct(val jsonb)\n RETURNS boolean\nAS $$\n\tBEGIN\n IF (val->'i' ?& array['t', 'c']) THEN\n RETURN true;\n END IF;\n RAISE 'Encrypted column ident (i) missing table (t) or column (c) fields: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Validate version field in encrypted payload\n--! @internal\n--!\n--! Checks that the encrypted payload has version field 'v' set to '2',\n--! the current EQL v2 payload version.\n--!\n--! @param jsonb Encrypted payload to validate\n--! @return Boolean True if 'v' field is present and equals '2'\n--! @throws Exception if 'v' field is missing or not '2'\n--!\n--! @note Used in CHECK constraints to ensure payload structure\n--! @see eql_v2.check_encrypted\nCREATE FUNCTION eql_v2._encrypted_check_v(val jsonb)\n RETURNS boolean\nAS $$\n\tBEGIN\n IF (val ? 'v') THEN\n\n IF val->>'v' <> '2' THEN\n RAISE 'Expected encrypted column version (v) 2';\n RETURN false;\n END IF;\n\n RETURN true;\n END IF;\n RAISE 'Encrypted column missing version (v) field: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Validate ciphertext field in encrypted payload\n--! @internal\n--!\n--! Checks that the encrypted payload contains the required 'c' (ciphertext) field\n--! which stores the encrypted data.\n--!\n--! @param jsonb Encrypted payload to validate\n--! @return Boolean True if 'c' field is present\n--! @throws Exception if 'c' field is missing\n--!\n--! @note Used in CHECK constraints to ensure payload structure\n--! @see eql_v2.check_encrypted\nCREATE FUNCTION eql_v2._encrypted_check_c(val jsonb)\n RETURNS boolean\nAS $$\n\tBEGIN\n IF (val ? 'c') THEN\n RETURN true;\n END IF;\n RAISE 'Encrypted column missing ciphertext (c) field: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Validate complete encrypted payload structure\n--!\n--! Comprehensive validation function that checks all required fields in an\n--! 
encrypted JSONB payload: version ('v'), ciphertext ('c'), ident ('i'),\n--! and ident subfields ('t', 'c').\n--!\n--! This function is used in CHECK constraints to ensure encrypted column\n--! data integrity at the database level.\n--!\n--! @param jsonb Encrypted payload to validate\n--! @return Boolean True if all structure checks pass\n--! @throws Exception if any required field is missing or invalid\n--!\n--! @example\n--! -- Add validation constraint to encrypted column\n--! ALTER TABLE users ADD CONSTRAINT check_email_encrypted\n--! CHECK (eql_v2.check_encrypted(encrypted_email::jsonb));\n--!\n--! @see eql_v2._encrypted_check_v\n--! @see eql_v2._encrypted_check_c\n--! @see eql_v2._encrypted_check_i\n--! @see eql_v2._encrypted_check_i_ct\nCREATE FUNCTION eql_v2.check_encrypted(val jsonb)\n RETURNS BOOLEAN\nLANGUAGE sql IMMUTABLE STRICT PARALLEL SAFE\nBEGIN ATOMIC\n RETURN (\n eql_v2._encrypted_check_v(val) AND\n eql_v2._encrypted_check_c(val) AND\n eql_v2._encrypted_check_i(val) AND\n eql_v2._encrypted_check_i_ct(val)\n );\nEND;\n\n\n--! @brief Validate encrypted composite type structure\n--!\n--! Validates an eql_v2_encrypted composite type by checking its underlying\n--! JSONB payload. Delegates to eql_v2.check_encrypted(jsonb).\n--!\n--! @param eql_v2_encrypted Encrypted value to validate\n--! @return Boolean True if structure is valid\n--! @throws Exception if any required field is missing or invalid\n--!\n--! @see eql_v2.check_encrypted(jsonb)\nCREATE FUNCTION eql_v2.check_encrypted(val eql_v2_encrypted)\n RETURNS BOOLEAN\nLANGUAGE sql IMMUTABLE STRICT PARALLEL SAFE\nBEGIN ATOMIC\n RETURN eql_v2.check_encrypted(val.data);\nEND;\n\n\n-- Aggregate functions for ORE\n\n--! @brief State transition function for min aggregate\n--! @internal\n--!\n--! Returns the smaller of two encrypted values for use in MIN aggregate.\n--! Comparison uses ORE index terms without decryption.\n--!\n--! @param a eql_v2_encrypted First encrypted value\n--! 
@param b eql_v2_encrypted Second encrypted value\n--! @return eql_v2_encrypted The smaller of the two values\n--!\n--! @see eql_v2.min(eql_v2_encrypted)\nCREATE FUNCTION eql_v2.min(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS eql_v2_encrypted\nSTRICT\nAS $$\n BEGIN\n IF a < b THEN\n RETURN a;\n ELSE\n RETURN b;\n END IF;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Find minimum encrypted value in a group\n--!\n--! Aggregate function that returns the minimum encrypted value in a group\n--! using ORE index term comparisons without decryption.\n--!\n--! @param input eql_v2_encrypted Encrypted values to aggregate\n--! @return eql_v2_encrypted Minimum value in the group\n--!\n--! @example\n--! -- Find minimum age per department\n--! SELECT department, eql_v2.min(encrypted_age)\n--! FROM employees\n--! GROUP BY department;\n--!\n--! @note Requires 'ore' index configuration on the column\n--! @see eql_v2.min(eql_v2_encrypted, eql_v2_encrypted)\nCREATE AGGREGATE eql_v2.min(eql_v2_encrypted)\n(\n sfunc = eql_v2.min,\n stype = eql_v2_encrypted\n);\n\n\n--! @brief State transition function for max aggregate\n--! @internal\n--!\n--! Returns the larger of two encrypted values for use in MAX aggregate.\n--! Comparison uses ORE index terms without decryption.\n--!\n--! @param a eql_v2_encrypted First encrypted value\n--! @param b eql_v2_encrypted Second encrypted value\n--! @return eql_v2_encrypted The larger of the two values\n--!\n--! @see eql_v2.max(eql_v2_encrypted)\nCREATE FUNCTION eql_v2.max(a eql_v2_encrypted, b eql_v2_encrypted)\nRETURNS eql_v2_encrypted\nSTRICT\nAS $$\n BEGIN\n IF a > b THEN\n RETURN a;\n ELSE\n RETURN b;\n END IF;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Find maximum encrypted value in a group\n--!\n--! Aggregate function that returns the maximum encrypted value in a group\n--! using ORE index term comparisons without decryption.\n--!\n--! @param input eql_v2_encrypted Encrypted values to aggregate\n--! 
@return eql_v2_encrypted Maximum value in the group\n--!\n--! @example\n--! -- Find maximum salary per department\n--! SELECT department, eql_v2.max(encrypted_salary)\n--! FROM employees\n--! GROUP BY department;\n--!\n--! @note Requires 'ore' index configuration on the column\n--! @see eql_v2.max(eql_v2_encrypted, eql_v2_encrypted)\nCREATE AGGREGATE eql_v2.max(eql_v2_encrypted)\n(\n sfunc = eql_v2.max,\n stype = eql_v2_encrypted\n);\n\n\n--! @file config/indexes.sql\n--! @brief Configuration state uniqueness indexes\n--!\n--! Creates partial unique indexes to enforce that only one configuration\n--! can be in 'active', 'pending', or 'encrypting' state at any time.\n--! Multiple 'inactive' configurations are allowed.\n--!\n--! @note Uses partial indexes (WHERE clauses) for efficiency\n--! @note Prevents conflicting configurations from being active simultaneously\n--! @see config/types.sql for state definitions\n\n\n--! @brief Unique active configuration constraint\n--! @note Only one configuration can be 'active' at once\nCREATE UNIQUE INDEX ON public.eql_v2_configuration (state) WHERE state = 'active';\n\n--! @brief Unique pending configuration constraint\n--! @note Only one configuration can be 'pending' at once\nCREATE UNIQUE INDEX ON public.eql_v2_configuration (state) WHERE state = 'pending';\n\n--! @brief Unique encrypting configuration constraint\n--! @note Only one configuration can be 'encrypting' at once\nCREATE UNIQUE INDEX ON public.eql_v2_configuration (state) WHERE state = 'encrypting';\n\n\n--! @brief Add a search index configuration for an encrypted column\n--!\n--! Configures a searchable encryption index (unique, match, ore, or ste_vec) on an\n--! encrypted column. Creates or updates the pending configuration, then migrates\n--! and activates it unless migrating flag is set.\n--!\n--! @param table_name Text Name of the table containing the column\n--! @param column_name Text Name of the column to configure\n--! 
@param index_name Text Type of index ('unique', 'match', 'ore', 'ste_vec')\n--! @param cast_as Text PostgreSQL type for decrypted values (default: 'text')\n--! @param opts JSONB Index-specific options (default: '{}')\n--! @param migrating Boolean Skip auto-migration if true (default: false)\n--! @return JSONB Updated configuration object\n--! @throws Exception if index already exists for this column\n--! @throws Exception if cast_as is not a valid type\n--!\n--! @example\n--! -- Add unique index for exact-match searches\n--! SELECT eql_v2.add_search_config('users', 'email', 'unique');\n--!\n--! -- Add match index for LIKE searches with custom token length\n--! SELECT eql_v2.add_search_config('posts', 'content', 'match', 'text',\n--! '{\"token_filters\": [{\"kind\": \"downcase\"}], \"tokenizer\": {\"kind\": \"ngram\", \"token_length\": 3}}'\n--! );\n--!\n--! @see eql_v2.add_column\n--! @see eql_v2.remove_search_config\nCREATE FUNCTION eql_v2.add_search_config(table_name text, column_name text, index_name text, cast_as text DEFAULT 'text', opts jsonb DEFAULT '{}', migrating boolean DEFAULT false)\n RETURNS jsonb\n\nAS $$\n DECLARE\n o jsonb;\n _config jsonb;\n BEGIN\n\n -- set the active config\n SELECT data INTO _config FROM public.eql_v2_configuration WHERE state = 'active' OR state = 'pending' ORDER BY state DESC;\n\n -- if index exists\n IF _config #> array['tables', table_name, column_name, 'indexes'] ? 
index_name THEN\n RAISE EXCEPTION '% index exists for column: % %', index_name, table_name, column_name;\n END IF;\n\n IF NOT cast_as = ANY('{text, int, small_int, big_int, real, double, boolean, date, jsonb}') THEN\n RAISE EXCEPTION '% is not a valid cast type', cast_as;\n END IF;\n\n -- set default config\n SELECT eql_v2.config_default(_config) INTO _config;\n\n SELECT eql_v2.config_add_table(table_name, _config) INTO _config;\n\n SELECT eql_v2.config_add_column(table_name, column_name, _config) INTO _config;\n\n SELECT eql_v2.config_add_cast(table_name, column_name, cast_as, _config) INTO _config;\n\n -- set default options for index if opts empty\n IF index_name = 'match' AND opts = '{}' THEN\n SELECT eql_v2.config_match_default() INTO opts;\n END IF;\n\n SELECT eql_v2.config_add_index(table_name, column_name, index_name, opts, _config) INTO _config;\n\n -- create a new pending record if we don't have one\n INSERT INTO public.eql_v2_configuration (state, data) VALUES ('pending', _config)\n ON CONFLICT (state)\n WHERE state = 'pending'\n DO UPDATE\n SET data = _config;\n\n IF NOT migrating THEN\n PERFORM eql_v2.migrate_config();\n PERFORM eql_v2.activate_config();\n END IF;\n\n PERFORM eql_v2.add_encrypted_constraint(table_name, column_name);\n\n -- exeunt\n RETURN _config;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Remove a search index configuration from an encrypted column\n--!\n--! Removes a previously configured search index from an encrypted column.\n--! Updates the pending configuration, then migrates and activates it\n--! unless migrating flag is set.\n--!\n--! @param table_name Text Name of the table containing the column\n--! @param column_name Text Name of the column\n--! @param index_name Text Type of index to remove\n--! @param migrating Boolean Skip auto-migration if true (default: false)\n--! @return JSONB Updated configuration object\n--! @throws Exception if no active or pending configuration exists\n--! 
@throws Exception if table is not configured\n--! @throws Exception if column is not configured\n--!\n--! @example\n--! -- Remove match index from column\n--! SELECT eql_v2.remove_search_config('posts', 'content', 'match');\n--!\n--! @see eql_v2.add_search_config\n--! @see eql_v2.modify_search_config\nCREATE FUNCTION eql_v2.remove_search_config(table_name text, column_name text, index_name text, migrating boolean DEFAULT false)\n RETURNS jsonb\nAS $$\n DECLARE\n _config jsonb;\n BEGIN\n\n -- set the active config\n SELECT data INTO _config FROM public.eql_v2_configuration WHERE state = 'active' OR state = 'pending' ORDER BY state DESC;\n\n -- if no config\n IF _config IS NULL THEN\n RAISE EXCEPTION 'No active or pending configuration exists';\n END IF;\n\n -- if the table doesn't exist\n IF NOT _config #> array['tables'] ? table_name THEN\n RAISE EXCEPTION 'No configuration exists for table: %', table_name;\n END IF;\n\n -- if the index does not exist\n -- IF NOT _config->key ? index_name THEN\n IF NOT _config #> array['tables', table_name] ? column_name THEN\n RAISE EXCEPTION 'No % index exists for column: % %', index_name, table_name, column_name;\n END IF;\n\n -- create a new pending record if we don't have one\n INSERT INTO public.eql_v2_configuration (state, data) VALUES ('pending', _config)\n ON CONFLICT (state)\n WHERE state = 'pending'\n DO NOTHING;\n\n -- remove the index\n SELECT _config #- array['tables', table_name, column_name, 'indexes', index_name] INTO _config;\n\n -- update the config and migrate (even if empty)\n UPDATE public.eql_v2_configuration SET data = _config WHERE state = 'pending';\n\n IF NOT migrating THEN\n PERFORM eql_v2.migrate_config();\n PERFORM eql_v2.activate_config();\n END IF;\n\n -- exeunt\n RETURN _config;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Modify a search index configuration for an encrypted column\n--!\n--! Updates an existing search index configuration by removing and re-adding it\n--! with new options. 
Convenience function that combines remove and add operations.\n--! If index does not exist, it is added.\n--!\n--! @param table_name Text Name of the table containing the column\n--! @param column_name Text Name of the column\n--! @param index_name Text Type of index to modify\n--! @param cast_as Text PostgreSQL type for decrypted values (default: 'text')\n--! @param opts JSONB New index-specific options (default: '{}')\n--! @param migrating Boolean Skip auto-migration if true (default: false)\n--! @return JSONB Updated configuration object\n--!\n--! @example\n--! -- Change match index tokenizer settings\n--! SELECT eql_v2.modify_search_config('posts', 'content', 'match', 'text',\n--! '{\"tokenizer\": {\"kind\": \"ngram\", \"token_length\": 4}}'\n--! );\n--!\n--! @see eql_v2.add_search_config\n--! @see eql_v2.remove_search_config\nCREATE FUNCTION eql_v2.modify_search_config(table_name text, column_name text, index_name text, cast_as text DEFAULT 'text', opts jsonb DEFAULT '{}', migrating boolean DEFAULT false)\n RETURNS jsonb\nAS $$\n BEGIN\n PERFORM eql_v2.remove_search_config(table_name, column_name, index_name, migrating);\n RETURN eql_v2.add_search_config(table_name, column_name, index_name, cast_as, opts, migrating);\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Migrate pending configuration to encrypting state\n--!\n--! Transitions the pending configuration to encrypting state, validating that\n--! all configured columns have encrypted target columns ready. This is part of\n--! the configuration lifecycle: pending → encrypting → active.\n--!\n--! @return Boolean True if migration succeeds\n--! @throws Exception if encryption already in progress\n--! @throws Exception if no pending configuration exists\n--! @throws Exception if configured columns lack encrypted targets\n--!\n--! @example\n--! -- Manually migrate configuration (normally done automatically)\n--! SELECT eql_v2.migrate_config();\n--!\n--! @see eql_v2.activate_config\n--! 
@see eql_v2.add_column\nCREATE FUNCTION eql_v2.migrate_config()\n RETURNS boolean\nAS $$\n\tBEGIN\n\n IF EXISTS (SELECT FROM public.eql_v2_configuration c WHERE c.state = 'encrypting') THEN\n RAISE EXCEPTION 'An encryption is already in progress';\n END IF;\n\n\t\tIF NOT EXISTS (SELECT FROM public.eql_v2_configuration c WHERE c.state = 'pending') THEN\n\t\t\tRAISE EXCEPTION 'No pending configuration exists to encrypt';\n\t\tEND IF;\n\n IF NOT eql_v2.ready_for_encryption() THEN\n RAISE EXCEPTION 'Some pending columns do not have an encrypted target';\n END IF;\n\n UPDATE public.eql_v2_configuration SET state = 'encrypting' WHERE state = 'pending';\n\t\tRETURN true;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Activate encrypting configuration\n--!\n--! Transitions the encrypting configuration to active state, making it the\n--! current operational configuration. Marks previous active configuration as\n--! inactive. Final step in configuration lifecycle: pending → encrypting → active.\n--!\n--! @return Boolean True if activation succeeds\n--! @throws Exception if no encrypting configuration exists to activate\n--!\n--! @example\n--! -- Manually activate configuration (normally done automatically)\n--! SELECT eql_v2.activate_config();\n--!\n--! @see eql_v2.migrate_config\n--! @see eql_v2.add_column\nCREATE FUNCTION eql_v2.activate_config()\n RETURNS boolean\nAS $$\n\tBEGIN\n\n\t IF EXISTS (SELECT FROM public.eql_v2_configuration c WHERE c.state = 'encrypting') THEN\n\t \tUPDATE public.eql_v2_configuration SET state = 'inactive' WHERE state = 'active';\n\t\t\tUPDATE public.eql_v2_configuration SET state = 'active' WHERE state = 'encrypting';\n\t\t\tRETURN true;\n\t\tELSE\n\t\t\tRAISE EXCEPTION 'No encrypting configuration exists to activate';\n\t\tEND IF;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Discard pending configuration\n--!\n--! Deletes the pending configuration without applying changes. Use this to\n--! 
abandon configuration changes before they are migrated and activated.\n--!\n--! @return Boolean True if discard succeeds\n--! @throws Exception if no pending configuration exists to discard\n--!\n--! @example\n--! -- Discard uncommitted configuration changes\n--! SELECT eql_v2.discard();\n--!\n--! @see eql_v2.add_column\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.discard()\n RETURNS boolean\nAS $$\n BEGIN\n IF EXISTS (SELECT FROM public.eql_v2_configuration c WHERE c.state = 'pending') THEN\n DELETE FROM public.eql_v2_configuration WHERE state = 'pending';\n RETURN true;\n ELSE\n RAISE EXCEPTION 'No pending configuration exists to discard';\n END IF;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Configure a column for encryption\n--!\n--! Adds a column to the encryption configuration, making it eligible for\n--! encrypted storage and search indexes. Creates or updates pending configuration,\n--! adds encrypted constraint, then migrates and activates unless migrating flag is set.\n--!\n--! @param table_name Text Name of the table containing the column\n--! @param column_name Text Name of the column to encrypt\n--! @param cast_as Text PostgreSQL type to cast decrypted values (default: 'text')\n--! @param migrating Boolean Skip auto-migration if true (default: false)\n--! @return JSONB Updated configuration object\n--! @throws Exception if column already configured for encryption\n--!\n--! @example\n--! -- Configure email column for encryption\n--! SELECT eql_v2.add_column('users', 'email', 'text');\n--!\n--! -- Configure age column with integer casting\n--! SELECT eql_v2.add_column('users', 'age', 'int');\n--!\n--! @see eql_v2.add_search_config\n--! 
@see eql_v2.remove_column\nCREATE FUNCTION eql_v2.add_column(table_name text, column_name text, cast_as text DEFAULT 'text', migrating boolean DEFAULT false)\n RETURNS jsonb\nAS $$\n DECLARE\n key text;\n _config jsonb;\n BEGIN\n -- set the active config\n SELECT data INTO _config FROM public.eql_v2_configuration WHERE state = 'active' OR state = 'pending' ORDER BY state DESC;\n\n -- set default config\n SELECT eql_v2.config_default(_config) INTO _config;\n\n -- if index exists\n IF _config #> array['tables', table_name] ? column_name THEN\n RAISE EXCEPTION 'Config exists for column: % %', table_name, column_name;\n END IF;\n\n SELECT eql_v2.config_add_table(table_name, _config) INTO _config;\n\n SELECT eql_v2.config_add_column(table_name, column_name, _config) INTO _config;\n\n SELECT eql_v2.config_add_cast(table_name, column_name, cast_as, _config) INTO _config;\n\n -- create a new pending record if we don't have one\n INSERT INTO public.eql_v2_configuration (state, data) VALUES ('pending', _config)\n ON CONFLICT (state)\n WHERE state = 'pending'\n DO UPDATE\n SET data = _config;\n\n IF NOT migrating THEN\n PERFORM eql_v2.migrate_config();\n PERFORM eql_v2.activate_config();\n END IF;\n\n PERFORM eql_v2.add_encrypted_constraint(table_name, column_name);\n\n -- exeunt\n RETURN _config;\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Remove a column from encryption configuration\n--!\n--! Removes a column from the encryption configuration, including all associated\n--! search indexes. Removes encrypted constraint, updates pending configuration,\n--! then migrates and activates unless migrating flag is set.\n--!\n--! @param table_name Text Name of the table containing the column\n--! @param column_name Text Name of the column to remove\n--! @param migrating Boolean Skip auto-migration if true (default: false)\n--! @return JSONB Updated configuration object\n--! @throws Exception if no active or pending configuration exists\n--! 
@throws Exception if table is not configured\n--! @throws Exception if column is not configured\n--!\n--! @example\n--! -- Remove email column from encryption\n--! SELECT eql_v2.remove_column('users', 'email');\n--!\n--! @see eql_v2.add_column\n--! @see eql_v2.remove_search_config\nCREATE FUNCTION eql_v2.remove_column(table_name text, column_name text, migrating boolean DEFAULT false)\n RETURNS jsonb\nAS $$\n DECLARE\n key text;\n _config jsonb;\n BEGIN\n -- set the active config\n SELECT data INTO _config FROM public.eql_v2_configuration WHERE state = 'active' OR state = 'pending' ORDER BY state DESC;\n\n -- if no config\n IF _config IS NULL THEN\n RAISE EXCEPTION 'No active or pending configuration exists';\n END IF;\n\n -- if the table doesn't exist\n IF NOT _config #> array['tables'] ? table_name THEN\n RAISE EXCEPTION 'No configuration exists for table: %', table_name;\n END IF;\n\n -- if the column does not exist\n IF NOT _config #> array['tables', table_name] ? column_name THEN\n RAISE EXCEPTION 'No configuration exists for column: % %', table_name, column_name;\n END IF;\n\n -- create a new pending record if we don't have one\n INSERT INTO public.eql_v2_configuration (state, data) VALUES ('pending', _config)\n ON CONFLICT (state)\n WHERE state = 'pending'\n DO NOTHING;\n\n -- remove the column\n SELECT _config #- array['tables', table_name, column_name] INTO _config;\n\n -- if table is now empty, remove the table\n IF _config #> array['tables', table_name] = '{}' THEN\n SELECT _config #- array['tables', table_name] INTO _config;\n END IF;\n\n PERFORM eql_v2.remove_encrypted_constraint(table_name, column_name);\n\n -- update the config (even if empty) and activate\n UPDATE public.eql_v2_configuration SET data = _config WHERE state = 'pending';\n\n IF NOT migrating THEN\n -- For empty configs, skip migration validation and directly activate\n IF _config #> array['tables'] = '{}' THEN\n UPDATE public.eql_v2_configuration SET state = 'inactive' WHERE state = 
'active';\n UPDATE public.eql_v2_configuration SET state = 'active' WHERE state = 'pending';\n ELSE\n PERFORM eql_v2.migrate_config();\n PERFORM eql_v2.activate_config();\n END IF;\n END IF;\n\n -- exeunt\n RETURN _config;\n\n END;\n$$ LANGUAGE plpgsql;\n\n--! @brief Reload configuration from CipherStash Proxy\n--!\n--! Placeholder function for reloading configuration from the CipherStash Proxy.\n--! Currently returns NULL without side effects.\n--!\n--! @return Void\n--!\n--! @note This function may be used for configuration synchronization in future versions\nCREATE FUNCTION eql_v2.reload_config()\n RETURNS void\nLANGUAGE sql STRICT PARALLEL SAFE\nBEGIN ATOMIC\n RETURN NULL;\nEND;\n\n--! @brief Query encryption configuration in tabular format\n--!\n--! Returns the active encryption configuration as a table for easier querying\n--! and filtering. Shows all configured tables, columns, cast types, and indexes.\n--!\n--! @return TABLE Contains configuration state, relation name, column name, cast type, and indexes\n--!\n--! @example\n--! -- View all encrypted columns\n--! SELECT * FROM eql_v2.config();\n--!\n--! -- Find all columns with match indexes\n--! SELECT relation, col_name FROM eql_v2.config()\n--! WHERE indexes ? 'match';\n--!\n--! @see eql_v2.add_column\n--! @see eql_v2.add_search_config\nCREATE FUNCTION eql_v2.config() RETURNS TABLE (\n state eql_v2_configuration_state,\n relation text,\n col_name text,\n decrypts_as text,\n indexes jsonb\n)\nAS $$\nBEGIN\n RETURN QUERY\n WITH tables AS (\n SELECT config.state, tables.key AS table, tables.value AS config\n FROM public.eql_v2_configuration config, jsonb_each(data->'tables') tables\n WHERE config.data->>'v' = '1'\n )\n SELECT\n tables.state,\n tables.table,\n column_config.key,\n column_config.value->>'cast_as',\n column_config.value->'indexes'\n FROM tables, jsonb_each(tables.config) column_config;\nEND;\n$$ LANGUAGE plpgsql;\n\n--! @file config/constraints.sql\n--! 
@brief Configuration validation functions and constraints\n--!\n--! Provides CHECK constraint functions to validate encryption configuration structure.\n--! Ensures configurations have required fields (version, tables) and valid values\n--! for index types and cast types before being stored.\n--!\n--! @see config/tables.sql where constraints are applied\n\n\n--! @brief Extract index type names from configuration\n--! @internal\n--!\n--! Helper function that extracts all index type names from the configuration's\n--! 'indexes' sections across all tables and columns.\n--!\n--! @param jsonb Configuration data to extract from\n--! @return SETOF text Index type names (e.g., 'match', 'ore', 'unique', 'ste_vec')\n--!\n--! @note Used by config_check_indexes for validation\n--! @see eql_v2.config_check_indexes\nCREATE FUNCTION eql_v2.config_get_indexes(val jsonb)\n RETURNS SETOF text\n LANGUAGE sql IMMUTABLE STRICT PARALLEL SAFE\nBEGIN ATOMIC\n\tSELECT jsonb_object_keys(jsonb_path_query(val,'$.tables.*.*.indexes'));\nEND;\n\n\n--! @brief Validate index types in configuration\n--! @internal\n--!\n--! Checks that all index types specified in the configuration are valid.\n--! Valid index types are: match, ore, unique, ste_vec.\n--!\n--! @param jsonb Configuration data to validate\n--! @return boolean True if all index types are valid\n--! @throws Exception if any invalid index type found\n--!\n--! @note Used in CHECK constraint on eql_v2_configuration table\n--! @see eql_v2.config_get_indexes\nCREATE FUNCTION eql_v2.config_check_indexes(val jsonb)\n RETURNS BOOLEAN\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n\tBEGIN\n\n IF (SELECT EXISTS (SELECT eql_v2.config_get_indexes(val))) THEN\n IF (SELECT bool_and(index = ANY('{match, ore, unique, ste_vec}')) FROM eql_v2.config_get_indexes(val) AS index) THEN\n RETURN true;\n END IF;\n RAISE 'Configuration has an invalid index (%). 
Index should be one of {match, ore, unique, ste_vec}', val;\n END IF;\n RETURN true;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Validate cast types in configuration\n--! @internal\n--!\n--! Checks that all 'cast_as' types specified in the configuration are valid.\n--! Valid cast types are: text, int, small_int, big_int, real, double, boolean, date, jsonb.\n--!\n--! @param jsonb Configuration data to validate\n--! @return boolean True if all cast types are valid or no cast types specified\n--! @throws Exception if any invalid cast type found\n--!\n--! @note Used in CHECK constraint on eql_v2_configuration table\n--! @note Empty configurations (no cast_as fields) are valid\n--! @note Cast type names are EQL's internal representations, not PostgreSQL native types\nCREATE FUNCTION eql_v2.config_check_cast(val jsonb)\n RETURNS BOOLEAN\nAS $$\n\tBEGIN\n -- If there are cast_as fields, validate them\n IF EXISTS (SELECT jsonb_array_elements_text(jsonb_path_query_array(val, '$.tables.*.*.cast_as'))) THEN\n IF (SELECT bool_and(cast_as = ANY('{text, int, small_int, big_int, real, double, boolean, date, jsonb}')) \n FROM (SELECT jsonb_array_elements_text(jsonb_path_query_array(val, '$.tables.*.*.cast_as')) AS cast_as) casts) THEN\n RETURN true;\n END IF;\n RAISE 'Configuration has an invalid cast_as (%). Cast should be one of {text, int, small_int, big_int, real, double, boolean, date, jsonb}', val;\n END IF;\n -- If no cast_as fields exist (empty config), that's valid\n RETURN true;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Validate tables field presence\n--! @internal\n--!\n--! Ensures the configuration has a 'tables' field, which is required\n--! to specify which database tables contain encrypted columns.\n--!\n--! @param jsonb Configuration data to validate\n--! @return boolean True if 'tables' field exists\n--! @throws Exception if 'tables' field is missing\n--!\n--! 
@note Used in CHECK constraint on eql_v2_configuration table\nCREATE FUNCTION eql_v2.config_check_tables(val jsonb)\n RETURNS boolean\nAS $$\n\tBEGIN\n IF (val ? 'tables') THEN\n RETURN true;\n END IF;\n RAISE 'Configuration missing tables (tables) field: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Validate version field presence\n--! @internal\n--!\n--! Ensures the configuration has a 'v' (version) field, which tracks\n--! the configuration format version.\n--!\n--! @param jsonb Configuration data to validate\n--! @return boolean True if 'v' field exists\n--! @throws Exception if 'v' field is missing\n--!\n--! @note Used in CHECK constraint on eql_v2_configuration table\nCREATE FUNCTION eql_v2.config_check_version(val jsonb)\n RETURNS boolean\nAS $$\n\tBEGIN\n IF (val ? 'v') THEN\n RETURN true;\n END IF;\n RAISE 'Configuration missing version (v) field: %', val;\n END;\n$$ LANGUAGE plpgsql;\n\n\n--! @brief Drop existing data validation constraint if present\n--! @note Allows constraint to be recreated during upgrades\nALTER TABLE public.eql_v2_configuration DROP CONSTRAINT IF EXISTS eql_v2_configuration_data_check;\n\n\n--! @brief Comprehensive configuration data validation\n--!\n--! CHECK constraint that validates all aspects of configuration data:\n--! - Version field presence\n--! - Tables field presence\n--! - Valid cast_as types\n--! - Valid index types\n--!\n--! @note Combines all config_check_* validation functions\n--! @see eql_v2.config_check_version\n--! @see eql_v2.config_check_tables\n--! @see eql_v2.config_check_cast\n--! @see eql_v2.config_check_indexes\nALTER TABLE public.eql_v2_configuration\n ADD CONSTRAINT eql_v2_configuration_data_check CHECK (\n eql_v2.config_check_version(data) AND\n eql_v2.config_check_tables(data) AND\n eql_v2.config_check_cast(data) AND\n eql_v2.config_check_indexes(data)\n);\n\n\n\n\n--! @brief Compare two encrypted values using Blake3 hash index terms\n--!\n--! 
Performs a three-way comparison (returns -1/0/1) of encrypted values using\n--! their Blake3 hash index terms. Used internally by the equality operator (=)\n--! for exact-match queries without decryption.\n--!\n--! @param a eql_v2_encrypted First encrypted value to compare\n--! @param b eql_v2_encrypted Second encrypted value to compare\n--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b\n--!\n--! @note NULL values are sorted before non-NULL values\n--! @note Comparison uses underlying text type ordering of Blake3 hashes\n--!\n--! @see eql_v2.blake3\n--! @see eql_v2.has_blake3\n--! @see eql_v2.\"=\"\nCREATE FUNCTION eql_v2.compare_blake3(a eql_v2_encrypted, b eql_v2_encrypted)\n RETURNS integer\n IMMUTABLE STRICT PARALLEL SAFE\nAS $$\n DECLARE\n a_term eql_v2.blake3;\n b_term eql_v2.blake3;\n BEGIN\n\n IF a IS NULL AND b IS NULL THEN\n RETURN 0;\n END IF;\n\n IF a IS NULL THEN\n RETURN -1;\n END IF;\n\n IF b IS NULL THEN\n RETURN 1;\n END IF;\n\n IF eql_v2.has_blake3(a) THEN\n a_term = eql_v2.blake3(a);\n END IF;\n\n IF eql_v2.has_blake3(b) THEN\n b_term = eql_v2.blake3(b);\n END IF;\n\n IF a_term IS NULL AND b_term IS NULL THEN\n RETURN 0;\n END IF;\n\n IF a_term IS NULL THEN\n RETURN -1;\n END IF;\n\n IF b_term IS NULL THEN\n RETURN 1;\n END IF;\n\n -- Using the underlying text type comparison\n IF a_term = b_term THEN\n RETURN 0;\n END IF;\n\n IF a_term < b_term THEN\n RETURN -1;\n END IF;\n\n IF a_term > b_term THEN\n RETURN 1;\n END IF;\n\n END;\n$$ LANGUAGE plpgsql;\n" + } + ], + "postcheck": [ + { + "description": "verify \"eql_v2\" schema exists", + "sql": "SELECT EXISTS (SELECT 1 FROM pg_namespace WHERE nspname = 'eql_v2')" + }, + { + "description": "verify \"public.eql_v2_encrypted\" composite type exists", + "sql": "SELECT EXISTS (SELECT 1 FROM pg_type t JOIN pg_namespace n ON n.oid = t.typnamespace WHERE n.nspname = 'public' AND t.typname = 'eql_v2_encrypted')" + } + ] + } +] \ No newline at end of file diff --git 
a/packages/prisma-next/migrations/refs/head.json b/packages/prisma-next/migrations/refs/head.json new file mode 100644 index 00000000..78f58089 --- /dev/null +++ b/packages/prisma-next/migrations/refs/head.json @@ -0,0 +1,4 @@ +{ + "hash": "sha256:efa685171bebbb8f078f08d12be3578bb5d96b71669dccc6cc9e4be96af8cdb4", + "invariants": ["cipherstash:install-eql-bundle-v1"] +} diff --git a/packages/prisma-next/package.json b/packages/prisma-next/package.json new file mode 100644 index 00000000..b5db9073 --- /dev/null +++ b/packages/prisma-next/package.json @@ -0,0 +1,109 @@ +{ + "name": "@cipherstash/prisma-next", + "version": "0.0.0", + "license": "MIT", + "author": "CipherStash ", + "description": "CipherStash extension for Prisma Next: searchable application-layer field-level encryption for Postgres, with six encrypted column types, 17 query operators, bulk encrypt/decrypt middleware, and a baseline migration that installs the vendored EQL bundle SQL byte-for-byte.", + "keywords": [ + "encrypted", + "prisma-next", + "orm", + "type-safe", + "security", + "postgres", + "eql" + ], + "bugs": { + "url": "https://github.com/cipherstash/stack/issues" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/cipherstash/stack.git", + "directory": "packages/prisma-next" + }, + "type": "module", + "sideEffects": false, + "exports": { + "./codec-types": { + "types": "./dist/codec-types.d.ts", + "import": "./dist/codec-types.js" + }, + "./column-types": { + "types": "./dist/column-types.d.ts", + "import": "./dist/column-types.js" + }, + "./control": { + "types": "./dist/control.d.ts", + "import": "./dist/control.js" + }, + "./middleware": { + "types": "./dist/middleware.d.ts", + "import": "./dist/middleware.js" + }, + "./migration": { + "types": "./dist/migration.d.ts", + "import": "./dist/migration.js" + }, + "./operation-types": { + "types": "./dist/operation-types.d.ts", + "import": "./dist/operation-types.js" + }, + "./pack": { + "types": "./dist/pack.d.ts", + 
"import": "./dist/pack.js" + }, + "./runtime": { + "types": "./dist/runtime.d.ts", + "import": "./dist/runtime.js" + }, + "./stack": { + "types": "./dist/stack.d.ts", + "import": "./dist/stack.js" + }, + "./package.json": "./package.json" + }, + "types": "./dist/control.d.ts", + "files": [ + "dist", + "src", + "README.md" + ], + "scripts": { + "build": "tsup", + "dev": "tsup --watch", + "test": "vitest run", + "test:coverage": "vitest run --coverage", + "typecheck": "tsc --project tsconfig.json --noEmit", + "lint": "biome check . --error-on-warnings", + "lint:fix": "biome check --write .", + "clean": "rm -rf dist coverage .tmp-output" + }, + "dependencies": { + "@cipherstash/stack": "workspace:*", + "@prisma-next/contract": "0.6.0-dev.8", + "@prisma-next/family-sql": "0.6.0-dev.8", + "@prisma-next/framework-components": "0.6.0-dev.8", + "@prisma-next/migration-tools": "0.6.0-dev.8", + "@prisma-next/sql-contract": "0.6.0-dev.8", + "@prisma-next/sql-operations": "0.6.0-dev.8", + "@prisma-next/sql-relational-core": "0.6.0-dev.8", + "@prisma-next/sql-runtime": "0.6.0-dev.8", + "@prisma-next/ts-render": "0.6.0-dev.8", + "@prisma-next/utils": "0.6.0-dev.8", + "arktype": "^2.1.29" + }, + "devDependencies": { + "@prisma-next/adapter-postgres": "0.6.0-dev.8", + "@prisma-next/cli": "0.6.0-dev.8", + "@prisma-next/driver-postgres": "0.6.0-dev.8", + "@prisma-next/psl-parser": "0.6.0-dev.8", + "@prisma-next/sql-contract-psl": "0.6.0-dev.8", + "@prisma-next/sql-contract-ts": "0.6.0-dev.8", + "@prisma-next/sql-schema-ir": "0.6.0-dev.8", + "@prisma-next/target-postgres": "0.6.0-dev.8", + "pathe": "^2.0.3", + "tsup": "catalog:repo", + "typescript": "catalog:repo", + "vitest": "catalog:repo" + } +} diff --git a/packages/prisma-next/prisma-next.config.ts b/packages/prisma-next/prisma-next.config.ts new file mode 100644 index 00000000..22ec9b09 --- /dev/null +++ b/packages/prisma-next/prisma-next.config.ts @@ -0,0 +1,36 @@ +/** + * Prisma Next config for the `@cipherstash/prisma-next` 
package itself. + * + * The extension package is treated as a self-contained "project" for + * the CLI: `prisma-next contract emit` writes + * `src/contract.{json,d.ts}` (colocated with the `src/contract.prisma` + * source); the migration self-emit script + * (`pnpm tsx migrations//migration.ts`) re-emits + * `migrations//{ops,migration}.json` from the hand-edited + * `migration.ts` subclass. + * + * This config is **maintainer-only** — application authors who consume + * this package do not need it. Their own `prisma-next.config.ts` + * registers the extension via `extensionPacks: [cipherstash]`; the + * descriptor at `src/exports/control.ts` JSON-imports the on-disk + * artefacts emitted here. + */ + +import postgresAdapter from '@prisma-next/adapter-postgres/control' +import { defineConfig } from '@prisma-next/cli/config-types' +import sql from '@prisma-next/family-sql/control' +import { prismaContract } from '@prisma-next/sql-contract-psl/provider' +import postgres from '@prisma-next/target-postgres/control' + +export default defineConfig({ + family: sql, + target: postgres, + adapter: postgresAdapter, + contract: prismaContract('./src/contract.prisma', { + output: 'src/contract.json', + target: postgres, + }), + migrations: { + dir: 'migrations', + }, +}) diff --git a/packages/prisma-next/src/contract-authoring.ts b/packages/prisma-next/src/contract-authoring.ts new file mode 100644 index 00000000..4821c56d --- /dev/null +++ b/packages/prisma-next/src/contract-authoring.ts @@ -0,0 +1,200 @@ +/** + * Authoring contributions for the cipherstash extension. + * + * Registers `cipherstash.EncryptedString({ equality?, freeTextSearch? })` + * as a namespaced PSL type constructor. 
The same descriptor lowers a + * PSL field-type expression like `cipherstash.EncryptedString({ equality: + * true })` and a TS factory call like `encryptedString({ equality: true })` + * (see `../exports/column-types`) to an identical `ColumnTypeDescriptor` + * so PSL- and TS-authored contracts emit byte-identical `contract.json`. + * + * Mirrors `packages/3-extensions/pgvector/src/core/authoring.ts`. The + * cipherstash variant differs in three respects: + * (a) `cipherstash` is the namespace, + * (b) the constructor takes a single OPTIONAL object argument with two + * optional booleans (so `cipherstash.EncryptedString()`, + * `cipherstash.EncryptedString({})`, and the fully-spelled + * `cipherstash.EncryptedString({ equality: true, freeTextSearch: true })` + * all parse), and + * (c) both flags default to `true` — searchable encryption is the + * legitimate default for an extension whose entire reason for + * existing is to make encrypted columns queryable. Users who want + * storage-only encryption opt out explicitly: + * `cipherstash.EncryptedString({ equality: false, freeTextSearch: false })`. 
+ */ + +import type { AuthoringTypeNamespace } from '@prisma-next/framework-components/authoring'; +import { + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_BOOLEAN_CODEC_ID, + CIPHERSTASH_DATE_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, + CIPHERSTASH_JSON_CODEC_ID, + CIPHERSTASH_STRING_CODEC_ID, + EQL_V2_ENCRYPTED_TYPE, +} from './extension-metadata/constants'; + +export const cipherstashAuthoringTypes = { + cipherstash: { + EncryptedString: { + kind: 'typeConstructor', + args: [ + { + kind: 'object', + name: 'options', + optional: true, + properties: { + equality: { kind: 'boolean', optional: true }, + freeTextSearch: { kind: 'boolean', optional: true }, + orderAndRange: { kind: 'boolean', optional: true }, + }, + }, + ], + output: { + codecId: CIPHERSTASH_STRING_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + typeParams: { + equality: { kind: 'arg', index: 0, path: ['equality'], default: true }, + freeTextSearch: { + kind: 'arg', + index: 0, + path: ['freeTextSearch'], + default: true, + }, + orderAndRange: { + kind: 'arg', + index: 0, + path: ['orderAndRange'], + default: true, + }, + }, + }, + }, + EncryptedDouble: { + kind: 'typeConstructor', + args: [ + { + kind: 'object', + name: 'options', + optional: true, + properties: { + equality: { kind: 'boolean', optional: true }, + orderAndRange: { kind: 'boolean', optional: true }, + }, + }, + ], + output: { + codecId: CIPHERSTASH_DOUBLE_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + typeParams: { + equality: { kind: 'arg', index: 0, path: ['equality'], default: true }, + orderAndRange: { + kind: 'arg', + index: 0, + path: ['orderAndRange'], + default: true, + }, + }, + }, + }, + EncryptedBigInt: { + kind: 'typeConstructor', + args: [ + { + kind: 'object', + name: 'options', + optional: true, + properties: { + equality: { kind: 'boolean', optional: true }, + orderAndRange: { kind: 'boolean', optional: true }, + }, + }, + ], + output: { + codecId: CIPHERSTASH_BIGINT_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + 
typeParams: { + equality: { kind: 'arg', index: 0, path: ['equality'], default: true }, + orderAndRange: { + kind: 'arg', + index: 0, + path: ['orderAndRange'], + default: true, + }, + }, + }, + }, + EncryptedDate: { + kind: 'typeConstructor', + args: [ + { + kind: 'object', + name: 'options', + optional: true, + properties: { + equality: { kind: 'boolean', optional: true }, + orderAndRange: { kind: 'boolean', optional: true }, + }, + }, + ], + output: { + codecId: CIPHERSTASH_DATE_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + typeParams: { + equality: { kind: 'arg', index: 0, path: ['equality'], default: true }, + orderAndRange: { + kind: 'arg', + index: 0, + path: ['orderAndRange'], + default: true, + }, + }, + }, + }, + EncryptedBoolean: { + kind: 'typeConstructor', + args: [ + { + kind: 'object', + name: 'options', + optional: true, + properties: { + equality: { kind: 'boolean', optional: true }, + }, + }, + ], + output: { + codecId: CIPHERSTASH_BOOLEAN_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + typeParams: { + equality: { kind: 'arg', index: 0, path: ['equality'], default: true }, + }, + }, + }, + EncryptedJson: { + kind: 'typeConstructor', + args: [ + { + kind: 'object', + name: 'options', + optional: true, + properties: { + searchableJson: { kind: 'boolean', optional: true }, + }, + }, + ], + output: { + codecId: CIPHERSTASH_JSON_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + typeParams: { + searchableJson: { + kind: 'arg', + index: 0, + path: ['searchableJson'], + default: true, + }, + }, + }, + }, + }, +} as const satisfies AuthoringTypeNamespace; diff --git a/packages/prisma-next/src/contract.d.ts b/packages/prisma-next/src/contract.d.ts new file mode 100644 index 00000000..76d0217b --- /dev/null +++ b/packages/prisma-next/src/contract.d.ts @@ -0,0 +1,149 @@ +// ⚠️ GENERATED FILE - DO NOT EDIT +// This file is automatically generated by 'prisma-next contract emit'. 
+// To regenerate, run: prisma-next contract emit +import type { CodecTypes as PgTypes } from '@prisma-next/target-postgres/codec-types'; +import type { JsonValue } from '@prisma-next/target-postgres/codec-types'; +import type { Char } from '@prisma-next/target-postgres/codec-types'; +import type { Varchar } from '@prisma-next/target-postgres/codec-types'; +import type { Numeric } from '@prisma-next/target-postgres/codec-types'; +import type { Bit } from '@prisma-next/target-postgres/codec-types'; +import type { VarBit } from '@prisma-next/target-postgres/codec-types'; +import type { Timestamp } from '@prisma-next/target-postgres/codec-types'; +import type { Timestamptz } from '@prisma-next/target-postgres/codec-types'; +import type { Time } from '@prisma-next/target-postgres/codec-types'; +import type { Timetz } from '@prisma-next/target-postgres/codec-types'; +import type { Interval } from '@prisma-next/target-postgres/codec-types'; +import type { QueryOperationTypes as PgAdapterQueryOps } from '@prisma-next/adapter-postgres/operation-types'; + +import type { + ContractWithTypeMaps, + TypeMaps as TypeMapsType, +} from '@prisma-next/sql-contract/types'; +import type { + Contract as ContractType, + ExecutionHashBase, + ProfileHashBase, + StorageHashBase, +} from '@prisma-next/contract/types'; + +export type StorageHash = + StorageHashBase<'sha256:efa685171bebbb8f078f08d12be3578bb5d96b71669dccc6cc9e4be96af8cdb4'>; +export type ExecutionHash = ExecutionHashBase; +export type ProfileHash = + ProfileHashBase<'sha256:1a8dbe044289f30a1de958fe800cc5a8378b285d2e126a8c44b58864bac2c18e'>; + +export type CodecTypes = PgTypes; +export type OperationTypes = Record; +export type LaneCodecTypes = CodecTypes; +export type QueryOperationTypes = PgAdapterQueryOps; +type DefaultLiteralValue = CodecId extends keyof CodecTypes + ? 
CodecTypes[CodecId]['output'] + : _Encoded; + +export type FieldOutputTypes = { + readonly EqlV2Configuration: { + readonly id: CodecTypes['pg/text@1']['output']; + readonly state: CodecTypes['pg/text@1']['output']; + readonly data: CodecTypes['pg/jsonb@1']['output']; + }; +}; +export type FieldInputTypes = { + readonly EqlV2Configuration: { + readonly id: CodecTypes['pg/text@1']['input']; + readonly state: CodecTypes['pg/text@1']['input']; + readonly data: CodecTypes['pg/jsonb@1']['input']; + }; +}; +export type TypeMaps = TypeMapsType< + CodecTypes, + OperationTypes, + QueryOperationTypes, + FieldOutputTypes, + FieldInputTypes +>; + +type ContractBase = ContractType< + { + readonly tables: { + readonly eql_v2_configuration: { + columns: { + readonly id: { + readonly nativeType: 'text'; + readonly codecId: 'pg/text@1'; + readonly nullable: false; + }; + readonly state: { + readonly nativeType: 'text'; + readonly codecId: 'pg/text@1'; + readonly nullable: false; + }; + readonly data: { + readonly nativeType: 'jsonb'; + readonly codecId: 'pg/jsonb@1'; + readonly nullable: false; + }; + }; + primaryKey: { readonly columns: readonly ['id'] }; + uniques: readonly []; + indexes: readonly []; + foreignKeys: readonly []; + }; + }; + readonly types: Record; + readonly storageHash: StorageHash; + }, + { + readonly EqlV2Configuration: { + readonly fields: { + readonly id: { + readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/text@1' }; + }; + readonly state: { + readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/text@1' }; + }; + readonly data: { + readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/jsonb@1' }; + }; + }; + readonly relations: Record; + readonly storage: { + readonly table: 'eql_v2_configuration'; + readonly fields: { + readonly id: { readonly column: 'id' }; + readonly state: { readonly column: 'state' }; + readonly data: { readonly 
column: 'data' }; + }; + }; + }; + } +> & { + readonly target: 'postgres'; + readonly targetFamily: 'sql'; + readonly roots: { readonly eql_v2_configuration: 'EqlV2Configuration' }; + readonly capabilities: { + readonly postgres: { + readonly jsonAgg: true; + readonly lateral: true; + readonly limit: true; + readonly orderBy: true; + readonly returning: true; + }; + readonly sql: { + readonly defaultInInsert: true; + readonly enums: true; + readonly returning: true; + }; + }; + readonly extensionPacks: {}; + readonly meta: {}; + + readonly profileHash: ProfileHash; +}; + +export type Contract = ContractWithTypeMaps; + +export type Tables = Contract['storage']['tables']; +export type Models = Contract['models']; diff --git a/packages/prisma-next/src/contract.json b/packages/prisma-next/src/contract.json new file mode 100644 index 00000000..0a8584e4 --- /dev/null +++ b/packages/prisma-next/src/contract.json @@ -0,0 +1,104 @@ +{ + "schemaVersion": "1", + "targetFamily": "sql", + "target": "postgres", + "profileHash": "sha256:1a8dbe044289f30a1de958fe800cc5a8378b285d2e126a8c44b58864bac2c18e", + "roots": { + "eql_v2_configuration": "EqlV2Configuration" + }, + "models": { + "EqlV2Configuration": { + "fields": { + "data": { + "nullable": false, + "type": { + "codecId": "pg/jsonb@1", + "kind": "scalar" + } + }, + "id": { + "nullable": false, + "type": { + "codecId": "pg/text@1", + "kind": "scalar" + } + }, + "state": { + "nullable": false, + "type": { + "codecId": "pg/text@1", + "kind": "scalar" + } + } + }, + "relations": {}, + "storage": { + "fields": { + "data": { + "column": "data" + }, + "id": { + "column": "id" + }, + "state": { + "column": "state" + } + }, + "table": "eql_v2_configuration" + } + } + }, + "storage": { + "storageHash": "sha256:efa685171bebbb8f078f08d12be3578bb5d96b71669dccc6cc9e4be96af8cdb4", + "tables": { + "eql_v2_configuration": { + "columns": { + "data": { + "codecId": "pg/jsonb@1", + "nativeType": "jsonb", + "nullable": false + }, + "id": { + 
"codecId": "pg/text@1", + "nativeType": "text", + "nullable": false + }, + "state": { + "codecId": "pg/text@1", + "nativeType": "text", + "nullable": false + } + }, + "foreignKeys": [], + "indexes": [], + "primaryKey": { + "columns": [ + "id" + ] + }, + "uniques": [] + } + } + }, + "capabilities": { + "postgres": { + "jsonAgg": true, + "lateral": true, + "limit": true, + "orderBy": true, + "returning": true + }, + "sql": { + "defaultInInsert": true, + "enums": true, + "returning": true + } + }, + "extensionPacks": {}, + "meta": {}, + "_generated": { + "warning": "⚠️ GENERATED FILE - DO NOT EDIT", + "message": "This file is automatically generated by \"prisma-next contract emit\".", + "regenerate": "To regenerate, run: prisma-next contract emit" + } +} \ No newline at end of file diff --git a/packages/prisma-next/src/contract.prisma b/packages/prisma-next/src/contract.prisma new file mode 100644 index 00000000..def9808b --- /dev/null +++ b/packages/prisma-next/src/contract.prisma @@ -0,0 +1,46 @@ +// PSL contract source for the `extension-cipherstash` package. +// +// Authored against the contract-space package layout convention. The same emit +// pipeline application authors use is applied here: +// +// `prisma-next contract emit` → `/src/contract.{json,d.ts}` +// `prisma-next migration plan` → `/migrations//` +// +// The descriptor at `src/exports/control.ts` then wires the emitted JSON +// artefacts via JSON-import declarations. +// +// ## IR coverage and explicit deferral +// +// CipherStash should declare four kinds of typed objects in its +// contract IR: tables, enums, composite types, and domains. Of these, +// today's `SqlStorage` IR (`@prisma-next/sql-contract/types`) only +// models tables and parameterised type instances (a fit for things +// like pgvector's `vector(N)`, but not yet codec-less composite types, +// standalone enums, or domains). 
+// +// The contract therefore declares the only IR-representable object +// today (the `eql_v2_configuration` table) using portable scalar +// types (`String` / `Json`). The actual database state — the `eql_v2` +// schema, the typed `eql_v2_configuration_state` enum, the +// `eql_v2_encrypted` composite, the `eql_v2.bloom_filter` / +// `hmac_256` / `blake3` domains, and the various `ore_*` composites — +// is created by the `installEqlBundle` migration op (which carries +// the vendored bundle SQL byte-for-byte; see +// `./src/migration/eql-bundle.ts`). The structural +// `cipherstash:create-*-v1` no-op ops register the invariantIds the +// verifier needs so its `applied_invariants` gate passes. +// +// Once the IR vocabulary expands to first-class composite types, +// standalone enums, and domains, those typed objects shift up into +// `storage.types` and the structural ops gain real verification work +// (precheck SQL probing `pg_type` / `information_schema`). +// +// @see docs/architecture docs/adrs/ADR 211 - Contract spaces.md + +model EqlV2Configuration { + id String @id + state String + data Json + + @@map("eql_v2_configuration") +} diff --git a/packages/prisma-next/src/execution/abort.ts b/packages/prisma-next/src/execution/abort.ts new file mode 100644 index 00000000..08b3b18b --- /dev/null +++ b/packages/prisma-next/src/execution/abort.ts @@ -0,0 +1,143 @@ +/** + * Cipherstash-internal `RUNTIME.ABORTED` phase wrapping. + * + * The framework`s `runtimeAborted(phase)` (`@prisma-next/framework- + * components/runtime`) constructs the canonical `RUNTIME.ABORTED` + * envelope (`code === 'RUNTIME.ABORTED'`, `category === 'RUNTIME'`, + * `details.phase`, `cause`) but its `phase` parameter is typed as + * the framework`s closed `RuntimeAbortedPhase` union — `encode`, + * `decode`, `stream`, `beforeExecute`, `afterExecute`, `onRow`. 
 Those + * tags describe phases of `runtime.execute()` itself (see ADR 207's + * "Where the runtime observes abort" table); cipherstash's async + * observation points sit one layer outside the framework runtime: + * + * - `bulk-encrypt` — the bulk-encrypt middleware's SDK round-trip + * inside `beforeExecute`. Conceptually a sub-phase of the + * framework's `beforeExecute`, but tag-wise distinct so callers + * can attribute the abort to the cipherstash SDK call rather + * than to a generic middleware step. + * - `decrypt` — the single-cell `EncryptedString#decrypt()` + * SDK call, invoked by the application after the framework + * returns the row. Not inside any framework phase. + * - `decrypt-all` — the `decryptAll` walker's `bulkDecrypt` calls, + * invoked by the application after the framework returns the + * row set. Not inside any framework phase. + * + * Rather than widen the framework union (which would conflate + * extension-specific tags with the framework's own attribution + * sites), this module reuses the framework's `runtimeError(...)` + * envelope builder directly — the *envelope shape* (the + * `RuntimeErrorEnvelope` interface, the `code` slot, the `category` + * slot, the `details.phase` slot, the `cause` field) is unchanged; + * only the set of legal `phase` string values grows. ADR 027's + * envelope contract is preserved bit-for-bit. + * + * The `raceCipherstashAbort` helper mirrors framework + * `raceAgainstAbort` so cipherstash's SDK-call sites get the same + * "return promptly even when the SDK ignores the signal" behaviour + * (the cooperative-cancellation model from ADR 207). Identity- + * checked sentinel rejection distinguishes abort-source from a + * codec-thrown envelope, matching the framework's pattern. We + * duplicate the logic (rather than passing a cast tag to the + * framework helper) to keep the cipherstash `phase` strings + * cipherstash-internal — no widening of the framework union. 
+ */ + +import type { RuntimeErrorEnvelope } from '@prisma-next/framework-components/runtime'; +import { RUNTIME_ABORTED, runtimeError } from '@prisma-next/framework-components/runtime'; + +/** Discriminator placed in `details.phase` of cipherstash-issued aborts. */ +export type CipherstashAbortPhase = 'bulk-encrypt' | 'decrypt' | 'decrypt-all'; + +/** + * Construct a `RUNTIME.ABORTED` envelope tagged with a cipherstash + * phase. Reuses the framework's `runtimeError(RUNTIME_ABORTED, ...)` + * envelope builder so the structural shape (`code`, `category`, + * `severity`, `message`, `details.phase`, `cause`) matches everything + * else the framework emits. Only the `phase` string set is + * cipherstash-specific. + */ +export function cipherstashAborted( + phase: CipherstashAbortPhase, + cause?: unknown, +): RuntimeErrorEnvelope { + const envelope = runtimeError(RUNTIME_ABORTED, `Operation aborted during ${phase}`, { phase }); + return Object.assign(envelope, { cause }); +} + +/** + * Pre-check helper: throw a cipherstash-tagged `RUNTIME.ABORTED` + * envelope if the supplied signal is already aborted at the call + * site. Mirrors framework `checkAborted` (which is typed against the + * framework's phase union) — used to short-circuit the bulk-encrypt + * middleware's pre-flight, the single-cell `decrypt()` pre-flight, + * and the `decryptAll` walker's pre-flight before any SDK round-trip + * is scheduled. + */ +export function checkCipherstashAborted( + signal: AbortSignal | undefined, + phase: CipherstashAbortPhase, +): void { + if (signal?.aborted) { + throw cipherstashAborted(phase, signal.reason); + } +} + +/** + * Race a cipherstash SDK promise against the supplied `AbortSignal` + * so the awaiting caller is rejected promptly with a + * `RUNTIME.ABORTED` envelope as soon as the signal aborts — even + * when the SDK body itself ignores the signal. 
Cooperative + * cancellation: in-flight SDK calls that ignore the signal continue + * running in the background and complete; the abort-attributed + * rejection is what the cipherstash caller sees (the SDK`s eventual + * resolution is silently abandoned per ADR 207`s "cooperative + * cancellation, not termination" contract). + * + * Mirrors framework `raceAgainstAbort` line-for-line aside from the + * cipherstash-typed phase parameter and the cipherstash-tagged + * envelope construction. The sentinel-identity attribution is + * load-bearing for the same reason ADR 207 spells out: a codec / + * SDK that itself throws a `RUNTIME.ENCODE_FAILED` / + * `RUNTIME.DECODE_FAILED` (or any other named envelope) must pass + * through unchanged — only the cipherstash-installed listener ever + * rejects with the local sentinel reference, so an `error === + * sentinel` identity check after the race is unambiguous. + */ +export async function raceCipherstashAbort( + work: Promise, + signal: AbortSignal | undefined, + phase: CipherstashAbortPhase, +): Promise { + if (signal === undefined) { + return await work; + } + const sentinel: { reason: unknown } = { reason: undefined }; + let onAbort: (() => void) | undefined; + + const abortPromise = new Promise((_, reject) => { + if (signal.aborted) { + sentinel.reason = signal.reason; + reject(sentinel); + return; + } + onAbort = () => { + sentinel.reason = signal.reason; + reject(sentinel); + }; + signal.addEventListener('abort', onAbort, { once: true }); + }); + + try { + return await Promise.race([work, abortPromise]); + } catch (error) { + if (error === sentinel) { + throw cipherstashAborted(phase, sentinel.reason); + } + throw error; + } finally { + if (onAbort) { + signal.removeEventListener('abort', onAbort); + } + } +} diff --git a/packages/prisma-next/src/execution/cell-codec-factory.ts b/packages/prisma-next/src/execution/cell-codec-factory.ts new file mode 100644 index 00000000..b55cfb86 --- /dev/null +++ 
b/packages/prisma-next/src/execution/cell-codec-factory.ts @@ -0,0 +1,291 @@ +/** + * Shared factory for every cipherstash storage codec runtime. + * + * Every cipherstash codec (`cipherstash/string@1`, `cipherstash/double@1`, + * `cipherstash/bigint@1`, `cipherstash/date@1`, + * `cipherstash/boolean@1`, `cipherstash/json@1`) wires the same + * encode/decode body: + * + * - `encode(envelope, ctx)` extracts `handle.ciphertext` and renders + * it as the `eql_v2_encrypted` Postgres composite literal. + * - `decode(wire, ctx)` parses the wire (composite literal or + * pre-parsed `{ data: ... }` row), constructs a fresh envelope via + * the codec's per-type `fromInternal` factory, and stamps the + * `(table, column)` routing context from `ctx.column`. + * + * Only two values vary per codec: + * + * - `codecId` — the `cipherstash/@1` discriminator. + * - `fromInternal` — the per-type envelope factory + * (`EncryptedString.fromInternal`, `EncryptedDouble.fromInternal`, + * etc.). + * + * The factory parallels {@link makeCipherstashCodecHooks} on the + * migration plane (see `../migration/codec-hooks-factory.ts`) — same + * pattern, opposite plane: control plane = lifecycle hooks, runtime + * plane = encode/decode bodies. 
+ */ + +import type { JsonValue } from '@prisma-next/contract/types'; +import { + type AnyCodecDescriptor, + CodecImpl, + type CodecTrait, +} from '@prisma-next/framework-components/codec'; +import { runtimeError } from '@prisma-next/framework-components/runtime'; +import type { Codec, SqlCodecCallContext } from '@prisma-next/sql-relational-core/ast'; +import { CIPHERSTASH_CODEC_TRAITS, EQL_V2_ENCRYPTED_TYPE } from '../extension-metadata/constants'; +import type { EncryptedEnvelopeBase } from './envelope-base'; +import { isBulkEncryptMiddlewareRegistered } from './middleware-registry'; +import type { CipherstashSdk } from './sdk'; + +const CIPHERSTASH_TARGET_TYPES = [EQL_V2_ENCRYPTED_TYPE] as const; + +/** + * Encode the SDK ciphertext payload as a Postgres composite literal + * `("...escaped JSON...")`. Embedded `"` are doubled per the composite + * text-format escape rules. Identical across every cipherstash codec — + * the wire format is determined by `eql_v2_encrypted`'s definition + * (`CREATE TYPE eql_v2_encrypted AS (data jsonb)`), not by the codec's + * plaintext type. + */ +export function encodeEqlV2EncryptedWire(payload: unknown): string { + const json = JSON.stringify(payload); + if (json === undefined) { + throw new Error( + 'cipherstash codec: ciphertext payload is not JSON-serializable. ' + + 'The CipherStash SDK must return a JSON-encodable bulk-encrypt result.', + ); + } + const escaped = json.replaceAll('"', '""'); + return `("${escaped}")`; +} + +/** + * Inverse of {@link encodeEqlV2EncryptedWire}. Postgres returns + * `eql_v2_encrypted` cells in composite text format; some pg clients + * pre-parse composite cells into `{ data: ... }` row objects. Both + * shapes — and `null`/`undefined` passthrough — are accepted. 
+ */ +function decodeEqlV2EncryptedWire(wire: unknown): unknown { + if (wire === null || wire === undefined) return wire; + if (typeof wire === 'object') { + if ('data' in wire) { + return (wire as { data: unknown }).data; + } + return wire; + } + if (typeof wire !== 'string') { + throw new Error( + `cipherstash codec: unexpected wire shape for eql_v2_encrypted: ${typeof wire}`, + ); + } + const trimmed = wire.trim(); + if (!trimmed.startsWith('(') || !trimmed.endsWith(')')) { + throw new Error( + `cipherstash codec: expected composite literal "(...)" but got: ${trimmed.slice(0, 40)}`, + ); + } + const inner = trimmed.slice(1, -1); + const unquoted = + inner.startsWith('"') && inner.endsWith('"') ? inner.slice(1, -1).replaceAll('""', '"') : inner; + return JSON.parse(unquoted); +} + +export interface CipherstashCellCodecOptions> { + readonly codecId: string; + readonly typeName: string; + readonly fromInternal: (args: { + readonly ciphertext: unknown; + readonly table: string; + readonly column: string; + readonly sdk: CipherstashSdk; + }) => E; +} + +export class CipherstashCellCodec> extends CodecImpl< + string, + readonly CodecTrait[], + unknown, + E +> { + readonly sdk: CipherstashSdk | undefined; + readonly #fromInternal: CipherstashCellCodecOptions['fromInternal']; + readonly #typeName: string; + // One-shot cache so the per-encode WeakSet lookup only runs until the + // first time we observe a registered middleware on this codec's SDK. + // WeakSet entries are append-only (the registry never un-registers an + // SDK), so flipping this to true is safe for the rest of the codec's + // lifetime. 
+ #middlewareCheckPassed = false; + + constructor( + descriptor: AnyCodecDescriptor, + sdk: CipherstashSdk | undefined, + options: CipherstashCellCodecOptions, + ) { + super(descriptor); + this.sdk = sdk; + this.#fromInternal = options.fromInternal; + this.#typeName = options.typeName; + } + + async encode(value: E, _ctx: SqlCodecCallContext): Promise { + // Two-pass write path: `lower`/`encodeParams` runs first and reaches + // this method with the envelope as the user authored it (plaintext + // set, ciphertext unset). We return the envelope as a sentinel; the + // bulk-encrypt middleware then runs in `beforeExecute`, stamps the + // ciphertext onto the envelope, and rewrites the param slot to the + // wire-format string via `params.replaceValues(...)` before the + // driver reads. See `../middleware/bulk-encrypt.ts` for the full + // flow. On the read side, `handle.ciphertext` is already set on + // arrival and encode short-circuits to the wire-format string. + const handle = value.expose(); + if (handle.ciphertext === undefined) { + // Misconfig diagnostic: when an SDK-bound codec sees a pre-encrypt + // envelope but no `bulkEncryptMiddleware(sdk)` has been + // constructed against that same SDK, the two-pass flow can never + // complete. Throw at the codec boundary with a copy-pasteable + // wiring snippet rather than letting the envelope reach the pg + // driver and produce an opaque serialise error. + if (!this.#middlewareCheckPassed && this.sdk !== undefined) { + if (!isBulkEncryptMiddlewareRegistered(this.sdk)) { + throw runtimeError( + 'RUNTIME.ENCODE_FAILED', + `cipherstash ${this.descriptor.codecId}: encrypted column value has not been encrypted, ` + + 'and no `bulkEncryptMiddleware(sdk)` has been registered with this SDK. 
' + + 'Wire it up alongside the extension descriptor:\n\n' + + ' postgres({\n' + + ' contractJson,\n' + + ' extensions: [createCipherstashRuntimeDescriptor({ sdk })],\n' + + ' middleware: [bulkEncryptMiddleware(sdk)],\n' + + ' });\n\n' + + 'Both must close over the SAME `sdk` reference. See the @cipherstash/prisma-next README for the full wiring example.', + { + codecId: this.descriptor.codecId, + reason: 'cipherstash-bulk-encrypt-middleware-not-registered', + envelopeRouting: { table: handle.table, column: handle.column }, + }, + ); + } + this.#middlewareCheckPassed = true; + } + return value; + } + return encodeEqlV2EncryptedWire(handle.ciphertext); + } + + async decode(wire: unknown, ctx: SqlCodecCallContext): Promise { + if (this.sdk === undefined) { + throw runtimeError( + 'RUNTIME.DECODE_FAILED', + `cipherstash ${this.descriptor.codecId}: decode invoked on a metadata-only codec instance that has no SDK attached. ` + + 'Build a runtime codec via the parameterized descriptors returned by `createParameterizedCodecDescriptors(sdk)`, ' + + `or construct the codec directly through the matching \`create*Codec(sdk)\` factory (e.g. \`create${this.#typeName}Codec\`) ` + + 'exported from `@prisma-next/extension-cipherstash/runtime`.', + { + codecId: this.descriptor.codecId, + reason: 'cipherstash-sdk-required', + }, + ); + } + const column = ctx.column; + if (!column) { + throw runtimeError( + 'RUNTIME.DECODE_FAILED', + `cipherstash ${this.descriptor.codecId}: decode requires the column routing context that the SQL runtime populates ` + + 'for projected columns. The cell being decoded came from an aggregate, computed expression, or other unrouted source. 
' + + 'cipherstash codecs need a stable `(table, column)` routing key for envelope construction and bulk-decrypt grouping; ' + + 'project the underlying encrypted column directly instead of through an aggregate.', + { + codecId: this.descriptor.codecId, + reason: 'cipherstash-decode-column-context-missing', + }, + ); + } + return this.#fromInternal({ + ciphertext: decodeEqlV2EncryptedWire(wire), + table: column.table, + column: column.name, + sdk: this.sdk, + }); + } + + encodeJson(_value: E): JsonValue { + const marker = `$${this.#typeName.charAt(0).toLowerCase()}${this.#typeName.slice(1)}`; + return { [marker]: '' } as JsonValue; + } + + decodeJson(_json: JsonValue): E { + throw new Error( + 'cipherstash codec: decodeJson is not supported; envelopes do not round-trip through JSON.', + ); + } +} + +/** + * Construct an auxiliary descriptor for a cipherstash cell codec. + * + * The framework's `CodecImpl` base class requires a `descriptor` field + * on every codec instance; readers like `codec.id` proxy through + * `descriptor.codecId`. The production lookup path, however, resolves + * cipherstash codecs through the **parameterized** descriptors built + * in `parameterized.ts` — its `factory(params)(ctx)` returns the codec + * instance directly, never going via `codec.descriptor.factory`. + * + * This descriptor therefore needs only to carry **truthful metadata** + * (`codecId`, `traits`, `targetTypes`, `meta`, `renderOutputType`) so + * that any caller reading those fields off the codec sees the right + * values. Its `factory` field is intentionally a throwing stub: if + * anything ever does invoke it, that is a programming error (the call + * site should be going through the parameterized descriptor) and a + * loud failure is preferred to a silent fallback. 
+ * + * The auxiliary cannot be replaced by passing the parameterized + * descriptor through to the codec constructor because the + * parameterized descriptor's `factory` resolves to the codec instance + * itself — constructing the descriptor before the codec, and the + * codec before the descriptor, are mutually circular. + */ +function makeAuxiliaryDescriptor>( + options: CipherstashCellCodecOptions, +): AnyCodecDescriptor { + return { + codecId: options.codecId, + traits: CIPHERSTASH_CODEC_TRAITS[options.codecId] ?? [], + targetTypes: CIPHERSTASH_TARGET_TYPES, + meta: { + db: { sql: { postgres: { nativeType: EQL_V2_ENCRYPTED_TYPE } } }, + }, + paramsSchema: { + '~standard': { + version: 1, + vendor: 'cipherstash', + validate: (value: unknown) => ({ value }), + }, + }, + isParameterized: false, + renderOutputType: () => options.typeName, + factory: () => () => { + throw new Error( + 'cipherstash codec: auxiliary descriptor factory was invoked. ' + + 'This is a programming error — cipherstash codecs are resolved through the ' + + 'parameterized descriptors built in `parameterized.ts`, not through ' + + '`codec.descriptor.factory`. Use `createParameterizedCodecDescriptors(sdk)` ' + + 'to get the production runtime descriptors.', + ); + }, + }; +} + +/** + * Construct the runtime codec for a cipherstash cell codec given its + * codec id, the user-facing type name, and the per-type envelope + * `fromInternal` factory. 
+ */ +export function makeCipherstashCellCodec>( + sdk: CipherstashSdk, + options: CipherstashCellCodecOptions, +): CipherstashCellCodec & Codec { + return new CipherstashCellCodec(makeAuxiliaryDescriptor(options), sdk, options); +} diff --git a/packages/prisma-next/src/execution/codec-runtime.ts b/packages/prisma-next/src/execution/codec-runtime.ts new file mode 100644 index 00000000..6b89a217 --- /dev/null +++ b/packages/prisma-next/src/execution/codec-runtime.ts @@ -0,0 +1,107 @@ +/** + * Cipherstash storage codec runtimes — wrap each `Encrypted*` envelope + * at the SQL codec boundary. + * + * Every cipherstash codec has identical encode/decode bodies (the + * `eql_v2_encrypted` composite-literal wire format is determined by + * the EQL type definition, not by the plaintext type). The shared body + * lives in `./cell-codec-factory.ts`; the per-codec wrappers below + * supply only the per-type discriminators (codec id, user-facing type + * name, envelope `fromInternal` factory) and re-export the codec class + * for backwards compatibility with consumers that imported it directly + * from this module. + * + * Mirrors the `makeCipherstashCodecHooks` pattern on the migration + * plane (see `../migration/codec-hooks-factory.ts`) — same shape, + * opposite plane. + * + * Equality search on cipherstash columns intentionally goes through the + * cipherstash-namespaced operator (`cipherstashEq`); the framework's + * trait-gated built-in `eq` would lower to standard SQL `=` which is + * wrong for EQL ciphers (randomized nonces). Each codec therefore + * declares no traits — see `./cell-codec-factory.ts`. 
+ */ + +import { + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_BOOLEAN_CODEC_ID, + CIPHERSTASH_DATE_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, + CIPHERSTASH_JSON_CODEC_ID, + CIPHERSTASH_STRING_CODEC_ID, +} from '../extension-metadata/constants'; +import { CipherstashCellCodec, makeCipherstashCellCodec } from './cell-codec-factory'; +import { EncryptedBigInt } from './envelope-bigint'; +import { EncryptedBoolean } from './envelope-boolean'; +import { EncryptedDate } from './envelope-date'; +import { EncryptedDouble } from './envelope-double'; +import { EncryptedJson } from './envelope-json'; +import { EncryptedString } from './envelope-string'; +import type { CipherstashSdk } from './sdk'; + +export { CIPHERSTASH_STRING_CODEC_ID }; + +/** @deprecated Re-exported for source compatibility; new call sites should use `CipherstashCellCodec`. */ +export type CipherstashStringCodec = CipherstashCellCodec; + +export function createCipherstashStringCodec( + sdk: CipherstashSdk, +): CipherstashCellCodec { + return makeCipherstashCellCodec(sdk, { + codecId: CIPHERSTASH_STRING_CODEC_ID, + typeName: 'EncryptedString', + fromInternal: EncryptedString.fromInternal, + }); +} + +export function createCipherstashDoubleCodec( + sdk: CipherstashSdk, +): CipherstashCellCodec { + return makeCipherstashCellCodec(sdk, { + codecId: CIPHERSTASH_DOUBLE_CODEC_ID, + typeName: 'EncryptedDouble', + fromInternal: EncryptedDouble.fromInternal, + }); +} + +export function createCipherstashBigIntCodec( + sdk: CipherstashSdk, +): CipherstashCellCodec { + return makeCipherstashCellCodec(sdk, { + codecId: CIPHERSTASH_BIGINT_CODEC_ID, + typeName: 'EncryptedBigInt', + fromInternal: EncryptedBigInt.fromInternal, + }); +} + +export function createCipherstashDateCodec( + sdk: CipherstashSdk, +): CipherstashCellCodec { + return makeCipherstashCellCodec(sdk, { + codecId: CIPHERSTASH_DATE_CODEC_ID, + typeName: 'EncryptedDate', + fromInternal: EncryptedDate.fromInternal, + }); +} + +export function 
createCipherstashBooleanCodec( + sdk: CipherstashSdk, +): CipherstashCellCodec { + return makeCipherstashCellCodec(sdk, { + codecId: CIPHERSTASH_BOOLEAN_CODEC_ID, + typeName: 'EncryptedBoolean', + fromInternal: EncryptedBoolean.fromInternal, + }); +} + +export function createCipherstashJsonCodec( + sdk: CipherstashSdk, +): CipherstashCellCodec { + return makeCipherstashCellCodec(sdk, { + codecId: CIPHERSTASH_JSON_CODEC_ID, + typeName: 'EncryptedJson', + fromInternal: EncryptedJson.fromInternal, + }); +} + +export { CipherstashCellCodec }; diff --git a/packages/prisma-next/src/execution/decrypt-all.ts b/packages/prisma-next/src/execution/decrypt-all.ts new file mode 100644 index 00000000..beb23035 --- /dev/null +++ b/packages/prisma-next/src/execution/decrypt-all.ts @@ -0,0 +1,244 @@ +/** + * `decryptAll` — read-side bulk-decrypt walker. + * + * Public utility users invoke after `findMany` (or any other read + * surface) to materialize the plaintext for every cipherstash envelope + * (any `EncryptedEnvelopeBase` subclass — string / double / bigint / + * date / boolean / json) reachable from the result set in a fixed + * number of bulk SDK round-trips: + * + * const rows = await db.select(...).from(User).execute(); + * await decryptAll(rows); + * // every envelope's `decrypt()` now returns plaintext synchronously. + * + * Why a separate utility (rather than middleware that auto-decrypts on + * every read): the framework`s streaming-read path cannot bulk-amortize + * decryption across rows it`s yielding incrementally — by the time row + * N is yielded, rows 1..N-1 have already been delivered to the caller. + * The `decryptAll` shape lets the caller buffer the result set + * explicitly (with `await stream.toArray()`) and then opt into bulk + * decryption in one round-trip per `(table, column)` group. The runtime + * descriptor wrapper deliberately does NOT register an implicit-decrypt + * middleware for this reason. + * + * **Walker shape**. 
+ * + * - Recursive on plain objects + plain arrays only. Date / Map / Set / + * typed arrays / Buffer / function / etc. are not recursed into: + * cipherstash envelopes are user data and would not normally embed + * inside these host containers; if a future caller needs to bulk- + * decrypt envelopes inside such a container they extract them into a + * plain row first. The narrow scope keeps the walker's behavior + * trivially predictable and avoids the cycle / iterator / lazy-eval + * surface those exotic types bring. + * - Cycle-safe via a `WeakSet` of visited objects/arrays; the same + * envelope appearing in N positions is collected once. + * - Skips envelopes whose plaintext slot is already populated + * (write-side envelopes from `EncryptedString.from(plaintext)`, or + * read-side envelopes already materialized by a prior + * `decrypt()` / `decryptAll(...)`). The skip means a re-run is a + * no-op and a mixed write/read row tree only round-trips for the + * envelopes that need it. + * + * **Grouping**. Envelopes are grouped by `(sdk, table, column)` — + * routing key plus the envelope handle's SDK reference. The SDK split + * preserves the per-tenant SDK isolation `runtime.ts`'s docblock spells + * out: each tenant constructs its own runtime descriptor with its own + * SDK so per-tenant key material never crosses runtimes. Envelopes from + * different tenants happening to share `(table, column)` therefore + * still receive separate `bulkDecrypt` calls. + * + * **Cancellation**. `opts.signal` is forwarded by identity to every + * `bulkDecrypt` call via `ifDefined` — the same shape the bulk-encrypt + * middleware and `EncryptedString.decrypt({ signal? })` use. The + * walker also races each SDK promise against `opts.signal` via + * `raceCipherstashAbort` so an abort surfaces `RUNTIME.ABORTED + * { phase: 'decrypt-all' }` promptly even when the SDK body itself + * ignores the signal. 
A pre-check before the first SDK round-trip + * short-circuits when the signal is already aborted at entry; the + * no-envelopes-reachable fast path returns immediately without + * observing the signal. + */ + +import { ifDefined } from '@prisma-next/utils/defined'; +import { checkCipherstashAborted, raceCipherstashAbort } from './abort'; +import { EncryptedEnvelopeBase, isHandleDecrypted } from './envelope-base'; +import type { CipherstashRoutingKey, CipherstashSdk } from './sdk'; + +export interface DecryptAllOptions { + readonly signal?: AbortSignal; +} + +interface BulkDecryptTarget { + readonly envelope: EncryptedEnvelopeBase; + readonly ciphertext: unknown; + readonly sdk: CipherstashSdk; + readonly routingKey: CipherstashRoutingKey; +} + +/** + * Walk a result set and bulk-decrypt every cipherstash envelope (any + * `EncryptedEnvelopeBase` subclass) reachable from it. After the + * returned promise resolves, every touched envelope's `decrypt()` + * returns the cached plaintext synchronously without consulting the + * SDK. Heterogeneous result sets are supported — envelopes of + * different concrete types (e.g. `EncryptedString` and + * `EncryptedDate` reachable from the same row) are grouped by + * `(sdk, table, column)` and the SDK's polymorphic `bulkDecrypt` + * return is narrowed per envelope through each subclass's + * {@link EncryptedEnvelopeBase.parseDecryptedValue} hook. + * + * The walker is a no-op when no envelopes are reachable (returns + * without making any SDK call), so it is cheap to call defensively + * after queries that may or may not contain encrypted columns. 
+ */ +export async function decryptAll(rows: unknown, opts?: DecryptAllOptions): Promise { + const targets = collectTargets(rows); + if (targets.length === 0) { + return; + } + const groups = groupTargets(targets); + for (const group of groups.values()) { + const first = group[0]; + if (!first) continue; + const ciphertexts = group.map((t) => t.ciphertext); + checkCipherstashAborted(opts?.signal, 'decrypt-all'); + const plaintexts = await raceCipherstashAbort( + first.sdk.bulkDecrypt({ + routingKey: first.routingKey, + ciphertexts, + ...ifDefined('signal', opts?.signal), + }), + opts?.signal, + 'decrypt-all', + ); + if (plaintexts.length !== group.length) { + throw new Error( + `cipherstash decryptAll: SDK returned ${plaintexts.length} plaintexts ` + + `for routing key (${first.routingKey.table}, ${first.routingKey.column}) ` + + `but ${group.length} were requested.`, + ); + } + for (let i = 0; i < group.length; i++) { + const target = group[i]; + const plaintext = plaintexts[i]; + if (!target) continue; + if (plaintext === undefined) { + throw new Error( + `cipherstash decryptAll: SDK returned undefined plaintext at index ${i} ` + + `for routing key (${target.routingKey.table}, ${target.routingKey.column}). ` + + 'A missing plaintext indicates the SDK could not decrypt this envelope; ' + + 'silently skipping it would leave the caller with an envelope that still ' + + 'reports as not-yet-decrypted, so we surface the failure here instead.', + ); + } + // The SDK's `bulkDecrypt` returns `ReadonlyArray`; + // narrowing to each envelope's `T` is the per-subclass + // responsibility. `applyDecryptedSdkResult` is a static member + // on the base class (TS's class-bounded-friend convention) that + // dispatches through the envelope's own `parseDecryptedValue` + // hook (e.g. `EncryptedDate` coerces strings/numbers/Date + // instances to a `Date`) and writes the narrowed plaintext into + // the handle's cache slot. 
Heterogeneous groups are not possible + // — every cell in a `(sdk, table, column)` group has the same + // codec id, hence the same envelope subclass — but dynamic + // dispatch still keeps the call site agnostic. + EncryptedEnvelopeBase.applyDecryptedSdkResult(target.envelope, plaintext); + } + } +} + +function collectTargets(root: unknown): BulkDecryptTarget[] { + const targets: BulkDecryptTarget[] = []; + const seenObjects = new WeakSet(); + const seenEnvelopes = new WeakSet>(); + visit(root, seenObjects, (envelope) => { + if (seenEnvelopes.has(envelope)) return; + seenEnvelopes.add(envelope); + if (isHandleDecrypted(envelope)) return; + const handle = envelope.expose(); + if (handle.table === undefined || handle.column === undefined) { + throw new Error( + 'cipherstash decryptAll: envelope is missing (table, column) routing context. ' + + 'Read-side envelopes constructed via codec.decode always carry routing context; ' + + 'this typically means the envelope was constructed manually outside the codec path.', + ); + } + if (handle.sdk === undefined) { + throw new Error( + 'cipherstash decryptAll: envelope is missing the SDK reference needed to decrypt. ' + + 'Read-side envelopes constructed via codec.decode always carry an SDK reference; ' + + 'this typically means the envelope was constructed manually outside the codec path.', + ); + } + targets.push({ + envelope, + ciphertext: handle.ciphertext, + sdk: handle.sdk, + routingKey: { table: handle.table, column: handle.column }, + }); + }); + return targets; +} + +function visit( + value: unknown, + seen: WeakSet, + found: (envelope: EncryptedEnvelopeBase) => void, +): void { + if (value === null || value === undefined) return; + if (value instanceof EncryptedEnvelopeBase) { + found(value); + return; + } + if (typeof value !== 'object') return; + if (seen.has(value)) return; + // Walker is intentionally scoped to plain arrays + plain objects. 
+ // Date / Map / Set / typed arrays / Buffer / Error / class instances + // are passed over so the walker`s shape stays trivially predictable + // and immune to host-object iterator surprises. + if (Array.isArray(value)) { + seen.add(value); + for (const item of value) { + visit(item, seen, found); + } + return; + } + if (!isPlainObject(value)) { + return; + } + seen.add(value); + for (const key of Object.keys(value)) { + visit((value as Record)[key], seen, found); + } +} + +function isPlainObject(value: object): boolean { + const proto = Object.getPrototypeOf(value); + return proto === null || proto === Object.prototype; +} + +function groupTargets(targets: ReadonlyArray): Map { + // Group by `(sdk identity, table, column)`. The SDK identity portion + // of the key uses a per-SDK index issued on first encounter so + // grouping never depends on object reference equality colliding + // accidentally (different SDK instances always partition into + // different groups even if their `(table, column)` matches). + const sdkIndex = new Map(); + const groups = new Map(); + for (const target of targets) { + let idx = sdkIndex.get(target.sdk); + if (idx === undefined) { + idx = sdkIndex.size; + sdkIndex.set(target.sdk, idx); + } + const id = `${idx}\u0000${target.routingKey.table}\u0000${target.routingKey.column}`; + let group = groups.get(id); + if (!group) { + group = []; + groups.set(id, group); + } + group.push(target); + } + return groups; +} diff --git a/packages/prisma-next/src/execution/envelope-base.ts b/packages/prisma-next/src/execution/envelope-base.ts new file mode 100644 index 00000000..1d593183 --- /dev/null +++ b/packages/prisma-next/src/execution/envelope-base.ts @@ -0,0 +1,308 @@ +/** + * Shared abstract base for every cipherstash envelope class. 
+ * + * Each concrete encrypted-column type (`EncryptedString`, + * `EncryptedDouble`, `EncryptedBigInt`, `EncryptedDate`, + * `EncryptedBoolean`, `EncryptedJson`) wraps a handle of the same shape + * — only the plaintext slot's `T` differs — and shares verbatim: + * + * - the `#handle` private field and its `expose()` accessor; + * - the `decrypt({signal?}): Promise` body, including the + * plaintext-cache fast path, abort plumbing, and SDK round-trip; + * - the five `[REDACTED]` overrides + * (`toJSON` / `toString` / `valueOf` / `[Symbol.toPrimitive]` / + * `[Symbol.for('nodejs.util.inspect.custom')]`). + * + * Concrete subclasses provide only the typed factories + * (`static from(plaintext: T): Self`, `static fromInternal({...}): Self`), + * a `typeName` getter consumed by the base's error messages, and an + * optional `parseDecryptedValue(sdkResult: unknown): T` narrowing hook + * for codecs whose plaintext type the SDK doesn't already return as `T` + * (e.g. `EncryptedDate` narrows to `Date`). + * + * ## Encapsulation pattern (Rust `secrecy` style) + * + * Storage is a `#private` instance field on the base. The blessed read + * path is `expose()` — same shape as Rust `secrecy`'s + * `SecretBox::expose_secret`. The five coercion / serialization + * vectors (logger output, `JSON.stringify`, primitive coercion, + * template-literal interpolation, `util.inspect`) are all overridden + * to return `[REDACTED]` so accidental exposure through any of those + * paths is impossible without going through `expose()`. + * + * Modern Node runtimes surface `#private` fields in `util.inspect` + * output by default; the `[Symbol.for('nodejs.util.inspect.custom')]` + * override is what stops that re-exposure path. + */ + +import { ifDefined } from '@prisma-next/utils/defined'; +import { checkCipherstashAborted, raceCipherstashAbort } from './abort'; +import type { CipherstashSdk } from './sdk'; + +/** + * The mutable state shared by every envelope. 
The plaintext slot's `T` + * varies per subclass; ciphertext is opaque per-cell wire bytes; the + * `(table, column)` tuple plus `sdk` reference plumbs the per-cell SDK + * lifecycle (single-cell `decrypt`, bulk-encrypt routing). Mutating + * these slots from outside the package is supported but unusual; the + * package's lifecycle mutators (`setHandleCiphertext`, + * `setHandleRoutingKey`, `setHandlePlaintextCache`) are the conventional + * path. + */ +export interface EncryptedEnvelopeHandle { + plaintext: T | undefined; + ciphertext: unknown; + table: string | undefined; + column: string | undefined; + sdk: CipherstashSdk | undefined; +} + +export interface EncryptedEnvelopeFromInternalArgs { + readonly ciphertext: unknown; + readonly table: string; + readonly column: string; + readonly sdk: CipherstashSdk; +} + +const REDACTED = '[REDACTED]'; + +/** + * Placeholder shape returned by `JSON.stringify(envelope)` for every + * concrete envelope. The marker key is derived from the subclass's + * `typeName` (e.g. `EncryptedString` → `$encryptedString`, + * `EncryptedDouble` → `$encryptedDouble`) so each codec carries a + * distinct, machine-recognisable signature in serialised payloads. + * + * The four other coercion paths (`toString` / `valueOf` / + * `[Symbol.toPrimitive]` / `[Symbol.for('nodejs.util.inspect.custom')]`) + * keep returning the literal `[REDACTED]` string; only `toJSON` + * returns the per-type placeholder object so `JSON.stringify` + * renders the marker shape that downstream serialisers and + * `decryptAll` use to recognise an opaque envelope. 
+ */ +export interface EncryptedEnvelopePlaceholder { + readonly [marker: `$${string}`]: ''; +} + +function placeholderFor(typeName: string): EncryptedEnvelopePlaceholder { + const marker = `$${typeName.charAt(0).toLowerCase()}${typeName.slice(1)}` as const; + // The marker key is constructed at runtime from `typeName`, so TS + // widens the literal-form `{ [marker]: '' }` to + // `{ [k: string]: string }` rather than the template-literal-keyed + // `EncryptedEnvelopePlaceholder` shape. The structural identity + // holds at runtime — every key is `$${typeName}` per construction — + // but the type system can't follow the dynamic key derivation, so a + // last-resort `unknown` cast bridges the two. AGENTS.md requires + // this rationale comment alongside any `as unknown as` cast. + return { [marker]: '' } as unknown as EncryptedEnvelopePlaceholder; +} + +export abstract class EncryptedEnvelopeBase { + readonly #handle: EncryptedEnvelopeHandle; + + protected constructor(handle: EncryptedEnvelopeHandle) { + this.#handle = handle; + } + + /** + * Stable, user-facing class name. Used by the base's error messages + * so each subclass surfaces under its own identity (e.g. + * `EncryptedString.decrypt(): ...` rather than the base class name). + */ + protected abstract get typeName(): string; + + /** + * Narrow the SDK's `unknown` plaintext to the subclass's `T`. The + * default identity cast suffices for codecs whose plaintext type the + * SDK already returns as `T` (e.g. `EncryptedString` — the SDK's + * single-cell `decrypt` returns `Promise`). Subclasses whose + * `T` requires runtime narrowing (e.g. `EncryptedDate` constructing + * a `Date` from an ISO string) override this hook. 
+ * + * Reachable from outside the class hierarchy only via the + * class-bounded {@link EncryptedEnvelopeBase.applyDecryptedSdkResult} + * static method — TS lets static members access protected instance + * members of the same class, so the friend access is scoped to + * one well-named entry point and the hook stays `protected` against + * arbitrary out-of-package callers. + */ + protected parseDecryptedValue(sdkResult: unknown): T { + return sdkResult as T; + } + + /** + * Apply an SDK bulk-decrypt result to an envelope: narrow the + * polymorphic SDK return through the subclass's + * {@link EncryptedEnvelopeBase.parseDecryptedValue} hook and cache + * the narrowed plaintext on the handle. Returns the narrowed + * plaintext for callers that want to observe it. + * + * Lives as a `static` member rather than a free function in this + * module so it stays inside the class's lexical scope — TS's + * class-bounded-friend convention permits a static method to call a + * protected instance method on the same class, which is what lets + * `parseDecryptedValue` stay `protected` while still being reachable + * from {@link ../decrypt-all.ts decryptAll}. + * + * Mirrors the conventional `setHandle*` mutator shape used elsewhere + * in this module — call sites stay symmetric across the encrypt path + * (`setHandleCiphertext`) and the decrypt path + * (`EncryptedEnvelopeBase.applyDecryptedSdkResult`). + */ + static applyDecryptedSdkResult(envelope: EncryptedEnvelopeBase, sdkResult: unknown): U { + const plaintext = envelope.parseDecryptedValue(sdkResult); + envelope.expose().plaintext = plaintext; + return plaintext; + } + + /** + * Explicitly retrieve the wrapped handle. Modelled on Rust `secrecy`'s + * `SecretBox::expose_secret`: the handle is reachable, but you have + * to ask for it by name. Callers reach for `expose()` when they need + * to inspect or transport the ciphertext envelope, debug lifecycle + * state, or wire ad-hoc tooling around the SDK reference. 
+ * + * Mutating the returned handle is supported but unusual — the + * package's lifecycle mutators (`setHandleCiphertext`, + * `setHandleRoutingKey`, etc.) are the conventional path during + * encrypt / decrypt flow. + */ + expose(): EncryptedEnvelopeHandle { + return this.#handle; + } + + /** + * Decrypt and return the plaintext. + * + * - If the handle's `plaintext` slot is already populated (write-side + * envelopes from `from(plaintext)`, or read-side envelopes already + * materialized by `decryptAll(...)` or a prior `decrypt()`), returns + * the cached plaintext synchronously without consulting the SDK. + * - Otherwise (read-side handle without a cached plaintext), invokes + * the SDK's single-cell `decrypt` with the handle's routing context. + * The caller-supplied `signal` is forwarded to the SDK by identity + * per the umbrella cancellation contract; the SDK promise is also + * raced against the signal so an abort surfaces a `RUNTIME.ABORTED + * { phase: 'decrypt' }` envelope promptly even if the SDK body + * ignores the signal. The cached-plaintext fast path returns + * synchronously without consulting the signal — no IO, no abort + * observation point. + */ + async decrypt(opts?: { signal?: AbortSignal }): Promise { + if (this.#handle.plaintext !== undefined) { + return this.#handle.plaintext; + } + if ( + !this.#handle.sdk || + this.#handle.table === undefined || + this.#handle.column === undefined + ) { + throw new Error( + `${this.typeName}.decrypt(): envelope has no cached plaintext and no SDK binding. 
` + + 'This typically means the bulk-encrypt middleware did not run before the encode site.', + ); + } + checkCipherstashAborted(opts?.signal, 'decrypt'); + const sdkResult = await raceCipherstashAbort( + this.#handle.sdk.decrypt({ + ciphertext: this.#handle.ciphertext, + table: this.#handle.table, + column: this.#handle.column, + ...ifDefined('signal', opts?.signal), + }), + opts?.signal, + 'decrypt', + ); + const plaintext = this.parseDecryptedValue(sdkResult); + this.#handle.plaintext = plaintext; + return plaintext; + } + + toJSON(): EncryptedEnvelopePlaceholder { + return placeholderFor(this.typeName); + } + + toString(): string { + return REDACTED; + } + + valueOf(): string { + return REDACTED; + } + + [Symbol.toPrimitive](): string { + return REDACTED; + } + + [Symbol.for('nodejs.util.inspect.custom')](): string { + return REDACTED; + } +} + +/** + * Populate the handle's ciphertext slot. Called by the bulk-encrypt + * middleware after the SDK returns the encrypted batch. + * + * The plaintext slot is intentionally retained — zeroing in JS is + * best-effort (strings are immutable; objects can carry references the + * caller still owns) and the GC-driven lifecycle is sufficient. + */ +export function setHandleCiphertext( + envelope: EncryptedEnvelopeBase, + ciphertext: unknown, +): void { + envelope.expose().ciphertext = ciphertext; +} + +/** + * Populate the handle's plaintext slot with a freshly-decrypted value + * (read-side caching path used by `decryptAll` and by `decrypt()`'s own + * memoization). + */ +export function setHandlePlaintextCache(envelope: EncryptedEnvelopeBase, plaintext: T): void { + envelope.expose().plaintext = plaintext; +} + +/** + * Stamp the encrypt-side `(table, column)` routing context onto a + * write-side envelope's handle. Called by the bulk-encrypt middleware + * before grouping envelopes into per-routing-key bulk-encrypt batches. 
+ * + * Idempotent for matching reassignments (re-stamping the same + * `(table, column)` is a no-op, which covers envelopes reconstructed + * via `fromInternal` on the read side and re-stamped on the way back + * in). Conflicting reassignments throw a descriptive error: an + * envelope reused across plans with a different routing context is a + * programming error — silently keeping the stale binding would lower + * to the wrong bulk-encrypt batch. + */ +export function setHandleRoutingKey( + envelope: EncryptedEnvelopeBase, + table: string, + column: string, +): void { + const handle = envelope.expose(); + if (handle.table === undefined) { + handle.table = table; + } else if (handle.table !== table) { + throw new Error( + `cipherstash envelope: routing-key table conflict — handle already bound to "${handle.table}", refusing to rebind to "${table}". Re-encode the value or construct a fresh envelope for the new routing target.`, + ); + } + if (handle.column === undefined) { + handle.column = column; + } else if (handle.column !== column) { + throw new Error( + `cipherstash envelope: routing-key column conflict on table "${handle.table}" — handle already bound to "${handle.column}", refusing to rebind to "${column}". Re-encode the value or construct a fresh envelope for the new routing target.`, + ); + } +} + +/** + * `true` when the handle already carries a usable plaintext (write-side + * construction or post-`decrypt` caching). Used by `decryptAll` to skip + * envelopes that don't need a round-trip. 
+ */ +export function isHandleDecrypted(envelope: EncryptedEnvelopeBase): boolean { + return envelope.expose().plaintext !== undefined; +} diff --git a/packages/prisma-next/src/execution/envelope-bigint.ts b/packages/prisma-next/src/execution/envelope-bigint.ts new file mode 100644 index 00000000..82ad6c21 --- /dev/null +++ b/packages/prisma-next/src/execution/envelope-bigint.ts @@ -0,0 +1,103 @@ +/** + * `EncryptedBigInt` envelope — the user-facing input/output type for + * `cipherstash/bigint@1` columns. Concrete subclass of + * {@link EncryptedEnvelopeBase} parameterised on `bigint`; lowers to + * EQL `cast_as = 'big_int'`. + * + * The SDK's polymorphic decrypt path returns the bigint plaintext in + * whatever shape the wire-format choice surfaces — today, the + * `@cipherstash/stack` SDK serialises `cast_as: 'big_int'` cells as + * JS `number` (limited by `Number.MAX_SAFE_INTEGER`; see the example + * SDK adapter's `toJsPlaintext` for the encrypt-side cap). This + * envelope's `parseDecryptedValue` widens the accepted set so the + * caller still observes a `bigint` end-to-end regardless of whether + * the SDK hands us a `bigint` (future-proof) or a `number` (today). + */ + +import { + EncryptedEnvelopeBase, + type EncryptedEnvelopeFromInternalArgs, + type EncryptedEnvelopeHandle, +} from './envelope-base'; + +export type EncryptedBigIntHandle = EncryptedEnvelopeHandle; + +export type EncryptedBigIntFromInternalArgs = EncryptedEnvelopeFromInternalArgs; + +export class EncryptedBigInt extends EncryptedEnvelopeBase { + protected override get typeName(): string { + return 'EncryptedBigInt'; + } + + /** + * Narrow the SDK's `unknown` plaintext to a `bigint`. + * + * Accepts: + * - `bigint` — passed through unchanged. + * - `number` — converted via `BigInt(...)`; the SDK's `big_int` + * cast presently surfaces values up to `Number.MAX_SAFE_INTEGER` + * in this shape. 
+ * - `string` — accepted defensively (some SDK builds round-trip + * bigints through their decimal-string representation); + * non-numeric strings throw. + * + * Any other shape throws with a descriptive error rather than + * letting the caller observe a silently coerced value downstream. + */ + protected override parseDecryptedValue(sdkResult: unknown): bigint { + if (typeof sdkResult === 'bigint') { + return sdkResult; + } + if (typeof sdkResult === 'number') { + if (!Number.isSafeInteger(sdkResult)) { + throw new Error( + 'EncryptedBigInt.parseDecryptedValue: SDK returned a number that is not a safe integer; ' + + 'expected an integer plaintext within Number.MAX_SAFE_INTEGER or a bigint.', + ); + } + return BigInt(sdkResult); + } + if (typeof sdkResult === 'string') { + try { + return BigInt(sdkResult); + } catch { + throw new Error( + 'EncryptedBigInt.parseDecryptedValue: SDK returned a string plaintext that is not a valid bigint literal.', + ); + } + } + throw new Error( + `EncryptedBigInt.parseDecryptedValue: unsupported SDK plaintext type "${typeof sdkResult}"; expected bigint | number | string.`, + ); + } + + /** + * Construct a write-side envelope from a plaintext `bigint`. + * Bulk-encrypt middleware populates the handle's ciphertext slot + * before the codec encodes the envelope to wire format. + */ + static from(plaintext: bigint): EncryptedBigInt { + return new EncryptedBigInt({ + plaintext, + ciphertext: undefined, + table: undefined, + column: undefined, + sdk: undefined, + }); + } + + /** + * Construct a read-side envelope from a wire ciphertext + the column + * identity + the SDK used to decrypt the cell. Called from the codec + * `decode` body. 
+ */ + static fromInternal(args: EncryptedBigIntFromInternalArgs): EncryptedBigInt { + return new EncryptedBigInt({ + plaintext: undefined, + ciphertext: args.ciphertext, + table: args.table, + column: args.column, + sdk: args.sdk, + }); + } +} diff --git a/packages/prisma-next/src/execution/envelope-boolean.ts b/packages/prisma-next/src/execution/envelope-boolean.ts new file mode 100644 index 00000000..31cc9a52 --- /dev/null +++ b/packages/prisma-next/src/execution/envelope-boolean.ts @@ -0,0 +1,45 @@ +/** + * `EncryptedBoolean` envelope — the user-facing input/output type for + * `cipherstash/boolean@1` columns. Concrete subclass of + * {@link EncryptedEnvelopeBase} parameterised on `boolean` (EQL + * `cast_as = 'boolean'`). + * + * No `parseDecryptedValue` override is needed: the SDK's polymorphic + * decrypt path returns `boolean` plaintexts unchanged. + */ + +import { + EncryptedEnvelopeBase, + type EncryptedEnvelopeFromInternalArgs, + type EncryptedEnvelopeHandle, +} from './envelope-base'; + +export type EncryptedBooleanHandle = EncryptedEnvelopeHandle; + +export type EncryptedBooleanFromInternalArgs = EncryptedEnvelopeFromInternalArgs; + +export class EncryptedBoolean extends EncryptedEnvelopeBase { + protected override get typeName(): string { + return 'EncryptedBoolean'; + } + + static from(plaintext: boolean): EncryptedBoolean { + return new EncryptedBoolean({ + plaintext, + ciphertext: undefined, + table: undefined, + column: undefined, + sdk: undefined, + }); + } + + static fromInternal(args: EncryptedBooleanFromInternalArgs): EncryptedBoolean { + return new EncryptedBoolean({ + plaintext: undefined, + ciphertext: args.ciphertext, + table: args.table, + column: args.column, + sdk: args.sdk, + }); + } +} diff --git a/packages/prisma-next/src/execution/envelope-date.ts b/packages/prisma-next/src/execution/envelope-date.ts new file mode 100644 index 00000000..32e252b0 --- /dev/null +++ b/packages/prisma-next/src/execution/envelope-date.ts @@ -0,0 +1,108 
/**
 * `EncryptedDate` envelope — the user-facing input/output type for
 * `cipherstash/date@1` columns. Concrete subclass of
 * {@link EncryptedEnvelopeBase} parameterised on `Date` (calendar
 * date; EQL `cast_as = 'date'`).
 *
 * Unlike the other envelopes, `EncryptedDate` is the one codec whose
 * `parseDecryptedValue` actually does runtime narrowing:
 * the SDK's polymorphic `decrypt` returns `unknown`, and the EQL
 * surface accepts an ISO date string on encrypt
 * (`'2023-01-01'::date::text::jsonb` per the inline example in
 * `migration/eql-install.generated.ts:1695`). Whether the SDK
 * surfaces a `Date` or a string back to us is an SDK-internal
 * choice; this hook accepts both shapes (plus numeric epoch ms as a
 * defensive fallback) and produces a `Date` instance for the user.
 *
 * If the SDK surfaces something else, we throw with a descriptive
 * error rather than silently returning an invalid `Date` — the
 * caller would otherwise observe `NaN`-valued dates downstream and
 * have no signal of where the corruption entered the pipeline.
 */

import {
  EncryptedEnvelopeBase,
  type EncryptedEnvelopeFromInternalArgs,
  type EncryptedEnvelopeHandle,
} from './envelope-base';

export type EncryptedDateHandle = EncryptedEnvelopeHandle<Date>;

export type EncryptedDateFromInternalArgs = EncryptedEnvelopeFromInternalArgs;

export class EncryptedDate extends EncryptedEnvelopeBase<Date> {
  protected override get typeName(): string {
    return 'EncryptedDate';
  }

  /**
   * Coerce the SDK's `unknown` plaintext into a `Date` instance.
   *
   * Accepts:
   * - `Date` instance (returned as-is — the SDK may have already
   *   parsed the cell into a JS `Date`).
   * - `string` (ISO date or ISO datetime — `new Date(value)`
   *   accepts both).
   * - `number` (epoch milliseconds — defensive fallback).
   *
   * Throws on any other shape; an invalid `Date` (NaN time) is
   * rejected before it can leak downstream.
   */
  protected override parseDecryptedValue(sdkResult: unknown): Date {
    if (sdkResult instanceof Date) {
      if (Number.isNaN(sdkResult.getTime())) {
        throw new Error(
          'EncryptedDate.parseDecryptedValue: SDK returned an invalid Date instance (NaN time).',
        );
      }
      return sdkResult;
    }
    if (typeof sdkResult === 'string' || typeof sdkResult === 'number') {
      const parsed = new Date(sdkResult);
      if (Number.isNaN(parsed.getTime())) {
        throw new Error(
          `EncryptedDate.parseDecryptedValue: SDK returned a ${typeof sdkResult} plaintext that does not parse to a valid Date.`,
        );
      }
      return parsed;
    }
    throw new Error(
      `EncryptedDate.parseDecryptedValue: unsupported SDK plaintext type "${typeof sdkResult}"; expected Date | string | number.`,
    );
  }

  /**
   * Construct a write-side envelope from a `Date` plaintext.
   * Bulk-encrypt middleware populates the handle's ciphertext slot
   * before the codec encodes the envelope to wire format.
   *
   * Rejects non-`Date` values and invalid dates up front
   * (`Number.isFinite(getTime())` is `false` for `NaN`-timed dates).
   */
  static from(plaintext: Date): EncryptedDate {
    if (!(plaintext instanceof Date) || !Number.isFinite(plaintext.getTime())) {
      throw new Error(
        'EncryptedDate.from: plaintext must be a valid Date instance (got an invalid Date or non-Date value).',
      );
    }
    return new EncryptedDate({
      plaintext,
      ciphertext: undefined,
      table: undefined,
      column: undefined,
      sdk: undefined,
    });
  }

  /**
   * Construct a read-side envelope from a wire ciphertext + the column
   * identity + the SDK used to decrypt the cell. Called from the codec
   * `decode` body.
   */
  static fromInternal(args: EncryptedDateFromInternalArgs): EncryptedDate {
    return new EncryptedDate({
      plaintext: undefined,
      ciphertext: args.ciphertext,
      table: args.table,
      column: args.column,
      sdk: args.sdk,
    });
  }
}

// ── packages/prisma-next/src/execution/envelope-double.ts ──

/**
 * `EncryptedDouble` envelope — the user-facing input/output type for
 * `cipherstash/double@1` columns. Concrete subclass of
 * {@link EncryptedEnvelopeBase} parameterised on `number` (IEEE-754
 * double; EQL `cast_as = 'double'`). Mirrors `EncryptedString`
 * byte-for-byte beyond the typed factories and `typeName`.
 *
 * No `parseDecryptedValue` override is needed: the SDK's polymorphic
 * `bulkDecrypt` / single-cell `decrypt` already returns numeric
 * plaintexts as `number`; the base's default identity cast suffices.
 */

import {
  EncryptedEnvelopeBase,
  type EncryptedEnvelopeFromInternalArgs,
  type EncryptedEnvelopeHandle,
} from './envelope-base';

export type EncryptedDoubleHandle = EncryptedEnvelopeHandle<number>;

export type EncryptedDoubleFromInternalArgs = EncryptedEnvelopeFromInternalArgs;

export class EncryptedDouble extends EncryptedEnvelopeBase<number> {
  protected override get typeName(): string {
    return 'EncryptedDouble';
  }

  /**
   * Construct a write-side envelope from a plaintext IEEE-754 number.
   * Bulk-encrypt middleware populates the handle's ciphertext slot
   * before the codec encodes the envelope to wire format.
   */
  static from(plaintext: number): EncryptedDouble {
    return new EncryptedDouble({
      plaintext,
      ciphertext: undefined,
      table: undefined,
      column: undefined,
      sdk: undefined,
    });
  }

  /**
   * Construct a read-side envelope from a wire ciphertext + the column
   * identity + the SDK used to decrypt the cell.
Called from the codec + * `decode` body. + */ + static fromInternal(args: EncryptedDoubleFromInternalArgs): EncryptedDouble { + return new EncryptedDouble({ + plaintext: undefined, + ciphertext: args.ciphertext, + table: args.table, + column: args.column, + sdk: args.sdk, + }); + } +} diff --git a/packages/prisma-next/src/execution/envelope-json.ts b/packages/prisma-next/src/execution/envelope-json.ts new file mode 100644 index 00000000..f3df2745 --- /dev/null +++ b/packages/prisma-next/src/execution/envelope-json.ts @@ -0,0 +1,53 @@ +/** + * `EncryptedJson` envelope — the user-facing input/output type for + * `cipherstash/json@1` columns. Concrete subclass of + * {@link EncryptedEnvelopeBase} parameterised on `unknown` + * (JSON-serialisable; EQL `cast_as = 'jsonb'`). + * + * The plaintext slot intentionally types as `unknown` rather than a + * tighter `JsonValue`-style alias: cipherstash users routinely round- + * trip arbitrary nested JS objects through encrypted JSON columns, + * and forcing them through a stricter compile-time alias just shifts + * casts to call sites. Runtime safety is the SDK's responsibility + * (the bulk-encrypt path JSON-stringifies the value, surfacing any + * non-serialisable shape as an SDK-level error). + * + * No `parseDecryptedValue` override is needed: the SDK's polymorphic + * decrypt path returns the decoded JSON value as-is. 
+ */ + +import { + EncryptedEnvelopeBase, + type EncryptedEnvelopeFromInternalArgs, + type EncryptedEnvelopeHandle, +} from './envelope-base'; + +export type EncryptedJsonHandle = EncryptedEnvelopeHandle; + +export type EncryptedJsonFromInternalArgs = EncryptedEnvelopeFromInternalArgs; + +export class EncryptedJson extends EncryptedEnvelopeBase { + protected override get typeName(): string { + return 'EncryptedJson'; + } + + static from(plaintext: unknown): EncryptedJson { + return new EncryptedJson({ + plaintext, + ciphertext: undefined, + table: undefined, + column: undefined, + sdk: undefined, + }); + } + + static fromInternal(args: EncryptedJsonFromInternalArgs): EncryptedJson { + return new EncryptedJson({ + plaintext: undefined, + ciphertext: args.ciphertext, + table: args.table, + column: args.column, + sdk: args.sdk, + }); + } +} diff --git a/packages/prisma-next/src/execution/envelope-string.ts b/packages/prisma-next/src/execution/envelope-string.ts new file mode 100644 index 00000000..0929ce26 --- /dev/null +++ b/packages/prisma-next/src/execution/envelope-string.ts @@ -0,0 +1,78 @@ +/** + * `EncryptedString` envelope — the user-facing input/output type for + * `cipherstash/string@1` columns. The class is the first concrete + * subclass of `EncryptedEnvelopeBase` (see `./envelope-base.ts` for + * the shared encapsulation pattern, decrypt body, and redaction + * overrides). It supplies the typed factories (`from(plaintext)`, + * `fromInternal({...})`) and the user-facing `typeName`; the SDK's + * single-cell `decrypt` already returns `Promise`, so no + * `parseDecryptedValue` override is needed. + * + * ## Lifecycle + * + * The handle has two flavours: + * - **Write side** — `EncryptedString.from(plaintext)` populates the + * `plaintext` slot and leaves `ciphertext` empty. 
The bulk-encrypt + * middleware populates `ciphertext` post-SDK and intentionally + * leaves the plaintext slot in place (zeroing JS strings is + * best-effort and GC-driven lifecycle is sufficient here). As a + * side effect a write-side envelope's `decrypt()` returns the + * original plaintext synchronously without an SDK round-trip. + * - **Read side** — `EncryptedString.fromInternal({...})` (called from + * the codec `decode` body) populates `ciphertext`, `(table, column)` + * from `SqlCodecCallContext.column`, and an `sdk` reference so + * `decrypt({signal?})` can issue the SDK's single-cell decrypt. + */ + +import { + EncryptedEnvelopeBase, + type EncryptedEnvelopeFromInternalArgs, + type EncryptedEnvelopeHandle, +} from './envelope-base'; + +export type EncryptedStringHandle = EncryptedEnvelopeHandle; + +export type EncryptedStringFromInternalArgs = EncryptedEnvelopeFromInternalArgs; + +export class EncryptedString extends EncryptedEnvelopeBase { + protected override get typeName(): string { + return 'EncryptedString'; + } + + /** + * Construct a write-side envelope from plaintext. Bulk-encrypt + * middleware populates the handle's ciphertext slot before the codec + * encodes the envelope to wire format. + */ + static from(plaintext: string): EncryptedString { + return new EncryptedString({ + plaintext, + ciphertext: undefined, + table: undefined, + column: undefined, + sdk: undefined, + }); + } + + /** + * Construct a read-side envelope from a wire ciphertext + the column + * identity + the SDK used to decrypt the cell. Called from the codec + * `decode` body. 
+ */ + static fromInternal(args: EncryptedStringFromInternalArgs): EncryptedString { + return new EncryptedString({ + plaintext: undefined, + ciphertext: args.ciphertext, + table: args.table, + column: args.column, + sdk: args.sdk, + }); + } +} + +export { + isHandleDecrypted, + setHandleCiphertext, + setHandlePlaintextCache, + setHandleRoutingKey, +} from './envelope-base'; diff --git a/packages/prisma-next/src/execution/helpers.ts b/packages/prisma-next/src/execution/helpers.ts new file mode 100644 index 00000000..a6a9423b --- /dev/null +++ b/packages/prisma-next/src/execution/helpers.ts @@ -0,0 +1,223 @@ +/** + * Cipherstash free-standing helpers — the non-predicate side of the + * cipherstash operator surface (see ADR 214). + * + * Predicates (`cipherstashEq`, `cipherstashGt`, …) live in the + * operator registry and surface as column methods through trait- + * dispatched `QueryOperationTypes`. Non-predicates (sort, JSON + * SELECT-expression accessors) cannot share that surface — they + * return `OrderByItem` / column-codec-typed `Expression`, not the + * boolean predicate the registry's where-binding pipeline expects. + * + * The four helpers below are pure functions exported from + * `@prisma-next/extension-cipherstash/runtime`. Each: + * + * - validates the column's codec id is a cipherstash codec the + * helper supports (sort: any of the four + * `cipherstash:order-and-range`-bearing codecs; + * JSON helpers: `cipherstash/json@1` only) + * - constructs the appropriate AST primitive directly: + * sort → `OrderByItem.asc/desc()` + * JSON → `Expression`-shaped `OperationExpr` with the EQL + * function template baked into `lowering.template` + * - throws a descriptive `TypeError` naming the helper and the + * accepted codec ids on a mismatch + * + * # Sort lowering — bare column reference + * + * `cipherstashAsc(col)` lowers to `ORDER BY ASC` with no EQL + * function wrapping. 
EQL ships native `<` / `>` / `<=` / `>=` operator + * overloads on `eql_v2_encrypted` (see `eql_v2."<"(eql_v2_encrypted, + * eql_v2_encrypted)` and the `CREATE OPERATOR <(LEFTARG=eql_v2_encrypted, + * RIGHTARG=eql_v2_encrypted, …)` definition in the bundled EQL + * install) so Postgres uses the EQL operator family for the sort + * comparison. The wrapped form (`eql_v2.order_by_(col)`) is + * the documented fallback if the bare-column form ever stops working + * against a future EQL bundle. + * + * # JSON helpers — Expression-typed OperationExpr + * + * `cipherstashJsonbPathQueryFirst(col, path)` lowers to + * `eql_v2.jsonb_path_query_first({{self}}, {{arg0}})` + * `cipherstashJsonbGet(col, path)` lowers to + * `eql_v2."->"({{self}}, {{arg0}})` + * + * Both return `eql_v2_encrypted` and so are typed + * `Expression<{codecId: 'cipherstash/json@1', nullable: false}>` — + * the result is itself a JSON-encrypted value usable as the column + * argument to a follow-on JSON helper or predicate. The path is a + * user-authored static literal (a JSONpath expression or a JSON key + * string) and is bound as a `pg/text@1` `ParamRef`. Dynamic + * user-controlled runtime path values are not supported here — paths + * must be statically authored to keep the JSONpath surface free of + * injection-shaped input. + * + * # No registry participation + * + * These are not registered operators. They're called by the user + * directly (e.g. `db.query(...).orderBy([cipherstashAsc(col)])`) and + * are typed at their function-declaration site. There is no + * `QueryOperationTypes` entry and no operator-registry + * descriptor — the helpers do not flow through the column-method + * dispatch that the predicate operators rely on. 
+ */ + +import { type AnyExpression, OrderByItem, ParamRef } from '@prisma-next/sql-relational-core/ast'; +import { + buildOperation, + type Expression, + type ScopeField, +} from '@prisma-next/sql-relational-core/expression'; +import { + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_DATE_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, + CIPHERSTASH_JSON_CODEC_ID, + CIPHERSTASH_STRING_CODEC_ID, +} from '../extension-metadata/constants'; + +/** Cipherstash codec ids that carry the `cipherstash:order-and-range` trait. */ +const ORDER_AND_RANGE_CODEC_IDS = [ + CIPHERSTASH_STRING_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_DATE_CODEC_ID, +] as const; + +const ORDER_AND_RANGE_SET: ReadonlySet<string> = new Set(ORDER_AND_RANGE_CODEC_IDS); + +type CipherstashJsonReturn = { + readonly codecId: typeof CIPHERSTASH_JSON_CODEC_ID; + readonly nullable: false; +}; + +function getCodecId(col: Expression, helperName: string): string { + const codecId = col.returnType?.codecId; + if (typeof codecId !== 'string') { + throw new TypeError( + `${helperName}: argument is missing a codec id; expected an Expression bound to a cipherstash column.`, + ); + } + return codecId; +} + +function describeOrderAndRangeCodecs(): string { + return ORDER_AND_RANGE_CODEC_IDS.join(', '); +} + +/** + * ASC sort over a cipherstash column whose codec carries the + * `cipherstash:order-and-range` trait (string / double / bigint / + * date). Returns an `OrderByItem` carrying the column reference; + * Postgres uses EQL's `<` / `>` operator overloads on + * `eql_v2_encrypted` to compute the sort. 
+ */ +export function cipherstashAsc(col: Expression): OrderByItem { + const codecId = getCodecId(col, 'cipherstashAsc'); + if (!ORDER_AND_RANGE_SET.has(codecId)) { + throw new TypeError( + `cipherstashAsc: column codec id "${codecId}" does not support order-and-range sort; ` + + `cipherstashAsc accepts cipherstash columns whose codec id is one of: ${describeOrderAndRangeCodecs()}.`, + ); + } + return OrderByItem.asc(col.buildAst()); +} + +/** + * DESC sort over a cipherstash column whose codec carries the + * `cipherstash:order-and-range` trait. See {@link cipherstashAsc} + * for the lowering rationale. + */ +export function cipherstashDesc(col: Expression): OrderByItem { + const codecId = getCodecId(col, 'cipherstashDesc'); + if (!ORDER_AND_RANGE_SET.has(codecId)) { + throw new TypeError( + `cipherstashDesc: column codec id "${codecId}" does not support order-and-range sort; ` + + `cipherstashDesc accepts cipherstash columns whose codec id is one of: ${describeOrderAndRangeCodecs()}.`, + ); + } + return OrderByItem.desc(col.buildAst()); +} + +function requireJsonColumn(col: Expression, helperName: string): AnyExpression { + const codecId = getCodecId(col, helperName); + if (codecId !== CIPHERSTASH_JSON_CODEC_ID) { + throw new TypeError( + `${helperName}: column codec id "${codecId}" is not "${CIPHERSTASH_JSON_CODEC_ID}"; ` + + `${helperName} only accepts cipherstash JSON columns.`, + ); + } + return col.buildAst(); +} + +function requirePathString(path: unknown, helperName: string): string { + if (typeof path !== 'string') { + throw new TypeError( + `${helperName}: expected a string path argument, got ${ + path === null ? 'null' : typeof path + }.`, + ); + } + return path; +} + +/** + * Lower to `eql_v2.jsonb_path_query_first({{self}}, {{arg0}})`. The + * column must be `cipherstash/json@1`. The path is a user-authored + * static JSONpath literal; it is bound as a `pg/text@1` `ParamRef`. 
+ * + * The result is `eql_v2_encrypted` and can be passed as the column + * argument to a follow-on cipherstash JSON helper or + * `cipherstashJsonbPathExists` predicate (a column codec is not + * required at the type level for those — the runtime branch checks + * the trait/codec at impl time). + */ +export function cipherstashJsonbPathQueryFirst( + col: Expression, + path: string, +): Expression { + const selfAst = requireJsonColumn(col, 'cipherstashJsonbPathQueryFirst'); + const checked = requirePathString(path, 'cipherstashJsonbPathQueryFirst'); + return buildOperation({ + method: 'cipherstashJsonbPathQueryFirst', + args: [selfAst, ParamRef.of(checked, { codec: { codecId: 'pg/text@1' } })], + returns: { codecId: CIPHERSTASH_JSON_CODEC_ID, nullable: false }, + lowering: { + targetFamily: 'sql', + strategy: 'function', + template: 'eql_v2.jsonb_path_query_first({{self}}, {{arg0}})', + }, + }); +} + +/** + * Lower to `eql_v2."->"({{self}}, {{arg0}})`. The column must be + * `cipherstash/json@1`. The path is a JSON key string (the right + * argument of the `->` operator); it is bound as a `pg/text@1` + * `ParamRef` against EQL's `(eql_v2_encrypted, text)` overload. + * + * The result is `eql_v2_encrypted`, mirroring + * {@link cipherstashJsonbPathQueryFirst}. + * + * The exported function name preserves the `Get` suffix convention + * (vs the SQL `->` operator) so the JS surface stays identifier- + * friendly; the lowering still emits the quoted operator-as-function + * form. 
+ */ +export function cipherstashJsonbGet( + col: Expression, + path: string, +): Expression { + const selfAst = requireJsonColumn(col, 'cipherstashJsonbGet'); + const checked = requirePathString(path, 'cipherstashJsonbGet'); + return buildOperation({ + method: 'cipherstashJsonbGet', + args: [selfAst, ParamRef.of(checked, { codec: { codecId: 'pg/text@1' } })], + returns: { codecId: CIPHERSTASH_JSON_CODEC_ID, nullable: false }, + lowering: { + targetFamily: 'sql', + strategy: 'function', + template: 'eql_v2."->"({{self}}, {{arg0}})', + }, + }); +} diff --git a/packages/prisma-next/src/execution/middleware-registry.ts b/packages/prisma-next/src/execution/middleware-registry.ts new file mode 100644 index 00000000..76b3e55a --- /dev/null +++ b/packages/prisma-next/src/execution/middleware-registry.ts @@ -0,0 +1,25 @@ +/** + * Per-process registry of `CipherstashSdk` instances that have been + * wired up via `bulkEncryptMiddleware(sdk)`. The codec's `encode` + * consults this registry to fire a misconfig diagnostic when an + * SDK-bound codec sees a pre-encrypt envelope without a corresponding + * middleware registration — the failure mode is otherwise an opaque + * pg-level serialise error. + * + * Keyed on `CipherstashSdk` reference identity via a `WeakSet`, so + * multi-tenant deployments that construct one SDK per tenant + * correctly distinguish each tenant's middleware lifecycle, and no + * strong references leak. 
+ */ + +import type { CipherstashSdk } from './sdk' + +const REGISTERED: WeakSet<CipherstashSdk> = new WeakSet() + +export function markBulkEncryptMiddlewareRegistered(sdk: CipherstashSdk): void { + REGISTERED.add(sdk) +} + +export function isBulkEncryptMiddlewareRegistered(sdk: CipherstashSdk): boolean { + return REGISTERED.has(sdk) +} diff --git a/packages/prisma-next/src/execution/operators.ts b/packages/prisma-next/src/execution/operators.ts new file mode 100644 index 00000000..68427772 --- /dev/null +++ b/packages/prisma-next/src/execution/operators.ts @@ -0,0 +1,595 @@ +/** + * Cipherstash query-operations registry. + * + * `cipherstashEq` and `cipherstashIlike` lower to EQL's encrypted-aware + * comparison functions (`eql_v2.eq`, `eql_v2.ilike`) on + * `cipherstash/string@1`-typed columns. The lowering shape mirrors the + * canonical templates in the reference Prisma integration at + * `reference/cipherstash/stack/packages/stack/src/prisma/core/ + * operation-templates.ts`: + * + * eql_v2.eq(<self>, <arg0>) + * eql_v2.ilike(<self>, <arg0>) + * + * Why we diverge from Postgres' native `=` / `ILIKE` operators: EQL + * ciphers contain randomized nonces, so two encrypts of the same + * plaintext do not byte-equal under SQL `=`. EQL's `eql_v2.eq` / + * `eql_v2.ilike` short-circuit through the per-column index + * (`unique` / `match`) emitted by the codec lifecycle hook and produce + * correct results. + * + * **Why cipherstash-namespaced method names (`cipherstashEq`, + * `cipherstashIlike`) rather than reusing the framework's `eq` / + * `ilike`.** The framework's `OperationRegistry` is a flat method-keyed + * map and operator overriding is disallowed by project decision.
Equally + * importantly, cipherstash's search operators are semantically distinct + * from the framework built-ins — they take encrypted-aware envelope + * arguments and lower to `eql_v2.eq` / `eql_v2.ilike`, which short- + * circuit through EQL's per-column index — so they belong under a + * cipherstash-prefixed surface that flags the divergence at the call + * site. The supported user-facing call shape on a cipherstash column is: + * + * model.users.where((u) => u.email.cipherstashEq('alice@example.com')) + * model.users.where((u) => u.email.cipherstashIlike('%alice%')) + * + * The framework's built-in `email.eq(...)` is **not reachable** on + * cipherstash columns: the cipherstash codec declares no `equality` + * trait (see `codec-runtime.ts` / `codec-metadata.ts` / `parameterized.ts`), + * and the model-accessor synthesis in `sql-orm-client` gates + * `COMPARISON_METHODS_META.eq` on the `equality` trait being present in + * the column codec's trait set. Calling `email.eq(...)` on a cipherstash + * column is therefore `undefined` — the wrong-SQL footgun (where the + * built-in `eq` would lower to standard SQL `=` against an + * `eql_v2_encrypted` value, silently returning zero rows because EQL + * ciphers contain randomized nonces) is closed at the codec layer, not + * the operator layer. The trait declaration is regression-pinned by + * `test/equality-trait-removal.test.ts`. + * + * The encrypted-arg path: the operator wraps the user-supplied value + * in an `EncryptedString` envelope and stamps the column's + * `(table, column)` routing context onto the envelope's handle. The + * bulk-encrypt middleware then groups the envelope alongside + * any others targeting the same `(table, column)` and issues one + * `sdk.bulkEncrypt` per group. The cipherstash codec encodes the + * resulting ciphertext as the wire payload at + * `eql_v2_encrypted` cast time.
Stamping at lowering time is the + * load-bearing step — the middleware`s AST walk only handles + * `InsertAst` / `UpdateAst` (see + * `src/middleware/bulk-encrypt.ts:stampRoutingKeysFromAst`); SELECT + * envelopes have to arrive at the middleware already routing-keyed. + * + * Build-time return type is the postgres `pg/bool@1` codec — that`s + * the codec the framework`s predicate machinery looks at via the + * `'boolean'` trait to decide that the operator`s return value is a + * predicate suitable for a WHERE clause (see + * `packages/3-extensions/sql-orm-client/src/model-accessor.ts:172-178`). + * + * **`isNull` / `isNotNull` are NOT registered here.** The framework`s + * always-on `isNull` / `isNotNull` comparison methods construct + * `NullCheckExpr` directly, bypassing + * the operator-registry dispatch, and lower to ` IS [NOT] NULL` + * regardless of codec — pinned by `test/operator-lowering.test.ts`. + */ + +import type { CodecTrait } from '@prisma-next/framework-components/codec'; +import type { SqlOperationDescriptor, SqlOperationDescriptors } from '@prisma-next/sql-operations'; +import type { CodecRef } from '@prisma-next/sql-relational-core/ast'; +import { type AnyExpression, type ColumnRef, ParamRef } from '@prisma-next/sql-relational-core/ast'; +import { + buildOperation, + codecOf, + type Expression, + type ScopeField, + toExpr, +} from '@prisma-next/sql-relational-core/expression'; +import { + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_BOOLEAN_CODEC_ID, + CIPHERSTASH_DATE_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, + CIPHERSTASH_JSON_CODEC_ID, + CIPHERSTASH_STRING_CODEC_ID, + CIPHERSTASH_TRAIT_EQUALITY, + CIPHERSTASH_TRAIT_FREE_TEXT_SEARCH, + CIPHERSTASH_TRAIT_ORDER_AND_RANGE, + CIPHERSTASH_TRAIT_SEARCHABLE_JSON, + type CipherstashCodecId, + isCipherstashCodecId, +} from '../extension-metadata/constants'; +import type { EncryptedEnvelopeBase } from './envelope-base'; +import { EncryptedBigInt } from './envelope-bigint'; +import { EncryptedBoolean } 
from './envelope-boolean'; +import { EncryptedDate } from './envelope-date'; +import { EncryptedDouble } from './envelope-double'; +import { EncryptedJson } from './envelope-json'; +import { EncryptedString, setHandleRoutingKey } from './envelope-string'; + +/** + * Codec ID of the framework's Postgres boolean codec. Referenced as a + * string (rather than imported from `@prisma-next/target-postgres`) + * so cipherstash does not pick up a peer-dep on the target package + * just to identify a return-codec id. Mirrors the same pattern in the + * reference cipherstash integration's `operation-templates.ts:RETURN_BOOL`. + */ +const PG_BOOL_CODEC_ID = 'pg/bool@1' as const; + +type PgBoolReturn = { readonly codecId: typeof PG_BOOL_CODEC_ID; readonly nullable: false }; + +/** + * Convert a user-supplied value (raw plaintext or an existing + * `Encrypted*` envelope) into a `ParamRef` carrying an envelope + * tagged with the column's cipherstash storage codec ref. The + * envelope's handle is stamped with the column's `(table, column)` + * routing context so the bulk-encrypt middleware can group it for + * SELECT-side bulk encryption (the middleware's AST walk only stamps + * for INSERT / UPDATE). + * + * Already-stamped envelopes are preserved write-once-wins per + * `setHandleRoutingKey`'s contract. + * + * The `selfCodec` argument is the full {@link CodecRef} (codecId + + * typeParams) derived from the `self` expression via {@link codecOf}. + * Forwarding the complete ref — not just the codecId — keeps the + * resulting `ParamRef` aligned with the AST-bound codec resolution + * model introduced in TML-2456: `forCodecRef` validates `typeParams` + * against the codec's `paramsSchema`, and parameterized cipherstash + * codecs (`cipherstash/string@1`, `cipherstash/double@1`, ...) + * require their search-index `typeParams` (`equality`, + * `freeTextSearch`, `orderAndRange`) to be present. 
+ */ +function asEncryptedParam(selfAst: AnyExpression, selfCodec: CodecRef, value: unknown): ParamRef { + const envelope = coerceToEnvelope(selfCodec.codecId, value); + const columnRef = extractColumnRef(selfAst); + if (columnRef !== undefined) { + setHandleRoutingKey(envelope, columnRef.table, columnRef.column); + } + return ParamRef.of(envelope, { codec: selfCodec }); +} + +/** + * Read the column-bound {@link CodecRef} off the `self` expression. + * Cipherstash predicate operators are reachable only via the ORM's + * model-accessor path, which stamps the column's full CodecRef onto + * the field-proxy's `codec` slot at synthesis time. If the ref is + * missing the operator was reached without a column binding (likely + * a programming error in a custom builder); throw with a stable + * runtime envelope so the failure mode is loud. + */ +function requireSelfCodec(self: Expression, publicMethod: string): CodecRef { + const codec = codecOf(self); + if (codec === undefined) { + throw new TypeError( + `cipherstash ${publicMethod}: self expression is missing a CodecRef. ` + + 'Cipherstash predicate operators require a column-bound self argument; ' + + 'reach the operator through the ORM model-accessor (e.g. `model.users.where((u) => u.email.cipherstashEq(...))`).', + ); + } + return codec; +} + +/** + * Coerce a user-supplied value into the envelope subclass appropriate + * for the column's codec id. Each cipherstash column type has its own + * concrete envelope subclass with a typed `from(plaintext)` factory; + * this dispatcher matches the column codec id to the right subclass + * and wraps the user value, while passing already-constructed + * envelopes through unchanged. The error message lists the expected + * plaintext type per codec so a user passing the wrong shape gets a + * specific diagnostic at the call site. 
+ * + * Dispatch is via a `Record<CipherstashCodecId, EnvelopeCoercer>` map so adding + * a new cipherstash codec id (which extends the closed + * {@link CipherstashCodecId} union) becomes a compile-time error + * here until the new branch is wired — closing off the runtime-only + * failure mode the previous if-chain shape tolerated. + */ +type EnvelopeCoercer = (value: unknown) => EncryptedEnvelopeBase; + +const ENVELOPE_COERCERS: Readonly<Record<CipherstashCodecId, EnvelopeCoercer>> = { + [CIPHERSTASH_STRING_CODEC_ID]: (value) => { + if (value instanceof EncryptedString) return value; + if (typeof value === 'string') return EncryptedString.from(value); + throw envelopeTypeError('EncryptedString', 'string', value); + }, + [CIPHERSTASH_DOUBLE_CODEC_ID]: (value) => { + if (value instanceof EncryptedDouble) return value; + if (typeof value === 'number') return EncryptedDouble.from(value); + throw envelopeTypeError('EncryptedDouble', 'number', value); + }, + [CIPHERSTASH_BIGINT_CODEC_ID]: (value) => { + if (value instanceof EncryptedBigInt) return value; + if (typeof value === 'bigint') return EncryptedBigInt.from(value); + throw envelopeTypeError('EncryptedBigInt', 'bigint', value); + }, + [CIPHERSTASH_DATE_CODEC_ID]: (value) => { + if (value instanceof EncryptedDate) return value; + if (value instanceof Date) return EncryptedDate.from(value); + throw envelopeTypeError('EncryptedDate', 'Date', value); + }, + [CIPHERSTASH_BOOLEAN_CODEC_ID]: (value) => { + if (value instanceof EncryptedBoolean) return value; + if (typeof value === 'boolean') return EncryptedBoolean.from(value); + throw envelopeTypeError('EncryptedBoolean', 'boolean', value); + }, + [CIPHERSTASH_JSON_CODEC_ID]: (value) => { + if (value instanceof EncryptedJson) return value; + return EncryptedJson.from(value); + }, +}; + +function coerceToEnvelope(columnCodecId: string, value: unknown): EncryptedEnvelopeBase { + if (!isCipherstashCodecId(columnCodecId)) { + throw new Error( + `cipherstash operator: column codec id "${columnCodecId}" is not a cipherstash codec; ` + + 'this operator 
should not be reachable on a non-cipherstash column. ' + + 'If you see this error, the operator-registry trait dispatch is wired against a ' + + 'codec that should not advertise the cipherstash trait. File a bug against the package.', + ); + } + return ENVELOPE_COERCERS[columnCodecId](value); +} + +function envelopeTypeError(envelopeType: string, expected: string, value: unknown): TypeError { + const got = value === null ? 'null' : value instanceof Date ? 'Date' : typeof value; + return new TypeError( + `cipherstash operator: expected a ${expected} plaintext or an ${envelopeType} envelope, ` + + `got ${got}. ` + + `Use \`${envelopeType}.from(plaintext)\` to construct an envelope explicitly, or ` + + 'pass the plaintext directly and let the operator wrap it.', + ); +} + +/** + * Find the column reference inside a `self` expression so the operator + * can stamp its `(table, column)` onto the encrypted-param envelope. + * + * Most calls flow through the ORM model-accessor, where `self` is a + * column-field accessor whose `buildAst()` returns a `ColumnRef` + * directly. For more complex `self` expressions (e.g. wrapped in a + * function call) we fall back to the `baseColumnRef()` inherited from + * `Expression` — every standard AST node walks down to the underlying + * column. If no column is reachable (e.g. a literal `self`), routing + * stamping is skipped; the envelope will surface the + * "envelope reached the bulk-encrypt phase without a (table, column) + * routing context" diagnostic from `collectTargets` at execute time. + */ +function extractColumnRef(selfAst: AnyExpression): ColumnRef | undefined { + if (selfAst.kind === 'column-ref') { + return selfAst; + } + try { + return selfAst.baseColumnRef(); + } catch { + return undefined; + } +} + +/** + * Build a single-codec cipherstash operator descriptor — the + * original shape used by `cipherstashEq` / `cipherstashIlike`, + * pinned to `cipherstash/string@1`. 
Multi-codec operators use + * {@link envelopeOperator} with trait-based dispatch instead. + * + * @param publicMethod - The user-facing method name on the column + * accessor (e.g. `cipherstashEq`). Must not collide with any + * framework- or adapter-shipped method name. + * @param eqlFunction - The EQL function to lower to (`eq`, `ilike`). + * Embedded into the SQL lowering template as `eql_v2.(...)`. + */ +function eqlOperator(publicMethod: string, eqlFunction: 'eq' | 'ilike'): SqlOperationDescriptor { + return { + self: { codecId: CIPHERSTASH_STRING_CODEC_ID }, + impl: (self: Expression, value: unknown): Expression => { + const selfCodec = requireSelfCodec(self, publicMethod); + const selfAst = toExpr(self, selfCodec); + return buildOperation({ + method: publicMethod, + args: [selfAst, asEncryptedParam(selfAst, selfCodec, value)], + returns: { codecId: PG_BOOL_CODEC_ID, nullable: false }, + lowering: { + targetFamily: 'sql', + strategy: 'function', + template: `eql_v2.${eqlFunction}({{self}}, {{arg0}})`, + }, + }); + }, + }; +} + +/** + * Build a cipherstash predicate operator dispatched via a + * cipherstash-namespaced trait — the multi-codec shape used for the + * trait-namespaced predicate surface (see ADR 214). The operator + * attaches to every codec descriptor whose `traits` list contains + * {@link trait}; the model-accessor's trait dispatch + * (`packages/3-extensions/sql-orm-client/src/model-accessor.ts`) + * handles the per-codec attachment. + * + * Each user-supplied argument is wrapped in the envelope subclass + * that matches the column's codec id at impl time. The lowering + * template uses the standard `{{self}}` and `{{argN}}` placeholders + * that the postgres adapter's `sql-renderer` substitutes per call. + * + * @param publicMethod - User-facing method name on the column + * accessor (e.g. `cipherstashGt`). Must not collide with any + * framework- or adapter-shipped method name. 
+ * @param trait - Cipherstash-namespaced trait that gates the codec + * set the operator attaches to (see `extension-metadata/constants.ts`). + * @param arity - Fixed user-arg count (1 for `gt`/`gte`/`lt`/`lte`/ + * `eq`/`ne`/`ilike`/`notIlike`, 2 for `between`/`notBetween`). + * Excludes the `self` (column-bound) argument. + * @param template - Lowering template, e.g. `eql_v2.gt({{self}}, {{arg0}})` + * or `NOT eql_v2.eq({{self}}, {{arg0}})`. Stored verbatim on the + * `OperationExpr` AST node and substituted by the postgres + * adapter at lower time. + */ +function envelopeOperator( + publicMethod: string, + trait: string, + arity: number, + template: string, +): SqlOperationDescriptor { + return { + // Cipherstash trait identifiers (`cipherstash:equality`, ...) + // intentionally live outside the framework`s closed `CodecTrait` + // union; the runtime dispatcher widens to `readonly string[]` + // before matching, so the namespace round-trips unchanged. See + // `extension-metadata/constants.ts:CIPHERSTASH_CODEC_TRAITS` for + // the full rationale; AGENTS.md requires the rationale comment + // alongside any `as unknown as` cast. + self: { traits: [trait] as unknown as readonly CodecTrait[] }, + impl: (self: Expression, ...userArgs: unknown[]): Expression => { + if (userArgs.length !== arity) { + throw new TypeError( + `cipherstash ${publicMethod}: expected ${arity} argument${arity === 1 ? 
'' : 's'}, got ${userArgs.length}.`, + ); + } + const selfCodec = requireSelfCodec(self, publicMethod); + const selfAst = toExpr(self, selfCodec); + const argRefs = userArgs.map((value) => asEncryptedParam(selfAst, selfCodec, value)); + return buildOperation({ + method: publicMethod, + args: [selfAst, ...argRefs], + returns: { codecId: PG_BOOL_CODEC_ID, nullable: false }, + lowering: { + targetFamily: 'sql', + strategy: 'function', + template, + }, + }); + }, + }; +} + +/** + * Build a cipherstash variable-arity predicate operator — the shape + * used for `cipherstashInArray` / `cipherstashNotInArray`. Each + * array element is wrapped in its own envelope sharing the + * column's `(table, column)` routing key, and the lowering template + * is built dynamically per call from {@link buildTemplate} based on + * the array length so the framework's `{{argN}}` placeholder + * substitution covers every element. + * + * Empty arrays are rejected with a descriptive error: an OR-of-zero + * fragments lowers to a SQL syntax error and a silent rewrite to + * `FALSE` (or `TRUE` for `notInArray`) would mask the user's likely + * intent. Callers who want "match nothing" should use + * `WHERE FALSE` directly; this operator is for non-empty arrays. + * + * @param publicMethod - User-facing method name (`cipherstashInArray`, + * `cipherstashNotInArray`). + * @param trait - Cipherstash-namespaced trait that gates codec + * visibility (`cipherstash:equality` for both in-array operators). + * @param buildTemplate - Pure function `(n) => template` that + * produces the lowering template for an `n`-element array. For + * `cipherstashInArray`: `(n) => "()"`. For + * `cipherstashNotInArray`: `(n) => "NOT ()"`. + */ +function variableArityEnvelopeOperator( + publicMethod: string, + trait: string, + buildTemplate: (arity: number) => string, +): SqlOperationDescriptor { + return { + // See `envelopeOperator` for the cast rationale. 
+ self: { traits: [trait] as unknown as readonly CodecTrait[] }, + impl: (self: Expression, values: unknown): Expression => { + if (!Array.isArray(values)) { + throw new TypeError( + `cipherstash ${publicMethod}: expected an array argument, got ${ + values === null ? 'null' : typeof values + }.`, + ); + } + if (values.length === 0) { + throw new TypeError( + `cipherstash ${publicMethod}: empty array is not supported. ` + + 'An empty membership check has no well-defined SQL lowering — use ' + + '`WHERE FALSE` directly if you want to match no rows.', + ); + } + const selfCodec = requireSelfCodec(self, publicMethod); + const selfAst = toExpr(self, selfCodec); + const argRefs = values.map((value) => asEncryptedParam(selfAst, selfCodec, value)); + return buildOperation({ + method: publicMethod, + args: [selfAst, ...argRefs], + returns: { codecId: PG_BOOL_CODEC_ID, nullable: false }, + lowering: { + targetFamily: 'sql', + strategy: 'function', + template: buildTemplate(values.length), + }, + }); + }, + }; +} + +/** + * Build the OR-of-equalities lowering template for an `n`-element + * array: `(eql_v2.eq({{self}}, {{arg0}}) OR eql_v2.eq({{self}}, {{arg1}}) OR ...)`. + * The single-element form collapses to one `eql_v2.eq` call with + * outer parentheses retained for shape stability. + */ +function buildInArrayTemplate(n: number): string { + const terms: string[] = []; + for (let i = 0; i < n; i++) { + terms.push(`eql_v2.eq({{self}}, {{arg${i}}})`); + } + return `(${terms.join(' OR ')})`; +} + +function buildNotInArrayTemplate(n: number): string { + return `NOT ${buildInArrayTemplate(n)}`; +} + +/** + * Build the cipherstash JSONB-path-exists operator. Unlike the + * envelope-wrapping operators above, the path argument is a plain + * SQL text literal — the JSONpath expression is a user-authored + * static input, not an encrypted value — so this operator passes + * the path through `toExpr` directly without envelope wrapping. 
The + * column self IS encrypted; only the path argument is plain. + * + * Note: predicate filtering via this operator is gapped against the + * live EQL bundle pending STE-VEC selector hashing — see TML-2504. + * The framework binds the JSONpath as a plain `pg/text@1` `ParamRef` + * but EQL probes the per-column STE-VEC index for a hashed-selector + * key. The lowering template + AST construction below are correct; + * the bundle-side hashing is the missing piece. + */ +function jsonbPathExistsOperator(): SqlOperationDescriptor { + return { + // See `envelopeOperator` for the cast rationale. + self: { + traits: [CIPHERSTASH_TRAIT_SEARCHABLE_JSON] as unknown as readonly CodecTrait[], + }, + impl: (self: Expression, path: unknown): Expression => { + if (typeof path !== 'string') { + throw new TypeError( + `cipherstash cipherstashJsonbPathExists: expected a string path argument, got ${ + path === null ? 'null' : typeof path + }.`, + ); + } + const selfAst = toExpr(self); + return buildOperation({ + method: 'cipherstashJsonbPathExists', + args: [selfAst, ParamRef.of(path, { codec: { codecId: 'pg/text@1' } })], + returns: { codecId: PG_BOOL_CODEC_ID, nullable: false }, + lowering: { + targetFamily: 'sql', + strategy: 'function', + template: 'eql_v2.jsonb_path_exists({{self}}, {{arg0}})', + }, + }); + }, + }; +} + +/** + * Cipherstash`s query-operations contributions. Wired into the + * runtime descriptor by `createCipherstashRuntimeDescriptor` and read + * by the SQL runtime`s `extractCodecLookup` / `queryOperations` + * aggregation (`packages/2-sql/5-runtime/src/sql-context.ts`). + * + * Two registration shapes are in use: + * + * - **Single-codec** (`cipherstashEq`, `cipherstashIlike`) — + * `self: { codecId: 'cipherstash/string@1' }`. Predates the + * trait-namespaced surface; visibility is fixed to the string + * codec. + * - **Trait-namespaced** (everything else, see ADR 214) — + * `self: { traits: ['cipherstash:'] }`. 
Visible on every + * codec descriptor whose `traits` list contains the trait + * identifier. The `cipherstash:` prefix isolates these from + * the framework`s closed `CodecTrait` union (`'equality'`, + * `'order'`, ...) so adding them to a cipherstash codec + * descriptor cannot silently re-attach a framework built-in. + * + * Operator -> codec visibility: + * + * - `cipherstashEq` (string only — single-codec, legacy) + * - `cipherstashIlike` (string only — single-codec, legacy) + * - `cipherstashNe` / `cipherstashInArray` / + * `cipherstashNotInArray` (trait `cipherstash:equality` -> + * string, double, bigint, date, boolean) + * - `cipherstashNotIlike` (trait `cipherstash:free-text-search` + * -> string) + * - `cipherstashGt` / `cipherstashGte` / `cipherstashLt` / + * `cipherstashLte` / `cipherstashBetween` / + * `cipherstashNotBetween` (trait `cipherstash:order-and-range` + * -> string, double, bigint, date) + * - `cipherstashJsonbPathExists` (trait + * `cipherstash:searchable-json` -> json) + * + * The lowering templates mirror the canonical EQL function calls. + * The variable-arity `inArray` / `notInArray` + * lowerings build their template per call from the array length + * (see {@link variableArityEnvelopeOperator}). 
+ */ +export function cipherstashQueryOperations(): SqlOperationDescriptors { + return { + cipherstashEq: eqlOperator('cipherstashEq', 'eq'), + cipherstashIlike: eqlOperator('cipherstashIlike', 'ilike'), + cipherstashNe: envelopeOperator( + 'cipherstashNe', + CIPHERSTASH_TRAIT_EQUALITY, + 1, + 'NOT eql_v2.eq({{self}}, {{arg0}})', + ), + cipherstashInArray: variableArityEnvelopeOperator( + 'cipherstashInArray', + CIPHERSTASH_TRAIT_EQUALITY, + buildInArrayTemplate, + ), + cipherstashNotInArray: variableArityEnvelopeOperator( + 'cipherstashNotInArray', + CIPHERSTASH_TRAIT_EQUALITY, + buildNotInArrayTemplate, + ), + cipherstashNotIlike: envelopeOperator( + 'cipherstashNotIlike', + CIPHERSTASH_TRAIT_FREE_TEXT_SEARCH, + 1, + 'NOT eql_v2.ilike({{self}}, {{arg0}})', + ), + cipherstashGt: envelopeOperator( + 'cipherstashGt', + CIPHERSTASH_TRAIT_ORDER_AND_RANGE, + 1, + 'eql_v2.gt({{self}}, {{arg0}})', + ), + cipherstashGte: envelopeOperator( + 'cipherstashGte', + CIPHERSTASH_TRAIT_ORDER_AND_RANGE, + 1, + 'eql_v2.gte({{self}}, {{arg0}})', + ), + cipherstashLt: envelopeOperator( + 'cipherstashLt', + CIPHERSTASH_TRAIT_ORDER_AND_RANGE, + 1, + 'eql_v2.lt({{self}}, {{arg0}})', + ), + cipherstashLte: envelopeOperator( + 'cipherstashLte', + CIPHERSTASH_TRAIT_ORDER_AND_RANGE, + 1, + 'eql_v2.lte({{self}}, {{arg0}})', + ), + cipherstashBetween: envelopeOperator( + 'cipherstashBetween', + CIPHERSTASH_TRAIT_ORDER_AND_RANGE, + 2, + 'eql_v2.gte({{self}}, {{arg0}}) AND eql_v2.lte({{self}}, {{arg1}})', + ), + cipherstashNotBetween: envelopeOperator( + 'cipherstashNotBetween', + CIPHERSTASH_TRAIT_ORDER_AND_RANGE, + 2, + 'NOT (eql_v2.gte({{self}}, {{arg0}}) AND eql_v2.lte({{self}}, {{arg1}}))', + ), + cipherstashJsonbPathExists: jsonbPathExistsOperator(), + }; +} diff --git a/packages/prisma-next/src/execution/parameterized.ts b/packages/prisma-next/src/execution/parameterized.ts new file mode 100644 index 00000000..3b7d9386 --- /dev/null +++ 
b/packages/prisma-next/src/execution/parameterized.ts @@ -0,0 +1,239 @@ +/** + * `RuntimeParameterizedCodecDescriptor`s for the cipherstash storage + * codecs — the post-#402 unified `CodecDescriptor

` shape consumed by + * the SQL runtime via `SqlStaticContributions.parameterizedCodecs()`. + * + * Mirrors pgvector's `vectorParamsSchema` + `vectorFactory` precedent + * (`packages/3-extensions/pgvector/src/exports/runtime.ts`). Cipherstash + * differs from pgvector in two respects: each codec depends on the + * SDK (read-side single-cell `decrypt`, the bulk-encrypt middleware), + * so each `createParameterizedCodecDescriptors(sdk)` call produces a + * fresh descriptor list closed over the SDK so multi-tenant + * deployments can compose multiple cipherstash extensions side-by-side + * without cross-talk; and the cipherstash family ships six codecs + * (one per encrypted column type) which all share the same + * `eql_v2_encrypted` Postgres native type. + * + * Per-codec params shape (every flag defaults to `true` because + * searchable encryption is the legitimate default for an extension + * whose entire reason for existing is to make encrypted columns + * queryable): + * + * | Codec | Params | + * |---------------------|-------------------------------------| + * | `cipherstash/string@1` | `{ equality, freeTextSearch, orderAndRange }` | + * | `cipherstash/double@1` | `{ equality, orderAndRange }` | + * | `cipherstash/bigint@1` | `{ equality, orderAndRange }` | + * | `cipherstash/date@1` | `{ equality, orderAndRange }` | + * | `cipherstash/boolean@1` | `{ equality }` | + * | `cipherstash/json@1` | `{ searchableJson }` | + * + * The codec runtimes are per-cell stateless across params on the write + * side (encode reads ciphertext from the handle, independent of the + * search-mode flags); read-side decode constructs the per-type + * envelope independent of params. The factory therefore returns the + * same shared codec for every params instance, mirroring pgvector's + * `vectorFactory`. 
+ */ + +import type { CodecInstanceContext } from '@prisma-next/framework-components/codec'; +import type { RuntimeParameterizedCodecDescriptor } from '@prisma-next/sql-runtime'; +import { type as arktype } from 'arktype'; +import { + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_BOOLEAN_CODEC_ID, + CIPHERSTASH_CODEC_TRAITS, + CIPHERSTASH_DATE_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, + CIPHERSTASH_JSON_CODEC_ID, + CIPHERSTASH_STRING_CODEC_ID, +} from '../extension-metadata/constants'; +import { + createCipherstashBigIntCodec, + createCipherstashBooleanCodec, + createCipherstashDateCodec, + createCipherstashDoubleCodec, + createCipherstashJsonCodec, + createCipherstashStringCodec, +} from './codec-runtime'; +import type { CipherstashSdk } from './sdk'; + +export interface CipherstashStringParams { + readonly equality: boolean; + readonly freeTextSearch: boolean; + readonly orderAndRange: boolean; +} + +export interface CipherstashNumericParams { + readonly equality: boolean; + readonly orderAndRange: boolean; +} + +export interface CipherstashDateParams { + readonly equality: boolean; + readonly orderAndRange: boolean; +} + +export interface CipherstashBooleanParams { + readonly equality: boolean; +} + +export interface CipherstashJsonParams { + readonly searchableJson: boolean; +} + +export const encryptedStringParamsSchema = arktype({ + equality: 'boolean', + freeTextSearch: 'boolean', + orderAndRange: 'boolean', +}); + +export const encryptedDoubleParamsSchema = arktype({ + equality: 'boolean', + orderAndRange: 'boolean', +}); + +export const encryptedBigIntParamsSchema = arktype({ + equality: 'boolean', + orderAndRange: 'boolean', +}); + +export const encryptedDateParamsSchema = arktype({ + equality: 'boolean', + orderAndRange: 'boolean', +}); + +export const encryptedBooleanParamsSchema = arktype({ + equality: 'boolean', +}); + +export const encryptedJsonParamsSchema = arktype({ + searchableJson: 'boolean', +}); + +export function 
renderEncryptedStringOutputType(_params: CipherstashStringParams): string { + return 'EncryptedString'; +} + +export function renderEncryptedDoubleOutputType(_params: CipherstashNumericParams): string { + return 'EncryptedDouble'; +} + +export function renderEncryptedBigIntOutputType(_params: CipherstashNumericParams): string { + return 'EncryptedBigInt'; +} + +export function renderEncryptedDateOutputType(_params: CipherstashDateParams): string { + return 'EncryptedDate'; +} + +export function renderEncryptedBooleanOutputType(_params: CipherstashBooleanParams): string { + return 'EncryptedBoolean'; +} + +export function renderEncryptedJsonOutputType(_params: CipherstashJsonParams): string { + return 'EncryptedJson'; +} + +const ENCRYPTED_TARGET_TYPES = ['eql_v2_encrypted'] as const; +const ENCRYPTED_META = { db: { sql: { postgres: { nativeType: 'eql_v2_encrypted' } } } } as const; +// Per-codec traits live in `CIPHERSTASH_CODEC_TRAITS` and use the +// `cipherstash:*` namespace so the cipherstash-namespaced operators +// (`cipherstashEq`, `cipherstashGt`, etc.) can register against +// multiple codec ids at once via trait-based dispatch. The traits +// are intentionally namespaced to avoid colliding with framework +// built-ins like `'equality'` — see `equality-trait-removal.test.ts` +// for the regression rationale. 
+ +export type CipherstashAnyParams = + | CipherstashStringParams + | CipherstashNumericParams + | CipherstashDateParams + | CipherstashBooleanParams + | CipherstashJsonParams; + +export function createParameterizedCodecDescriptors( + sdk: CipherstashSdk, +): ReadonlyArray> { + const stringCodec = createCipherstashStringCodec(sdk); + const doubleCodec = createCipherstashDoubleCodec(sdk); + const bigIntCodec = createCipherstashBigIntCodec(sdk); + const dateCodec = createCipherstashDateCodec(sdk); + const booleanCodec = createCipherstashBooleanCodec(sdk); + const jsonCodec = createCipherstashJsonCodec(sdk); + + const stringDescriptor: RuntimeParameterizedCodecDescriptor = { + codecId: CIPHERSTASH_STRING_CODEC_ID, + traits: CIPHERSTASH_CODEC_TRAITS[CIPHERSTASH_STRING_CODEC_ID] ?? [], + targetTypes: ENCRYPTED_TARGET_TYPES, + meta: ENCRYPTED_META, + paramsSchema: encryptedStringParamsSchema, + isParameterized: true as const, + renderOutputType: renderEncryptedStringOutputType, + factory: (_params: CipherstashStringParams) => (_ctx: CodecInstanceContext) => stringCodec, + }; + + const doubleDescriptor: RuntimeParameterizedCodecDescriptor = { + codecId: CIPHERSTASH_DOUBLE_CODEC_ID, + traits: CIPHERSTASH_CODEC_TRAITS[CIPHERSTASH_DOUBLE_CODEC_ID] ?? [], + targetTypes: ENCRYPTED_TARGET_TYPES, + meta: ENCRYPTED_META, + paramsSchema: encryptedDoubleParamsSchema, + isParameterized: true as const, + renderOutputType: renderEncryptedDoubleOutputType, + factory: (_params: CipherstashNumericParams) => (_ctx: CodecInstanceContext) => doubleCodec, + }; + + const bigIntDescriptor: RuntimeParameterizedCodecDescriptor = { + codecId: CIPHERSTASH_BIGINT_CODEC_ID, + traits: CIPHERSTASH_CODEC_TRAITS[CIPHERSTASH_BIGINT_CODEC_ID] ?? 
[], + targetTypes: ENCRYPTED_TARGET_TYPES, + meta: ENCRYPTED_META, + paramsSchema: encryptedBigIntParamsSchema, + isParameterized: true as const, + renderOutputType: renderEncryptedBigIntOutputType, + factory: (_params: CipherstashNumericParams) => (_ctx: CodecInstanceContext) => bigIntCodec, + }; + + const dateDescriptor: RuntimeParameterizedCodecDescriptor = { + codecId: CIPHERSTASH_DATE_CODEC_ID, + traits: CIPHERSTASH_CODEC_TRAITS[CIPHERSTASH_DATE_CODEC_ID] ?? [], + targetTypes: ENCRYPTED_TARGET_TYPES, + meta: ENCRYPTED_META, + paramsSchema: encryptedDateParamsSchema, + isParameterized: true as const, + renderOutputType: renderEncryptedDateOutputType, + factory: (_params: CipherstashDateParams) => (_ctx: CodecInstanceContext) => dateCodec, + }; + + const booleanDescriptor: RuntimeParameterizedCodecDescriptor = { + codecId: CIPHERSTASH_BOOLEAN_CODEC_ID, + traits: CIPHERSTASH_CODEC_TRAITS[CIPHERSTASH_BOOLEAN_CODEC_ID] ?? [], + targetTypes: ENCRYPTED_TARGET_TYPES, + meta: ENCRYPTED_META, + paramsSchema: encryptedBooleanParamsSchema, + isParameterized: true as const, + renderOutputType: renderEncryptedBooleanOutputType, + factory: (_params: CipherstashBooleanParams) => (_ctx: CodecInstanceContext) => booleanCodec, + }; + + const jsonDescriptor: RuntimeParameterizedCodecDescriptor = { + codecId: CIPHERSTASH_JSON_CODEC_ID, + traits: CIPHERSTASH_CODEC_TRAITS[CIPHERSTASH_JSON_CODEC_ID] ?? 
[], + targetTypes: ENCRYPTED_TARGET_TYPES, + meta: ENCRYPTED_META, + paramsSchema: encryptedJsonParamsSchema, + isParameterized: true as const, + renderOutputType: renderEncryptedJsonOutputType, + factory: (_params: CipherstashJsonParams) => (_ctx: CodecInstanceContext) => jsonCodec, + }; + + return [ + stringDescriptor, + doubleDescriptor, + bigIntDescriptor, + dateDescriptor, + booleanDescriptor, + jsonDescriptor, + ] as ReadonlyArray>; +} diff --git a/packages/prisma-next/src/execution/routing.ts b/packages/prisma-next/src/execution/routing.ts new file mode 100644 index 00000000..895d0873 --- /dev/null +++ b/packages/prisma-next/src/execution/routing.ts @@ -0,0 +1,101 @@ +/** + * Routing-key derivation for cipherstash bulk operations. + * + * The routing key is derived from the envelope handle's + * `(table, column)` — there is no per-column override surface. Every + * cipherstash envelope passing through `bulkEncryptMiddleware` (and + * `decryptAll`) carries `(table, column)` on its handle, populated by + * the middleware's AST walk before the bulk-encrypt phase begins. + * + * `groupByRoutingKey` produces one homogeneous group per + * `(table, column)` pair so each `bulkEncrypt` call serves a single + * routing key — matching the SDK's + * `bulkEncrypt({ routingKey, values, signal })` shape. Heterogeneous + * batching is a future optimization. + */ + +import type { EncryptedEnvelopeBase } from './envelope-base'; +import type { CipherstashRoutingKey } from './sdk'; + +/** + * Per-target context the bulk-encrypt middleware accumulates while + * walking `params.entries()`. Each target carries the envelope, its + * routing key (derived from the handle), the plaintext to encrypt, and + * the param-ref handle the mutator yielded so the post-encrypt + * `replaceValues` write-back can find the slot. 
+ * + * `plaintext` types as `unknown` because the bulk-encrypt path is + * polymorphic across every cipherstash codec (string, double, bigint, + * date, boolean, json — each with its own `T`); the SDK's + * `bulkEncrypt({ values: ReadonlyArray, ... })` shape is + * already polymorphic, and homogeneity within a `(table, column)` + * group means narrower per-cell typing is not needed for batching + * correctness. + */ +export interface BulkEncryptTarget { + readonly ref: TRef; + readonly plaintext: unknown; + readonly envelope: EncryptedEnvelopeBase; + readonly routingKey: CipherstashRoutingKey; +} + +/** + * Stable string key used to group targets by their `(table, column)` + * routing key. Exported for tests; not part of the package's public + * surface. Uses a NUL byte as the separator so the id never collides + * across pairs whose names happen to share a literal concatenation + * (e.g. `(a, bc)` vs `(ab, c)`). + */ +export function routingKeyId(routingKey: CipherstashRoutingKey): string { + return `${routingKey.table}\u0000${routingKey.column}`; +} + +/** + * Read the routing key from an envelope's internal handle. Throws if + * the handle's `(table, column)` slots are unset — which happens when + * the bulk-encrypt middleware's AST walk did not see this envelope + * (typical cause: the envelope was passed in a context the AST walk + * does not yet handle, e.g. a raw-SQL plan with no `InsertAst` / + * `UpdateAst` arm). The throw matches the codec's + * "missing ciphertext" diagnostic shape: it points at the workflow that + * should have populated the slot. + */ +export function getRoutingKey(envelope: EncryptedEnvelopeBase): CipherstashRoutingKey { + const handle = envelope.expose(); + if (handle.table === undefined || handle.column === undefined) { + throw new Error( + 'cipherstash bulk-encrypt: envelope has no (table, column) routing context. 
' + + 'The bulk-encrypt middleware stamps routing context from the lowered AST ' + + '(insert/update); raw-SQL plans embedding cipherstash envelopes must stamp ' + + 'routing context explicitly before execute.', + ); + } + return { table: handle.table, column: handle.column }; +} + +/** + * Group bulk-encrypt targets by `(table, column)` routing key. Each + * `Map` entry yields one homogeneous batch suitable for a single + * `sdk.bulkEncrypt({ routingKey, values, signal })` call. + * + * Order preservation: within each group, targets keep the order they + * were collected from `params.entries()` — which is the canonical + * ParamRef order the renderer's `$N` index map and the encode-side walk + * both consume. Iteration order across groups follows the order each + * routing key was first observed in the input. + */ +export function groupByRoutingKey( + targets: ReadonlyArray>, +): Map[]> { + const groups = new Map[]>(); + for (const target of targets) { + const id = routingKeyId(target.routingKey); + let group = groups.get(id); + if (!group) { + group = []; + groups.set(id, group); + } + group.push(target); + } + return groups; +} diff --git a/packages/prisma-next/src/execution/sdk.ts b/packages/prisma-next/src/execution/sdk.ts new file mode 100644 index 00000000..260fe92b --- /dev/null +++ b/packages/prisma-next/src/execution/sdk.ts @@ -0,0 +1,74 @@ +/** + * Framework-native shape for the CipherStash SDK that the cipherstash + * extension wraps. + * + * The first-attempt SDK (see `reference/cipherstash/stack/...`) is rich + * and Prisma-adapter shaped. The framework-native shape consumed by the + * codec runtime, the bulk-encrypt middleware, and `decryptAll` is + * intentionally smaller — three async methods that each map cleanly to + * one CipherStash bulk-call shape: + * + * - `decrypt` — single-cell read used by `EncryptedString#decrypt()` + * when the user opts out of bulk decryption. 
+ * - `bulkEncrypt` — write-side coalesced encrypt; the bulk-encrypt + * middleware calls this from `beforeExecute`. + * - `bulkDecrypt` — read-side coalesced decrypt; `decryptAll` calls + * this from a recursive walker. + * + * Each method accepts an optional `AbortSignal`. Cancellation is forwarded + * directly to the SDK (the per-execute `MiddlewareContext.signal` from + * the middleware-param-transform seam, or the caller-supplied signal on + * `decrypt({signal})`). + */ + +/** + * Routing-key tuple used by `bulkEncrypt`/`bulkDecrypt` to group requests + * so each ZeroKMS round-trip handles one homogeneous batch. Routing key + * is `(table, column)`. + */ +export interface CipherstashRoutingKey { + readonly table: string; + readonly column: string; +} + +export interface CipherstashSingleDecryptArgs { + /** + * The wire ciphertext to decrypt. Opaque to the framework; the SDK + * inspects the embedded `i.t` / `i.c` schema markers to pick the + * right `cast_as` for the round-trip. + */ + readonly ciphertext: unknown; + readonly table: string; + readonly column: string; + readonly signal?: AbortSignal; +} + +export interface CipherstashBulkEncryptArgs { + readonly routingKey: CipherstashRoutingKey; + /** + * Plaintext values to encrypt. Polymorphic at the SDK boundary: each + * batch is homogeneously typed by its `(table, column)` routing key, + * so the SDK derives the EQL `cast_as` from the search-config already + * registered on the column rather than from a per-batch hint. + */ + readonly values: ReadonlyArray; + readonly signal?: AbortSignal; +} + +export interface CipherstashBulkDecryptArgs { + readonly routingKey: CipherstashRoutingKey; + readonly ciphertexts: ReadonlyArray; + readonly signal?: AbortSignal; +} + +/** + * The framework-native CipherStash SDK contract consumed by the envelope, + * codec, middleware, and `decryptAll` surfaces. 
Real implementations wrap + * a CipherStash `EncryptionClient`; tests construct mock SDKs that + * implement these three methods directly. + */ +export interface CipherstashSdk { + decrypt(args: CipherstashSingleDecryptArgs): Promise; + bulkEncrypt(args: CipherstashBulkEncryptArgs): Promise>; + bulkDecrypt(args: CipherstashBulkDecryptArgs): Promise>; +} diff --git a/packages/prisma-next/src/exports/codec-types.ts b/packages/prisma-next/src/exports/codec-types.ts new file mode 100644 index 00000000..699db1d0 --- /dev/null +++ b/packages/prisma-next/src/exports/codec-types.ts @@ -0,0 +1,10 @@ +/** + * Codec-types subpath entry for the cipherstash extension. Re-exports + * the hand-written `CodecTypes` table from `../types/codec-types` so + * the contract emitter can pull it via + * `import type { CodecTypes as CipherstashTypes } from '@prisma-next/extension-cipherstash/codec-types'`. + * + * Mirrors `packages/3-extensions/pgvector/src/exports/codec-types.ts`. + */ + +export type { CodecTypes } from '../types/codec-types'; diff --git a/packages/prisma-next/src/exports/column-types.ts b/packages/prisma-next/src/exports/column-types.ts new file mode 100644 index 00000000..d7bd6a55 --- /dev/null +++ b/packages/prisma-next/src/exports/column-types.ts @@ -0,0 +1,240 @@ +/** + * TS contract factories for cipherstash-encrypted columns. + * + * Counterparts to the PSL constructors `cipherstash.Encrypted({...})` + * registered in `../contract-authoring`. The six factories + * (`encryptedString`, `encryptedDouble`, `encryptedBigInt`, + * `encryptedDate`, `encryptedBoolean`, `encryptedJson`) produce the + * same `ColumnTypeDescriptor` shape as their PSL counterparts, so + * PSL- and TS-authored contracts emit byte-identical `contract.json`. + * Pinned by the parity fixtures at + * `test/integration/test/authoring/parity/cipherstash-encrypted-{string,double,bigint,date,boolean,json}/`. 
+ * + * Every search-mode flag defaults to `true` — searchable encryption + * is the legitimate default for an extension whose entire reason for + * existing is to make encrypted columns queryable. Users who want + * storage-only encryption opt out explicitly: + * `encryptedString({ equality: false, freeTextSearch: false, orderAndRange: false })`. + * Mirrors the PSL constructors' `true` defaults declared via + * `AuthoringArgRef.default`. + */ + +import { + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_BOOLEAN_CODEC_ID, + CIPHERSTASH_DATE_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, + CIPHERSTASH_JSON_CODEC_ID, + CIPHERSTASH_STRING_CODEC_ID, + EQL_V2_ENCRYPTED_TYPE, +} from '../extension-metadata/constants'; + +/** + * Search-mode parameters for `encryptedString({...})`. Every flag is + * optional and defaults to `true` when omitted — searchable + * encryption is the legitimate default. `orderAndRange` gives string + * columns the same sortable / range-queryable surface the numeric + + * date codecs already had. + */ +export interface EncryptedStringOptions { + readonly equality?: boolean; + readonly freeTextSearch?: boolean; + readonly orderAndRange?: boolean; +} + +export interface EncryptedStringColumnDescriptor { + readonly codecId: typeof CIPHERSTASH_STRING_CODEC_ID; + readonly nativeType: typeof EQL_V2_ENCRYPTED_TYPE; + readonly typeParams: { + readonly equality: boolean; + readonly freeTextSearch: boolean; + readonly orderAndRange: boolean; + }; +} + +/** + * `encryptedString({ equality?, freeTextSearch?, orderAndRange? })` — + * TS contract factory that lowers to a `ColumnTypeDescriptor` with + * the `cipherstash/string@1` codec and the `eql_v2_encrypted` + * Postgres native type. Each boolean flag becomes a `typeParams.*` + * slot; all default to `true`. + * + * The shape matches what the PSL constructor + * `cipherstash.EncryptedString({...})` lowers to, byte-for-byte. 
+ */ +export function encryptedString( + options: EncryptedStringOptions = {}, +): EncryptedStringColumnDescriptor { + return { + codecId: CIPHERSTASH_STRING_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + typeParams: { + equality: options.equality ?? true, + freeTextSearch: options.freeTextSearch ?? true, + orderAndRange: options.orderAndRange ?? true, + }, + }; +} + +/** + * Search-mode parameters for `encryptedDouble({...})` and + * `encryptedBigInt({...})`. Both flags are optional and default to + * `true` when omitted — searchable encryption is the legitimate + * default. + */ +export interface EncryptedNumericOptions { + readonly equality?: boolean; + readonly orderAndRange?: boolean; +} + +export interface EncryptedDoubleColumnDescriptor { + readonly codecId: typeof CIPHERSTASH_DOUBLE_CODEC_ID; + readonly nativeType: typeof EQL_V2_ENCRYPTED_TYPE; + readonly typeParams: { + readonly equality: boolean; + readonly orderAndRange: boolean; + }; +} + +export interface EncryptedBigIntColumnDescriptor { + readonly codecId: typeof CIPHERSTASH_BIGINT_CODEC_ID; + readonly nativeType: typeof EQL_V2_ENCRYPTED_TYPE; + readonly typeParams: { + readonly equality: boolean; + readonly orderAndRange: boolean; + }; +} + +/** + * `encryptedDouble({ equality?, orderAndRange? })` — TS contract + * factory that lowers to a `ColumnTypeDescriptor` with the + * `cipherstash/double@1` codec and the `eql_v2_encrypted` Postgres + * native type. Mirrors what + * `cipherstash.EncryptedDouble({...})` lowers to byte-for-byte. + */ +export function encryptedDouble( + options: EncryptedNumericOptions = {}, +): EncryptedDoubleColumnDescriptor { + return { + codecId: CIPHERSTASH_DOUBLE_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + typeParams: { + equality: options.equality ?? true, + orderAndRange: options.orderAndRange ?? true, + }, + }; +} + +/** + * `encryptedBigInt({ equality?, orderAndRange? })` — TS contract + * factory matching `cipherstash.EncryptedBigInt({...})`. 
+ */ +export function encryptedBigInt( + options: EncryptedNumericOptions = {}, +): EncryptedBigIntColumnDescriptor { + return { + codecId: CIPHERSTASH_BIGINT_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + typeParams: { + equality: options.equality ?? true, + orderAndRange: options.orderAndRange ?? true, + }, + }; +} + +/** + * Search-mode parameters for `encryptedDate({...})`. Both flags are + * optional and default to `true`. + */ +export interface EncryptedDateOptions { + readonly equality?: boolean; + readonly orderAndRange?: boolean; +} + +export interface EncryptedDateColumnDescriptor { + readonly codecId: typeof CIPHERSTASH_DATE_CODEC_ID; + readonly nativeType: typeof EQL_V2_ENCRYPTED_TYPE; + readonly typeParams: { + readonly equality: boolean; + readonly orderAndRange: boolean; + }; +} + +/** + * `encryptedDate({ equality?, orderAndRange? })` — TS contract factory + * matching `cipherstash.EncryptedDate({...})`. + */ +export function encryptedDate(options: EncryptedDateOptions = {}): EncryptedDateColumnDescriptor { + return { + codecId: CIPHERSTASH_DATE_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + typeParams: { + equality: options.equality ?? true, + orderAndRange: options.orderAndRange ?? true, + }, + }; +} + +/** + * Search-mode parameters for `encryptedBoolean({...})`. The flag is + * optional and defaults to `true`. Booleans only support equality + * search (no meaningful range predicate over a 2-value domain). + */ +export interface EncryptedBooleanOptions { + readonly equality?: boolean; +} + +export interface EncryptedBooleanColumnDescriptor { + readonly codecId: typeof CIPHERSTASH_BOOLEAN_CODEC_ID; + readonly nativeType: typeof EQL_V2_ENCRYPTED_TYPE; + readonly typeParams: { + readonly equality: boolean; + }; +} + +/** + * `encryptedBoolean({ equality? })` — TS contract factory matching + * `cipherstash.EncryptedBoolean({...})`. 
+ */ +export function encryptedBoolean( + options: EncryptedBooleanOptions = {}, +): EncryptedBooleanColumnDescriptor { + return { + codecId: CIPHERSTASH_BOOLEAN_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + typeParams: { + equality: options.equality ?? true, + }, + }; +} + +/** + * Search-mode parameters for `encryptedJson({...})`. Single flag — + * `searchableJson` gates the entire `ste_vec` index family (containment + * + path-extraction predicates). Defaults to `true`. + */ +export interface EncryptedJsonOptions { + readonly searchableJson?: boolean; +} + +export interface EncryptedJsonColumnDescriptor { + readonly codecId: typeof CIPHERSTASH_JSON_CODEC_ID; + readonly nativeType: typeof EQL_V2_ENCRYPTED_TYPE; + readonly typeParams: { + readonly searchableJson: boolean; + }; +} + +/** + * `encryptedJson({ searchableJson? })` — TS contract factory matching + * `cipherstash.EncryptedJson({...})`. + */ +export function encryptedJson(options: EncryptedJsonOptions = {}): EncryptedJsonColumnDescriptor { + return { + codecId: CIPHERSTASH_JSON_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + typeParams: { + searchableJson: options.searchableJson ?? true, + }, + }; +} diff --git a/packages/prisma-next/src/exports/contract-space-typing.ts b/packages/prisma-next/src/exports/contract-space-typing.ts new file mode 100644 index 00000000..4dc87910 --- /dev/null +++ b/packages/prisma-next/src/exports/contract-space-typing.ts @@ -0,0 +1,86 @@ +/** + * Typed-narrowing helpers for the on-disk contract-space JSON artefacts + * the cipherstash control descriptor wires into its + * `SqlControlExtensionDescriptor`. + * + * JSON-imported values come back as widened, structurally-typed + * objects: branded fields (`storageHash: StorageHashBase`) and + * discriminated unions (`MigrationPlanOperation['operationClass']`) + * fall back to plain strings, so a direct assignment into the + * descriptor surfaces is a type error. 
The cipherstash MVP previously + * suppressed that error with `as unknown as X` triple-casts, which + * silently masks any future shape drift between the emitted JSON and + * the in-package descriptor. + * + * This module replaces the blind casts with thin runtime assertions + * that fail fast on drift and narrow the JSON inputs to the framework + * types in a single, auditable place. The assertions are intentionally + * minimal — they check the canonical discriminator fields (`storageHash`, + * `space`, `dirName`, `operationClass`, …) rather than re-validating + * the whole emitter contract — which is enough to surface schema-level + * drift while keeping the descriptor module light. + */ + +import type { Contract } from '@prisma-next/contract/types'; +import type { MigrationPlanOperation } from '@prisma-next/framework-components/control'; +import type { MigrationMetadata } from '@prisma-next/migration-tools/metadata'; +import type { SqlStorage } from '@prisma-next/sql-contract/types'; + +function fail(field: string, value: unknown): never { + throw new Error( + `cipherstash contract-space JSON is missing or malformed at "${field}" (saw ${typeof value}). The on-disk JSON drifted from the framework's expected shape — re-run \`prisma-next contract emit\` and \`prisma-next migration plan\` for the cipherstash space.`, + ); +} + +function isRecord(value: unknown): value is Record { + return typeof value === 'object' && value !== null; +} + +/** + * Narrow a JSON-imported `contract.json` value to `Contract`. + * Checks the discriminators the framework relies on at descriptor + * registration time; everything else is consumed downstream by the + * runner / verifier, which performs its own validation. 
+ */ +export function asCipherstashContract(value: unknown): Contract { + if (!isRecord(value)) fail('', value); + if (typeof value['target'] !== 'string') fail('target', value['target']); + if (typeof value['targetFamily'] !== 'string') fail('targetFamily', value['targetFamily']); + const storage = value['storage']; + if (!isRecord(storage)) fail('storage', storage); + if (typeof storage['storageHash'] !== 'string') + fail('storage.storageHash', storage['storageHash']); + return value as unknown as Contract; +} + +/** + * Narrow a JSON-imported `migration.json` value to `MigrationMetadata`. + * The framework's runner consumes the metadata for ordering / + * provenance; missing `to` or a non-string `migrationHash` here means + * a non-emitted artefact slipped into the import path. + */ +export function asCipherstashMigrationMetadata(value: unknown): MigrationMetadata { + if (!isRecord(value)) fail('', value); + if (typeof value['to'] !== 'string') fail('to', value['to']); + if (typeof value['migrationHash'] !== 'string') fail('migrationHash', value['migrationHash']); + return value as unknown as MigrationMetadata; +} + +/** + * Narrow a JSON-imported `ops.json` value to + * `readonly MigrationPlanOperation[]`. Checks each entry carries the + * canonical `id` / `operationClass` discriminator so a malformed entry + * doesn't reach the planner. 
+ */ +export function asCipherstashMigrationOps(value: unknown): readonly MigrationPlanOperation[] { + if (!Array.isArray(value)) fail('', value); + for (let index = 0; index < value.length; index += 1) { + const entry = value[index]; + if (!isRecord(entry)) fail(`[${index}]`, entry); + if (typeof entry['id'] !== 'string') fail(`[${index}].id`, entry['id']); + if (typeof entry['operationClass'] !== 'string') { + fail(`[${index}].operationClass`, entry['operationClass']); + } + } + return value as unknown as readonly MigrationPlanOperation[]; +} diff --git a/packages/prisma-next/src/exports/control.ts b/packages/prisma-next/src/exports/control.ts new file mode 100644 index 00000000..823de233 --- /dev/null +++ b/packages/prisma-next/src/exports/control.ts @@ -0,0 +1,111 @@ +/** + * Control-plane descriptor for the CipherStash extension. + * + * **Contract-space package layout.** The extension's contract + + * migrations are emitted by the same pipeline application authors use: + * + * `prisma-next contract emit` → `/src/contract.{json,d.ts}` + * `prisma-next migration plan` → `/migrations/

/...` + * + * The descriptor wires those JSON artefacts via JSON-import declarations + * so they flow through the consuming application's module resolver + * without filesystem assumptions, and synthesises the canonical + * {@link import('@prisma-next/framework-components/control').MigrationPackage} + * shape for the framework's runner / verifier to consume. + * + * Wired surfaces: + * + * - `contractSpace.{contractJson,migrations,headRef}` — sourced from + * the on-disk artefacts emitted by `build:contract-space`. + * - `types.codecTypes.controlPlaneHooks[...]` + * — one lifecycle hook per cipherstash codec id (string, double, + * bigint, date, boolean, json) that the SQL planner extracts via + * `extractCodecControlHooks` and inlines into the application's + * migration via `planFieldEventOperations`. Implements + * `add_search_config` / `remove_search_config` / rotate behaviour + * for `searchable: true` `Encrypted` columns. + * + * @see docs/architecture docs/adrs/ADR 212 - Contract spaces.md + * (contract-space package layout convention). 
+ */ + +import type { Contract } from '@prisma-next/contract/types'; +import type { SqlControlExtensionDescriptor } from '@prisma-next/family-sql/control'; +import { contractSpaceFromJson } from '@prisma-next/migration-tools/spaces'; +import type { SqlStorage } from '@prisma-next/sql-contract/types'; +import baselineMetadata from '../../migrations/20260601T0000_install_eql_bundle/migration.json' with { + type: 'json', +}; +import baselineOps from '../../migrations/20260601T0000_install_eql_bundle/ops.json' with { + type: 'json', +}; +import headRef from '../../migrations/refs/head.json' with { type: 'json' }; +import contractJson from '../contract.json' with { type: 'json' }; +import { + CIPHERSTASH_BASELINE_MIGRATION_NAME, + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_BOOLEAN_CODEC_ID, + CIPHERSTASH_DATE_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, + CIPHERSTASH_JSON_CODEC_ID, + CIPHERSTASH_STRING_CODEC_ID, +} from '../extension-metadata/constants'; +import { cipherstashPackMeta } from '../extension-metadata/descriptor-meta'; +import { + cipherstashBigIntCodecHooks, + cipherstashBooleanCodecHooks, + cipherstashDateCodecHooks, + cipherstashDoubleCodecHooks, + cipherstashJsonCodecHooks, + cipherstashStringCodecHooks, +} from '../migration/cipherstash-codec'; + +const cipherstashContractSpace = contractSpaceFromJson>({ + contractJson, + migrations: [ + { + dirName: CIPHERSTASH_BASELINE_MIGRATION_NAME, + metadata: baselineMetadata, + ops: baselineOps, + }, + ], + headRef, +}); + +const cipherstashExtensionDescriptor: SqlControlExtensionDescriptor<'postgres'> = { + // Spread pack-meta first so it contributes `kind` / `id` / `familyId` + // / `targetId` / `version` / `authoring` / `types.{codecTypes,storage}` + // — then overlay the contract-space block and the codec lifecycle + // hook on top. The two `types.codecTypes` slots (`codecInstances` + // from pack-meta, `controlPlaneHooks` from this descriptor) coexist + // on the same path and are merged below. 
+ ...cipherstashPackMeta, + contractSpace: cipherstashContractSpace, + /** + * Free-form `types.codecTypes.controlPlaneHooks` block — the SQL + * family's `extractCodecControlHooks` (in `@prisma-next/family-sql/ + * control`) finds hooks via duck-typing on this exact path. Mirrors + * pgvector's wiring at `packages/3-extensions/pgvector/src/exports/ + * control.ts`. + */ + types: { + ...cipherstashPackMeta.types, + codecTypes: { + ...cipherstashPackMeta.types.codecTypes, + controlPlaneHooks: { + [CIPHERSTASH_STRING_CODEC_ID]: cipherstashStringCodecHooks, + [CIPHERSTASH_DOUBLE_CODEC_ID]: cipherstashDoubleCodecHooks, + [CIPHERSTASH_BIGINT_CODEC_ID]: cipherstashBigIntCodecHooks, + [CIPHERSTASH_DATE_CODEC_ID]: cipherstashDateCodecHooks, + [CIPHERSTASH_BOOLEAN_CODEC_ID]: cipherstashBooleanCodecHooks, + [CIPHERSTASH_JSON_CODEC_ID]: cipherstashJsonCodecHooks, + }, + }, + }, + create: () => ({ + familyId: 'sql' as const, + targetId: 'postgres' as const, + }), +}; + +export { cipherstashExtensionDescriptor }; +export default cipherstashExtensionDescriptor; diff --git a/packages/prisma-next/src/exports/middleware.ts b/packages/prisma-next/src/exports/middleware.ts new file mode 100644 index 00000000..d608ba55 --- /dev/null +++ b/packages/prisma-next/src/exports/middleware.ts @@ -0,0 +1,24 @@ +/** + * Public middleware surface for the cipherstash extension. 
+ *
+ * Consumers register the bulk-encrypt middleware in their runtime so
+ * `EncryptedString` envelopes embedded in `INSERT` / `UPDATE` plans get
+ * encrypted in batches before encode runs:
+ *
+ * ```ts
+ * import { createCipherstashRuntimeDescriptor } from '@prisma-next/extension-cipherstash/runtime';
+ * import { bulkEncryptMiddleware } from '@prisma-next/extension-cipherstash/middleware';
+ *
+ * const runtime = createRuntime({
+ *   extensionPacks: [createCipherstashRuntimeDescriptor({ sdk })],
+ *   middleware: [bulkEncryptMiddleware(sdk)],
+ * });
+ * ```
+ *
+ * `SqlRuntimeExtensionDescriptor` does not own a middleware slot, so
+ * the descriptor wrapper (`createCipherstashRuntimeDescriptor`) and
+ * the middleware are composed manually by callers — by convention,
+ * once per cipherstash SDK binding.
+ */
+
+export { bulkEncryptMiddleware } from '../middleware/bulk-encrypt';
diff --git a/packages/prisma-next/src/exports/migration.ts b/packages/prisma-next/src/exports/migration.ts
new file mode 100644
index 00000000..c982daad
--- /dev/null
+++ b/packages/prisma-next/src/exports/migration.ts
@@ -0,0 +1,43 @@
+/**
+ * Public migration-time entry point for the cipherstash extension.
+ *
+ * Re-exports the user-callable factory functions used in hand-written
+ * migrations (or auto-imported by the planner-generated `migration.ts`)
+ * to wire EQL search-config rows alongside structural DDL:
+ *
+ * ```ts
+ * import { Migration, MigrationCLI, createTable } from '@prisma-next/target-postgres/migration';
+ * import { cipherstashAddSearchConfig } from '@prisma-next/extension-cipherstash/migration';
+ *
+ * export default class M extends Migration {
+ *   override get operations() {
+ *     return [
+ *       createTable('public', 'user', [
+ *         { name: 'email', typeSql: 'eql_v2_encrypted', defaultSql: '', nullable: false },
+ *         { name: 'id', typeSql: 'text', defaultSql: '', nullable: false },
+ *       ]),
+ *       cipherstashAddSearchConfig({ table: 'user', column: 'email', index: 'unique' }),
+ *     ];
+ *   }
+ * }
+ *
+ * MigrationCLI.run(import.meta.url, M);
+ * ```
+ *
+ * Identical ergonomics to `createTable` / `setNotNull` etc. from
+ * `@prisma-next/target-postgres/migration`. The codec lifecycle hook
+ * for `Encrypted` columns calls these factories automatically
+ * when planning a contract diff.
+ *
+ * @see ADR 195 — Planner IR with two renderers.
+ * @see ADR 213 — Codec lifecycle hooks.
+ */
+
+export type {
+  CipherstashSearchConfigArgs,
+  CipherstashSearchIndex,
+} from '../migration/call-classes';
+export {
+  cipherstashAddSearchConfig,
+  cipherstashRemoveSearchConfig,
+} from '../migration/call-classes';
diff --git a/packages/prisma-next/src/exports/operation-types.ts b/packages/prisma-next/src/exports/operation-types.ts
new file mode 100644
index 00000000..f256c437
--- /dev/null
+++ b/packages/prisma-next/src/exports/operation-types.ts
@@ -0,0 +1,16 @@
+/**
+ * Operation type definitions for the cipherstash extension.
+ *
+ * Re-export from the types module for the public
+ * `@prisma-next/extension-cipherstash/operation-types` subpath.
The + * contract emitter pulls these via the `types.operationTypes` / + * `types.queryOperationTypes` import declarations on + * `cipherstashPackMeta` (see `../extension-metadata/descriptor-meta.ts`); user code + * may also import them directly when authoring TS-side type + * compositions. + * + * @see ADR 211 — Extension operator surface (namespaced replacement + * operators must project type-visibility through `QueryOperationTypes`). + */ + +export type { QueryOperationTypes } from '../types/operation-types'; diff --git a/packages/prisma-next/src/exports/pack.ts b/packages/prisma-next/src/exports/pack.ts new file mode 100644 index 00000000..36080e0b --- /dev/null +++ b/packages/prisma-next/src/exports/pack.ts @@ -0,0 +1,13 @@ +/** + * Pack entry point for the cipherstash extension. + * + * Re-exports the SDK-free pack metadata so TS contract authoring + * (`defineContract({ extensionPacks: { cipherstash: cipherstashPack } })`) + * can enable the `cipherstash.*` PSL/TS namespace and the storage type + * registration without pulling in any runtime code (envelope, SDK, + * codec runtime, middleware). + * + * Mirrors `packages/3-extensions/pgvector/src/exports/pack.ts`. + */ + +export { cipherstashPackMeta as default } from '../extension-metadata/descriptor-meta'; diff --git a/packages/prisma-next/src/exports/runtime.ts b/packages/prisma-next/src/exports/runtime.ts new file mode 100644 index 00000000..1260c844 --- /dev/null +++ b/packages/prisma-next/src/exports/runtime.ts @@ -0,0 +1,171 @@ +/** + * Runtime-plane entry point for the CipherStash extension. + * + * Consumed at query time by application runtimes that need to encode / + * decode `cipherstash/string@1` columns (envelope class) and talk to the + * CipherStash SDK shape the codec runtime + bulk-encrypt middleware + * depend on. 
+ *
+ * The runtime entry point is deliberately separate from `./control`
+ * (descriptor, codec lifecycle hook, contract-space artefacts) so apps
+ * that only emit migrations against cipherstash never load the runtime,
+ * and apps that only run queries never load the migration-time
+ * descriptor — the control plane and runtime plane are tree-shakable
+ * along this seam.
+ *
+ * `createCipherstashRuntimeDescriptor({ sdk })` is the recommended
+ * composition entry — it bundles the SDK-bound codec, the parameterized
+ * codec descriptor, and the runtime-plane `codecInstances` slot into a
+ * single `SqlRuntimeExtensionDescriptor<'postgres'>` mirroring
+ * pgvector's `runtime.ts` precedent. The bulk-encrypt middleware ships
+ * separately at `@prisma-next/extension-cipherstash/middleware` because
+ * `SqlRuntimeExtensionDescriptor` does not own a middleware slot;
+ * consumers register it via `createRuntime({ middleware:
+ * [bulkEncryptMiddleware(sdk)] })`.
+ */
+
+import type { SqlRuntimeExtensionDescriptor } from '@prisma-next/sql-runtime';
+import { cipherstashQueryOperations } from '../execution/operators';
+import { createParameterizedCodecDescriptors } from '../execution/parameterized';
+import type { CipherstashSdk } from '../execution/sdk';
+import {
+  CIPHERSTASH_EXTENSION_VERSION,
+  CIPHERSTASH_SPACE_ID,
+} from '../extension-metadata/constants';
+
+export type { CipherstashStringCodec } from '../execution/codec-runtime';
+export {
+  CIPHERSTASH_STRING_CODEC_ID,
+  CipherstashCellCodec,
+  createCipherstashBigIntCodec,
+  createCipherstashBooleanCodec,
+  createCipherstashDateCodec,
+  createCipherstashDoubleCodec,
+  createCipherstashJsonCodec,
+  createCipherstashStringCodec,
+} from '../execution/codec-runtime';
+export type { DecryptAllOptions } from '../execution/decrypt-all';
+export { decryptAll } from '../execution/decrypt-all';
+export type {
+  EncryptedBigIntFromInternalArgs,
+  EncryptedBigIntHandle,
+} from '../execution/envelope-bigint';
+export { EncryptedBigInt } from '../execution/envelope-bigint';
+export type {
+  EncryptedBooleanFromInternalArgs,
+  EncryptedBooleanHandle,
+} from '../execution/envelope-boolean';
+export { EncryptedBoolean } from '../execution/envelope-boolean';
+export type {
+  EncryptedDateFromInternalArgs,
+  EncryptedDateHandle,
+} from '../execution/envelope-date';
+export { EncryptedDate } from '../execution/envelope-date';
+export type {
+  EncryptedDoubleFromInternalArgs,
+  EncryptedDoubleHandle,
+} from '../execution/envelope-double';
+export { EncryptedDouble } from '../execution/envelope-double';
+export type {
+  EncryptedJsonFromInternalArgs,
+  EncryptedJsonHandle,
+} from '../execution/envelope-json';
+export { EncryptedJson } from '../execution/envelope-json';
+export type {
+  EncryptedStringFromInternalArgs,
+  EncryptedStringHandle,
+} from '../execution/envelope-string';
+export { EncryptedString } from '../execution/envelope-string';
+export {
+  cipherstashAsc,
+  cipherstashDesc,
+  cipherstashJsonbGet,
+  cipherstashJsonbPathQueryFirst,
+} from '../execution/helpers';
+export type {
+  CipherstashAnyParams,
+  CipherstashBooleanParams,
+  CipherstashDateParams,
+  CipherstashJsonParams,
+  CipherstashNumericParams,
+  CipherstashStringParams,
+} from '../execution/parameterized';
+export {
+  createParameterizedCodecDescriptors,
+  encryptedBigIntParamsSchema,
+  encryptedBooleanParamsSchema,
+  encryptedDateParamsSchema,
+  encryptedDoubleParamsSchema,
+  encryptedJsonParamsSchema,
+  encryptedStringParamsSchema,
+  renderEncryptedBigIntOutputType,
+  renderEncryptedBooleanOutputType,
+  renderEncryptedDateOutputType,
+  renderEncryptedDoubleOutputType,
+  renderEncryptedJsonOutputType,
+  renderEncryptedStringOutputType,
+} from '../execution/parameterized';
+export type {
+  CipherstashBulkDecryptArgs,
+  CipherstashBulkEncryptArgs,
+  CipherstashRoutingKey,
+  CipherstashSdk,
+  CipherstashSingleDecryptArgs,
+} from '../execution/sdk';
+export {
+  CIPHERSTASH_BIGINT_CODEC_ID,
+  CIPHERSTASH_BOOLEAN_CODEC_ID,
+  CIPHERSTASH_DATE_CODEC_ID,
+  CIPHERSTASH_DOUBLE_CODEC_ID,
+  CIPHERSTASH_JSON_CODEC_ID,
+} from '../extension-metadata/constants';
+
+export { CIPHERSTASH_EXTENSION_VERSION };
+
+export interface CreateCipherstashRuntimeDescriptorOptions {
+  readonly sdk: CipherstashSdk;
+}
+
+/**
+ * Compose the SDK-bound codec runtime + parameterized codec descriptors
+ * + runtime-plane codec-instances metadata into a single
+ * `SqlRuntimeExtensionDescriptor<'postgres'>`.
+ *
+ * The descriptor is per-SDK: cipherstash's codec captures the SDK at
+ * `decode` time (read-side single-cell `decrypt`) and the bulk-encrypt
+ * middleware captures it at `beforeExecute` time (write-side bulk
+ * round-trip). Multi-tenant deployments construct one descriptor per
+ * tenant SDK so per-tenant key material never crosses runtimes.
+ *
+ * Mirrors `packages/3-extensions/pgvector/src/exports/runtime.ts` —
+ * pgvector's vectorRuntimeDescriptor is a static default-export because
+ * its codec is fully stateless; cipherstash needs the factory wrapper
+ * because the codec depends on `sdk`.
+ */
+export function createCipherstashRuntimeDescriptor(
+  opts: CreateCipherstashRuntimeDescriptorOptions,
+): SqlRuntimeExtensionDescriptor<'postgres'> {
+  const { sdk } = opts;
+  const parameterizedDescriptors = createParameterizedCodecDescriptors(sdk);
+
+  return {
+    kind: 'extension' as const,
+    id: CIPHERSTASH_SPACE_ID,
+    version: CIPHERSTASH_EXTENSION_VERSION,
+    familyId: 'sql' as const,
+    targetId: 'postgres' as const,
+    types: {
+      codecTypes: {
+        codecDescriptors: parameterizedDescriptors,
+      },
+    },
+    codecs: () => parameterizedDescriptors,
+    queryOperations: () => cipherstashQueryOperations(),
+    create() {
+      return {
+        familyId: 'sql' as const,
+        targetId: 'postgres' as const,
+      };
+    },
+  };
+}
diff --git a/packages/prisma-next/src/exports/stack.ts b/packages/prisma-next/src/exports/stack.ts
new file mode 100644
index 00000000..78e5692a
--- /dev/null
+++ b/packages/prisma-next/src/exports/stack.ts
@@ -0,0 +1,36 @@
+/**
+ * `@cipherstash/prisma-next/stack` — one-call setup for the
+ * `@cipherstash/stack` SDK against a Prisma Next contract.
+ *
+ * The three exports here form a layered API. Most consumers want
+ * {@link cipherstashFromStack}; the two primitives are exposed for
+ * advanced users who need to interpose custom logic.
+ *
+ * - {@link deriveStackSchemas} — pure function, contract.json →
+ *   `EncryptedTable[]`. Use to construct `Encryption({ schemas })`
+ *   yourself while keeping schemas in lockstep with the contract.
+ *
+ * - {@link createCipherstashSdk} — wraps an initialised stack
+ *   `EncryptionClient` in the framework-native `CipherstashSdk`
+ *   shape. Use when you've constructed the client yourself (custom
+ *   keyset, multi-tenant routing).
+ *
+ * - {@link cipherstashFromStack} — the all-in-one factory.
+ *   Returns ready-to-spread arrays for `postgres({...})`.
+ *
+ * This subpath imports `@cipherstash/stack` directly.
Consumers who + * implement `CipherstashSdk` against a different SDK should use + * `./runtime` and `./middleware` instead and pay no + * `@cipherstash/stack` bundle cost. + */ + +export type { ContractStorageView } from '../stack/derive-schemas' +export { deriveStackSchemas } from '../stack/derive-schemas' + +export type { + CipherstashFromStackOptions, + CipherstashFromStackResult, +} from '../stack/from-stack' +export { cipherstashFromStack } from '../stack/from-stack' + +export { createCipherstashSdk } from '../stack/sdk-adapter' diff --git a/packages/prisma-next/src/extension-metadata/codec-metadata.ts b/packages/prisma-next/src/extension-metadata/codec-metadata.ts new file mode 100644 index 00000000..f8ac68c3 --- /dev/null +++ b/packages/prisma-next/src/extension-metadata/codec-metadata.ts @@ -0,0 +1,121 @@ +/** + * SDK-free codec used in pack-meta (`cipherstashPackMeta.types.codecTypes + * .codecInstances`). Pack-meta consumers only read codec *metadata* + * (`typeId`, `targetTypes`, `traits`, `renderOutputType`) at contract + * emit time — they never call `encode`/`decode`. + * + * The SDK-bound runtime codec for actual `encode`/`decode` lives in + * `../execution/codec-runtime`; it is resolved through + * `RuntimeParameterizedCodecDescriptor.factory` at runtime instead of + * through pack-meta's `codecInstances`. + * + * Keeping the SDK-free metadata in its own module — and *not* importing + * the runtime `CipherstashStringCodec` class — preserves the control + * vs runtime split. Control-plane consumers (`exports/control.ts`, + * `exports/pack.ts`) pull this file but never touch the envelope, the + * SDK interface, or the bulk-encrypt middleware. The bundling-isolation + * test pins this property by snapshotting that the control entry's + * chunk graph does not transitively load `envelope-*.mjs`. 
+ * + * `encode`/`decode` throw with a clear hint in the misuse case so + * accidental wiring of the metadata codec into a real runtime path + * surfaces immediately instead of silently no-op'ing. + */ + +import type { JsonValue } from '@prisma-next/contract/types'; +import { type AnyCodecDescriptor, CodecImpl } from '@prisma-next/framework-components/codec'; +import { + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_BOOLEAN_CODEC_ID, + CIPHERSTASH_CODEC_TRAITS, + CIPHERSTASH_DATE_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, + CIPHERSTASH_JSON_CODEC_ID, + CIPHERSTASH_STRING_CODEC_ID, + EQL_V2_ENCRYPTED_TYPE, +} from './constants'; + +function makeMetadataDescriptor(codecId: string, typeName: string): AnyCodecDescriptor { + return { + codecId, + traits: CIPHERSTASH_CODEC_TRAITS[codecId] ?? [], + targetTypes: [EQL_V2_ENCRYPTED_TYPE], + meta: { db: { sql: { postgres: { nativeType: EQL_V2_ENCRYPTED_TYPE } } } }, + paramsSchema: { + '~standard': { + version: 1, + vendor: 'cipherstash', + validate: (value: unknown) => ({ value }), + }, + }, + isParameterized: false, + renderOutputType: () => typeName, + factory: () => () => { + throw new Error('cipherstash codec: metadata descriptor factory is not callable'); + }, + }; +} + +class CipherstashCodecMetadata extends CodecImpl { + readonly #typeName: string; + + constructor(descriptor: AnyCodecDescriptor, typeName: string) { + super(descriptor); + this.#typeName = typeName; + } + + async encode(): Promise { + throw new Error( + 'cipherstash codec: encode called on the pack-meta metadata codec. ' + + 'Construct a runtime descriptor via `createCipherstashRuntimeDescriptor({ sdk })` and use that instead.', + ); + } + + async decode(): Promise { + throw new Error( + 'cipherstash codec: decode called on the pack-meta metadata codec. 
' + + 'Construct a runtime descriptor via `createCipherstashRuntimeDescriptor({ sdk })` and use that instead.', + ); + } + + encodeJson(): JsonValue { + const marker = `$${this.#typeName.charAt(0).toLowerCase()}${this.#typeName.slice(1)}`; + return { [marker]: '' } as JsonValue; + } + + decodeJson(): unknown { + throw new Error( + 'cipherstash codec: decodeJson is not supported; envelopes do not round-trip through JSON.', + ); + } +} + +export const cipherstashStringCodecMetadata = new CipherstashCodecMetadata( + makeMetadataDescriptor(CIPHERSTASH_STRING_CODEC_ID, 'EncryptedString'), + 'EncryptedString', +); + +export const cipherstashDoubleCodecMetadata = new CipherstashCodecMetadata( + makeMetadataDescriptor(CIPHERSTASH_DOUBLE_CODEC_ID, 'EncryptedDouble'), + 'EncryptedDouble', +); + +export const cipherstashBigIntCodecMetadata = new CipherstashCodecMetadata( + makeMetadataDescriptor(CIPHERSTASH_BIGINT_CODEC_ID, 'EncryptedBigInt'), + 'EncryptedBigInt', +); + +export const cipherstashDateCodecMetadata = new CipherstashCodecMetadata( + makeMetadataDescriptor(CIPHERSTASH_DATE_CODEC_ID, 'EncryptedDate'), + 'EncryptedDate', +); + +export const cipherstashBooleanCodecMetadata = new CipherstashCodecMetadata( + makeMetadataDescriptor(CIPHERSTASH_BOOLEAN_CODEC_ID, 'EncryptedBoolean'), + 'EncryptedBoolean', +); + +export const cipherstashJsonCodecMetadata = new CipherstashCodecMetadata( + makeMetadataDescriptor(CIPHERSTASH_JSON_CODEC_ID, 'EncryptedJson'), + 'EncryptedJson', +); diff --git a/packages/prisma-next/src/extension-metadata/constants.ts b/packages/prisma-next/src/extension-metadata/constants.ts new file mode 100644 index 00000000..77fae13f --- /dev/null +++ b/packages/prisma-next/src/extension-metadata/constants.ts @@ -0,0 +1,235 @@ +/** + * Static names and identifiers used across CipherStash's contract space. 
+ * + * Centralising the strings here so: + * - the contract IR (`./contract`), the migration ops (`./migrations`), + * the head ref (`./head-ref`), and the descriptor (`../exports/control`) + * all reference the same values without typos; + * - the `cipherstash:*` invariantId namespace is locked in one place + * (once published, an invariantId cannot be renamed). + * + * The space identifier `'cipherstash'` is what the framework writes to + * the consuming app's `migrations/cipherstash/` directory and what the marker table's + * `space` column carries for CipherStash-owned rows. + */ + +export const CIPHERSTASH_SPACE_ID = 'cipherstash'; + +/** + * Version advertised by both `cipherstashPackMeta.version` (control plane) + * and the SDK-bound `SqlRuntimeExtensionDescriptor` (runtime plane). + * + * Single source of truth so the descriptor surfaces and the contract-emit + * pack metadata cannot drift apart; consumed downstream by capability + * gating and contract round-trips. + */ +export const CIPHERSTASH_EXTENSION_VERSION = '0.0.1' as const; + +/** + * Codec id the application-side `Encrypted` lowering targets. + * Lives here so the codec lifecycle hook (which emits + * `add_search_config` / `remove_search_config` ops on field events) and + * the descriptor's `controlPlaneHooks` wiring share the same constant. + */ +export const CIPHERSTASH_STRING_CODEC_ID = 'cipherstash/string@1'; + +/** + * Codec id for the `cipherstash/double@1` codec — IEEE-754 double + * plaintext (`number`) lowering to `eql_v2_encrypted` with EQL + * `cast_as = 'double'`. The id encodes plaintext shape (not storage + * type) so each cipherstash envelope class binds 1:1 with a codec + * id. + */ +export const CIPHERSTASH_DOUBLE_CODEC_ID = 'cipherstash/double@1'; + +/** + * Codec id for the `cipherstash/bigint@1` codec — JS `bigint` + * plaintext lowering to `eql_v2_encrypted` with EQL + * `cast_as = 'big_int'`. 
+ */ +export const CIPHERSTASH_BIGINT_CODEC_ID = 'cipherstash/bigint@1'; + +/** + * Codec id for the `cipherstash/date@1` codec — `Date` plaintext + * (calendar date) lowering to `eql_v2_encrypted` with EQL + * `cast_as = 'date'`. + */ +export const CIPHERSTASH_DATE_CODEC_ID = 'cipherstash/date@1'; + +/** + * Codec id for the `cipherstash/boolean@1` codec — `boolean` + * plaintext lowering to `eql_v2_encrypted` with EQL + * `cast_as = 'boolean'`. + */ +export const CIPHERSTASH_BOOLEAN_CODEC_ID = 'cipherstash/boolean@1'; + +/** + * Codec id for the `cipherstash/json@1` codec — JSON-serialisable + * `unknown` plaintext lowering to `eql_v2_encrypted` with EQL + * `cast_as = 'jsonb'`. + */ +export const CIPHERSTASH_JSON_CODEC_ID = 'cipherstash/json@1'; + +/** + * The closed set of every codec id this package owns. Single source of + * truth for the bulk-encrypt middleware filter and any other call site + * that needs "is this a cipherstash codec id?" — using a closed set + * (rather than a `cipherstash/` prefix match) means the middleware + * never accidentally claims jurisdiction over a future cipherstash + * codec that hasn't been wired through the rest of the package yet + * (envelope subclass, codec hook, runtime descriptor, etc.). When a + * new codec is introduced its id lands here in the same diff that + * wires the rest of its surface; out-of-package consumers (e.g. tests + * pinning the closed set) catch a missed wiring with one assertion. + * + * Order mirrors `createParameterizedCodecDescriptors`'s descriptor + * list so an iteration here matches the iteration there cell-for-cell. + */ +export const CIPHERSTASH_CODEC_IDS = [ + CIPHERSTASH_STRING_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_DATE_CODEC_ID, + CIPHERSTASH_BOOLEAN_CODEC_ID, + CIPHERSTASH_JSON_CODEC_ID, +] as const; + +/** + * Set form of {@link CIPHERSTASH_CODEC_IDS} for `O(1)` membership + * tests (the bulk-encrypt middleware's hot per-`ParamRef` filter). 
+ */ +export const CIPHERSTASH_CODEC_ID_SET: ReadonlySet = new Set(CIPHERSTASH_CODEC_IDS); + +/** + * Closed union of every cipherstash codec id this package owns. + * Drives compile-time exhaustiveness for codec-id-keyed dispatch + * tables (e.g. `coerceToEnvelope` in `src/execution/operators.ts`) + * and for the free-standing helpers in `src/execution/helpers.ts` + * that validate a column's codec id against the cipherstash set. + */ +export type CipherstashCodecId = (typeof CIPHERSTASH_CODEC_IDS)[number]; + +/** + * Type-guard form of {@link CIPHERSTASH_CODEC_ID_SET}. Narrows + * `string` to {@link CipherstashCodecId} for downstream + * cipherstash-only branches (e.g. helper-side codec validation). + */ +export function isCipherstashCodecId(codecId: string): codecId is CipherstashCodecId { + return CIPHERSTASH_CODEC_ID_SET.has(codecId); +} + +/** + * Cipherstash-namespaced codec traits. Used as the dispatch key for + * the multi-codec predicate operators in `src/execution/operators.ts` + * — operators register with `self: { traits: ['cipherstash:'] }` + * and the model accessor (`packages/3-extensions/sql-orm-client/src/ + * model-accessor.ts`) attaches the operator to every codec descriptor + * whose `traits` list contains the same trait identifier. + * + * The `cipherstash:` prefix is load-bearing — it isolates these + * traits from the framework's built-in trait surface (`'equality'`, + * `'orderable'`, `'numeric'`, `'boolean'`, ...) so adding them to a + * cipherstash codec does not silently re-enable a built-in operator + * (e.g. `equality` would re-attach the framework's `eq` which lowers + * to standard SQL `=` — wrong for EQL ciphers, see + * `equality-trait-removal.test.ts`). The cipherstash extension owns + * its namespace; collisions with a future framework trait are not + * possible. 
+ * + * Codec ↔ trait mapping (see ADR 214): + * + * - `cipherstash:equality` — string, double, bigint, date, boolean + * - `cipherstash:order-and-range` — string, double, bigint, date + * - `cipherstash:free-text-search` — string + * - `cipherstash:searchable-json` — json + * + * Each predicate operator registers under exactly one of these + * traits; the codec ↔ operator visibility surface follows from the + * trait set declared on each codec descriptor. + */ +export const CIPHERSTASH_TRAIT_EQUALITY = 'cipherstash:equality' as const; +export const CIPHERSTASH_TRAIT_ORDER_AND_RANGE = 'cipherstash:order-and-range' as const; +export const CIPHERSTASH_TRAIT_FREE_TEXT_SEARCH = 'cipherstash:free-text-search' as const; +export const CIPHERSTASH_TRAIT_SEARCHABLE_JSON = 'cipherstash:searchable-json' as const; + +/** + * Per-codec trait sets keyed by codec id. Each codec descriptor in + * `parameterized.ts` / `codec-runtime.ts` / `codec-metadata.ts` reads + * the traits for its codec id from this map; the + * `equality-trait-removal.test.ts` regression also reads from here so + * the three trait declarations (runtime / parameterized / pack-meta) + * stay agreement-by-construction. + */ +// Local re-alias of the framework's `CodecTrait` union, used solely as +// the cast target below. Type-only import — adds no runtime +// dependency. 
+type FrameworkCodecTrait = import('@prisma-next/framework-components/codec').CodecTrait; + +const CIPHERSTASH_CODEC_TRAITS_RAW: Readonly> = { + [CIPHERSTASH_STRING_CODEC_ID]: [ + CIPHERSTASH_TRAIT_EQUALITY, + CIPHERSTASH_TRAIT_FREE_TEXT_SEARCH, + CIPHERSTASH_TRAIT_ORDER_AND_RANGE, + ], + [CIPHERSTASH_DOUBLE_CODEC_ID]: [CIPHERSTASH_TRAIT_EQUALITY, CIPHERSTASH_TRAIT_ORDER_AND_RANGE], + [CIPHERSTASH_BIGINT_CODEC_ID]: [CIPHERSTASH_TRAIT_EQUALITY, CIPHERSTASH_TRAIT_ORDER_AND_RANGE], + [CIPHERSTASH_DATE_CODEC_ID]: [CIPHERSTASH_TRAIT_EQUALITY, CIPHERSTASH_TRAIT_ORDER_AND_RANGE], + [CIPHERSTASH_BOOLEAN_CODEC_ID]: [CIPHERSTASH_TRAIT_EQUALITY], + [CIPHERSTASH_JSON_CODEC_ID]: [CIPHERSTASH_TRAIT_SEARCHABLE_JSON], +}; + +// `CodecDescriptor.traits` is typed `readonly CodecTrait[]` where +// `CodecTrait` is a closed union of framework built-ins +// (`'equality' | 'order' | 'boolean' | 'numeric' | 'textual'`). The +// cipherstash trait strings live in the extension-private +// `cipherstash:` namespace and are intentionally not part of that +// union — they sit in their own namespace so adding them here cannot +// silently re-attach a framework built-in (e.g. `'equality'` would +// re-attach the wrong-SQL `eq` footgun, see +// `equality-trait-removal.test.ts`). The model-accessor's trait +// dispatch widens `descriptor.traits` to `readonly string[]` before +// the membership check (`packages/3-extensions/sql-orm-client/src/ +// model-accessor.ts:74-80`), so the extension-namespaced strings +// round-trip through the registry unchanged at runtime; the cast +// here is purely a type-level adapter from an extension namespace +// into the framework union. AGENTS.md requires the rationale comment +// alongside any `as unknown as` cast. +export const CIPHERSTASH_CODEC_TRAITS = CIPHERSTASH_CODEC_TRAITS_RAW as unknown as Readonly< + Record +>; + +/** Schema CipherStash installs its functions/operators/casts/types into. 
*/ +export const EQL_V2_SCHEMA = 'eql_v2'; + +/** Configuration table used by EQL's per-column index configuration. */ +export const EQL_V2_CONFIGURATION_TABLE = 'eql_v2_configuration'; + +/** Enum type backing the `state` column on `eql_v2_configuration`. */ +export const EQL_V2_CONFIGURATION_STATE_TYPE = 'eql_v2_configuration_state'; + +/** JSONB-domain composite type user `Encrypted` columns reference. */ +export const EQL_V2_ENCRYPTED_TYPE = 'eql_v2_encrypted'; + +/** + * Migration directory name for the baseline. + * + * Per the framework's per-space layout convention this name is + * preserved verbatim when the framework writes the package to + * `migrations/cipherstash//` in the user's repo. + */ +export const CIPHERSTASH_BASELINE_MIGRATION_NAME = '20260601T0000_install_eql_bundle'; + +/** + * `cipherstash:*` invariantIds emitted by the baseline migration. Each + * `cipherstash:*` id, once published, is immutable: downstream + * consumers (other extensions, the marker table) reference them by + * literal string match. + * + * Today the baseline emits a single op (`installBundle`); the bundle + * SQL is the source of truth for every typed object it creates inside + * the `eql_v2` schema. New bundle versions or additional structural + * ops will mint new `cipherstash:*` ids alongside this entry. + */ +export const CIPHERSTASH_INVARIANTS = { + installBundle: 'cipherstash:install-eql-bundle-v1', +} as const; diff --git a/packages/prisma-next/src/extension-metadata/descriptor-meta.ts b/packages/prisma-next/src/extension-metadata/descriptor-meta.ts new file mode 100644 index 00000000..06682ac9 --- /dev/null +++ b/packages/prisma-next/src/extension-metadata/descriptor-meta.ts @@ -0,0 +1,164 @@ +/** + * Pack metadata for the cipherstash extension. + * + * Mirrors `packages/3-extensions/pgvector/src/extension-metadata/descriptor-meta.ts` — + * the metadata block that gets serialized into `contract.json`'s + * `extensionPacks.cipherstash` slot at emit time. 
+ *
+ * SDK-free: the runtime descriptor layers SDK-bound codec instances on
+ * top at execution time. The `codecInstances` slot here uses the
+ * metadata-only codec from `./codec-metadata` because pack-meta
+ * consumers only read codec metadata (typeId, targetTypes, traits,
+ * renderOutputType); runtime encode/decode always go through the
+ * SDK-bound codec produced by
+ * `RuntimeParameterizedCodecDescriptor.factory` (see
+ * `./parameterized`).
+ *
+ * The control descriptor in `../exports/control.ts` spreads this pack
+ * meta so the framework's contract emitter sees `authoring`,
+ * `types.codecTypes.codecInstances`, and `types.storage` alongside
+ * the contract-space and codec-lifecycle-hooks blocks already wired
+ * by the codec lifecycle hook block.
+ */
+
+import { cipherstashAuthoringTypes } from '../contract-authoring';
+import {
+  cipherstashBigIntCodecMetadata,
+  cipherstashBooleanCodecMetadata,
+  cipherstashDateCodecMetadata,
+  cipherstashDoubleCodecMetadata,
+  cipherstashJsonCodecMetadata,
+  cipherstashStringCodecMetadata,
+} from './codec-metadata';
+import {
+  CIPHERSTASH_BIGINT_CODEC_ID,
+  CIPHERSTASH_BOOLEAN_CODEC_ID,
+  CIPHERSTASH_DATE_CODEC_ID,
+  CIPHERSTASH_DOUBLE_CODEC_ID,
+  CIPHERSTASH_EXTENSION_VERSION,
+  CIPHERSTASH_JSON_CODEC_ID,
+  CIPHERSTASH_SPACE_ID,
+  CIPHERSTASH_STRING_CODEC_ID,
+  EQL_V2_ENCRYPTED_TYPE,
+} from './constants';
+
+export { CIPHERSTASH_EXTENSION_VERSION };
+
+export const cipherstashPackMeta = {
+  kind: 'extension',
+  id: CIPHERSTASH_SPACE_ID,
+  familyId: 'sql',
+  targetId: 'postgres',
+  version: CIPHERSTASH_EXTENSION_VERSION,
+  authoring: {
+    type: cipherstashAuthoringTypes,
+  },
+  types: {
+    codecTypes: {
+      codecInstances: [
+        cipherstashStringCodecMetadata,
+        cipherstashDoubleCodecMetadata,
+        cipherstashBigIntCodecMetadata,
+        cipherstashDateCodecMetadata,
+        cipherstashBooleanCodecMetadata,
+        cipherstashJsonCodecMetadata,
+      ],
+      // Drives the contract emitter to add
+      // `import type { CodecTypes as CipherstashTypes } from '@prisma-next/extension-cipherstash/codec-types'`
+      // and to intersect `CipherstashTypes` into the generated
+      // `CodecTypes` type alias. Without this slot the codec-id-keyed
      // type lookups (`CodecTypes['cipherstash/string@1']['traits']`)
+      // collapse to `unknown` on the consumer side, and the
+      // trait-dispatched operators (`cipherstashGt`, …) never surface
+      // on real model accessors. Mirrors pgvector's `import:` slot.
+      import: {
+        package: '@prisma-next/extension-cipherstash/codec-types',
+        named: 'CodecTypes',
+        alias: 'CipherstashTypes',
+      },
+      // `renderOutputType` returns the bare envelope type name (e.g.
+      // `EncryptedString`, `EncryptedDouble`) for parameterized
+      // cipherstash columns; the contract emitter needs to import each
+      // type alongside its occurrence so the generated `.d.ts`
+      // typechecks cleanly. Mirrors pgvector's `Vector` typeImports
+      // declaration.
+      typeImports: [
+        {
+          package: '@prisma-next/extension-cipherstash/runtime',
+          named: 'EncryptedString',
+          alias: 'EncryptedString',
+        },
+        {
+          package: '@prisma-next/extension-cipherstash/runtime',
+          named: 'EncryptedDouble',
+          alias: 'EncryptedDouble',
+        },
+        {
+          package: '@prisma-next/extension-cipherstash/runtime',
+          named: 'EncryptedBigInt',
+          alias: 'EncryptedBigInt',
+        },
+        {
+          package: '@prisma-next/extension-cipherstash/runtime',
+          named: 'EncryptedDate',
+          alias: 'EncryptedDate',
+        },
+        {
+          package: '@prisma-next/extension-cipherstash/runtime',
+          named: 'EncryptedBoolean',
+          alias: 'EncryptedBoolean',
+        },
+        {
+          package: '@prisma-next/extension-cipherstash/runtime',
+          named: 'EncryptedJson',
+          alias: 'EncryptedJson',
+        },
+      ],
+    },
+    queryOperationTypes: {
+      import: {
+        package: '@prisma-next/extension-cipherstash/operation-types',
+        named: 'QueryOperationTypes',
+        alias: 'CipherstashQueryOperationTypes',
+      },
+    },
+    storage: [
+      {
+        typeId: CIPHERSTASH_STRING_CODEC_ID,
+        familyId: 'sql',
+        targetId: 'postgres',
+        nativeType: EQL_V2_ENCRYPTED_TYPE,
+      },
+      {
+        typeId: CIPHERSTASH_DOUBLE_CODEC_ID,
+        familyId: 'sql',
+        targetId: 'postgres',
+        nativeType: EQL_V2_ENCRYPTED_TYPE,
+      },
+      {
+        typeId: CIPHERSTASH_BIGINT_CODEC_ID,
+        familyId: 'sql',
+        targetId: 'postgres',
+        nativeType: EQL_V2_ENCRYPTED_TYPE,
+      },
+      {
+        typeId: CIPHERSTASH_DATE_CODEC_ID,
+        familyId: 'sql',
+        targetId: 'postgres',
+        nativeType: EQL_V2_ENCRYPTED_TYPE,
+      },
+      {
+        typeId: CIPHERSTASH_BOOLEAN_CODEC_ID,
+        familyId: 'sql',
+        targetId: 'postgres',
+        nativeType: EQL_V2_ENCRYPTED_TYPE,
+      },
+      {
+        typeId: CIPHERSTASH_JSON_CODEC_ID,
+        familyId: 'sql',
+        targetId: 'postgres',
+        nativeType: EQL_V2_ENCRYPTED_TYPE,
+      },
+    ],
+  },
+} as const;
diff --git a/packages/prisma-next/src/middleware/bulk-encrypt.ts b/packages/prisma-next/src/middleware/bulk-encrypt.ts
new file mode 100644
index 00000000..d8ead0be
--- /dev/null
+++ b/packages/prisma-next/src/middleware/bulk-encrypt.ts
@@ -0,0 +1,233 @@
+/**
+ * Bulk-encrypt middleware for cipherstash envelopes.
+ *
+ * The middleware sits in the SQL runtime's `beforeExecute` chain and:
+ *
+ * 1. Walks the lowered query AST (`InsertAst` / `UpdateAst`) and stamps
+ *    `(table, column)` routing context onto every cipherstash envelope
+ *    (any `EncryptedEnvelopeBase` subclass) embedded in a `ParamRef`.
+ *    The handle's `(table, column)` slots are the canonical input to
+ *    {@link groupByRoutingKey}; this walk is the single place the AST's
+ *    structural column metadata gets attached to the envelopes the SDK
+ *    will see.
+ *
+ * 2. Iterates `params.entries()` to collect every cipherstash-codec'd
+ *    `ParamRef` (matched against the closed
+ *    {@link CIPHERSTASH_CODEC_ID_SET} — see the rationale on the
+ *    constant in `extension-metadata/constants.ts`), groups them by
+ *    routing key, and issues exactly one `sdk.bulkEncrypt(...)` call
+ *    per group.
 * Routing-key derivation is `(table, column)`; per-group
 * homogeneity-by-column means each batch is naturally typed (every
 * cell in a `(table, column)` group has the same codec id, hence
 * the same plaintext type), so the SDK's polymorphic `values:
 * ReadonlyArray` surface does not need narrowing inside
 * this middleware.
 *
 * 3. Stamps each returned ciphertext onto the envelope's handle via
 * `setHandleCiphertext` AND writes the **encoded wire-format
 * string** (the `eql_v2_encrypted` composite-text literal produced
 * by {@link encodeEqlV2EncryptedWire}) into the param slot via
 * `params.replaceValues`. The wire format is what the pg driver
 * can serialise directly — passing the envelope object itself
 * would fail at the driver boundary. The handle's `plaintext` and
 * `ciphertext` slots are both **retained** post-replacement so
 * `envelope.decrypt()` continues to return the plaintext
 * synchronously without consulting the SDK, and any follow-on
 * query that reuses the same envelope skips the re-encrypt
 * round-trip.
 *
 * Lifecycle: the codec's `encode` runs first (in `lower`/
 * `encodeParams`) and returns the envelope as a sentinel when no
 * ciphertext is set; this middleware then runs in `beforeExecute`
 * and replaces the param slot with the wire-format string before
 * the driver reads `currentParams()`. See the comment on
 * `CipherstashCellCodec#encode` in `../execution/cell-codec-factory.ts`
 * for the full two-pass design.
 *
 * Cancellation: `ctx.signal` is forwarded by identity to every
 * `bulkEncrypt` call via `ifDefined`; the SDK is responsible for
 * honoring it. The awaiting middleware also races the SDK promise
 * against `ctx.signal` via `raceCipherstashAbort` so a caller-side
 * abort surfaces a `RUNTIME.ABORTED { phase: 'bulk-encrypt' }`
 * envelope promptly even when the SDK body itself ignores the signal.
 * A pre-flight `checkCipherstashAborted` short-circuits before any
 * SDK round-trip is scheduled when the signal is already aborted at
 * entry.
 */

import type {
  AnyQueryAst,
  ColumnRef,
  DefaultValueExpr,
  InsertAst,
  ParamRef,
  UpdateAst,
} from '@prisma-next/sql-relational-core/ast';
import type {
  ParamRefHandle,
  SqlParamRefMutator,
} from '@prisma-next/sql-relational-core/middleware';
import type { SqlMiddleware } from '@prisma-next/sql-runtime';
import { ifDefined } from '@prisma-next/utils/defined';
import { checkCipherstashAborted, raceCipherstashAbort } from '../execution/abort';
import {
  EncryptedEnvelopeBase,
  setHandleCiphertext,
  setHandleRoutingKey,
} from '../execution/envelope-base';
import { encodeEqlV2EncryptedWire } from '../execution/cell-codec-factory';
import { markBulkEncryptMiddlewareRegistered } from '../execution/middleware-registry';
import { type BulkEncryptTarget, groupByRoutingKey } from '../execution/routing';
import type { CipherstashSdk } from '../execution/sdk';
import { CIPHERSTASH_CODEC_ID_SET } from '../extension-metadata/constants';

/**
 * Construct the bulk-encrypt middleware. The returned middleware is
 * stateless aside from the captured `sdk` reference; one instance per
 * runtime extension is the expected pattern.
 */
export function bulkEncryptMiddleware(sdk: CipherstashSdk): SqlMiddleware {
  // Mark this sdk as wired up so the codec's `encode` can distinguish
  // "happy path: middleware will run later and fill in the ciphertext"
  // from "misconfig: this sdk has no middleware registered". See
  // `../execution/middleware-registry.ts`.
  markBulkEncryptMiddlewareRegistered(sdk);
  return {
    name: 'cipherstash.bulk-encrypt',
    familyId: 'sql',
    async beforeExecute(plan, ctx, params) {
      // Nothing to encrypt when the plan carries no parameters.
      if (!params) {
        return;
      }

      // Attach (table, column) routing context to every envelope the
      // AST references before grouping (step 1 in the module docblock).
      stampRoutingKeysFromAst(plan.ast);

      const targets = collectTargets(params);
      if (targets.length === 0) {
        return;
      }

      const groups = groupByRoutingKey(targets);
      for (const [groupKey, group] of groups) {
        const first = group[0];
        if (!first) continue;
        // Every member of a group shares the same routing key by
        // construction, so the first member's key stands in for all.
        const routingKey = first.routingKey;

        // Fail fast if the caller already aborted; otherwise race the
        // SDK promise against the signal so aborts surface promptly.
        checkCipherstashAborted(ctx.signal, 'bulk-encrypt');
        const ciphertexts = await raceCipherstashAbort(
          sdk.bulkEncrypt({
            routingKey,
            values: group.map((t) => t.plaintext),
            ...ifDefined('signal', ctx.signal),
          }),
          ctx.signal,
          'bulk-encrypt',
        );

        // The SDK contract is positional: one ciphertext per input
        // value, in order. A length mismatch means the batch cannot be
        // stitched back onto its envelopes safely.
        if (ciphertexts.length !== group.length) {
          throw new Error(
            `cipherstash bulk-encrypt: SDK returned ${ciphertexts.length} ciphertexts ` +
              `for routing key ${groupKey} but ${group.length} were requested.`,
          );
        }

        // Replace each ParamRef's value with the **wire-format**
        // string that the pg driver can serialise directly. Passing
        // the envelope itself (an `EncryptedEnvelopeBase` instance)
        // would fail at the pg layer with `could not serialize`
        // because the driver only knows how to marshal primitives /
        // arrays / Buffers. We also keep the envelope's ciphertext
        // slot stamped via `setHandleCiphertext` so subsequent reads
        // off the same envelope (e.g. immediately reusing it in a
        // follow-on query) work without a re-encrypt round-trip.
        params.replaceValues(
          group.map((t, i) => {
            const ciphertext = ciphertexts[i];
            setHandleCiphertext(t.envelope, ciphertext);
            return { ref: t.ref, newValue: encodeEqlV2EncryptedWire(ciphertext) };
          }),
        );
      }
    },
  };
}

/**
 * Collect every cipherstash-codec'd `ParamRef` whose value is an
 * envelope, validating that each carries plaintext and `(table,
 * column)` routing context before it is handed to the SDK.
 *
 * NOTE(review): the generic arguments on `SqlParamRefMutator` and
 * `BulkEncryptTarget` appear truncated in this copy of the source —
 * confirm the type parameters against the canonical file.
 */
function collectTargets(
  params: SqlParamRefMutator,
): BulkEncryptTarget>[] {
  const targets: BulkEncryptTarget>[] = [];
  for (const entry of params.entries()) {
    // Only cipherstash-codec'd params participate; the id set is a
    // closed allowlist (see extension-metadata/constants.ts).
    if (entry.codecId === undefined || !CIPHERSTASH_CODEC_ID_SET.has(entry.codecId)) continue;
    const value = entry.value;
    if (!(value instanceof EncryptedEnvelopeBase)) continue;
    const handle = value.expose();
    if (handle.plaintext === undefined) {
      throw new Error(
        'cipherstash bulk-encrypt: encountered an envelope with no plaintext on the write path. ' +
          'Use the relevant `Encrypted*.from(plaintext)` factory to construct write-side envelopes.',
      );
    }
    if (handle.table === undefined || handle.column === undefined) {
      throw new Error(
        'cipherstash bulk-encrypt: envelope reached the bulk-encrypt phase without a (table, column) ' +
          "routing context. The middleware's AST walk only handles `InsertAst` and `UpdateAst`; " +
          'cipherstash envelopes embedded in other plan shapes (e.g. raw SQL) must stamp routing ' +
          'context explicitly via `setHandleRoutingKey` before execute.',
      );
    }
    targets.push({
      ref: entry.ref,
      plaintext: handle.plaintext,
      envelope: value,
      routingKey: { table: handle.table, column: handle.column },
    });
  }
  return targets;
}

/**
 * Dispatch the routing-context walk by AST kind. Only inserts and
 * updates carry write-side envelopes; other plan shapes are no-ops.
 */
function stampRoutingKeysFromAst(ast: AnyQueryAst | undefined): void {
  if (!ast) return;
  switch (ast.kind) {
    case 'insert':
      stampInsert(ast);
      return;
    case 'update':
      stampUpdate(ast);
      return;
    default:
      // Read plans and other shapes have no values to stamp.
      return;
  }
}

/**
 * Stamp `(table, column)` onto every inserted row value, including the
 * `ON CONFLICT … DO UPDATE SET` assignments.
 */
function stampInsert(ast: InsertAst): void {
  const tableName = ast.table.name;
  for (const row of ast.rows) {
    for (const [column, value] of Object.entries(row)) {
      stampParamRefIfEnvelope(value, tableName, column);
    }
  }
  if (ast.onConflict?.action.kind === 'do-update-set') {
    for (const [column, value] of Object.entries(ast.onConflict.action.set)) {
      stampParamRefIfEnvelope(value, tableName, column);
    }
  }
}

/** Stamp `(table, column)` onto every `SET` value of an update. */
function stampUpdate(ast: UpdateAst): void {
  const tableName = ast.table.name;
  for (const [column, value] of Object.entries(ast.set)) {
    stampParamRefIfEnvelope(value, tableName, column);
  }
}

/**
 * Stamp routing context iff `value` is a `ParamRef` wrapping a
 * cipherstash envelope; all other expression kinds are ignored.
 */
function stampParamRefIfEnvelope(
  value: ColumnRef | ParamRef | DefaultValueExpr,
  table: string,
  column: string,
): void {
  if (value.kind !== 'param-ref') return;
  const inner = value.value;
  if (inner instanceof EncryptedEnvelopeBase) {
    setHandleRoutingKey(inner, table, column);
  }
}

/**
 * Cipherstash migration IR — renderable `*Call` classes for the codec
 * lifecycle hook + the public `@prisma-next/extension-cipherstash/migration`
 * factory functions.
+ * + * Each `*Call` implements the framework `OpFactoryCall` interface (ADR + * 195) directly, so cipherstash's contributions flow through the postgres + * planner as first-class IR nodes — no `RawSqlCall` wrap, no detour + * through the unstructured-op fallback. The codec hook + * (`./cipherstash-codec.ts`) returns Calls; the postgres planner adds + * them to its call list and renders them via `renderCallsToTypeScript`. + * + * Public factory functions (`cipherstashAddSearchConfig` / + * `cipherstashRemoveSearchConfig`) are re-exported from + * `@prisma-next/extension-cipherstash/migration`. Users authoring a + * hand-written migration can call them directly: + * + * ```ts + * import { cipherstashAddSearchConfig } from '@prisma-next/extension-cipherstash/migration'; + * + * createTable('public', 'user', [...]); + * cipherstashAddSearchConfig({ table: 'user', column: 'email', index: 'unique' }); + * ``` + * + * Round-trip invariant: `toOp()` produces the same op shape the codec + * hook would emit directly — `ops.json` stays byte-identical; + * `migration.ts` carries a factory call instead of an opaque + * `rawSql({...})` block. + */ + +import type { SqlMigrationPlanOperation } from '@prisma-next/family-sql/control'; +import type { + MigrationOperationClass, + OpFactoryCall, +} from '@prisma-next/framework-components/control'; +import { type ImportRequirement, jsonToTsSource, TsExpression } from '@prisma-next/ts-render'; +import { ifDefined } from '@prisma-next/utils/defined'; + +const CIPHERSTASH_MIGRATION_MODULE = '@prisma-next/extension-cipherstash/migration'; + +/** Mirrors `eql_v2.add_search_config(table, column, index_name, cast_as)`. */ +const DEFAULT_CAST_AS = 'text'; + +/** + * EQL search-config indices the cipherstash codecs emit — one per + * enabled `typeParams` flag, across every cipherstash-encrypted column + * type: + * + * - `'unique'` — equality lookup (every codec). + * - `'match'` — free-text search (`Encrypted` only). 
+ * - `'ore'` — order-and-range comparisons (`Encrypted` / + * `Encrypted` / `Encrypted` / + * `Encrypted`). + * - `'ste_vec'` — searchable JSON path/value queries + * (`Encrypted`). + * + * Mirrors the full EQL `add_search_config` index vocabulary; the + * `cipherstashAddSearchConfig` / `cipherstashRemoveSearchConfig` + * factories accept any of the four without further changes. + */ +export type CipherstashSearchIndex = 'unique' | 'match' | 'ore' | 'ste_vec'; + +/** + * Args shape accepted by the public `cipherstashAddSearchConfig` / + * `cipherstashRemoveSearchConfig` factory functions. + * + * `castAs` defaults to `'text'` — matches the cipherstash codec hook's + * canonical output and the EQL bundle's expected cast for + * `eql_v2_encrypted` columns. Override only if you know the runtime + * cast for your column differs. + */ +export interface CipherstashSearchConfigArgs { + readonly table: string; + readonly column: string; + readonly index: CipherstashSearchIndex; + readonly castAs?: string; +} + +type CipherstashOp = SqlMigrationPlanOperation; +type OpStep = CipherstashOp['execute'][number]; + +/** + * Escape a string so it can be embedded inside a Postgres single-quoted + * literal. Identifiers in our IR are unlikely to contain apostrophes, + * but doubling them keeps the emitted SQL safe under any future + * relaxation. + */ +function sqlLiteral(value: string): string { + return `'${value.replace(/'/g, "''")}'`; +} + +function invariantIdFor( + tableName: string, + fieldName: string, + action: 'add-search-config' | 'remove-search-config', + indexName: CipherstashSearchIndex, +): string { + return `cipherstash-codec:${tableName}.${fieldName}:${action}:${indexName}@v1`; +} + +/** + * Base class for cipherstash migration IR nodes. 
 *
 * Each instance is *both* an `OpFactoryCall` (renderable to TypeScript,
 * lowerable to a runtime op via `toOp()`) and a structurally-valid
 * {@link CipherstashOp} — `id`, `label`, `operationClass`,
 * `invariantId`, `target`, `precheck`, `execute`, `postcheck` are
 * stored as enumerable own properties, populated in the concrete
 * subclass constructors. So when the planner-rendered `migration.ts`
 * runs and the user's `operations` getter returns Call instances
 * directly, both `MigrationOpSchema` validation (which checks `id` /
 * `label` / `operationClass`) and `JSON.stringify` (which writes
 * `ops.json`) see the runtime op shape unchanged.
 *
 * The cipherstash-specific identity fields (`factoryName`, `table`,
 * `column`, `index`, `castAs`) live on the subclass prototype as
 * accessor getters and on a per-instance backing record kept in a
 * private slot (`#args`). Accessor properties on the class are
 * non-enumerable, and the backing record is a private field, so
 * `Object.keys(call)` and `canonicalizeJson(...)` see only the op
 * fields — `ops.json` and `migrationHash` stay byte-stable.
 */
abstract class CipherstashOpFactoryCallNode extends TsExpression implements OpFactoryCall {
  abstract get factoryName(): string;
  abstract readonly operationClass: MigrationOperationClass;
  abstract readonly label: string;
  abstract readonly id: string;
  abstract readonly invariantId: string;
  abstract readonly target: { readonly id: string };
  abstract readonly precheck: readonly OpStep[];
  abstract readonly execute: readonly OpStep[];
  abstract readonly postcheck: readonly OpStep[];

  /** Declare the public factory import the rendered `migration.ts` needs. */
  importRequirements(): readonly ImportRequirement[] {
    return [{ moduleSpecifier: CIPHERSTASH_MIGRATION_MODULE, symbol: this.factoryName }];
  }

  /**
   * Re-expose the runtime op view for callers that prefer to lower
   * Calls explicitly (notably {@link renderOps} on the postgres lane).
   * The returned object is a plain copy of this Call's op-shaped
   * fields.
   */
  toOp(): CipherstashOp {
    return {
      id: this.id,
      label: this.label,
      operationClass: this.operationClass,
      invariantId: this.invariantId,
      target: this.target,
      precheck: this.precheck,
      execute: this.execute,
      postcheck: this.postcheck,
    };
  }

  // Subclasses call this last in their constructors so every instance
  // is immutable once fully populated.
  protected freeze(): void {
    Object.freeze(this);
  }
}

/**
 * `cipherstashAddSearchConfig` — register an EQL search-config row for
 * the given column / index combination. Lowers to a `SELECT
 * eql_v2.add_search_config('<table>', '<column>', '<index>',
 * '<cast_as>')` op, classified `'additive'`.
 */
interface AddArgs {
  readonly table: string;
  readonly column: string;
  readonly index: CipherstashSearchIndex;
  readonly castAs: string;
}

export class CipherstashAddSearchConfigCall extends CipherstashOpFactoryCallNode {
  readonly id: string;
  readonly label: string;
  readonly operationClass: 'additive';
  readonly invariantId: string;
  readonly target: { readonly id: string };
  readonly precheck: readonly OpStep[];
  readonly execute: readonly OpStep[];
  readonly postcheck: readonly OpStep[];

  // Private slot keeps the renderer-side args off the enumerable
  // own-property surface; the public accessors below expose them
  // read-only on the prototype, so neither `Object.keys` nor
  // `canonicalizeJson` walks them.
  readonly #args: AddArgs;

  constructor(
    table: string,
    column: string,
    index: CipherstashSearchIndex,
    castAs: string = DEFAULT_CAST_AS,
  ) {
    super();
    this.#args = { table, column, index, castAs };
    // Property assignment order is fixed (id → label → operationClass
    // → invariantId → target → precheck → execute → postcheck) so
    // `JSON.stringify(call)` lays out keys in the byte order the
    // baseline `ops.json` carries.
    this.id = `cipherstash-codec.${table}.${column}.add-search-config.${index}`;
    this.label = `Enable cipherstash search on ${table}.${column}`;
    this.operationClass = 'additive';
    this.invariantId = invariantIdFor(table, column, 'add-search-config', index);
    this.target = { id: 'postgres' };
    this.precheck = [];
    this.execute = [
      {
        description: `Register cipherstash ${index} search config for ${table}.${column}`,
        sql: `SELECT eql_v2.add_search_config(${sqlLiteral(table)}, ${sqlLiteral(column)}, ${sqlLiteral(index)}, ${sqlLiteral(castAs)});`,
      },
    ];
    this.postcheck = [];
    this.freeze();
  }

  get factoryName(): 'cipherstashAddSearchConfig' {
    return 'cipherstashAddSearchConfig';
  }

  get table(): string {
    return this.#args.table;
  }

  get column(): string {
    return this.#args.column;
  }

  get index(): CipherstashSearchIndex {
    return this.#args.index;
  }

  get castAs(): string {
    return this.#args.castAs;
  }

  // Omits `castAs` from the rendered call when it matches the default,
  // keeping generated `migration.ts` minimal and byte-stable.
  renderTypeScript(): string {
    const args = {
      table: this.#args.table,
      column: this.#args.column,
      index: this.#args.index,
      ...ifDefined('castAs', this.#args.castAs !== DEFAULT_CAST_AS ? this.#args.castAs : undefined),
    };
    return `cipherstashAddSearchConfig(${jsonToTsSource(args)})`;
  }
}

/**
 * `cipherstashRemoveSearchConfig` — invert
 * {@link CipherstashAddSearchConfigCall} for the same (table, column,
 * index) tuple. Lowers to `SELECT eql_v2.remove_search_config('<table>',
 * '<column>', '<index>')`, classified `'destructive'`.
 *
 * No `castAs` argument — `eql_v2.remove_search_config` takes only the
 * three identifying fields; the cast was applied at the index's add
 * site.
 */
interface RemoveArgs {
  readonly table: string;
  readonly column: string;
  readonly index: CipherstashSearchIndex;
}

export class CipherstashRemoveSearchConfigCall extends CipherstashOpFactoryCallNode {
  readonly id: string;
  readonly label: string;
  readonly operationClass: 'destructive';
  readonly invariantId: string;
  readonly target: { readonly id: string };
  readonly precheck: readonly OpStep[];
  readonly execute: readonly OpStep[];
  readonly postcheck: readonly OpStep[];

  // Same non-enumerable backing-record pattern as the add-side Call;
  // keeps `ops.json` and `migrationHash` byte-stable.
  readonly #args: RemoveArgs;

  constructor(table: string, column: string, index: CipherstashSearchIndex) {
    super();
    this.#args = { table, column, index };
    // Assignment order mirrors the add-side Call so `JSON.stringify`
    // lays out keys in the baseline `ops.json` byte order.
    this.id = `cipherstash-codec.${table}.${column}.remove-search-config.${index}`;
    this.label = `Disable cipherstash search on ${table}.${column}`;
    this.operationClass = 'destructive';
    this.invariantId = invariantIdFor(table, column, 'remove-search-config', index);
    this.target = { id: 'postgres' };
    this.precheck = [];
    this.execute = [
      {
        description: `Remove cipherstash ${index} search config for ${table}.${column}`,
        sql: `SELECT eql_v2.remove_search_config(${sqlLiteral(table)}, ${sqlLiteral(column)}, ${sqlLiteral(index)});`,
      },
    ];
    this.postcheck = [];
    this.freeze();
  }

  get factoryName(): 'cipherstashRemoveSearchConfig' {
    return 'cipherstashRemoveSearchConfig';
  }

  get table(): string {
    return this.#args.table;
  }

  get column(): string {
    return this.#args.column;
  }

  get index(): CipherstashSearchIndex {
    return this.#args.index;
  }

  renderTypeScript(): string {
    return `cipherstashRemoveSearchConfig(${jsonToTsSource({
      table: this.#args.table,
      column: this.#args.column,
      index: this.#args.index,
    })})`;
  }
}

/**
 * Public factory: register a cipherstash search-config row.
 *
 * Use from a hand-written migration when you need to wire EQL
 * search-config alongside a `createTable` / `addColumn`. The
 * `Encrypted` codec hook calls this factory automatically when
 * planning a contract diff that adds a `searchable: true` column.
 *
 * Returns the {@link CipherstashAddSearchConfigCall} IR node, which
 * implements `OpFactoryCall` and is itself a `SqlMigrationPlanOperation`
 * (its readonly op-shaped fields are populated in the constructor) — so
 * the same value flows through both the renderer (planner-time IR) and
 * the runtime ops list (`Migration.operations`) without an extra
 * lowering step at the call site.
 */
export function cipherstashAddSearchConfig(
  args: CipherstashSearchConfigArgs,
): CipherstashAddSearchConfigCall {
  return new CipherstashAddSearchConfigCall(
    args.table,
    args.column,
    args.index,
    args.castAs ?? DEFAULT_CAST_AS,
  );
}

/**
 * Public factory: invert {@link cipherstashAddSearchConfig} for the
 * same (table, column, index) tuple.
 *
 * Returns the {@link CipherstashRemoveSearchConfigCall} IR node — see
 * {@link cipherstashAddSearchConfig} for the rationale. Any `castAs`
 * on `args` is ignored: `eql_v2.remove_search_config` takes only the
 * three identifying fields.
 */
export function cipherstashRemoveSearchConfig(
  args: CipherstashSearchConfigArgs,
): CipherstashRemoveSearchConfigCall {
  return new CipherstashRemoveSearchConfigCall(args.table, args.column, args.index);
}

/**
 * Control hooks for the `cipherstash:string@1` codec.
 *
 * Implements `CodecControlHooks.onFieldEvent` via the shared
 * {@link makeCipherstashCodecHooks} factory (see
 * `./codec-hooks-factory.ts` for the per-flag walk that's identical
 * across every cipherstash codec).
 * Reacts to per-field added /
 * dropped / altered events as the *application* emitter diffs the
 * prior contract against the new contract; the returned Calls flow
 * through the SQL planner's IR alongside structural DDL and render as
 * `cipherstashAddSearchConfig({...})` /
 * `cipherstashRemoveSearchConfig({...})` calls in the user's
 * `migration.ts` (ADR 195 two-renderer pattern).
 *
 * Trigger: a field uses the `cipherstash:string@1` codec. The planner
 * already dispatches per `(table, field)` based on the field's
 * `codecId` (new field for `'added'` / `'altered'`, prior field for
 * `'dropped'`), so this hook only fires when a cipherstash field is
 * involved. Per field the hook emits one
 * `cipherstashAddSearchConfig` Call per enabled flag in `typeParams`
 * (and one `cipherstashRemoveSearchConfig` Call per previously-enabled
 * flag on drop / altered-off).
 *
 * Flag → EQL index mapping for the string codec:
 *
 * - `equality: true` → `'unique'` index
 * - `freeTextSearch: true` → `'match'` index
 * - `orderAndRange: true` → `'ore'` index
 *
 * NOTE(review): the `orderAndRange → 'ore'` entry exists in the string
 * codec's `flagToIndex` below but was missing from this list —
 * confirm string ORE (order/range) search is intended before release.
 *
 * `cast_as` is `'text'` for every string-codec search-config row; the
 * EQL bundle's expected cast for `eql_v2_encrypted` columns derived
 * from a `text` plaintext.
 */

import {
  CIPHERSTASH_BIGINT_CODEC_ID,
  CIPHERSTASH_BOOLEAN_CODEC_ID,
  CIPHERSTASH_DATE_CODEC_ID,
  CIPHERSTASH_DOUBLE_CODEC_ID,
  CIPHERSTASH_JSON_CODEC_ID,
  CIPHERSTASH_STRING_CODEC_ID,
} from '../extension-metadata/constants';
import { makeCipherstashCodecHooks } from './codec-hooks-factory';

export const cipherstashStringCodecHooks = makeCipherstashCodecHooks({
  flagToIndex: {
    equality: 'unique',
    freeTextSearch: 'match',
    orderAndRange: 'ore',
  },
  castAs: 'text',
});

/**
 * Codec lifecycle hooks for `cipherstash/double@1`. The numeric codecs
 * share the `{ equality, orderAndRange }` flag set and differ only in
 * `cast_as` (`double` vs `big_int`). Codec ids name the underlying
 * machine type (the EQL `cast_as` value) rather than the JS-language
 * category; the user-facing constructor name follows the same
 * naming.
 */
export const cipherstashDoubleCodecHooks = makeCipherstashCodecHooks({
  flagToIndex: {
    equality: 'unique',
    orderAndRange: 'ore',
  },
  castAs: 'double',
});

/** Codec lifecycle hooks for `cipherstash/bigint@1`. */
export const cipherstashBigIntCodecHooks = makeCipherstashCodecHooks({
  flagToIndex: {
    equality: 'unique',
    orderAndRange: 'ore',
  },
  castAs: 'big_int',
});

/**
 * Codec lifecycle hooks for `cipherstash/date@1`. Calendar-date plaintext
 * (no time component) — flag set mirrors the numeric codecs because EQL
 * supports both equality (unique-index) and order/range (ORE-index)
 * predicates over dates.
 */
export const cipherstashDateCodecHooks = makeCipherstashCodecHooks({
  flagToIndex: {
    equality: 'unique',
    orderAndRange: 'ore',
  },
  castAs: 'date',
});

/**
 * Codec lifecycle hooks for `cipherstash/boolean@1`. Booleans only
 * support equality search (a 2-value domain has no meaningful range
 * predicate), so the flag set collapses to `{ equality }`.
 */
export const cipherstashBooleanCodecHooks = makeCipherstashCodecHooks({
  flagToIndex: {
    equality: 'unique',
  },
  castAs: 'boolean',
});

/**
 * Codec lifecycle hooks for `cipherstash/json@1`. EQL exposes structured
 * JSON predicates through the `ste_vec` (Structured Encryption Vector)
 * index family — a single flag (`searchableJson`) gates the entire
 * suite of containment / path-extraction operators.
 */
export const cipherstashJsonCodecHooks = makeCipherstashCodecHooks({
  flagToIndex: {
    searchableJson: 'ste_vec',
  },
  castAs: 'jsonb',
});

/** Re-export the codec ids alongside the hooks so wiring sites import them together.
*/ +export { + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_BOOLEAN_CODEC_ID, + CIPHERSTASH_DATE_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, + CIPHERSTASH_JSON_CODEC_ID, + CIPHERSTASH_STRING_CODEC_ID, +}; diff --git a/packages/prisma-next/src/migration/codec-hooks-factory.ts b/packages/prisma-next/src/migration/codec-hooks-factory.ts new file mode 100644 index 00000000..d56c21d2 --- /dev/null +++ b/packages/prisma-next/src/migration/codec-hooks-factory.ts @@ -0,0 +1,164 @@ +/** + * Shared factory for every cipherstash codec's `CodecControlHooks`. + * + * Every cipherstash codec (`cipherstash/string@1`, `cipherstash/double@1`, + * `cipherstash/bigint@1`, `cipherstash/date@1`, `cipherstash/boolean@1`, + * `cipherstash/json@1`) exposes the same hook-shape: + * + * - one `cipherstashAddSearchConfig` Call per enabled flag in + * `typeParams` on `'added'` / `'altered'`-flipped-on; + * - one `cipherstashRemoveSearchConfig` Call per previously-enabled + * flag on `'dropped'` / `'altered'`-flipped-off; + * - identity `expandNativeType` (the cipherstash `nativeType` is + * always `eql_v2_encrypted`; per-flag wiring is delivered by the + * `add_search_config` rows, not by widening the column type). + * + * Each codec configures the factory with two values that vary per + * codec: + * + * - `flagToIndex` — the codec's `typeParams` flag names mapped to the + * EQL `add_search_config` index name they enable (e.g. + * `equality → 'unique'`, `freeTextSearch → 'match'`, + * `orderAndRange → 'ore'`, `searchableJson → 'ste_vec'`). + * - `castAs` — the EQL `cast_as` argument passed to + * `eql_v2.add_search_config(...)` for every flag this codec emits. + * Static per codec (e.g. string → `'text'`, double → `'double'`). + * + * The factory's `onFieldEvent` body is otherwise identical across + * codecs — collapsing the ~80-line per-flag walk into one place. 
The + * shared shape is the natural shape for any future cipherstash codec + * that has a per-flag → per-EQL-index mapping; the contributor-facing + * per-codec wiring template at `../../DEVELOPING.md` references this + * factory as one of the substrate calls a new codec invocation needs. + */ + +import type { CodecControlHooks, FieldEventContext } from '@prisma-next/family-sql/control'; +import type { OpFactoryCall } from '@prisma-next/framework-components/control'; +import { + type CipherstashSearchIndex, + cipherstashAddSearchConfig, + cipherstashRemoveSearchConfig, +} from './call-classes'; + +export interface MakeCipherstashCodecHooksOptions { + /** + * `typeParams` flag names mapped to the EQL search-config index each + * enables. The factory walks every key in this record per + * `onFieldEvent` invocation; the order is irrelevant to ops.json + * because the planner re-canonicalises the call list, but stable + * key ordering keeps debug output predictable. + */ + readonly flagToIndex: Readonly>; + /** + * EQL `cast_as` argument for every `add_search_config` call this + * codec emits. Static per codec (`'text'` for string, `'double'` for + * IEEE-754, `'big_int'`, `'date'`, `'boolean'`, `'jsonb'`). + */ + readonly castAs: string; +} + +function isEnabled( + typeParams: Readonly> | undefined, + flag: string, +): boolean { + return typeParams !== undefined && typeParams[flag] === true; +} + +/** + * Construct the `CodecControlHooks` for a cipherstash codec given its + * per-codec flag-to-index mapping and `cast_as`. + * + * Pure and synchronous — the returned hook replays deterministically + * when the application emitter re-diffs the contract. 
+ */ +export function makeCipherstashCodecHooks( + options: MakeCipherstashCodecHooksOptions, +): CodecControlHooks { + const { flagToIndex, castAs } = options; + const allFlags = Object.keys(flagToIndex); + + function onFieldEvent( + event: 'added' | 'dropped' | 'altered', + ctx: FieldEventContext, + ): readonly OpFactoryCall[] { + const { tableName, fieldName, priorField, newField } = ctx; + + if (event === 'added') { + if (newField === undefined) return []; + const calls: OpFactoryCall[] = []; + for (const flag of allFlags) { + if (isEnabled(newField.typeParams, flag)) { + calls.push( + cipherstashAddSearchConfig({ + table: tableName, + column: fieldName, + index: flagToIndex[flag] as CipherstashSearchIndex, + castAs, + }), + ); + } + } + return calls; + } + + if (event === 'dropped') { + if (priorField === undefined) return []; + const calls: OpFactoryCall[] = []; + for (const flag of allFlags) { + if (isEnabled(priorField.typeParams, flag)) { + calls.push( + cipherstashRemoveSearchConfig({ + table: tableName, + column: fieldName, + index: flagToIndex[flag] as CipherstashSearchIndex, + }), + ); + } + } + return calls; + } + + if (priorField === undefined || newField === undefined) return []; + const calls: OpFactoryCall[] = []; + for (const flag of allFlags) { + const before = isEnabled(priorField.typeParams, flag); + const after = isEnabled(newField.typeParams, flag); + if (after && !before) { + calls.push( + cipherstashAddSearchConfig({ + table: tableName, + column: fieldName, + index: flagToIndex[flag] as CipherstashSearchIndex, + castAs, + }), + ); + } else if (before && !after) { + calls.push( + cipherstashRemoveSearchConfig({ + table: tableName, + column: fieldName, + index: flagToIndex[flag] as CipherstashSearchIndex, + }), + ); + } + } + return calls; + } + + /** + * The DDL type for any cipherstash column is always + * `eql_v2_encrypted` regardless of `typeParams` flags: the + * search-config wiring is delivered by the codec hook's + * 
`cipherstashAddSearchConfig` Calls (separate rows in + * `eql_v2_configuration`), not by the column type itself. Returning + * `nativeType` unchanged tells the planner "no expansion required" — + * see `expandParameterizedTypeSql` in + * `packages/3-targets/3-targets/postgres/src/core/migrations/planner-ddl-builders.ts`, + * which only requires this hook to *exist* for any column carrying + * `typeParams`. + */ + const expandNativeType: NonNullable = ({ nativeType }) => + nativeType; + + return { onFieldEvent, expandNativeType }; +} diff --git a/packages/prisma-next/src/migration/eql-bundle.ts b/packages/prisma-next/src/migration/eql-bundle.ts new file mode 100644 index 00000000..0b084d5c --- /dev/null +++ b/packages/prisma-next/src/migration/eql-bundle.ts @@ -0,0 +1,29 @@ +/** + * Vendored CipherStash EQL bundle SQL. + * + * The CipherStash team ships the bundle as a single Postgres script + * (~5,750 lines, currently `eql-2.2.1`) that creates the `eql_v2` + * schema, the `eql_v2_*` composite types / domains, the + * `eql_v2_configuration` table, plus roughly 169 functions, 46 + * operators, 4 casts, and 9 operator classes / families. CipherStash + * treats the bundle as one indivisible artefact: its contents flow + * into the `cipherstash:install-eql-bundle-v1` migration op + * **byte-for-byte** with no fork or split. + * + * The bundle source lives in {@link ./eql-install.generated} — a + * single committed `.generated.ts` file produced by + * `scripts/vendor-eql-install.ts`. Bumping the bundle version + * regenerates that file and re-runs + * `pnpm --filter @prisma-next/extension-cipherstash test` to confirm + * descriptor self-consistency. 
+ * + * Hash impact: the bundle string lives inside the `installEqlBundle` + * migration op's `execute[]`, **not** in `contract.json` — so swapping + * the bundle changes `migrationHash` (consumed by the runner at apply + * time, see `packages/1-framework/3-tooling/migration/src/hash.ts`) + * but leaves `headRef.hash` (which only digests the contract IR) + * untouched. The descriptor self-consistency test in + * `test/descriptor.test.ts` re-runs `assertDescriptorSelfConsistency` + * to confirm that invariant. + */ +export { EQL_INSTALL_SQL as EQL_BUNDLE_SQL, EQL_INSTALL_VERSION } from './eql-install.generated'; diff --git a/packages/prisma-next/src/migration/eql-install.generated.ts b/packages/prisma-next/src/migration/eql-install.generated.ts new file mode 100644 index 00000000..3c605a92 --- /dev/null +++ b/packages/prisma-next/src/migration/eql-install.generated.ts @@ -0,0 +1,5751 @@ +// @generated — DO NOT EDIT. +// Source: scripts/vendor-eql-install.ts +// Bundle pinned version: eql-2.2.1 +// +// This file is committed to source control so dev environments and +// offline builds work without network access. Regenerate with +// `pnpm vendor-eql-install` after bumping EQL_VERSION in the script. + +export const EQL_INSTALL_VERSION = 'eql-2.2.1' as const; + +export const EQL_INSTALL_SQL: string = `--! @file schema.sql +--! @brief EQL v2 schema creation +--! +--! Creates the eql_v2 schema which contains all Encrypt Query Language +--! functions, types, and tables. Drops existing schema if present to +--! support clean reinstallation. +--! +--! @warning DROP SCHEMA CASCADE will remove all objects in the schema +--! @note All EQL objects (functions, types, tables) reside in eql_v2 schema + +--! @brief Drop existing EQL v2 schema +--! @warning CASCADE will drop all dependent objects +DROP SCHEMA IF EXISTS eql_v2 CASCADE; + +--! @brief Create EQL v2 schema +--! @note All EQL functions and types will be created in this schema +CREATE SCHEMA eql_v2; + +--! 
@brief Composite type for encrypted column data +--! +--! Core type used for all encrypted columns in EQL. Stores encrypted data as JSONB +--! with the following structure: +--! - \`c\`: ciphertext (base64-encoded encrypted value) +--! - \`i\`: index terms (searchable metadata for encrypted searches) +--! - \`k\`: key ID (identifier for encryption key) +--! - \`m\`: metadata (additional encryption metadata) +--! +--! Created in public schema to persist independently of eql_v2 schema lifecycle. +--! Customer data columns use this type, so it must not be dropped if data exists. +--! +--! @note DO NOT DROP this type unless absolutely certain no encrypted data uses it +--! @see eql_v2.ciphertext +--! @see eql_v2.meta_data +--! @see eql_v2.add_column +DO $$ + BEGIN + IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'eql_v2_encrypted') THEN + CREATE TYPE public.eql_v2_encrypted AS ( + data jsonb + ); + END IF; + END +$$; + + + + + + + + + + +--! @brief Bloom filter index term type +--! +--! Domain type representing Bloom filter bit arrays stored as smallint arrays. +--! Used for pattern-match encrypted searches via the 'match' index type. +--! The filter is stored in the 'bf' field of encrypted data payloads. +--! +--! @see eql_v2.add_search_config +--! @see eql_v2."~~" +--! @note This is a transient type used only during query execution +CREATE DOMAIN eql_v2.bloom_filter AS smallint[]; + + + +--! @brief ORE block term type for Order-Revealing Encryption +--! +--! Composite type representing a single ORE (Order-Revealing Encryption) block term. +--! Stores encrypted data as bytea that enables range comparisons without decryption. +--! +--! @see eql_v2.ore_block_u64_8_256 +--! @see eql_v2.compare_ore_block_u64_8_256_term +CREATE TYPE eql_v2.ore_block_u64_8_256_term AS ( + bytes bytea +); + + +--! @brief ORE block index term type for range queries +--! +--! Composite type containing an array of ORE block terms. Used for encrypted +--! 
range queries via the 'ore' index type. The array is stored in the 'ob' field +--! of encrypted data payloads. +--! +--! @see eql_v2.add_search_config +--! @see eql_v2.compare_ore_block_u64_8_256_terms +--! @note This is a transient type used only during query execution +CREATE TYPE eql_v2.ore_block_u64_8_256 AS ( + terms eql_v2.ore_block_u64_8_256_term[] +); + +--! @brief HMAC-SHA256 index term type +--! +--! Domain type representing HMAC-SHA256 hash values. +--! Used for exact-match encrypted searches via the 'unique' index type. +--! The hash is stored in the 'hm' field of encrypted data payloads. +--! +--! @see eql_v2.add_search_config +--! @note This is a transient type used only during query execution +CREATE DOMAIN eql_v2.hmac_256 AS text; +-- AUTOMATICALLY GENERATED FILE + +--! @file common.sql +--! @brief Common utility functions +--! +--! Provides general-purpose utility functions used across EQL: +--! - Constant-time bytea comparison for security +--! - JSONB to bytea array conversion +--! - Logging helpers for debugging and testing + + +--! @brief Constant-time comparison of bytea values +--! @internal +--! +--! Compares two bytea values in constant time to prevent timing attacks. +--! Always checks all bytes even after finding differences, maintaining +--! consistent execution time regardless of where differences occur. +--! +--! @param a bytea First value to compare +--! @param b bytea Second value to compare +--! @return boolean True if values are equal +--! +--! @note Returns false immediately if lengths differ (length is not secret) +--! 
@note Used for secure comparison of cryptographic values +CREATE FUNCTION eql_v2.bytea_eq(a bytea, b bytea) RETURNS boolean AS $$ +DECLARE + result boolean; + differing bytea; +BEGIN + + -- Check if the bytea values are the same length + IF LENGTH(a) != LENGTH(b) THEN + RETURN false; + END IF; + + -- Compare each byte in the bytea values + result := true; + FOR i IN 1..LENGTH(a) LOOP + IF SUBSTRING(a FROM i FOR 1) != SUBSTRING(b FROM i FOR 1) THEN + result := result AND false; + END IF; + END LOOP; + + RETURN result; +END; +$$ LANGUAGE plpgsql; + + +--! @brief Convert JSONB hex array to bytea array +--! @internal +--! +--! Converts a JSONB array of hex-encoded strings into a PostgreSQL bytea array. +--! Used for deserializing binary data (like ORE terms) from JSONB storage. +--! +--! @param jsonb JSONB array of hex-encoded strings +--! @return bytea[] Array of decoded binary values +--! +--! @note Returns NULL if input is JSON null +--! @note Each array element is hex-decoded to bytea +CREATE FUNCTION eql_v2.jsonb_array_to_bytea_array(val jsonb) +RETURNS bytea[] AS $$ +DECLARE + terms_arr bytea[]; +BEGIN + IF jsonb_typeof(val) = 'null' THEN + RETURN NULL; + END IF; + + SELECT array_agg(decode(value::text, 'hex')::bytea) + INTO terms_arr + FROM jsonb_array_elements_text(val) AS value; + + RETURN terms_arr; +END; +$$ LANGUAGE plpgsql; + + +--! @brief Log message for debugging +--! +--! Convenience function to emit log messages during testing and debugging. +--! Uses RAISE NOTICE to output messages to PostgreSQL logs. +--! +--! @param text Message to log +--! +--! @note Primarily used in tests and development +--! @see eql_v2.log(text, text) for contextual logging +CREATE FUNCTION eql_v2.log(s text) + RETURNS void +AS $$ + BEGIN + RAISE NOTICE '[LOG] %', s; +END; +$$ LANGUAGE plpgsql; + + +--! @brief Log message with context +--! +--! Overload of log function that includes context label for better +--! log organization during testing. +--! +--! 
@param ctx text Context label (e.g., test name, module name) +--! @param s text Message to log +--! +--! @note Format: "[LOG] {ctx} {message}" +--! @see eql_v2.log(text) +CREATE FUNCTION eql_v2.log(ctx text, s text) + RETURNS void +AS $$ + BEGIN + RAISE NOTICE '[LOG] % %', ctx, s; +END; +$$ LANGUAGE plpgsql; + +--! @brief CLLW ORE index term type for range queries +--! +--! Composite type for CLLW (Copyless Logarithmic Width) Order-Revealing Encryption. +--! Each output block is 8-bits. Used for encrypted range queries via the 'ore' index type. +--! The ciphertext is stored in the 'ocf' field of encrypted data payloads. +--! +--! @see eql_v2.add_search_config +--! @see eql_v2.compare_ore_cllw_u64_8 +--! @note This is a transient type used only during query execution +CREATE TYPE eql_v2.ore_cllw_u64_8 AS ( + bytes bytea +); + +--! @file crypto.sql +--! @brief PostgreSQL pgcrypto extension enablement +--! +--! Enables the pgcrypto extension which provides cryptographic functions +--! used by EQL for hashing and other cryptographic operations. +--! +--! @note pgcrypto provides functions like digest(), hmac(), gen_random_bytes() +--! @note IF NOT EXISTS prevents errors if extension already enabled + +--! @brief Enable pgcrypto extension +--! @note Provides cryptographic functions for hashing and random number generation +CREATE EXTENSION IF NOT EXISTS pgcrypto; + + +--! @brief Extract ciphertext from encrypted JSONB value +--! +--! Extracts the ciphertext (c field) from a raw JSONB encrypted value. +--! The ciphertext is the base64-encoded encrypted data. +--! +--! @param jsonb containing encrypted EQL payload +--! @return Text Base64-encoded ciphertext string +--! @throws Exception if 'c' field is not present in JSONB +--! +--! @example +--! -- Extract ciphertext from JSONB literal +--! SELECT eql_v2.ciphertext('{"c":"AQIDBA==","i":{"unique":"..."}}'::jsonb); +--! +--! @see eql_v2.ciphertext(eql_v2_encrypted) +--! 
@see eql_v2.meta_data +CREATE FUNCTION eql_v2.ciphertext(val jsonb) + RETURNS text + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + IF val ? 'c' THEN + RETURN val->>'c'; + END IF; + RAISE 'Expected a ciphertext (c) value in json: %', val; + END; +$$ LANGUAGE plpgsql; + +--! @brief Extract ciphertext from encrypted column value +--! +--! Extracts the ciphertext from an encrypted column value. Convenience +--! overload that unwraps eql_v2_encrypted type and delegates to JSONB version. +--! +--! @param eql_v2_encrypted Encrypted column value +--! @return Text Base64-encoded ciphertext string +--! @throws Exception if encrypted value is malformed +--! +--! @example +--! -- Extract ciphertext from encrypted column +--! SELECT eql_v2.ciphertext(encrypted_email) FROM users; +--! +--! @see eql_v2.ciphertext(jsonb) +--! @see eql_v2.meta_data +CREATE FUNCTION eql_v2.ciphertext(val eql_v2_encrypted) + RETURNS text + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN eql_v2.ciphertext(val.data); + END; +$$ LANGUAGE plpgsql; + +--! @brief State transition function for grouped_value aggregate +--! @internal +--! +--! Returns the first non-null value encountered. Used as state function +--! for the grouped_value aggregate to select first value in each group. +--! +--! @param $1 JSONB Accumulated state (first non-null value found) +--! @param $2 JSONB New value from current row +--! @return JSONB First non-null value (state or new value) +--! +--! @see eql_v2.grouped_value +CREATE FUNCTION eql_v2._first_grouped_value(jsonb, jsonb) +RETURNS jsonb AS $$ + SELECT COALESCE($1, $2); +$$ LANGUAGE sql IMMUTABLE; + +--! @brief Return first non-null encrypted value in a group +--! +--! Aggregate function that returns the first non-null encrypted value +--! encountered within a GROUP BY clause. Useful for deduplication or +--! selecting representative values from grouped encrypted data. +--! +--! @param input JSONB Encrypted values to aggregate +--! 
@return JSONB First non-null encrypted value in group +--! +--! @example +--! -- Get first email per user group +--! SELECT user_id, eql_v2.grouped_value(encrypted_email) +--! FROM user_emails +--! GROUP BY user_id; +--! +--! -- Deduplicate encrypted values +--! SELECT DISTINCT ON (user_id) +--! user_id, +--! eql_v2.grouped_value(encrypted_ssn) as primary_ssn +--! FROM user_records +--! GROUP BY user_id; +--! +--! @see eql_v2._first_grouped_value +CREATE AGGREGATE eql_v2.grouped_value(jsonb) ( + SFUNC = eql_v2._first_grouped_value, + STYPE = jsonb +); + +--! @brief Add validation constraint to encrypted column +--! +--! Adds a CHECK constraint to ensure column values conform to encrypted data +--! structure. Constraint uses eql_v2.check_encrypted to validate format. +--! Called automatically by eql_v2.add_column. +--! +--! @param table_name TEXT Name of table containing the column +--! @param column_name TEXT Name of column to constrain +--! @return Void +--! +--! @example +--! -- Manually add constraint (normally done by add_column) +--! SELECT eql_v2.add_encrypted_constraint('users', 'encrypted_email'); +--! +--! -- Resulting constraint: +--! -- ALTER TABLE users ADD CONSTRAINT eql_v2_encrypted_check_encrypted_email +--! -- CHECK (eql_v2.check_encrypted(encrypted_email)); +--! +--! @see eql_v2.add_column +--! @see eql_v2.remove_encrypted_constraint +CREATE FUNCTION eql_v2.add_encrypted_constraint(table_name TEXT, column_name TEXT) + RETURNS void +AS $$ + BEGIN + EXECUTE format('ALTER TABLE %I ADD CONSTRAINT eql_v2_encrypted_constraint_%I_%I CHECK (eql_v2.check_encrypted(%I))', table_name, table_name, column_name, column_name); + EXCEPTION + WHEN duplicate_table THEN + WHEN duplicate_object THEN + RAISE NOTICE 'Constraint \`eql_v2_encrypted_constraint_%_%\` already exists, skipping', table_name, column_name; + END; +$$ LANGUAGE plpgsql; + +--! @brief Remove validation constraint from encrypted column +--! +--! 
Removes the CHECK constraint that validates encrypted data structure. +--! Called automatically by eql_v2.remove_column. Uses IF EXISTS to avoid +--! errors if constraint doesn't exist. +--! +--! @param table_name TEXT Name of table containing the column +--! @param column_name TEXT Name of column to unconstrain +--! @return Void +--! +--! @example +--! -- Manually remove constraint (normally done by remove_column) +--! SELECT eql_v2.remove_encrypted_constraint('users', 'encrypted_email'); +--! +--! @see eql_v2.remove_column +--! @see eql_v2.add_encrypted_constraint +CREATE FUNCTION eql_v2.remove_encrypted_constraint(table_name TEXT, column_name TEXT) + RETURNS void +AS $$ + BEGIN + EXECUTE format('ALTER TABLE %I DROP CONSTRAINT IF EXISTS eql_v2_encrypted_constraint_%I_%I', table_name, table_name, column_name); + END; +$$ LANGUAGE plpgsql; + +--! @brief Extract metadata from encrypted JSONB value +--! +--! Extracts index terms (i) and version (v) from a raw JSONB encrypted value. +--! Returns metadata object containing searchable index terms without ciphertext. +--! +--! @param jsonb containing encrypted EQL payload +--! @return JSONB Metadata object with 'i' (index terms) and 'v' (version) fields +--! +--! @example +--! -- Extract metadata to inspect index terms +--! SELECT eql_v2.meta_data('{"c":"...","i":{"unique":"abc123"},"v":1}'::jsonb); +--! -- Returns: {"i":{"unique":"abc123"},"v":1} +--! +--! @see eql_v2.meta_data(eql_v2_encrypted) +--! @see eql_v2.ciphertext +CREATE FUNCTION eql_v2.meta_data(val jsonb) + RETURNS jsonb + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN jsonb_build_object( + 'i', val->'i', + 'v', val->'v' + ); + END; +$$ LANGUAGE plpgsql; + +--! @brief Extract metadata from encrypted column value +--! +--! Extracts index terms and version from an encrypted column value. +--! Convenience overload that unwraps eql_v2_encrypted type and +--! delegates to JSONB version. +--! +--! @param eql_v2_encrypted Encrypted column value +--! 
@return JSONB Metadata object with 'i' (index terms) and 'v' (version) fields +--! +--! @example +--! -- Inspect index terms for encrypted column +--! SELECT user_id, eql_v2.meta_data(encrypted_email) as email_metadata +--! FROM users; +--! +--! @see eql_v2.meta_data(jsonb) +--! @see eql_v2.ciphertext +CREATE FUNCTION eql_v2.meta_data(val eql_v2_encrypted) + RETURNS jsonb + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN eql_v2.meta_data(val.data); + END; +$$ LANGUAGE plpgsql; + + +--! @brief Variable-width CLLW ORE index term type for range queries +--! +--! Composite type for variable-width CLLW (Copyless Logarithmic Width) Order-Revealing Encryption. +--! Each output block is 8-bits. Unlike ore_cllw_u64_8, supports variable-length ciphertexts. +--! Used for encrypted range queries via the 'ore' index type. +--! The ciphertext is stored in the 'ocv' field of encrypted data payloads. +--! +--! @see eql_v2.add_search_config +--! @see eql_v2.compare_ore_cllw_var_8 +--! @note This is a transient type used only during query execution +CREATE TYPE eql_v2.ore_cllw_var_8 AS ( + bytes bytea +); + + +--! @brief Extract CLLW ORE index term from JSONB payload +--! +--! Extracts the CLLW ORE ciphertext from the 'ocf' field of an encrypted +--! data payload. Used internally for range query comparisons. +--! +--! @param jsonb containing encrypted EQL payload +--! @return eql_v2.ore_cllw_u64_8 CLLW ORE ciphertext +--! @throws Exception if 'ocf' field is missing when ore index is expected +--! +--! @see eql_v2.has_ore_cllw_u64_8 +--! @see eql_v2.compare_ore_cllw_u64_8 +CREATE FUNCTION eql_v2.ore_cllw_u64_8(val jsonb) + RETURNS eql_v2.ore_cllw_u64_8 + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + IF val IS NULL THEN + RETURN NULL; + END IF; + + IF NOT (eql_v2.has_ore_cllw_u64_8(val)) THEN + RAISE 'Expected a ore_cllw_u64_8 index (ocf) value in json: %', val; + END IF; + + RETURN ROW(decode(val->>'ocf', 'hex')); + END; +$$ LANGUAGE plpgsql; + + +--! 
@brief Extract CLLW ORE index term from encrypted column value +--! +--! Extracts the CLLW ORE ciphertext from an encrypted column value by accessing +--! its underlying JSONB data field. +--! +--! @param eql_v2_encrypted Encrypted column value +--! @return eql_v2.ore_cllw_u64_8 CLLW ORE ciphertext +--! +--! @see eql_v2.ore_cllw_u64_8(jsonb) +CREATE FUNCTION eql_v2.ore_cllw_u64_8(val eql_v2_encrypted) + RETURNS eql_v2.ore_cllw_u64_8 + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN (SELECT eql_v2.ore_cllw_u64_8(val.data)); + END; +$$ LANGUAGE plpgsql; + + +--! @brief Check if JSONB payload contains CLLW ORE index term +--! +--! Tests whether the encrypted data payload includes an 'ocf' field, +--! indicating a CLLW ORE ciphertext is available for range queries. +--! +--! @param jsonb containing encrypted EQL payload +--! @return Boolean True if 'ocf' field is present and non-null +--! +--! @see eql_v2.ore_cllw_u64_8 +CREATE FUNCTION eql_v2.has_ore_cllw_u64_8(val jsonb) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN val ->> 'ocf' IS NOT NULL; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Check if encrypted column value contains CLLW ORE index term +--! +--! Tests whether an encrypted column value includes a CLLW ORE ciphertext +--! by checking its underlying JSONB data field. +--! +--! @param eql_v2_encrypted Encrypted column value +--! @return Boolean True if CLLW ORE ciphertext is present +--! +--! @see eql_v2.has_ore_cllw_u64_8(jsonb) +CREATE FUNCTION eql_v2.has_ore_cllw_u64_8(val eql_v2_encrypted) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN eql_v2.has_ore_cllw_u64_8(val.data); + END; +$$ LANGUAGE plpgsql; + + + +--! @brief Compare CLLW ORE ciphertext bytes +--! @internal +--! +--! Byte-by-byte comparison of CLLW ORE ciphertexts implementing the CLLW +--! comparison algorithm. Used by both fixed-width (ore_cllw_u64_8) and +--! variable-width (ore_cllw_var_8) ORE variants. +--! +--! 
@param a Bytea First CLLW ORE ciphertext +--! @param b Bytea Second CLLW ORE ciphertext +--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b +--! @throws Exception if ciphertexts are different lengths +--! +--! @note Shared comparison logic for multiple ORE CLLW schemes +--! @see eql_v2.compare_ore_cllw_u64_8 +CREATE FUNCTION eql_v2.compare_ore_cllw_term_bytes(a bytea, b bytea) +RETURNS int AS $$ +DECLARE + len_a INT; + len_b INT; + x BYTEA; + y BYTEA; + i INT; + differing boolean; +BEGIN + + -- Check if the lengths of the two bytea arguments are the same + len_a := LENGTH(a); + len_b := LENGTH(b); + + IF len_a != len_b THEN + RAISE EXCEPTION 'ore_cllw index terms are not the same length'; + END IF; + + -- Iterate over each byte and compare them + FOR i IN 1..len_a LOOP + x := SUBSTRING(a FROM i FOR 1); + y := SUBSTRING(b FROM i FOR 1); + + -- Check if there's a difference + IF x != y THEN + differing := true; + EXIT; + END IF; + END LOOP; + + -- If a difference is found, compare the bytes as in Rust logic + IF differing THEN + IF (get_byte(y, 0) + 1) % 256 = get_byte(x, 0) THEN + RETURN 1; + ELSE + RETURN -1; + END IF; + ELSE + RETURN 0; + END IF; +END; +$$ LANGUAGE plpgsql; + + + +--! @brief Blake3 hash index term type +--! +--! Domain type representing Blake3 cryptographic hash values. +--! Used for exact-match encrypted searches via the 'unique' index type. +--! The hash is stored in the 'b3' field of encrypted data payloads. +--! +--! @see eql_v2.add_search_config +--! @note This is a transient type used only during query execution +CREATE DOMAIN eql_v2.blake3 AS text; + +--! @brief Extract Blake3 hash index term from JSONB payload +--! +--! Extracts the Blake3 hash value from the 'b3' field of an encrypted +--! data payload. Used internally for exact-match comparisons. +--! +--! @param jsonb containing encrypted EQL payload +--! @return eql_v2.blake3 Blake3 hash value, or NULL if not present +--! 
@throws Exception if 'b3' field is missing when blake3 index is expected +--! +--! @see eql_v2.has_blake3 +--! @see eql_v2.compare_blake3 +CREATE FUNCTION eql_v2.blake3(val jsonb) + RETURNS eql_v2.blake3 + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + IF val IS NULL THEN + RETURN NULL; + END IF; + + IF NOT eql_v2.has_blake3(val) THEN + RAISE 'Expected a blake3 index (b3) value in json: %', val; + END IF; + + IF val->>'b3' IS NULL THEN + RETURN NULL; + END IF; + + RETURN val->>'b3'; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Extract Blake3 hash index term from encrypted column value +--! +--! Extracts the Blake3 hash from an encrypted column value by accessing +--! its underlying JSONB data field. +--! +--! @param eql_v2_encrypted Encrypted column value +--! @return eql_v2.blake3 Blake3 hash value, or NULL if not present +--! +--! @see eql_v2.blake3(jsonb) +CREATE FUNCTION eql_v2.blake3(val eql_v2_encrypted) + RETURNS eql_v2.blake3 + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN (SELECT eql_v2.blake3(val.data)); + END; +$$ LANGUAGE plpgsql; + + +--! @brief Check if JSONB payload contains Blake3 index term +--! +--! Tests whether the encrypted data payload includes a 'b3' field, +--! indicating a Blake3 hash is available for exact-match queries. +--! +--! @param jsonb containing encrypted EQL payload +--! @return Boolean True if 'b3' field is present and non-null +--! +--! @see eql_v2.blake3 +CREATE FUNCTION eql_v2.has_blake3(val jsonb) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN val ->> 'b3' IS NOT NULL; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Check if encrypted column value contains Blake3 index term +--! +--! Tests whether an encrypted column value includes a Blake3 hash +--! by checking its underlying JSONB data field. +--! +--! @param eql_v2_encrypted Encrypted column value +--! @return Boolean True if Blake3 hash is present +--! +--! 
@see eql_v2.has_blake3(jsonb) +CREATE FUNCTION eql_v2.has_blake3(val eql_v2_encrypted) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN eql_v2.has_blake3(val.data); + END; +$$ LANGUAGE plpgsql; + + +--! @brief Extract HMAC-SHA256 index term from JSONB payload +--! +--! Extracts the HMAC-SHA256 hash value from the 'hm' field of an encrypted +--! data payload. Used internally for exact-match comparisons. +--! +--! @param jsonb containing encrypted EQL payload +--! @return eql_v2.hmac_256 HMAC-SHA256 hash value +--! @throws Exception if 'hm' field is missing when hmac_256 index is expected +--! +--! @see eql_v2.has_hmac_256 +--! @see eql_v2.compare_hmac_256 +CREATE FUNCTION eql_v2.hmac_256(val jsonb) + RETURNS eql_v2.hmac_256 + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + IF val IS NULL THEN + RETURN NULL; + END IF; + + IF eql_v2.has_hmac_256(val) THEN + RETURN val->>'hm'; + END IF; + RAISE 'Expected a hmac_256 index (hm) value in json: %', val; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Check if JSONB payload contains HMAC-SHA256 index term +--! +--! Tests whether the encrypted data payload includes an 'hm' field, +--! indicating an HMAC-SHA256 hash is available for exact-match queries. +--! +--! @param jsonb containing encrypted EQL payload +--! @return Boolean True if 'hm' field is present and non-null +--! +--! @see eql_v2.hmac_256 +CREATE FUNCTION eql_v2.has_hmac_256(val jsonb) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN val ->> 'hm' IS NOT NULL; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Check if encrypted column value contains HMAC-SHA256 index term +--! +--! Tests whether an encrypted column value includes an HMAC-SHA256 hash +--! by checking its underlying JSONB data field. +--! +--! @param eql_v2_encrypted Encrypted column value +--! @return Boolean True if HMAC-SHA256 hash is present +--! +--! 
@see eql_v2.has_hmac_256(jsonb) +CREATE FUNCTION eql_v2.has_hmac_256(val eql_v2_encrypted) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN eql_v2.has_hmac_256(val.data); + END; +$$ LANGUAGE plpgsql; + + + +--! @brief Extract HMAC-SHA256 index term from encrypted column value +--! +--! Extracts the HMAC-SHA256 hash from an encrypted column value by accessing +--! its underlying JSONB data field. +--! +--! @param eql_v2_encrypted Encrypted column value +--! @return eql_v2.hmac_256 HMAC-SHA256 hash value +--! +--! @see eql_v2.hmac_256(jsonb) +CREATE FUNCTION eql_v2.hmac_256(val eql_v2_encrypted) + RETURNS eql_v2.hmac_256 + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN (SELECT eql_v2.hmac_256(val.data)); + END; +$$ LANGUAGE plpgsql; + + + + +--! @brief Convert JSONB array to ORE block composite type +--! @internal +--! +--! Converts a JSONB array of hex-encoded ORE terms from the CipherStash Proxy +--! payload into the PostgreSQL composite type used for ORE operations. +--! +--! @param val JSONB Array of hex-encoded ORE block terms +--! @return eql_v2.ore_block_u64_8_256 ORE block composite type, or NULL if input is null +--! +--! @see eql_v2.ore_block_u64_8_256(jsonb) +CREATE FUNCTION eql_v2.jsonb_array_to_ore_block_u64_8_256(val jsonb) +RETURNS eql_v2.ore_block_u64_8_256 AS $$ +DECLARE + terms eql_v2.ore_block_u64_8_256_term[]; +BEGIN + IF jsonb_typeof(val) = 'null' THEN + RETURN NULL; + END IF; + + SELECT array_agg(ROW(b)::eql_v2.ore_block_u64_8_256_term) + INTO terms + FROM unnest(eql_v2.jsonb_array_to_bytea_array(val)) AS b; + + RETURN ROW(terms)::eql_v2.ore_block_u64_8_256; +END; +$$ LANGUAGE plpgsql; + + +--! @brief Extract ORE block index term from JSONB payload +--! +--! Extracts the ORE block array from the 'ob' field of an encrypted +--! data payload. Used internally for range query comparisons. +--! +--! @param jsonb containing encrypted EQL payload +--! @return eql_v2.ore_block_u64_8_256 ORE block index term +--! 
@throws Exception if 'ob' field is missing when ore index is expected +--! +--! @see eql_v2.has_ore_block_u64_8_256 +--! @see eql_v2.compare_ore_block_u64_8_256 +CREATE FUNCTION eql_v2.ore_block_u64_8_256(val jsonb) + RETURNS eql_v2.ore_block_u64_8_256 + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + IF val IS NULL THEN + RETURN NULL; + END IF; + + IF eql_v2.has_ore_block_u64_8_256(val) THEN + RETURN eql_v2.jsonb_array_to_ore_block_u64_8_256(val->'ob'); + END IF; + RAISE 'Expected an ore index (ob) value in json: %', val; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Extract ORE block index term from encrypted column value +--! +--! Extracts the ORE block from an encrypted column value by accessing +--! its underlying JSONB data field. +--! +--! @param eql_v2_encrypted Encrypted column value +--! @return eql_v2.ore_block_u64_8_256 ORE block index term +--! +--! @see eql_v2.ore_block_u64_8_256(jsonb) +CREATE FUNCTION eql_v2.ore_block_u64_8_256(val eql_v2_encrypted) + RETURNS eql_v2.ore_block_u64_8_256 + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN eql_v2.ore_block_u64_8_256(val.data); + END; +$$ LANGUAGE plpgsql; + + +--! @brief Check if JSONB payload contains ORE block index term +--! +--! Tests whether the encrypted data payload includes an 'ob' field, +--! indicating an ORE block is available for range queries. +--! +--! @param jsonb containing encrypted EQL payload +--! @return Boolean True if 'ob' field is present and non-null +--! +--! @see eql_v2.ore_block_u64_8_256 +CREATE FUNCTION eql_v2.has_ore_block_u64_8_256(val jsonb) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN val ->> 'ob' IS NOT NULL; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Check if encrypted column value contains ORE block index term +--! +--! Tests whether an encrypted column value includes an ORE block +--! by checking its underlying JSONB data field. +--! +--! @param eql_v2_encrypted Encrypted column value +--! 
@return Boolean True if ORE block is present +--! +--! @see eql_v2.has_ore_block_u64_8_256(jsonb) +CREATE FUNCTION eql_v2.has_ore_block_u64_8_256(val eql_v2_encrypted) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN eql_v2.has_ore_block_u64_8_256(val.data); + END; +$$ LANGUAGE plpgsql; + + + +--! @brief Compare two ORE block terms using cryptographic comparison +--! @internal +--! +--! Performs a three-way comparison (returns -1/0/1) of individual ORE block terms +--! using the ORE cryptographic protocol. Compares PRP and PRF blocks to determine +--! ordering without decryption. +--! +--! @param a eql_v2.ore_block_u64_8_256_term First ORE term to compare +--! @param b eql_v2.ore_block_u64_8_256_term Second ORE term to compare +--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b +--! @throws Exception if ciphertexts are different lengths +--! +--! @note Uses AES-ECB encryption for bit comparisons per ORE protocol +--! @see eql_v2.compare_ore_block_u64_8_256_terms +CREATE FUNCTION eql_v2.compare_ore_block_u64_8_256_term(a eql_v2.ore_block_u64_8_256_term, b eql_v2.ore_block_u64_8_256_term) + RETURNS integer +AS $$ + DECLARE + eq boolean := true; + unequal_block smallint := 0; + hash_key bytea; + data_block bytea; + encrypt_block bytea; + target_block bytea; + + left_block_size CONSTANT smallint := 16; + right_block_size CONSTANT smallint := 32; + right_offset CONSTANT smallint := 136; -- 8 * 17 + + indicator smallint := 0; + BEGIN + IF a IS NULL AND b IS NULL THEN + RETURN 0; + END IF; + + IF a IS NULL THEN + RETURN -1; + END IF; + + IF b IS NULL THEN + RETURN 1; + END IF; + + IF bit_length(a.bytes) != bit_length(b.bytes) THEN + RAISE EXCEPTION 'Ciphertexts are different lengths'; + END IF; + + FOR block IN 0..7 LOOP + -- Compare each PRP (byte from the first 8 bytes) and PRF block (8 byte + -- chunks of the rest of the value). + -- NOTE: + -- * Substr is ordinally indexed (hence 1 and not 0, and 9 and not 8). 
+ -- * We are not worrying about timing attacks here; don't fret about + -- the OR or !=. + IF + substr(a.bytes, 1 + block, 1) != substr(b.bytes, 1 + block, 1) + OR substr(a.bytes, 9 + left_block_size * block, left_block_size) != substr(b.bytes, 9 + left_block_size * BLOCK, left_block_size) + THEN + -- set the first unequal block we find + IF eq THEN + unequal_block := block; + END IF; + eq = false; + END IF; + END LOOP; + + IF eq THEN + RETURN 0::integer; + END IF; + + -- Hash key is the IV from the right CT of b + hash_key := substr(b.bytes, right_offset + 1, 16); + + -- first right block is at right offset + nonce_size (ordinally indexed) + target_block := substr(b.bytes, right_offset + 17 + (unequal_block * right_block_size), right_block_size); + + data_block := substr(a.bytes, 9 + (left_block_size * unequal_block), left_block_size); + + encrypt_block := public.encrypt(data_block::bytea, hash_key::bytea, 'aes-ecb'); + + indicator := ( + get_bit( + encrypt_block, + 0 + ) + get_bit(target_block, get_byte(a.bytes, unequal_block))) % 2; + + IF indicator = 1 THEN + RETURN 1::integer; + ELSE + RETURN -1::integer; + END IF; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Compare arrays of ORE block terms recursively +--! @internal +--! +--! Recursively compares arrays of ORE block terms element-by-element. +--! Empty arrays are considered less than non-empty arrays. If the first elements +--! are equal, recursively compares remaining elements. +--! +--! @param a eql_v2.ore_block_u64_8_256_term[] First array of ORE terms +--! @param b eql_v2.ore_block_u64_8_256_term[] Second array of ORE terms +--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b, NULL if either array is NULL +--! +--! @note Empty arrays sort before non-empty arrays +--! 
@see eql_v2.compare_ore_block_u64_8_256_term +CREATE FUNCTION eql_v2.compare_ore_block_u64_8_256_terms(a eql_v2.ore_block_u64_8_256_term[], b eql_v2.ore_block_u64_8_256_term[]) +RETURNS integer AS $$ + DECLARE + cmp_result integer; + BEGIN + + -- NULLs are NULL + IF a IS NULL OR b IS NULL THEN + RETURN NULL; + END IF; + + -- empty a and b + IF cardinality(a) = 0 AND cardinality(b) = 0 THEN + RETURN 0; + END IF; + + -- empty a and some b + IF (cardinality(a) = 0) AND cardinality(b) > 0 THEN + RETURN -1; + END IF; + + -- some a and empty b + IF cardinality(a) > 0 AND (cardinality(b) = 0) THEN + RETURN 1; + END IF; + + cmp_result := eql_v2.compare_ore_block_u64_8_256_term(a[1], b[1]); + + IF cmp_result = 0 THEN + -- Removes the first element in the array, and calls this fn again to compare the next element/s in the array. + RETURN eql_v2.compare_ore_block_u64_8_256_terms(a[2:array_length(a,1)], b[2:array_length(b,1)]); + END IF; + + RETURN cmp_result; + END +$$ LANGUAGE plpgsql; + + +--! @brief Compare ORE block composite types +--! @internal +--! +--! Wrapper function that extracts term arrays from ORE block composite types +--! and delegates to the array comparison function. +--! +--! @param a eql_v2.ore_block_u64_8_256 First ORE block +--! @param b eql_v2.ore_block_u64_8_256 Second ORE block +--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b +--! +--! @see eql_v2.compare_ore_block_u64_8_256_terms(eql_v2.ore_block_u64_8_256_term[], eql_v2.ore_block_u64_8_256_term[]) +CREATE FUNCTION eql_v2.compare_ore_block_u64_8_256_terms(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256) +RETURNS integer AS $$ + BEGIN + RETURN eql_v2.compare_ore_block_u64_8_256_terms(a.terms, b.terms); + END +$$ LANGUAGE plpgsql; + + +--! @brief Extract variable-width CLLW ORE index term from JSONB payload +--! +--! Extracts the variable-width CLLW ORE ciphertext from the 'ocv' field of an encrypted +--! data payload. Used internally for range query comparisons. +--! +--! 
--! @param val jsonb containing encrypted EQL payload
--! @return eql_v2.ore_cllw_var_8 Variable-width CLLW ORE ciphertext
--! @throws Exception if 'ocv' field is missing when ore index is expected
--!
--! @see eql_v2.has_ore_cllw_var_8
--! @see eql_v2.compare_ore_cllw_var_8
CREATE FUNCTION eql_v2.ore_cllw_var_8(val jsonb)
  RETURNS eql_v2.ore_cllw_var_8
  IMMUTABLE STRICT PARALLEL SAFE
AS $$
  BEGIN

    -- NOTE(review): function is declared STRICT, so this NULL guard is dead code
    -- (PostgreSQL returns NULL before the body runs); harmless belt-and-braces.
    IF val IS NULL THEN
      RETURN NULL;
    END IF;

    IF NOT (eql_v2.has_ore_cllw_var_8(val)) THEN
      RAISE 'Expected a ore_cllw_var_8 index (ocv) value in json: %', val;
    END IF;

    -- 'ocv' is hex-encoded ciphertext; wrap the decoded bytes in the composite type.
    RETURN ROW(decode(val->>'ocv', 'hex'));
  END;
$$ LANGUAGE plpgsql;


--! @brief Extract variable-width CLLW ORE index term from encrypted column value
--!
--! Extracts the variable-width CLLW ORE ciphertext from an encrypted column value by accessing
--! its underlying JSONB data field.
--!
--! @param val eql_v2_encrypted Encrypted column value
--! @return eql_v2.ore_cllw_var_8 Variable-width CLLW ORE ciphertext
--!
--! @see eql_v2.ore_cllw_var_8(jsonb)
CREATE FUNCTION eql_v2.ore_cllw_var_8(val eql_v2_encrypted)
  RETURNS eql_v2.ore_cllw_var_8
  IMMUTABLE STRICT PARALLEL SAFE
AS $$
  BEGIN
    RETURN (SELECT eql_v2.ore_cllw_var_8(val.data));
  END;
$$ LANGUAGE plpgsql;


--! @brief Check if JSONB payload contains variable-width CLLW ORE index term
--!
--! Tests whether the encrypted data payload includes an 'ocv' field,
--! indicating a variable-width CLLW ORE ciphertext is available for range queries.
--!
--! @param val jsonb containing encrypted EQL payload
--! @return Boolean True if 'ocv' field is present and non-null
--!
--! @see eql_v2.ore_cllw_var_8
CREATE FUNCTION eql_v2.has_ore_cllw_var_8(val jsonb)
  RETURNS boolean
  IMMUTABLE STRICT PARALLEL SAFE
AS $$
  BEGIN
    RETURN val ->> 'ocv' IS NOT NULL;
  END;
$$ LANGUAGE plpgsql;


--! @brief Check if encrypted column value contains variable-width CLLW ORE index term
--!
--!
--! Tests whether an encrypted column value includes a variable-width CLLW ORE ciphertext
--! by checking its underlying JSONB data field.
--!
--! @param val eql_v2_encrypted Encrypted column value
--! @return Boolean True if variable-width CLLW ORE ciphertext is present
--!
--! @see eql_v2.has_ore_cllw_var_8(jsonb)
CREATE FUNCTION eql_v2.has_ore_cllw_var_8(val eql_v2_encrypted)
  RETURNS boolean
  IMMUTABLE STRICT PARALLEL SAFE
AS $$
  BEGIN
    RETURN eql_v2.has_ore_cllw_var_8(val.data);
  END;
$$ LANGUAGE plpgsql;


--! @brief Compare variable-width CLLW ORE ciphertext terms
--! @internal
--!
--! Three-way comparison of variable-width CLLW ORE ciphertexts. Compares the common
--! prefix using byte-by-byte CLLW comparison, then falls back to length comparison
--! if the common prefix is equal. Used by compare_ore_cllw_var_8 for range queries.
--!
--! @param a eql_v2.ore_cllw_var_8 First variable-width CLLW ORE ciphertext
--! @param b eql_v2.ore_cllw_var_8 Second variable-width CLLW ORE ciphertext
--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b
--!
--! @note Handles variable-length ciphertexts by comparing common prefix first
--! @note Returns NULL if either input is NULL
--!
--! @see eql_v2.compare_ore_cllw_term_bytes
--!
--! @see eql_v2.compare_ore_cllw_var_8
CREATE FUNCTION eql_v2.compare_ore_cllw_var_8_term(a eql_v2.ore_cllw_var_8, b eql_v2.ore_cllw_var_8)
RETURNS int AS $$
DECLARE
  len_a INT;
  len_b INT;
  -- length of the common part of the two bytea values
  common_len INT;
  cmp_result INT;
BEGIN
  IF a IS NULL OR b IS NULL THEN
    RETURN NULL;
  END IF;

  -- Get the lengths of both bytea inputs
  len_a := LENGTH(a.bytes);
  len_b := LENGTH(b.bytes);

  -- Handle empty cases: an empty ciphertext sorts before any non-empty one
  IF len_a = 0 AND len_b = 0 THEN
    RETURN 0;
  ELSIF len_a = 0 THEN
    RETURN -1;
  ELSIF len_b = 0 THEN
    RETURN 1;
  END IF;

  -- Find the length of the shorter bytea
  IF len_a < len_b THEN
    common_len := len_a;
  ELSE
    common_len := len_b;
  END IF;

  -- Use the compare_ore_cllw_term function to compare byte by byte
  cmp_result := eql_v2.compare_ore_cllw_term_bytes(
    SUBSTRING(a.bytes FROM 1 FOR common_len),
    SUBSTRING(b.bytes FROM 1 FOR common_len)
  );

  -- If the comparison returns 'less' or 'greater', return that result
  IF cmp_result = -1 THEN
    RETURN -1;
  ELSIF cmp_result = 1 THEN
    RETURN 1;
  END IF;

  -- If the bytea comparison is 'equal', compare lengths
  -- (the shorter value is a strict prefix of the longer, so it sorts first)
  IF len_a < len_b THEN
    RETURN -1;
  ELSIF len_a > len_b THEN
    RETURN 1;
  ELSE
    RETURN 0;
  END IF;
END;
$$ LANGUAGE plpgsql;


--! @brief Core comparison function for encrypted values
--!
--! Compares two encrypted values using their index terms without decryption.
--! This function implements all comparison operators required for btree indexing
--! (<, <=, =, >=, >).
--!
--! Index terms are checked in the following priority order:
--! 1. ore_block_u64_8_256 (Order-Revealing Encryption)
--! 2. ore_cllw_u64_8 (Order-Revealing Encryption)
--! 3. ore_cllw_var_8 (Order-Revealing Encryption)
--! 4. hmac_256 (Hash-based equality)
--! 5. blake3 (Hash-based equality)
--!
--! The first index term type present in both values is used for comparison.
--!
--! If no matching index terms are found, falls back to JSONB literal comparison
--! to ensure consistent ordering (required for btree correctness).
--!
--! @param a eql_v2_encrypted First encrypted value
--! @param b eql_v2_encrypted Second encrypted value
--! @return integer -1 if a < b, 0 if a = b, 1 if a > b
--!
--! @note Literal fallback prevents "lock BufferContent is not held" errors
--! @see eql_v2.compare_ore_block_u64_8_256
--! @see eql_v2.compare_blake3
--! @see eql_v2.compare_hmac_256
CREATE FUNCTION eql_v2.compare(a eql_v2_encrypted, b eql_v2_encrypted)
  RETURNS integer
  IMMUTABLE STRICT PARALLEL SAFE
AS $$
  BEGIN

    -- NOTE(review): the function is declared STRICT, so these NULL branches are
    -- unreachable (PostgreSQL short-circuits NULL inputs); kept for safety.
    IF a IS NULL AND b IS NULL THEN
      RETURN 0;
    END IF;

    IF a IS NULL THEN
      RETURN -1;
    END IF;

    IF b IS NULL THEN
      RETURN 1;
    END IF;

    -- Unwrap single-element STE vectors so scalar index terms are visible.
    a := eql_v2.to_ste_vec_value(a);
    b := eql_v2.to_ste_vec_value(b);

    IF eql_v2.has_ore_block_u64_8_256(a) AND eql_v2.has_ore_block_u64_8_256(b) THEN
      RETURN eql_v2.compare_ore_block_u64_8_256(a, b);
    END IF;

    IF eql_v2.has_ore_cllw_u64_8(a) AND eql_v2.has_ore_cllw_u64_8(b) THEN
      RETURN eql_v2.compare_ore_cllw_u64_8(a, b);
    END IF;

    IF eql_v2.has_ore_cllw_var_8(a) AND eql_v2.has_ore_cllw_var_8(b) THEN
      RETURN eql_v2.compare_ore_cllw_var_8(a, b);
    END IF;

    IF eql_v2.has_hmac_256(a) AND eql_v2.has_hmac_256(b) THEN
      RETURN eql_v2.compare_hmac_256(a, b);
    END IF;

    IF eql_v2.has_blake3(a) AND eql_v2.has_blake3(b) THEN
      RETURN eql_v2.compare_blake3(a, b);
    END IF;

    -- Fallback to literal comparison of the encrypted data
    -- Compare must have consistent ordering for a given state
    -- Without this text fallback, database errors with "lock BufferContent is not held"
    RETURN eql_v2.compare_literal(a, b);

  END;
$$ LANGUAGE plpgsql;



--! @brief Convert JSONB to encrypted type
--!
--! Wraps a JSONB encrypted payload into the eql_v2_encrypted composite type.
--! Used internally for type conversions and operator implementations.
--!
--!
--! @param data jsonb JSONB encrypted payload with structure: {"c": "...", "i": {...}, "k": "...", "v": "2"}
--! @return eql_v2_encrypted Encrypted value wrapped in composite type
--!
--! @note This is primarily used for implicit casts in operator expressions
--! @see eql_v2.to_jsonb
CREATE FUNCTION eql_v2.to_encrypted(data jsonb)
  RETURNS public.eql_v2_encrypted
  IMMUTABLE STRICT PARALLEL SAFE
AS $$
BEGIN
  -- NOTE(review): dead guard under STRICT; kept for safety.
  IF data IS NULL THEN
    RETURN NULL;
  END IF;

  RETURN ROW(data)::public.eql_v2_encrypted;
END;
$$ LANGUAGE plpgsql;


--! @brief Implicit cast from JSONB to encrypted type
--!
--! Enables PostgreSQL to automatically convert JSONB values to eql_v2_encrypted
--! in assignment contexts and comparison operations.
--!
--! @see eql_v2.to_encrypted(jsonb)
CREATE CAST (jsonb AS public.eql_v2_encrypted)
  WITH FUNCTION eql_v2.to_encrypted(jsonb) AS ASSIGNMENT;


--! @brief Convert text to encrypted type
--!
--! Parses a text representation of encrypted JSONB payload and wraps it
--! in the eql_v2_encrypted composite type.
--!
--! @param data text Text representation of JSONB encrypted payload
--! @return eql_v2_encrypted Encrypted value wrapped in composite type
--!
--! @note Delegates to eql_v2.to_encrypted(jsonb) after parsing text as JSON
--! @see eql_v2.to_encrypted(jsonb)
CREATE FUNCTION eql_v2.to_encrypted(data text)
  RETURNS public.eql_v2_encrypted
  IMMUTABLE STRICT PARALLEL SAFE
AS $$
BEGIN
  IF data IS NULL THEN
    RETURN NULL;
  END IF;

  RETURN eql_v2.to_encrypted(data::jsonb);
END;
$$ LANGUAGE plpgsql;


--! @brief Implicit cast from text to encrypted type
--!
--! Enables PostgreSQL to automatically convert text JSON strings to eql_v2_encrypted
--! in assignment contexts.
--!
--! @see eql_v2.to_encrypted(text)
CREATE CAST (text AS public.eql_v2_encrypted)
  WITH FUNCTION eql_v2.to_encrypted(text) AS ASSIGNMENT;



--! @brief Convert encrypted type to JSONB
--!
--!
--! Extracts the underlying JSONB payload from an eql_v2_encrypted composite type.
--! Useful for debugging or when raw encrypted payload access is needed.
--!
--! @param e eql_v2_encrypted Encrypted value to unwrap
--! @return jsonb Raw JSONB encrypted payload
--!
--! @note Returns the raw encrypted structure including ciphertext and index terms
--! @see eql_v2.to_encrypted(jsonb)
CREATE FUNCTION eql_v2.to_jsonb(e public.eql_v2_encrypted)
  RETURNS jsonb
  IMMUTABLE STRICT PARALLEL SAFE
AS $$
BEGIN
  IF e IS NULL THEN
    RETURN NULL;
  END IF;

  RETURN e.data;
END;
$$ LANGUAGE plpgsql;

--! @brief Implicit cast from encrypted type to JSONB
--!
--! Enables PostgreSQL to automatically extract the JSONB payload from
--! eql_v2_encrypted values in assignment contexts.
--!
--! @see eql_v2.to_jsonb(eql_v2_encrypted)
CREATE CAST (public.eql_v2_encrypted AS jsonb)
  WITH FUNCTION eql_v2.to_jsonb(public.eql_v2_encrypted) AS ASSIGNMENT;



--! @file config/types.sql
--! @brief Configuration state type definition
--!
--! Defines the ENUM type for tracking encryption configuration lifecycle states.
--! The configuration table uses this type to manage transitions between states
--! during setup, activation, and encryption operations.
--!
--! @note CREATE TYPE does not support IF NOT EXISTS, so wrapped in DO block
--! @note Configuration data stored as JSONB directly, not as DOMAIN
--! @see config/tables.sql


--! @brief Configuration lifecycle state
--!
--! Defines valid states for encryption configurations in the eql_v2_configuration table.
--! Configurations transition through these states during setup and activation.
--!
--! @note Only one configuration can be in 'active', 'pending', or 'encrypting' state at once
--! @see config/indexes.sql for uniqueness enforcement
--!
--! @see config/tables.sql for usage in eql_v2_configuration table
DO $$
  BEGIN
    -- Guard against re-creation: CREATE TYPE has no IF NOT EXISTS form.
    IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'eql_v2_configuration_state') THEN
      CREATE TYPE public.eql_v2_configuration_state AS ENUM ('active', 'inactive', 'encrypting', 'pending');
    END IF;
  END
$$;



--! @brief Extract Bloom filter index term from JSONB payload
--!
--! Extracts the Bloom filter array from the 'bf' field of an encrypted
--! data payload. Used internally for pattern-match queries (LIKE operator).
--!
--! @param val jsonb containing encrypted EQL payload
--! @return eql_v2.bloom_filter Bloom filter as smallint array
--! @throws Exception if 'bf' field is missing when bloom_filter index is expected
--!
--! @see eql_v2.has_bloom_filter
--! @see eql_v2."~~"
CREATE FUNCTION eql_v2.bloom_filter(val jsonb)
  RETURNS eql_v2.bloom_filter
  IMMUTABLE STRICT PARALLEL SAFE
AS $$
  BEGIN
    IF val IS NULL THEN
      RETURN NULL;
    END IF;

    IF eql_v2.has_bloom_filter(val) THEN
      -- Relies on the jsonb -> smallint cast for each array element.
      RETURN ARRAY(SELECT jsonb_array_elements(val->'bf'))::eql_v2.bloom_filter;
    END IF;

    RAISE 'Expected a match index (bf) value in json: %', val;
  END;
$$ LANGUAGE plpgsql;


--! @brief Extract Bloom filter index term from encrypted column value
--!
--! Extracts the Bloom filter from an encrypted column value by accessing
--! its underlying JSONB data field.
--!
--! @param val eql_v2_encrypted Encrypted column value
--! @return eql_v2.bloom_filter Bloom filter as smallint array
--!
--! @see eql_v2.bloom_filter(jsonb)
CREATE FUNCTION eql_v2.bloom_filter(val eql_v2_encrypted)
  RETURNS eql_v2.bloom_filter
  IMMUTABLE STRICT PARALLEL SAFE
AS $$
  BEGIN
    RETURN (SELECT eql_v2.bloom_filter(val.data));
  END;
$$ LANGUAGE plpgsql;


--! @brief Check if JSONB payload contains Bloom filter index term
--!
--! Tests whether the encrypted data payload includes a 'bf' field,
--! indicating a Bloom filter is available for pattern-match queries.
--!
--!
--! @param val jsonb containing encrypted EQL payload
--! @return Boolean True if 'bf' field is present and non-null
--!
--! @see eql_v2.bloom_filter
CREATE FUNCTION eql_v2.has_bloom_filter(val jsonb)
  RETURNS boolean
  IMMUTABLE STRICT PARALLEL SAFE
AS $$
  BEGIN
    RETURN val ->> 'bf' IS NOT NULL;
  END;
$$ LANGUAGE plpgsql;


--! @brief Check if encrypted column value contains Bloom filter index term
--!
--! Tests whether an encrypted column value includes a Bloom filter
--! by checking its underlying JSONB data field.
--!
--! @param val eql_v2_encrypted Encrypted column value
--! @return Boolean True if Bloom filter is present
--!
--! @see eql_v2.has_bloom_filter(jsonb)
CREATE FUNCTION eql_v2.has_bloom_filter(val eql_v2_encrypted)
  RETURNS boolean
  IMMUTABLE STRICT PARALLEL SAFE
AS $$
  BEGIN
    RETURN eql_v2.has_bloom_filter(val.data);
  END;
$$ LANGUAGE plpgsql;

--! @brief Fallback literal comparison for encrypted values
--! @internal
--!
--! Compares two encrypted values by their raw JSONB representation when no
--! suitable index terms are available. This ensures consistent ordering required
--! for btree correctness and prevents "lock BufferContent is not held" errors.
--!
--! Used as a last resort fallback in eql_v2.compare() when encrypted values
--! lack matching index terms (blake3, hmac_256, ore).
--!
--! @param a eql_v2_encrypted First encrypted value
--! @param b eql_v2_encrypted Second encrypted value
--! @return integer -1 if a < b, 0 if a = b, 1 if a > b
--!
--! @note This compares the encrypted payloads directly, not the plaintext values
--! @note Ordering is consistent but not meaningful for range queries
--!
--! @see eql_v2.compare
CREATE FUNCTION eql_v2.compare_literal(a eql_v2_encrypted, b eql_v2_encrypted)
  RETURNS integer
  IMMUTABLE STRICT PARALLEL SAFE
AS $$
  DECLARE
    a_data jsonb;
    b_data jsonb;
  BEGIN

    -- NOTE(review): dead branches under STRICT; kept for safety.
    IF a IS NULL AND b IS NULL THEN
      RETURN 0;
    END IF;

    IF a IS NULL THEN
      RETURN -1;
    END IF;

    IF b IS NULL THEN
      RETURN 1;
    END IF;

    a_data := a.data;
    b_data := b.data;

    -- Uses jsonb's built-in total ordering: consistent, not plaintext-meaningful.
    IF a_data < b_data THEN
      RETURN -1;
    END IF;

    IF a_data > b_data THEN
      RETURN 1;
    END IF;

    RETURN 0;
  END;
$$ LANGUAGE plpgsql;

--! @brief Less-than comparison helper for encrypted values
--! @internal
--!
--! Internal helper that delegates to eql_v2.compare for less-than testing.
--! Returns true if first value is less than second using ORE comparison.
--!
--! @param a eql_v2_encrypted First encrypted value
--! @param b eql_v2_encrypted Second encrypted value
--! @return Boolean True if a < b (compare result = -1)
--!
--! @note NOTE(review): unlike eql_v2.eq, this helper (and lte/gt/gte) is not
--! declared IMMUTABLE/STRICT/PARALLEL SAFE, so it defaults to VOLATILE —
--! confirm whether this is intentional upstream before changing.
--!
--! @see eql_v2.compare
--! @see eql_v2."<"
CREATE FUNCTION eql_v2.lt(a eql_v2_encrypted, b eql_v2_encrypted)
RETURNS boolean
AS $$
  BEGIN
    RETURN eql_v2.compare(a, b) = -1;
  END;
$$ LANGUAGE plpgsql;

--! @brief Less-than operator for encrypted values
--!
--! Implements the < operator for comparing two encrypted values using Order-Revealing
--! Encryption (ORE) index terms. Enables range queries and sorting without decryption.
--! Requires 'ore' index configuration on the column.
--!
--! @param a eql_v2_encrypted Left operand
--! @param b eql_v2_encrypted Right operand
--! @return Boolean True if a is less than b
--!
--! @example
--! -- Range query on encrypted timestamps
--! SELECT * FROM events
--! WHERE encrypted_timestamp < '2024-01-01'::timestamp::text::eql_v2_encrypted;
--!
--! -- Compare encrypted numeric columns
--! SELECT * FROM products WHERE encrypted_price < encrypted_discount_price;
--!
--! @see eql_v2.compare
--!
--! @see eql_v2.add_search_config
CREATE FUNCTION eql_v2."<"(a eql_v2_encrypted, b eql_v2_encrypted)
RETURNS boolean
AS $$
  BEGIN
    RETURN eql_v2.lt(a, b);
  END;
$$ LANGUAGE plpgsql;

CREATE OPERATOR <(
  FUNCTION=eql_v2."<",
  LEFTARG=eql_v2_encrypted,
  RIGHTARG=eql_v2_encrypted,
  COMMUTATOR = >,
  NEGATOR = >=,
  RESTRICT = scalarltsel,
  JOIN = scalarltjoinsel
);

--! @brief Less-than operator for encrypted value and JSONB
--!
--! Overload of < operator accepting JSONB on the right side. Automatically
--! casts JSONB to eql_v2_encrypted for ORE comparison.
--!
--! @param a eql_v2_encrypted Left operand (encrypted value)
--! @param b JSONB Right operand (will be cast to eql_v2_encrypted)
--! @return Boolean True if a < b
--!
--! @example
--! SELECT * FROM events WHERE encrypted_age < '18'::int::text::jsonb;
--!
--! @see eql_v2."<"(eql_v2_encrypted, eql_v2_encrypted)
CREATE FUNCTION eql_v2."<"(a eql_v2_encrypted, b jsonb)
RETURNS boolean
AS $$
  BEGIN
    RETURN eql_v2.lt(a, b::eql_v2_encrypted);
  END;
$$ LANGUAGE plpgsql;

CREATE OPERATOR <(
  FUNCTION=eql_v2."<",
  LEFTARG=eql_v2_encrypted,
  RIGHTARG=jsonb,
  COMMUTATOR = >,
  NEGATOR = >=,
  RESTRICT = scalarltsel,
  JOIN = scalarltjoinsel
);

--! @brief Less-than operator for JSONB and encrypted value
--!
--! Overload of < operator accepting JSONB on the left side. Automatically
--! casts JSONB to eql_v2_encrypted for ORE comparison.
--!
--! @param a JSONB Left operand (will be cast to eql_v2_encrypted)
--! @param b eql_v2_encrypted Right operand (encrypted value)
--! @return Boolean True if a < b
--!
--! @example
--! SELECT * FROM events WHERE '2023-01-01'::date::text::jsonb < encrypted_date;
--!
--!
--! @see eql_v2."<"(eql_v2_encrypted, eql_v2_encrypted)
CREATE FUNCTION eql_v2."<"(a jsonb, b eql_v2_encrypted)
RETURNS boolean
AS $$
  BEGIN
    RETURN eql_v2.lt(a::eql_v2_encrypted, b);
  END;
$$ LANGUAGE plpgsql;


CREATE OPERATOR <(
  FUNCTION=eql_v2."<",
  LEFTARG=jsonb,
  RIGHTARG=eql_v2_encrypted,
  COMMUTATOR = >,
  NEGATOR = >=,
  RESTRICT = scalarltsel,
  JOIN = scalarltjoinsel
);



--! @brief Less-than-or-equal comparison helper for encrypted values
--! @internal
--!
--! Internal helper that delegates to eql_v2.compare for <= testing.
--! Returns true if first value is less than or equal to second using ORE comparison.
--!
--! @param a eql_v2_encrypted First encrypted value
--! @param b eql_v2_encrypted Second encrypted value
--! @return Boolean True if a <= b (compare result <= 0)
--!
--! @see eql_v2.compare
--! @see eql_v2."<="
CREATE FUNCTION eql_v2.lte(a eql_v2_encrypted, b eql_v2_encrypted)
  RETURNS boolean
AS $$
  BEGIN
    RETURN eql_v2.compare(a, b) <= 0;
  END;
$$ LANGUAGE plpgsql;

--! @brief Less-than-or-equal operator for encrypted values
--!
--! Implements the <= operator for comparing encrypted values using ORE index terms.
--! Enables range queries with inclusive lower bounds without decryption.
--!
--! @param a eql_v2_encrypted Left operand
--! @param b eql_v2_encrypted Right operand
--! @return Boolean True if a <= b
--!
--! @example
--! -- Find records with encrypted age 18 or under
--! SELECT * FROM users WHERE encrypted_age <= '18'::int::text::eql_v2_encrypted;
--!
--! @see eql_v2.compare
--! @see eql_v2.add_search_config
CREATE FUNCTION eql_v2."<="(a eql_v2_encrypted, b eql_v2_encrypted)
RETURNS boolean
AS $$
  BEGIN
    RETURN eql_v2.lte(a, b);
  END;
$$ LANGUAGE plpgsql;

CREATE OPERATOR <=(
  FUNCTION = eql_v2."<=",
  LEFTARG = eql_v2_encrypted,
  RIGHTARG = eql_v2_encrypted,
  COMMUTATOR = >=,
  NEGATOR = >,
  RESTRICT = scalarltsel,
  JOIN = scalarltjoinsel
);

--!
--! @brief <= operator for encrypted value and JSONB
--! @see eql_v2."<="(eql_v2_encrypted, eql_v2_encrypted)
CREATE FUNCTION eql_v2."<="(a eql_v2_encrypted, b jsonb)
RETURNS boolean
AS $$
  BEGIN
    RETURN eql_v2.lte(a, b::eql_v2_encrypted);
  END;
$$ LANGUAGE plpgsql;

CREATE OPERATOR <=(
  FUNCTION = eql_v2."<=",
  LEFTARG = eql_v2_encrypted,
  RIGHTARG = jsonb,
  COMMUTATOR = >=,
  NEGATOR = >,
  RESTRICT = scalarltsel,
  JOIN = scalarltjoinsel
);

--! @brief <= operator for JSONB and encrypted value
--! @see eql_v2."<="(eql_v2_encrypted, eql_v2_encrypted)
CREATE FUNCTION eql_v2."<="(a jsonb, b eql_v2_encrypted)
RETURNS boolean
AS $$
  BEGIN
    RETURN eql_v2.lte(a::eql_v2_encrypted, b);
  END;
$$ LANGUAGE plpgsql;


CREATE OPERATOR <=(
  FUNCTION = eql_v2."<=",
  LEFTARG = jsonb,
  RIGHTARG = eql_v2_encrypted,
  COMMUTATOR = >=,
  NEGATOR = >,
  RESTRICT = scalarltsel,
  JOIN = scalarltjoinsel
);



--! @brief Equality comparison helper for encrypted values
--! @internal
--!
--! Internal helper that delegates to eql_v2.compare for equality testing.
--! Returns true if encrypted values are equal via encrypted index comparison.
--!
--! @param a eql_v2_encrypted First encrypted value
--! @param b eql_v2_encrypted Second encrypted value
--! @return Boolean True if values are equal (compare result = 0)
--!
--! @see eql_v2.compare
--! @see eql_v2."="
CREATE FUNCTION eql_v2.eq(a eql_v2_encrypted, b eql_v2_encrypted)
  RETURNS boolean
  IMMUTABLE STRICT PARALLEL SAFE
AS $$
  BEGIN
    RETURN eql_v2.compare(a, b) = 0;
  END;
$$ LANGUAGE plpgsql;

--! @brief Equality operator for encrypted values
--!
--! Implements the = operator for comparing two encrypted values using their
--! encrypted index terms (unique/blake3). Enables WHERE clause comparisons
--! without decryption.
--!
--! @param a eql_v2_encrypted Left operand
--! @param b eql_v2_encrypted Right operand
--! @return Boolean True if encrypted values are equal
--!
--!
--! @example
--! -- Compare encrypted columns
--! SELECT * FROM users WHERE encrypted_email = other_encrypted_email;
--!
--! -- Search using encrypted literal
--! SELECT * FROM users
--! WHERE encrypted_email = '{"c":"...","i":{"unique":"..."}}'::eql_v2_encrypted;
--!
--! @see eql_v2.compare
--! @see eql_v2.add_search_config
CREATE FUNCTION eql_v2."="(a eql_v2_encrypted, b eql_v2_encrypted)
  RETURNS boolean
  IMMUTABLE STRICT PARALLEL SAFE
AS $$
  BEGIN
    RETURN eql_v2.eq(a, b);
  END;
$$ LANGUAGE plpgsql;

-- HASHES/MERGES allow hash joins and merge joins on encrypted equality.
CREATE OPERATOR = (
  FUNCTION=eql_v2."=",
  LEFTARG=eql_v2_encrypted,
  RIGHTARG=eql_v2_encrypted,
  NEGATOR = <>,
  RESTRICT = eqsel,
  JOIN = eqjoinsel,
  HASHES,
  MERGES
);

--! @brief Equality operator for encrypted value and JSONB
--!
--! Overload of = operator accepting JSONB on the right side. Automatically
--! casts JSONB to eql_v2_encrypted for comparison. Useful for comparing
--! against JSONB literals or columns.
--!
--! @param a eql_v2_encrypted Left operand (encrypted value)
--! @param b JSONB Right operand (will be cast to eql_v2_encrypted)
--! @return Boolean True if values are equal
--!
--! @example
--! -- Compare encrypted column to JSONB literal
--! SELECT * FROM users
--! WHERE encrypted_email = '{"c":"...","i":{"unique":"..."}}'::jsonb;
--!
--! @see eql_v2."="(eql_v2_encrypted, eql_v2_encrypted)
CREATE FUNCTION eql_v2."="(a eql_v2_encrypted, b jsonb)
  RETURNS boolean
  IMMUTABLE STRICT PARALLEL SAFE
AS $$
  BEGIN
    RETURN eql_v2.eq(a, b::eql_v2_encrypted);
  END;
$$ LANGUAGE plpgsql;

CREATE OPERATOR = (
  FUNCTION=eql_v2."=",
  LEFTARG=eql_v2_encrypted,
  RIGHTARG=jsonb,
  NEGATOR = <>,
  RESTRICT = eqsel,
  JOIN = eqjoinsel,
  HASHES,
  MERGES
);

--! @brief Equality operator for JSONB and encrypted value
--!
--! Overload of = operator accepting JSONB on the left side. Automatically
--! casts JSONB to eql_v2_encrypted for comparison. Enables commutative
--! equality comparisons.
--!
--!
--! @param a JSONB Left operand (will be cast to eql_v2_encrypted)
--! @param b eql_v2_encrypted Right operand (encrypted value)
--! @return Boolean True if values are equal
--!
--! @example
--! -- Compare JSONB literal to encrypted column
--! SELECT * FROM users
--! WHERE '{"c":"...","i":{"unique":"..."}}'::jsonb = encrypted_email;
--!
--! @see eql_v2."="(eql_v2_encrypted, eql_v2_encrypted)
CREATE FUNCTION eql_v2."="(a jsonb, b eql_v2_encrypted)
  RETURNS boolean
  IMMUTABLE STRICT PARALLEL SAFE
AS $$
  BEGIN
    RETURN eql_v2.eq(a::eql_v2_encrypted, b);
  END;
$$ LANGUAGE plpgsql;

CREATE OPERATOR = (
  FUNCTION=eql_v2."=",
  LEFTARG=jsonb,
  RIGHTARG=eql_v2_encrypted,
  NEGATOR = <>,
  RESTRICT = eqsel,
  JOIN = eqjoinsel,
  HASHES,
  MERGES
);


--! @brief Greater-than-or-equal comparison helper for encrypted values
--! @internal
--!
--! Internal helper that delegates to eql_v2.compare for >= testing.
--! Returns true if first value is greater than or equal to second using ORE comparison.
--!
--! @param a eql_v2_encrypted First encrypted value
--! @param b eql_v2_encrypted Second encrypted value
--! @return Boolean True if a >= b (compare result >= 0)
--!
--! @see eql_v2.compare
--! @see eql_v2.">="
CREATE FUNCTION eql_v2.gte(a eql_v2_encrypted, b eql_v2_encrypted)
  RETURNS boolean
AS $$
  BEGIN
    RETURN eql_v2.compare(a, b) >= 0;
  END;
$$ LANGUAGE plpgsql;

--! @brief Greater-than-or-equal operator for encrypted values
--!
--! Implements the >= operator for comparing encrypted values using ORE index terms.
--! Enables range queries with inclusive upper bounds without decryption.
--!
--! @param a eql_v2_encrypted Left operand
--! @param b eql_v2_encrypted Right operand
--! @return Boolean True if a >= b
--!
--! @example
--! -- Find records with age 18 or over
--! SELECT * FROM users WHERE encrypted_age >= '18'::int::text::eql_v2_encrypted;
--!
--! @see eql_v2.compare
--!
--! @see eql_v2.add_search_config
CREATE FUNCTION eql_v2.">="(a eql_v2_encrypted, b eql_v2_encrypted)
  RETURNS boolean
AS $$
  BEGIN
    RETURN eql_v2.gte(a, b);
  END;
$$ LANGUAGE plpgsql;


CREATE OPERATOR >=(
  FUNCTION = eql_v2.">=",
  LEFTARG = eql_v2_encrypted,
  RIGHTARG = eql_v2_encrypted,
  COMMUTATOR = <=,
  NEGATOR = <,
  RESTRICT = scalarltsel,
  JOIN = scalarltjoinsel
);

--! @brief >= operator for encrypted value and JSONB
--! @see eql_v2.">="(eql_v2_encrypted, eql_v2_encrypted)
CREATE FUNCTION eql_v2.">="(a eql_v2_encrypted, b jsonb)
RETURNS boolean
AS $$
  BEGIN
    RETURN eql_v2.gte(a, b::eql_v2_encrypted);
  END;
$$ LANGUAGE plpgsql;

CREATE OPERATOR >=(
  FUNCTION = eql_v2.">=",
  LEFTARG = eql_v2_encrypted,
  RIGHTARG=jsonb,
  COMMUTATOR = <=,
  NEGATOR = <,
  RESTRICT = scalarltsel,
  JOIN = scalarltjoinsel
);

--! @brief >= operator for JSONB and encrypted value
--! @see eql_v2.">="(eql_v2_encrypted, eql_v2_encrypted)
CREATE FUNCTION eql_v2.">="(a jsonb, b eql_v2_encrypted)
RETURNS boolean
AS $$
  BEGIN
    RETURN eql_v2.gte(a::eql_v2_encrypted, b);
  END;
$$ LANGUAGE plpgsql;


CREATE OPERATOR >=(
  FUNCTION = eql_v2.">=",
  LEFTARG = jsonb,
  RIGHTARG =eql_v2_encrypted,
  COMMUTATOR = <=,
  NEGATOR = <,
  RESTRICT = scalarltsel,
  JOIN = scalarltjoinsel
);



--! @brief Greater-than comparison helper for encrypted values
--! @internal
--!
--! Internal helper that delegates to eql_v2.compare for greater-than testing.
--! Returns true if first value is greater than second using ORE comparison.
--!
--! @param a eql_v2_encrypted First encrypted value
--! @param b eql_v2_encrypted Second encrypted value
--! @return Boolean True if a > b (compare result = 1)
--!
--! @see eql_v2.compare
--! @see eql_v2.">"
CREATE FUNCTION eql_v2.gt(a eql_v2_encrypted, b eql_v2_encrypted)
RETURNS boolean
AS $$
  BEGIN
    RETURN eql_v2.compare(a, b) = 1;
  END;
$$ LANGUAGE plpgsql;

--!
--! @brief Greater-than operator for encrypted values
--!
--! Implements the > operator for comparing encrypted values using ORE index terms.
--! Enables range queries and sorting without decryption. Requires 'ore' index
--! configuration on the column.
--!
--! @param a eql_v2_encrypted Left operand
--! @param b eql_v2_encrypted Right operand
--! @return Boolean True if a is greater than b
--!
--! @example
--! -- Find records above threshold
--! SELECT * FROM events
--! WHERE encrypted_value > '100'::int::text::eql_v2_encrypted;
--!
--! @see eql_v2.compare
--! @see eql_v2.add_search_config
CREATE FUNCTION eql_v2.">"(a eql_v2_encrypted, b eql_v2_encrypted)
RETURNS boolean
AS $$
  BEGIN
    RETURN eql_v2.gt(a, b);
  END;
$$ LANGUAGE plpgsql;

CREATE OPERATOR >(
  FUNCTION=eql_v2.">",
  LEFTARG=eql_v2_encrypted,
  RIGHTARG=eql_v2_encrypted,
  COMMUTATOR = <,
  NEGATOR = <=,
  RESTRICT = scalarltsel,
  JOIN = scalarltjoinsel
);

--! @brief > operator for encrypted value and JSONB
--! @see eql_v2.">"(eql_v2_encrypted, eql_v2_encrypted)
CREATE FUNCTION eql_v2.">"(a eql_v2_encrypted, b jsonb)
RETURNS boolean
AS $$
  BEGIN
    RETURN eql_v2.gt(a, b::eql_v2_encrypted);
  END;
$$ LANGUAGE plpgsql;

CREATE OPERATOR >(
  FUNCTION = eql_v2.">",
  LEFTARG = eql_v2_encrypted,
  RIGHTARG = jsonb,
  COMMUTATOR = <,
  NEGATOR = <=,
  RESTRICT = scalarltsel,
  JOIN = scalarltjoinsel
);

--! @brief > operator for JSONB and encrypted value
--! @see eql_v2.">"(eql_v2_encrypted, eql_v2_encrypted)
CREATE FUNCTION eql_v2.">"(a jsonb, b eql_v2_encrypted)
RETURNS boolean
AS $$
  BEGIN
    RETURN eql_v2.gt(a::eql_v2_encrypted, b);
  END;
$$ LANGUAGE plpgsql;


CREATE OPERATOR >(
  FUNCTION = eql_v2.">",
  LEFTARG = jsonb,
  RIGHTARG = eql_v2_encrypted,
  COMMUTATOR = <,
  NEGATOR = <=,
  RESTRICT = scalarltsel,
  JOIN = scalarltjoinsel
);




--! @brief Extract STE vector index from JSONB payload
--!
--!
--! Extracts the STE (Searchable Symmetric Encryption) vector from the 'sv' field
--! of an encrypted data payload. Returns an array of encrypted values used for
--! containment queries (@>, <@). If no 'sv' field exists, wraps the entire payload
--! as a single-element array.
--!
--! @param val jsonb containing encrypted EQL payload
--! @return eql_v2_encrypted[] Array of encrypted STE vector elements
--!
--! @see eql_v2.ste_vec(eql_v2_encrypted)
--! @see eql_v2.ste_vec_contains
CREATE FUNCTION eql_v2.ste_vec(val jsonb)
  RETURNS public.eql_v2_encrypted[]
  IMMUTABLE STRICT PARALLEL SAFE
AS $$
  DECLARE
    sv jsonb;
    ary public.eql_v2_encrypted[];
  BEGIN

    -- No 'sv' field: treat the whole payload as a one-element vector.
    IF val ? 'sv' THEN
      sv := val->'sv';
    ELSE
      sv := jsonb_build_array(val);
    END IF;

    SELECT array_agg(eql_v2.to_encrypted(elem))
      INTO ary
      FROM jsonb_array_elements(sv) AS elem;

    RETURN ary;
  END;
$$ LANGUAGE plpgsql;


--! @brief Extract STE vector index from encrypted column value
--!
--! Extracts the STE vector from an encrypted column value by accessing its
--! underlying JSONB data field. Used for containment query operations.
--!
--! @param val eql_v2_encrypted Encrypted column value
--! @return eql_v2_encrypted[] Array of encrypted STE vector elements
--!
--! @see eql_v2.ste_vec(jsonb)
CREATE FUNCTION eql_v2.ste_vec(val eql_v2_encrypted)
  RETURNS public.eql_v2_encrypted[]
  IMMUTABLE STRICT PARALLEL SAFE
AS $$
  BEGIN
    RETURN (SELECT eql_v2.ste_vec(val.data));
  END;
$$ LANGUAGE plpgsql;

--! @brief Check if JSONB payload is a single-element STE vector
--!
--! Tests whether the encrypted data payload contains an 'sv' field with exactly
--! one element. Single-element STE vectors can be treated as regular encrypted values.
--!
--! @param val jsonb containing encrypted EQL payload
--! @return Boolean True if 'sv' field exists with exactly one element
--!
--!
@see eql_v2.to_ste_vec_value +CREATE FUNCTION eql_v2.is_ste_vec_value(val jsonb) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + IF val ? 'sv' THEN + RETURN jsonb_array_length(val->'sv') = 1; + END IF; + + RETURN false; + END; +$$ LANGUAGE plpgsql; + +--! @brief Check if encrypted column value is a single-element STE vector +--! +--! Tests whether an encrypted column value is a single-element STE vector +--! by checking its underlying JSONB data field. +--! +--! @param eql_v2_encrypted Encrypted column value +--! @return Boolean True if value is a single-element STE vector +--! +--! @see eql_v2.is_ste_vec_value(jsonb) +CREATE FUNCTION eql_v2.is_ste_vec_value(val eql_v2_encrypted) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN eql_v2.is_ste_vec_value(val.data); + END; +$$ LANGUAGE plpgsql; + +--! @brief Convert single-element STE vector to regular encrypted value +--! +--! Extracts the single element from a single-element STE vector and returns it +--! as a regular encrypted value, preserving metadata. If the input is not a +--! single-element STE vector, returns it unchanged. +--! +--! @param jsonb containing encrypted EQL payload +--! @return eql_v2_encrypted Regular encrypted value (unwrapped if single-element STE vector) +--! +--! @see eql_v2.is_ste_vec_value +CREATE FUNCTION eql_v2.to_ste_vec_value(val jsonb) + RETURNS eql_v2_encrypted + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + DECLARE + meta jsonb; + sv jsonb; + BEGIN + + IF val IS NULL THEN + RETURN NULL; + END IF; + + IF eql_v2.is_ste_vec_value(val) THEN + meta := eql_v2.meta_data(val); + sv := val->'sv'; + sv := sv[0]; + + RETURN eql_v2.to_encrypted(meta || sv); + END IF; + + RETURN eql_v2.to_encrypted(val); + END; +$$ LANGUAGE plpgsql; + +--! @brief Convert single-element STE vector to regular encrypted value (encrypted type) +--! +--! Converts an encrypted column value to a regular encrypted value by unwrapping +--! if it's a single-element STE vector. +--! 
+--! @param eql_v2_encrypted Encrypted column value +--! @return eql_v2_encrypted Regular encrypted value (unwrapped if single-element STE vector) +--! +--! @see eql_v2.to_ste_vec_value(jsonb) +CREATE FUNCTION eql_v2.to_ste_vec_value(val eql_v2_encrypted) + RETURNS eql_v2_encrypted + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN eql_v2.to_ste_vec_value(val.data); + END; +$$ LANGUAGE plpgsql; + +--! @brief Extract selector value from JSONB payload +--! +--! Extracts the selector ('s') field from an encrypted data payload. +--! Selectors are used to match STE vector elements during containment queries. +--! +--! @param jsonb containing encrypted EQL payload +--! @return Text The selector value +--! @throws Exception if 's' field is missing +--! +--! @see eql_v2.ste_vec_contains +CREATE FUNCTION eql_v2.selector(val jsonb) + RETURNS text + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + IF val IS NULL THEN + RETURN NULL; + END IF; + + IF val ? 's' THEN + RETURN val->>'s'; + END IF; + RAISE 'Expected a selector index (s) value in json: %', val; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Extract selector value from encrypted column value +--! +--! Extracts the selector from an encrypted column value by accessing its +--! underlying JSONB data field. +--! +--! @param eql_v2_encrypted Encrypted column value +--! @return Text The selector value +--! +--! @see eql_v2.selector(jsonb) +CREATE FUNCTION eql_v2.selector(val eql_v2_encrypted) + RETURNS text + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN (SELECT eql_v2.selector(val.data)); + END; +$$ LANGUAGE plpgsql; + + + +--! @brief Check if JSONB payload is marked as an STE vector array +--! +--! Tests whether the encrypted data payload has the 'a' (array) flag set to true, +--! indicating it represents an array for STE vector operations. +--! +--! @param jsonb containing encrypted EQL payload +--! @return Boolean True if 'a' field is present and true +--! +--! 
@see eql_v2.ste_vec +CREATE FUNCTION eql_v2.is_ste_vec_array(val jsonb) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + IF val ? 'a' THEN + RETURN (val->>'a')::boolean; + END IF; + + RETURN false; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Check if encrypted column value is marked as an STE vector array +--! +--! Tests whether an encrypted column value has the array flag set by checking +--! its underlying JSONB data field. +--! +--! @param eql_v2_encrypted Encrypted column value +--! @return Boolean True if value is marked as an STE vector array +--! +--! @see eql_v2.is_ste_vec_array(jsonb) +CREATE FUNCTION eql_v2.is_ste_vec_array(val eql_v2_encrypted) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN (SELECT eql_v2.is_ste_vec_array(val.data)); + END; +$$ LANGUAGE plpgsql; + + + +--! @brief Extract full encrypted JSONB elements as array +--! +--! Extracts all JSONB elements from the STE vector including non-deterministic fields. +--! Use jsonb_array() instead for GIN indexing and containment queries. +--! +--! @param val jsonb containing encrypted EQL payload +--! @return jsonb[] Array of full JSONB elements +--! +--! @see eql_v2.jsonb_array +CREATE FUNCTION eql_v2.jsonb_array_from_array_elements(val jsonb) +RETURNS jsonb[] +IMMUTABLE STRICT PARALLEL SAFE +LANGUAGE SQL +AS $$ + SELECT CASE + WHEN val ? 'sv' THEN + ARRAY(SELECT elem FROM jsonb_array_elements(val->'sv') AS elem) + ELSE + ARRAY[val] + END; +$$; + + +--! @brief Extract full encrypted JSONB elements as array from encrypted column +--! +--! @param val eql_v2_encrypted Encrypted column value +--! @return jsonb[] Array of full JSONB elements +--! +--! @see eql_v2.jsonb_array_from_array_elements(jsonb) +CREATE FUNCTION eql_v2.jsonb_array_from_array_elements(val eql_v2_encrypted) +RETURNS jsonb[] +IMMUTABLE STRICT PARALLEL SAFE +LANGUAGE SQL +AS $$ + SELECT eql_v2.jsonb_array_from_array_elements(val.data); +$$; + + +--! 
@brief Extract deterministic fields as array for GIN indexing +--! +--! Extracts only deterministic search term fields (s, b3, hm, ocv, ocf) from each +--! STE vector element. Excludes non-deterministic ciphertext for correct containment +--! comparison using PostgreSQL's native @> operator. +--! +--! @param val jsonb containing encrypted EQL payload +--! @return jsonb[] Array of JSONB elements with only deterministic fields +--! +--! @note Use this for GIN indexes and containment queries +--! @see eql_v2.jsonb_contains +CREATE FUNCTION eql_v2.jsonb_array(val jsonb) +RETURNS jsonb[] +IMMUTABLE STRICT PARALLEL SAFE +LANGUAGE SQL +AS $$ + SELECT ARRAY( + SELECT jsonb_object_agg(kv.key, kv.value) + FROM jsonb_array_elements( + CASE WHEN val ? 'sv' THEN val->'sv' ELSE jsonb_build_array(val) END + ) AS elem, + LATERAL jsonb_each(elem) AS kv(key, value) + WHERE kv.key IN ('s', 'b3', 'hm', 'ocv', 'ocf') + GROUP BY elem + ); +$$; + + +--! @brief Extract deterministic fields as array from encrypted column +--! +--! @param val eql_v2_encrypted Encrypted column value +--! @return jsonb[] Array of JSONB elements with only deterministic fields +--! +--! @see eql_v2.jsonb_array(jsonb) +CREATE FUNCTION eql_v2.jsonb_array(val eql_v2_encrypted) +RETURNS jsonb[] +IMMUTABLE STRICT PARALLEL SAFE +LANGUAGE SQL +AS $$ + SELECT eql_v2.jsonb_array(val.data); +$$; + + +--! @brief GIN-indexable JSONB containment check +--! +--! Checks if encrypted value 'a' contains all JSONB elements from 'b'. +--! Uses jsonb[] arrays internally for native PostgreSQL GIN index support. +--! +--! This function is designed for use with a GIN index on jsonb_array(column). +--! When combined with such an index, PostgreSQL can efficiently search large tables. +--! +--! @param a eql_v2_encrypted Container value (typically a table column) +--! @param b eql_v2_encrypted Value to search for +--! @return Boolean True if a contains all elements of b +--! +--! @example +--! 
-- Create GIN index for efficient containment queries +--! CREATE INDEX idx ON mytable USING GIN (eql_v2.jsonb_array(encrypted_col)); +--! +--! -- Query using the helper function +--! SELECT * FROM mytable WHERE eql_v2.jsonb_contains(encrypted_col, search_value); +--! +--! @see eql_v2.jsonb_array +CREATE FUNCTION eql_v2.jsonb_contains(a eql_v2_encrypted, b eql_v2_encrypted) +RETURNS boolean +IMMUTABLE STRICT PARALLEL SAFE +LANGUAGE SQL +AS $$ + SELECT eql_v2.jsonb_array(a) @> eql_v2.jsonb_array(b); +$$; + + +--! @brief GIN-indexable JSONB containment check (encrypted, jsonb) +--! +--! Checks if encrypted value 'a' contains all JSONB elements from jsonb value 'b'. +--! Uses jsonb[] arrays internally for native PostgreSQL GIN index support. +--! +--! @param a eql_v2_encrypted Container value (typically a table column) +--! @param b jsonb JSONB value to search for +--! @return Boolean True if a contains all elements of b +--! +--! @see eql_v2.jsonb_array +--! @see eql_v2.jsonb_contains(eql_v2_encrypted, eql_v2_encrypted) +CREATE FUNCTION eql_v2.jsonb_contains(a eql_v2_encrypted, b jsonb) +RETURNS boolean +IMMUTABLE STRICT PARALLEL SAFE +LANGUAGE SQL +AS $$ + SELECT eql_v2.jsonb_array(a) @> eql_v2.jsonb_array(b); +$$; + + +--! @brief GIN-indexable JSONB containment check (jsonb, encrypted) +--! +--! Checks if jsonb value 'a' contains all JSONB elements from encrypted value 'b'. +--! Uses jsonb[] arrays internally for native PostgreSQL GIN index support. +--! +--! @param a jsonb Container JSONB value +--! @param b eql_v2_encrypted Encrypted value to search for +--! @return Boolean True if a contains all elements of b +--! +--! @see eql_v2.jsonb_array +--! @see eql_v2.jsonb_contains(eql_v2_encrypted, eql_v2_encrypted) +CREATE FUNCTION eql_v2.jsonb_contains(a jsonb, b eql_v2_encrypted) +RETURNS boolean +IMMUTABLE STRICT PARALLEL SAFE +LANGUAGE SQL +AS $$ + SELECT eql_v2.jsonb_array(a) @> eql_v2.jsonb_array(b); +$$; + + +--! 
@brief GIN-indexable JSONB "is contained by" check +--! +--! Checks if all JSONB elements from 'a' are contained in 'b'. +--! Uses jsonb[] arrays internally for native PostgreSQL GIN index support. +--! +--! @param a eql_v2_encrypted Value to check (typically a table column) +--! @param b eql_v2_encrypted Container value +--! @return Boolean True if all elements of a are contained in b +--! +--! @see eql_v2.jsonb_array +--! @see eql_v2.jsonb_contains +CREATE FUNCTION eql_v2.jsonb_contained_by(a eql_v2_encrypted, b eql_v2_encrypted) +RETURNS boolean +IMMUTABLE STRICT PARALLEL SAFE +LANGUAGE SQL +AS $$ + SELECT eql_v2.jsonb_array(a) <@ eql_v2.jsonb_array(b); +$$; + + +--! @brief GIN-indexable JSONB "is contained by" check (encrypted, jsonb) +--! +--! Checks if all JSONB elements from encrypted value 'a' are contained in jsonb value 'b'. +--! Uses jsonb[] arrays internally for native PostgreSQL GIN index support. +--! +--! @param a eql_v2_encrypted Value to check (typically a table column) +--! @param b jsonb Container JSONB value +--! @return Boolean True if all elements of a are contained in b +--! +--! @see eql_v2.jsonb_array +--! @see eql_v2.jsonb_contained_by(eql_v2_encrypted, eql_v2_encrypted) +CREATE FUNCTION eql_v2.jsonb_contained_by(a eql_v2_encrypted, b jsonb) +RETURNS boolean +IMMUTABLE STRICT PARALLEL SAFE +LANGUAGE SQL +AS $$ + SELECT eql_v2.jsonb_array(a) <@ eql_v2.jsonb_array(b); +$$; + + +--! @brief GIN-indexable JSONB "is contained by" check (jsonb, encrypted) +--! +--! Checks if all JSONB elements from jsonb value 'a' are contained in encrypted value 'b'. +--! Uses jsonb[] arrays internally for native PostgreSQL GIN index support. +--! +--! @param a jsonb Value to check +--! @param b eql_v2_encrypted Container encrypted value +--! @return Boolean True if all elements of a are contained in b +--! +--! @see eql_v2.jsonb_array +--! 
@see eql_v2.jsonb_contained_by(eql_v2_encrypted, eql_v2_encrypted) +CREATE FUNCTION eql_v2.jsonb_contained_by(a jsonb, b eql_v2_encrypted) +RETURNS boolean +IMMUTABLE STRICT PARALLEL SAFE +LANGUAGE SQL +AS $$ + SELECT eql_v2.jsonb_array(a) <@ eql_v2.jsonb_array(b); +$$; + + +--! @brief Check if STE vector array contains a specific encrypted element +--! +--! Tests whether any element in the STE vector array 'a' contains the encrypted value 'b'. +--! Matching requires both the selector and encrypted value to be equal. +--! Used internally by ste_vec_contains(encrypted, encrypted) for array containment checks. +--! +--! @param eql_v2_encrypted[] STE vector array to search within +--! @param eql_v2_encrypted Encrypted element to search for +--! @return Boolean True if b is found in any element of a +--! +--! @note Compares both selector and encrypted value for match +--! +--! @see eql_v2.selector +--! @see eql_v2.ste_vec_contains(eql_v2_encrypted, eql_v2_encrypted) +CREATE FUNCTION eql_v2.ste_vec_contains(a public.eql_v2_encrypted[], b eql_v2_encrypted) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + DECLARE + result boolean; + _a public.eql_v2_encrypted; + BEGIN + + result := false; + + FOR idx IN 1..array_length(a, 1) LOOP + _a := a[idx]; + result := result OR (eql_v2.selector(_a) = eql_v2.selector(b) AND _a = b); + END LOOP; + + RETURN result; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Check if encrypted value 'a' contains all elements of encrypted value 'b' +--! +--! Performs STE vector containment comparison between two encrypted values. +--! Returns true if all elements in b's STE vector are found in a's STE vector. +--! Used internally by the @> containment operator for searchable encryption. +--! +--! @param a eql_v2_encrypted First encrypted value (container) +--! @param b eql_v2_encrypted Second encrypted value (elements to find) +--! @return Boolean True if all elements of b are contained in a +--! +--! 
@note Empty b is always contained in any a +--! @note Each element of b must match both selector and value in a +--! +--! @see eql_v2.ste_vec +--! @see eql_v2.ste_vec_contains(eql_v2_encrypted[], eql_v2_encrypted) +--! @see eql_v2."@>" +CREATE FUNCTION eql_v2.ste_vec_contains(a eql_v2_encrypted, b eql_v2_encrypted) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + DECLARE + result boolean; + sv_a public.eql_v2_encrypted[]; + sv_b public.eql_v2_encrypted[]; + _b public.eql_v2_encrypted; + BEGIN + + -- jsonb arrays of ste_vec encrypted values + sv_a := eql_v2.ste_vec(a); + sv_b := eql_v2.ste_vec(b); + + -- an empty b is always contained in a + IF array_length(sv_b, 1) IS NULL THEN + RETURN true; + END IF; + + IF array_length(sv_a, 1) IS NULL THEN + RETURN false; + END IF; + + result := true; + + -- for each element of b check if it is in a + FOR idx IN 1..array_length(sv_b, 1) LOOP + _b := sv_b[idx]; + result := result AND eql_v2.ste_vec_contains(sv_a, _b); + END LOOP; + + RETURN result; + END; +$$ LANGUAGE plpgsql; + +--! @file config/tables.sql +--! @brief Encryption configuration storage table +--! +--! Defines the main table for storing EQL v2 encryption configurations. +--! Each row represents a configuration specifying which tables/columns to encrypt +--! and what index types to use. Configurations progress through lifecycle states. +--! +--! @see config/types.sql for state ENUM definition +--! @see config/indexes.sql for state uniqueness constraints +--! @see config/constraints.sql for data validation + + +--! @brief Encryption configuration table +--! +--! Stores encryption configurations with their state and metadata. +--! The 'data' JSONB column contains the full configuration structure including +--! table/column mappings, index types, and casting rules. +--! +--! @note Only one configuration can be 'active', 'pending', or 'encrypting' at once +--! @note 'id' is auto-generated identity column +--! 
@note 'state' defaults to 'pending' for new configurations +--! @note 'data' validated by CHECK constraint (see config/constraints.sql) +CREATE TABLE IF NOT EXISTS public.eql_v2_configuration +( + id bigint GENERATED ALWAYS AS IDENTITY, + state eql_v2_configuration_state NOT NULL DEFAULT 'pending', + data jsonb, + created_at timestamptz not null default current_timestamp, + PRIMARY KEY(id) +); + + +--! @brief Initialize default configuration structure +--! @internal +--! +--! Creates a default configuration object if input is NULL. Used internally +--! by public configuration functions to ensure consistent structure. +--! +--! @param config JSONB Existing configuration or NULL +--! @return JSONB Configuration with default structure (version 1, empty tables) +CREATE FUNCTION eql_v2.config_default(config jsonb) + RETURNS jsonb + IMMUTABLE PARALLEL SAFE +AS $$ + BEGIN + IF config IS NULL THEN + SELECT jsonb_build_object('v', 1, 'tables', jsonb_build_object()) INTO config; + END IF; + RETURN config; + END; +$$ LANGUAGE plpgsql; + +--! @brief Add table to configuration if not present +--! @internal +--! +--! Ensures the specified table exists in the configuration structure. +--! Creates empty table entry if needed. Idempotent operation. +--! +--! @param table_name Text Name of table to add +--! @param config JSONB Configuration object +--! @return JSONB Updated configuration with table entry +CREATE FUNCTION eql_v2.config_add_table(table_name text, config jsonb) + RETURNS jsonb + IMMUTABLE PARALLEL SAFE +AS $$ + DECLARE + tbl jsonb; + BEGIN + IF NOT config #> array['tables'] ? table_name THEN + SELECT jsonb_insert(config, array['tables', table_name], jsonb_build_object()) INTO config; + END IF; + RETURN config; + END; +$$ LANGUAGE plpgsql; + +--! @brief Add column to table configuration if not present +--! @internal +--! +--! Ensures the specified column exists in the table's configuration structure. +--! Creates empty column entry with indexes object if needed. 
Idempotent operation. +--! +--! @param table_name Text Name of parent table +--! @param column_name Text Name of column to add +--! @param config JSONB Configuration object +--! @return JSONB Updated configuration with column entry +CREATE FUNCTION eql_v2.config_add_column(table_name text, column_name text, config jsonb) + RETURNS jsonb + IMMUTABLE PARALLEL SAFE +AS $$ + DECLARE + col jsonb; + BEGIN + IF NOT config #> array['tables', table_name] ? column_name THEN + SELECT jsonb_build_object('indexes', jsonb_build_object()) into col; + SELECT jsonb_set(config, array['tables', table_name, column_name], col) INTO config; + END IF; + RETURN config; + END; +$$ LANGUAGE plpgsql; + +--! @brief Set cast type for column in configuration +--! @internal +--! +--! Updates the cast_as field for a column, specifying the PostgreSQL type +--! that decrypted values should be cast to. +--! +--! @param table_name Text Name of parent table +--! @param column_name Text Name of column +--! @param cast_as Text PostgreSQL type for casting (e.g., 'text', 'int', 'jsonb') +--! @param config JSONB Configuration object +--! @return JSONB Updated configuration with cast_as set +CREATE FUNCTION eql_v2.config_add_cast(table_name text, column_name text, cast_as text, config jsonb) + RETURNS jsonb + IMMUTABLE PARALLEL SAFE +AS $$ + BEGIN + SELECT jsonb_set(config, array['tables', table_name, column_name, 'cast_as'], to_jsonb(cast_as)) INTO config; + RETURN config; + END; +$$ LANGUAGE plpgsql; + +--! @brief Add search index to column configuration +--! @internal +--! +--! Inserts a search index entry (unique, match, ore, ste_vec) with its options +--! into the column's indexes object. +--! +--! @param table_name Text Name of parent table +--! @param column_name Text Name of column +--! @param index_name Text Type of index to add +--! @param opts JSONB Index-specific options +--! @param config JSONB Configuration object +--! 
@return JSONB Updated configuration with index added +CREATE FUNCTION eql_v2.config_add_index(table_name text, column_name text, index_name text, opts jsonb, config jsonb) + RETURNS jsonb + IMMUTABLE PARALLEL SAFE +AS $$ + BEGIN + SELECT jsonb_insert(config, array['tables', table_name, column_name, 'indexes', index_name], opts) INTO config; + RETURN config; + END; +$$ LANGUAGE plpgsql; + +--! @brief Generate default options for match index +--! @internal +--! +--! Returns default configuration for match (LIKE) indexes: k=6, bf=2048, +--! ngram tokenizer with token_length=3, downcase filter, include_original=true. +--! +--! @return JSONB Default match index options +CREATE FUNCTION eql_v2.config_match_default() + RETURNS jsonb +LANGUAGE sql STRICT PARALLEL SAFE +BEGIN ATOMIC + SELECT jsonb_build_object( + 'k', 6, + 'bf', 2048, + 'include_original', true, + 'tokenizer', json_build_object('kind', 'ngram', 'token_length', 3), + 'token_filters', json_build_array(json_build_object('kind', 'downcase'))); +END; +-- AUTOMATICALLY GENERATED FILE +-- Source is version-template.sql + +DROP FUNCTION IF EXISTS eql_v2.version(); + +--! @file version.sql +--! @brief EQL version reporting +--! +--! This file is auto-generated from version.template during build. +--! The version string placeholder is replaced with the actual release version. + +--! @brief Get EQL library version string +--! +--! Returns the version string for the installed EQL library. +--! This value is set at build time from the project version. +--! +--! @return text Version string (e.g., "2.1.0" or "DEV" for development builds) +--! +--! @note Auto-generated during build from version.template +--! +--! @example +--! -- Check installed EQL version +--! SELECT eql_v2.version(); +--! -- Returns: '2.1.0' +CREATE FUNCTION eql_v2.version() + RETURNS text + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + SELECT 'eql-2.2.1'; +$$ LANGUAGE SQL; + + + +--! 
@brief Compare two encrypted values using variable-width CLLW ORE index terms
+--!
+--! Performs a three-way comparison (returns -1/0/1) of encrypted values using
+--! their variable-width CLLW ORE ciphertext index terms. Used internally by range operators
+--! (<, <=, >, >=) for order-revealing comparisons without decryption.
+--!
+--! @param a eql_v2_encrypted First encrypted value to compare
+--! @param b eql_v2_encrypted Second encrypted value to compare
+--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b
+--!
+--! @note NULL values are sorted before non-NULL values
+--! @note Uses variable-width CLLW ORE cryptographic protocol for secure comparisons
+--!
+--! @see eql_v2.ore_cllw_var_8
+--! @see eql_v2.has_ore_cllw_var_8
+--! @see eql_v2.compare_ore_cllw_var_8_term
+--! @see eql_v2."<"
+--! @see eql_v2.">"
+CREATE FUNCTION eql_v2.compare_ore_cllw_var_8(a eql_v2_encrypted, b eql_v2_encrypted)
+  RETURNS integer
+  IMMUTABLE STRICT PARALLEL SAFE
+AS $$
+  DECLARE
+    a_term eql_v2.ore_cllw_var_8;
+    b_term eql_v2.ore_cllw_var_8;
+  BEGIN
+
+    -- PERFORM eql_v2.log('eql_v2.compare_ore_cllw_var_8');
+    -- PERFORM eql_v2.log('a', a::text);
+    -- PERFORM eql_v2.log('b', b::text);
+
+    IF a IS NULL AND b IS NULL THEN
+      RETURN 0;
+    END IF;
+
+    IF a IS NULL THEN
+      RETURN -1;
+    END IF;
+
+    IF b IS NULL THEN
+      RETURN 1;
+    END IF;
+
+    IF eql_v2.has_ore_cllw_var_8(a) THEN
+      a_term := eql_v2.ore_cllw_var_8(a);
+    END IF;
+
+    -- NOTE: guard on b, not a — each operand's term is extracted only when
+    -- that operand actually carries the index term.
+    IF eql_v2.has_ore_cllw_var_8(b) THEN
+      b_term := eql_v2.ore_cllw_var_8(b);
+    END IF;
+
+    IF a_term IS NULL AND b_term IS NULL THEN
+      RETURN 0;
+    END IF;
+
+    IF a_term IS NULL THEN
+      RETURN -1;
+    END IF;
+
+    IF b_term IS NULL THEN
+      RETURN 1;
+    END IF;
+
+    RETURN eql_v2.compare_ore_cllw_var_8_term(a_term, b_term);
+  END;
+$$ LANGUAGE plpgsql;
+
+
+
+--! @brief Compare two encrypted values using CLLW ORE index terms
+--!
+--! Performs a three-way comparison (returns -1/0/1) of encrypted values using
+--! their CLLW ORE ciphertext index terms. Used internally by range operators
+--! (<, <=, >, >=) for order-revealing comparisons without decryption.
+--!
+--! @param a eql_v2_encrypted First encrypted value to compare
+--! @param b eql_v2_encrypted Second encrypted value to compare
+--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b
+--!
+--! @note NULL values are sorted before non-NULL values
+--! @note Uses CLLW ORE cryptographic protocol for secure comparisons
+--!
+--! @see eql_v2.ore_cllw_u64_8
+--! @see eql_v2.has_ore_cllw_u64_8
+--! @see eql_v2.compare_ore_cllw_term_bytes
+--! @see eql_v2."<"
+--! @see eql_v2.">"
+CREATE FUNCTION eql_v2.compare_ore_cllw_u64_8(a eql_v2_encrypted, b eql_v2_encrypted)
+  RETURNS integer
+  IMMUTABLE STRICT PARALLEL SAFE
+AS $$
+  DECLARE
+    a_term eql_v2.ore_cllw_u64_8;
+    b_term eql_v2.ore_cllw_u64_8;
+  BEGIN
+
+    -- PERFORM eql_v2.log('eql_v2.compare_ore_cllw_u64_8');
+    -- PERFORM eql_v2.log('a', a::text);
+    -- PERFORM eql_v2.log('b', b::text);
+
+    IF a IS NULL AND b IS NULL THEN
+      RETURN 0;
+    END IF;
+
+    IF a IS NULL THEN
+      RETURN -1;
+    END IF;
+
+    IF b IS NULL THEN
+      RETURN 1;
+    END IF;
+
+    IF eql_v2.has_ore_cllw_u64_8(a) THEN
+      a_term := eql_v2.ore_cllw_u64_8(a);
+    END IF;
+
+    -- NOTE: guard on b, not a — each operand's term is extracted only when
+    -- that operand actually carries the index term.
+    IF eql_v2.has_ore_cllw_u64_8(b) THEN
+      b_term := eql_v2.ore_cllw_u64_8(b);
+    END IF;
+
+    IF a_term IS NULL AND b_term IS NULL THEN
+      RETURN 0;
+    END IF;
+
+    IF a_term IS NULL THEN
+      RETURN -1;
+    END IF;
+
+    IF b_term IS NULL THEN
+      RETURN 1;
+    END IF;
+
+    RETURN eql_v2.compare_ore_cllw_term_bytes(a_term.bytes, b_term.bytes);
+  END;
+$$ LANGUAGE plpgsql;
+
+-- NOTE FILE IS DISABLED
+
+
+--! @brief Equality operator for ORE block types
+--! @internal
+--!
+--! Implements the = operator for direct ORE block comparisons.
+--!
+--! @param a eql_v2.ore_block_u64_8_256 Left operand
+--! @param b eql_v2.ore_block_u64_8_256 Right operand
+--! @return Boolean True if ORE blocks are equal
+--!
+--! @note FILE IS DISABLED - Not included in build
+--!
@see eql_v2.compare_ore_block_u64_8_256_terms +CREATE FUNCTION eql_v2.ore_block_u64_8_256_eq(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256) +RETURNS boolean AS $$ + SELECT eql_v2.compare_ore_block_u64_8_256_terms(a, b) = 0 +$$ LANGUAGE SQL; + + + +--! @brief Not equal operator for ORE block types +--! @internal +--! +--! Implements the <> operator for direct ORE block comparisons. +--! +--! @param a eql_v2.ore_block_u64_8_256 Left operand +--! @param b eql_v2.ore_block_u64_8_256 Right operand +--! @return Boolean True if ORE blocks are not equal +--! +--! @note FILE IS DISABLED - Not included in build +--! @see eql_v2.compare_ore_block_u64_8_256_terms +CREATE FUNCTION eql_v2.ore_block_u64_8_256_neq(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256) +RETURNS boolean AS $$ + SELECT eql_v2.compare_ore_block_u64_8_256_terms(a, b) <> 0 +$$ LANGUAGE SQL; + + + +--! @brief Less than operator for ORE block types +--! @internal +--! +--! Implements the < operator for direct ORE block comparisons. +--! +--! @param a eql_v2.ore_block_u64_8_256 Left operand +--! @param b eql_v2.ore_block_u64_8_256 Right operand +--! @return Boolean True if left operand is less than right operand +--! +--! @note FILE IS DISABLED - Not included in build +--! @see eql_v2.compare_ore_block_u64_8_256_terms +CREATE FUNCTION eql_v2.ore_block_u64_8_256_lt(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256) +RETURNS boolean AS $$ + SELECT eql_v2.compare_ore_block_u64_8_256_terms(a, b) = -1 +$$ LANGUAGE SQL; + + + +--! @brief Less than or equal operator for ORE block types +--! @internal +--! +--! Implements the <= operator for direct ORE block comparisons. +--! +--! @param a eql_v2.ore_block_u64_8_256 Left operand +--! @param b eql_v2.ore_block_u64_8_256 Right operand +--! @return Boolean True if left operand is less than or equal to right operand +--! +--! @note FILE IS DISABLED - Not included in build +--! 
@see eql_v2.compare_ore_block_u64_8_256_terms +CREATE FUNCTION eql_v2.ore_block_u64_8_256_lte(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256) +RETURNS boolean AS $$ + SELECT eql_v2.compare_ore_block_u64_8_256_terms(a, b) != 1 +$$ LANGUAGE SQL; + + + +--! @brief Greater than operator for ORE block types +--! @internal +--! +--! Implements the > operator for direct ORE block comparisons. +--! +--! @param a eql_v2.ore_block_u64_8_256 Left operand +--! @param b eql_v2.ore_block_u64_8_256 Right operand +--! @return Boolean True if left operand is greater than right operand +--! +--! @note FILE IS DISABLED - Not included in build +--! @see eql_v2.compare_ore_block_u64_8_256_terms +CREATE FUNCTION eql_v2.ore_block_u64_8_256_gt(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256) +RETURNS boolean AS $$ + SELECT eql_v2.compare_ore_block_u64_8_256_terms(a, b) = 1 +$$ LANGUAGE SQL; + + + +--! @brief Greater than or equal operator for ORE block types +--! @internal +--! +--! Implements the >= operator for direct ORE block comparisons. +--! +--! @param a eql_v2.ore_block_u64_8_256 Left operand +--! @param b eql_v2.ore_block_u64_8_256 Right operand +--! @return Boolean True if left operand is greater than or equal to right operand +--! +--! @note FILE IS DISABLED - Not included in build +--! @see eql_v2.compare_ore_block_u64_8_256_terms +CREATE FUNCTION eql_v2.ore_block_u64_8_256_gte(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256) +RETURNS boolean AS $$ + SELECT eql_v2.compare_ore_block_u64_8_256_terms(a, b) != -1 +$$ LANGUAGE SQL; + + + +--! @brief = operator for ORE block types +--! @note FILE IS DISABLED - Not included in build +CREATE OPERATOR = ( + FUNCTION=eql_v2.ore_block_u64_8_256_eq, + LEFTARG=eql_v2.ore_block_u64_8_256, + RIGHTARG=eql_v2.ore_block_u64_8_256, + NEGATOR = <>, + RESTRICT = eqsel, + JOIN = eqjoinsel, + HASHES, + MERGES +); + + + +--! @brief <> operator for ORE block types +--! 
@note FILE IS DISABLED - Not included in build +CREATE OPERATOR <> ( + FUNCTION=eql_v2.ore_block_u64_8_256_neq, + LEFTARG=eql_v2.ore_block_u64_8_256, + RIGHTARG=eql_v2.ore_block_u64_8_256, + NEGATOR = =, + RESTRICT = eqsel, + JOIN = eqjoinsel, + HASHES, + MERGES +); + + +--! @brief > operator for ORE block types +--! @note FILE IS DISABLED - Not included in build +CREATE OPERATOR > ( + FUNCTION=eql_v2.ore_block_u64_8_256_gt, + LEFTARG=eql_v2.ore_block_u64_8_256, + RIGHTARG=eql_v2.ore_block_u64_8_256, + COMMUTATOR = <, + NEGATOR = <=, + RESTRICT = scalargtsel, + JOIN = scalargtjoinsel +); + + + +--! @brief < operator for ORE block types +--! @note FILE IS DISABLED - Not included in build +CREATE OPERATOR < ( + FUNCTION=eql_v2.ore_block_u64_8_256_lt, + LEFTARG=eql_v2.ore_block_u64_8_256, + RIGHTARG=eql_v2.ore_block_u64_8_256, + COMMUTATOR = >, + NEGATOR = >=, + RESTRICT = scalarltsel, + JOIN = scalarltjoinsel +); + + + +--! @brief <= operator for ORE block types +--! @note FILE IS DISABLED - Not included in build +CREATE OPERATOR <= ( + FUNCTION=eql_v2.ore_block_u64_8_256_lte, + LEFTARG=eql_v2.ore_block_u64_8_256, + RIGHTARG=eql_v2.ore_block_u64_8_256, + COMMUTATOR = >=, + NEGATOR = >, + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel +); + + + +--! @brief >= operator for ORE block types +--! @note FILE IS DISABLED - Not included in build +CREATE OPERATOR >= ( + FUNCTION=eql_v2.ore_block_u64_8_256_gte, + LEFTARG=eql_v2.ore_block_u64_8_256, + RIGHTARG=eql_v2.ore_block_u64_8_256, + COMMUTATOR = <=, + NEGATOR = <, + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel +); +-- NOTE FILE IS DISABLED + + + +--! @brief B-tree operator family for ORE block types +--! +--! Defines the operator family for creating B-tree indexes on ORE block types. +--! +--! @note FILE IS DISABLED - Not included in build +--! @see eql_v2.ore_block_u64_8_256_operator_class +CREATE OPERATOR FAMILY eql_v2.ore_block_u64_8_256_operator_family USING btree; + +--! 
@brief B-tree operator class for ORE block encrypted values +--! +--! Defines the operator class required for creating B-tree indexes on columns +--! using the ore_block_u64_8_256 type. Enables range queries and ORDER BY on +--! ORE-encrypted data without decryption. +--! +--! Supports operators: <, <=, =, >=, > +--! Uses comparison function: compare_ore_block_u64_8_256_terms +--! +--! @note FILE IS DISABLED - Not included in build +--! +--! @example +--! -- Would be used like (if enabled): +--! CREATE INDEX ON events USING btree ( +--! (encrypted_timestamp::jsonb->'ob')::eql_v2.ore_block_u64_8_256 +--! ); +--! +--! @see CREATE OPERATOR CLASS in PostgreSQL documentation +--! @see eql_v2.compare_ore_block_u64_8_256_terms +CREATE OPERATOR CLASS eql_v2.ore_block_u64_8_256_operator_class DEFAULT FOR TYPE eql_v2.ore_block_u64_8_256 USING btree FAMILY eql_v2.ore_block_u64_8_256_operator_family AS + OPERATOR 1 <, + OPERATOR 2 <=, + OPERATOR 3 =, + OPERATOR 4 >=, + OPERATOR 5 >, + FUNCTION 1 eql_v2.compare_ore_block_u64_8_256_terms(a eql_v2.ore_block_u64_8_256, b eql_v2.ore_block_u64_8_256); + + +--! @brief Compare two encrypted values using ORE block index terms +--! +--! Performs a three-way comparison (returns -1/0/1) of encrypted values using +--! their ORE block index terms. Used internally by range operators (<, <=, >, >=) +--! for order-revealing comparisons without decryption. +--! +--! @param a eql_v2_encrypted First encrypted value to compare +--! @param b eql_v2_encrypted Second encrypted value to compare +--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b +--! +--! @note NULL values are sorted before non-NULL values +--! @note Uses ORE cryptographic protocol for secure comparisons +--! +--! @see eql_v2.ore_block_u64_8_256 +--! @see eql_v2.has_ore_block_u64_8_256 +--! @see eql_v2."<" +--! 
@see eql_v2.">" +CREATE FUNCTION eql_v2.compare_ore_block_u64_8_256(a eql_v2_encrypted, b eql_v2_encrypted) + RETURNS integer + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + DECLARE + a_term eql_v2.ore_block_u64_8_256; + b_term eql_v2.ore_block_u64_8_256; + BEGIN + + IF a IS NULL AND b IS NULL THEN + RETURN 0; + END IF; + + IF a IS NULL THEN + RETURN -1; + END IF; + + IF b IS NULL THEN + RETURN 1; + END IF; + + IF eql_v2.has_ore_block_u64_8_256(a) THEN + a_term := eql_v2.ore_block_u64_8_256(a); + END IF; + + IF eql_v2.has_ore_block_u64_8_256(a) THEN + b_term := eql_v2.ore_block_u64_8_256(b); + END IF; + + IF a_term IS NULL AND b_term IS NULL THEN + RETURN 0; + END IF; + + IF a_term IS NULL THEN + RETURN -1; + END IF; + + IF b_term IS NULL THEN + RETURN 1; + END IF; + + RETURN eql_v2.compare_ore_block_u64_8_256_terms(a_term.terms, b_term.terms); + END; +$$ LANGUAGE plpgsql; + + +--! @brief Cast text to ORE block term +--! @internal +--! +--! Converts text to bytea and wraps in ore_block_u64_8_256_term type. +--! Used internally for ORE block extraction and manipulation. +--! +--! @param t Text Text value to convert +--! @return eql_v2.ore_block_u64_8_256_term ORE term containing bytea representation +--! +--! @see eql_v2.ore_block_u64_8_256_term +CREATE FUNCTION eql_v2.text_to_ore_block_u64_8_256_term(t text) + RETURNS eql_v2.ore_block_u64_8_256_term + LANGUAGE sql IMMUTABLE STRICT PARALLEL SAFE +BEGIN ATOMIC + RETURN t::bytea; +END; + +--! @brief Implicit cast from text to ORE block term +--! +--! Defines an implicit cast allowing automatic conversion of text values +--! to ore_block_u64_8_256_term type for ORE operations. +--! +--! @see eql_v2.text_to_ore_block_u64_8_256_term +CREATE CAST (text AS eql_v2.ore_block_u64_8_256_term) + WITH FUNCTION eql_v2.text_to_ore_block_u64_8_256_term(text) AS IMPLICIT; + +--! @brief Pattern matching helper using bloom filters +--! @internal +--! +--! Internal helper for LIKE-style pattern matching on encrypted values. +--! 
Uses bloom filter index terms to test substring containment without decryption. +--! Requires 'match' index configuration on the column. +--! +--! @param a eql_v2_encrypted Haystack (value to search in) +--! @param b eql_v2_encrypted Needle (pattern to search for) +--! @return Boolean True if bloom filter of a contains bloom filter of b +--! +--! @see eql_v2."~~" +--! @see eql_v2.bloom_filter +--! @see eql_v2.add_search_config +CREATE FUNCTION eql_v2.like(a eql_v2_encrypted, b eql_v2_encrypted) +RETURNS boolean AS $$ + SELECT eql_v2.bloom_filter(a) @> eql_v2.bloom_filter(b); +$$ LANGUAGE SQL; + +--! @brief Case-insensitive pattern matching helper +--! @internal +--! +--! Internal helper for ILIKE-style case-insensitive pattern matching. +--! Case sensitivity is controlled by index configuration (token_filters with downcase). +--! This function has same implementation as like() - actual case handling is in index terms. +--! +--! @param a eql_v2_encrypted Haystack (value to search in) +--! @param b eql_v2_encrypted Needle (pattern to search for) +--! @return Boolean True if bloom filter of a contains bloom filter of b +--! +--! @note Case sensitivity depends on match index token_filters configuration +--! @see eql_v2."~~" +--! @see eql_v2.add_search_config +CREATE FUNCTION eql_v2.ilike(a eql_v2_encrypted, b eql_v2_encrypted) +RETURNS boolean AS $$ + SELECT eql_v2.bloom_filter(a) @> eql_v2.bloom_filter(b); +$$ LANGUAGE SQL; + +--! @brief LIKE operator for encrypted values (pattern matching) +--! +--! Implements the ~~ (LIKE) operator for substring/pattern matching on encrypted +--! text using bloom filter index terms. Enables WHERE col LIKE '%pattern%' queries +--! without decryption. Requires 'match' index configuration on the column. +--! +--! Pattern matching uses n-gram tokenization configured in match index. Token length +--! and filters affect matching behavior. +--! +--! @param a eql_v2_encrypted Haystack (encrypted text to search in) +--! 
@param b eql_v2_encrypted Needle (encrypted pattern to search for) +--! @return Boolean True if a contains b as substring +--! +--! @example +--! -- Search for substring in encrypted email +--! SELECT * FROM users +--! WHERE encrypted_email ~~ '%@example.com%'::text::eql_v2_encrypted; +--! +--! -- Pattern matching on encrypted names +--! SELECT * FROM customers +--! WHERE encrypted_name ~~ 'John%'::text::eql_v2_encrypted; +--! +--! @brief SQL LIKE operator (~~ operator) for encrypted text pattern matching +--! +--! @param a eql_v2_encrypted Left operand (encrypted value) +--! @param b eql_v2_encrypted Right operand (encrypted pattern) +--! @return boolean True if pattern matches +--! +--! @note Requires match index: eql_v2.add_search_config(table, column, 'match') +--! @see eql_v2.like +--! @see eql_v2.add_search_config +CREATE FUNCTION eql_v2."~~"(a eql_v2_encrypted, b eql_v2_encrypted) + RETURNS boolean +AS $$ + BEGIN + RETURN eql_v2.like(a, b); + END; +$$ LANGUAGE plpgsql; + +CREATE OPERATOR ~~( + FUNCTION=eql_v2."~~", + LEFTARG=eql_v2_encrypted, + RIGHTARG=eql_v2_encrypted, + RESTRICT = eqsel, + JOIN = eqjoinsel, + HASHES, + MERGES +); + +--! @brief Case-insensitive LIKE operator (~~*) +--! +--! Implements ~~* (ILIKE) operator for case-insensitive pattern matching. +--! Case handling depends on match index token_filters configuration (use downcase filter). +--! Same implementation as ~~, with case sensitivity controlled by index configuration. +--! +--! @param a eql_v2_encrypted Haystack +--! @param b eql_v2_encrypted Needle +--! @return Boolean True if a contains b (case-insensitive) +--! +--! @note Configure match index with downcase token filter for case-insensitivity +--! @see eql_v2."~~" +CREATE OPERATOR ~~*( + FUNCTION=eql_v2."~~", + LEFTARG=eql_v2_encrypted, + RIGHTARG=eql_v2_encrypted, + RESTRICT = eqsel, + JOIN = eqjoinsel, + HASHES, + MERGES +); + +--! @brief LIKE operator for encrypted value and JSONB +--! +--! 
Overload of ~~ operator accepting JSONB on the right side. Automatically +--! casts JSONB to eql_v2_encrypted for bloom filter pattern matching. +--! +--! @param eql_v2_encrypted Haystack (encrypted value) +--! @param b JSONB Needle (will be cast to eql_v2_encrypted) +--! @return Boolean True if a contains b as substring +--! +--! @example +--! SELECT * FROM users WHERE encrypted_email ~~ '%gmail%'::jsonb; +--! +--! @see eql_v2."~~"(eql_v2_encrypted, eql_v2_encrypted) +CREATE FUNCTION eql_v2."~~"(a eql_v2_encrypted, b jsonb) + RETURNS boolean +AS $$ + BEGIN + RETURN eql_v2.like(a, b::eql_v2_encrypted); + END; +$$ LANGUAGE plpgsql; + + +CREATE OPERATOR ~~( + FUNCTION=eql_v2."~~", + LEFTARG=eql_v2_encrypted, + RIGHTARG=jsonb, + RESTRICT = eqsel, + JOIN = eqjoinsel, + HASHES, + MERGES +); + +CREATE OPERATOR ~~*( + FUNCTION=eql_v2."~~", + LEFTARG=eql_v2_encrypted, + RIGHTARG=jsonb, + RESTRICT = eqsel, + JOIN = eqjoinsel, + HASHES, + MERGES +); + +--! @brief LIKE operator for JSONB and encrypted value +--! +--! Overload of ~~ operator accepting JSONB on the left side. Automatically +--! casts JSONB to eql_v2_encrypted for bloom filter pattern matching. +--! +--! @param a JSONB Haystack (will be cast to eql_v2_encrypted) +--! @param eql_v2_encrypted Needle (encrypted pattern) +--! @return Boolean True if a contains b as substring +--! +--! @example +--! SELECT * FROM users WHERE 'test@example.com'::jsonb ~~ encrypted_pattern; +--! +--! 
@see eql_v2."~~"(eql_v2_encrypted, eql_v2_encrypted) +CREATE FUNCTION eql_v2."~~"(a jsonb, b eql_v2_encrypted) + RETURNS boolean +AS $$ + BEGIN + RETURN eql_v2.like(a::eql_v2_encrypted, b); + END; +$$ LANGUAGE plpgsql; + + +CREATE OPERATOR ~~( + FUNCTION=eql_v2."~~", + LEFTARG=jsonb, + RIGHTARG=eql_v2_encrypted, + RESTRICT = eqsel, + JOIN = eqjoinsel, + HASHES, + MERGES +); + +CREATE OPERATOR ~~*( + FUNCTION=eql_v2."~~", + LEFTARG=jsonb, + RIGHTARG=eql_v2_encrypted, + RESTRICT = eqsel, + JOIN = eqjoinsel, + HASHES, + MERGES +); + + +-- ----------------------------------------------------------------------------- + +--! @brief Extract ORE index term for ordering encrypted values +--! +--! Helper function that extracts the ore_block_u64_8_256 index term from an encrypted value +--! for use in ORDER BY clauses when comparison operators are not appropriate or available. +--! +--! @param eql_v2_encrypted Encrypted value to extract order term from +--! @return eql_v2.ore_block_u64_8_256 ORE index term for ordering +--! +--! @example +--! -- Order encrypted values without using comparison operators +--! SELECT * FROM users ORDER BY eql_v2.order_by(encrypted_age); +--! +--! @note Requires 'ore' index configuration on the column +--! @see eql_v2.ore_block_u64_8_256 +--! @see eql_v2.add_search_config +CREATE FUNCTION eql_v2.order_by(a eql_v2_encrypted) + RETURNS eql_v2.ore_block_u64_8_256 + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN eql_v2.ore_block_u64_8_256(a); + END; +$$ LANGUAGE plpgsql; + + + + +--! @brief PostgreSQL operator class definitions for encrypted value indexing +--! +--! Defines the operator family and operator class required for btree indexing +--! of encrypted values. This enables PostgreSQL to use encrypted columns in: +--! - CREATE INDEX statements +--! - ORDER BY clauses +--! - Range queries +--! - Primary key constraints +--! +--! The operator class maps the five comparison operators (<, <=, =, >=, >) +--! 
to the eql_v2.compare() support function for btree index operations. +--! +--! @note This is the default operator class for eql_v2_encrypted type +--! @see eql_v2.compare +--! @see PostgreSQL documentation on operator classes + +-------------------- + +CREATE OPERATOR FAMILY eql_v2.encrypted_operator_family USING btree; + +CREATE OPERATOR CLASS eql_v2.encrypted_operator_class DEFAULT FOR TYPE eql_v2_encrypted USING btree FAMILY eql_v2.encrypted_operator_family AS + OPERATOR 1 <, + OPERATOR 2 <=, + OPERATOR 3 =, + OPERATOR 4 >=, + OPERATOR 5 >, + FUNCTION 1 eql_v2.compare(a eql_v2_encrypted, b eql_v2_encrypted); + + +-------------------- + +-- CREATE OPERATOR FAMILY eql_v2.encrypted_operator_ordered USING btree; + +-- CREATE OPERATOR CLASS eql_v2.encrypted_operator_ordered FOR TYPE eql_v2_encrypted USING btree FAMILY eql_v2.encrypted_operator_ordered AS +-- OPERATOR 1 <, +-- OPERATOR 2 <=, +-- OPERATOR 3 =, +-- OPERATOR 4 >=, +-- OPERATOR 5 >, +-- FUNCTION 1 eql_v2.compare_ore_block_u64_8_256(a eql_v2_encrypted, b eql_v2_encrypted); + +-------------------- + +-- CREATE OPERATOR FAMILY eql_v2.encrypted_hmac_256_operator USING btree; + +-- CREATE OPERATOR CLASS eql_v2.encrypted_hmac_256_operator FOR TYPE eql_v2_encrypted USING btree FAMILY eql_v2.encrypted_hmac_256_operator AS +-- OPERATOR 1 <, +-- OPERATOR 2 <=, +-- OPERATOR 3 =, +-- OPERATOR 4 >=, +-- OPERATOR 5 >, +-- FUNCTION 1 eql_v2.compare_hmac(a eql_v2_encrypted, b eql_v2_encrypted); + + +--! @brief Contains operator for encrypted values (@>) +--! +--! Implements the @> (contains) operator for testing if left encrypted value +--! contains the right encrypted value. Uses ste_vec (secure tree encoding vector) +--! index terms for containment testing without decryption. +--! +--! Primarily used for encrypted array or set containment queries. +--! +--! @param a eql_v2_encrypted Left operand (container) +--! @param b eql_v2_encrypted Right operand (contained value) +--! @return Boolean True if a contains b +--! 
--! @example
--! -- Check if encrypted array contains value
--! SELECT * FROM documents
--! WHERE encrypted_tags @> '["security"]'::jsonb::eql_v2_encrypted;
--!
--! @note Requires ste_vec index configuration
--! @see eql_v2.ste_vec_contains
--! @see eql_v2.add_search_config
CREATE FUNCTION eql_v2."@>"(a eql_v2_encrypted, b eql_v2_encrypted)
RETURNS boolean AS $$
  SELECT eql_v2.ste_vec_contains(a, b)
$$ LANGUAGE SQL;

CREATE OPERATOR @>(
  FUNCTION=eql_v2."@>",
  LEFTARG=eql_v2_encrypted,
  RIGHTARG=eql_v2_encrypted
);

--! @brief Contained-by operator for encrypted values (<@)
--!
--! Implements the <@ (contained-by) operator for testing if left encrypted value
--! is contained by the right encrypted value. Uses ste_vec (secure tree encoding vector)
--! index terms for containment testing without decryption. Reverse of @> operator.
--!
--! Primarily used for encrypted array or set containment queries.
--!
--! @param a eql_v2_encrypted Left operand (contained value)
--! @param b eql_v2_encrypted Right operand (container)
--! @return Boolean True if a is contained by b
--!
--! @example
--! -- Check if value is contained in encrypted array
--! SELECT * FROM documents
--! WHERE '["security"]'::jsonb::eql_v2_encrypted <@ encrypted_tags;
--!
--! @note Requires ste_vec index configuration
--! @see eql_v2.ste_vec_contains
--! @see eql_v2."@>"
--! @see eql_v2.add_search_config

CREATE FUNCTION eql_v2."<@"(a eql_v2_encrypted, b eql_v2_encrypted)
RETURNS boolean AS $$
  -- Contains with reversed arguments
  SELECT eql_v2.ste_vec_contains(b, a)
$$ LANGUAGE SQL;

CREATE OPERATOR <@(
  FUNCTION=eql_v2."<@",
  LEFTARG=eql_v2_encrypted,
  RIGHTARG=eql_v2_encrypted
);

--! @brief Not-equal comparison helper for encrypted values
--! @internal
--!
--! Internal helper that delegates to eql_v2.compare for inequality testing.
--! Returns true if encrypted values are not equal via encrypted index comparison.
--!
--!
@param a eql_v2_encrypted First encrypted value +--! @param b eql_v2_encrypted Second encrypted value +--! @return Boolean True if values are not equal (compare result <> 0) +--! +--! @see eql_v2.compare +--! @see eql_v2."<>" +CREATE FUNCTION eql_v2.neq(a eql_v2_encrypted, b eql_v2_encrypted) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN eql_v2.compare(a, b) <> 0; + END; +$$ LANGUAGE plpgsql; + +--! @brief Not-equal operator for encrypted values +--! +--! Implements the <> (not equal) operator for comparing encrypted values using their +--! encrypted index terms. Enables WHERE clause inequality comparisons without decryption. +--! +--! @param a eql_v2_encrypted Left operand +--! @param b eql_v2_encrypted Right operand +--! @return Boolean True if encrypted values are not equal +--! +--! @example +--! -- Find records with non-matching values +--! SELECT * FROM users +--! WHERE encrypted_email <> 'admin@example.com'::text::eql_v2_encrypted; +--! +--! @see eql_v2.compare +--! @see eql_v2."=" +CREATE FUNCTION eql_v2."<>"(a eql_v2_encrypted, b eql_v2_encrypted) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN eql_v2.neq(a, b ); + END; +$$ LANGUAGE plpgsql; + + +CREATE OPERATOR <> ( + FUNCTION=eql_v2."<>", + LEFTARG=eql_v2_encrypted, + RIGHTARG=eql_v2_encrypted, + NEGATOR = =, + RESTRICT = eqsel, + JOIN = eqjoinsel, + HASHES, + MERGES +); + +--! @brief <> operator for encrypted value and JSONB +--! @see eql_v2."<>"(eql_v2_encrypted, eql_v2_encrypted) +CREATE FUNCTION eql_v2."<>"(a eql_v2_encrypted, b jsonb) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN eql_v2.neq(a, b::eql_v2_encrypted); + END; +$$ LANGUAGE plpgsql; + +CREATE OPERATOR <> ( + FUNCTION=eql_v2."<>", + LEFTARG=eql_v2_encrypted, + RIGHTARG=jsonb, + NEGATOR = =, + RESTRICT = eqsel, + JOIN = eqjoinsel, + HASHES, + MERGES +); + +--! @brief <> operator for JSONB and encrypted value +--! +--! @param jsonb Plain JSONB value +--! 
@param eql_v2_encrypted Encrypted value +--! @return boolean True if values are not equal +--! +--! @see eql_v2."<>"(eql_v2_encrypted, eql_v2_encrypted) +CREATE FUNCTION eql_v2."<>"(a jsonb, b eql_v2_encrypted) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN eql_v2.neq(a::eql_v2_encrypted, b); + END; +$$ LANGUAGE plpgsql; + +CREATE OPERATOR <> ( + FUNCTION=eql_v2."<>", + LEFTARG=jsonb, + RIGHTARG=eql_v2_encrypted, + NEGATOR = =, + RESTRICT = eqsel, + JOIN = eqjoinsel, + HASHES, + MERGES +); + + + + + +--! @brief JSONB field accessor operator alias (->>) +--! +--! Implements the ->> operator as an alias of -> for encrypted JSONB data. This mirrors +--! PostgreSQL semantics where ->> returns text via implicit casts. The underlying +--! implementation delegates to eql_v2."->" and allows PostgreSQL to coerce the result. +--! +--! Provides two overloads: +--! - (eql_v2_encrypted, text) - Field name selector +--! - (eql_v2_encrypted, eql_v2_encrypted) - Encrypted selector +--! +--! @see eql_v2."->" +--! @see eql_v2.selector + +--! @brief ->> operator with text selector +--! @param eql_v2_encrypted Encrypted JSONB data +--! @param text Field name to extract +--! @return text Encrypted value at selector, implicitly cast from eql_v2_encrypted +--! @example +--! SELECT encrypted_json ->> 'field_name' FROM table; +CREATE FUNCTION eql_v2."->>"(e eql_v2_encrypted, selector text) + RETURNS text +IMMUTABLE STRICT PARALLEL SAFE +AS $$ + DECLARE + found eql_v2_encrypted; + BEGIN + -- found = eql_v2."->"(e, selector); + -- RETURN eql_v2.ciphertext(found); + RETURN eql_v2."->"(e, selector); + END; +$$ LANGUAGE plpgsql; + + +CREATE OPERATOR ->> ( + FUNCTION=eql_v2."->>", + LEFTARG=eql_v2_encrypted, + RIGHTARG=text +); + + + +--------------------------------------------------- + +--! @brief ->> operator with encrypted selector +--! @param e eql_v2_encrypted Encrypted JSONB data +--! @param selector eql_v2_encrypted Encrypted field selector +--! 
@return text Encrypted value at selector, implicitly cast from eql_v2_encrypted +--! @see eql_v2."->>"(eql_v2_encrypted, text) +CREATE FUNCTION eql_v2."->>"(e eql_v2_encrypted, selector eql_v2_encrypted) + RETURNS text + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN eql_v2."->>"(e, eql_v2.selector(selector)); + END; +$$ LANGUAGE plpgsql; + + +CREATE OPERATOR ->> ( + FUNCTION=eql_v2."->>", + LEFTARG=eql_v2_encrypted, + RIGHTARG=eql_v2_encrypted +); + +--! @brief JSONB field accessor operator for encrypted values (->) +--! +--! Implements the -> operator to access fields/elements from encrypted JSONB data. +--! Returns encrypted value matching the provided selector without decryption. +--! +--! Encrypted JSON is represented as an array of eql_v2_encrypted values in the ste_vec format. +--! Each element has a selector, ciphertext, and index terms: +--! {"sv": [{"c": "", "s": "", "b3": ""}]} +--! +--! Provides three overloads: +--! - (eql_v2_encrypted, text) - Field name selector +--! - (eql_v2_encrypted, eql_v2_encrypted) - Encrypted selector +--! - (eql_v2_encrypted, integer) - Array index selector (0-based) +--! +--! @note Operator resolution: Assignment casts are considered (PostgreSQL standard behavior). +--! To use text selector, parameter may need explicit cast to text. +--! +--! @see eql_v2.ste_vec +--! @see eql_v2.selector +--! @see eql_v2."->>" + +--! @brief -> operator with text selector +--! @param eql_v2_encrypted Encrypted JSONB data +--! @param text Field name to extract +--! @return eql_v2_encrypted Encrypted value at selector +--! @example +--! 
SELECT encrypted_json -> 'field_name' FROM table; +CREATE FUNCTION eql_v2."->"(e eql_v2_encrypted, selector text) + RETURNS eql_v2_encrypted + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + DECLARE + meta jsonb; + sv eql_v2_encrypted[]; + found jsonb; + BEGIN + + IF e IS NULL THEN + RETURN NULL; + END IF; + + -- Column identifier and version + meta := eql_v2.meta_data(e); + + sv := eql_v2.ste_vec(e); + + FOR idx IN 1..array_length(sv, 1) LOOP + if eql_v2.selector(sv[idx]) = selector THEN + found := sv[idx]; + END IF; + END LOOP; + + RETURN (meta || found)::eql_v2_encrypted; + END; +$$ LANGUAGE plpgsql; + + +CREATE OPERATOR ->( + FUNCTION=eql_v2."->", + LEFTARG=eql_v2_encrypted, + RIGHTARG=text +); + +--------------------------------------------------- + +--! @brief -> operator with encrypted selector +--! @param e eql_v2_encrypted Encrypted JSONB data +--! @param selector eql_v2_encrypted Encrypted field selector +--! @return eql_v2_encrypted Encrypted value at selector +--! @see eql_v2."->"(eql_v2_encrypted, text) +CREATE FUNCTION eql_v2."->"(e eql_v2_encrypted, selector eql_v2_encrypted) + RETURNS eql_v2_encrypted + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN eql_v2."->"(e, eql_v2.selector(selector)); + END; +$$ LANGUAGE plpgsql; + + + +CREATE OPERATOR ->( + FUNCTION=eql_v2."->", + LEFTARG=eql_v2_encrypted, + RIGHTARG=eql_v2_encrypted +); + + +--------------------------------------------------- + +--! @brief -> operator with integer array index +--! @param eql_v2_encrypted Encrypted array data +--! @param integer Array index (0-based, JSONB convention) +--! @return eql_v2_encrypted Encrypted value at array index +--! @note Array index is 0-based (JSONB standard) despite PostgreSQL arrays being 1-based +--! @example +--! SELECT encrypted_array -> 0 FROM table; +--! 
@see eql_v2.is_ste_vec_array
CREATE FUNCTION eql_v2."->"(e eql_v2_encrypted, selector integer)
  RETURNS eql_v2_encrypted
  IMMUTABLE STRICT PARALLEL SAFE
AS $$
  DECLARE
    sv eql_v2_encrypted[];
    found eql_v2_encrypted;
  BEGIN
    IF NOT eql_v2.is_ste_vec_array(e) THEN
      RETURN NULL;
    END IF;

    sv := eql_v2.ste_vec(e);

    -- PostgreSQL arrays are 1-based; JSONB array selectors are 0-based.
    -- Index the array directly instead of scanning every element (O(1) vs O(n)).
    -- array_length() is NULL for an empty array, making the condition NULL
    -- (treated as false), so out-of-range and negative selectors, and empty
    -- arrays, all preserve the original NULL return.
    IF selector >= 0 AND selector < array_length(sv, 1) THEN
      found := sv[selector + 1];
    END IF;

    RETURN found;
  END;
$$ LANGUAGE plpgsql;





CREATE OPERATOR ->(
  FUNCTION=eql_v2."->",
  LEFTARG=eql_v2_encrypted,
  RIGHTARG=integer
);


--! @file jsonb/functions.sql
--! @brief JSONB path query and array manipulation functions for encrypted data
--!
--! These functions provide PostgreSQL-compatible operations on encrypted JSONB values
--! using Structured Transparent Encryption (STE). They support:
--! - Path-based queries to extract nested encrypted values
--! - Existence checks for encrypted fields
--! - Array operations (length, elements extraction)
--!
--! @note STE stores encrypted JSONB as a vector of encrypted elements ('sv') with selectors
--! @note Functions suppress errors for missing fields, type mismatches (similar to PostgreSQL jsonpath)


--! @brief Query encrypted JSONB for elements matching selector
--!
--! Searches the Structured Transparent Encryption (STE) vector for elements matching
--! the given selector path. Returns all matching encrypted elements. If multiple
--! matches form an array, they are wrapped with array metadata.
--!
--! @param jsonb Encrypted JSONB payload containing STE vector ('sv')
--! @param text Path selector to match against encrypted elements
--! @return SETOF eql_v2_encrypted Matching encrypted elements (may return multiple rows)
--!
--! @note Returns empty set if selector is not found (does not throw exception)
--!
@note Array elements use same selector; multiple matches wrapped with 'a' flag +--! @note Returns a set containing NULL if val is NULL; returns empty set if no matches found +--! @see eql_v2.jsonb_path_query_first +--! @see eql_v2.jsonb_path_exists +CREATE FUNCTION eql_v2.jsonb_path_query(val jsonb, selector text) + RETURNS SETOF eql_v2_encrypted + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + DECLARE + sv eql_v2_encrypted[]; + found jsonb[]; + e jsonb; + meta jsonb; + ary boolean; + BEGIN + + IF val IS NULL THEN + RETURN NEXT NULL; + END IF; + + -- Column identifier and version + meta := eql_v2.meta_data(val); + + sv := eql_v2.ste_vec(val); + + FOR idx IN 1..array_length(sv, 1) LOOP + e := sv[idx]; + + IF eql_v2.selector(e) = selector THEN + found := array_append(found, e); + IF eql_v2.is_ste_vec_array(e) THEN + ary := true; + END IF; + + END IF; + END LOOP; + + IF found IS NOT NULL THEN + + IF ary THEN + -- Wrap found array elements as eql_v2_encrypted + + RETURN NEXT (meta || jsonb_build_object( + 'sv', found, + 'a', 1 + ))::eql_v2_encrypted; + + ELSE + RETURN NEXT (meta || found[1])::eql_v2_encrypted; + END IF; + + END IF; + + RETURN; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Query encrypted JSONB with encrypted selector +--! +--! Overload that accepts encrypted selector and extracts its plaintext value +--! before delegating to main jsonb_path_query implementation. +--! +--! @param val eql_v2_encrypted Encrypted JSONB value to query +--! @param selector eql_v2_encrypted Encrypted selector to match against +--! @return SETOF eql_v2_encrypted Matching encrypted elements +--! +--! @see eql_v2.jsonb_path_query(jsonb, text) +CREATE FUNCTION eql_v2.jsonb_path_query(val eql_v2_encrypted, selector eql_v2_encrypted) + RETURNS SETOF eql_v2_encrypted + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN QUERY + SELECT * FROM eql_v2.jsonb_path_query(val.data, eql_v2.selector(selector)); + END; +$$ LANGUAGE plpgsql; + + +--! 
@brief Query encrypted JSONB with text selector +--! +--! Overload that accepts encrypted JSONB value and text selector, +--! extracting the JSONB payload before querying. +--! +--! @param eql_v2_encrypted Encrypted JSONB value to query +--! @param text Path selector to match against +--! @return SETOF eql_v2_encrypted Matching encrypted elements +--! +--! @example +--! -- Query encrypted JSONB for specific field +--! SELECT * FROM eql_v2.jsonb_path_query(encrypted_document, '$.address.city'); +--! +--! @see eql_v2.jsonb_path_query(jsonb, text) +CREATE FUNCTION eql_v2.jsonb_path_query(val eql_v2_encrypted, selector text) + RETURNS SETOF eql_v2_encrypted + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN QUERY + SELECT * FROM eql_v2.jsonb_path_query(val.data, selector); + END; +$$ LANGUAGE plpgsql; + + +------------------------------------------------------------------------------------ + + +--! @brief Check if selector path exists in encrypted JSONB +--! +--! Tests whether any encrypted elements match the given selector path. +--! More efficient than jsonb_path_query when only existence check is needed. +--! +--! @param jsonb Encrypted JSONB payload to check +--! @param text Path selector to test +--! @return boolean True if matching element exists, false otherwise +--! +--! @see eql_v2.jsonb_path_query(jsonb, text) +CREATE FUNCTION eql_v2.jsonb_path_exists(val jsonb, selector text) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN EXISTS ( + SELECT eql_v2.jsonb_path_query(val, selector) + ); + END; +$$ LANGUAGE plpgsql; + + +--! @brief Check existence with encrypted selector +--! +--! Overload that accepts encrypted selector and extracts its value +--! before checking existence. +--! +--! @param val eql_v2_encrypted Encrypted JSONB value to check +--! @param selector eql_v2_encrypted Encrypted selector to test +--! @return boolean True if path exists +--! +--! 
@see eql_v2.jsonb_path_exists(jsonb, text) +CREATE FUNCTION eql_v2.jsonb_path_exists(val eql_v2_encrypted, selector eql_v2_encrypted) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN EXISTS ( + SELECT eql_v2.jsonb_path_query(val, eql_v2.selector(selector)) + ); + END; +$$ LANGUAGE plpgsql; + + +--! @brief Check existence with text selector +--! +--! Overload that accepts encrypted JSONB value and text selector. +--! +--! @param eql_v2_encrypted Encrypted JSONB value to check +--! @param text Path selector to test +--! @return boolean True if path exists +--! +--! @example +--! -- Check if encrypted document has address field +--! SELECT eql_v2.jsonb_path_exists(encrypted_document, '$.address'); +--! +--! @see eql_v2.jsonb_path_exists(jsonb, text) +CREATE FUNCTION eql_v2.jsonb_path_exists(val eql_v2_encrypted, selector text) + RETURNS boolean + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN EXISTS ( + SELECT eql_v2.jsonb_path_query(val, selector) + ); + END; +$$ LANGUAGE plpgsql; + + +------------------------------------------------------------------------------------ + + +--! @brief Get first element matching selector +--! +--! Returns only the first encrypted element matching the selector path, +--! or NULL if no match found. More efficient than jsonb_path_query when +--! only one result is needed. +--! +--! @param jsonb Encrypted JSONB payload to query +--! @param text Path selector to match +--! @return eql_v2_encrypted First matching element or NULL +--! +--! @note Uses LIMIT 1 internally for efficiency +--! @see eql_v2.jsonb_path_query(jsonb, text) +CREATE FUNCTION eql_v2.jsonb_path_query_first(val jsonb, selector text) + RETURNS eql_v2_encrypted + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN ( + SELECT e + FROM eql_v2.jsonb_path_query(val, selector) AS e + LIMIT 1 + ); + END; +$$ LANGUAGE plpgsql; + + +--! @brief Get first element with encrypted selector +--! +--! 
Overload that accepts encrypted selector and extracts its value +--! before querying for first match. +--! +--! @param val eql_v2_encrypted Encrypted JSONB value to query +--! @param selector eql_v2_encrypted Encrypted selector to match +--! @return eql_v2_encrypted First matching element or NULL +--! +--! @see eql_v2.jsonb_path_query_first(jsonb, text) +CREATE FUNCTION eql_v2.jsonb_path_query_first(val eql_v2_encrypted, selector eql_v2_encrypted) + RETURNS eql_v2_encrypted + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN ( + SELECT e + FROM eql_v2.jsonb_path_query(val.data, eql_v2.selector(selector)) AS e + LIMIT 1 + ); + END; +$$ LANGUAGE plpgsql; + + +--! @brief Get first element with text selector +--! +--! Overload that accepts encrypted JSONB value and text selector. +--! +--! @param eql_v2_encrypted Encrypted JSONB value to query +--! @param text Path selector to match +--! @return eql_v2_encrypted First matching element or NULL +--! +--! @example +--! -- Get first matching address from encrypted document +--! SELECT eql_v2.jsonb_path_query_first(encrypted_document, '$.addresses[*]'); +--! +--! @see eql_v2.jsonb_path_query_first(jsonb, text) +CREATE FUNCTION eql_v2.jsonb_path_query_first(val eql_v2_encrypted, selector text) + RETURNS eql_v2_encrypted + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN ( + SELECT e + FROM eql_v2.jsonb_path_query(val.data, selector) AS e + LIMIT 1 + ); + END; +$$ LANGUAGE plpgsql; + + + +------------------------------------------------------------------------------------ + + +--! @brief Get length of encrypted JSONB array +--! +--! Returns the number of elements in an encrypted JSONB array by counting +--! elements in the STE vector ('sv'). The encrypted value must have the +--! array flag ('a') set to true. +--! +--! @param jsonb Encrypted JSONB payload representing an array +--! @return integer Number of elements in the array +--! 
@throws Exception 'cannot get array length of a non-array' if 'a' flag is missing or not true +--! +--! @note Array flag 'a' must be present and set to true value +--! @see eql_v2.jsonb_array_elements +CREATE FUNCTION eql_v2.jsonb_array_length(val jsonb) + RETURNS integer + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + DECLARE + sv eql_v2_encrypted[]; + found eql_v2_encrypted[]; + BEGIN + + IF val IS NULL THEN + RETURN NULL; + END IF; + + IF eql_v2.is_ste_vec_array(val) THEN + sv := eql_v2.ste_vec(val); + RETURN array_length(sv, 1); + END IF; + + RAISE 'cannot get array length of a non-array'; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Get array length from encrypted type +--! +--! Overload that accepts encrypted composite type and extracts the +--! JSONB payload before computing array length. +--! +--! @param eql_v2_encrypted Encrypted array value +--! @return integer Number of elements in the array +--! @throws Exception if value is not an array +--! +--! @example +--! -- Get length of encrypted array +--! SELECT eql_v2.jsonb_array_length(encrypted_tags); +--! +--! @see eql_v2.jsonb_array_length(jsonb) +CREATE FUNCTION eql_v2.jsonb_array_length(val eql_v2_encrypted) + RETURNS integer + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN ( + SELECT eql_v2.jsonb_array_length(val.data) + ); + END; +$$ LANGUAGE plpgsql; + + + + +--! @brief Extract elements from encrypted JSONB array +--! +--! Returns each element of an encrypted JSONB array as a separate row. +--! Each element is returned as an eql_v2_encrypted value with metadata +--! preserved from the parent array. +--! +--! @param jsonb Encrypted JSONB payload representing an array +--! @return SETOF eql_v2_encrypted One row per array element +--! @throws Exception if value is not an array (missing 'a' flag) +--! +--! @note Each element inherits metadata (version, ident) from parent +--! @see eql_v2.jsonb_array_length +--! 
@see eql_v2.jsonb_array_elements_text +CREATE FUNCTION eql_v2.jsonb_array_elements(val jsonb) + RETURNS SETOF eql_v2_encrypted + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + DECLARE + sv eql_v2_encrypted[]; + meta jsonb; + item jsonb; + BEGIN + + IF NOT eql_v2.is_ste_vec_array(val) THEN + RAISE 'cannot extract elements from non-array'; + END IF; + + -- Column identifier and version + meta := eql_v2.meta_data(val); + + sv := eql_v2.ste_vec(val); + + FOR idx IN 1..array_length(sv, 1) LOOP + item = sv[idx]; + RETURN NEXT (meta || item)::eql_v2_encrypted; + END LOOP; + + RETURN; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Extract elements from encrypted array type +--! +--! Overload that accepts encrypted composite type and extracts each +--! array element as a separate row. +--! +--! @param eql_v2_encrypted Encrypted array value +--! @return SETOF eql_v2_encrypted One row per array element +--! @throws Exception if value is not an array +--! +--! @example +--! -- Expand encrypted array into rows +--! SELECT * FROM eql_v2.jsonb_array_elements(encrypted_tags); +--! +--! @see eql_v2.jsonb_array_elements(jsonb) +CREATE FUNCTION eql_v2.jsonb_array_elements(val eql_v2_encrypted) + RETURNS SETOF eql_v2_encrypted + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN QUERY + SELECT * FROM eql_v2.jsonb_array_elements(val.data); + END; +$$ LANGUAGE plpgsql; + + + +--! @brief Extract encrypted array elements as ciphertext +--! +--! Returns each element of an encrypted JSONB array as its raw ciphertext +--! value (text representation). Unlike jsonb_array_elements, this returns +--! only the ciphertext 'c' field without metadata. +--! +--! @param jsonb Encrypted JSONB payload representing an array +--! @return SETOF text One ciphertext string per array element +--! @throws Exception if value is not an array (missing 'a' flag) +--! +--! @note Returns ciphertext only, not full encrypted structure +--! 
@see eql_v2.jsonb_array_elements +CREATE FUNCTION eql_v2.jsonb_array_elements_text(val jsonb) + RETURNS SETOF text + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + DECLARE + sv eql_v2_encrypted[]; + found eql_v2_encrypted[]; + BEGIN + IF NOT eql_v2.is_ste_vec_array(val) THEN + RAISE 'cannot extract elements from non-array'; + END IF; + + sv := eql_v2.ste_vec(val); + + FOR idx IN 1..array_length(sv, 1) LOOP + RETURN NEXT eql_v2.ciphertext(sv[idx]); + END LOOP; + + RETURN; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Extract array elements as ciphertext from encrypted type +--! +--! Overload that accepts encrypted composite type and extracts each +--! array element's ciphertext as text. +--! +--! @param eql_v2_encrypted Encrypted array value +--! @return SETOF text One ciphertext string per array element +--! @throws Exception if value is not an array +--! +--! @example +--! -- Get ciphertext of each array element +--! SELECT * FROM eql_v2.jsonb_array_elements_text(encrypted_tags); +--! +--! @see eql_v2.jsonb_array_elements_text(jsonb) +CREATE FUNCTION eql_v2.jsonb_array_elements_text(val eql_v2_encrypted) + RETURNS SETOF text + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN QUERY + SELECT * FROM eql_v2.jsonb_array_elements_text(val.data); + END; +$$ LANGUAGE plpgsql; + + +--! @brief Compare two encrypted values using HMAC-SHA256 index terms +--! +--! Performs a three-way comparison (returns -1/0/1) of encrypted values using +--! their HMAC-SHA256 hash index terms. Used internally by the equality operator (=) +--! for exact-match queries without decryption. +--! +--! @param a eql_v2_encrypted First encrypted value to compare +--! @param b eql_v2_encrypted Second encrypted value to compare +--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b +--! +--! @note NULL values are sorted before non-NULL values +--! @note Comparison uses underlying text type ordering of HMAC-SHA256 hashes +--! +--! @see eql_v2.hmac_256 +--! @see eql_v2.has_hmac_256 +--! 
@see eql_v2."=" +CREATE FUNCTION eql_v2.compare_hmac_256(a eql_v2_encrypted, b eql_v2_encrypted) + RETURNS integer + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + DECLARE + a_term eql_v2.hmac_256; + b_term eql_v2.hmac_256; + BEGIN + + IF a IS NULL AND b IS NULL THEN + RETURN 0; + END IF; + + IF a IS NULL THEN + RETURN -1; + END IF; + + IF b IS NULL THEN + RETURN 1; + END IF; + + IF eql_v2.has_hmac_256(a) THEN + a_term = eql_v2.hmac_256(a); + END IF; + + IF eql_v2.has_hmac_256(b) THEN + b_term = eql_v2.hmac_256(b); + END IF; + + IF a_term IS NULL AND b_term IS NULL THEN + RETURN 0; + END IF; + + IF a_term IS NULL THEN + RETURN -1; + END IF; + + IF b_term IS NULL THEN + RETURN 1; + END IF; + + -- Using the underlying text type comparison + IF a_term = b_term THEN + RETURN 0; + END IF; + + IF a_term < b_term THEN + RETURN -1; + END IF; + + IF a_term > b_term THEN + RETURN 1; + END IF; + + END; +$$ LANGUAGE plpgsql; +--! @file encryptindex/functions.sql +--! @brief Configuration lifecycle and column encryption management +--! +--! Provides functions for managing encryption configuration transitions: +--! - Comparing configurations to identify changes +--! - Identifying columns needing encryption +--! - Creating and renaming encrypted columns during initial setup +--! - Tracking encryption progress +--! +--! These functions support the workflow of activating a pending configuration +--! and performing the initial encryption of plaintext columns. + + +--! @brief Compare two configurations and find differences +--! @internal +--! +--! Returns table/column pairs where configuration differs between two configs. +--! Used to identify which columns need encryption when activating a pending config. +--! +--! @param a jsonb First configuration to compare +--! @param b jsonb Second configuration to compare +--! @return TABLE(table_name text, column_name text) Columns with differing configuration +--! +--! @note Compares configuration structure, not just presence/absence +--! 
@see eql_v2.select_pending_columns +CREATE FUNCTION eql_v2.diff_config(a JSONB, b JSONB) + RETURNS TABLE(table_name TEXT, column_name TEXT) +IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + RETURN QUERY + WITH table_keys AS ( + SELECT jsonb_object_keys(a->'tables') AS key + UNION + SELECT jsonb_object_keys(b->'tables') AS key + ), + column_keys AS ( + SELECT tk.key AS table_key, jsonb_object_keys(a->'tables'->tk.key) AS column_key + FROM table_keys tk + UNION + SELECT tk.key AS table_key, jsonb_object_keys(b->'tables'->tk.key) AS column_key + FROM table_keys tk + ) + SELECT + ck.table_key AS table_name, + ck.column_key AS column_name + FROM + column_keys ck + WHERE + (a->'tables'->ck.table_key->ck.column_key IS DISTINCT FROM b->'tables'->ck.table_key->ck.column_key); + END; +$$ LANGUAGE plpgsql; + + +--! @brief Get columns with pending configuration changes +--! +--! Compares 'pending' and 'active' configurations to identify columns that need +--! encryption or re-encryption. Returns columns where configuration differs. +--! +--! @return TABLE(table_name text, column_name text) Columns needing encryption +--! @throws Exception if no pending configuration exists +--! +--! @note Treats missing active config as empty config +--! @see eql_v2.diff_config +--! 
@see eql_v2.select_target_columns +CREATE FUNCTION eql_v2.select_pending_columns() + RETURNS TABLE(table_name TEXT, column_name TEXT) +AS $$ + DECLARE + active JSONB; + pending JSONB; + config_id BIGINT; + BEGIN + SELECT data INTO active FROM eql_v2_configuration WHERE state = 'active'; + + -- set default config + IF active IS NULL THEN + active := '{}'; + END IF; + + SELECT id, data INTO config_id, pending FROM eql_v2_configuration WHERE state = 'pending'; + + -- set default config + IF config_id IS NULL THEN + RAISE EXCEPTION 'No pending configuration exists to encrypt'; + END IF; + + RETURN QUERY + SELECT d.table_name, d.column_name FROM eql_v2.diff_config(active, pending) as d; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Map pending columns to their encrypted target columns +--! +--! For each column with pending configuration, identifies the corresponding +--! encrypted column. During initial encryption, target is '{column_name}_encrypted'. +--! Returns NULL for target_column if encrypted column doesn't exist yet. +--! +--! @return TABLE(table_name text, column_name text, target_column text) Column mappings +--! +--! @note Target column is NULL if no column exists matching either 'column_name' or 'column_name_encrypted' with type eql_v2_encrypted +--! @note The LEFT JOIN checks both original and '_encrypted' suffix variations with type verification +--! @see eql_v2.select_pending_columns +--! @see eql_v2.create_encrypted_columns +CREATE FUNCTION eql_v2.select_target_columns() + RETURNS TABLE(table_name TEXT, column_name TEXT, target_column TEXT) + STABLE STRICT PARALLEL SAFE +AS $$ + SELECT + c.table_name, + c.column_name, + s.column_name as target_column + FROM + eql_v2.select_pending_columns() c + LEFT JOIN information_schema.columns s ON + s.table_name = c.table_name AND + (s.column_name = c.column_name OR s.column_name = c.column_name || '_encrypted') AND + s.udt_name = 'eql_v2_encrypted'; +$$ LANGUAGE sql; + + +--! 
@brief Check if database is ready for encryption
+--!
+--! Verifies that all columns with pending configuration have corresponding
+--! encrypted target columns created. Returns true if encryption can proceed.
+--!
+--! @return boolean True if all pending columns have target encrypted columns
+--!
+--! @note Returns false if any pending column lacks encrypted column
+--! @note Vacuously true when the pending diff is empty; callers such as
+--!       eql_v2.migrate_config already require a pending configuration to exist
+--! @see eql_v2.select_target_columns
+--! @see eql_v2.create_encrypted_columns
+CREATE FUNCTION eql_v2.ready_for_encryption()
+  RETURNS BOOLEAN
+  STABLE STRICT PARALLEL SAFE
+AS $$
+  -- Ready only when NO pending column is missing its encrypted target.
+  -- The previous form, EXISTS (... WHERE target_column IS NOT NULL), returned
+  -- true as soon as ANY single column had a target, contradicting the
+  -- documented contract and the error raised by eql_v2.migrate_config
+  -- ('Some pending columns do not have an encrypted target').
+  SELECT NOT EXISTS (
+    SELECT *
+    FROM eql_v2.select_target_columns() AS c
+    WHERE c.target_column IS NULL);
+$$ LANGUAGE sql;
+
+
+--! @brief Create encrypted columns for initial encryption
+--!
+--! For each plaintext column with pending configuration that lacks an encrypted
+--! target column, creates a new column '{column_name}_encrypted' of type
+--! eql_v2_encrypted. This prepares the database schema for initial encryption.
+--!
+--! @return TABLE(table_name text, column_name text) Created encrypted columns
+--!
+--! @warning Executes dynamic DDL (ALTER TABLE ADD COLUMN) - modifies database schema
+--! @note Only creates columns that don't already exist
+--! @see eql_v2.select_target_columns
+--! @see eql_v2.rename_encrypted_columns
+CREATE FUNCTION eql_v2.create_encrypted_columns()
+  RETURNS TABLE(table_name TEXT, column_name TEXT)
+AS $$
+  BEGIN
+    FOR table_name, column_name IN
+      SELECT c.table_name, (c.column_name || '_encrypted') FROM eql_v2.select_target_columns() AS c WHERE c.target_column IS NULL
+    LOOP
+      EXECUTE format('ALTER TABLE %I ADD column %I eql_v2_encrypted;', table_name, column_name);
+      RETURN NEXT;
+    END LOOP;
+  END;
+$$ LANGUAGE plpgsql;
+
+
+--! @brief Finalize initial encryption by renaming columns
+--!
+--! After initial encryption completes, renames columns to complete the transition:
+--! - Plaintext column '{column_name}' → '{column_name}_plaintext'
+--! 
- Encrypted column '{column_name}_encrypted' → '{column_name}' +--! +--! This makes the encrypted column the primary column with the original name. +--! +--! @return TABLE(table_name text, column_name text, target_column text) Renamed columns +--! +--! @warning Executes dynamic DDL (ALTER TABLE RENAME COLUMN) - modifies database schema +--! @note Only renames columns where target is '{column_name}_encrypted' +--! @see eql_v2.create_encrypted_columns +CREATE FUNCTION eql_v2.rename_encrypted_columns() + RETURNS TABLE(table_name TEXT, column_name TEXT, target_column TEXT) +AS $$ + BEGIN + FOR table_name, column_name, target_column IN + SELECT * FROM eql_v2.select_target_columns() as c WHERE c.target_column = c.column_name || '_encrypted' + LOOP + EXECUTE format('ALTER TABLE %I RENAME %I TO %I;', table_name, column_name, column_name || '_plaintext'); + EXECUTE format('ALTER TABLE %I RENAME %I TO %I;', table_name, target_column, column_name); + RETURN NEXT; + END LOOP; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Count rows encrypted with active configuration +--! @internal +--! +--! Counts rows in a table where the encrypted column was encrypted using +--! the currently active configuration. Used to track encryption progress. +--! +--! @param table_name text Name of table to check +--! @param column_name text Name of encrypted column to check +--! @return bigint Count of rows encrypted with active configuration +--! +--! @note The 'v' field in encrypted payloads stores the payload version ("2"), not the configuration ID +--! 
@note Configuration tracking mechanism is implementation-specific
+--! @warning NOTE(review): the WHERE clause compares the payload 'v' field
+--!          (which stores the payload version "2" per the note above) against
+--!          the active configuration id cast to text, so this count is unlikely
+--!          to ever match rows -- confirm the intended tracking field before
+--!          relying on this progress counter
+CREATE FUNCTION eql_v2.count_encrypted_with_active_config(table_name TEXT, column_name TEXT)
+  RETURNS BIGINT
+AS $$
+DECLARE
+  result BIGINT;
+BEGIN
+  -- table_name is interpolated with %I (was %s): %s performed no identifier
+  -- quoting and allowed SQL injection via the table_name argument. %I matches
+  -- the quoting used by eql_v2.create_encrypted_columns. Note: schema-qualified
+  -- input (e.g. 'public.users') must now be passed as a bare table name.
+  EXECUTE format(
+    'SELECT COUNT(%I) FROM %I t WHERE %I->>%L = (SELECT id::TEXT FROM eql_v2_configuration WHERE state = %L)',
+    column_name, table_name, column_name, 'v', 'active'
+  )
+  INTO result;
+  RETURN result;
+END;
+$$ LANGUAGE plpgsql;
+
+
+
+--! @brief Validate presence of ident field in encrypted payload
+--! @internal
+--!
+--! Checks that the encrypted JSONB payload contains the required 'i' (ident) field.
+--! The ident field tracks which table and column the encrypted value belongs to.
+--!
+--! @param jsonb Encrypted payload to validate
+--! @return Boolean True if 'i' field is present
+--! @throws Exception if 'i' field is missing
+--!
+--! @note Used in CHECK constraints to ensure payload structure
+--! @see eql_v2.check_encrypted
+CREATE FUNCTION eql_v2._encrypted_check_i(val jsonb)
+  RETURNS boolean
+AS $$
+  BEGIN
+    IF val ? 'i' THEN
+      RETURN true;
+    END IF;
+    RAISE 'Encrypted column missing ident (i) field: %', val;
+  END;
+$$ LANGUAGE plpgsql;
+
+
+--! @brief Validate table and column fields in ident
+--! @internal
+--!
+--! Checks that the 'i' (ident) field contains both 't' (table) and 'c' (column)
+--! subfields, which identify the origin of the encrypted value.
+--!
+--! @param jsonb Encrypted payload to validate
+--! @return Boolean True if both 't' and 'c' subfields are present
+--! @throws Exception if 't' or 'c' subfields are missing
+--!
+--! @note Used in CHECK constraints to ensure payload structure
+--! @see eql_v2.check_encrypted
+CREATE FUNCTION eql_v2._encrypted_check_i_ct(val jsonb)
+  RETURNS boolean
+AS $$
+  BEGIN
+    IF (val->'i' ?& array['t', 'c']) THEN
+      RETURN true;
+    END IF;
+    RAISE 'Encrypted column ident (i) missing table (t) or column (c) fields: %', val;
+  END;
+$$ LANGUAGE plpgsql;
+
+--! 
@brief Validate version field in encrypted payload
+--! @internal
+--!
+--! Checks that the encrypted payload has version field 'v' set to '2',
+--! the current EQL v2 payload version.
+--!
+--! @param jsonb Encrypted payload to validate
+--! @return Boolean True if 'v' field is present and equals '2'
+--! @throws Exception if 'v' field is missing or not '2'
+--!
+--! @note Used in CHECK constraints to ensure payload structure
+--! @see eql_v2.check_encrypted
+CREATE FUNCTION eql_v2._encrypted_check_v(val jsonb)
+  RETURNS boolean
+AS $$
+  BEGIN
+    IF (val ? 'v') THEN
+      -- IS DISTINCT FROM also rejects a JSON null version ("v": null), which
+      -- the previous <> comparison silently accepted (NULL <> '2' is NULL).
+      IF val->>'v' IS DISTINCT FROM '2' THEN
+        -- A bare RAISE defaults to EXCEPTION level, so control never returns;
+        -- the unreachable RETURN false that followed it has been removed.
+        RAISE 'Expected encrypted column version (v) 2';
+      END IF;
+
+      RETURN true;
+    END IF;
+    RAISE 'Encrypted column missing version (v) field: %', val;
+  END;
+$$ LANGUAGE plpgsql;
+
+
+--! @brief Validate ciphertext field in encrypted payload
+--! @internal
+--!
+--! Checks that the encrypted payload contains the required 'c' (ciphertext) field
+--! which stores the encrypted data.
+--!
+--! @param jsonb Encrypted payload to validate
+--! @return Boolean True if 'c' field is present
+--! @throws Exception if 'c' field is missing
+--!
+--! @note Used in CHECK constraints to ensure payload structure
+--! @see eql_v2.check_encrypted
+CREATE FUNCTION eql_v2._encrypted_check_c(val jsonb)
+  RETURNS boolean
+AS $$
+  BEGIN
+    IF (val ? 'c') THEN
+      RETURN true;
+    END IF;
+    RAISE 'Encrypted column missing ciphertext (c) field: %', val;
+  END;
+$$ LANGUAGE plpgsql;
+
+
+--! @brief Validate complete encrypted payload structure
+--!
+--! Comprehensive validation function that checks all required fields in an
+--! encrypted JSONB payload: version ('v'), ciphertext ('c'), ident ('i'),
+--! and ident subfields ('t', 'c').
+--!
+--! This function is used in CHECK constraints to ensure encrypted column
+--! data integrity at the database level.
+--!
+--! @param jsonb Encrypted payload to validate
+--! @return Boolean True if all structure checks pass
+--! 
@throws Exception if any required field is missing or invalid +--! +--! @example +--! -- Add validation constraint to encrypted column +--! ALTER TABLE users ADD CONSTRAINT check_email_encrypted +--! CHECK (eql_v2.check_encrypted(encrypted_email::jsonb)); +--! +--! @see eql_v2._encrypted_check_v +--! @see eql_v2._encrypted_check_c +--! @see eql_v2._encrypted_check_i +--! @see eql_v2._encrypted_check_i_ct +CREATE FUNCTION eql_v2.check_encrypted(val jsonb) + RETURNS BOOLEAN +LANGUAGE sql IMMUTABLE STRICT PARALLEL SAFE +BEGIN ATOMIC + RETURN ( + eql_v2._encrypted_check_v(val) AND + eql_v2._encrypted_check_c(val) AND + eql_v2._encrypted_check_i(val) AND + eql_v2._encrypted_check_i_ct(val) + ); +END; + + +--! @brief Validate encrypted composite type structure +--! +--! Validates an eql_v2_encrypted composite type by checking its underlying +--! JSONB payload. Delegates to eql_v2.check_encrypted(jsonb). +--! +--! @param eql_v2_encrypted Encrypted value to validate +--! @return Boolean True if structure is valid +--! @throws Exception if any required field is missing or invalid +--! +--! @see eql_v2.check_encrypted(jsonb) +CREATE FUNCTION eql_v2.check_encrypted(val eql_v2_encrypted) + RETURNS BOOLEAN +LANGUAGE sql IMMUTABLE STRICT PARALLEL SAFE +BEGIN ATOMIC + RETURN eql_v2.check_encrypted(val.data); +END; + + +-- Aggregate functions for ORE + +--! @brief State transition function for min aggregate +--! @internal +--! +--! Returns the smaller of two encrypted values for use in MIN aggregate. +--! Comparison uses ORE index terms without decryption. +--! +--! @param a eql_v2_encrypted First encrypted value +--! @param b eql_v2_encrypted Second encrypted value +--! @return eql_v2_encrypted The smaller of the two values +--! +--! @see eql_v2.min(eql_v2_encrypted) +CREATE FUNCTION eql_v2.min(a eql_v2_encrypted, b eql_v2_encrypted) + RETURNS eql_v2_encrypted +STRICT +AS $$ + BEGIN + IF a < b THEN + RETURN a; + ELSE + RETURN b; + END IF; + END; +$$ LANGUAGE plpgsql; + + +--! 
@brief Find minimum encrypted value in a group +--! +--! Aggregate function that returns the minimum encrypted value in a group +--! using ORE index term comparisons without decryption. +--! +--! @param input eql_v2_encrypted Encrypted values to aggregate +--! @return eql_v2_encrypted Minimum value in the group +--! +--! @example +--! -- Find minimum age per department +--! SELECT department, eql_v2.min(encrypted_age) +--! FROM employees +--! GROUP BY department; +--! +--! @note Requires 'ore' index configuration on the column +--! @see eql_v2.min(eql_v2_encrypted, eql_v2_encrypted) +CREATE AGGREGATE eql_v2.min(eql_v2_encrypted) +( + sfunc = eql_v2.min, + stype = eql_v2_encrypted +); + + +--! @brief State transition function for max aggregate +--! @internal +--! +--! Returns the larger of two encrypted values for use in MAX aggregate. +--! Comparison uses ORE index terms without decryption. +--! +--! @param a eql_v2_encrypted First encrypted value +--! @param b eql_v2_encrypted Second encrypted value +--! @return eql_v2_encrypted The larger of the two values +--! +--! @see eql_v2.max(eql_v2_encrypted) +CREATE FUNCTION eql_v2.max(a eql_v2_encrypted, b eql_v2_encrypted) +RETURNS eql_v2_encrypted +STRICT +AS $$ + BEGIN + IF a > b THEN + RETURN a; + ELSE + RETURN b; + END IF; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Find maximum encrypted value in a group +--! +--! Aggregate function that returns the maximum encrypted value in a group +--! using ORE index term comparisons without decryption. +--! +--! @param input eql_v2_encrypted Encrypted values to aggregate +--! @return eql_v2_encrypted Maximum value in the group +--! +--! @example +--! -- Find maximum salary per department +--! SELECT department, eql_v2.max(encrypted_salary) +--! FROM employees +--! GROUP BY department; +--! +--! @note Requires 'ore' index configuration on the column +--! 
@see eql_v2.max(eql_v2_encrypted, eql_v2_encrypted) +CREATE AGGREGATE eql_v2.max(eql_v2_encrypted) +( + sfunc = eql_v2.max, + stype = eql_v2_encrypted +); + + +--! @file config/indexes.sql +--! @brief Configuration state uniqueness indexes +--! +--! Creates partial unique indexes to enforce that only one configuration +--! can be in 'active', 'pending', or 'encrypting' state at any time. +--! Multiple 'inactive' configurations are allowed. +--! +--! @note Uses partial indexes (WHERE clauses) for efficiency +--! @note Prevents conflicting configurations from being active simultaneously +--! @see config/types.sql for state definitions + + +--! @brief Unique active configuration constraint +--! @note Only one configuration can be 'active' at once +CREATE UNIQUE INDEX ON public.eql_v2_configuration (state) WHERE state = 'active'; + +--! @brief Unique pending configuration constraint +--! @note Only one configuration can be 'pending' at once +CREATE UNIQUE INDEX ON public.eql_v2_configuration (state) WHERE state = 'pending'; + +--! @brief Unique encrypting configuration constraint +--! @note Only one configuration can be 'encrypting' at once +CREATE UNIQUE INDEX ON public.eql_v2_configuration (state) WHERE state = 'encrypting'; + + +--! @brief Add a search index configuration for an encrypted column +--! +--! Configures a searchable encryption index (unique, match, ore, or ste_vec) on an +--! encrypted column. Creates or updates the pending configuration, then migrates +--! and activates it unless migrating flag is set. +--! +--! @param table_name Text Name of the table containing the column +--! @param column_name Text Name of the column to configure +--! @param index_name Text Type of index ('unique', 'match', 'ore', 'ste_vec') +--! @param cast_as Text PostgreSQL type for decrypted values (default: 'text') +--! @param opts JSONB Index-specific options (default: '{}') +--! @param migrating Boolean Skip auto-migration if true (default: false) +--! 
@return JSONB Updated configuration object +--! @throws Exception if index already exists for this column +--! @throws Exception if cast_as is not a valid type +--! +--! @example +--! -- Add unique index for exact-match searches +--! SELECT eql_v2.add_search_config('users', 'email', 'unique'); +--! +--! -- Add match index for LIKE searches with custom token length +--! SELECT eql_v2.add_search_config('posts', 'content', 'match', 'text', +--! '{"token_filters": [{"kind": "downcase"}], "tokenizer": {"kind": "ngram", "token_length": 3}}' +--! ); +--! +--! @see eql_v2.add_column +--! @see eql_v2.remove_search_config +CREATE FUNCTION eql_v2.add_search_config(table_name text, column_name text, index_name text, cast_as text DEFAULT 'text', opts jsonb DEFAULT '{}', migrating boolean DEFAULT false) + RETURNS jsonb + +AS $$ + DECLARE + o jsonb; + _config jsonb; + BEGIN + + -- set the active config + SELECT data INTO _config FROM public.eql_v2_configuration WHERE state = 'active' OR state = 'pending' ORDER BY state DESC; + + -- if index exists + IF _config #> array['tables', table_name, column_name, 'indexes'] ? 
index_name THEN + RAISE EXCEPTION '% index exists for column: % %', index_name, table_name, column_name; + END IF; + + IF NOT cast_as = ANY('{text, int, small_int, big_int, real, double, boolean, date, jsonb}') THEN + RAISE EXCEPTION '% is not a valid cast type', cast_as; + END IF; + + -- set default config + SELECT eql_v2.config_default(_config) INTO _config; + + SELECT eql_v2.config_add_table(table_name, _config) INTO _config; + + SELECT eql_v2.config_add_column(table_name, column_name, _config) INTO _config; + + SELECT eql_v2.config_add_cast(table_name, column_name, cast_as, _config) INTO _config; + + -- set default options for index if opts empty + IF index_name = 'match' AND opts = '{}' THEN + SELECT eql_v2.config_match_default() INTO opts; + END IF; + + SELECT eql_v2.config_add_index(table_name, column_name, index_name, opts, _config) INTO _config; + + -- create a new pending record if we don't have one + INSERT INTO public.eql_v2_configuration (state, data) VALUES ('pending', _config) + ON CONFLICT (state) + WHERE state = 'pending' + DO UPDATE + SET data = _config; + + IF NOT migrating THEN + PERFORM eql_v2.migrate_config(); + PERFORM eql_v2.activate_config(); + END IF; + + PERFORM eql_v2.add_encrypted_constraint(table_name, column_name); + + -- exeunt + RETURN _config; + END; +$$ LANGUAGE plpgsql; + +--! @brief Remove a search index configuration from an encrypted column +--! +--! Removes a previously configured search index from an encrypted column. +--! Updates the pending configuration, then migrates and activates it +--! unless migrating flag is set. +--! +--! @param table_name Text Name of the table containing the column +--! @param column_name Text Name of the column +--! @param index_name Text Type of index to remove +--! @param migrating Boolean Skip auto-migration if true (default: false) +--! @return JSONB Updated configuration object +--! @throws Exception if no active or pending configuration exists +--! 
@throws Exception if table is not configured +--! @throws Exception if column is not configured +--! +--! @example +--! -- Remove match index from column +--! SELECT eql_v2.remove_search_config('posts', 'content', 'match'); +--! +--! @see eql_v2.add_search_config +--! @see eql_v2.modify_search_config +CREATE FUNCTION eql_v2.remove_search_config(table_name text, column_name text, index_name text, migrating boolean DEFAULT false) + RETURNS jsonb +AS $$ + DECLARE + _config jsonb; + BEGIN + + -- set the active config + SELECT data INTO _config FROM public.eql_v2_configuration WHERE state = 'active' OR state = 'pending' ORDER BY state DESC; + + -- if no config + IF _config IS NULL THEN + RAISE EXCEPTION 'No active or pending configuration exists'; + END IF; + + -- if the table doesn't exist + IF NOT _config #> array['tables'] ? table_name THEN + RAISE EXCEPTION 'No configuration exists for table: %', table_name; + END IF; + + -- if the index does not exist + -- IF NOT _config->key ? index_name THEN + IF NOT _config #> array['tables', table_name] ? column_name THEN + RAISE EXCEPTION 'No % index exists for column: % %', index_name, table_name, column_name; + END IF; + + -- create a new pending record if we don't have one + INSERT INTO public.eql_v2_configuration (state, data) VALUES ('pending', _config) + ON CONFLICT (state) + WHERE state = 'pending' + DO NOTHING; + + -- remove the index + SELECT _config #- array['tables', table_name, column_name, 'indexes', index_name] INTO _config; + + -- update the config and migrate (even if empty) + UPDATE public.eql_v2_configuration SET data = _config WHERE state = 'pending'; + + IF NOT migrating THEN + PERFORM eql_v2.migrate_config(); + PERFORM eql_v2.activate_config(); + END IF; + + -- exeunt + RETURN _config; + END; +$$ LANGUAGE plpgsql; + +--! @brief Modify a search index configuration for an encrypted column +--! +--! Updates an existing search index configuration by removing and re-adding it +--! with new options. 
Convenience function that combines remove and add operations. +--! If index does not exist, it is added. +--! +--! @param table_name Text Name of the table containing the column +--! @param column_name Text Name of the column +--! @param index_name Text Type of index to modify +--! @param cast_as Text PostgreSQL type for decrypted values (default: 'text') +--! @param opts JSONB New index-specific options (default: '{}') +--! @param migrating Boolean Skip auto-migration if true (default: false) +--! @return JSONB Updated configuration object +--! +--! @example +--! -- Change match index tokenizer settings +--! SELECT eql_v2.modify_search_config('posts', 'content', 'match', 'text', +--! '{"tokenizer": {"kind": "ngram", "token_length": 4}}' +--! ); +--! +--! @see eql_v2.add_search_config +--! @see eql_v2.remove_search_config +CREATE FUNCTION eql_v2.modify_search_config(table_name text, column_name text, index_name text, cast_as text DEFAULT 'text', opts jsonb DEFAULT '{}', migrating boolean DEFAULT false) + RETURNS jsonb +AS $$ + BEGIN + PERFORM eql_v2.remove_search_config(table_name, column_name, index_name, migrating); + RETURN eql_v2.add_search_config(table_name, column_name, index_name, cast_as, opts, migrating); + END; +$$ LANGUAGE plpgsql; + +--! @brief Migrate pending configuration to encrypting state +--! +--! Transitions the pending configuration to encrypting state, validating that +--! all configured columns have encrypted target columns ready. This is part of +--! the configuration lifecycle: pending → encrypting → active. +--! +--! @return Boolean True if migration succeeds +--! @throws Exception if encryption already in progress +--! @throws Exception if no pending configuration exists +--! @throws Exception if configured columns lack encrypted targets +--! +--! @example +--! -- Manually migrate configuration (normally done automatically) +--! SELECT eql_v2.migrate_config(); +--! +--! @see eql_v2.activate_config +--! 
@see eql_v2.add_column +CREATE FUNCTION eql_v2.migrate_config() + RETURNS boolean +AS $$ + BEGIN + + IF EXISTS (SELECT FROM public.eql_v2_configuration c WHERE c.state = 'encrypting') THEN + RAISE EXCEPTION 'An encryption is already in progress'; + END IF; + + IF NOT EXISTS (SELECT FROM public.eql_v2_configuration c WHERE c.state = 'pending') THEN + RAISE EXCEPTION 'No pending configuration exists to encrypt'; + END IF; + + IF NOT eql_v2.ready_for_encryption() THEN + RAISE EXCEPTION 'Some pending columns do not have an encrypted target'; + END IF; + + UPDATE public.eql_v2_configuration SET state = 'encrypting' WHERE state = 'pending'; + RETURN true; + END; +$$ LANGUAGE plpgsql; + +--! @brief Activate encrypting configuration +--! +--! Transitions the encrypting configuration to active state, making it the +--! current operational configuration. Marks previous active configuration as +--! inactive. Final step in configuration lifecycle: pending → encrypting → active. +--! +--! @return Boolean True if activation succeeds +--! @throws Exception if no encrypting configuration exists to activate +--! +--! @example +--! -- Manually activate configuration (normally done automatically) +--! SELECT eql_v2.activate_config(); +--! +--! @see eql_v2.migrate_config +--! @see eql_v2.add_column +CREATE FUNCTION eql_v2.activate_config() + RETURNS boolean +AS $$ + BEGIN + + IF EXISTS (SELECT FROM public.eql_v2_configuration c WHERE c.state = 'encrypting') THEN + UPDATE public.eql_v2_configuration SET state = 'inactive' WHERE state = 'active'; + UPDATE public.eql_v2_configuration SET state = 'active' WHERE state = 'encrypting'; + RETURN true; + ELSE + RAISE EXCEPTION 'No encrypting configuration exists to activate'; + END IF; + END; +$$ LANGUAGE plpgsql; + +--! @brief Discard pending configuration +--! +--! Deletes the pending configuration without applying changes. Use this to +--! abandon configuration changes before they are migrated and activated. +--! +--! 
@return Boolean True if discard succeeds +--! @throws Exception if no pending configuration exists to discard +--! +--! @example +--! -- Discard uncommitted configuration changes +--! SELECT eql_v2.discard(); +--! +--! @see eql_v2.add_column +--! @see eql_v2.add_search_config +CREATE FUNCTION eql_v2.discard() + RETURNS boolean +AS $$ + BEGIN + IF EXISTS (SELECT FROM public.eql_v2_configuration c WHERE c.state = 'pending') THEN + DELETE FROM public.eql_v2_configuration WHERE state = 'pending'; + RETURN true; + ELSE + RAISE EXCEPTION 'No pending configuration exists to discard'; + END IF; + END; +$$ LANGUAGE plpgsql; + +--! @brief Configure a column for encryption +--! +--! Adds a column to the encryption configuration, making it eligible for +--! encrypted storage and search indexes. Creates or updates pending configuration, +--! adds encrypted constraint, then migrates and activates unless migrating flag is set. +--! +--! @param table_name Text Name of the table containing the column +--! @param column_name Text Name of the column to encrypt +--! @param cast_as Text PostgreSQL type to cast decrypted values (default: 'text') +--! @param migrating Boolean Skip auto-migration if true (default: false) +--! @return JSONB Updated configuration object +--! @throws Exception if column already configured for encryption +--! +--! @example +--! -- Configure email column for encryption +--! SELECT eql_v2.add_column('users', 'email', 'text'); +--! +--! -- Configure age column with integer casting +--! SELECT eql_v2.add_column('users', 'age', 'int'); +--! +--! @see eql_v2.add_search_config +--! 
@see eql_v2.remove_column +CREATE FUNCTION eql_v2.add_column(table_name text, column_name text, cast_as text DEFAULT 'text', migrating boolean DEFAULT false) + RETURNS jsonb +AS $$ + DECLARE + key text; + _config jsonb; + BEGIN + -- set the active config + SELECT data INTO _config FROM public.eql_v2_configuration WHERE state = 'active' OR state = 'pending' ORDER BY state DESC; + + -- set default config + SELECT eql_v2.config_default(_config) INTO _config; + + -- if index exists + IF _config #> array['tables', table_name] ? column_name THEN + RAISE EXCEPTION 'Config exists for column: % %', table_name, column_name; + END IF; + + SELECT eql_v2.config_add_table(table_name, _config) INTO _config; + + SELECT eql_v2.config_add_column(table_name, column_name, _config) INTO _config; + + SELECT eql_v2.config_add_cast(table_name, column_name, cast_as, _config) INTO _config; + + -- create a new pending record if we don't have one + INSERT INTO public.eql_v2_configuration (state, data) VALUES ('pending', _config) + ON CONFLICT (state) + WHERE state = 'pending' + DO UPDATE + SET data = _config; + + IF NOT migrating THEN + PERFORM eql_v2.migrate_config(); + PERFORM eql_v2.activate_config(); + END IF; + + PERFORM eql_v2.add_encrypted_constraint(table_name, column_name); + + -- exeunt + RETURN _config; + END; +$$ LANGUAGE plpgsql; + +--! @brief Remove a column from encryption configuration +--! +--! Removes a column from the encryption configuration, including all associated +--! search indexes. Removes encrypted constraint, updates pending configuration, +--! then migrates and activates unless migrating flag is set. +--! +--! @param table_name Text Name of the table containing the column +--! @param column_name Text Name of the column to remove +--! @param migrating Boolean Skip auto-migration if true (default: false) +--! @return JSONB Updated configuration object +--! @throws Exception if no active or pending configuration exists +--! 
@throws Exception if table is not configured +--! @throws Exception if column is not configured +--! +--! @example +--! -- Remove email column from encryption +--! SELECT eql_v2.remove_column('users', 'email'); +--! +--! @see eql_v2.add_column +--! @see eql_v2.remove_search_config +CREATE FUNCTION eql_v2.remove_column(table_name text, column_name text, migrating boolean DEFAULT false) + RETURNS jsonb +AS $$ + DECLARE + key text; + _config jsonb; + BEGIN + -- set the active config + SELECT data INTO _config FROM public.eql_v2_configuration WHERE state = 'active' OR state = 'pending' ORDER BY state DESC; + + -- if no config + IF _config IS NULL THEN + RAISE EXCEPTION 'No active or pending configuration exists'; + END IF; + + -- if the table doesn't exist + IF NOT _config #> array['tables'] ? table_name THEN + RAISE EXCEPTION 'No configuration exists for table: %', table_name; + END IF; + + -- if the column does not exist + IF NOT _config #> array['tables', table_name] ? column_name THEN + RAISE EXCEPTION 'No configuration exists for column: % %', table_name, column_name; + END IF; + + -- create a new pending record if we don't have one + INSERT INTO public.eql_v2_configuration (state, data) VALUES ('pending', _config) + ON CONFLICT (state) + WHERE state = 'pending' + DO NOTHING; + + -- remove the column + SELECT _config #- array['tables', table_name, column_name] INTO _config; + + -- if table is now empty, remove the table + IF _config #> array['tables', table_name] = '{}' THEN + SELECT _config #- array['tables', table_name] INTO _config; + END IF; + + PERFORM eql_v2.remove_encrypted_constraint(table_name, column_name); + + -- update the config (even if empty) and activate + UPDATE public.eql_v2_configuration SET data = _config WHERE state = 'pending'; + + IF NOT migrating THEN + -- For empty configs, skip migration validation and directly activate + IF _config #> array['tables'] = '{}' THEN + UPDATE public.eql_v2_configuration SET state = 'inactive' WHERE state = 
'active'; + UPDATE public.eql_v2_configuration SET state = 'active' WHERE state = 'pending'; + ELSE + PERFORM eql_v2.migrate_config(); + PERFORM eql_v2.activate_config(); + END IF; + END IF; + + -- exeunt + RETURN _config; + + END; +$$ LANGUAGE plpgsql; + +--! @brief Reload configuration from CipherStash Proxy +--! +--! Placeholder function for reloading configuration from the CipherStash Proxy. +--! Currently returns NULL without side effects. +--! +--! @return Void +--! +--! @note This function may be used for configuration synchronization in future versions +CREATE FUNCTION eql_v2.reload_config() + RETURNS void +LANGUAGE sql STRICT PARALLEL SAFE +BEGIN ATOMIC + RETURN NULL; +END; + +--! @brief Query encryption configuration in tabular format +--! +--! Returns the active encryption configuration as a table for easier querying +--! and filtering. Shows all configured tables, columns, cast types, and indexes. +--! +--! @return TABLE Contains configuration state, relation name, column name, cast type, and indexes +--! +--! @example +--! -- View all encrypted columns +--! SELECT * FROM eql_v2.config(); +--! +--! -- Find all columns with match indexes +--! SELECT relation, col_name FROM eql_v2.config() +--! WHERE indexes ? 'match'; +--! +--! @see eql_v2.add_column +--! @see eql_v2.add_search_config +CREATE FUNCTION eql_v2.config() RETURNS TABLE ( + state eql_v2_configuration_state, + relation text, + col_name text, + decrypts_as text, + indexes jsonb +) +AS $$ +BEGIN + RETURN QUERY + WITH tables AS ( + SELECT config.state, tables.key AS table, tables.value AS config + FROM public.eql_v2_configuration config, jsonb_each(data->'tables') tables + WHERE config.data->>'v' = '1' + ) + SELECT + tables.state, + tables.table, + column_config.key, + column_config.value->>'cast_as', + column_config.value->'indexes' + FROM tables, jsonb_each(tables.config) column_config; +END; +$$ LANGUAGE plpgsql; + +--! @file config/constraints.sql +--! 
@brief Configuration validation functions and constraints +--! +--! Provides CHECK constraint functions to validate encryption configuration structure. +--! Ensures configurations have required fields (version, tables) and valid values +--! for index types and cast types before being stored. +--! +--! @see config/tables.sql where constraints are applied + + +--! @brief Extract index type names from configuration +--! @internal +--! +--! Helper function that extracts all index type names from the configuration's +--! 'indexes' sections across all tables and columns. +--! +--! @param jsonb Configuration data to extract from +--! @return SETOF text Index type names (e.g., 'match', 'ore', 'unique', 'ste_vec') +--! +--! @note Used by config_check_indexes for validation +--! @see eql_v2.config_check_indexes +CREATE FUNCTION eql_v2.config_get_indexes(val jsonb) + RETURNS SETOF text + LANGUAGE sql IMMUTABLE STRICT PARALLEL SAFE +BEGIN ATOMIC + SELECT jsonb_object_keys(jsonb_path_query(val,'$.tables.*.*.indexes')); +END; + + +--! @brief Validate index types in configuration +--! @internal +--! +--! Checks that all index types specified in the configuration are valid. +--! Valid index types are: match, ore, unique, ste_vec. +--! +--! @param jsonb Configuration data to validate +--! @return boolean True if all index types are valid +--! @throws Exception if any invalid index type found +--! +--! @note Used in CHECK constraint on eql_v2_configuration table +--! @see eql_v2.config_get_indexes +CREATE FUNCTION eql_v2.config_check_indexes(val jsonb) + RETURNS BOOLEAN + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + BEGIN + + IF (SELECT EXISTS (SELECT eql_v2.config_get_indexes(val))) THEN + IF (SELECT bool_and(index = ANY('{match, ore, unique, ste_vec}')) FROM eql_v2.config_get_indexes(val) AS index) THEN + RETURN true; + END IF; + RAISE 'Configuration has an invalid index (%). 
Index should be one of {match, ore, unique, ste_vec}', val; + END IF; + RETURN true; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Validate cast types in configuration +--! @internal +--! +--! Checks that all 'cast_as' types specified in the configuration are valid. +--! Valid cast types are: text, int, small_int, big_int, real, double, boolean, date, jsonb. +--! +--! @param jsonb Configuration data to validate +--! @return boolean True if all cast types are valid or no cast types specified +--! @throws Exception if any invalid cast type found +--! +--! @note Used in CHECK constraint on eql_v2_configuration table +--! @note Empty configurations (no cast_as fields) are valid +--! @note Cast type names are EQL's internal representations, not PostgreSQL native types +CREATE FUNCTION eql_v2.config_check_cast(val jsonb) + RETURNS BOOLEAN +AS $$ + BEGIN + -- If there are cast_as fields, validate them + IF EXISTS (SELECT jsonb_array_elements_text(jsonb_path_query_array(val, '$.tables.*.*.cast_as'))) THEN + IF (SELECT bool_and(cast_as = ANY('{text, int, small_int, big_int, real, double, boolean, date, jsonb}')) + FROM (SELECT jsonb_array_elements_text(jsonb_path_query_array(val, '$.tables.*.*.cast_as')) AS cast_as) casts) THEN + RETURN true; + END IF; + RAISE 'Configuration has an invalid cast_as (%). Cast should be one of {text, int, small_int, big_int, real, double, boolean, date, jsonb}', val; + END IF; + -- If no cast_as fields exist (empty config), that's valid + RETURN true; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Validate tables field presence +--! @internal +--! +--! Ensures the configuration has a 'tables' field, which is required +--! to specify which database tables contain encrypted columns. +--! +--! @param jsonb Configuration data to validate +--! @return boolean True if 'tables' field exists +--! @throws Exception if 'tables' field is missing +--! +--! 
@note Used in CHECK constraint on eql_v2_configuration table +CREATE FUNCTION eql_v2.config_check_tables(val jsonb) + RETURNS boolean +AS $$ + BEGIN + IF (val ? 'tables') THEN + RETURN true; + END IF; + RAISE 'Configuration missing tables (tables) field: %', val; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Validate version field presence +--! @internal +--! +--! Ensures the configuration has a 'v' (version) field, which tracks +--! the configuration format version. +--! +--! @param jsonb Configuration data to validate +--! @return boolean True if 'v' field exists +--! @throws Exception if 'v' field is missing +--! +--! @note Used in CHECK constraint on eql_v2_configuration table +CREATE FUNCTION eql_v2.config_check_version(val jsonb) + RETURNS boolean +AS $$ + BEGIN + IF (val ? 'v') THEN + RETURN true; + END IF; + RAISE 'Configuration missing version (v) field: %', val; + END; +$$ LANGUAGE plpgsql; + + +--! @brief Drop existing data validation constraint if present +--! @note Allows constraint to be recreated during upgrades +ALTER TABLE public.eql_v2_configuration DROP CONSTRAINT IF EXISTS eql_v2_configuration_data_check; + + +--! @brief Comprehensive configuration data validation +--! +--! CHECK constraint that validates all aspects of configuration data: +--! - Version field presence +--! - Tables field presence +--! - Valid cast_as types +--! - Valid index types +--! +--! @note Combines all config_check_* validation functions +--! @see eql_v2.config_check_version +--! @see eql_v2.config_check_tables +--! @see eql_v2.config_check_cast +--! @see eql_v2.config_check_indexes +ALTER TABLE public.eql_v2_configuration + ADD CONSTRAINT eql_v2_configuration_data_check CHECK ( + eql_v2.config_check_version(data) AND + eql_v2.config_check_tables(data) AND + eql_v2.config_check_cast(data) AND + eql_v2.config_check_indexes(data) +); + + + + +--! @brief Compare two encrypted values using Blake3 hash index terms +--! +--! 
Performs a three-way comparison (returns -1/0/1) of encrypted values using +--! their Blake3 hash index terms. Used internally by the equality operator (=) +--! for exact-match queries without decryption. +--! +--! @param a eql_v2_encrypted First encrypted value to compare +--! @param b eql_v2_encrypted Second encrypted value to compare +--! @return Integer -1 if a < b, 0 if a = b, 1 if a > b +--! +--! @note NULL values are sorted before non-NULL values +--! @note Comparison uses underlying text type ordering of Blake3 hashes +--! +--! @see eql_v2.blake3 +--! @see eql_v2.has_blake3 +--! @see eql_v2."=" +CREATE FUNCTION eql_v2.compare_blake3(a eql_v2_encrypted, b eql_v2_encrypted) + RETURNS integer + IMMUTABLE STRICT PARALLEL SAFE +AS $$ + DECLARE + a_term eql_v2.blake3; + b_term eql_v2.blake3; + BEGIN + + IF a IS NULL AND b IS NULL THEN + RETURN 0; + END IF; + + IF a IS NULL THEN + RETURN -1; + END IF; + + IF b IS NULL THEN + RETURN 1; + END IF; + + IF eql_v2.has_blake3(a) THEN + a_term = eql_v2.blake3(a); + END IF; + + IF eql_v2.has_blake3(b) THEN + b_term = eql_v2.blake3(b); + END IF; + + IF a_term IS NULL AND b_term IS NULL THEN + RETURN 0; + END IF; + + IF a_term IS NULL THEN + RETURN -1; + END IF; + + IF b_term IS NULL THEN + RETURN 1; + END IF; + + -- Using the underlying text type comparison + IF a_term = b_term THEN + RETURN 0; + END IF; + + IF a_term < b_term THEN + RETURN -1; + END IF; + + IF a_term > b_term THEN + RETURN 1; + END IF; + + END; +$$ LANGUAGE plpgsql; +`; diff --git a/packages/prisma-next/src/stack/derive-schemas.ts b/packages/prisma-next/src/stack/derive-schemas.ts new file mode 100644 index 00000000..582f8199 --- /dev/null +++ b/packages/prisma-next/src/stack/derive-schemas.ts @@ -0,0 +1,142 @@ +/** + * Derive `@cipherstash/stack` encryption schemas from a Prisma Next + * contract. 
+ * + * `contract.json` already declares every encrypted column the + * framework knows about — its physical table name (after `@map(...)` + * collapsing), its physical column name, its codec id + * (`cipherstash/@1`), and its per-flag search-mode `typeParams`. + * `deriveStackSchemas` walks `storage.tables` and returns one + * `EncryptedTable` per table with at least one cipherstash-codec'd + * column, ready to pass to `Encryption({ schemas })`. Skipping the + * hand-written second declaration removes a runtime-correctness + * footgun where the SDK's index set silently disagrees with the EQL + * bundle's installed configuration. + */ + +import { + type EncryptedColumn, + encryptedColumn, + encryptedTable, + type EncryptedTable, + type EncryptedTableColumn, +} from '@cipherstash/stack/schema' + +import { + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_BOOLEAN_CODEC_ID, + type CipherstashCodecId, + CIPHERSTASH_DATE_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, + CIPHERSTASH_JSON_CODEC_ID, + CIPHERSTASH_STRING_CODEC_ID, + isCipherstashCodecId, +} from '../extension-metadata/constants' + +/** + * Structural shape of the subset of `contract.json` this derivation + * reads. Declared structurally (vs importing the framework type) so + * the derivation has no `@prisma-next/*` import-edge and is callable + * against a raw JSON-typed import. 
+ */ +export interface ContractStorageView { + readonly storage?: { + readonly tables?: Readonly> + } +} + +interface StorageTableView { + readonly columns?: Readonly> +} + +interface StorageColumnView { + readonly codecId?: string | null + readonly typeParams?: Readonly> | null +} + +type DataType = 'string' | 'number' | 'bigint' | 'date' | 'boolean' | 'json' + +const CODEC_ID_TO_DATA_TYPE: Readonly> = { + [CIPHERSTASH_STRING_CODEC_ID]: 'string', + [CIPHERSTASH_DOUBLE_CODEC_ID]: 'number', + [CIPHERSTASH_BIGINT_CODEC_ID]: 'bigint', + [CIPHERSTASH_DATE_CODEC_ID]: 'date', + [CIPHERSTASH_BOOLEAN_CODEC_ID]: 'boolean', + [CIPHERSTASH_JSON_CODEC_ID]: 'json', +} + +// Single source of truth for the cipherstash typeParams flag set: the +// keys drive both the dispatch table in `applyTypeParams` and the +// "Known flags" line in its error message. +const FLAG_DISPATCH = { + equality: (b: EncryptedColumn) => b.equality(), + freeTextSearch: (b: EncryptedColumn) => b.freeTextSearch(), + orderAndRange: (b: EncryptedColumn) => b.orderAndRange(), + searchableJson: (b: EncryptedColumn) => b.searchableJson(), +} as const + +type CipherstashFlag = keyof typeof FLAG_DISPATCH + +/** + * Derive an array of `EncryptedTable` builders from a Prisma Next + * contract. Returns an empty array when no cipherstash columns are + * present; callers must still pass at least one `EncryptedTable` to + * `Encryption({ schemas })`, which requires a non-empty array. 
+ */ +export function deriveStackSchemas( + contractJson: ContractStorageView, +): ReadonlyArray> { + const tables = contractJson.storage?.tables + if (!tables) return [] + + const result: EncryptedTable[] = [] + + for (const [tableName, table] of Object.entries(tables)) { + const columns = table.columns + if (!columns) continue + + const builders: Record = {} + for (const [columnName, column] of Object.entries(columns)) { + const codecId = column.codecId + if (codecId == null || !isCipherstashCodecId(codecId)) continue + + const dataType = CODEC_ID_TO_DATA_TYPE[codecId] + builders[columnName] = applyTypeParams( + encryptedColumn(columnName).dataType(dataType), + column.typeParams ?? {}, + tableName, + columnName, + ) + } + + if (Object.keys(builders).length === 0) continue + result.push(encryptedTable(tableName, builders)) + } + + return result +} + +function applyTypeParams( + builder: EncryptedColumn, + typeParams: Readonly>, + tableName: string, + columnName: string, +): EncryptedColumn { + let result = builder + for (const [flag, value] of Object.entries(typeParams)) { + if (value !== true) continue + if (!isCipherstashFlag(flag)) { + throw new Error( + `deriveStackSchemas: unrecognised cipherstash typeParams flag "${flag}" ` + + `on column "${tableName}"."${columnName}". ` + + `Known flags: ${Object.keys(FLAG_DISPATCH).join(', ')}.`, + ) + } + result = FLAG_DISPATCH[flag](result) + } + return result +} + +function isCipherstashFlag(value: string): value is CipherstashFlag { + return value in FLAG_DISPATCH +} diff --git a/packages/prisma-next/src/stack/from-stack.ts b/packages/prisma-next/src/stack/from-stack.ts new file mode 100644 index 00000000..d83b5d70 --- /dev/null +++ b/packages/prisma-next/src/stack/from-stack.ts @@ -0,0 +1,189 @@ +/** + * One-call setup for `@cipherstash/prisma-next` against + * `@cipherstash/stack`. 
+ * + * Replaces the manual three-step wiring (derive schemas → construct + * `Encryption({ schemas })` → build `CipherstashSdk` adapter → wrap + * with `createCipherstashRuntimeDescriptor` and `bulkEncryptMiddleware`) + * with a single async factory that returns ready-to-spread arrays for + * `postgres({...})`: + * + * const cipherstash = await cipherstashFromStack({ contractJson }) + * + * const db = postgres({ + * contractJson, + * extensions: cipherstash.extensions, + * middleware: cipherstash.middleware, + * }) + * + * Override semantics: a user-supplied `schemas` array is allowed to + * add tables the contract doesn't model. For tables the contract + * **does** declare, the override must agree on column names, + * `cast_as`, and the installed index set — divergence throws at + * setup so ZeroKMS can't end up with an index set that the EQL + * bundle's installed configuration disagrees with. + */ + +import { Encryption } from '@cipherstash/stack' +import type { EncryptionClient } from '@cipherstash/stack/client' +import { + type CastAs, + type EncryptedTable, + type EncryptedTableColumn, + toEqlCastAs, +} from '@cipherstash/stack/schema' +import type { SqlMiddleware, SqlRuntimeExtensionDescriptor } from '@prisma-next/sql-runtime' + +import { createCipherstashRuntimeDescriptor } from '../exports/runtime' +import { bulkEncryptMiddleware } from '../middleware/bulk-encrypt' +import { + type ContractStorageView, + deriveStackSchemas, +} from './derive-schemas' +import { createCipherstashSdk } from './sdk-adapter' + +export interface CipherstashFromStackOptions { + /** The contract.json artefact emitted by `prisma-next contract emit`. */ + readonly contractJson: ContractStorageView + + /** + * Optional schema override. Use this to add tables the contract + * does not model. For tables the contract **does** declare, the + * override must match on column names, `cast_as`, and installed + * indices — divergence throws at setup. 
+ */ + readonly schemas?: ReadonlyArray> + + /** Pass-through to `Encryption({ config })` (keyset overrides, logging, …). */ + readonly encryptionConfig?: Parameters[0]['config'] +} + +export interface CipherstashFromStackResult { + /** Ready to spread into `postgres({ extensions })`. */ + readonly extensions: ReadonlyArray> + /** Ready to spread into `postgres({ middleware })`. */ + readonly middleware: ReadonlyArray + /** The initialised `EncryptionClient` for direct SDK access outside the ORM path. */ + readonly encryptionClient: EncryptionClient +} + +export async function cipherstashFromStack( + opts: CipherstashFromStackOptions, +): Promise { + const derived = deriveStackSchemas(opts.contractJson) + const schemas = resolveSchemas(derived, opts.schemas) + const [first, ...rest] = schemas + if (first === undefined) { + throw new Error( + 'cipherstashFromStack: no cipherstash columns found in contract.json AND no override `schemas` supplied. ' + + '`@cipherstash/stack`\'s `Encryption({ schemas })` requires at least one `EncryptedTable`. ' + + 'Check that prisma/schema.prisma declares at least one `cipherstash.Encrypted*()` column and that ' + + '`pnpm emit` has been run since the last edit.', + ) + } + + const encryptionClient = await Encryption({ + schemas: [first, ...rest], + ...(opts.encryptionConfig !== undefined ? 
{ config: opts.encryptionConfig } : {}), + }) + + const sdk = createCipherstashSdk(encryptionClient, schemas) + + return { + extensions: [createCipherstashRuntimeDescriptor({ sdk })], + middleware: [bulkEncryptMiddleware(sdk)], + encryptionClient, + } +} + +function resolveSchemas( + derived: ReadonlyArray>, + override: ReadonlyArray> | undefined, +): ReadonlyArray> { + if (override === undefined || override.length === 0) return derived + + const derivedByName = new Map(derived.map((t) => [t.tableName, t])) + const overrideByName = new Map(override.map((t) => [t.tableName, t])) + + for (const [tableName, derivedTable] of derivedByName) { + const overrideTable = overrideByName.get(tableName) + if (overrideTable === undefined) continue + assertSchemasAgree(derivedTable, overrideTable) + } + + return [ + ...derived, + ...override.filter((t) => !derivedByName.has(t.tableName)), + ] +} + +function assertSchemasAgree( + derived: EncryptedTable, + user: EncryptedTable, +): void { + const derivedDef = derived.build() + const userDef = user.build() + + const derivedCols = new Set(Object.keys(derivedDef.columns)) + const userCols = new Set(Object.keys(userDef.columns)) + + const missingInUser = [...derivedCols].filter((c) => !userCols.has(c)).sort() + const extraInUser = [...userCols].filter((c) => !derivedCols.has(c)).sort() + + if (missingInUser.length > 0 || extraInUser.length > 0) { + const parts: string[] = [] + if (missingInUser.length > 0) parts.push(`missing in override: [${missingInUser.join(', ')}]`) + if (extraInUser.length > 0) parts.push(`extra in override: [${extraInUser.join(', ')}]`) + divergence( + `table "${derived.tableName}"`, + `declares columns [${[...derivedCols].sort().join(', ')}]`, + `declares [${[...userCols].sort().join(', ')}] (${parts.join('; ')})`, + 'Override `schemas` must match the contract on every contract-declared table; use it only to add tables the contract does not model.', + ) + } + + for (const colName of derivedCols) { + const d = 
derivedDef.columns[colName]! + const u = userDef.columns[colName]! + + // Normalise through `toEqlCastAs` so SDK-facing aliases agree — + // `dataType('string')` and `dataType('text')` both lower to EQL `'text'`. + const dCast = toEqlCastAs(d.cast_as as CastAs) + const uCast = toEqlCastAs(u.cast_as as CastAs) + if (dCast !== uCast) { + divergence( + `column "${derived.tableName}"."${colName}"`, + `declares cast_as="${d.cast_as}"`, + `declares cast_as="${u.cast_as}" (EQL cast_as "${dCast}" vs "${uCast}")`, + 'Fix prisma/schema.prisma and re-emit the contract rather than overriding.', + ) + } + + const derivedIndexes = indexKeys(d.indexes) + const userIndexes = indexKeys(u.indexes) + if (!setsEqual(derivedIndexes, userIndexes)) { + divergence( + `column "${derived.tableName}"."${colName}"`, + `installs indexes [${[...derivedIndexes].sort().join(', ') || '(none)'}]`, + `installs [${[...userIndexes].sort().join(', ') || '(none)'}]`, + 'Fix prisma/schema.prisma and re-emit the contract rather than overriding.', + ) + } + } +} + +function divergence(loc: string, contractSide: string, overrideSide: string, hint: string): never { + throw new Error( + `cipherstashFromStack: schema divergence on ${loc}. Contract ${contractSide} but override ${overrideSide}. 
${hint}`, + ) +} + +function indexKeys(indexes: Record): Set { + return new Set(Object.keys(indexes).filter((k) => indexes[k] !== undefined)) +} + +function setsEqual(a: Set, b: Set): boolean { + if (a.size !== b.size) return false + for (const v of a) if (!b.has(v)) return false + return true +} diff --git a/packages/prisma-next/src/stack/sdk-adapter.ts b/packages/prisma-next/src/stack/sdk-adapter.ts new file mode 100644 index 00000000..7cf11df3 --- /dev/null +++ b/packages/prisma-next/src/stack/sdk-adapter.ts @@ -0,0 +1,214 @@ +/** + * Adapt `@cipherstash/stack`'s `EncryptionClient` to the + * framework-native `CipherstashSdk` shape consumed by + * `createCipherstashRuntimeDescriptor({ sdk })` and + * `bulkEncryptMiddleware(sdk)`. + * + * The framework calls into the SDK with `(table, column)` routing-key + * strings — it doesn't know about stack's typed `EncryptedTable` / + * `EncryptedColumn` objects. The adapter resolves those strings back + * to the typed objects via a registry built from the supplied + * schemas. The registry is keyed on the **physical** column name + * (post-`@map`), matching how the framework's bulk-encrypt middleware + * derives routing keys from the lowered AST. + * + * Plaintext coercion at the boundary: + * + * - `bigint → number` (ZeroKMS's `big_int` cast accepts numeric + * plaintexts only; values must fit in the JS safe-integer range; + * overflow throws eagerly). + * - `Date → ISO 8601 string` (both ZeroKMS and the EQL bundle accept + * the textual form). + * + * Every other JS value type is passed through to the stack SDK as-is; + * the stack SDK then validates against its declared per-column + * `dataType()` on each `bulkEncrypt` call. 
+ */ + +import { type Encrypted, isEncryptedPayload } from '@cipherstash/stack' +import type { EncryptionClient } from '@cipherstash/stack/client' +import { + EncryptedColumn, + EncryptedField, + type EncryptedTable, + type EncryptedTableColumn, +} from '@cipherstash/stack/schema' + +import type { + CipherstashRoutingKey, + CipherstashSdk, +} from '../execution/sdk' + +// `JsPlaintext` is the input type `@cipherstash/stack`'s `bulkEncrypt` +// accepts for non-bigint, non-Date values. Redeclared locally because +// `@cipherstash/stack` does not re-export it through its public surface. +type JsPlaintext = + | string + | number + | boolean + | { [key: string]: unknown } + | JsPlaintext[] + +type StackColumn = EncryptedColumn | EncryptedField + +interface RegistryEntry { + readonly table: EncryptedTable + readonly columns: Readonly> +} + +/** + * Build a `CipherstashSdk` from an initialised `@cipherstash/stack` + * `EncryptionClient` plus the schemas it was constructed with. + * + * `schemas` should be the exact `EncryptedTable[]` array passed to + * `Encryption({ schemas })` (or its return value from + * {@link deriveStackSchemas}). The adapter uses it to translate + * framework `(table, column)` routing-key strings back to the typed + * schema objects the stack SDK's `bulkEncrypt(...)` expects in + * `{ column, table }`. 
+ */ +export function createCipherstashSdk( + encryptionClient: EncryptionClient, + schemas: ReadonlyArray>, +): CipherstashSdk { + const registry = buildRegistry(schemas) + + return { + async bulkEncrypt({ values, routingKey }) { + const { table, column } = lookup(registry, routingKey) + const result = await encryptionClient.bulkEncrypt( + values.map((plaintext) => ({ plaintext: toJsPlaintext(plaintext) })), + { column, table }, + ) + return unwrap(result, 'bulkEncrypt').map((entry) => entry.data) + }, + + async bulkDecrypt({ ciphertexts }) { + const payload = ciphertexts.map((data, index) => ({ + data: ensureEncryptedEnvelope(data, 'bulkDecrypt', index), + })) + const result = await encryptionClient.bulkDecrypt(payload) + return unwrap(result, 'bulkDecrypt').map(unwrapBulkDecryptEntry) + }, + + async decrypt({ ciphertext }) { + const result = await encryptionClient.decrypt( + ensureEncryptedEnvelope(ciphertext, 'decrypt'), + ) + return asSdkPlaintext(unwrap(result, 'decrypt')) + }, + } +} + +// Mirrors `@byteslice/result`'s discriminated `Result` shape +// (which `@cipherstash/stack` returns) without forcing the package to +// depend on `@byteslice/result` directly. 
+type StackResult = + | { readonly failure?: never; readonly data: T } + | { readonly failure: { readonly message: string } } + +function unwrap(result: StackResult, op: string): T { + if (result.failure) { + throw new Error(`cipherstash ${op} failed: ${result.failure.message}`) + } + return result.data +} + +function unwrapBulkDecryptEntry(entry: { data?: unknown; error?: unknown }): string { + if ('error' in entry && entry.error !== undefined) { + throw new Error(`cipherstash bulkDecrypt entry failed: ${String(entry.error)}`) + } + return asSdkPlaintext(entry.data) +} + +function buildRegistry( + schemas: ReadonlyArray>, +): Map { + const registry = new Map() + for (const table of schemas) { + const columns: Record = {} + // `encryptedTable(name, builders)` intersects each column builder + // onto the table instance under its own key, so iterating own + // enumerable properties recovers them. + for (const [key, value] of Object.entries(table)) { + if (value instanceof EncryptedColumn || value instanceof EncryptedField) { + columns[key] = value + } + } + registry.set(table.tableName, { table, columns }) + } + return registry +} + +function lookup( + registry: Map, + routingKey: CipherstashRoutingKey, +): { table: EncryptedTable; column: StackColumn } { + const entry = registry.get(routingKey.table) + if (entry === undefined) { + throw new Error( + `cipherstash SDK adapter: routing-key table "${routingKey.table}" is not in the stack schemas. ` + + 'If you derived your schemas with `deriveStackSchemas(contractJson)`, this means the contract has no ' + + 'cipherstash columns on that table — check `prisma/schema.prisma` and re-emit the contract.', + ) + } + const column = entry.columns[routingKey.column] + if (column === undefined) { + throw new Error( + `cipherstash SDK adapter: routing-key column "${routingKey.column}" is not on stack table "${routingKey.table}". ` + + 'Routing keys use physical column names (post-`@map`). 
Check `prisma/schema.prisma` and re-emit the contract.', + ) + } + return { table: entry.table, column } +} + +function ensureEncryptedEnvelope( + value: unknown, + kind: 'decrypt' | 'bulkDecrypt', + index?: number, +): Encrypted { + // `isEncryptedPayload` checks the v1/v2 envelope basics (object, + // numeric `v`, `i` object, `c` or `sv` present). We additionally + // require the EQL v2 `i: { t, c }` substructure because that's + // what the framework's bulk-decrypt path expects. + const valid = + isEncryptedPayload(value) && + typeof (value as { i: { t?: unknown; c?: unknown } }).i === 'object' && + 't' in (value as { i: object }).i && + 'c' in (value as { i: object }).i + if (!valid) { + const where = index === undefined ? '' : ` at index ${index}` + throw new Error( + `cipherstash ${kind}: ciphertext${where} is not a valid EQL v2 envelope ` + + '(expected an object with `i: { t, c }`, numeric `v`, and `c` or `sv`).', + ) + } + return value +} + +function toJsPlaintext(value: unknown): JsPlaintext { + if (typeof value === 'bigint') { + // ZeroKMS's `big_int` cast accepts only numeric plaintexts. Throw + // eagerly on overflow rather than truncating silently on the wire. + if ( + value > BigInt(Number.MAX_SAFE_INTEGER) || + value < BigInt(Number.MIN_SAFE_INTEGER) + ) { + throw new Error( + `cipherstash bigint plaintext ${value} exceeds Number.MAX_SAFE_INTEGER; ` + + 'ZeroKMS does not accept string plaintexts for the big_int cast type.', + ) + } + return Number(value) + } + if (value instanceof Date) return value.toISOString() + return value as JsPlaintext +} + +// The framework's `CipherstashSdk.decrypt` is typed `Promise` +// but every envelope's `parseDecryptedValue` hook narrows the raw value +// to its concrete plaintext type. Forwarding through this cast keeps the +// codec-side polymorphism intact across the SDK contract. 
+function asSdkPlaintext(value: unknown): string { + return value as string +} diff --git a/packages/prisma-next/src/types/codec-types.ts b/packages/prisma-next/src/types/codec-types.ts new file mode 100644 index 00000000..a21e6a65 --- /dev/null +++ b/packages/prisma-next/src/types/codec-types.ts @@ -0,0 +1,94 @@ +/** + * Codec type definitions for the cipherstash extension. + * + * Type-only definitions for codec input/output/traits — consumed by + * the contract emitter when generating an application's + * `contract.d.ts`. Importing this subpath registers every cipherstash + * codec id with its `cipherstash:*` traits, so trait-dispatched + * operators (`cipherstashGt`, `cipherstashBetween`, + * `cipherstashInArray`, …) surface on real model accessors. + * + * # Why this is hand-written, not derived via `ExtractCodecTypes` + * + * The framework's `ExtractCodecTypes` helper projects descriptor-keyed + * types via `traits: TTraits[number] & CodecTrait`. The framework's + * `CodecTrait` is a closed union of built-ins (`'equality'`, + * `'order'`, `'numeric'`, `'boolean'`, `'textual'`); the cipherstash + * trait strings (`'cipherstash:equality'`, `'cipherstash:order-and-range'`, + * `'cipherstash:free-text-search'`, `'cipherstash:searchable-json'`) + * deliberately sit outside that union (see ADR 214 + the + * `equality-trait-removal.test.ts` regression — namespacing isolates + * the cipherstash dispatch surface from framework built-in operators + * like `eq` that would lower to standard SQL `=`, which is wrong for + * EQL ciphertexts). Running cipherstash descriptors through + * `ExtractCodecTypes` would intersect each trait string with + * `CodecTrait` and collapse to `never`, defeating the whole point of + * the augmentation. 
+ * + * The hand-written shape preserves the literal trait strings so the + * model accessor's trait-dispatch type-level lookup + * (`SqlQueryOperationTypes` → `OpMatchesField`) sees the actual + * cipherstash trait names and surfaces the right operator on the + * right column. + * + * # Output type uses the envelope class + * + * Each codec's runtime `decode` returns an `EncryptedEnvelopeBase` + * subclass instance. The `output` slot here is the envelope class so + * `FieldOutputTypes['User']['email']` resolves to `EncryptedString` + * (and the ORM read path returns an envelope the user calls + * `.decrypt()` on); `input` is the union of the envelope class and + * the bare plaintext, mirroring the polymorphic argument shapes the + * predicate operators accept (`coerceToEnvelope` in + * `src/execution/operators.ts`). + */ + +// Type-only imports — the codec-types subpath compiles to an empty +// JS module under tsdown (every import below is elided), so importing +// the envelope classes by type carries no runtime cost in the +// generated `codec-types.mjs` chunk. 
+import type { EncryptedBigInt } from '../execution/envelope-bigint'; +import type { EncryptedBoolean } from '../execution/envelope-boolean'; +import type { EncryptedDate } from '../execution/envelope-date'; +import type { EncryptedDouble } from '../execution/envelope-double'; +import type { EncryptedJson } from '../execution/envelope-json'; +import type { EncryptedString } from '../execution/envelope-string'; + +export type CodecTypes = { + readonly 'cipherstash/string@1': { + readonly input: string | EncryptedString; + readonly output: EncryptedString; + readonly traits: + | 'cipherstash:equality' + | 'cipherstash:free-text-search' + | 'cipherstash:order-and-range'; + }; + readonly 'cipherstash/double@1': { + readonly input: number | EncryptedDouble; + readonly output: EncryptedDouble; + readonly traits: 'cipherstash:equality' | 'cipherstash:order-and-range'; + }; + readonly 'cipherstash/bigint@1': { + readonly input: bigint | EncryptedBigInt; + readonly output: EncryptedBigInt; + readonly traits: 'cipherstash:equality' | 'cipherstash:order-and-range'; + }; + readonly 'cipherstash/date@1': { + readonly input: Date | EncryptedDate; + readonly output: EncryptedDate; + readonly traits: 'cipherstash:equality' | 'cipherstash:order-and-range'; + }; + readonly 'cipherstash/boolean@1': { + readonly input: boolean | EncryptedBoolean; + readonly output: EncryptedBoolean; + readonly traits: 'cipherstash:equality'; + }; + readonly 'cipherstash/json@1': { + // `unknown` already subsumes `EncryptedJson`, but the alias is kept in + // scope (via the import above) so the codec entry still flags JSON as + // an envelope-bearing codec at the type-import layer. 
+ readonly input: unknown; + readonly output: EncryptedJson; + readonly traits: 'cipherstash:searchable-json'; + }; +}; diff --git a/packages/prisma-next/src/types/operation-types.ts b/packages/prisma-next/src/types/operation-types.ts new file mode 100644 index 00000000..11dbd2dd --- /dev/null +++ b/packages/prisma-next/src/types/operation-types.ts @@ -0,0 +1,175 @@ +/** + * Operation type definitions for the cipherstash extension. + * + * Mirrors `packages/3-extensions/pgvector/src/types/operation-types.ts` + * — the type-only counterpart to `cipherstashQueryOperations()` in + * `../execution/operators.ts`. Every entry's `self` dispatch shape + * mirrors the runtime registration 1:1: + * + * - Single-codec entries (`cipherstashEq`, `cipherstashIlike`, + * `cipherstashNotIlike`, `cipherstashJsonbPathExists`) declare + * `self: { codecId: '' }`. The framework's `OpMatchesField` + * direct-codec-id branch surfaces the method on columns whose + * codec id is the literal — no consumer-side `CodecTypes` + * augmentation needed. + * + * - Multi-codec entries (the equality / order-and-range operators) + * declare `self: { traits: ['cipherstash:'] }`. Trait dispatch + * surfaces the method on every column whose codec id resolves to + * a `CodecTypes` entry whose `traits` set includes the same + * identifier. The cipherstash-namespaced `cipherstash:` prefix + * isolates these from the framework's closed `CodecTrait` union + * so adding the trait to a cipherstash codec descriptor cannot + * silently re-attach a framework built-in. + * + * Both surfaces (codec-keyed `OperationTypes` and flat + * `QueryOperationTypes`) get composed into the consuming + * application's generated `contract.d.ts` by the contract emitter, + * via the `types.queryOperationTypes` import declaration on + * `cipherstashPackMeta` (`../extension-metadata/descriptor-meta.ts`). 
+ * + * Return-codec id is `pg/bool@1` for every predicate operator — + * pinned to what the runtime impl builds (`../execution/operators.ts` + * `PG_BOOL_CODEC_ID`). The framework's predicate machinery looks at + * the return codec's `'boolean'` trait to decide a value is suitable + * for a WHERE clause. + */ + +import type { CodecExpression, Expression } from '@prisma-next/sql-relational-core/expression'; + +type CodecTypesBase = Record; + +const CIPHERSTASH_STRING_CODEC = 'cipherstash/string@1'; +type CipherstashStringCodec = typeof CIPHERSTASH_STRING_CODEC; + +type PgBoolReturn = Expression<{ codecId: 'pg/bool@1'; nullable: false }>; + +/** + * Trait tuples used to gate multi-codec operators (see ADR 214). + * + * Cipherstash uses extension-namespaced trait identifiers + * (`cipherstash:equality`, `cipherstash:order-and-range`) that + * intentionally live outside the framework's closed `CodecTrait` + * union. Preserving the literal trait strings at the type level is + * load-bearing: the consuming `OpMatchesField` predicate (in + * `packages/3-extensions/sql-orm-client/src/types.ts`) reads + * `Self.traits` and tests + * `[traits[number]] extends [CT[CodecId]['traits']]`, so widening to + * the framework's closed `CodecTrait` union (or to `never[]` via + * intersection) erases the extension's dispatch information and + * collapses every codec into a trait match. + * + * The framework's `QueryOperationSelfSpec` types `traits` as + * `readonly CodecTrait[]`; cipherstash's `QueryOperationTypes` + * therefore declares its entries directly (rather than via the + * `SqlQueryOperationTypes` wrapper that constrains + * `T extends Record`) so the + * literal trait strings flow through untouched. The consumer-side + * pipeline (`ExtractQueryOperationTypes` -> `OpMatchesField`) walks + * the entries structurally and accepts any `traits` shape + * extending `readonly string[]`. 
AGENTS.md requires the rationale + * comment alongside any non-standard surface; this is the type-only + * twin of `extension-metadata/constants.ts:CIPHERSTASH_CODEC_TRAITS`, + * which carries the runtime-side rationale for the same pattern. + */ +type EqualityTraits = readonly ['cipherstash:equality']; +type OrderAndRangeTraits = readonly ['cipherstash:order-and-range']; +type FreeTextSearchTraits = readonly ['cipherstash:free-text-search']; +type SearchableJsonTraits = readonly ['cipherstash:searchable-json']; + +/** + * Schematic constraint on `self` for a multi-codec cipherstash + * predicate. The runtime impl reads `self.returnType.codecId` and + * dispatches to the matching `Encrypted*` envelope — accepting any + * `Expression` here is correct because the surface is column-method + * autocomplete, not a free-standing helper. The framework's + * `OpMatchesField` is what restricts visibility to codecs declaring + * the gating trait; this `self` argument type is irrelevant to that + * dispatch. + */ +type AnyExpressionLike = Expression<{ readonly codecId: string; readonly nullable: boolean }>; + +/** + * Flat operation signatures consumed by the SQL query builder. Read + * via the `queryOperations` slot on the runtime context to project + * the cipherstash predicate methods onto cipherstash column accessors + * inside `model.where(...)` / `sql(t).where(...)` callbacks. + * + * Every operator's runtime impl (`../execution/operators.ts`) wraps + * the user-supplied argument(s) in the appropriate `Encrypted*` + * envelope at lowering time and stamps the column's `(table, column)` + * routing context, then lowers to the canonical EQL function call. + * + * The user-facing argument type is intentionally permissive + * (`unknown` for multi-codec ops, `pg/text@1` for the legacy + * single-codec ops). 
The cipherstash extension does not ship a + * `codec-types` augmentation declaring `input` / `output` shapes for + * the cipherstash codec ids, so the symmetric encrypted-codec-typed + * `other` shape pgvector uses for its `cosineDistance` arg would only + * accept full `Expression` values, not raw plaintext literals. The + * asymmetry mirrors the runtime: the column `self` is the encrypted + * column; the comparand is plaintext the operator encrypts on the + * user's behalf. + */ +export type QueryOperationTypes = CT extends CodecTypesBase + ? { + readonly cipherstashEq: { + readonly self: { readonly codecId: CipherstashStringCodec }; + readonly impl: ( + self: CodecExpression, + other: CodecExpression<'pg/text@1', boolean, CT>, + ) => PgBoolReturn; + }; + readonly cipherstashIlike: { + readonly self: { readonly codecId: CipherstashStringCodec }; + readonly impl: ( + self: CodecExpression, + pattern: CodecExpression<'pg/text@1', boolean, CT>, + ) => PgBoolReturn; + }; + readonly cipherstashNotIlike: { + readonly self: { readonly traits: FreeTextSearchTraits }; + readonly impl: (self: AnyExpressionLike, pattern: string) => PgBoolReturn; + }; + readonly cipherstashNe: { + readonly self: { readonly traits: EqualityTraits }; + readonly impl: (self: AnyExpressionLike, other: unknown) => PgBoolReturn; + }; + readonly cipherstashInArray: { + readonly self: { readonly traits: EqualityTraits }; + readonly impl: (self: AnyExpressionLike, values: readonly unknown[]) => PgBoolReturn; + }; + readonly cipherstashNotInArray: { + readonly self: { readonly traits: EqualityTraits }; + readonly impl: (self: AnyExpressionLike, values: readonly unknown[]) => PgBoolReturn; + }; + readonly cipherstashGt: { + readonly self: { readonly traits: OrderAndRangeTraits }; + readonly impl: (self: AnyExpressionLike, other: unknown) => PgBoolReturn; + }; + readonly cipherstashGte: { + readonly self: { readonly traits: OrderAndRangeTraits }; + readonly impl: (self: AnyExpressionLike, other: 
unknown) => PgBoolReturn; + }; + readonly cipherstashLt: { + readonly self: { readonly traits: OrderAndRangeTraits }; + readonly impl: (self: AnyExpressionLike, other: unknown) => PgBoolReturn; + }; + readonly cipherstashLte: { + readonly self: { readonly traits: OrderAndRangeTraits }; + readonly impl: (self: AnyExpressionLike, other: unknown) => PgBoolReturn; + }; + readonly cipherstashBetween: { + readonly self: { readonly traits: OrderAndRangeTraits }; + readonly impl: (self: AnyExpressionLike, low: unknown, high: unknown) => PgBoolReturn; + }; + readonly cipherstashNotBetween: { + readonly self: { readonly traits: OrderAndRangeTraits }; + readonly impl: (self: AnyExpressionLike, low: unknown, high: unknown) => PgBoolReturn; + }; + readonly cipherstashJsonbPathExists: { + readonly self: { readonly traits: SearchableJsonTraits }; + readonly impl: (self: AnyExpressionLike, path: string) => PgBoolReturn; + }; + } + : never; diff --git a/packages/prisma-next/test/abort.test.ts b/packages/prisma-next/test/abort.test.ts new file mode 100644 index 00000000..2f101f26 --- /dev/null +++ b/packages/prisma-next/test/abort.test.ts @@ -0,0 +1,397 @@ +/** + * Cipherstash cancellation umbrella. + * + * Pins the contract for the cipherstash-internal `RUNTIME.ABORTED` + * envelope wrapping at every async observation point the extension + * exposes: + * + * - `bulk-encrypt` — bulk-encrypt middleware's `sdk.bulkEncrypt` call. + * - `decrypt` — single-cell `EncryptedString#decrypt()` SDK call. + * - `decrypt-all` — `decryptAll` walker's `sdk.bulkDecrypt` calls. + * + * The codec's `encode` / `decode` paths are deliberately NOT wrapped + * here; both are synchronous (encode reads `handle.ciphertext`; decode + * constructs a fresh envelope from `wire` + `ctx.column` + `sdk`).
The + * surrounding async work — the per-cell `Promise.all` race in the + * framework's `encodeParams` / `decodeRow` paths — already throws + * `RUNTIME.ABORTED` with `phase: 'encode'` / `phase: 'decode'` per + * ADR 207. The cipherstash phases below cover the async work the + * framework cannot see (bulk SDK calls in `beforeExecute` middleware + * and post-stream caller-driven `decrypt()` / `decryptAll()` sites). + * + * Envelope shape contract: every cipherstash phase wrapping reuses + * the framework's `RUNTIME.ABORTED` envelope (`code === 'RUNTIME.ABORTED'`, + * `category === 'RUNTIME'`, `severity === 'error'`, `details.phase`, + * `cause`). Only the `phase` string values are cipherstash-specific — + * the structural shape (and the `runtimeError` envelope-builder + * behind it) come from the framework. See ADR 207 / 027. + */ + +import type { Contract } from '@prisma-next/contract/types'; +import { isRuntimeError, RUNTIME_ABORTED } from '@prisma-next/framework-components/runtime'; +import type { SqlStorage } from '@prisma-next/sql-contract/types'; +import { InsertAst, ParamRef, TableSource } from '@prisma-next/sql-relational-core/ast'; +import { createSqlParamRefMutator } from '@prisma-next/sql-relational-core/middleware'; +import type { SqlExecutionPlan } from '@prisma-next/sql-relational-core/plan'; +import type { SqlMiddlewareContext } from '@prisma-next/sql-runtime'; +import { describe, expect, it, vi } from 'vitest'; +import { decryptAll } from '../src/execution/decrypt-all'; +import { + EncryptedString, + type EncryptedStringFromInternalArgs, + setHandleRoutingKey, +} from '../src/execution/envelope-string'; +import type { CipherstashSdk } from '../src/execution/sdk'; +import { CIPHERSTASH_STRING_CODEC_ID } from '../src/extension-metadata/constants'; +import { bulkEncryptMiddleware } from '../src/middleware/bulk-encrypt'; + +interface CounterSdk extends CipherstashSdk { + readonly bulkEncryptCalls: number; + readonly bulkDecryptCalls: number; + readonly
singleDecryptCalls: number; +} + +/** + * Build an SDK whose async methods never settle until the supplied + * controller aborts (or the test forcibly resolves them). Used to + * exercise mid-flight aborts where the wrapping must observe the + * abort and reject the awaiting caller before the SDK promise + * resolves — even when the SDK body itself ignores the signal. + * + * The default SDK behaviour (no `behaviour` arg) returns a "stuck" + * promise that only the abort can break; `behaviour: 'instant'` + * gives a synchronously-resolved promise so the pre-aborted-at-entry + * tests can run without a real signal handler. + */ +function makeStuckSdk(behaviour: 'stuck' | 'instant' = 'stuck'): CounterSdk { + let bulkEncryptCalls = 0; + let bulkDecryptCalls = 0; + let singleDecryptCalls = 0; + return { + get bulkEncryptCalls() { + return bulkEncryptCalls; + }, + get bulkDecryptCalls() { + return bulkDecryptCalls; + }, + get singleDecryptCalls() { + return singleDecryptCalls; + }, + decrypt() { + singleDecryptCalls++; + if (behaviour === 'instant') { + return Promise.resolve('plaintext'); + } + return new Promise(() => undefined); + }, + bulkEncrypt(args) { + bulkEncryptCalls++; + if (behaviour === 'instant') { + return Promise.resolve(args.values.map((v) => `ct:${v}`)); + } + return new Promise(() => undefined); + }, + bulkDecrypt(args) { + bulkDecryptCalls++; + if (behaviour === 'instant') { + return Promise.resolve(args.ciphertexts.map(() => 'plaintext')); + } + return new Promise(() => undefined); + }, + }; +} + +function expectAbortedEnvelope(error: unknown, phase: string): void { + expect(isRuntimeError(error)).toBe(true); + if (!isRuntimeError(error)) return; + expect(error.code).toBe(RUNTIME_ABORTED); + expect(error.category).toBe('RUNTIME'); + expect(error.severity).toBe('error'); + expect(error.details).toEqual({ phase }); +} + +function makeMiddlewareCtx(signal: AbortSignal | undefined): SqlMiddlewareContext { + return { + contract: {} as Contract, + mode: 
'strict', + now: () => Date.now(), + log: { info: vi.fn(), warn: vi.fn(), error: vi.fn() }, + contentHash: async () => 'mock-hash', + ...(signal === undefined ? {} : { signal }), + }; +} + +function buildInsertPlan(envelopes: ReadonlyArray): SqlExecutionPlan { + const params: unknown[] = []; + const astRows = envelopes.map((envelope) => { + const ref = ParamRef.of(envelope, { codec: { codecId: CIPHERSTASH_STRING_CODEC_ID } }); + params.push(envelope); + return { email: ref }; + }); + const ast = new InsertAst(TableSource.named('user'), astRows); + return { + sql: `INSERT INTO "user" (email) VALUES ...`, + params, + meta: { target: 'postgres', storageHash: 'sha256:test', lane: 'dsl' }, + ast, + } as SqlExecutionPlan; +} + +interface MakeReadEnvelopeArgs { + readonly plaintext: string; + readonly table: string; + readonly column: string; + readonly sdk: CipherstashSdk; +} + +function makeReadEnvelope(args: MakeReadEnvelopeArgs): EncryptedString { + const fromInternalArgs: EncryptedStringFromInternalArgs = { + ciphertext: { c: `ct:${args.plaintext}` }, + table: args.table, + column: args.column, + sdk: args.sdk, + }; + return EncryptedString.fromInternal(fromInternalArgs); +} + +describe('bulk-encrypt middleware — RUNTIME.ABORTED { phase: "bulk-encrypt" }', () => { + it('pre-aborted ctx.signal short-circuits before sdk.bulkEncrypt is called', async () => { + const sdk = makeStuckSdk('stuck'); + const middleware = bulkEncryptMiddleware(sdk); + const envelope = EncryptedString.from('alice@example.com'); + setHandleRoutingKey(envelope, 'user', 'email'); + const plan = buildInsertPlan([envelope]); + const params = createSqlParamRefMutator(plan); + const controller = new AbortController(); + controller.abort(new Error('client gone')); + + const pending = middleware.beforeExecute?.(plan, makeMiddlewareCtx(controller.signal), params); + if (!pending) throw new Error('beforeExecute is required for this test'); + const error = await pending.then( + () => { + throw new 
Error('expected RUNTIME.ABORTED rejection'); + }, + (err: unknown) => err, + ); + + expectAbortedEnvelope(error, 'bulk-encrypt'); + // The SDK must not have been entered; the pre-check fires before + // the bulk-encrypt round-trip is scheduled. + expect(sdk.bulkEncryptCalls).toBe(0); + }); + + it('mid-flight abort surfaces RUNTIME.ABORTED { phase: "bulk-encrypt" } via the race', async () => { + const sdk = makeStuckSdk('stuck'); + const middleware = bulkEncryptMiddleware(sdk); + const envelope = EncryptedString.from('alice@example.com'); + setHandleRoutingKey(envelope, 'user', 'email'); + const plan = buildInsertPlan([envelope]); + const params = createSqlParamRefMutator(plan); + const controller = new AbortController(); + + const pending = middleware.beforeExecute?.(plan, makeMiddlewareCtx(controller.signal), params); + queueMicrotask(() => controller.abort(new Error('client gone'))); + + const error = await pending?.then( + () => { + throw new Error('expected RUNTIME.ABORTED rejection'); + }, + (err: unknown) => err, + ); + + expectAbortedEnvelope(error, 'bulk-encrypt'); + // The SDK call was scheduled (counter increments before the + // underlying promise settles) but never resolved; the wrapping + // observed the abort and rejected the awaiter. 
+ expect(sdk.bulkEncryptCalls).toBe(1); + }); +}); + +describe('EncryptedString.decrypt — RUNTIME.ABORTED { phase: "decrypt" }', () => { + it('pre-aborted signal short-circuits before sdk.decrypt is called', async () => { + const sdk = makeStuckSdk('stuck'); + const envelope = makeReadEnvelope({ + plaintext: 'alice@example.com', + table: 'user', + column: 'email', + sdk, + }); + const controller = new AbortController(); + controller.abort(new Error('client gone')); + + const error = await envelope.decrypt({ signal: controller.signal }).then( + () => { + throw new Error('expected RUNTIME.ABORTED rejection'); + }, + (err: unknown) => err, + ); + + expectAbortedEnvelope(error, 'decrypt'); + expect(sdk.singleDecryptCalls).toBe(0); + }); + + it('mid-flight abort surfaces RUNTIME.ABORTED { phase: "decrypt" } via the race', async () => { + const sdk = makeStuckSdk('stuck'); + const envelope = makeReadEnvelope({ + plaintext: 'alice@example.com', + table: 'user', + column: 'email', + sdk, + }); + const controller = new AbortController(); + const pending = envelope.decrypt({ signal: controller.signal }); + queueMicrotask(() => controller.abort(new Error('client gone'))); + + const error = await pending.then( + () => { + throw new Error('expected RUNTIME.ABORTED rejection'); + }, + (err: unknown) => err, + ); + + expectAbortedEnvelope(error, 'decrypt'); + expect(sdk.singleDecryptCalls).toBe(1); + }); + + it('cached-plaintext fast path bypasses signal observation entirely (synchronous return)', async () => { + // A write-side envelope (or a previously-decrypted read-side + // envelope) returns its cached plaintext without consulting the + // SDK; the abort wrapping is therefore irrelevant — even an + // already-aborted signal must not turn the cached return into + // a `RUNTIME.ABORTED` rejection. Pins the no-IO short-circuit. 
+ const envelope = EncryptedString.from('cached'); + const controller = new AbortController(); + controller.abort(new Error('client gone')); + expect(await envelope.decrypt({ signal: controller.signal })).toBe('cached'); + }); +}); + +describe('decryptAll — RUNTIME.ABORTED { phase: "decrypt-all" }', () => { + it('pre-aborted signal short-circuits before sdk.bulkDecrypt is called', async () => { + const sdk = makeStuckSdk('stuck'); + const envelope = makeReadEnvelope({ + plaintext: 'alice@example.com', + table: 'user', + column: 'email', + sdk, + }); + const controller = new AbortController(); + controller.abort(new Error('client gone')); + + const error = await decryptAll([envelope], { signal: controller.signal }).then( + () => { + throw new Error('expected RUNTIME.ABORTED rejection'); + }, + (err: unknown) => err, + ); + + expectAbortedEnvelope(error, 'decrypt-all'); + expect(sdk.bulkDecryptCalls).toBe(0); + }); + + it('mid-flight abort surfaces RUNTIME.ABORTED { phase: "decrypt-all" } via the race', async () => { + const sdk = makeStuckSdk('stuck'); + const envelope = makeReadEnvelope({ + plaintext: 'alice@example.com', + table: 'user', + column: 'email', + sdk, + }); + const controller = new AbortController(); + const pending = decryptAll([envelope], { signal: controller.signal }); + queueMicrotask(() => controller.abort(new Error('client gone'))); + + const error = await pending.then( + () => { + throw new Error('expected RUNTIME.ABORTED rejection'); + }, + (err: unknown) => err, + ); + + expectAbortedEnvelope(error, 'decrypt-all'); + expect(sdk.bulkDecryptCalls).toBe(1); + }); + + it('no-envelope walk is a no-op even when the signal is aborted', async () => { + // The walker pre-checks signal abort only when there is work to + // do. A walk that finds zero envelopes returns immediately + // without observing the signal — symmetric with `decryptAll`'s + // documented "no SDK call when no envelopes are reachable" + // contract. 
+ const controller = new AbortController(); + controller.abort(new Error('client gone')); + await expect(decryptAll({}, { signal: controller.signal })).resolves.toBeUndefined(); + }); +}); + +describe('cipherstash phase wrappings preserve cause and reuse the framework envelope', () => { + it('the controller-supplied reason flows through `cause` for every cipherstash phase', async () => { + // The framework's `runtimeAborted` carries `signal.reason` + // verbatim (per ADR 207). Cipherstash's wrapping reuses the + // same envelope construction, so the reason must round-trip + // identically — codec authors / app callers reading + // `error.cause` see the same shape regardless of which phase + // observed the abort. + const reason = new Error('explicit-controller-reason'); + const controller = new AbortController(); + controller.abort(reason); + + // bulk-encrypt + { + const sdk = makeStuckSdk('stuck'); + const envelope = EncryptedString.from('alice@example.com'); + setHandleRoutingKey(envelope, 'user', 'email'); + const plan = buildInsertPlan([envelope]); + const params = createSqlParamRefMutator(plan); + const pending = bulkEncryptMiddleware(sdk).beforeExecute?.( + plan, + makeMiddlewareCtx(controller.signal), + params, + ); + if (!pending) throw new Error('beforeExecute is required for this test'); + const error = await pending.then( + () => { + throw new Error('expected RUNTIME.ABORTED rejection'); + }, + (err: unknown) => err, + ); + expect((error as { cause?: unknown }).cause).toBe(reason); + } + + // decrypt + { + const sdk = makeStuckSdk('stuck'); + const envelope = makeReadEnvelope({ + plaintext: 'alice', + table: 'user', + column: 'email', + sdk, + }); + const error = await envelope.decrypt({ signal: controller.signal }).then( + () => { + throw new Error('expected RUNTIME.ABORTED rejection'); + }, + (err: unknown) => err, + ); + expect((error as { cause?: unknown }).cause).toBe(reason); + } + + // decrypt-all + { + const sdk = makeStuckSdk('stuck'); + const
envelope = makeReadEnvelope({ + plaintext: 'alice', + table: 'user', + column: 'email', + sdk, + }); + const error = await decryptAll([envelope], { signal: controller.signal }).then( + () => { + throw new Error('expected RUNTIME.ABORTED rejection'); + }, + (err: unknown) => err, + ); + expect((error as { cause?: unknown }).cause).toBe(reason); + } + }); +}); diff --git a/packages/prisma-next/test/authoring.test.ts b/packages/prisma-next/test/authoring.test.ts new file mode 100644 index 00000000..6beaaba6 --- /dev/null +++ b/packages/prisma-next/test/authoring.test.ts @@ -0,0 +1,234 @@ +/** + * Pack-meta authoring contributions for the cipherstash extension. + * + * Pinned behaviour: + * - Pack-meta exposes `cipherstash.EncryptedString` as a namespaced + * `typeConstructor`. + * - The constructor takes a single OPTIONAL object argument with + * optional boolean `equality` and `freeTextSearch` properties (so + * `cipherstash.EncryptedString()` and `cipherstash.EncryptedString({})` + * both parse). + * - The output template lowers to a `ColumnTypeDescriptor` with + * `codecId: 'cipherstash/string@1'`, `nativeType: 'eql_v2_encrypted'`, + * and an `AuthoringArgRef`-based `typeParams` block carrying + * `true` defaults for both flags — searchable encryption is the + * legitimate default; users opt out explicitly. + * + * Full PSL→ColumnTypeDescriptor lowering is exercised in + * `test/psl-interpretation.test.ts`. 
+ */ + +import { describe, expect, it } from 'vitest'; +import { cipherstashAuthoringTypes } from '../src/contract-authoring'; +import cipherstashPack from '../src/exports/pack'; + +describe('cipherstash pack authoring contributions', () => { + it('exposes cipherstash.EncryptedString as a namespaced type constructor', () => { + expect(cipherstashPack.authoring?.type).toMatchObject({ + cipherstash: { + EncryptedString: { + kind: 'typeConstructor', + }, + }, + }); + }); + + it('declares a single optional object argument with optional equality + freeTextSearch + orderAndRange boolean properties', () => { + expect(cipherstashAuthoringTypes.cipherstash.EncryptedString).toMatchObject({ + kind: 'typeConstructor', + args: [ + { + kind: 'object', + optional: true, + properties: { + equality: { kind: 'boolean', optional: true }, + freeTextSearch: { kind: 'boolean', optional: true }, + orderAndRange: { kind: 'boolean', optional: true }, + }, + }, + ], + }); + }); + + it('lowers to ColumnTypeDescriptor with codecId cipherstash/string@1 + nativeType eql_v2_encrypted, defaulting all flags to true', () => { + expect(cipherstashAuthoringTypes.cipherstash.EncryptedString.output).toMatchObject({ + codecId: 'cipherstash/string@1', + nativeType: 'eql_v2_encrypted', + typeParams: { + equality: { kind: 'arg', index: 0, path: ['equality'], default: true }, + freeTextSearch: { + kind: 'arg', + index: 0, + path: ['freeTextSearch'], + default: true, + }, + orderAndRange: { + kind: 'arg', + index: 0, + path: ['orderAndRange'], + default: true, + }, + }, + }); + }); + + it('exposes the storage type registration via pack meta', () => { + expect(cipherstashPack.types?.storage).toContainEqual({ + typeId: 'cipherstash/string@1', + familyId: 'sql', + targetId: 'postgres', + nativeType: 'eql_v2_encrypted', + }); + }); + + describe('cipherstash.EncryptedDouble', () => { + it('exposes EncryptedDouble as a namespaced type constructor', () => { + expect(cipherstashPack.authoring?.type).toMatchObject({ 
+ cipherstash: { EncryptedDouble: { kind: 'typeConstructor' } }, + }); + }); + + it('declares { equality, orderAndRange } booleans, defaulting both to true', () => { + expect(cipherstashAuthoringTypes.cipherstash.EncryptedDouble).toMatchObject({ + kind: 'typeConstructor', + args: [ + { + kind: 'object', + optional: true, + properties: { + equality: { kind: 'boolean', optional: true }, + orderAndRange: { kind: 'boolean', optional: true }, + }, + }, + ], + }); + expect(cipherstashAuthoringTypes.cipherstash.EncryptedDouble.output).toMatchObject({ + codecId: 'cipherstash/double@1', + nativeType: 'eql_v2_encrypted', + typeParams: { + equality: { kind: 'arg', index: 0, path: ['equality'], default: true }, + orderAndRange: { kind: 'arg', index: 0, path: ['orderAndRange'], default: true }, + }, + }); + }); + + it('registers the cipherstash/double@1 storage type', () => { + expect(cipherstashPack.types?.storage).toContainEqual({ + typeId: 'cipherstash/double@1', + familyId: 'sql', + targetId: 'postgres', + nativeType: 'eql_v2_encrypted', + }); + }); + }); + + describe('cipherstash.EncryptedBigInt', () => { + it('exposes EncryptedBigInt as a namespaced type constructor', () => { + expect(cipherstashPack.authoring?.type).toMatchObject({ + cipherstash: { EncryptedBigInt: { kind: 'typeConstructor' } }, + }); + }); + + it('lowers to ColumnTypeDescriptor with codecId cipherstash/bigint@1, defaulting both flags to true', () => { + expect(cipherstashAuthoringTypes.cipherstash.EncryptedBigInt.output).toMatchObject({ + codecId: 'cipherstash/bigint@1', + nativeType: 'eql_v2_encrypted', + typeParams: { + equality: { kind: 'arg', index: 0, path: ['equality'], default: true }, + orderAndRange: { kind: 'arg', index: 0, path: ['orderAndRange'], default: true }, + }, + }); + }); + + it('registers the cipherstash/bigint@1 storage type', () => { + expect(cipherstashPack.types?.storage).toContainEqual({ + typeId: 'cipherstash/bigint@1', + familyId: 'sql', + targetId: 'postgres', + nativeType: 
'eql_v2_encrypted', + }); + }); + }); + + describe('cipherstash.EncryptedDate', () => { + it('exposes EncryptedDate as a namespaced type constructor', () => { + expect(cipherstashPack.authoring?.type).toMatchObject({ + cipherstash: { EncryptedDate: { kind: 'typeConstructor' } }, + }); + }); + + it('lowers to ColumnTypeDescriptor with codecId cipherstash/date@1, defaulting both flags to true', () => { + expect(cipherstashAuthoringTypes.cipherstash.EncryptedDate.output).toMatchObject({ + codecId: 'cipherstash/date@1', + nativeType: 'eql_v2_encrypted', + typeParams: { + equality: { kind: 'arg', index: 0, path: ['equality'], default: true }, + orderAndRange: { kind: 'arg', index: 0, path: ['orderAndRange'], default: true }, + }, + }); + }); + + it('registers the cipherstash/date@1 storage type', () => { + expect(cipherstashPack.types?.storage).toContainEqual({ + typeId: 'cipherstash/date@1', + familyId: 'sql', + targetId: 'postgres', + nativeType: 'eql_v2_encrypted', + }); + }); + }); + + describe('cipherstash.EncryptedBoolean', () => { + it('exposes EncryptedBoolean as a namespaced type constructor', () => { + expect(cipherstashPack.authoring?.type).toMatchObject({ + cipherstash: { EncryptedBoolean: { kind: 'typeConstructor' } }, + }); + }); + + it('lowers to ColumnTypeDescriptor with codecId cipherstash/boolean@1, defaulting equality to true', () => { + expect(cipherstashAuthoringTypes.cipherstash.EncryptedBoolean.output).toMatchObject({ + codecId: 'cipherstash/boolean@1', + nativeType: 'eql_v2_encrypted', + typeParams: { + equality: { kind: 'arg', index: 0, path: ['equality'], default: true }, + }, + }); + }); + + it('registers the cipherstash/boolean@1 storage type', () => { + expect(cipherstashPack.types?.storage).toContainEqual({ + typeId: 'cipherstash/boolean@1', + familyId: 'sql', + targetId: 'postgres', + nativeType: 'eql_v2_encrypted', + }); + }); + }); + + describe('cipherstash.EncryptedJson', () => { + it('exposes EncryptedJson as a namespaced type 
constructor', () => { + expect(cipherstashPack.authoring?.type).toMatchObject({ + cipherstash: { EncryptedJson: { kind: 'typeConstructor' } }, + }); + }); + + it('lowers to ColumnTypeDescriptor with codecId cipherstash/json@1, defaulting searchableJson to true', () => { + expect(cipherstashAuthoringTypes.cipherstash.EncryptedJson.output).toMatchObject({ + codecId: 'cipherstash/json@1', + nativeType: 'eql_v2_encrypted', + typeParams: { + searchableJson: { kind: 'arg', index: 0, path: ['searchableJson'], default: true }, + }, + }); + }); + + it('registers the cipherstash/json@1 storage type', () => { + expect(cipherstashPack.types?.storage).toContainEqual({ + typeId: 'cipherstash/json@1', + familyId: 'sql', + targetId: 'postgres', + nativeType: 'eql_v2_encrypted', + }); + }); + }); +}); diff --git a/packages/prisma-next/test/bulk-encrypt-middleware.test.ts b/packages/prisma-next/test/bulk-encrypt-middleware.test.ts new file mode 100644 index 00000000..a01ecfa9 --- /dev/null +++ b/packages/prisma-next/test/bulk-encrypt-middleware.test.ts @@ -0,0 +1,533 @@ +/** + * Bulk-encrypt middleware behaviour. + * + * Drives `bulkEncryptMiddleware(sdk).beforeExecute(plan, ctx, params)` + * against an instrumented mock `CipherstashSdk` and asserts: + * + * - One `bulkEncrypt` call per `(table, column)` group; N envelopes + * in the same column collapse into a single SDK round-trip. + * - `(table, column)` is derived from the lowered `InsertAst` / + * `UpdateAst` via the middleware's AST walk and stamped onto each + * envelope handle before grouping. A pre-stamped routing context + * (write-once-wins) is preserved. + * - The SDK-returned ciphertext is stamped onto every envelope + * handle via `setHandleCiphertext`; codec.encode then reads it + * on the wire. + * - `ctx.signal` is forwarded by identity to the SDK so downstream + * cancellation observes the same `AbortSignal`. 
+ * - The handle's `plaintext` slot is **retained** post-encrypt — + * `envelope.decrypt()` returns the cached plaintext synchronously + * without consulting the SDK. + * + * Plus the no-op shape (no cipherstash params → no SDK call) and the + * SDK-shape error path (wrong number of ciphertexts → diagnostic). + */ + +import type { Contract, PlanMeta } from '@prisma-next/contract/types'; +import type { SqlStorage } from '@prisma-next/sql-contract/types'; +import { + type ColumnRef, + InsertAst, + ParamRef, + TableSource, + UpdateAst, +} from '@prisma-next/sql-relational-core/ast'; +import { createSqlParamRefMutator } from '@prisma-next/sql-relational-core/middleware'; +import type { SqlExecutionPlan } from '@prisma-next/sql-relational-core/plan'; +import type { SqlMiddlewareContext } from '@prisma-next/sql-runtime'; +import { describe, expect, it, vi } from 'vitest'; +import { EncryptedString, setHandleRoutingKey } from '../src/execution/envelope-string'; +import type { + CipherstashBulkDecryptArgs, + CipherstashBulkEncryptArgs, + CipherstashSdk, + CipherstashSingleDecryptArgs, +} from '../src/execution/sdk'; +import { CIPHERSTASH_STRING_CODEC_ID } from '../src/extension-metadata/constants'; +import { bulkEncryptMiddleware } from '../src/middleware/bulk-encrypt'; + +const baseMeta: PlanMeta = { + target: 'postgres', + storageHash: 'sha256:test', + lane: 'dsl', +}; + +function createCtx(overrides?: Partial): SqlMiddlewareContext { + return { + contract: {} as Contract, + mode: 'strict' as const, + now: () => Date.now(), + log: { + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + }, + contentHash: async () => 'mock-hash', + ...overrides, + }; +} + +interface CounterSdk extends CipherstashSdk { + readonly bulkEncryptCalls: CipherstashBulkEncryptArgs[]; + readonly bulkDecryptCalls: CipherstashBulkDecryptArgs[]; + readonly singleDecryptCalls: CipherstashSingleDecryptArgs[]; +} + +function makeCounterSdk(options?: { + encryptImpl?: (args: CipherstashBulkEncryptArgs) => 
ReadonlyArray; +}): CounterSdk { + const bulkEncryptCalls: CipherstashBulkEncryptArgs[] = []; + const bulkDecryptCalls: CipherstashBulkDecryptArgs[] = []; + const singleDecryptCalls: CipherstashSingleDecryptArgs[] = []; + const encryptImpl = + options?.encryptImpl ?? + ((args: CipherstashBulkEncryptArgs) => + args.values.map( + (plaintext) => `cipher:${args.routingKey.table}.${args.routingKey.column}:${plaintext}`, + )); + return { + bulkEncryptCalls, + bulkDecryptCalls, + singleDecryptCalls, + decrypt(args) { + singleDecryptCalls.push(args); + return Promise.resolve(`single:${String(args.ciphertext)}`); + }, + bulkEncrypt(args) { + bulkEncryptCalls.push(args); + return Promise.resolve(encryptImpl(args)); + }, + bulkDecrypt(args) { + bulkDecryptCalls.push(args); + return Promise.resolve(args.ciphertexts.map((c) => `bulk-decrypt:${String(c)}`)); + }, + }; +} + +function buildInsertPlan( + table: string, + rows: ReadonlyArray>, +): SqlExecutionPlan { + const params: unknown[] = []; + const astRows = rows.map((row) => { + const out: Record = {}; + for (const [column, value] of Object.entries(row)) { + const ref = ParamRef.of(value, { codec: { codecId: CIPHERSTASH_STRING_CODEC_ID } }); + out[column] = ref; + params.push(value); + } + return out; + }); + const ast = new InsertAst(TableSource.named(table), astRows); + return { + sql: `INSERT INTO "${table}" (...) 
VALUES (...)`, + params, + meta: { ...baseMeta }, + ast, + } as SqlExecutionPlan; +} + +function buildUpdatePlan(table: string, set: Record): SqlExecutionPlan { + const params: unknown[] = []; + const astSet: Record = {}; + for (const [column, value] of Object.entries(set)) { + const ref = ParamRef.of(value, { codec: { codecId: CIPHERSTASH_STRING_CODEC_ID } }); + astSet[column] = ref; + params.push(value); + } + const ast = new UpdateAst(TableSource.named(table), astSet); + return { + sql: `UPDATE "${table}" SET ...`, + params, + meta: { ...baseMeta }, + ast, + } as SqlExecutionPlan; +} + +describe('bulkEncryptMiddleware', () => { + describe('one bulkEncrypt call per (table, column) group', () => { + it('issues exactly one bulkEncrypt call when 10 rows insert into one column', async () => { + const sdk = makeCounterSdk(); + const middleware = bulkEncryptMiddleware(sdk); + const envelopes = Array.from({ length: 10 }, (_, i) => + EncryptedString.from(`alice${i}@example.com`), + ); + const plan = buildInsertPlan( + 'user', + envelopes.map((e) => ({ email: e })), + ); + const params = createSqlParamRefMutator(plan); + + await middleware.beforeExecute?.(plan, createCtx(), params); + + expect(sdk.bulkEncryptCalls).toHaveLength(1); + expect(sdk.bulkEncryptCalls[0]?.routingKey).toEqual({ table: 'user', column: 'email' }); + expect(sdk.bulkEncryptCalls[0]?.values).toEqual( + envelopes.map((_, i) => `alice${i}@example.com`), + ); + }); + + it('partitions targets across (table, column) groups: one bulkEncrypt per group', async () => { + const sdk = makeCounterSdk(); + const middleware = bulkEncryptMiddleware(sdk); + const e1 = EncryptedString.from('a@x.com'); + const e2 = EncryptedString.from('b@x.com'); + const e3 = EncryptedString.from('alice'); + const plan = buildInsertPlan('user', [ + { email: e1, username: e3 }, + { email: e2, username: EncryptedString.from('bob') }, + ]); + const params = createSqlParamRefMutator(plan); + + await middleware.beforeExecute?.(plan, 
createCtx(), params); + + expect(sdk.bulkEncryptCalls).toHaveLength(2); + const byColumn = new Map(sdk.bulkEncryptCalls.map((c) => [c.routingKey.column, c])); + expect(byColumn.get('email')?.values).toEqual(['a@x.com', 'b@x.com']); + expect(byColumn.get('username')?.values).toEqual(['alice', 'bob']); + }); + }); + + describe('ciphertext is stamped onto each envelope handle', () => { + it('populates handle.ciphertext with the SDK-returned wire value', async () => { + const sdk = makeCounterSdk(); + const middleware = bulkEncryptMiddleware(sdk); + const envelope = EncryptedString.from('alice@example.com'); + const plan = buildInsertPlan('user', [{ email: envelope }]); + const params = createSqlParamRefMutator(plan); + + await middleware.beforeExecute?.(plan, createCtx(), params); + + expect(envelope.expose().ciphertext).toBe('cipher:user.email:alice@example.com'); + }); + }); + + describe('param slot carries the encoded wire-format string post-middleware', () => { + it('replaces the envelope with the eql_v2_encrypted composite-text literal', async () => { + // The pg driver only knows how to serialise primitives / + // arrays / Buffers — passing the `EncryptedEnvelopeBase` + // instance through would fail at the driver boundary with a + // confusing serialize error. The middleware therefore writes + // the wire-format string (the `("...")` composite-text + // literal) into the param slot via `params.replaceValues`, + // and the runtime's `currentParams()` view reflects that + // before the driver reads. 
+ const sdk = makeCounterSdk(); + const middleware = bulkEncryptMiddleware(sdk); + const envelope = EncryptedString.from('alice@example.com'); + const plan = buildInsertPlan('user', [{ email: envelope }]); + const params = createSqlParamRefMutator(plan); + + await middleware.beforeExecute?.(plan, createCtx(), params); + + const finalParams = params.currentParams(); + expect(finalParams.length).toBe(1); + const onlyParam = finalParams[0]; + expect(typeof onlyParam).toBe('string'); + // Composite-text literal: `("` + escaped JSON of the ciphertext + // + `")`. Double-quotes inside the JSON are doubled. + const expectedPayload = JSON.stringify('cipher:user.email:alice@example.com').replaceAll( + '"', + '""', + ); + expect(onlyParam).toBe(`("${expectedPayload}")`); + }); + }); + + describe('ctx.signal is forwarded by identity to the SDK', () => { + it('passes ctx.signal to bulkEncrypt by reference', async () => { + const sdk = makeCounterSdk(); + const middleware = bulkEncryptMiddleware(sdk); + const envelope = EncryptedString.from('alice@example.com'); + const plan = buildInsertPlan('user', [{ email: envelope }]); + const params = createSqlParamRefMutator(plan); + const controller = new AbortController(); + + await middleware.beforeExecute?.(plan, createCtx({ signal: controller.signal }), params); + + expect(sdk.bulkEncryptCalls).toHaveLength(1); + expect(sdk.bulkEncryptCalls[0]?.signal).toBe(controller.signal); + }); + + it('omits signal when ctx.signal is undefined', async () => { + const sdk = makeCounterSdk(); + const middleware = bulkEncryptMiddleware(sdk); + const envelope = EncryptedString.from('alice@example.com'); + const plan = buildInsertPlan('user', [{ email: envelope }]); + const params = createSqlParamRefMutator(plan); + + await middleware.beforeExecute?.(plan, createCtx(), params); + + expect(sdk.bulkEncryptCalls).toHaveLength(1); + expect(sdk.bulkEncryptCalls[0]?.signal).toBeUndefined(); + }); + }); + + describe('plaintext slot is retained 
post-encrypt', () => { + it('decrypt() returns plaintext synchronously without consulting the SDK', async () => { + const sdk = makeCounterSdk(); + const middleware = bulkEncryptMiddleware(sdk); + const envelope = EncryptedString.from('alice@example.com'); + const plan = buildInsertPlan('user', [{ email: envelope }]); + const params = createSqlParamRefMutator(plan); + + await middleware.beforeExecute?.(plan, createCtx(), params); + const plaintext = await envelope.decrypt(); + + expect(plaintext).toBe('alice@example.com'); + expect(sdk.singleDecryptCalls).toEqual([]); + expect(sdk.bulkDecryptCalls).toEqual([]); + }); + + it('keeps handle.plaintext populated after middleware returns', async () => { + const sdk = makeCounterSdk(); + const middleware = bulkEncryptMiddleware(sdk); + const envelope = EncryptedString.from('alice@example.com'); + const plan = buildInsertPlan('user', [{ email: envelope }]); + const params = createSqlParamRefMutator(plan); + + await middleware.beforeExecute?.(plan, createCtx(), params); + + expect(envelope.expose().plaintext).toBe('alice@example.com'); + }); + }); + + describe('routing key is derived from envelope handle (table, column)', () => { + it('stamps (table, column) from InsertAst before grouping', async () => { + const sdk = makeCounterSdk(); + const middleware = bulkEncryptMiddleware(sdk); + const envelope = EncryptedString.from('alice@example.com'); + const plan = buildInsertPlan('user', [{ email: envelope }]); + const params = createSqlParamRefMutator(plan); + + await middleware.beforeExecute?.(plan, createCtx(), params); + + expect(envelope.expose().table).toBe('user'); + expect(envelope.expose().column).toBe('email'); + }); + + it('stamps (table, column) from UpdateAst before grouping', async () => { + const sdk = makeCounterSdk(); + const middleware = bulkEncryptMiddleware(sdk); + const envelope = EncryptedString.from('alice@example.com'); + const plan = buildUpdatePlan('admin', { email: envelope }); + const params = 
createSqlParamRefMutator(plan); + + await middleware.beforeExecute?.(plan, createCtx(), params); + + expect(sdk.bulkEncryptCalls).toHaveLength(1); + expect(sdk.bulkEncryptCalls[0]?.routingKey).toEqual({ + table: 'admin', + column: 'email', + }); + }); + + it('rejects re-binding a pre-stamped envelope to a different routing target', async () => { + // Reusing an envelope already bound to one (table, column) routing + // target inside a bulk-encrypt plan that lowers to a different + // target is a programming error: `setHandleRoutingKey` throws on a + // conflicting reassignment so the envelope cannot silently retain + // a stale binding and route to the wrong bulk-encrypt batch. + const sdk = makeCounterSdk(); + const middleware = bulkEncryptMiddleware(sdk); + const envelope = EncryptedString.from('alice@example.com'); + setHandleRoutingKey(envelope, 'admin', 'email'); + const plan = buildInsertPlan('user', [{ email: envelope }]); + const params = createSqlParamRefMutator(plan); + + await expect(middleware.beforeExecute?.(plan, createCtx(), params)).rejects.toThrow( + /routing-key table conflict/, + ); + expect(sdk.bulkEncryptCalls).toHaveLength(0); + }); + + it('re-stamping with the same routing target is a no-op', async () => { + const sdk = makeCounterSdk(); + const middleware = bulkEncryptMiddleware(sdk); + const envelope = EncryptedString.from('alice@example.com'); + setHandleRoutingKey(envelope, 'user', 'email'); + const plan = buildInsertPlan('user', [{ email: envelope }]); + const params = createSqlParamRefMutator(plan); + + await middleware.beforeExecute?.(plan, createCtx(), params); + + expect(sdk.bulkEncryptCalls[0]?.routingKey).toEqual({ + table: 'user', + column: 'email', + }); + }); + }); + + describe('no-op cases', () => { + it('does not call bulkEncrypt when the plan has no cipherstash params', async () => { + const sdk = makeCounterSdk(); + const middleware = bulkEncryptMiddleware(sdk); + const ast = new InsertAst(TableSource.named('user'), [{ id: 
ParamRef.of(1) }]); + const plan = { + sql: 'INSERT INTO "user" (id) VALUES ($1)', + params: [1], + meta: { ...baseMeta }, + ast, + } as SqlExecutionPlan; + const params = createSqlParamRefMutator(plan); + + await middleware.beforeExecute?.(plan, createCtx(), params); + + expect(sdk.bulkEncryptCalls).toEqual([]); + }); + + it('skips when params is undefined', async () => { + const sdk = makeCounterSdk(); + const middleware = bulkEncryptMiddleware(sdk); + const plan = { + sql: 'SELECT 1', + params: [], + meta: { ...baseMeta }, + } as unknown as SqlExecutionPlan; + + await middleware.beforeExecute?.(plan, createCtx()); + + expect(sdk.bulkEncryptCalls).toEqual([]); + }); + }); + + describe('matches every cipherstash codec id', () => { + // The middleware filters `params.entries()` against the closed set + // `CIPHERSTASH_CODEC_ID_SET` rather than the single string codec + // id; this exercises that every codec in the package's surface + // (string + double + bigint + date + boolean + json) routes + // through the bulk-encrypt path, and that every plaintext slot + // in a mixed-codec INSERT participates in exactly one + // `bulkEncrypt` call per `(table, column)` group. + + function buildHeterogeneousInsertPlan( + table: string, + columns: ReadonlyArray<{ name: string; codecId: string; envelope: unknown }>, + ): SqlExecutionPlan { + const params: unknown[] = []; + const row: Record = {}; + for (const col of columns) { + const ref = ParamRef.of(col.envelope, { codec: { codecId: col.codecId } }); + row[col.name] = ref; + params.push(col.envelope); + } + const ast = new InsertAst(TableSource.named(table), [row]); + return { + sql: `INSERT INTO "${table}" (...) 
VALUES (...)`, + params, + meta: { ...baseMeta }, + ast, + } as SqlExecutionPlan; + } + + it('routes envelopes for each of the six cipherstash codec ids through bulk-encrypt', async () => { + const { EncryptedDouble } = await import('../src/execution/envelope-double'); + const { EncryptedBigInt } = await import('../src/execution/envelope-bigint'); + const { EncryptedDate } = await import('../src/execution/envelope-date'); + const { EncryptedBoolean } = await import('../src/execution/envelope-boolean'); + const { EncryptedJson } = await import('../src/execution/envelope-json'); + const { + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_BOOLEAN_CODEC_ID, + CIPHERSTASH_DATE_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, + CIPHERSTASH_JSON_CODEC_ID, + } = await import('../src/extension-metadata/constants'); + + const sdk = makeCounterSdk({ + encryptImpl: (args) => args.values.map((_, i) => `ct:${args.routingKey.column}:${i}`), + }); + const middleware = bulkEncryptMiddleware(sdk); + + const stringEnv = EncryptedString.from('alice@example.com'); + const doubleEnv = EncryptedDouble.from(3.14); + const bigIntEnv = EncryptedBigInt.from(42n); + const dateEnv = EncryptedDate.from(new Date('2024-01-01')); + const boolEnv = EncryptedBoolean.from(true); + const jsonEnv = EncryptedJson.from({ k: 'v' }); + + const plan = buildHeterogeneousInsertPlan('item', [ + { name: 'email', codecId: CIPHERSTASH_STRING_CODEC_ID, envelope: stringEnv }, + { name: 'score', codecId: CIPHERSTASH_DOUBLE_CODEC_ID, envelope: doubleEnv }, + { name: 'amount', codecId: CIPHERSTASH_BIGINT_CODEC_ID, envelope: bigIntEnv }, + { name: 'birthday', codecId: CIPHERSTASH_DATE_CODEC_ID, envelope: dateEnv }, + { name: 'enabled', codecId: CIPHERSTASH_BOOLEAN_CODEC_ID, envelope: boolEnv }, + { name: 'payload', codecId: CIPHERSTASH_JSON_CODEC_ID, envelope: jsonEnv }, + ]); + const params = createSqlParamRefMutator(plan); + + await middleware.beforeExecute?.(plan, createCtx(), params); + + // One bulkEncrypt per (table, column) 
— six columns, one envelope + // each, so six bulkEncrypt calls. Every envelope's ciphertext + // slot ends up populated. + expect(sdk.bulkEncryptCalls).toHaveLength(6); + const byColumn = new Map(sdk.bulkEncryptCalls.map((c) => [c.routingKey.column, c])); + expect(byColumn.has('email')).toBe(true); + expect(byColumn.has('score')).toBe(true); + expect(byColumn.has('amount')).toBe(true); + expect(byColumn.has('birthday')).toBe(true); + expect(byColumn.has('enabled')).toBe(true); + expect(byColumn.has('payload')).toBe(true); + + // Per-envelope plaintext is forwarded to the SDK as `unknown` + // — the SDK sees the original JS plaintext untouched. + expect(byColumn.get('score')?.values).toEqual([3.14]); + expect(byColumn.get('amount')?.values).toEqual([42n]); + expect(byColumn.get('enabled')?.values).toEqual([true]); + expect(byColumn.get('payload')?.values).toEqual([{ k: 'v' }]); + + // Routing context stamped, ciphertext written back. + for (const env of [stringEnv, doubleEnv, bigIntEnv, dateEnv, boolEnv, jsonEnv]) { + expect(env.expose().table).toBe('item'); + expect(env.expose().ciphertext).toBeDefined(); + } + }); + + it('does not route non-cipherstash codec ids through bulk-encrypt', async () => { + // A `ParamRef` carrying a non-cipherstash codec id must not be + // observed by the middleware. The closed-set filter is the + // single defensible boundary against future codec-id collisions. 
+ const sdk = makeCounterSdk(); + const middleware = bulkEncryptMiddleware(sdk); + const ast = new InsertAst(TableSource.named('user'), [ + { id: ParamRef.of(1, { codec: { codecId: 'pg/text@1' } }) }, + ]); + const plan = { + sql: 'INSERT INTO "user" (id) VALUES ($1)', + params: [1], + meta: { ...baseMeta }, + ast, + } as SqlExecutionPlan; + const params = createSqlParamRefMutator(plan); + + await middleware.beforeExecute?.(plan, createCtx(), params); + + expect(sdk.bulkEncryptCalls).toEqual([]); + }); + }); + + describe('error paths', () => { + it('throws when the SDK returns the wrong number of ciphertexts', async () => { + const sdk = makeCounterSdk({ encryptImpl: () => ['only-one'] }); + const middleware = bulkEncryptMiddleware(sdk); + const plan = buildInsertPlan('user', [ + { email: EncryptedString.from('a@x') }, + { email: EncryptedString.from('b@y') }, + ]); + const params = createSqlParamRefMutator(plan); + + await expect(middleware.beforeExecute?.(plan, createCtx(), params)).rejects.toThrow( + /1 ciphertexts.*2 were requested/, + ); + }); + }); +}); + +describe('bulkEncryptMiddleware — name + family identity', () => { + it('declares the SQL family + a stable middleware name', () => { + const middleware = bulkEncryptMiddleware(makeCounterSdk()); + expect(middleware.familyId).toBe('sql'); + expect(middleware.name).toBe('cipherstash.bulk-encrypt'); + }); +}); diff --git a/packages/prisma-next/test/bundling-isolation.test.ts b/packages/prisma-next/test/bundling-isolation.test.ts new file mode 100644 index 00000000..9698f644 --- /dev/null +++ b/packages/prisma-next/test/bundling-isolation.test.ts @@ -0,0 +1,236 @@ +/** + * Control vs runtime/middleware byte-level subpath isolation. + * + * The cipherstash extension publishes three runtime-relevant subpath + * entries: `./control` (contract-space authoring + the codec lifecycle + * hook), `./runtime` (envelope + SDK + codec runtime), and + * `./middleware` (bulk-encrypt middleware). 
Each entry must compose + * tree-shakably so a consumer pulling `./runtime` does not drag in the + * EQL bundle SQL, the cipherstash baseline migration, or the codec + * lifecycle hook (any of which would defeat the runtime-bundle size + * budget and leak control-plane behaviour into runtime call paths) and + * a consumer pulling `./control` does not drag in `EncryptedString`, + * the SDK interface, the codec runtime, or the bulk-encrypt middleware. + * + * This test is the canonical isolation guard. It asserts: + * + * 1. **Entry-body forbidden-substring check** (per entry): the + * entry `.js` body — both the inline source and its `import` / + * `export` statements — does not contain forbidden symbol names. + * Mirrors the predecessor `wip/verify-cipherstash-isolation.js` + * shallow check, which catches both inlined runtime behavior in + * a control entry and cross-chunk leaks via named-import lines + * (`import { ForbiddenName } from "./.js"`). Forbidden + * identifiers occurring inside a chunk's JSDoc or as a PSL type + * identifier string literal are out of scope — they ship no + * executable behavior — and are caught structurally by the + * disjointness check below if the chunk crosses planes. + * 2. **Chunk-graph disjointness**: control's transitively reachable + * chunk-file set and runtime's (resp. middleware's) chunk-file + * set are disjoint, modulo the shared `constants-*.js` chunk + * (pure literal constants — no SDK / codec / migration code). + * + * The dist outputs are produced by `tsdown` from `src/exports/*.ts`. + * `@prisma-next/extension-cipherstash#test` is wired in the root + * `turbo.json` to depend on its own `build`, so the assertions below + * always read fresh dist output for the current source. 
+ */ + +import { existsSync, readFileSync } from 'node:fs'; +import { fileURLToPath } from 'node:url'; +import { dirname, join } from 'pathe'; +import { describe, expect, it } from 'vitest'; + +const PACKAGE_ROOT = dirname(dirname(fileURLToPath(import.meta.url))); +const DIST = join(PACKAGE_ROOT, 'dist'); + +const ENTRY_FILES = ['control.js', 'runtime.js', 'middleware.js'] as const; + +/** + * Forbidden in `control.js` and its transitive chunk graph. + * These are runtime-plane symbols (envelope / SDK interface / codec + * runtime / middleware factory) that must never reach a control-plane + * consumer. + */ +const CONTROL_FORBIDDEN = [ + 'EncryptedString', + 'EncryptedDouble', + 'EncryptedBigInt', + 'EncryptedDate', + 'EncryptedBoolean', + 'EncryptedJson', + 'setHandleCiphertext', + 'CipherstashSdk', + 'bulkEncryptMiddleware', + 'createCipherstashStringCodec', + 'createCipherstashDoubleCodec', + 'createCipherstashBigIntCodec', + 'createCipherstashDateCodec', + 'createCipherstashBooleanCodec', + 'createCipherstashJsonCodec', + 'createCipherstashRuntimeDescriptor', + 'cipherstashAsc', + 'cipherstashDesc', + 'cipherstashJsonbGet', + 'cipherstashJsonbPathQueryFirst', +] as const; + +/** + * Forbidden in `runtime.js` / `middleware.js` and their transitive + * chunk graph. These are contract-space artefacts (EQL bundle SQL, + * cipherstash contract IR, baseline migration, head-ref, the + * codec-control lifecycle hook, EQL bundle migration-op terms) that + * must never reach a runtime consumer. 
+ */ +const RUNTIME_FORBIDDEN = [ + 'EQL_BUNDLE_SQL', + 'cipherstashContract', + 'cipherstashBaselineMigration', + 'cipherstashHeadRef', + 'cipherstashStringCodecHooks', + 'cipherstashDoubleCodecHooks', + 'cipherstashBigIntCodecHooks', + 'cipherstashDateCodecHooks', + 'cipherstashBooleanCodecHooks', + 'cipherstashJsonCodecHooks', + 'add_search_config', + 'remove_search_config', +] as const; + +/** + * Chunks whose name matches this pattern are allowed to appear in + * both the control graph and the runtime graph. Our tsup build emits + * code-split chunks as `chunk-.js` (vs upstream's `tsdown`, + * which uses content-named `constants-.mjs`). The cross-plane + * shared chunk in our output carries pure literal constants (codec + * id, native types, invariant ids) — sharing them is safe and + * desirable. `ALLOWED_SHARED_CHUNK_CONTENT_MARKERS` below guards that + * the matched chunk's body does not also smuggle runtime-plane logic + * across the boundary. + */ +const SHARED_CHUNK_PATTERN = /^chunk-[A-Za-z0-9_-]+\.js$/; + +/** + * Identifiers that uniquely fingerprint the shared constants chunk: + * every shared chunk we accept must export every one of these. If a + * `chunk-*.js` is shared between planes but does NOT include all of + * these markers, it is not the constants chunk and the test rightly + * fails. + */ +const ALLOWED_SHARED_CHUNK_CONTENT_MARKERS = [ + 'CIPHERSTASH_STRING_CODEC_ID', + 'CIPHERSTASH_DOUBLE_CODEC_ID', + 'CIPHERSTASH_BIGINT_CODEC_ID', + 'CIPHERSTASH_DATE_CODEC_ID', + 'CIPHERSTASH_BOOLEAN_CODEC_ID', + 'CIPHERSTASH_JSON_CODEC_ID', +] as const; + +interface ChunkFile { + readonly file: string; + readonly body: string; + readonly size: number; +} + +function readChunk(file: string): ChunkFile { + const path = join(DIST, file); + const body = readFileSync(path, 'utf8'); + return { file, body, size: Buffer.byteLength(body, 'utf8') }; +} + +// Captures relative `.js` edges in three forms: +// `from "./x.js"` — `import ... from`, `export ... 
from` +// `import "./x.js"` — side-effect imports +// `import("./x.js")` — dynamic imports +// Without each of these the disjointness check can silently pass for a +// chunk graph that re-exports cross-plane state through side-effect +// imports or `export ... from` edges. +const RELATIVE_IMPORT_RE = /(?:from|import)\s*\(?\s*["'](\.\/[^"']+\.js)["']/g; + +function collectGraph(entry: string): Map { + const graph = new Map(); + const queue: string[] = [entry]; + while (queue.length > 0) { + const next = queue.shift(); + if (next === undefined || graph.has(next)) { + continue; + } + const chunk = readChunk(next); + graph.set(next, chunk); + for (const match of chunk.body.matchAll(RELATIVE_IMPORT_RE)) { + const importPath = match[1]; + if (importPath === undefined) { + continue; + } + const importFile = importPath.replace(/^\.\//, ''); + if (!graph.has(importFile)) { + queue.push(importFile); + } + } + } + return graph; +} + +function findLeaksInEntry(entry: ChunkFile, forbidden: readonly string[]): string[] { + return forbidden.filter((needle) => entry.body.includes(needle)); +} + +function isAllowedSharedChunk(chunk: string): boolean { + if (!SHARED_CHUNK_PATTERN.test(chunk)) { + return false; + } + const body = readChunk(chunk).body; + return ALLOWED_SHARED_CHUNK_CONTENT_MARKERS.every((marker) => body.includes(marker)); +} + +describe('bundling isolation', () => { + it('dist entry files exist (run `pnpm --filter @cipherstash/prisma-next build` first)', () => { + for (const entry of ENTRY_FILES) { + expect(existsSync(join(DIST, entry)), `dist/${entry} is missing`).toBe(true); + } + }); + + it('control.js does not pull runtime-plane symbols', () => { + const entry = readChunk('control.js'); + const leaks = findLeaksInEntry(entry, CONTROL_FORBIDDEN); + expect(leaks, `control entry leaks: ${leaks.join(', ')}`).toEqual([]); + }); + + it('runtime.js does not pull contract-space artefacts', () => { + const entry = readChunk('runtime.js'); + const leaks = 
findLeaksInEntry(entry, RUNTIME_FORBIDDEN); + expect(leaks, `runtime entry leaks: ${leaks.join(', ')}`).toEqual([]); + }); + + it('middleware.js does not pull contract-space artefacts', () => { + const entry = readChunk('middleware.js'); + const leaks = findLeaksInEntry(entry, RUNTIME_FORBIDDEN); + expect(leaks, `middleware entry leaks: ${leaks.join(', ')}`).toEqual([]); + }); + + it('control vs runtime chunk graphs are disjoint (modulo shared constants chunk)', () => { + const controlChunks = new Set(collectGraph('control.js').keys()); + const runtimeChunks = new Set(collectGraph('runtime.js').keys()); + controlChunks.delete('control.js'); + runtimeChunks.delete('runtime.js'); + const intersection = [...controlChunks].filter((f) => runtimeChunks.has(f)); + const unexpectedShared = intersection.filter((f) => !isAllowedSharedChunk(f)); + expect( + unexpectedShared, + `control & runtime share unexpected chunks: ${unexpectedShared.join(', ')}`, + ).toEqual([]); + }); + + it('control vs middleware chunk graphs are disjoint (modulo shared constants chunk)', () => { + const controlChunks = new Set(collectGraph('control.js').keys()); + const middlewareChunks = new Set(collectGraph('middleware.js').keys()); + controlChunks.delete('control.js'); + middlewareChunks.delete('middleware.js'); + const intersection = [...controlChunks].filter((f) => middlewareChunks.has(f)); + const unexpectedShared = intersection.filter((f) => !isAllowedSharedChunk(f)); + expect( + unexpectedShared, + `control & middleware share unexpected chunks: ${unexpectedShared.join(', ')}`, + ).toEqual([]); + }); +}); diff --git a/packages/prisma-next/test/call-classes.test.ts b/packages/prisma-next/test/call-classes.test.ts new file mode 100644 index 00000000..0890d7eb --- /dev/null +++ b/packages/prisma-next/test/call-classes.test.ts @@ -0,0 +1,188 @@ +/** + * Cipherstash migration IR call classes. + * + * Each `*Call` is a renderable node implementing the framework + * `OpFactoryCall` interface. 
The class carries the literal arguments its + * backing factory would receive, computes a human-readable `label` in its + * constructor, and exposes: + * + * - `toOp()` — produces the runtime op shape that the codec hook used + * to build via `buildAddOp` / `buildRemoveOp`. + * - `renderTypeScript()` — emits a `cipherstashAddSearchConfig({...})` + * / `cipherstashRemoveSearchConfig({...})` factory call so the + * generated `migration.ts` reads as a normal authored migration. + * - `importRequirements()` — declares the factory symbol pulled from + * `@prisma-next/extension-cipherstash/migration`. + */ + +import { describe, expect, it } from 'vitest'; +import { + CipherstashAddSearchConfigCall, + CipherstashRemoveSearchConfigCall, + type CipherstashSearchIndex, + cipherstashAddSearchConfig, + cipherstashRemoveSearchConfig, +} from '../src/migration/call-classes'; + +const TABLE = 'user'; +const FIELD = 'email'; +const MIGRATION_MODULE = '@prisma-next/extension-cipherstash/migration'; + +describe('CipherstashAddSearchConfigCall', () => { + it('exposes factoryName, operationClass and label per (table, field, index)', () => { + const call = new CipherstashAddSearchConfigCall(TABLE, FIELD, 'unique'); + expect(call.factoryName).toBe('cipherstashAddSearchConfig'); + expect(call.operationClass).toBe('additive'); + expect(call.label).toBe(`Enable cipherstash search on ${TABLE}.${FIELD}`); + }); + + it('toOp() produces the canonical add_search_config@v1 op shape', () => { + const call = new CipherstashAddSearchConfigCall(TABLE, FIELD, 'unique'); + expect(call.toOp()).toEqual({ + id: `cipherstash-codec.${TABLE}.${FIELD}.add-search-config.unique`, + label: `Enable cipherstash search on ${TABLE}.${FIELD}`, + operationClass: 'additive', + invariantId: `cipherstash-codec:${TABLE}.${FIELD}:add-search-config:unique@v1`, + target: { id: 'postgres' }, + precheck: [], + execute: [ + { + description: `Register cipherstash unique search config for ${TABLE}.${FIELD}`, + sql: `SELECT 
eql_v2.add_search_config('${TABLE}', '${FIELD}', 'unique', 'text');`, + }, + ], + postcheck: [], + }); + }); + + it("toOp() embeds 'match' when the index is 'match'", () => { + const call = new CipherstashAddSearchConfigCall(TABLE, FIELD, 'match'); + const op = call.toOp(); + expect(op.id).toBe(`cipherstash-codec.${TABLE}.${FIELD}.add-search-config.match`); + expect(op.invariantId).toBe(`cipherstash-codec:${TABLE}.${FIELD}:add-search-config:match@v1`); + expect(op.execute[0]!.sql).toContain(`'match'`); + }); + + it("toOp() defaults the cast type to 'text'", () => { + const call = new CipherstashAddSearchConfigCall(TABLE, FIELD, 'unique'); + expect(call.toOp().execute[0]!.sql).toContain(`, 'text')`); + }); + + it('toOp() honours an explicit castAs override', () => { + const call = new CipherstashAddSearchConfigCall(TABLE, FIELD, 'unique', 'jsonb'); + expect(call.toOp().execute[0]!.sql).toContain(`, 'jsonb')`); + }); + + it('toOp() escapes embedded single quotes in identifiers', () => { + const call = new CipherstashAddSearchConfigCall("us'er", "em'ail", 'unique'); + expect(call.toOp().execute[0]!.sql).toContain("'us''er'"); + expect(call.toOp().execute[0]!.sql).toContain("'em''ail'"); + }); + + it("renderTypeScript() emits cipherstashAddSearchConfig({...}) without castAs when 'text'", () => { + const call = new CipherstashAddSearchConfigCall(TABLE, FIELD, 'unique'); + expect(call.renderTypeScript()).toBe( + `cipherstashAddSearchConfig({ table: "${TABLE}", column: "${FIELD}", index: "unique" })`, + ); + }); + + it('renderTypeScript() emits castAs only when it differs from the default', () => { + const call = new CipherstashAddSearchConfigCall(TABLE, FIELD, 'match', 'jsonb'); + expect(call.renderTypeScript()).toBe( + `cipherstashAddSearchConfig({ table: "${TABLE}", column: "${FIELD}", index: "match", castAs: "jsonb" })`, + ); + }); + + it('importRequirements() pulls cipherstashAddSearchConfig from the /migration subpath', () => { + const call = new 
CipherstashAddSearchConfigCall(TABLE, FIELD, 'unique'); + expect(call.importRequirements()).toEqual([ + { moduleSpecifier: MIGRATION_MODULE, symbol: 'cipherstashAddSearchConfig' }, + ]); + }); + + it('is frozen at construction', () => { + const call = new CipherstashAddSearchConfigCall(TABLE, FIELD, 'unique'); + expect(Object.isFrozen(call)).toBe(true); + }); +}); + +describe('CipherstashRemoveSearchConfigCall', () => { + it('exposes factoryName, operationClass and label per (table, field, index)', () => { + const call = new CipherstashRemoveSearchConfigCall(TABLE, FIELD, 'match'); + expect(call.factoryName).toBe('cipherstashRemoveSearchConfig'); + expect(call.operationClass).toBe('destructive'); + expect(call.label).toBe(`Disable cipherstash search on ${TABLE}.${FIELD}`); + }); + + it('toOp() produces the canonical remove_search_config@v1 op shape', () => { + const call = new CipherstashRemoveSearchConfigCall(TABLE, FIELD, 'unique'); + expect(call.toOp()).toEqual({ + id: `cipherstash-codec.${TABLE}.${FIELD}.remove-search-config.unique`, + label: `Disable cipherstash search on ${TABLE}.${FIELD}`, + operationClass: 'destructive', + invariantId: `cipherstash-codec:${TABLE}.${FIELD}:remove-search-config:unique@v1`, + target: { id: 'postgres' }, + precheck: [], + execute: [ + { + description: `Remove cipherstash unique search config for ${TABLE}.${FIELD}`, + sql: `SELECT eql_v2.remove_search_config('${TABLE}', '${FIELD}', 'unique');`, + }, + ], + postcheck: [], + }); + }); + + it('renderTypeScript() emits cipherstashRemoveSearchConfig({...}) (castAs is irrelevant)', () => { + const call = new CipherstashRemoveSearchConfigCall(TABLE, FIELD, 'match'); + expect(call.renderTypeScript()).toBe( + `cipherstashRemoveSearchConfig({ table: "${TABLE}", column: "${FIELD}", index: "match" })`, + ); + }); + + it('importRequirements() pulls cipherstashRemoveSearchConfig from the /migration subpath', () => { + const call = new CipherstashRemoveSearchConfigCall(TABLE, FIELD, 'unique'); 
+ expect(call.importRequirements()).toEqual([ + { moduleSpecifier: MIGRATION_MODULE, symbol: 'cipherstashRemoveSearchConfig' }, + ]); + }); + + it('is frozen at construction', () => { + const call = new CipherstashRemoveSearchConfigCall(TABLE, FIELD, 'unique'); + expect(Object.isFrozen(call)).toBe(true); + }); +}); + +describe('cipherstashAddSearchConfig / cipherstashRemoveSearchConfig factories', () => { + it('cipherstashAddSearchConfig constructs an Add call with the given args', () => { + const call = cipherstashAddSearchConfig({ table: TABLE, column: FIELD, index: 'unique' }); + expect(call).toBeInstanceOf(CipherstashAddSearchConfigCall); + expect(call.toOp().invariantId).toBe( + `cipherstash-codec:${TABLE}.${FIELD}:add-search-config:unique@v1`, + ); + }); + + it('cipherstashAddSearchConfig honours an explicit castAs override', () => { + const call = cipherstashAddSearchConfig({ + table: TABLE, + column: FIELD, + index: 'unique', + castAs: 'jsonb', + }); + expect(call.toOp().execute[0]!.sql).toContain(`, 'jsonb')`); + expect(call.renderTypeScript()).toContain('castAs: "jsonb"'); + }); + + it('cipherstashRemoveSearchConfig constructs a Remove call with the given args', () => { + const call = cipherstashRemoveSearchConfig({ table: TABLE, column: FIELD, index: 'match' }); + expect(call).toBeInstanceOf(CipherstashRemoveSearchConfigCall); + expect(call.toOp().invariantId).toBe( + `cipherstash-codec:${TABLE}.${FIELD}:remove-search-config:match@v1`, + ); + }); + + it('CipherstashSearchIndex narrows to the two supported indices', () => { + const indices: readonly CipherstashSearchIndex[] = ['unique', 'match']; + expect(indices).toEqual(['unique', 'match']); + }); +}); diff --git a/packages/prisma-next/test/call-classes.types.test-d.ts b/packages/prisma-next/test/call-classes.types.test-d.ts new file mode 100644 index 00000000..af983cdc --- /dev/null +++ b/packages/prisma-next/test/call-classes.types.test-d.ts @@ -0,0 +1,50 @@ +/** + * Type-shape tests pinning 
`CipherstashSearchIndex` to the full EQL + * `add_search_config` index vocabulary used across every cipherstash + * codec (string, double, bigint, date, boolean, json). + * + * Negative cases use `@ts-expect-error` per `AGENTS.md § Typesafety + * rules` — the documented carve-out for negative type tests. + */ + +import { + type CipherstashSearchIndex, + cipherstashAddSearchConfig, + cipherstashRemoveSearchConfig, +} from '../src/migration/call-classes'; + +// --- Positive: every EQL index name is an inhabitant of the union. ----- + +const _unique: CipherstashSearchIndex = 'unique'; +const _match: CipherstashSearchIndex = 'match'; +const _ore: CipherstashSearchIndex = 'ore'; +const _steVec: CipherstashSearchIndex = 'ste_vec'; +void _unique; +void _match; +void _ore; +void _steVec; + +// The factory functions accept all four index names without per-codec +// changes — the widening is purely a type-union extension; the factory +// bodies already accept arbitrary `index: string` at runtime. +void cipherstashAddSearchConfig({ table: 't', column: 'c', index: 'unique' }); +void cipherstashAddSearchConfig({ table: 't', column: 'c', index: 'match' }); +void cipherstashAddSearchConfig({ table: 't', column: 'c', index: 'ore' }); +void cipherstashAddSearchConfig({ table: 't', column: 'c', index: 'ste_vec' }); + +void cipherstashRemoveSearchConfig({ table: 't', column: 'c', index: 'unique' }); +void cipherstashRemoveSearchConfig({ table: 't', column: 'c', index: 'match' }); +void cipherstashRemoveSearchConfig({ table: 't', column: 'c', index: 'ore' }); +void cipherstashRemoveSearchConfig({ table: 't', column: 'c', index: 'ste_vec' }); + +// --- Negative: an index name outside the EQL vocabulary is rejected. --- + +// @ts-expect-error — `'btree'` is not in the EQL search-config index +// vocabulary; the union exists precisely to catch typos at the +// authoring boundary. 
+const _bogus: CipherstashSearchIndex = 'btree'; +void _bogus; + +// @ts-expect-error — same negative case routed through the factory: +// no `index` value outside the union compiles. +void cipherstashAddSearchConfig({ table: 't', column: 'c', index: 'btree' }); diff --git a/packages/prisma-next/test/cipherstash-codec-numeric.test.ts b/packages/prisma-next/test/cipherstash-codec-numeric.test.ts new file mode 100644 index 00000000..05cea8b6 --- /dev/null +++ b/packages/prisma-next/test/cipherstash-codec-numeric.test.ts @@ -0,0 +1,133 @@ +/** + * Codec lifecycle hook tests for the numeric cipherstash codecs + * (`cipherstash/double@1`, `cipherstash/bigint@1`). + * + * Numeric codecs share the `{ equality, orderAndRange }` flag set; the + * only delta between them is the `cast_as` argument + * (`'double'` vs `'big_int'`). + * + * `invariantId` template (shared with the string codec): + * `cipherstash-codec:
<table>.<field>:<action>:<index>@v1` + */ + +import type { SqlMigrationPlanOperation } from '@prisma-next/family-sql/control'; +import type { StorageColumn } from '@prisma-next/sql-contract/types'; +import { describe, expect, it } from 'vitest'; +import { + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, +} from '../src/extension-metadata/constants'; +import { + cipherstashBigIntCodecHooks, + cipherstashDoubleCodecHooks, +} from '../src/migration/cipherstash-codec'; + +const TABLE = 'User'; +const FIELD = 'email'; + +describe('cipherstashDoubleCodecHooks — flag → index mapping', () => { + function ctxNumeric(args: { + prior?: Partial<StorageColumn> | undefined; + next?: Partial<StorageColumn> | undefined; + codecId: string; + }): { + readonly tableName: string; + readonly fieldName: string; + readonly priorField?: StorageColumn; + readonly newField?: StorageColumn; + } { + const baseCol: StorageColumn = { + codecId: args.codecId, + nativeType: 'eql_v2_encrypted', + nullable: false, + }; + return { + tableName: TABLE, + fieldName: FIELD, + ...(args.prior !== undefined ? { priorField: { ...baseCol, ...args.prior } } : {}), + ...(args.next !== undefined ?
{ newField: { ...baseCol, ...args.next } } : {}), + }; + } + + const onFieldEvent = ( + event: 'added' | 'dropped' | 'altered', + args: { prior?: Partial<StorageColumn>; next?: Partial<StorageColumn> }, + ): readonly SqlMigrationPlanOperation[] => + cipherstashDoubleCodecHooks.onFieldEvent!( + event, + ctxNumeric({ ...args, codecId: CIPHERSTASH_DOUBLE_CODEC_ID }), + ).map((c) => c.toOp() as SqlMigrationPlanOperation); + + it("emits add_search_config(unique) with cast_as='double' when equality flips on", () => { + const ops = onFieldEvent('added', { next: { typeParams: { equality: true } } }); + expect(ops).toHaveLength(1); + expect(ops[0]!.invariantId).toBe( + `cipherstash-codec:${TABLE}.${FIELD}:add-search-config:unique@v1`, + ); + expect(ops[0]!.execute[0]!.sql).toContain(`'unique'`); + expect(ops[0]!.execute[0]!.sql).toContain(`'double'`); + }); + + it("emits add_search_config(ore) with cast_as='double' when orderAndRange flips on", () => { + const ops = onFieldEvent('added', { next: { typeParams: { orderAndRange: true } } }); + expect(ops).toHaveLength(1); + expect(ops[0]!.invariantId).toBe( + `cipherstash-codec:${TABLE}.${FIELD}:add-search-config:ore@v1`, + ); + expect(ops[0]!.execute[0]!.sql).toContain(`'ore'`); + expect(ops[0]!.execute[0]!.sql).toContain(`'double'`); + }); + + it('emits one op per enabled flag when both are true', () => { + const ops = onFieldEvent('added', { + next: { typeParams: { equality: true, orderAndRange: true } }, + }); + expect(ops).toHaveLength(2); + const ids = ops.map((o) => o.invariantId).sort(); + expect(ids).toEqual([ + `cipherstash-codec:${TABLE}.${FIELD}:add-search-config:ore@v1`, + `cipherstash-codec:${TABLE}.${FIELD}:add-search-config:unique@v1`, + ]); + }); + + it('emits remove ops on drop for previously-enabled flags', () => { + const ops = onFieldEvent('dropped', { + prior: { typeParams: { equality: true, orderAndRange: true } }, + }); + expect(ops).toHaveLength(2); + const ids = ops.map((o) => o.invariantId).sort(); + expect(ids).toEqual([ + 
`cipherstash-codec:${TABLE}.${FIELD}:remove-search-config:ore@v1`, + `cipherstash-codec:${TABLE}.${FIELD}:remove-search-config:unique@v1`, + ]); + }); + + it('emits no ops when freeTextSearch is set (the string-only flag is silently ignored)', () => { + // Numeric codecs do not register `freeTextSearch` in their + // `flagToIndex`, so a stale `freeTextSearch: true` slot in + // `typeParams` produces no ops. Authoring-time PSL/TS rejection + // catches the mistake earlier — see psl-interpretation.test.ts. + expect(onFieldEvent('added', { next: { typeParams: { freeTextSearch: true } } })).toEqual([]); + }); +}); + +describe('cipherstashBigIntCodecHooks — cast_as=big_int', () => { + it("emits add_search_config(unique) with cast_as='big_int' when equality flips on", () => { + const ctxArg = { + tableName: TABLE, + fieldName: FIELD, + newField: { + codecId: CIPHERSTASH_BIGINT_CODEC_ID, + nativeType: 'eql_v2_encrypted', + nullable: false, + typeParams: { equality: true }, + } as StorageColumn, + }; + const ops = cipherstashBigIntCodecHooks.onFieldEvent!('added', ctxArg).map( + (c) => c.toOp() as SqlMigrationPlanOperation, + ); + expect(ops).toHaveLength(1); + expect(ops[0]!.execute[0]!.sql).toContain(`'unique'`); + expect(ops[0]!.execute[0]!.sql).toContain(`'big_int'`); + }); +}); diff --git a/packages/prisma-next/test/cipherstash-codec-other-codecs.test.ts b/packages/prisma-next/test/cipherstash-codec-other-codecs.test.ts new file mode 100644 index 00000000..ca26d0c3 --- /dev/null +++ b/packages/prisma-next/test/cipherstash-codec-other-codecs.test.ts @@ -0,0 +1,132 @@ +/** + * Codec lifecycle hook tests for the date, boolean, and JSON + * cipherstash codecs. 
+ * + * Each codec exposes a narrower flag set than the string codec: + * + * - `cipherstash/date@1` — `{ equality, orderAndRange }`, cast_as=date + * - `cipherstash/boolean@1` — `{ equality }` only, cast_as=boolean + * - `cipherstash/json@1` — `{ searchableJson }`, cast_as=jsonb + * + * `invariantId` template (shared with the string codec): + * `cipherstash-codec:
.::@v1` + */ + +import type { SqlMigrationPlanOperation } from '@prisma-next/family-sql/control'; +import type { StorageColumn } from '@prisma-next/sql-contract/types'; +import { describe, expect, it } from 'vitest'; +import { + CIPHERSTASH_BOOLEAN_CODEC_ID, + CIPHERSTASH_DATE_CODEC_ID, + CIPHERSTASH_JSON_CODEC_ID, +} from '../src/extension-metadata/constants'; +import { + cipherstashBooleanCodecHooks, + cipherstashDateCodecHooks, + cipherstashJsonCodecHooks, +} from '../src/migration/cipherstash-codec'; + +const TABLE = 'User'; +const FIELD = 'email'; + +describe('cipherstashDateCodecHooks — cast_as=date', () => { + it("emits add_search_config(unique) with cast_as='date' when equality flips on", () => { + const ctxArg = { + tableName: TABLE, + fieldName: FIELD, + newField: { + codecId: CIPHERSTASH_DATE_CODEC_ID, + nativeType: 'eql_v2_encrypted', + nullable: false, + typeParams: { equality: true, orderAndRange: true }, + } as StorageColumn, + }; + const ops = cipherstashDateCodecHooks.onFieldEvent!('added', ctxArg).map( + (c) => c.toOp() as SqlMigrationPlanOperation, + ); + expect(ops).toHaveLength(2); + const sqls = ops.map((o) => o.execute[0]!.sql); + expect(sqls.some((s) => s.includes(`'unique'`))).toBe(true); + expect(sqls.some((s) => s.includes(`'ore'`))).toBe(true); + for (const s of sqls) expect(s).toContain(`'date'`); + }); +}); + +describe('cipherstashBooleanCodecHooks — equality-only, cast_as=boolean', () => { + it('emits a single add_search_config(unique) with cast_as=boolean when equality flips on', () => { + const ctxArg = { + tableName: TABLE, + fieldName: FIELD, + newField: { + codecId: CIPHERSTASH_BOOLEAN_CODEC_ID, + nativeType: 'eql_v2_encrypted', + nullable: false, + typeParams: { equality: true }, + } as StorageColumn, + }; + const ops = cipherstashBooleanCodecHooks.onFieldEvent!('added', ctxArg).map( + (c) => c.toOp() as SqlMigrationPlanOperation, + ); + expect(ops).toHaveLength(1); + expect(ops[0]!.execute[0]!.sql).toContain(`'unique'`); + 
expect(ops[0]!.execute[0]!.sql).toContain(`'boolean'`); + }); + + it('does not emit ore ops — booleans have no orderAndRange flag', () => { + const ctxArg = { + tableName: TABLE, + fieldName: FIELD, + newField: { + codecId: CIPHERSTASH_BOOLEAN_CODEC_ID, + nativeType: 'eql_v2_encrypted', + nullable: false, + typeParams: { equality: true, orderAndRange: true }, + } as StorageColumn, + }; + const ops = cipherstashBooleanCodecHooks.onFieldEvent!('added', ctxArg).map( + (c) => c.toOp() as SqlMigrationPlanOperation, + ); + expect(ops).toHaveLength(1); + expect(ops[0]!.execute[0]!.sql).not.toContain(`'ore'`); + }); +}); + +describe('cipherstashJsonCodecHooks — searchableJson → ste_vec, cast_as=jsonb', () => { + it('emits add_search_config(ste_vec) with cast_as=jsonb when searchableJson flips on', () => { + const ctxArg = { + tableName: TABLE, + fieldName: FIELD, + newField: { + codecId: CIPHERSTASH_JSON_CODEC_ID, + nativeType: 'eql_v2_encrypted', + nullable: false, + typeParams: { searchableJson: true }, + } as StorageColumn, + }; + const ops = cipherstashJsonCodecHooks.onFieldEvent!('added', ctxArg).map( + (c) => c.toOp() as SqlMigrationPlanOperation, + ); + expect(ops).toHaveLength(1); + expect(ops[0]!.execute[0]!.sql).toContain(`'ste_vec'`); + expect(ops[0]!.execute[0]!.sql).toContain(`'jsonb'`); + }); + + it('emits remove_search_config(ste_vec) on drop when searchableJson was previously enabled', () => { + const ctxArg = { + tableName: TABLE, + fieldName: FIELD, + priorField: { + codecId: CIPHERSTASH_JSON_CODEC_ID, + nativeType: 'eql_v2_encrypted', + nullable: false, + typeParams: { searchableJson: true }, + } as StorageColumn, + }; + const ops = cipherstashJsonCodecHooks.onFieldEvent!('dropped', ctxArg).map( + (c) => c.toOp() as SqlMigrationPlanOperation, + ); + expect(ops).toHaveLength(1); + expect(ops[0]!.execute[0]!.sql).toContain('eql_v2.remove_search_config'); + expect(ops[0]!.execute[0]!.sql).toContain(`'ste_vec'`); + }); +}); diff --git 
a/packages/prisma-next/test/cipherstash-codec-string.test.ts b/packages/prisma-next/test/cipherstash-codec-string.test.ts new file mode 100644 index 00000000..9e45f0e2 --- /dev/null +++ b/packages/prisma-next/test/cipherstash-codec-string.test.ts @@ -0,0 +1,318 @@ +/** + * Codec lifecycle hook tests for `cipherstash:string@1`. + * + * Each enabled flag in the column's `typeParams` + * maps to its own EQL search-config index: + * + * - `equality: true` → `'unique'` index + * - `freeTextSearch: true` → `'match'` index + * - `orderAndRange: true` → `'ore'` index + * + * The codec hook emits **one `add_search_config@v1` op per enabled + * flag** — each op is independently invertible by + * a paired `remove_search_config@v1` op carrying the same index name, + * which keeps the op-graph simple and the diff per-flag granular. + * + * `'altered'` events decompose into per-flag adds and removes against + * the prior side: a flag flipped on emits an add op for that index, a + * flag flipped off emits a remove op. Flags whose enabled state did + * not change yield no op (the index already matches the desired + * configuration). + * + * `invariantId` template: + * `cipherstash-codec:
<table>.<field>:<action>:<index>@v1` + * + * Stable across regenerations — every input is deterministic. + */ + +import type { SqlMigrationPlanOperation } from '@prisma-next/family-sql/control'; +import type { StorageColumn } from '@prisma-next/sql-contract/types'; +import { describe, expect, it } from 'vitest'; +import { CIPHERSTASH_STRING_CODEC_ID } from '../src/extension-metadata/constants'; +import { cipherstashStringCodecHooks } from '../src/migration/cipherstash-codec'; + +const TABLE = 'User'; +const FIELD = 'email'; + +function ctx(args: { + prior?: Partial<StorageColumn> | undefined; + next?: Partial<StorageColumn> | undefined; + tableName?: string; + fieldName?: string; +}): { + readonly tableName: string; + readonly fieldName: string; + readonly priorField?: StorageColumn; + readonly newField?: StorageColumn; +} { + const baseCol: StorageColumn = { + codecId: CIPHERSTASH_STRING_CODEC_ID, + nativeType: 'eql_v2_encrypted', + nullable: false, + }; + return { + tableName: args.tableName ?? TABLE, + fieldName: args.fieldName ?? FIELD, + ...(args.prior !== undefined ? { priorField: { ...baseCol, ...args.prior } } : {}), + ...(args.next !== undefined ? { newField: { ...baseCol, ...args.next } } : {}), + }; +} + +describe('cipherstashStringCodecHooks.onFieldEvent — flag → index mapping', () => { + // The hook returns `OpFactoryCall` instances (ADR 195). These tests + // verify the runtime op shape, so we lower each Call to its op via + // `.toOp()` once at the test boundary and assert against the + // resulting array. Render-side / class-side coverage lives in + // migration-call-classes.test.ts.
+ const onFieldEventCalls = cipherstashStringCodecHooks.onFieldEvent!; + const onFieldEvent: ( + ...args: Parameters<typeof onFieldEventCalls> + ) => readonly SqlMigrationPlanOperation[] = (...args) => + onFieldEventCalls(...args).map((c) => c.toOp() as SqlMigrationPlanOperation); + + describe("event 'added' — one add op per enabled flag", () => { + it('emits add_search_config(unique) when typeParams.equality is true', () => { + const ops = onFieldEvent('added', ctx({ next: { typeParams: { equality: true } } })); + expect(ops).toHaveLength(1); + expect(ops[0]!.invariantId).toBe( + `cipherstash-codec:${TABLE}.${FIELD}:add-search-config:unique@v1`, + ); + expect(ops[0]!.execute[0]!.sql).toContain('eql_v2.add_search_config'); + expect(ops[0]!.execute[0]!.sql).toContain(`'unique'`); + expect(ops[0]!.execute[0]!.sql).toContain(`'${TABLE}'`); + expect(ops[0]!.execute[0]!.sql).toContain(`'${FIELD}'`); + }); + + it('emits add_search_config(match) when typeParams.freeTextSearch is true', () => { + const ops = onFieldEvent('added', ctx({ next: { typeParams: { freeTextSearch: true } } })); + expect(ops).toHaveLength(1); + expect(ops[0]!.invariantId).toBe( + `cipherstash-codec:${TABLE}.${FIELD}:add-search-config:match@v1`, + ); + expect(ops[0]!.execute[0]!.sql).toContain(`'match'`); + }); + + it('emits add_search_config(ore) when typeParams.orderAndRange is true', () => { + const ops = onFieldEvent('added', ctx({ next: { typeParams: { orderAndRange: true } } })); + expect(ops).toHaveLength(1); + expect(ops[0]!.invariantId).toBe( + `cipherstash-codec:${TABLE}.${FIELD}:add-search-config:ore@v1`, + ); + expect(ops[0]!.execute[0]!.sql).toContain(`'ore'`); + }); + + it('emits one op per enabled flag when both flags are true', () => { + const ops = onFieldEvent( + 'added', + ctx({ next: { typeParams: { equality: true, freeTextSearch: true } } }), + ); + expect(ops).toHaveLength(2); + const invariantIds = ops.map((op) => op.invariantId).sort(); + expect(invariantIds).toEqual([ + 
`cipherstash-codec:${TABLE}.${FIELD}:add-search-config:match@v1`, + `cipherstash-codec:${TABLE}.${FIELD}:add-search-config:unique@v1`, + ]); + }); + + it('emits nothing when no flag is enabled', () => { + expect(onFieldEvent('added', ctx({ next: {} }))).toEqual([]); + expect(onFieldEvent('added', ctx({ next: { typeParams: {} } }))).toEqual([]); + expect( + onFieldEvent( + 'added', + ctx({ next: { typeParams: { equality: false, freeTextSearch: false } } }), + ), + ).toEqual([]); + }); + }); + + describe("event 'dropped' — one remove op per previously-enabled flag", () => { + it('emits remove_search_config(unique) when prior typeParams.equality was true', () => { + const ops = onFieldEvent('dropped', ctx({ prior: { typeParams: { equality: true } } })); + expect(ops).toHaveLength(1); + expect(ops[0]!.invariantId).toBe( + `cipherstash-codec:${TABLE}.${FIELD}:remove-search-config:unique@v1`, + ); + expect(ops[0]!.execute[0]!.sql).toContain('eql_v2.remove_search_config'); + expect(ops[0]!.execute[0]!.sql).toContain(`'unique'`); + }); + + it('emits remove_search_config(match) when prior typeParams.freeTextSearch was true', () => { + const ops = onFieldEvent('dropped', ctx({ prior: { typeParams: { freeTextSearch: true } } })); + expect(ops).toHaveLength(1); + expect(ops[0]!.invariantId).toBe( + `cipherstash-codec:${TABLE}.${FIELD}:remove-search-config:match@v1`, + ); + expect(ops[0]!.execute[0]!.sql).toContain(`'match'`); + }); + + it('emits one remove op per previously-enabled flag when both flags were true', () => { + const ops = onFieldEvent( + 'dropped', + ctx({ prior: { typeParams: { equality: true, freeTextSearch: true } } }), + ); + expect(ops).toHaveLength(2); + const invariantIds = ops.map((op) => op.invariantId).sort(); + expect(invariantIds).toEqual([ + `cipherstash-codec:${TABLE}.${FIELD}:remove-search-config:match@v1`, + `cipherstash-codec:${TABLE}.${FIELD}:remove-search-config:unique@v1`, + ]); + }); + + it('emits nothing when prior column had no flags 
enabled', () => { + expect(onFieldEvent('dropped', ctx({ prior: {} }))).toEqual([]); + expect(onFieldEvent('dropped', ctx({ prior: { typeParams: { equality: false } } }))).toEqual( + [], + ); + }); + }); + + describe("event 'altered' — per-flag delta against the prior side", () => { + it('emits an add op only for flags newly enabled', () => { + const ops = onFieldEvent( + 'altered', + ctx({ + prior: { typeParams: { equality: false, freeTextSearch: false } }, + next: { typeParams: { equality: true, freeTextSearch: false } }, + }), + ); + expect(ops).toHaveLength(1); + expect(ops[0]!.invariantId).toBe( + `cipherstash-codec:${TABLE}.${FIELD}:add-search-config:unique@v1`, + ); + }); + + it('emits a remove op only for flags newly disabled', () => { + const ops = onFieldEvent( + 'altered', + ctx({ + prior: { typeParams: { equality: true, freeTextSearch: false } }, + next: { typeParams: { equality: false, freeTextSearch: false } }, + }), + ); + expect(ops).toHaveLength(1); + expect(ops[0]!.invariantId).toBe( + `cipherstash-codec:${TABLE}.${FIELD}:remove-search-config:unique@v1`, + ); + }); + + it('emits an add and a remove op when one flag flips on while another flips off', () => { + const ops = onFieldEvent( + 'altered', + ctx({ + prior: { typeParams: { equality: true, freeTextSearch: false } }, + next: { typeParams: { equality: false, freeTextSearch: true } }, + }), + ); + expect(ops).toHaveLength(2); + const invariantIds = ops.map((op) => op.invariantId).sort(); + expect(invariantIds).toEqual([ + `cipherstash-codec:${TABLE}.${FIELD}:add-search-config:match@v1`, + `cipherstash-codec:${TABLE}.${FIELD}:remove-search-config:unique@v1`, + ]); + }); + + it('emits nothing when flags are unchanged', () => { + const same = { equality: true, freeTextSearch: true }; + expect( + onFieldEvent('altered', ctx({ prior: { typeParams: same }, next: { typeParams: same } })), + ).toEqual([]); + }); + + it('emits nothing when neither side has flags enabled', () => { + expect( + 
onFieldEvent( + 'altered', + ctx({ prior: { typeParams: {} }, next: { typeParams: { other: 1 } } }), + ), + ).toEqual([]); + }); + }); + + describe('operation labels (first-time-user-readable)', () => { + it('add op label is action-first / column-first and free of extension jargon', () => { + const [op] = onFieldEvent('added', ctx({ next: { typeParams: { equality: true } } })); + expect(op!.label).toBe(`Enable cipherstash search on ${TABLE}.${FIELD}`); + // Legacy wording must not reappear (regression bar). + expect(op!.label).not.toContain('Register cipherstash search config'); + }); + + it('remove op label is action-first / column-first', () => { + const [op] = onFieldEvent('dropped', ctx({ prior: { typeParams: { equality: true } } })); + expect(op!.label).toBe(`Disable cipherstash search on ${TABLE}.${FIELD}`); + expect(op!.label).not.toContain('Remove cipherstash search config'); + }); + + it('altered op labels stay action-first when adding an index alongside an existing one', () => { + // Codec emits per-flag deltas: flipping `freeTextSearch` on while + // `equality` stays on produces a single add op (the rotate UX is + // expressed as add+remove pairs across flag transitions). 
+ const ops = onFieldEvent( + 'altered', + ctx({ + prior: { typeParams: { equality: true } }, + next: { typeParams: { equality: true, freeTextSearch: true } }, + }), + ); + expect(ops).toHaveLength(1); + expect(ops[0]!.label).toBe(`Enable cipherstash search on ${TABLE}.${FIELD}`); + expect(ops[0]!.label).not.toContain('Register cipherstash search config'); + }); + }); + + describe('invariantId + SQL conventions', () => { + it('namespaces every emitted op under cipherstash-codec:*', () => { + const allOps = [ + ...onFieldEvent( + 'added', + ctx({ next: { typeParams: { equality: true, freeTextSearch: true } } }), + ), + ...onFieldEvent( + 'dropped', + ctx({ prior: { typeParams: { equality: true, freeTextSearch: true } } }), + ), + ...onFieldEvent( + 'altered', + ctx({ + prior: { typeParams: { equality: false, freeTextSearch: true } }, + next: { typeParams: { equality: true, freeTextSearch: false } }, + }), + ), + ]; + expect(allOps.length).toBeGreaterThan(0); + for (const op of allOps) { + expect(op.invariantId).toMatch(/^cipherstash-codec:/); + } + }); + + it('escapes embedded apostrophes in table/field identifiers', () => { + const ops = onFieldEvent( + 'added', + ctx({ + tableName: "us'er", + fieldName: "em'ail", + next: { typeParams: { equality: true } }, + }), + ); + expect(ops[0]!.execute[0]!.sql).toContain("'us''er'"); + expect(ops[0]!.execute[0]!.sql).toContain("'em''ail'"); + }); + + it('classifies add ops as additive and remove ops as destructive', () => { + const adds = onFieldEvent( + 'added', + ctx({ next: { typeParams: { equality: true, freeTextSearch: true } } }), + ); + const removes = onFieldEvent( + 'dropped', + ctx({ prior: { typeParams: { equality: true, freeTextSearch: true } } }), + ); + for (const op of adds) { + expect(op.operationClass).toBe('additive'); + } + for (const op of removes) { + expect(op.operationClass).toBe('destructive'); + } + }); + }); +}); diff --git a/packages/prisma-next/test/cipherstash-codec.test.ts 
b/packages/prisma-next/test/cipherstash-codec.test.ts new file mode 100644 index 00000000..980793b2 --- /dev/null +++ b/packages/prisma-next/test/cipherstash-codec.test.ts @@ -0,0 +1,153 @@ +/** + * Wiring tests for the cipherstash extension's codec lifecycle hooks. + * + * Two layers are pinned here: + * + * 1. `cipherstash descriptor wiring` — every codec hook is reachable + * under `types.codecTypes.controlPlaneHooks` on the descriptor, + * and `extractCodecControlHooks` discovers all of them. + * 2. `planFieldEventOperations driving the cipherstash hook` — + * end-to-end through the planner: per-flag add/remove ops are + * inlined on contract diffs, and an unchanged contract yields no + * ops. + * + * Per-codec hook behaviour (flag → index mapping) lives in the + * sibling test files: + * + * - `cipherstash-codec-string.test.ts` + * - `cipherstash-codec-numeric.test.ts` + * - `cipherstash-codec-other-codecs.test.ts` + */ + +import type { Contract, StorageHashBase } from '@prisma-next/contract/types'; +import { profileHash } from '@prisma-next/contract/types'; +import { + extractCodecControlHooks, + planFieldEventOperations, +} from '@prisma-next/family-sql/control'; +import type { TargetBoundComponentDescriptor } from '@prisma-next/framework-components/components'; +import type { SqlStorage, StorageTable } from '@prisma-next/sql-contract/types'; +import { ifDefined } from '@prisma-next/utils/defined'; +import { describe, expect, it } from 'vitest'; +import cipherstashExtensionDescriptor from '../src/exports/control'; +import { + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_BOOLEAN_CODEC_ID, + CIPHERSTASH_DATE_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, + CIPHERSTASH_JSON_CODEC_ID, + CIPHERSTASH_STRING_CODEC_ID, +} from '../src/extension-metadata/constants'; +import { + cipherstashBigIntCodecHooks, + cipherstashBooleanCodecHooks, + cipherstashDateCodecHooks, + cipherstashDoubleCodecHooks, + cipherstashJsonCodecHooks, + cipherstashStringCodecHooks, +} from 
'../src/migration/cipherstash-codec'; + +describe('cipherstash descriptor wiring', () => { + it('exposes every codec hook under types.codecTypes.controlPlaneHooks', () => { + const hooks = ( + cipherstashExtensionDescriptor as { + types?: { codecTypes?: { controlPlaneHooks?: Record<string, unknown> } }; + } + ).types?.codecTypes?.controlPlaneHooks; + expect(hooks?.[CIPHERSTASH_STRING_CODEC_ID]).toBe(cipherstashStringCodecHooks); + expect(hooks?.[CIPHERSTASH_DOUBLE_CODEC_ID]).toBe(cipherstashDoubleCodecHooks); + expect(hooks?.[CIPHERSTASH_BIGINT_CODEC_ID]).toBe(cipherstashBigIntCodecHooks); + expect(hooks?.[CIPHERSTASH_DATE_CODEC_ID]).toBe(cipherstashDateCodecHooks); + expect(hooks?.[CIPHERSTASH_BOOLEAN_CODEC_ID]).toBe(cipherstashBooleanCodecHooks); + expect(hooks?.[CIPHERSTASH_JSON_CODEC_ID]).toBe(cipherstashJsonCodecHooks); + }); + + it('extractCodecControlHooks finds every cipherstash hook on the descriptor', () => { + const map = extractCodecControlHooks([ + cipherstashExtensionDescriptor as unknown as TargetBoundComponentDescriptor<'sql', string>, + ]); + expect(map.get(CIPHERSTASH_STRING_CODEC_ID)).toBe(cipherstashStringCodecHooks); + expect(map.get(CIPHERSTASH_DOUBLE_CODEC_ID)).toBe(cipherstashDoubleCodecHooks); + expect(map.get(CIPHERSTASH_BIGINT_CODEC_ID)).toBe(cipherstashBigIntCodecHooks); + expect(map.get(CIPHERSTASH_DATE_CODEC_ID)).toBe(cipherstashDateCodecHooks); + expect(map.get(CIPHERSTASH_BOOLEAN_CODEC_ID)).toBe(cipherstashBooleanCodecHooks); + expect(map.get(CIPHERSTASH_JSON_CODEC_ID)).toBe(cipherstashJsonCodecHooks); + }); +}); + +describe('planFieldEventOperations driving the cipherstash hook', () => { + function userTable(typeParams?: Record<string, unknown>): StorageTable { + return { + columns: { + id: { codecId: 'pg/text@1', nativeType: 'text', nullable: false }, + email: { + codecId: CIPHERSTASH_STRING_CODEC_ID, + nativeType: 'eql_v2_encrypted', + nullable: false, + ...ifDefined('typeParams', typeParams), + }, + }, + uniques: [], + indexes: [], + foreignKeys: [], + }; + } + + 
function build(tables: Record<string, StorageTable>): Contract { + return { + target: 'postgres', + targetFamily: 'sql', + profileHash: profileHash('sha256:test'), + storage: { + storageHash: 'sha256:test' as StorageHashBase, + tables, + }, + models: {}, + roots: {}, + capabilities: {}, + extensionPacks: {}, + meta: {}, + }; + } + + const codecHooks = extractCodecControlHooks([ + cipherstashExtensionDescriptor as unknown as TargetBoundComponentDescriptor<'sql', string>, + ]); + + it('inlines per-flag add ops on first emit (priorContract null) when flags are enabled', () => { + const ops = planFieldEventOperations({ + priorContract: null, + newContract: build({ User: userTable({ equality: true, freeTextSearch: true }) }), + codecHooks, + }); + expect(ops).toHaveLength(2); + const ids = ops.map((c) => c.toOp().invariantId).sort(); + expect(ids).toEqual([ + 'cipherstash-codec:User.email:add-search-config:match@v1', + 'cipherstash-codec:User.email:add-search-config:unique@v1', + ]); + }); + + it('inlines per-flag remove ops when previously-flagged column is dropped', () => { + const prior = build({ User: userTable({ equality: true, freeTextSearch: true }) }); + const newer = build({ + User: { ...userTable(), columns: { id: userTable().columns['id']! 
} }, + }); + const ops = planFieldEventOperations({ + priorContract: prior, + newContract: newer, + codecHooks, + }); + expect(ops).toHaveLength(2); + const ids = ops.map((c) => c.toOp().invariantId).sort(); + expect(ids).toEqual([ + 'cipherstash-codec:User.email:remove-search-config:match@v1', + 'cipherstash-codec:User.email:remove-search-config:unique@v1', + ]); + }); + + it('emits nothing when contract is unchanged', () => { + const c = build({ User: userTable({ equality: true }) }); + expect(planFieldEventOperations({ priorContract: c, newContract: c, codecHooks })).toEqual([]); + }); +}); diff --git a/packages/prisma-next/test/codec-runtime.test.ts b/packages/prisma-next/test/codec-runtime.test.ts new file mode 100644 index 00000000..1ccee5d2 --- /dev/null +++ b/packages/prisma-next/test/codec-runtime.test.ts @@ -0,0 +1,496 @@ +/** + * Behavioural tests for the cipherstash storage codec runtime + the + * parameterized codec descriptor. + * + * The codec runtime is constructed via `codec({ ... })` from + * `@prisma-next/sql-relational-core/ast`. Author-side `encode`/`decode` + * are sync; the factory lifts them to Promise-returning at the boundary + * (same pattern pgvector follows). 
+ */ + +import type { SqlCodecCallContext } from '@prisma-next/sql-relational-core/ast'; +import { describe, expect, it, vi } from 'vitest'; +import { + CIPHERSTASH_STRING_CODEC_ID, + createCipherstashBigIntCodec, + createCipherstashBooleanCodec, + createCipherstashDateCodec, + createCipherstashDoubleCodec, + createCipherstashJsonCodec, + createCipherstashStringCodec, +} from '../src/execution/codec-runtime'; +import { EncryptedBigInt } from '../src/execution/envelope-bigint'; +import { EncryptedBoolean } from '../src/execution/envelope-boolean'; +import { EncryptedDate } from '../src/execution/envelope-date'; +import { EncryptedDouble } from '../src/execution/envelope-double'; +import { EncryptedJson } from '../src/execution/envelope-json'; +import { EncryptedString, setHandleCiphertext } from '../src/execution/envelope-string'; +import { bulkEncryptMiddleware } from '../src/middleware/bulk-encrypt'; +import { + createParameterizedCodecDescriptors, + encryptedBigIntParamsSchema, + encryptedBooleanParamsSchema, + encryptedDateParamsSchema, + encryptedDoubleParamsSchema, + encryptedJsonParamsSchema, + encryptedStringParamsSchema, +} from '../src/execution/parameterized'; +import type { CipherstashSdk } from '../src/execution/sdk'; +import { + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_BOOLEAN_CODEC_ID, + CIPHERSTASH_DATE_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, + CIPHERSTASH_JSON_CODEC_ID, +} from '../src/extension-metadata/constants'; + +function emptySdk(): CipherstashSdk { + return { + decrypt: vi.fn(), + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; +} + +function ctxWithColumn(table: string, name: string): SqlCodecCallContext { + return { column: { table, name } }; +} + +const ctxWithoutColumn: SqlCodecCallContext = {}; + +describe('createCipherstashStringCodec — registration shape', () => { + it('uses cipherstash/string@1 as the codec id', () => { + const codec = createCipherstashStringCodec(emptySdk()); + 
expect(codec.id).toBe(CIPHERSTASH_STRING_CODEC_ID); + expect(CIPHERSTASH_STRING_CODEC_ID).toBe('cipherstash/string@1'); + }); + + it('targets the eql_v2_encrypted Postgres native type', () => { + const codec = createCipherstashStringCodec(emptySdk()); + expect(codec.descriptor.targetTypes).toEqual(['eql_v2_encrypted']); + const dbMeta = codec.descriptor.meta?.['db'] as + | { sql?: { postgres?: { nativeType?: string } } } + | undefined; + expect(dbMeta?.sql?.postgres?.nativeType).toBe('eql_v2_encrypted'); + }); + + it('declares cipherstash-namespaced traits but never the framework `equality` trait', () => { + // Regression test: cipherstash columns do NOT advertise the + // framework's `equality` trait at the codec level — the + // framework's built-in `eq` is gated on `equality` and lowers to + // standard SQL `=`, which is wrong for EQL ciphers (randomized + // nonces). Equality search on cipherstash columns is delivered + // exclusively via the cipherstash-namespaced `cipherstashEq` + // operator (see `src/execution/operators.ts`). + // + // The cipherstash-namespaced traits (`cipherstash:equality`, + // `cipherstash:order-and-range`, etc.) ARE expected — they are the + // dispatch keys for the cipherstash-namespaced operator surface + // (multi-codec dispatch via `self: { traits: [...] }` in the + // model accessor). They are isolated from framework built-ins by + // the `cipherstash:` prefix. + const codec = createCipherstashStringCodec(emptySdk()); + const traits: ReadonlyArray<string> = codec.descriptor.traits ?? 
[]; + expect(traits.includes('equality')).toBe(false); + expect(traits.includes('cipherstash:equality')).toBe(true); + expect(traits.includes('cipherstash:free-text-search')).toBe(true); + expect(traits.includes('cipherstash:order-and-range')).toBe(true); + }); +}); + +describe('codec.decode(wire, ctx)', () => { + it('constructs an envelope carrying the column identity from ctx.column', async () => { + const sdk = emptySdk(); + const codec = createCipherstashStringCodec(sdk); + const wire = `("${JSON.stringify({ c: 'cipher' }).replaceAll('"', '""')}")`; + const envelope = await codec.decode(wire, ctxWithColumn('user', 'email')); + expect(envelope).toBeInstanceOf(EncryptedString); + const handle = envelope.expose(); + expect(handle.table).toBe('user'); + expect(handle.column).toBe('email'); + expect(handle.sdk).toBe(sdk); + }); + + it('throws a RUNTIME.DECODE_FAILED envelope when the column routing context is absent', async () => { + const codec = createCipherstashStringCodec(emptySdk()); + const wire = `("${JSON.stringify({}).replaceAll('"', '""')}")`; + await expect(codec.decode(wire, ctxWithoutColumn)).rejects.toMatchObject({ + code: 'RUNTIME.DECODE_FAILED', + category: 'RUNTIME', + details: { + codecId: 'cipherstash/string@1', + reason: 'cipherstash-decode-column-context-missing', + }, + }); + }); +}); + +describe('codec.encode(envelope, ctx)', () => { + it('extracts the ciphertext from the envelope handle', async () => { + const codec = createCipherstashStringCodec(emptySdk()); + const envelope = EncryptedString.from('alice@example.com'); + const ciphertextPayload = { c: 'cipher', i: { t: 'user', c: 'email' } }; + setHandleCiphertext(envelope, ciphertextPayload); + const wire = await codec.encode(envelope, ctxWithoutColumn); + expect(typeof wire).toBe('string'); + expect(wire).toBe(`("${JSON.stringify(ciphertextPayload).replaceAll('"', '""')}")`); + }); + + it('returns the envelope unchanged when ciphertext is missing AND the bulk-encrypt middleware is 
registered for the sdk', async () => { + // Happy-path encode lifecycle: in the SQL runtime, + // `lower`/`encodeParams` runs before the `beforeExecute` + // middleware chain. Throwing here would block the bulk-encrypt + // middleware from ever running. The codec's contract is therefore + // "return *something* (the envelope itself is sufficient — it + // will be replaced via `params.replaceValues(...)`) when no + // ciphertext is set yet, **provided** the middleware has been + // wired up against the same sdk." + const sdk = emptySdk(); + bulkEncryptMiddleware(sdk); // marks `sdk` as registered + const codec = createCipherstashStringCodec(sdk); + const envelope = EncryptedString.from('alice@example.com'); + const result = await codec.encode(envelope, ctxWithoutColumn); + expect(result).toBe(envelope); + }); + + it('throws a clear RUNTIME.ENCODE_FAILED envelope when the bulk-encrypt middleware was never constructed against the sdk', async () => { + // Misconfig diagnostic: when the user forgets to construct + // `bulkEncryptMiddleware(sdk)`, the two-pass write path can never + // complete — there's no middleware to fill in the ciphertext. The + // codec surfaces the error at the codec boundary with a copy- + // pasteable wiring snippet, rather than letting the un-encrypted + // envelope reach the pg driver and produce an opaque serialise + // error. 
+ const sdk = emptySdk(); // never passed to `bulkEncryptMiddleware` + const codec = createCipherstashStringCodec(sdk); + const envelope = EncryptedString.from('alice@example.com'); + await expect(codec.encode(envelope, ctxWithoutColumn)).rejects.toThrow( + /bulkEncryptMiddleware\(sdk\)/, + ); + }); +}); + +describe('codec.descriptor.renderOutputType', () => { + it('returns "EncryptedString"', () => { + const codec = createCipherstashStringCodec(emptySdk()); + expect(codec.descriptor.renderOutputType?.({})).toBe('EncryptedString'); + }); +}); + +describe('eql_v2_encrypted wire-format round-trip — wire-format fix', () => { + it('encode then decode preserves the ciphertext payload through composite text format', async () => { + const sdk = emptySdk(); + const codec = createCipherstashStringCodec(sdk); + const payload = { + c: 'mBbLh1eMyM/Iq/M=', + i: { t: 'user', c: 'email' }, + v: 2, + }; + const envelope = EncryptedString.from('alice@example.com'); + setHandleCiphertext(envelope, payload); + + const wire = await codec.encode(envelope, ctxWithoutColumn); + expect(typeof wire).toBe('string'); + const wireString = wire as string; + expect(wireString.startsWith('("')).toBe(true); + expect(wireString.endsWith('")')).toBe(true); + + const decoded = await codec.decode(wireString, ctxWithColumn('user', 'email')); + expect(decoded.expose().ciphertext).toEqual(payload); + }); + + it('decode accepts a pre-parsed { data: ... 
} row from the pg driver', async () => { + const sdk = emptySdk(); + const codec = createCipherstashStringCodec(sdk); + const payload = { c: 'cipher', i: { t: 'user', c: 'email' } }; + const decoded = await codec.decode( + { data: payload } as unknown as string, + ctxWithColumn('user', 'email'), + ); + expect(decoded.expose().ciphertext).toEqual(payload); + }); + + it('decode passes through null/undefined unchanged', async () => { + const codec = createCipherstashStringCodec(emptySdk()); + const decoded = await codec.decode(null as unknown as string, ctxWithColumn('user', 'email')); + expect(decoded.expose().ciphertext).toBeNull(); + }); + + it('encode then decode preserves embedded double quotes via the composite text-format escape', async () => { + const codec = createCipherstashStringCodec(emptySdk()); + const payload = { c: 'has "quotes" inside' }; + const envelope = EncryptedString.from('plain'); + setHandleCiphertext(envelope, payload); + const wire = await codec.encode(envelope, ctxWithoutColumn); + const wireString = wire as string; + expect(wireString.includes('""')).toBe(true); + const decoded = await codec.decode(wireString, ctxWithColumn('user', 'email')); + expect(decoded.expose().ciphertext).toEqual(payload); + }); +}); + +describe('createParameterizedCodecDescriptors', () => { + // Pins the full six-descriptor surface — string + double + + // bigint + date + boolean + json — in stable order. 
+ it('exposes the cipherstash/{string,double,bigint,date,boolean,json}@1 descriptors in stable order', () => { + const descriptors = createParameterizedCodecDescriptors(emptySdk()); + expect(descriptors).toHaveLength(6); + expect(descriptors.map((d) => d.codecId)).toEqual([ + CIPHERSTASH_STRING_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_DATE_CODEC_ID, + CIPHERSTASH_BOOLEAN_CODEC_ID, + CIPHERSTASH_JSON_CODEC_ID, + ]); + for (const descriptor of descriptors) { + expect(descriptor.targetTypes).toEqual(['eql_v2_encrypted']); + // Per-codec `cipherstash:*` traits drive the multi-codec + // operator dispatch (see `extension-metadata/constants.ts`); the + // framework `'equality'` trait is intentionally absent across + // every cipherstash codec to keep the wrong-SQL `eq` footgun + // closed (see `equality-trait-removal.test.ts`). + const traits: ReadonlyArray = descriptor.traits ?? []; + expect(traits.includes('equality')).toBe(false); + expect(traits.length).toBeGreaterThan(0); + for (const trait of traits) { + expect(trait.startsWith('cipherstash:')).toBe(true); + } + } + }); + + it('renderOutputType returns the per-codec envelope class name', () => { + const [ + stringDescriptor, + doubleDescriptor, + bigIntDescriptor, + dateDescriptor, + booleanDescriptor, + jsonDescriptor, + ] = createParameterizedCodecDescriptors(emptySdk()); + expect( + stringDescriptor?.renderOutputType?.({ + equality: true, + freeTextSearch: true, + orderAndRange: true, + }), + ).toBe('EncryptedString'); + expect(doubleDescriptor?.renderOutputType?.({ equality: true, orderAndRange: true })).toBe( + 'EncryptedDouble', + ); + expect(bigIntDescriptor?.renderOutputType?.({ equality: true, orderAndRange: true })).toBe( + 'EncryptedBigInt', + ); + expect(dateDescriptor?.renderOutputType?.({ equality: true, orderAndRange: true })).toBe( + 'EncryptedDate', + ); + expect(booleanDescriptor?.renderOutputType?.({ equality: true })).toBe('EncryptedBoolean'); + 
expect(jsonDescriptor?.renderOutputType?.({ searchableJson: true })).toBe('EncryptedJson'); + }); + + it('paramsSchema accepts { equality, freeTextSearch, orderAndRange } booleans via Standard Schema', () => { + const result = encryptedStringParamsSchema['~standard'].validate({ + equality: true, + freeTextSearch: false, + orderAndRange: true, + }); + if (result instanceof Promise) throw new Error('expected synchronous validation'); + if (result.issues) + throw new Error(`expected success, got issues: ${JSON.stringify(result.issues)}`); + expect(result.value).toEqual({ + equality: true, + freeTextSearch: false, + orderAndRange: true, + }); + }); + + it('paramsSchema rejects non-boolean fields via Standard Schema', () => { + const result = encryptedStringParamsSchema['~standard'].validate({ + equality: 'yes', + freeTextSearch: false, + orderAndRange: true, + }); + if (result instanceof Promise) throw new Error('expected synchronous validation'); + expect(result.issues?.length).toBeGreaterThan(0); + }); + + it('factory(params)(ctx) yields the codec instance', () => { + const sdk = emptySdk(); + const [descriptor] = createParameterizedCodecDescriptors(sdk); + const codecForInstance = descriptor!.factory({ + equality: true, + freeTextSearch: false, + orderAndRange: true, + })({ + name: 'User.email', + }); + expect(codecForInstance.id).toBe(CIPHERSTASH_STRING_CODEC_ID); + }); + + it('numeric paramsSchemas accept { equality, orderAndRange } booleans via Standard Schema', () => { + for (const schema of [encryptedDoubleParamsSchema, encryptedBigIntParamsSchema]) { + const ok = schema['~standard'].validate({ equality: true, orderAndRange: false }); + if (ok instanceof Promise) throw new Error('expected synchronous validation'); + if (ok.issues) throw new Error(`expected success, got issues: ${JSON.stringify(ok.issues)}`); + expect(ok.value).toEqual({ equality: true, orderAndRange: false }); + + const bad = schema['~standard'].validate({ equality: 'yes', orderAndRange: true 
}); + if (bad instanceof Promise) throw new Error('expected synchronous validation'); + expect(bad.issues?.length).toBeGreaterThan(0); + } + }); +}); + +describe('createCipherstashDoubleCodec — registration shape', () => { + it('uses cipherstash/double@1 as the codec id and targets eql_v2_encrypted', () => { + const codec = createCipherstashDoubleCodec(emptySdk()); + expect(codec.id).toBe(CIPHERSTASH_DOUBLE_CODEC_ID); + expect(codec.descriptor.targetTypes).toEqual(['eql_v2_encrypted']); + expect(codec.descriptor.traits).toEqual([ + 'cipherstash:equality', + 'cipherstash:order-and-range', + ]); + expect(codec.descriptor.renderOutputType?.({})).toBe('EncryptedDouble'); + }); + + it('encode → decode round-trip preserves the ciphertext through the composite text format', async () => { + const sdk = emptySdk(); + const codec = createCipherstashDoubleCodec(sdk); + const payload = { c: 'numeric-cipher', i: { t: 'metric', c: 'value' }, v: 2 }; + const envelope = EncryptedDouble.from(3.14); + // The base's `setHandleCiphertext` helper accepts any envelope + // subclass; we re-use the string export as it's the same generic + // helper. (envelope.ts re-exports it; the function itself lives + // in envelope-base.ts and is generic over `T`.) 
+ setHandleCiphertext(envelope, payload); + + const wire = await codec.encode(envelope, ctxWithoutColumn); + const decoded = await codec.decode(wire as string, ctxWithColumn('metric', 'value')); + expect(decoded).toBeInstanceOf(EncryptedDouble); + expect(decoded.expose().ciphertext).toEqual(payload); + }); +}); + +describe('createCipherstashBigIntCodec — registration shape', () => { + it('uses cipherstash/bigint@1 as the codec id and targets eql_v2_encrypted', () => { + const codec = createCipherstashBigIntCodec(emptySdk()); + expect(codec.id).toBe(CIPHERSTASH_BIGINT_CODEC_ID); + expect(codec.descriptor.targetTypes).toEqual(['eql_v2_encrypted']); + expect(codec.descriptor.traits).toEqual([ + 'cipherstash:equality', + 'cipherstash:order-and-range', + ]); + expect(codec.descriptor.renderOutputType?.({})).toBe('EncryptedBigInt'); + }); + + it('encode → decode round-trip preserves the ciphertext', async () => { + const sdk = emptySdk(); + const codec = createCipherstashBigIntCodec(sdk); + const payload = { c: 'bigint-cipher', i: { t: 'ledger', c: 'amount' } }; + const envelope = EncryptedBigInt.from(42n); + setHandleCiphertext(envelope, payload); + const wire = await codec.encode(envelope, ctxWithoutColumn); + const decoded = await codec.decode(wire as string, ctxWithColumn('ledger', 'amount')); + expect(decoded).toBeInstanceOf(EncryptedBigInt); + expect(decoded.expose().ciphertext).toEqual(payload); + }); +}); + +describe('createCipherstashDateCodec — registration shape + round-trip', () => { + it('uses cipherstash/date@1 as the codec id and targets eql_v2_encrypted', () => { + const codec = createCipherstashDateCodec(emptySdk()); + expect(codec.id).toBe(CIPHERSTASH_DATE_CODEC_ID); + expect(codec.descriptor.targetTypes).toEqual(['eql_v2_encrypted']); + expect(codec.descriptor.traits).toEqual([ + 'cipherstash:equality', + 'cipherstash:order-and-range', + ]); + expect(codec.descriptor.renderOutputType?.({})).toBe('EncryptedDate'); + }); + + it('encode → decode 
round-trip preserves the ciphertext', async () => { + const sdk = emptySdk(); + const codec = createCipherstashDateCodec(sdk); + const payload = { c: 'date-cipher', i: { t: 'event', c: 'occurred_on' } }; + const envelope = EncryptedDate.from(new Date('2024-01-01')); + setHandleCiphertext(envelope, payload); + const wire = await codec.encode(envelope, ctxWithoutColumn); + const decoded = await codec.decode(wire as string, ctxWithColumn('event', 'occurred_on')); + expect(decoded).toBeInstanceOf(EncryptedDate); + expect(decoded.expose().ciphertext).toEqual(payload); + }); +}); + +describe('createCipherstashBooleanCodec — registration shape + round-trip', () => { + it('uses cipherstash/boolean@1 as the codec id and targets eql_v2_encrypted', () => { + const codec = createCipherstashBooleanCodec(emptySdk()); + expect(codec.id).toBe(CIPHERSTASH_BOOLEAN_CODEC_ID); + expect(codec.descriptor.targetTypes).toEqual(['eql_v2_encrypted']); + expect(codec.descriptor.traits).toEqual(['cipherstash:equality']); + expect(codec.descriptor.renderOutputType?.({})).toBe('EncryptedBoolean'); + }); + + it('encode → decode round-trip preserves the ciphertext', async () => { + const sdk = emptySdk(); + const codec = createCipherstashBooleanCodec(sdk); + const payload = { c: 'bool-cipher', i: { t: 'feature', c: 'enabled' } }; + const envelope = EncryptedBoolean.from(true); + setHandleCiphertext(envelope, payload); + const wire = await codec.encode(envelope, ctxWithoutColumn); + const decoded = await codec.decode(wire as string, ctxWithColumn('feature', 'enabled')); + expect(decoded).toBeInstanceOf(EncryptedBoolean); + expect(decoded.expose().ciphertext).toEqual(payload); + }); +}); + +describe('createCipherstashJsonCodec — registration shape + round-trip', () => { + it('uses cipherstash/json@1 as the codec id and targets eql_v2_encrypted', () => { + const codec = createCipherstashJsonCodec(emptySdk()); + expect(codec.id).toBe(CIPHERSTASH_JSON_CODEC_ID); + 
expect(codec.descriptor.targetTypes).toEqual(['eql_v2_encrypted']); + expect(codec.descriptor.traits).toEqual(['cipherstash:searchable-json']); + expect(codec.descriptor.renderOutputType?.({})).toBe('EncryptedJson'); + }); + + it('encode → decode round-trip preserves the ciphertext for arbitrary JSON', async () => { + const sdk = emptySdk(); + const codec = createCipherstashJsonCodec(sdk); + const payload = { c: 'json-cipher', i: { t: 'audit', c: 'payload' } }; + const envelope = EncryptedJson.from({ event: 'login', userId: 42 }); + setHandleCiphertext(envelope, payload); + const wire = await codec.encode(envelope, ctxWithoutColumn); + const decoded = await codec.decode(wire as string, ctxWithColumn('audit', 'payload')); + expect(decoded).toBeInstanceOf(EncryptedJson); + expect(decoded.expose().ciphertext).toEqual(payload); + }); +}); + +describe('paramsSchemas for date / boolean / json', () => { + it('encryptedDateParamsSchema accepts { equality, orderAndRange } booleans', () => { + const ok = encryptedDateParamsSchema['~standard'].validate({ + equality: true, + orderAndRange: false, + }); + if (ok instanceof Promise) throw new Error('expected synchronous validation'); + if (ok.issues) throw new Error(`expected success, got: ${JSON.stringify(ok.issues)}`); + expect(ok.value).toEqual({ equality: true, orderAndRange: false }); + }); + + it('encryptedBooleanParamsSchema accepts { equality } and rejects extras of wrong type', () => { + const ok = encryptedBooleanParamsSchema['~standard'].validate({ equality: true }); + if (ok instanceof Promise) throw new Error('expected synchronous validation'); + if (ok.issues) throw new Error(`expected success, got: ${JSON.stringify(ok.issues)}`); + expect(ok.value).toEqual({ equality: true }); + + const bad = encryptedBooleanParamsSchema['~standard'].validate({ equality: 'yes' }); + if (bad instanceof Promise) throw new Error('expected synchronous validation'); + expect(bad.issues?.length).toBeGreaterThan(0); + }); + + 
it('encryptedJsonParamsSchema accepts { searchableJson } booleans', () => { + const ok = encryptedJsonParamsSchema['~standard'].validate({ searchableJson: false }); + if (ok instanceof Promise) throw new Error('expected synchronous validation'); + if (ok.issues) throw new Error(`expected success, got: ${JSON.stringify(ok.issues)}`); + expect(ok.value).toEqual({ searchableJson: false }); + }); +}); diff --git a/packages/prisma-next/test/column-types.test.ts b/packages/prisma-next/test/column-types.test.ts new file mode 100644 index 00000000..2ccdea5c --- /dev/null +++ b/packages/prisma-next/test/column-types.test.ts @@ -0,0 +1,228 @@ +/** + * TS contract factory for cipherstash-encrypted string columns. + * + * The factory must produce a `ColumnTypeDescriptor` byte-identical to + * the lowering output of the PSL constructor `cipherstash.EncryptedString` + * registered in `src/contract-authoring.ts`. The full byte-equality is verified + * by the integration parity fixture; these unit tests pin the shape + * locally so a regression is caught in the package suite first. 
+ */ + +import { describe, expect, it } from 'vitest'; +import { + encryptedBigInt, + encryptedBoolean, + encryptedDate, + encryptedDouble, + encryptedJson, + encryptedString, +} from '../src/exports/column-types'; + +describe('cipherstash column-types', () => { + describe('encryptedString({...}) factory', () => { + it('produces a ColumnTypeDescriptor with cipherstash/string@1 codec id', () => { + const descriptor = encryptedString(); + expect(descriptor).toMatchObject({ + codecId: 'cipherstash/string@1', + nativeType: 'eql_v2_encrypted', + }); + }); + + it('defaults all flags to true when called with no arguments', () => { + expect(encryptedString()).toEqual({ + codecId: 'cipherstash/string@1', + nativeType: 'eql_v2_encrypted', + typeParams: { equality: true, freeTextSearch: true, orderAndRange: true }, + }); + }); + + it('defaults all flags to true for an empty options object', () => { + expect(encryptedString({})).toEqual({ + codecId: 'cipherstash/string@1', + nativeType: 'eql_v2_encrypted', + typeParams: { equality: true, freeTextSearch: true, orderAndRange: true }, + }); + }); + + it('lets equality be explicitly disabled', () => { + expect(encryptedString({ equality: false })).toMatchObject({ + typeParams: { equality: false, freeTextSearch: true, orderAndRange: true }, + }); + }); + + it('lets freeTextSearch be explicitly disabled', () => { + expect(encryptedString({ freeTextSearch: false })).toMatchObject({ + typeParams: { equality: true, freeTextSearch: false, orderAndRange: true }, + }); + }); + + it('lets orderAndRange be explicitly disabled', () => { + expect(encryptedString({ orderAndRange: false })).toMatchObject({ + typeParams: { equality: true, freeTextSearch: true, orderAndRange: false }, + }); + }); + + it('lets all flags be explicitly disabled (storage-only encryption)', () => { + expect( + encryptedString({ equality: false, freeTextSearch: false, orderAndRange: false }), + ).toMatchObject({ + typeParams: { equality: false, freeTextSearch: false, 
orderAndRange: false }, + }); + }); + + it('preserves all flags when explicitly enabled', () => { + expect( + encryptedString({ equality: true, freeTextSearch: true, orderAndRange: true }), + ).toMatchObject({ + typeParams: { equality: true, freeTextSearch: true, orderAndRange: true }, + }); + }); + + it('returns a structurally equivalent descriptor to the PSL constructor lowering', () => { + expect( + encryptedString({ equality: true, freeTextSearch: true, orderAndRange: true }), + ).toEqual({ + codecId: 'cipherstash/string@1', + nativeType: 'eql_v2_encrypted', + typeParams: { equality: true, freeTextSearch: true, orderAndRange: true }, + }); + }); + }); + + describe('encryptedDouble({...}) factory', () => { + it('produces a ColumnTypeDescriptor with cipherstash/double@1 codec id', () => { + expect(encryptedDouble()).toMatchObject({ + codecId: 'cipherstash/double@1', + nativeType: 'eql_v2_encrypted', + }); + }); + + it('defaults both flags to true when called with no arguments', () => { + expect(encryptedDouble()).toMatchObject({ + typeParams: { equality: true, orderAndRange: true }, + }); + }); + + it('defaults both flags to true for an empty options object', () => { + expect(encryptedDouble({})).toMatchObject({ + typeParams: { equality: true, orderAndRange: true }, + }); + }); + + it('lets equality be explicitly disabled', () => { + expect(encryptedDouble({ equality: false })).toMatchObject({ + typeParams: { equality: false, orderAndRange: true }, + }); + }); + + it('lets orderAndRange be explicitly disabled', () => { + expect(encryptedDouble({ orderAndRange: false })).toMatchObject({ + typeParams: { equality: true, orderAndRange: false }, + }); + }); + + it('lets both flags be explicitly disabled (storage-only encryption)', () => { + expect(encryptedDouble({ equality: false, orderAndRange: false })).toEqual({ + codecId: 'cipherstash/double@1', + nativeType: 'eql_v2_encrypted', + typeParams: { equality: false, orderAndRange: false }, + }); + }); + }); + + 
describe('encryptedBigInt({...}) factory', () => { + it('produces a ColumnTypeDescriptor with cipherstash/bigint@1 codec id', () => { + expect(encryptedBigInt()).toMatchObject({ + codecId: 'cipherstash/bigint@1', + nativeType: 'eql_v2_encrypted', + }); + }); + + it('defaults both flags to true when called with no arguments', () => { + expect(encryptedBigInt()).toMatchObject({ + typeParams: { equality: true, orderAndRange: true }, + }); + }); + + it('lets both flags be explicitly disabled (storage-only encryption)', () => { + expect(encryptedBigInt({ equality: false, orderAndRange: false })).toEqual({ + codecId: 'cipherstash/bigint@1', + nativeType: 'eql_v2_encrypted', + typeParams: { equality: false, orderAndRange: false }, + }); + }); + }); + + describe('encryptedDate({...}) factory', () => { + it('produces a ColumnTypeDescriptor with cipherstash/date@1 codec id', () => { + expect(encryptedDate()).toMatchObject({ + codecId: 'cipherstash/date@1', + nativeType: 'eql_v2_encrypted', + }); + }); + + it('defaults both flags to true when called with no arguments', () => { + expect(encryptedDate()).toMatchObject({ + typeParams: { equality: true, orderAndRange: true }, + }); + }); + + it('lets both flags be explicitly disabled', () => { + expect(encryptedDate({ equality: false, orderAndRange: false })).toEqual({ + codecId: 'cipherstash/date@1', + nativeType: 'eql_v2_encrypted', + typeParams: { equality: false, orderAndRange: false }, + }); + }); + }); + + describe('encryptedBoolean({...}) factory', () => { + it('produces a ColumnTypeDescriptor with cipherstash/boolean@1 codec id', () => { + expect(encryptedBoolean()).toMatchObject({ + codecId: 'cipherstash/boolean@1', + nativeType: 'eql_v2_encrypted', + }); + }); + + it('defaults equality to true when called with no arguments', () => { + expect(encryptedBoolean()).toEqual({ + codecId: 'cipherstash/boolean@1', + nativeType: 'eql_v2_encrypted', + typeParams: { equality: true }, + }); + }); + + it('lets equality be explicitly 
disabled', () => { + expect(encryptedBoolean({ equality: false })).toEqual({ + codecId: 'cipherstash/boolean@1', + nativeType: 'eql_v2_encrypted', + typeParams: { equality: false }, + }); + }); + }); + + describe('encryptedJson({...}) factory', () => { + it('produces a ColumnTypeDescriptor with cipherstash/json@1 codec id', () => { + expect(encryptedJson()).toMatchObject({ + codecId: 'cipherstash/json@1', + nativeType: 'eql_v2_encrypted', + }); + }); + + it('defaults searchableJson to true when called with no arguments', () => { + expect(encryptedJson()).toEqual({ + codecId: 'cipherstash/json@1', + nativeType: 'eql_v2_encrypted', + typeParams: { searchableJson: true }, + }); + }); + + it('lets searchableJson be explicitly disabled (storage-only encryption)', () => { + expect(encryptedJson({ searchableJson: false })).toEqual({ + codecId: 'cipherstash/json@1', + nativeType: 'eql_v2_encrypted', + typeParams: { searchableJson: false }, + }); + }); + }); +}); diff --git a/packages/prisma-next/test/decrypt-all.test.ts b/packages/prisma-next/test/decrypt-all.test.ts new file mode 100644 index 00000000..5ffa2fe7 --- /dev/null +++ b/packages/prisma-next/test/decrypt-all.test.ts @@ -0,0 +1,564 @@ +/** + * `decryptAll` — read-side bulk-decrypt walker. + * + * Pinned behaviour: + * + * - Walks recursively (objects, arrays, nested envelopes) and + * decrypts every `EncryptedString` it finds. + * - K envelopes across distinct routing keys ⇒ exactly one + * `bulkDecrypt` per routing-key group. + * - After return, every touched envelope`s `decrypt()` returns the + * cached plaintext synchronously without consulting the SDK. + * - `opts.signal` forwarded by identity to the SDK on every + * `bulkDecrypt` call — matching the bulk-encrypt middleware and + * single-cell `decrypt` patterns. 
+ * + * The tests use an in-memory `CounterSdk` mirroring the storage + * round-trip e2e`s mock SDK — `bulkDecrypt({ ciphertexts })` reads the + * synthetic `{ c: 'ct:' }` ciphertexts and returns the + * stripped plaintexts. A counter on each SDK method backs the + * "exactly one bulkDecrypt per routing-key group" assertion. + */ + +import { describe, expect, it, vi } from 'vitest'; +import { decryptAll } from '../src/execution/decrypt-all'; +import { EncryptedBigInt } from '../src/execution/envelope-bigint'; +import { EncryptedBoolean } from '../src/execution/envelope-boolean'; +import { EncryptedDate } from '../src/execution/envelope-date'; +import { EncryptedDouble } from '../src/execution/envelope-double'; +import { EncryptedJson } from '../src/execution/envelope-json'; +import { + EncryptedString, + type EncryptedStringFromInternalArgs, + isHandleDecrypted, +} from '../src/execution/envelope-string'; +import type { + CipherstashBulkDecryptArgs, + CipherstashBulkEncryptArgs, + CipherstashSdk, + CipherstashSingleDecryptArgs, +} from '../src/execution/sdk'; + +interface CounterSdk extends CipherstashSdk { + readonly bulkDecryptCalls: CipherstashBulkDecryptArgs[]; + readonly bulkEncryptCalls: CipherstashBulkEncryptArgs[]; + readonly singleDecryptCalls: CipherstashSingleDecryptArgs[]; +} + +function makeCounterSdk(): CounterSdk { + const bulkDecryptCalls: CipherstashBulkDecryptArgs[] = []; + const bulkEncryptCalls: CipherstashBulkEncryptArgs[] = []; + const singleDecryptCalls: CipherstashSingleDecryptArgs[] = []; + return { + bulkDecryptCalls, + bulkEncryptCalls, + singleDecryptCalls, + decrypt(args) { + singleDecryptCalls.push(args); + const ct = args.ciphertext as { c?: string } | null; + if (!ct || typeof ct.c !== 'string' || !ct.c.startsWith('ct:')) { + throw new Error(`mock SDK: cannot decrypt: ${JSON.stringify(args.ciphertext)}`); + } + return Promise.resolve(ct.c.slice('ct:'.length)); + }, + bulkEncrypt(args) { + bulkEncryptCalls.push(args); + return 
Promise.resolve( + args.values.map((plaintext) => ({ + c: `ct:${plaintext}`, + t: args.routingKey.table, + col: args.routingKey.column, + })), + ); + }, + bulkDecrypt(args) { + bulkDecryptCalls.push(args); + return Promise.resolve( + args.ciphertexts.map((ciphertext) => { + const ct = ciphertext as { c?: string } | null; + if (!ct || typeof ct.c !== 'string' || !ct.c.startsWith('ct:')) { + throw new Error(`mock SDK: cannot bulk-decrypt: ${JSON.stringify(ciphertext)}`); + } + return ct.c.slice('ct:'.length); + }), + ); + }, + }; +} + +interface MakeReadEnvelopeArgs { + readonly plaintext: string; + readonly table: string; + readonly column: string; + readonly sdk: CipherstashSdk; +} + +/** + * Build a read-side envelope mirroring the codec.decode body's call + * site: the wire ciphertext is the synthetic `{ c: 'ct:<plaintext>' }` + * payload, and the handle carries (table, column) routing context plus + * the SDK reference so subsequent `decrypt()` calls (or `bulkDecrypt` + * via `decryptAll`) can resolve the plaintext. 
+ */ +function makeReadEnvelope(args: MakeReadEnvelopeArgs): EncryptedString { + const fromInternalArgs: EncryptedStringFromInternalArgs = { + ciphertext: { c: `ct:${args.plaintext}`, t: args.table, col: args.column }, + table: args.table, + column: args.column, + sdk: args.sdk, + }; + return EncryptedString.fromInternal(fromInternalArgs); +} + +describe('decryptAll — walks recursively and decrypts every envelope', () => { + it('decrypts a single envelope inside a flat row', async () => { + const sdk = makeCounterSdk(); + const envelope = makeReadEnvelope({ + plaintext: 'alice@example.com', + table: 'User', + column: 'email', + sdk, + }); + const rows = [{ id: 'u-1', email: envelope }]; + + await decryptAll(rows); + + expect(sdk.bulkDecryptCalls).toHaveLength(1); + expect(isHandleDecrypted(envelope)).toBe(true); + }); + + it('walks arrays of rows, plain object trees, and nested arrays', async () => { + const sdk = makeCounterSdk(); + const envelopes = ['a', 'b', 'c', 'd'].map((p) => + makeReadEnvelope({ plaintext: p, table: 'User', column: 'email', sdk }), + ); + const rows = [ + { id: 'u-1', email: envelopes[0], profile: { contactEmail: envelopes[1] } }, + { id: 'u-2', email: envelopes[2], aliases: [{ email: envelopes[3] }] }, + ]; + + await decryptAll(rows); + + expect(sdk.bulkDecryptCalls).toHaveLength(1); + expect(sdk.bulkDecryptCalls[0]?.ciphertexts).toHaveLength(4); + for (const e of envelopes) { + expect(isHandleDecrypted(e)).toBe(true); + } + }); + + it('skips envelopes whose plaintext is already cached (write-side or prior decrypt)', async () => { + const sdk = makeCounterSdk(); + const writeSide = EncryptedString.from('cached'); + const readSide = makeReadEnvelope({ + plaintext: 'fresh', + table: 'User', + column: 'email', + sdk, + }); + + await decryptAll([{ a: writeSide, b: readSide }]); + + expect(sdk.bulkDecryptCalls).toHaveLength(1); + expect(sdk.bulkDecryptCalls[0]?.ciphertexts).toHaveLength(1); + expect(await readSide.decrypt()).toBe('fresh'); + 
expect(await writeSide.decrypt()).toBe('cached'); + expect(sdk.singleDecryptCalls).toHaveLength(0); + }); + + it('returns immediately (no SDK call) when no envelopes are reachable', async () => { + const sdk = makeCounterSdk(); + await decryptAll({ id: 'u-1', email: null, profile: { contactEmail: undefined } }); + await decryptAll([]); + await decryptAll(null); + await decryptAll(undefined); + await decryptAll('not a row'); + + expect(sdk.bulkDecryptCalls).toHaveLength(0); + }); + + it('does not recurse into Date / Map / Set / typed-array containers', async () => { + // Walker is scoped to plain objects + plain arrays so that exotic + // host objects (Date, Map, Set, typed arrays, ArrayBuffer-views, + // Buffers, Errors with cyclic causes, etc.) cannot trip the walker + // or its cycle-detection. Cipherstash envelopes are user data — + // they would not normally be embedded inside these containers; if + // a future caller needs to bulk-decrypt envelopes inside a Map, + // they extract them into a plain row first. 
+ const sdk = makeCounterSdk(); + const envelope = makeReadEnvelope({ + plaintext: 'alice', + table: 'User', + column: 'email', + sdk, + }); + const map = new Map<string, EncryptedString>([['email', envelope]]); + const set = new Set<EncryptedString>([envelope]); + const date = new Date(0); + const typedArray = new Uint8Array([0, 1, 2]); + + await decryptAll({ map, set, date, typedArray }); + + expect(sdk.bulkDecryptCalls).toHaveLength(0); + }); + + it('cycle-safe: does not loop on self-referential row trees', async () => { + const sdk = makeCounterSdk(); + const envelope = makeReadEnvelope({ + plaintext: 'alice', + table: 'User', + column: 'email', + sdk, + }); + const node: { email: EncryptedString; self?: unknown } = { email: envelope }; + node.self = node; + const rows = [node, node]; + + await decryptAll(rows); + + expect(sdk.bulkDecryptCalls).toHaveLength(1); + expect(sdk.bulkDecryptCalls[0]?.ciphertexts).toHaveLength(1); + expect(isHandleDecrypted(envelope)).toBe(true); + }); +}); + +describe('decryptAll — one bulkDecrypt per routing-key group', () => { + it('groups envelopes by (table, column) and issues one bulkDecrypt per group', async () => { + const sdk = makeCounterSdk(); + const usersEmails = ['a', 'b', 'c'].map((p) => + makeReadEnvelope({ plaintext: p, table: 'User', column: 'email', sdk }), + ); + const userNotes = ['n1', 'n2'].map((p) => + makeReadEnvelope({ plaintext: p, table: 'User', column: 'notes', sdk }), + ); + const orderShippingNotes = ['s1'].map((p) => + makeReadEnvelope({ plaintext: p, table: 'Order', column: 'shippingNotes', sdk }), + ); + const rows = [ + ...usersEmails.map((email, i) => ({ id: `u-${i}`, email })), + ...userNotes.map((notes, i) => ({ id: `un-${i}`, notes })), + ...orderShippingNotes.map((notes, i) => ({ id: `o-${i}`, shippingNotes: notes })), + ]; + + await decryptAll(rows); + + expect(sdk.bulkDecryptCalls).toHaveLength(3); + const callsByGroup = new Map( + sdk.bulkDecryptCalls.map( + (c) => 
[`${c.routingKey.table}\u0000${c.routingKey.column}`, c] as const, + ), + ); + expect(callsByGroup.get('User\u0000email')?.ciphertexts).toHaveLength(3); + expect(callsByGroup.get('User\u0000notes')?.ciphertexts).toHaveLength(2); + expect(callsByGroup.get('Order\u0000shippingNotes')?.ciphertexts).toHaveLength(1); + }); + + it('preserves observation order within each group', async () => { + const sdk = makeCounterSdk(); + const envelopes = ['x', 'y', 'z'].map((p) => + makeReadEnvelope({ plaintext: p, table: 'User', column: 'email', sdk }), + ); + + await decryptAll(envelopes); + + expect(sdk.bulkDecryptCalls).toHaveLength(1); + const call = sdk.bulkDecryptCalls[0]; + expect(call?.ciphertexts).toHaveLength(3); + // Order is the walker's discovery order — for a flat array this + // is the array's own order; the assertion pins that the bulk + // decrypt's `ciphertexts` slot lines up with the envelopes the + // walker visits in sequence. + expect((call?.ciphertexts[0] as { c: string }).c).toBe('ct:x'); + expect((call?.ciphertexts[1] as { c: string }).c).toBe('ct:y'); + expect((call?.ciphertexts[2] as { c: string }).c).toBe('ct:z'); + }); + + it('groups by (sdk, routing key) so multi-tenant SDKs stay isolated', async () => { + // Per `runtime.ts`'s docblock: "The descriptor is per-SDK ... + // Multi-tenant deployments construct one descriptor per tenant SDK + // so per-tenant key material never crosses runtimes." `decryptAll` + // honors the same boundary: an envelope's handle carries its own + // SDK reference (set by the codec.decode site), and grouping splits + // by SDK identity in addition to routing key so a tenant's + // ciphertexts never reach another tenant's bulkDecrypt. 
+ const tenantA = makeCounterSdk(); + const tenantB = makeCounterSdk(); + const aEnv = makeReadEnvelope({ + plaintext: 'alice', + table: 'User', + column: 'email', + sdk: tenantA, + }); + const bEnv = makeReadEnvelope({ + plaintext: 'bob', + table: 'User', + column: 'email', + sdk: tenantB, + }); + + await decryptAll([{ email: aEnv }, { email: bEnv }]); + + expect(tenantA.bulkDecryptCalls).toHaveLength(1); + expect(tenantA.bulkDecryptCalls[0]?.ciphertexts).toHaveLength(1); + expect(tenantB.bulkDecryptCalls).toHaveLength(1); + expect(tenantB.bulkDecryptCalls[0]?.ciphertexts).toHaveLength(1); + }); +}); + +describe('decryptAll — cached plaintext after return', () => { + it('subsequent envelope.decrypt() returns synchronously without consulting the SDK', async () => { + const sdk = makeCounterSdk(); + const envelopes = ['a', 'b', 'c'].map((p) => + makeReadEnvelope({ plaintext: p, table: 'User', column: 'email', sdk }), + ); + + await decryptAll(envelopes); + + expect(sdk.singleDecryptCalls).toHaveLength(0); + for (let i = 0; i < envelopes.length; i++) { + // Strictly synchronous-from-cache — the resolved value matches + // the original plaintext, and the SDK's single-cell decrypt + // counter stays at zero (envelope.decrypt() short-circuits when + // handle.plaintext is already populated). 
+ const e = envelopes[i]; + if (!e) throw new Error('envelope undefined'); + expect(await e.decrypt()).toBe(['a', 'b', 'c'][i]); + } + expect(sdk.singleDecryptCalls).toHaveLength(0); + }); +}); + +describe('decryptAll — forwards opts.signal to the SDK', () => { + it('forwards signal by identity on every bulkDecrypt call', async () => { + const sdk = makeCounterSdk(); + const usersEmails = ['a', 'b'].map((p) => + makeReadEnvelope({ plaintext: p, table: 'User', column: 'email', sdk }), + ); + const orderEmails = ['x'].map((p) => + makeReadEnvelope({ plaintext: p, table: 'Order', column: 'recipientEmail', sdk }), + ); + const controller = new AbortController(); + + await decryptAll([...usersEmails, ...orderEmails], { signal: controller.signal }); + + expect(sdk.bulkDecryptCalls).toHaveLength(2); + expect(sdk.bulkDecryptCalls[0]?.signal).toBe(controller.signal); + expect(sdk.bulkDecryptCalls[1]?.signal).toBe(controller.signal); + }); + + it('omits signal entirely when opts is not supplied', async () => { + const sdk = makeCounterSdk(); + const envelope = makeReadEnvelope({ + plaintext: 'alice', + table: 'User', + column: 'email', + sdk, + }); + + await decryptAll([envelope]); + + expect(sdk.bulkDecryptCalls).toHaveLength(1); + expect(sdk.bulkDecryptCalls[0]?.signal).toBeUndefined(); + }); + + it('omits signal when opts is supplied without signal', async () => { + const sdk = makeCounterSdk(); + const envelope = makeReadEnvelope({ + plaintext: 'alice', + table: 'User', + column: 'email', + sdk, + }); + + await decryptAll([envelope], {}); + + expect(sdk.bulkDecryptCalls).toHaveLength(1); + expect(sdk.bulkDecryptCalls[0]?.signal).toBeUndefined(); + }); +}); + +describe('decryptAll — diagnostics on misuse', () => { + it('throws a clear diagnostic when an envelope lacks (table, column) routing context', async () => { + // Read-side envelopes are constructed via codec.decode → fromInternal + // and always carry routing context. 
The only way an envelope lacks + // (table, column) at decryptAll time is misuse — e.g. a user reaches + // into the package internals and constructs an envelope manually. + // The walker surfaces this loudly so the misuse is debuggable. + const sdk = makeCounterSdk(); + // Construct an envelope with no routing context by using a fresh + // `from(plaintext)` (write side) and then artificially clearing + // the cached plaintext to force the walker to consider it as a + // bulk-decrypt target. The cleanest way to exercise the negative + // path without reaching into private APIs is to pass an envelope + // whose handle is in the ill-formed shape; this is an explicit + // misuse case, not a supported flow. + const envelope = EncryptedString.fromInternal({ + ciphertext: { c: 'ct:alice' }, + // Cast through unknown to exercise the diagnostic path; the + // type-level contract requires both fields. + table: undefined as unknown as string, + column: undefined as unknown as string, + sdk, + }); + + await expect(decryptAll([{ email: envelope }])).rejects.toThrow(/routing context|table|column/); + }); + + it('propagates SDK errors without retrying or swallowing', async () => { + // The walker is a pure orchestrator — failure modes are the SDK's, + // surfaced unchanged so callers can attribute them via existing + // SDK error taxonomy. RUNTIME.ABORTED phase-tag wrapping lives in + // the cancellation umbrella, not here. 
+ const sdk = makeCounterSdk(); + const bulkDecryptSpy = vi.fn(() => Promise.reject(new Error('SDK boom'))); + sdk.bulkDecrypt = bulkDecryptSpy; + const envelope = makeReadEnvelope({ + plaintext: 'alice', + table: 'User', + column: 'email', + sdk, + }); + + await expect(decryptAll([envelope])).rejects.toThrow('SDK boom'); + expect(bulkDecryptSpy).toHaveBeenCalledTimes(1); + }); +}); + +describe('decryptAll — heterogeneous envelope subclasses', () => { + // The walker decrypts every `EncryptedEnvelopeBase` subclass + // (string + double + bigint + date + boolean + json) and dispatches + // through each subclass's `parseDecryptedValue` hook to narrow the + // SDK's polymorphic `bulkDecrypt` return to the per-type plaintext. + // Pins both invariants together: one `bulkDecrypt` per + // `(table, column)` group across mixed types, and each envelope's + // `decrypt()` returns the narrowed cached value synchronously. + // + // The mock SDK below stores the original plaintext on the + // ciphertext envelope's `v` slot so each per-type narrowing hook + // sees a value of its expected shape on the way back. 
+ interface MultiSdk extends CipherstashSdk { + readonly bulkDecryptCalls: CipherstashBulkDecryptArgs[]; + } + + function makeMultiSdk(): MultiSdk { + const bulkDecryptCalls: CipherstashBulkDecryptArgs[] = []; + return { + bulkDecryptCalls, + decrypt: vi.fn(), + bulkEncrypt: vi.fn(), + bulkDecrypt(args) { + bulkDecryptCalls.push(args); + return Promise.resolve(args.ciphertexts.map((ct) => (ct as { v: unknown }).v)); + }, + }; + } + + it('groups heterogeneous types by (table, column) — one bulkDecrypt per group, narrowed plaintexts', async () => { + const sdk = makeMultiSdk(); + const stringEnv = EncryptedString.fromInternal({ + ciphertext: { v: 'alice@example.com' }, + table: 'User', + column: 'email', + sdk, + }); + const doubleEnv = EncryptedDouble.fromInternal({ + ciphertext: { v: 3.14 }, + table: 'User', + column: 'score', + sdk, + }); + const dateEnv = EncryptedDate.fromInternal({ + ciphertext: { v: '2024-06-15' }, + table: 'User', + column: 'birthday', + sdk, + }); + const boolEnv = EncryptedBoolean.fromInternal({ + ciphertext: { v: true }, + table: 'Feature', + column: 'enabled', + sdk, + }); + const jsonEnv = EncryptedJson.fromInternal({ + ciphertext: { v: { k: 'v' } }, + table: 'Audit', + column: 'payload', + sdk, + }); + const bigIntEnv = EncryptedBigInt.fromInternal({ + ciphertext: { v: 42n }, + table: 'Ledger', + column: 'amount', + sdk, + }); + + const rows = [ + { id: 'r-1', email: stringEnv, score: doubleEnv, birthday: dateEnv }, + { id: 'r-2', enabled: boolEnv, payload: jsonEnv, amount: bigIntEnv }, + ]; + + await decryptAll(rows); + + expect(sdk.bulkDecryptCalls).toHaveLength(6); + const callsByGroup = new Map( + sdk.bulkDecryptCalls.map( + (c) => [`${c.routingKey.table}\u0000${c.routingKey.column}`, c] as const, + ), + ); + expect(callsByGroup.get('User\u0000email')?.ciphertexts).toHaveLength(1); + expect(callsByGroup.get('User\u0000score')?.ciphertexts).toHaveLength(1); + 
expect(callsByGroup.get('User\u0000birthday')?.ciphertexts).toHaveLength(1); + expect(callsByGroup.get('Feature\u0000enabled')?.ciphertexts).toHaveLength(1); + expect(callsByGroup.get('Audit\u0000payload')?.ciphertexts).toHaveLength(1); + expect(callsByGroup.get('Ledger\u0000amount')?.ciphertexts).toHaveLength(1); + + expect(await stringEnv.decrypt()).toBe('alice@example.com'); + expect(await doubleEnv.decrypt()).toBe(3.14); + const decryptedDate = await dateEnv.decrypt(); + expect(decryptedDate).toBeInstanceOf(Date); + expect(decryptedDate.toISOString()).toBe('2024-06-15T00:00:00.000Z'); + expect(await boolEnv.decrypt()).toBe(true); + expect(await jsonEnv.decrypt()).toEqual({ k: 'v' }); + expect(await bigIntEnv.decrypt()).toBe(42n); + }); + + it('groups envelopes that share (table, column) into one bulkDecrypt, preserving sibling column splits', async () => { + // The framework guarantees per-cell-codec homogeneity within a + // `(table, column)` slot, but the walker's grouping logic does + // not depend on that property — it groups purely by + // `(sdk, table, column)`. This test exercises the grouping + // contract with two envelopes of the same type at the same + // routing key + a third envelope at a sibling column to confirm + // the per-(table,column) split is preserved. 
+ const { EncryptedDouble } = await import('../src/execution/envelope-double'); + const sdk = makeMultiSdk(); + const a = EncryptedString.fromInternal({ + ciphertext: { v: 'alice' }, + table: 'User', + column: 'email', + sdk, + }); + const b = EncryptedString.fromInternal({ + ciphertext: { v: 'bob' }, + table: 'User', + column: 'email', + sdk, + }); + const score = EncryptedDouble.fromInternal({ + ciphertext: { v: 9.5 }, + table: 'User', + column: 'score', + sdk, + }); + + await decryptAll([{ email: a, score }, { email: b }]); + + expect(sdk.bulkDecryptCalls).toHaveLength(2); + const callsByGroup = new Map( + sdk.bulkDecryptCalls.map( + (c) => [`${c.routingKey.table}\u0000${c.routingKey.column}`, c] as const, + ), + ); + expect(callsByGroup.get('User\u0000email')?.ciphertexts).toHaveLength(2); + expect(callsByGroup.get('User\u0000score')?.ciphertexts).toHaveLength(1); + }); +}); diff --git a/packages/prisma-next/test/derive-schemas.test.ts b/packages/prisma-next/test/derive-schemas.test.ts new file mode 100644 index 00000000..a4b35496 --- /dev/null +++ b/packages/prisma-next/test/derive-schemas.test.ts @@ -0,0 +1,178 @@ +/** + * Pin the shape of {@link deriveStackSchemas} against the full set of + * cipherstash codecs and search-mode flags. Uses synthesised contract + * JSON fragments so the test is hermetic — no dependency on the + * example app's contract.json or on the framework's contract emitter. 
+ */ + +import { describe, expect, it } from 'vitest' + +import { + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_BOOLEAN_CODEC_ID, + CIPHERSTASH_DATE_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, + CIPHERSTASH_JSON_CODEC_ID, + CIPHERSTASH_STRING_CODEC_ID, +} from '../src/extension-metadata/constants' +import { deriveStackSchemas } from '../src/stack/derive-schemas' + +function makeContract( + tables: Record< + string, + Record< + string, + { codecId: string; typeParams?: Record<string, boolean> | null } + > + >, +) { + return { + storage: { + tables: Object.fromEntries( + Object.entries(tables).map(([name, cols]) => [ + name, + { columns: cols as Record<string, { codecId: string; typeParams?: Record<string, unknown> | null }> }, + ]), + ), + }, + } +} + +describe('deriveStackSchemas', () => { + it('returns an empty array when contract has no storage tables', () => { + expect(deriveStackSchemas({})).toEqual([]) + expect(deriveStackSchemas({ storage: {} })).toEqual([]) + expect(deriveStackSchemas({ storage: { tables: {} } })).toEqual([]) + }) + + it('skips tables with no cipherstash columns', () => { + const contract = makeContract({ + users: { + id: { codecId: 'pg/text@1', typeParams: null }, + }, + }) + expect(deriveStackSchemas(contract)).toEqual([]) + }) + + it('derives one EncryptedTable per table that has cipherstash columns', () => { + const contract = makeContract({ + users: { + id: { codecId: 'pg/text@1', typeParams: null }, + email: { + codecId: CIPHERSTASH_STRING_CODEC_ID, + typeParams: { equality: true }, + }, + }, + audit_log: { + message: { + codecId: CIPHERSTASH_STRING_CODEC_ID, + typeParams: { equality: true }, + }, + }, + }) + const schemas = deriveStackSchemas(contract) + expect(schemas).toHaveLength(2) + expect(schemas.map((t) => t.tableName).sort()).toEqual(['audit_log', 'users']) + }) + + it('maps each cipherstash codec id to the correct dataType', () => { + const contract = makeContract({ + t: { + s: { codecId: CIPHERSTASH_STRING_CODEC_ID, typeParams: 
{ equality: true } }, + d: { codecId: CIPHERSTASH_DOUBLE_CODEC_ID, typeParams: { equality: true } }, + b: { codecId: CIPHERSTASH_BIGINT_CODEC_ID, typeParams: { equality: true } }, + dt: { codecId: CIPHERSTASH_DATE_CODEC_ID, typeParams: { equality: true } }, + bo: { codecId: CIPHERSTASH_BOOLEAN_CODEC_ID, typeParams: { equality: true } }, + j: { codecId: CIPHERSTASH_JSON_CODEC_ID, typeParams: { searchableJson: true } }, + }, + }) + const [t] = deriveStackSchemas(contract) + const built = t!.build() + // `build().cast_as` returns SDK-facing aliases ('string', 'number', 'bigint', ...); + // the EQL `cast_as` lower-form ('text', 'double', 'big_int', ...) is derived + // internally by the stack client at encrypt time. + expect(built.columns.s?.cast_as).toBe('string') + expect(built.columns.d?.cast_as).toBe('number') + expect(built.columns.b?.cast_as).toBe('bigint') + expect(built.columns.dt?.cast_as).toBe('date') + expect(built.columns.bo?.cast_as).toBe('boolean') + expect(built.columns.j?.cast_as).toBe('json') + }) + + it('installs index methods for each true-valued search-mode flag', () => { + const contract = makeContract({ + users: { + email: { + codecId: CIPHERSTASH_STRING_CODEC_ID, + typeParams: { equality: true, freeTextSearch: true, orderAndRange: true }, + }, + bio: { + codecId: CIPHERSTASH_STRING_CODEC_ID, + typeParams: { equality: false, freeTextSearch: true, orderAndRange: false }, + }, + preferences: { + codecId: CIPHERSTASH_JSON_CODEC_ID, + typeParams: { searchableJson: true }, + }, + }, + }) + const [users] = deriveStackSchemas(contract) + const built = users!.build() + + // email — all three indices + expect(Object.keys(built.columns.email?.indexes ?? 
{})).toEqual( + expect.arrayContaining(['unique', 'match', 'ore']), + ) + + // bio — only match (freeTextSearch); equality/orderAndRange false → no unique/ore + expect(built.columns.bio?.indexes.unique).toBeUndefined() + expect(built.columns.bio?.indexes.match).toBeDefined() + expect(built.columns.bio?.indexes.ore).toBeUndefined() + + // preferences — ste_vec only + expect(built.columns.preferences?.indexes.ste_vec).toBeDefined() + }) + + it('skips false-valued flags (treats absent and false as equivalent)', () => { + const contract = makeContract({ + t: { + c: { + codecId: CIPHERSTASH_STRING_CODEC_ID, + // explicit false on every flag should produce a column with no indices + typeParams: { equality: false, freeTextSearch: false, orderAndRange: false }, + }, + }, + }) + const [t] = deriveStackSchemas(contract) + const built = t!.build() + expect(built.columns.c?.indexes).toEqual({}) + }) + + it('throws on an unrecognised typeParams flag (catches framework-vs-SDK vocabulary drift)', () => { + const contract = makeContract({ + t: { + c: { + codecId: CIPHERSTASH_STRING_CODEC_ID, + typeParams: { equality: true, futureFlag: true } as Record<string, boolean>, + }, + }, + }) + expect(() => deriveStackSchemas(contract)).toThrow(/futureFlag/) + }) + + it('uses the physical column name (the storage IR key, post-@map)', () => { + // contract.json's `storage.tables.<table>.columns.<col>` keys are + // already the physical post-@map names. The derivation must keep + // those names verbatim, not the PSL field names. 
+ const contract = makeContract({ + users: { + emailverified: { + codecId: CIPHERSTASH_BOOLEAN_CODEC_ID, + typeParams: { equality: true }, + }, + }, + }) + const [users] = deriveStackSchemas(contract) + expect(users!.build().columns.emailverified).toBeDefined() + }) +}) diff --git a/packages/prisma-next/test/descriptor.test.ts b/packages/prisma-next/test/descriptor.test.ts new file mode 100644 index 00000000..77b8a052 --- /dev/null +++ b/packages/prisma-next/test/descriptor.test.ts @@ -0,0 +1,106 @@ +/** + * Structural verification for the CipherStash extension descriptor. + * + * **Contract-space package layout.** The descriptor's + * contract / migrations / head ref now flow through JSON-import + * declarations from the package's emitted artefacts: + * + * - `<package>/contract.json` + * - `<package>/migrations/<dirName>/{migration,ops}.json` + * - `<package>/refs/head.json` + * + * These assertions lock down the wiring: the descriptor exposes + * structurally correct values; the emitted bundle SQL flows through + * `ops.json` byte-for-byte; and the head ref tracks the latest + * migration's `to` hash. + * + * Hash-level values are sourced from the on-disk artefacts (via the + * descriptor's contractSpace) rather than hand-pinned in the test, so + * the assertions stay honest under re-emission. Mirrors the synthetic + * extension's `test/descriptor.test.ts` reference model. 
+ * + * @see docs/architecture docs/adrs/ADR 212 - Contract spaces.md + */ + +import { assertDescriptorSelfConsistency } from '@prisma-next/migration-tools/spaces'; +import { describe, expect, it } from 'vitest'; +import cipherstashExtensionDescriptor from '../src/exports/control'; +import { + CIPHERSTASH_BASELINE_MIGRATION_NAME, + CIPHERSTASH_INVARIANTS, + CIPHERSTASH_SPACE_ID, + EQL_V2_CONFIGURATION_TABLE, +} from '../src/extension-metadata/constants'; +import { EQL_BUNDLE_SQL } from '../src/migration/eql-bundle'; + +describe('cipherstash extension descriptor (contract-space package layout)', () => { + it('identifies as a SQL extension targeted at postgres', () => { + expect(cipherstashExtensionDescriptor).toMatchObject({ + kind: 'extension', + id: CIPHERSTASH_SPACE_ID, + familyId: 'sql', + targetId: 'postgres', + }); + }); + + it('exposes a contractSpace declaring the eql_v2_configuration table', () => { + const space = cipherstashExtensionDescriptor.contractSpace; + expect(space).toBeDefined(); + expect(Object.keys(space!.contractJson.storage.tables)).toEqual([EQL_V2_CONFIGURATION_TABLE]); + }); + + it('publishes one baseline migration sourced from the on-disk emit pipeline', () => { + const space = cipherstashExtensionDescriptor.contractSpace!; + expect(space.migrations).toHaveLength(1); + const baseline = space.migrations[0]!; + expect(baseline.dirName).toBe(CIPHERSTASH_BASELINE_MIGRATION_NAME); + expect(baseline.metadata.from).toBeNull(); + expect(baseline.metadata.to).toBe(space.contractJson.storage.storageHash); + }); + + it('baseline ops carry the installEqlBundle op + structural create-* ops', () => { + const space = cipherstashExtensionDescriptor.contractSpace!; + const baseline = space.migrations[0]!; + const opIds = baseline.ops.map((op) => op.invariantId).filter(Boolean); + expect(opIds).toEqual([CIPHERSTASH_INVARIANTS.installBundle]); + }); + + it('namespaces every baseline op invariantId under cipherstash:*', () => { + const baseline = 
cipherstashExtensionDescriptor.contractSpace!.migrations[0]!; + const ids = baseline.ops.map((op) => op.invariantId).filter(Boolean); + expect(ids.length).toBeGreaterThan(0); + for (const id of ids) { + expect(id).toMatch(/^cipherstash:/); + } + }); + + it('inlines the EQL bundle SQL byte-for-byte through ops.json', () => { + const baseline = cipherstashExtensionDescriptor.contractSpace!.migrations[0]!; + const installOp = baseline.ops.find( + (op) => op.invariantId === CIPHERSTASH_INVARIANTS.installBundle, + ) as { readonly execute?: ReadonlyArray<{ readonly sql: string }> } | undefined; + expect(installOp).toBeDefined(); + expect(installOp?.execute?.[0]?.sql).toBe(EQL_BUNDLE_SQL); + }); + + it("points the head ref at the latest migration's destination hash", () => { + const space = cipherstashExtensionDescriptor.contractSpace!; + expect(space.headRef.hash).toBe(space.migrations[0]!.metadata.to); + expect([...space.headRef.invariants].sort()).toEqual( + [...space.migrations[0]!.metadata.providedInvariants].sort(), + ); + }); + + it('self-consistency check passes — headRef.hash matches re-derived storage hash', () => { + const space = cipherstashExtensionDescriptor.contractSpace!; + expect(() => + assertDescriptorSelfConsistency({ + extensionId: CIPHERSTASH_SPACE_ID, + target: space.contractJson.target, + targetFamily: space.contractJson.targetFamily, + storage: space.contractJson.storage as unknown as Record<string, unknown>, + headRefHash: space.headRef.hash, + }), + ).not.toThrow(); + }); +}); diff --git a/packages/prisma-next/test/envelope-bigint.test.ts b/packages/prisma-next/test/envelope-bigint.test.ts new file mode 100644 index 00000000..45ca7e3e --- /dev/null +++ b/packages/prisma-next/test/envelope-bigint.test.ts @@ -0,0 +1,182 @@ +/** + * Behavioural tests for the `EncryptedBigInt` envelope. 
+ * + * Pins the subclass surface, redaction overrides, and `toJSON` + * placeholder shape for the `cipherstash/bigint@1` codec; mirrors + * `envelope-double.test.ts` byte-for-byte beyond the plaintext type + * and marker name. + */ + +import { inspect } from 'node:util'; +import { describe, expect, it, vi } from 'vitest'; +import { EncryptedEnvelopeBase } from '../src/execution/envelope-base'; +import { EncryptedBigInt } from '../src/execution/envelope-bigint'; +import type { CipherstashSdk } from '../src/execution/sdk'; + +describe('EncryptedBigInt.from(plaintext)', () => { + it('returns an EncryptedBigInt instance that extends EncryptedEnvelopeBase', () => { + const envelope = EncryptedBigInt.from(9007199254740993n); + expect(envelope).toBeInstanceOf(EncryptedBigInt); + expect(envelope).toBeInstanceOf(EncryptedEnvelopeBase); + }); + + it('decrypt() resolves to the original bigint plaintext on the write side', async () => { + const envelope = EncryptedBigInt.from(123456789012345678901234567890n); + await expect(envelope.decrypt()).resolves.toBe(123456789012345678901234567890n); + }); + + it('preserves negative bigint values', async () => { + await expect(EncryptedBigInt.from(-1n).decrypt()).resolves.toBe(-1n); + }); +}); + +describe('EncryptedBigInt.fromInternal(...) 
— read-side round-trip', () => { + it('decrypt() calls the SDK single-cell decrypt and returns the bigint plaintext', async () => { + const ciphertext = { c: 'cipher', i: { t: 'ledger', c: 'amount' } }; + const decryptMock = vi.fn().mockResolvedValue(7n); + const sdk: CipherstashSdk = { + decrypt: decryptMock, + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; + + const envelope = EncryptedBigInt.fromInternal({ + ciphertext, + table: 'ledger', + column: 'amount', + sdk, + }); + + await expect(envelope.decrypt()).resolves.toBe(7n); + expect(decryptMock).toHaveBeenCalledTimes(1); + }); + + it('coerces a number-shaped SDK plaintext into a bigint', async () => { + const ciphertext = { c: 'cipher', i: { t: 'ledger', c: 'amount' } }; + const sdk: CipherstashSdk = { + decrypt: vi.fn().mockResolvedValue(42), + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; + const envelope = EncryptedBigInt.fromInternal({ + ciphertext, + table: 'ledger', + column: 'amount', + sdk, + }); + await expect(envelope.decrypt()).resolves.toBe(42n); + }); + + it('coerces a decimal-string SDK plaintext into a bigint', async () => { + const ciphertext = { c: 'cipher', i: { t: 'ledger', c: 'amount' } }; + const sdk: CipherstashSdk = { + decrypt: vi.fn().mockResolvedValue('123456789012345678'), + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; + const envelope = EncryptedBigInt.fromInternal({ + ciphertext, + table: 'ledger', + column: 'amount', + sdk, + }); + await expect(envelope.decrypt()).resolves.toBe(123456789012345678n); + }); + + it('rejects non-integer numbers', async () => { + const ciphertext = { c: 'cipher', i: { t: 'ledger', c: 'amount' } }; + const sdk: CipherstashSdk = { + decrypt: vi.fn().mockResolvedValue(3.14), + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; + const envelope = EncryptedBigInt.fromInternal({ + ciphertext, + table: 'ledger', + column: 'amount', + sdk, + }); + await expect(envelope.decrypt()).rejects.toThrow(/not a safe integer/); + }); + + it('rejects 
numbers above Number.MAX_SAFE_INTEGER', async () => { + const ciphertext = { c: 'cipher', i: { t: 'ledger', c: 'amount' } }; + const sdk: CipherstashSdk = { + decrypt: vi.fn().mockResolvedValue(Number.MAX_SAFE_INTEGER + 1), + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; + const envelope = EncryptedBigInt.fromInternal({ + ciphertext, + table: 'ledger', + column: 'amount', + sdk, + }); + await expect(envelope.decrypt()).rejects.toThrow(/not a safe integer/); + }); + + it('rejects non-numeric string plaintexts', async () => { + const ciphertext = { c: 'cipher', i: { t: 'ledger', c: 'amount' } }; + const sdk: CipherstashSdk = { + decrypt: vi.fn().mockResolvedValue('abc'), + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; + const envelope = EncryptedBigInt.fromInternal({ + ciphertext, + table: 'ledger', + column: 'amount', + sdk, + }); + await expect(envelope.decrypt()).rejects.toThrow(/not a valid bigint literal/); + }); + + it('rejects unsupported plaintext types', async () => { + const ciphertext = { c: 'cipher', i: { t: 'ledger', c: 'amount' } }; + const sdk: CipherstashSdk = { + decrypt: vi.fn().mockResolvedValue(true), + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; + const envelope = EncryptedBigInt.fromInternal({ + ciphertext, + table: 'ledger', + column: 'amount', + sdk, + }); + await expect(envelope.decrypt()).rejects.toThrow(/unsupported SDK plaintext type/); + }); +}); + +describe('EncryptedBigInt — accidental-exposure overrides', () => { + it('toString() returns [REDACTED]', () => { + expect(EncryptedBigInt.from(42n).toString()).toBe('[REDACTED]'); + }); + + it('valueOf() returns [REDACTED]', () => { + expect(EncryptedBigInt.from(42n).valueOf()).toBe('[REDACTED]'); + }); + + it('Symbol.toPrimitive returns [REDACTED] for template-literal coercion', () => { + const envelope = EncryptedBigInt.from(42n); + expect(`v=${envelope}`).toBe('v=[REDACTED]'); + }); + + it('util.inspect returns [REDACTED]', () => { + const envelope = 
EncryptedBigInt.from(42n); + const inspected = inspect(envelope, { depth: Number.POSITIVE_INFINITY, getters: true }); + expect(inspected).not.toContain('42'); + expect(inspected).toContain('[REDACTED]'); + }); + + it('JSON.stringify renders the per-type placeholder marker shape', () => { + const envelope = EncryptedBigInt.from(42n); + expect(JSON.parse(JSON.stringify(envelope))).toEqual({ $encryptedBigInt: '<opaque>' }); + }); + + it('JSON.stringify cannot leak plaintext', () => { + const envelope = EncryptedBigInt.from(987654321n); + const json = JSON.stringify({ amount: envelope }); + expect(json).not.toContain('987654321'); + }); +}); diff --git a/packages/prisma-next/test/envelope-boolean.test.ts b/packages/prisma-next/test/envelope-boolean.test.ts new file mode 100644 index 00000000..daf03e10 --- /dev/null +++ b/packages/prisma-next/test/envelope-boolean.test.ts @@ -0,0 +1,105 @@ +/** + * Behavioural tests for the `EncryptedBoolean` envelope. + * + * Pins the subclass surface, redaction overrides, and `toJSON` + * placeholder shape for the `cipherstash/boolean@1` codec. 
+ */ + +import { inspect } from 'node:util'; +import { describe, expect, it, vi } from 'vitest'; +import { EncryptedEnvelopeBase } from '../src/execution/envelope-base'; +import { EncryptedBoolean } from '../src/execution/envelope-boolean'; +import type { CipherstashSdk } from '../src/execution/sdk'; + +function emptySdk(): CipherstashSdk { + return { + decrypt: vi.fn(), + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; +} + +describe('EncryptedBoolean.from(plaintext)', () => { + it('returns an EncryptedBoolean instance that extends EncryptedEnvelopeBase', () => { + const envelope = EncryptedBoolean.from(true); + expect(envelope).toBeInstanceOf(EncryptedBoolean); + expect(envelope).toBeInstanceOf(EncryptedEnvelopeBase); + }); + + it('decrypt() resolves to the original boolean plaintext on the write side', async () => { + await expect(EncryptedBoolean.from(true).decrypt()).resolves.toBe(true); + await expect(EncryptedBoolean.from(false).decrypt()).resolves.toBe(false); + }); +}); + +describe('EncryptedBoolean.fromInternal(...) 
— read-side round-trip', () => { + it('decrypt({signal}) calls the SDK single-cell decrypt and returns the boolean plaintext', async () => { + const ciphertext = { c: 'cipher', i: { t: 'feature', c: 'enabled' } }; + const decryptMock = vi.fn().mockResolvedValue(true); + const sdk: CipherstashSdk = { + decrypt: decryptMock, + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; + + const envelope = EncryptedBoolean.fromInternal({ + ciphertext, + table: 'feature', + column: 'enabled', + sdk, + }); + + const result = await envelope.decrypt(); + expect(result).toBe(true); + expect(decryptMock).toHaveBeenCalledTimes(1); + }); +}); + +describe('EncryptedBoolean — accidental-exposure overrides', () => { + it('toString() returns [REDACTED]', () => { + expect(EncryptedBoolean.from(true).toString()).toBe('[REDACTED]'); + }); + + it('valueOf() returns [REDACTED]', () => { + expect(EncryptedBoolean.from(true).valueOf()).toBe('[REDACTED]'); + }); + + it('Symbol.toPrimitive returns [REDACTED] for template-literal coercion', () => { + expect(`v=${EncryptedBoolean.from(true)}`).toBe('v=[REDACTED]'); + }); + + it('util.inspect returns [REDACTED]', () => { + const envelope = EncryptedBoolean.from(true); + const inspected = inspect(envelope, { depth: Number.POSITIVE_INFINITY, getters: true }); + expect(inspected).not.toContain('true'); + expect(inspected).toContain('[REDACTED]'); + }); + + it('JSON.stringify renders the per-type placeholder marker shape', () => { + const envelope = EncryptedBoolean.from(true); + expect(JSON.parse(JSON.stringify(envelope))).toEqual({ $encryptedBoolean: '<opaque>' }); + }); + + it('JSON.stringify cannot leak plaintext', () => { + const envelope = EncryptedBoolean.from(true); + const json = JSON.stringify({ value: envelope }); + expect(json).not.toContain('true'); + }); +}); + +describe('EncryptedBoolean — fromInternal preserves SDK references', () => { + it('exposes the (table, column) routing context + SDK on the handle', () => { + const sdk = 
emptySdk(); + const envelope = EncryptedBoolean.fromInternal({ + ciphertext: 'wire', + table: 'feature', + column: 'enabled', + sdk, + }); + const handle = envelope.expose(); + expect(handle.table).toBe('feature'); + expect(handle.column).toBe('enabled'); + expect(handle.sdk).toBe(sdk); + expect(handle.plaintext).toBeUndefined(); + }); +}); diff --git a/packages/prisma-next/test/envelope-date.test.ts b/packages/prisma-next/test/envelope-date.test.ts new file mode 100644 index 00000000..c057e592 --- /dev/null +++ b/packages/prisma-next/test/envelope-date.test.ts @@ -0,0 +1,185 @@ +/** + * Behavioural tests for the `EncryptedDate` envelope. + * + * Pins the per-type `parseDecryptedValue` narrowing path for the + * `cipherstash/date@1` codec (the SDK returns `unknown`; the + * envelope coerces ISO strings / numeric epoch ms / `Date` + * instances into a single `Date` shape for the user). + */ + +import { inspect } from 'node:util'; +import { describe, expect, it, vi } from 'vitest'; +import { EncryptedEnvelopeBase } from '../src/execution/envelope-base'; +import { EncryptedDate } from '../src/execution/envelope-date'; +import type { CipherstashSdk } from '../src/execution/sdk'; + +function emptySdk(): CipherstashSdk { + return { + decrypt: vi.fn(), + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; +} + +describe('EncryptedDate.from(plaintext)', () => { + it('returns an EncryptedDate instance that extends EncryptedEnvelopeBase', () => { + const envelope = EncryptedDate.from(new Date('2024-01-01')); + expect(envelope).toBeInstanceOf(EncryptedDate); + expect(envelope).toBeInstanceOf(EncryptedEnvelopeBase); + }); + + it('decrypt() resolves to the original Date plaintext on the write side', async () => { + const original = new Date('2024-06-15'); + const envelope = EncryptedDate.from(original); + await expect(envelope.decrypt()).resolves.toBe(original); + }); +}); + +describe('EncryptedDate.fromInternal(...) 
— read-side round-trip + parseDecryptedValue narrowing', () => { + it('coerces an ISO date string from the SDK into a Date instance', async () => { + const ciphertext = { c: 'cipher', i: { t: 'event', c: 'occurred_on' } }; + const decryptMock = vi.fn().mockResolvedValue('2023-01-01'); + const sdk: CipherstashSdk = { + decrypt: decryptMock, + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; + + const envelope = EncryptedDate.fromInternal({ + ciphertext, + table: 'event', + column: 'occurred_on', + sdk, + }); + + const result = await envelope.decrypt(); + expect(result).toBeInstanceOf(Date); + expect(result.toISOString()).toBe('2023-01-01T00:00:00.000Z'); + expect(decryptMock).toHaveBeenCalledTimes(1); + }); + + it('passes through a Date instance from the SDK unchanged', async () => { + const sdkDate = new Date('2025-04-01'); + const sdk: CipherstashSdk = { + decrypt: vi.fn().mockResolvedValue(sdkDate), + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; + const envelope = EncryptedDate.fromInternal({ + ciphertext: 'wire', + table: 'event', + column: 'occurred_on', + sdk, + }); + await expect(envelope.decrypt()).resolves.toBe(sdkDate); + }); + + it('coerces an epoch-ms number from the SDK into a Date instance', async () => { + const epochMs = 1_700_000_000_000; + const sdk: CipherstashSdk = { + decrypt: vi.fn().mockResolvedValue(epochMs), + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; + const envelope = EncryptedDate.fromInternal({ + ciphertext: 'wire', + table: 'event', + column: 'occurred_on', + sdk, + }); + const result = await envelope.decrypt(); + expect(result).toBeInstanceOf(Date); + expect(result.getTime()).toBe(epochMs); + }); + + it('throws when the SDK returns an invalid Date shape', async () => { + const sdk: CipherstashSdk = { + decrypt: vi.fn().mockResolvedValue({ not: 'a date' }), + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; + const envelope = EncryptedDate.fromInternal({ + ciphertext: 'wire', + table: 'event', + column: 'occurred_on', 
+ sdk, + }); + await expect(envelope.decrypt()).rejects.toThrow(/EncryptedDate.parseDecryptedValue/); + }); + + it('throws when the SDK returns an unparseable date string', async () => { + const sdk: CipherstashSdk = { + decrypt: vi.fn().mockResolvedValue('not-a-real-date'), + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; + const envelope = EncryptedDate.fromInternal({ + ciphertext: 'wire', + table: 'event', + column: 'occurred_on', + sdk, + }); + await expect(envelope.decrypt()).rejects.toThrow(/does not parse to a valid Date/); + }); +}); + +describe('EncryptedDate.from(plaintext) — input validation', () => { + it('throws when plaintext is an Invalid Date (NaN time)', () => { + expect(() => EncryptedDate.from(new Date('not-a-date'))).toThrow( + /must be a valid Date instance/, + ); + }); +}); + +describe('EncryptedDate — accidental-exposure overrides', () => { + it('toString() returns [REDACTED]', () => { + expect(EncryptedDate.from(new Date('2024-01-01')).toString()).toBe('[REDACTED]'); + }); + + it('valueOf() returns [REDACTED]', () => { + expect(EncryptedDate.from(new Date('2024-01-01')).valueOf()).toBe('[REDACTED]'); + }); + + it('Symbol.toPrimitive returns [REDACTED] for template-literal coercion', () => { + const envelope = EncryptedDate.from(new Date('2024-01-01')); + expect(`v=${envelope}`).toBe('v=[REDACTED]'); + }); + + it('util.inspect returns [REDACTED]', () => { + const envelope = EncryptedDate.from(new Date('2024-01-01')); + const inspected = inspect(envelope, { depth: Number.POSITIVE_INFINITY, getters: true }); + expect(inspected).not.toContain('2024'); + expect(inspected).toContain('[REDACTED]'); + }); + + it('JSON.stringify renders the per-type placeholder marker shape', () => { + const envelope = EncryptedDate.from(new Date('2024-01-01')); + expect(JSON.parse(JSON.stringify(envelope))).toEqual({ $encryptedDate: '<opaque>' }); + }); + + it('JSON.stringify cannot leak plaintext', () => { + const envelope = EncryptedDate.from(new 
Date('2024-06-15T12:34:56.789Z')); + const json = JSON.stringify({ value: envelope }); + expect(json).not.toContain('2024'); + expect(json).not.toContain('06'); + }); +}); + +describe('EncryptedDate — fromInternal preserves SDK references', () => { + it('exposes the (table, column) routing context + SDK on the handle', () => { + const sdk = emptySdk(); + const envelope = EncryptedDate.fromInternal({ + ciphertext: 'wire', + table: 'event', + column: 'occurred_on', + sdk, + }); + const handle = envelope.expose(); + expect(handle).toMatchObject({ + table: 'event', + column: 'occurred_on', + plaintext: undefined, + }); + expect(handle.sdk).toBe(sdk); + }); +}); diff --git a/packages/prisma-next/test/envelope-double.test.ts b/packages/prisma-next/test/envelope-double.test.ts new file mode 100644 index 00000000..78caa9e4 --- /dev/null +++ b/packages/prisma-next/test/envelope-double.test.ts @@ -0,0 +1,123 @@ +/** + * Behavioural tests for the `EncryptedDouble` envelope. + * + * Pins the subclass surface (`from` + `fromInternal` + decrypt + * round-trip), the four non-`toJSON` redaction overrides (return + * `[REDACTED]`), and the `JSON.stringify(envelope)` placeholder + * shape `{ "$encryptedDouble": "<opaque>" }`. 
+ */ + +import { inspect } from 'node:util'; +import { describe, expect, it, vi } from 'vitest'; +import { EncryptedEnvelopeBase } from '../src/execution/envelope-base'; +import { EncryptedDouble } from '../src/execution/envelope-double'; +import type { CipherstashSdk } from '../src/execution/sdk'; + +function emptySdk(): CipherstashSdk { + return { + decrypt: vi.fn(), + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; +} + +describe('EncryptedDouble.from(plaintext)', () => { + it('returns an EncryptedDouble instance that extends EncryptedEnvelopeBase', () => { + const envelope = EncryptedDouble.from(3.14); + expect(envelope).toBeInstanceOf(EncryptedDouble); + expect(envelope).toBeInstanceOf(EncryptedEnvelopeBase); + }); + + it('decrypt() resolves to the original numeric plaintext on the write side', async () => { + const envelope = EncryptedDouble.from(2.5); + await expect(envelope.decrypt()).resolves.toBe(2.5); + }); + + it('preserves negative and zero values without coercion', async () => { + await expect(EncryptedDouble.from(-1.5).decrypt()).resolves.toBe(-1.5); + await expect(EncryptedDouble.from(0).decrypt()).resolves.toBe(0); + }); +}); + +describe('EncryptedDouble.fromInternal(...) 
— read-side round-trip', () => { + it('decrypt({signal}) calls the SDK single-cell decrypt and returns the numeric plaintext', async () => { + const ciphertext = { c: 'cipher', i: { t: 'metric', c: 'value' } }; + const decryptMock = vi.fn().mockResolvedValue(42.5); + const sdk: CipherstashSdk = { + decrypt: decryptMock, + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; + + const envelope = EncryptedDouble.fromInternal({ + ciphertext, + table: 'metric', + column: 'value', + sdk, + }); + + const ac = new AbortController(); + const result = await envelope.decrypt({ signal: ac.signal }); + + expect(result).toBe(42.5); + expect(decryptMock).toHaveBeenCalledTimes(1); + expect(decryptMock.mock.calls[0]?.[0]).toMatchObject({ + ciphertext, + table: 'metric', + column: 'value', + signal: ac.signal, + }); + }); +}); + +describe('EncryptedDouble — accidental-exposure overrides', () => { + // The four non-`toJSON` coercion paths return `[REDACTED]`; + // `toJSON` returns the per-type placeholder object. 
+ it('toString() returns [REDACTED] regardless of plaintext value', () => { + expect(EncryptedDouble.from(42).toString()).toBe('[REDACTED]'); + }); + + it('valueOf() returns [REDACTED]', () => { + expect(EncryptedDouble.from(42).valueOf()).toBe('[REDACTED]'); + }); + + it('Symbol.toPrimitive returns [REDACTED] for template-literal coercion', () => { + const envelope = EncryptedDouble.from(42); + expect(`v=${envelope}`).toBe('v=[REDACTED]'); + }); + + it('util.inspect returns [REDACTED]', () => { + const envelope = EncryptedDouble.from(42); + const inspected = inspect(envelope, { depth: Number.POSITIVE_INFINITY, getters: true }); + expect(inspected).not.toContain('42'); + expect(inspected).toContain('[REDACTED]'); + }); + + it('JSON.stringify renders the per-type placeholder marker shape', () => { + const envelope = EncryptedDouble.from(42); + expect(JSON.parse(JSON.stringify(envelope))).toEqual({ $encryptedDouble: '<opaque>' }); + }); + + it('JSON.stringify cannot leak plaintext', () => { + const envelope = EncryptedDouble.from(123.456789); + const json = JSON.stringify({ value: envelope }); + expect(json).not.toContain('123.456789'); + }); +}); + +describe('EncryptedDouble — fromInternal preserves SDK references', () => { + it('exposes the (table, column) routing context + SDK on the handle', () => { + const sdk = emptySdk(); + const envelope = EncryptedDouble.fromInternal({ + ciphertext: 'wire', + table: 'metric', + column: 'value', + sdk, + }); + const handle = envelope.expose(); + expect(handle.table).toBe('metric'); + expect(handle.column).toBe('value'); + expect(handle.sdk).toBe(sdk); + expect(handle.plaintext).toBeUndefined(); + }); +}); diff --git a/packages/prisma-next/test/envelope-json.test.ts b/packages/prisma-next/test/envelope-json.test.ts new file mode 100644 index 00000000..37dcf877 --- /dev/null +++ b/packages/prisma-next/test/envelope-json.test.ts @@ -0,0 +1,116 @@ +/** + * Behavioural tests for the `EncryptedJson` envelope. 
+ * + * Pins the subclass surface, redaction overrides, and `toJSON` + * placeholder shape for the `cipherstash/json@1` codec. + * The plaintext type is intentionally `unknown` (any + * JSON-serialisable shape) — we exercise objects, arrays, and + * primitives to confirm the envelope round-trips opaque payloads + * without inspecting their structure. + */ + +import { inspect } from 'node:util'; +import { describe, expect, it, vi } from 'vitest'; +import { EncryptedEnvelopeBase } from '../src/execution/envelope-base'; +import { EncryptedJson } from '../src/execution/envelope-json'; +import type { CipherstashSdk } from '../src/execution/sdk'; + +function emptySdk(): CipherstashSdk { + return { + decrypt: vi.fn(), + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; +} + +describe('EncryptedJson.from(plaintext)', () => { + it('returns an EncryptedJson instance that extends EncryptedEnvelopeBase', () => { + const envelope = EncryptedJson.from({ k: 1 }); + expect(envelope).toBeInstanceOf(EncryptedJson); + expect(envelope).toBeInstanceOf(EncryptedEnvelopeBase); + }); + + it('decrypt() round-trips an object plaintext on the write side', async () => { + const payload = { user: 'alice', roles: ['admin', 'editor'] }; + await expect(EncryptedJson.from(payload).decrypt()).resolves.toBe(payload); + }); + + it('decrypt() round-trips array and primitive JSON plaintexts', async () => { + await expect(EncryptedJson.from([1, 2, 3]).decrypt()).resolves.toEqual([1, 2, 3]); + await expect(EncryptedJson.from(null).decrypt()).resolves.toBeNull(); + }); +}); + +describe('EncryptedJson.fromInternal(...) 
— read-side round-trip', () => { + it('decrypt({signal}) calls the SDK single-cell decrypt and returns the JSON plaintext as-is', async () => { + const ciphertext = { c: 'cipher', i: { t: 'audit', c: 'payload' } }; + const decoded = { event: 'login', userId: 42 }; + const decryptMock = vi.fn().mockResolvedValue(decoded); + const sdk: CipherstashSdk = { + decrypt: decryptMock, + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; + + const envelope = EncryptedJson.fromInternal({ + ciphertext, + table: 'audit', + column: 'payload', + sdk, + }); + + const result = await envelope.decrypt(); + expect(result).toBe(decoded); + expect(decryptMock).toHaveBeenCalledTimes(1); + }); +}); + +describe('EncryptedJson — accidental-exposure overrides', () => { + it('toString() returns [REDACTED]', () => { + expect(EncryptedJson.from({ secret: 'value' }).toString()).toBe('[REDACTED]'); + }); + + it('valueOf() returns [REDACTED]', () => { + expect(EncryptedJson.from({ secret: 'value' }).valueOf()).toBe('[REDACTED]'); + }); + + it('Symbol.toPrimitive returns [REDACTED] for template-literal coercion', () => { + expect(`v=${EncryptedJson.from({ secret: 'value' })}`).toBe('v=[REDACTED]'); + }); + + it('util.inspect returns [REDACTED]', () => { + const envelope = EncryptedJson.from({ secret: 'leak-me' }); + const inspected = inspect(envelope, { depth: Number.POSITIVE_INFINITY, getters: true }); + expect(inspected).not.toContain('leak-me'); + expect(inspected).toContain('[REDACTED]'); + }); + + it('JSON.stringify renders the per-type placeholder marker shape', () => { + const envelope = EncryptedJson.from({ k: 'v' }); + expect(JSON.parse(JSON.stringify(envelope))).toEqual({ $encryptedJson: '<opaque>' }); + }); + + it('JSON.stringify cannot leak nested plaintext fields', () => { + const envelope = EncryptedJson.from({ secret: 'TOPSECRET' }); + const json = JSON.stringify({ value: envelope }); + expect(json).not.toContain('TOPSECRET'); + expect(json).not.toContain('secret'); + }); +}); + 
+describe('EncryptedJson — fromInternal preserves SDK references', () => { + it('exposes the (table, column) routing context + SDK on the handle', () => { + const sdk = emptySdk(); + const envelope = EncryptedJson.fromInternal({ + ciphertext: 'wire', + table: 'audit', + column: 'payload', + sdk, + }); + const handle = envelope.expose(); + expect(handle.table).toBe('audit'); + expect(handle.column).toBe('payload'); + expect(handle.sdk).toBe(sdk); + expect(handle.plaintext).toBeUndefined(); + }); +}); diff --git a/packages/prisma-next/test/envelope-string.test.ts b/packages/prisma-next/test/envelope-string.test.ts new file mode 100644 index 00000000..f41282e4 --- /dev/null +++ b/packages/prisma-next/test/envelope-string.test.ts @@ -0,0 +1,223 @@ +/** + * Behavioural tests for the `EncryptedString` envelope and the + * `CipherstashSdk` shape it talks to. + * + * The envelope does **not** zero its handle's plaintext slot + * post-encrypt. As a side effect a write-side envelope's `decrypt()` + * returns the original plaintext synchronously without consulting the + * SDK; the bulk-encrypt middleware builds on this property. 
+ */ + +import { inspect } from 'node:util'; +import { describe, expect, it, vi } from 'vitest'; +import { EncryptedString, setHandleRoutingKey } from '../src/execution/envelope-string'; +import type { CipherstashSdk } from '../src/execution/sdk'; + +describe('EncryptedString.from(plaintext)', () => { + it('returns an EncryptedString instance', () => { + const envelope = EncryptedString.from('alice@example.com'); + expect(envelope).toBeInstanceOf(EncryptedString); + }); + + it('decrypt() resolves to the original plaintext on the write-side handle', async () => { + const envelope = EncryptedString.from('alice@example.com'); + await expect(envelope.decrypt()).resolves.toBe('alice@example.com'); + }); + + it('decrypt() does not consult an SDK on the write-side handle', async () => { + // Write-side envelopes built via `from(plaintext)` carry no SDK + // reference: `decrypt()` resolves directly from the cached + // plaintext slot without dispatching to any external service. + const envelope = EncryptedString.from('hello'); + await expect(envelope.decrypt()).resolves.toBe('hello'); + }); +}); + +describe('EncryptedString.fromInternal(...) 
— read-side', () => { + it('decrypt({signal}) calls the SDK single-cell decrypt and returns plaintext', async () => { + const ciphertext = { c: 'cipher', i: { t: 'user', c: 'email' } }; + const decryptMock = vi.fn().mockResolvedValue('alice@example.com'); + const sdk: CipherstashSdk = { + decrypt: decryptMock, + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; + + const envelope = EncryptedString.fromInternal({ + ciphertext, + table: 'user', + column: 'email', + sdk, + }); + + const ac = new AbortController(); + const result = await envelope.decrypt({ signal: ac.signal }); + + expect(result).toBe('alice@example.com'); + expect(decryptMock).toHaveBeenCalledTimes(1); + expect(decryptMock.mock.calls[0]?.[0]).toMatchObject({ + ciphertext, + table: 'user', + column: 'email', + signal: ac.signal, + }); + }); + + it('forwards the caller-provided AbortSignal to the SDK by identity', async () => { + const decryptMock = vi.fn().mockResolvedValue('plain'); + const sdk: CipherstashSdk = { + decrypt: decryptMock, + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; + const envelope = EncryptedString.fromInternal({ + ciphertext: 'wire', + table: 't', + column: 'c', + sdk, + }); + const ac = new AbortController(); + await envelope.decrypt({ signal: ac.signal }); + const callArg = decryptMock.mock.calls[0]?.[0] as { signal?: AbortSignal }; + expect(callArg.signal).toBe(ac.signal); + }); + + it('omits signal in the SDK call when none is provided', async () => { + const decryptMock = vi.fn().mockResolvedValue('plain'); + const sdk: CipherstashSdk = { + decrypt: decryptMock, + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; + const envelope = EncryptedString.fromInternal({ + ciphertext: 'wire', + table: 't', + column: 'c', + sdk, + }); + await envelope.decrypt(); + const callArg = decryptMock.mock.calls[0]?.[0] as { signal?: AbortSignal }; + expect(Object.hasOwn(callArg, 'signal')).toBe(false); + }); + + it('caches the decrypted plaintext for subsequent calls', async () => { + 
const decryptMock = vi.fn().mockResolvedValue('plain'); + const sdk: CipherstashSdk = { + decrypt: decryptMock, + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; + const envelope = EncryptedString.fromInternal({ + ciphertext: 'wire', + table: 't', + column: 'c', + sdk, + }); + await envelope.decrypt(); + await envelope.decrypt(); + expect(decryptMock).toHaveBeenCalledTimes(1); + }); +}); + +describe('EncryptedString — accidental-exposure overrides (Rust `secrecy` style)', () => { + // The handle stays reachable on purpose: `expose()` is the explicit + // opt-in. What these tests pin is that *every common implicit* + // exposure path — JSON, console, stringification, primitive coercion + // — refuses to leak the plaintext. If a future refactor drops one of + // these overrides, the regression surfaces here. + + it('exposes no own enumerable property', () => { + const envelope = EncryptedString.from('secret'); + expect(Object.keys(envelope)).toEqual([]); + }); + + it('expose() is the explicit access path — returns the wrapped handle', () => { + const envelope = EncryptedString.from('top-secret'); + const handle = envelope.expose(); + expect(handle.plaintext).toBe('top-secret'); + }); + + it('JSON.stringify cannot leak plaintext', () => { + const envelope = EncryptedString.from('top-secret'); + const json = JSON.stringify({ email: envelope }); + expect(json).not.toContain('top-secret'); + }); + + it('JSON.stringify renders the per-type placeholder marker shape', () => { + const envelope = EncryptedString.from('top-secret'); + expect(JSON.parse(JSON.stringify(envelope))).toEqual({ $encryptedString: '<opaque>' }); + }); + + it('toJSON returns the placeholder object directly', () => { + const envelope = EncryptedString.from('top-secret'); + expect(envelope.toJSON()).toEqual({ $encryptedString: '<opaque>' }); + }); + + it('String(envelope) and toString() cannot leak plaintext', () => { + const envelope = EncryptedString.from('top-secret'); + 
expect(String(envelope)).not.toContain('top-secret'); + expect(envelope.toString()).not.toContain('top-secret'); + }); + + it('template-literal coercion (Symbol.toPrimitive) cannot leak plaintext', () => { + const envelope = EncryptedString.from('top-secret'); + const interpolated = `email is ${envelope}`; + expect(interpolated).not.toContain('top-secret'); + }); + + it('valueOf() cannot leak plaintext', () => { + const envelope = EncryptedString.from('top-secret'); + expect(String(envelope.valueOf())).not.toContain('top-secret'); + }); + + it('util.inspect (and therefore console.log) cannot leak plaintext', () => { + const envelope = EncryptedString.from('top-secret'); + const inspected = inspect(envelope, { + depth: Number.POSITIVE_INFINITY, + getters: true, + showHidden: true, + }); + expect(inspected).not.toContain('top-secret'); + }); + + it('inspecting an object that contains an envelope does not leak plaintext', () => { + const envelope = EncryptedString.from('top-secret'); + const inspected = inspect( + { user: { id: 'u1', email: envelope } }, + { depth: Number.POSITIVE_INFINITY }, + ); + expect(inspected).not.toContain('top-secret'); + }); +}); + +describe('setHandleRoutingKey', () => { + it('stamps table/column on a fresh envelope', () => { + const envelope = EncryptedString.from('a@b.com'); + setHandleRoutingKey(envelope, 'users', 'email'); + const handle = envelope.expose(); + expect(handle.table).toBe('users'); + expect(handle.column).toBe('email'); + }); + + it('re-stamping the same routing key is a no-op', () => { + const envelope = EncryptedString.from('a@b.com'); + setHandleRoutingKey(envelope, 'users', 'email'); + expect(() => setHandleRoutingKey(envelope, 'users', 'email')).not.toThrow(); + }); + + it('rejects conflicting table reassignment', () => { + const envelope = EncryptedString.from('a@b.com'); + setHandleRoutingKey(envelope, 'users', 'email'); + expect(() => setHandleRoutingKey(envelope, 'accounts', 'email')).toThrow( + /routing-key table 
conflict/, + ); + }); + + it('rejects conflicting column reassignment', () => { + const envelope = EncryptedString.from('a@b.com'); + setHandleRoutingKey(envelope, 'users', 'email'); + expect(() => setHandleRoutingKey(envelope, 'users', 'username')).toThrow( + /routing-key column conflict/, + ); + }); +}); diff --git a/packages/prisma-next/test/envelope.types.test-d.ts b/packages/prisma-next/test/envelope.types.test-d.ts new file mode 100644 index 00000000..236d3e32 --- /dev/null +++ b/packages/prisma-next/test/envelope.types.test-d.ts @@ -0,0 +1,42 @@ +/** + * Type-shape tests for the `EncryptedString` envelope's public surface. + * + * The envelope follows the Rust `secrecy` pattern: the wrapped handle is + * reachable via the explicit `expose()` method (and `EncryptedStringHandle` + * is part of the public surface), but no *direct* property accessor — + * `envelope.plaintext`, `envelope.ciphertext`, `envelope.handle` — exists, + * so the only way to reach the handle is to ask for it by name. + * + * `@ts-expect-error` is permitted in negative type tests per + * `AGENTS.md § Typesafety rules`. + */ + +import type { EncryptedEnvelopePlaceholder } from '../src/execution/envelope-base'; +import { EncryptedString, type EncryptedStringHandle } from '../src/exports/runtime'; + +const envelope = EncryptedString.from('alice@example.com'); + +// -- Negative: no direct property accessors (forces explicit expose()) --- + +// @ts-expect-error — direct `.handle` accessor is not part of the public surface. +envelope.handle; +// @ts-expect-error — direct `.plaintext` accessor is not part of the public surface. +envelope.plaintext; +// @ts-expect-error — direct `.ciphertext` accessor is not part of the public surface. 
+envelope.ciphertext; + +// -- Positive: explicit access via expose() returns the handle type ----- + +const _expose: () => EncryptedStringHandle = envelope.expose.bind(envelope); + +const _decrypt: (opts?: { signal?: AbortSignal }) => Promise<string> = + envelope.decrypt.bind(envelope); +// `toJSON` returns the per-type placeholder object (see envelope-base +// for the rationale). Pinning the shape here catches a regression +// that would re-flatten it back to a bare string and lose the +// machine-readable marker. +const _toJson: () => EncryptedEnvelopePlaceholder = envelope.toJSON.bind(envelope); + +void _expose; +void _decrypt; +void _toJson; diff --git a/packages/prisma-next/test/equality-trait-removal.test.ts b/packages/prisma-next/test/equality-trait-removal.test.ts new file mode 100644 index 00000000..e18e91bd --- /dev/null +++ b/packages/prisma-next/test/equality-trait-removal.test.ts @@ -0,0 +1,121 @@ +/** + * Regression test. + * + * The cipherstash storage codec must NOT advertise the framework's + * `equality` trait. Re-adding it without re-routing through the + * cipherstash-namespaced operator surface (`cipherstashEq` / + * `cipherstashIlike` in `src/execution/operators.ts`) silently re-introduces + * a wrong-SQL footgun on cipherstash columns: + * + * - `COMPARISON_METHODS_META.eq` (in `packages/3-extensions/sql-orm-client/ + * src/types.ts`) gates the framework's built-in `eq` on the column + * codec's `equality` trait. The built-in lowers to standard SQL `=` + * via `BinaryExpr eq`. + * - EQL ciphertexts contain randomized nonces, so two encrypts of the + * same plaintext do not byte-equal under SQL `=`. A built-in + * `email.eq('alice@example.com')` on a cipherstash column would + * therefore produce `"email" = $1::eql_v2_encrypted` and silently + * return zero matches at runtime. + * - The supported equality-search call is `email.cipherstashEq(value)`, + * which lowers to `eql_v2.eq(...)` (snapshot-pinned in + * `operator-lowering.test.ts`). 
+ *
+ * The user-facing `EncryptedString({ equality: true })` flag in PSL/TS
+ * authoring is a SEPARATE concept from this codec trait — that flag
+ * controls whether the codec lifecycle hook emits an `add_search_config`
+ * op for the column's `unique` index at migration time. The two
+ * `equality` concepts share only their name.
+ *
+ * Recorded here so a future change that flips the trait declaration
+ * without re-routing the dispatch trips this test loudly rather than
+ * re-opening the footgun.
+ */
+
+import { describe, expect, it, vi } from 'vitest';
+import { createCipherstashStringCodec } from '../src/execution/codec-runtime';
+import { createParameterizedCodecDescriptors } from '../src/execution/parameterized';
+import type { CipherstashSdk } from '../src/execution/sdk';
+import { cipherstashStringCodecMetadata } from '../src/extension-metadata/codec-metadata';
+
+function emptySdk(): CipherstashSdk {
+  return {
+    decrypt: vi.fn(),
+    bulkEncrypt: vi.fn(),
+    bulkDecrypt: vi.fn(),
+  };
+}
+
+describe('cipherstash codec: no `equality` trait', () => {
+  it('runtime codec never advertises the framework `equality` trait', () => {
+    const codec = createCipherstashStringCodec(emptySdk());
+    const traits: ReadonlyArray<string> = codec.descriptor.traits ?? [];
+    expect(traits).not.toContain('equality');
+    // Cipherstash-namespaced traits (load-bearing for the multi-codec
+    // operator dispatch) ARE expected — they're isolated from
+    // framework built-ins by the `cipherstash:` prefix.
+    expect(traits.some((t) => t.startsWith('cipherstash:'))).toBe(true);
+  });
+
+  it('parameterized codec descriptors (the ones the runtime consumes for dispatch) never advertise `equality`', () => {
+    const descriptors = createParameterizedCodecDescriptors(emptySdk());
+    expect(descriptors.length).toBeGreaterThan(0);
+    for (const descriptor of descriptors) {
+      const traits: ReadonlyArray<string> = descriptor.traits ?? [];
+      expect(traits).not.toContain('equality');
+      expect(traits.some((t) => t.startsWith('cipherstash:'))).toBe(true);
+    }
+  });
+
+  it('SDK-free pack-meta codec metadata never advertises `equality`', () => {
+    const traits: ReadonlyArray<string> = cipherstashStringCodecMetadata.descriptor.traits ?? [];
+    expect(traits).not.toContain('equality');
+    expect(traits.some((t) => t.startsWith('cipherstash:'))).toBe(true);
+  });
+
+  it('the three trait declarations agree (runtime / parameterized / pack-meta) for the string codec', () => {
+    // If these three diverge, contract emit (which reads pack-meta) and
+    // the runtime (which reads the parameterized descriptor) will
+    // disagree about which built-in operations are reachable on
+    // cipherstash columns. They must always be identical.
+    const runtime = createCipherstashStringCodec(emptySdk()).descriptor.traits ?? [];
+    const parameterized =
+      createParameterizedCodecDescriptors(emptySdk()).find(
+        (d) => d.codecId === 'cipherstash/string@1',
+      )?.traits ?? [];
+    const packMeta = cipherstashStringCodecMetadata.descriptor.traits ?? [];
+    expect([...runtime].sort()).toEqual([...parameterized].sort());
+    expect([...runtime].sort()).toEqual([...packMeta].sort());
+  });
+});
+
+describe('cipherstash columns: framework built-in `eq` is not reachable', () => {
+  it('documents the gating contract — built-in `eq` requires `equality` in column traits', () => {
+    // This test pins the contract that `cipherstash/string@1` columns
+    // intentionally lack the `equality` trait, so the per-column
+    // accessor synthesis in `createScalarFieldAccessor` (sql-orm-client)
+    // skips `COMPARISON_METHODS_META.eq` (it's gated on `equality`).
+    // The accessor surface for a cipherstash column therefore has no
+    // `eq` / `neq` / `in` / `notIn` / `like` / `ilike` keys and only
+    // exposes the cipherstash-namespaced operators
+    // (`cipherstashEq` / `cipherstashIlike`) plus the always-on null
+    // checks (`isNull` / `isNotNull`).
+    //
+    // The end-to-end behavior — `(model accessor for cipherstash column).eq`
+    // is `undefined` — is exercised at the `sql-orm-client` layer
+    // (`packages/3-extensions/sql-orm-client/test/model-accessor.test.ts`
+    // already pins gating behavior for non-textual codecs via the
+    // `does not expose ilike on non-textual fields` test). Cipherstash
+    // does not depend on `sql-orm-client`, so this test asserts the
+    // *cause* (empty trait list) rather than the *effect* (undefined
+    // accessor key); a sibling `does not expose eq on cipherstash
+    // columns` test belongs in `sql-orm-client/test/model-accessor.test.ts`
+    // when that fixture grows a cipherstash codec entry.
+    // Widen via `ReadonlyArray<string>` so `includes('equality')` is
+    // well-typed even when TS narrows the codec's `traits` to
+    // `readonly never[]` (which is itself a strong static signal that
+    // the trait can't be present).
+    const traits: ReadonlyArray<string> =
+      createCipherstashStringCodec(emptySdk()).descriptor.traits ?? [];
+    expect(traits.includes('equality')).toBe(false);
+  });
+});
diff --git a/packages/prisma-next/test/from-stack-divergence.test.ts b/packages/prisma-next/test/from-stack-divergence.test.ts
new file mode 100644
index 00000000..69a155da
--- /dev/null
+++ b/packages/prisma-next/test/from-stack-divergence.test.ts
@@ -0,0 +1,112 @@
+/**
+ * Pin the divergence-check semantics of `cipherstashFromStack`.
+ *
+ * The full `cipherstashFromStack` path is not exercisable in unit
+ * tests because it calls `Encryption({ schemas })` which talks to
+ * ZeroKMS at module-evaluation time. We instead pull out the
+ * divergence check by calling `cipherstashFromStack` with an
+ * intentionally-broken override; the assertion fires before any
+ * SDK round-trip is attempted, so the test stays hermetic.
+ *
+ * The happy end-to-end path is covered by the example app's live
+ * `pnpm start` flow.
+ */ + +import { encryptedColumn, encryptedTable } from '@cipherstash/stack/schema' +import { describe, expect, it } from 'vitest' + +import { + CIPHERSTASH_BOOLEAN_CODEC_ID, + CIPHERSTASH_STRING_CODEC_ID, +} from '../src/extension-metadata/constants' +import { cipherstashFromStack } from '../src/stack/from-stack' + +function makeContract() { + return { + storage: { + tables: { + users: { + columns: { + email: { + codecId: CIPHERSTASH_STRING_CODEC_ID, + typeParams: { equality: true, freeTextSearch: true }, + }, + verified: { + codecId: CIPHERSTASH_BOOLEAN_CODEC_ID, + typeParams: { equality: true }, + }, + }, + }, + }, + }, + } +} + +describe('cipherstashFromStack — divergence check', () => { + it('throws when an override drops a column the contract declares', async () => { + const override = encryptedTable('users', { + email: encryptedColumn('email').equality().freeTextSearch(), + // `verified` dropped from override + }) + + await expect( + cipherstashFromStack({ + contractJson: makeContract(), + schemas: [override], + }), + ).rejects.toThrow(/schema divergence on table "users"/) + }) + + it('throws when an override adds a column the contract does not declare', async () => { + const override = encryptedTable('users', { + email: encryptedColumn('email').equality().freeTextSearch(), + verified: encryptedColumn('verified').dataType('boolean').equality(), + phantom: encryptedColumn('phantom').equality(), + }) + + await expect( + cipherstashFromStack({ + contractJson: makeContract(), + schemas: [override], + }), + ).rejects.toThrow(/schema divergence on table "users"/) + }) + + it('throws when an override changes a column\'s cast_as', async () => { + const override = encryptedTable('users', { + email: encryptedColumn('email').dataType('number').equality(), + verified: encryptedColumn('verified').dataType('boolean').equality(), + }) + + await expect( + cipherstashFromStack({ + contractJson: makeContract(), + schemas: [override], + }), + ).rejects.toThrow( + /schema 
divergence on column "users"\."email".*cast_as="string".*cast_as="number"/s, + ) + }) + + it('throws when an override changes a column\'s installed index set', async () => { + const override = encryptedTable('users', { + // dropped `.freeTextSearch()` — contract declared it + email: encryptedColumn('email').equality(), + verified: encryptedColumn('verified').dataType('boolean').equality(), + }) + + await expect( + cipherstashFromStack({ + contractJson: makeContract(), + schemas: [override], + }), + ).rejects.toThrow(/schema divergence on column "users"\."email".*indexes/s) + }) + + it('throws when the contract has no cipherstash columns and no override is supplied', async () => { + const emptyContract = { storage: { tables: { users: { columns: {} } } } } + await expect(cipherstashFromStack({ contractJson: emptyContract })).rejects.toThrow( + /no cipherstash columns found/, + ) + }) +}) diff --git a/packages/prisma-next/test/helpers.test.ts b/packages/prisma-next/test/helpers.test.ts new file mode 100644 index 00000000..c9d52aec --- /dev/null +++ b/packages/prisma-next/test/helpers.test.ts @@ -0,0 +1,317 @@ +/** + * Free-standing helper tests — sort + JSON SELECT-expression + * helpers. + * + * These are not registered operators; they're pure functions imported + * from the runtime entry. The tests here pin: + * + * - **AST shape** — `cipherstashAsc(col)` produces an + * `OrderByItem` with `dir: 'asc'` wrapping the column's AST; + * `cipherstashDesc` mirrors with `dir: 'desc'`. + * - **SQL snapshot** — the lowered SELECT shape with the helper + * in `ORDER BY` (sort) or in the projection list (JSON helpers) + * pins the user-visible SQL the live-Postgres e2e harness + * executes against the EQL bundle. + * - **Error path** — each helper rejects a non-cipherstash column + * (or, for the JSON helpers, a cipherstash-but-non-JSON column) + * with a `TypeError` naming the helper and the accepted codec + * ids. 
+ * + * Type-level tests are inline in `helpers.types.test-d.ts`; the + * helpers are typed at their declaration site (no + * `QueryOperationTypes` entry). + */ + +import postgresRuntimeAdapter from '@prisma-next/adapter-postgres/runtime'; +import type { PostgresContract } from '@prisma-next/adapter-postgres/types'; +import { emptyCodecLookup } from '@prisma-next/framework-components/codec'; +import type { + RuntimeExtensionDescriptor, + RuntimeTargetDescriptor, +} from '@prisma-next/framework-components/execution'; +import { validateContract } from '@prisma-next/sql-contract/validate'; +import { + type AnyExpression, + ColumnRef, + OrderByItem, + ProjectionItem, + SelectAst, + TableSource, +} from '@prisma-next/sql-relational-core/ast'; +import type { Expression, ScopeField } from '@prisma-next/sql-relational-core/expression'; +import { describe, expect, it, vi } from 'vitest'; +import { + cipherstashAsc, + cipherstashDesc, + cipherstashJsonbGet, + cipherstashJsonbPathQueryFirst, +} from '../src/execution/helpers'; +import type { CipherstashSdk } from '../src/execution/sdk'; +import { createCipherstashRuntimeDescriptor } from '../src/exports/runtime'; +import { + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_BOOLEAN_CODEC_ID, + CIPHERSTASH_DATE_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, + CIPHERSTASH_JSON_CODEC_ID, + CIPHERSTASH_STRING_CODEC_ID, + EQL_V2_ENCRYPTED_TYPE, +} from '../src/extension-metadata/constants'; + +function emptySdk(): CipherstashSdk { + return { + decrypt: vi.fn(), + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; +} + +const TABLE = 'user'; + +const contract = validateContract<PostgresContract>( + { + target: 'postgres', + targetFamily: 'sql', + profileHash: 'sha256:cipherstash-helpers-test', + roots: {}, + capabilities: {}, + extensionPacks: {}, + meta: {}, + storage: { + storageHash: 'sha256:cipherstash-helpers-test-storage', + tables: { + [TABLE]: { + columns: { + id: { codecId: 'pg/text@1', nativeType: 'text', nullable: false }, + email: { 
+ codecId: CIPHERSTASH_STRING_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + nullable: true, + }, + score: { + codecId: CIPHERSTASH_DOUBLE_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + nullable: true, + }, + amount: { + codecId: CIPHERSTASH_BIGINT_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + nullable: true, + }, + birthday: { + codecId: CIPHERSTASH_DATE_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + nullable: true, + }, + enabled: { + codecId: CIPHERSTASH_BOOLEAN_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + nullable: true, + }, + payload: { + codecId: CIPHERSTASH_JSON_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + nullable: true, + }, + plain: { codecId: 'pg/text@1', nativeType: 'text', nullable: false }, + }, + uniques: [], + indexes: [], + foreignKeys: [], + }, + }, + }, + models: {}, + }, + emptyCodecLookup, +); + +const stubRuntimeTarget: RuntimeTargetDescriptor<'sql', 'postgres'> = { + kind: 'target', + id: 'postgres', + version: '0.0.1', + familyId: 'sql', + targetId: 'postgres', + create() { + return { familyId: 'sql', targetId: 'postgres' }; + }, +}; + +function makeAdapter() { + const cipherstash: RuntimeExtensionDescriptor<'sql', 'postgres'> = + createCipherstashRuntimeDescriptor({ sdk: emptySdk() }); + return postgresRuntimeAdapter.create({ + target: stubRuntimeTarget, + adapter: postgresRuntimeAdapter, + driver: undefined, + extensionPacks: [cipherstash], + }); +} + +function columnAccessor(table: string, column: string, codecId: string): Expression<ScopeField> { + const ref = ColumnRef.of(table, column); + return { + returnType: { codecId, nullable: true }, + buildAst: () => ref, + }; +} + +function selectWithOrderBy(items: readonly OrderByItem[]) { + return SelectAst.from(TableSource.named(TABLE)) + .withProjection([ProjectionItem.of('id', ColumnRef.of(TABLE, 'id'))]) + .withOrderBy(items); +} + +function selectWithProjection(name: string, expr: AnyExpression) { + return 
SelectAst.from(TableSource.named(TABLE)).withProjection([ProjectionItem.of(name, expr)]); +} + +describe('cipherstashAsc / cipherstashDesc — AST shape', () => { + it('cipherstashAsc returns an OrderByItem with dir asc wrapping the column buildAst', () => { + const col = columnAccessor(TABLE, 'email', CIPHERSTASH_STRING_CODEC_ID); + const item = cipherstashAsc(col); + expect(item).toBeInstanceOf(OrderByItem); + expect(item).toMatchObject({ dir: 'asc', expr: col.buildAst() }); + }); + + it('cipherstashDesc returns an OrderByItem with dir desc wrapping the column buildAst', () => { + const col = columnAccessor(TABLE, 'score', CIPHERSTASH_DOUBLE_CODEC_ID); + const item = cipherstashDesc(col); + expect(item).toBeInstanceOf(OrderByItem); + expect(item).toMatchObject({ dir: 'desc', expr: col.buildAst() }); + }); +}); + +describe('cipherstashAsc / cipherstashDesc — SQL snapshot', () => { + it('lowers ORDER BY cipherstashAsc(email) to a bare-column ASC clause', () => { + const col = columnAccessor(TABLE, 'email', CIPHERSTASH_STRING_CODEC_ID); + const ast = selectWithOrderBy([cipherstashAsc(col)]); + const lowered = makeAdapter().lower(ast, { contract }); + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT "user"."id" AS "id" FROM "user" ORDER BY "user"."email" ASC"`, + ); + expect(lowered.params).toHaveLength(0); + }); + + it('lowers ORDER BY cipherstashDesc(birthday) to a bare-column DESC clause', () => { + const col = columnAccessor(TABLE, 'birthday', CIPHERSTASH_DATE_CODEC_ID); + const ast = selectWithOrderBy([cipherstashDesc(col)]); + const lowered = makeAdapter().lower(ast, { contract }); + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT "user"."id" AS "id" FROM "user" ORDER BY "user"."birthday" DESC"`, + ); + }); + + it('lowers a multi-key ORDER BY with mixed directions', () => { + const score = columnAccessor(TABLE, 'score', CIPHERSTASH_DOUBLE_CODEC_ID); + const amount = columnAccessor(TABLE, 'amount', CIPHERSTASH_BIGINT_CODEC_ID); + const ast = 
selectWithOrderBy([cipherstashDesc(score), cipherstashAsc(amount)]); + const lowered = makeAdapter().lower(ast, { contract }); + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT "user"."id" AS "id" FROM "user" ORDER BY "user"."score" DESC, "user"."amount" ASC"`, + ); + }); +}); + +describe('cipherstashAsc / cipherstashDesc — error paths', () => { + it('cipherstashAsc rejects a non-cipherstash column', () => { + const col = columnAccessor(TABLE, 'plain', 'pg/text@1'); + expect(() => cipherstashAsc(col)).toThrowError( + /cipherstashAsc.*pg\/text@1.*one of.*cipherstash\/string@1.*cipherstash\/double@1.*cipherstash\/bigint@1.*cipherstash\/date@1/s, + ); + }); + + it('cipherstashAsc rejects a cipherstash boolean column (not in order-and-range set)', () => { + const col = columnAccessor(TABLE, 'enabled', CIPHERSTASH_BOOLEAN_CODEC_ID); + expect(() => cipherstashAsc(col)).toThrowError( + /cipherstashAsc.*cipherstash\/boolean@1.*does not support order-and-range/, + ); + }); + + it('cipherstashAsc rejects a cipherstash json column (not in order-and-range set)', () => { + const col = columnAccessor(TABLE, 'payload', CIPHERSTASH_JSON_CODEC_ID); + expect(() => cipherstashAsc(col)).toThrowError(/cipherstashAsc.*cipherstash\/json@1/); + }); + + it('cipherstashDesc rejects a non-cipherstash column with the same diagnostic shape', () => { + const col = columnAccessor(TABLE, 'plain', 'pg/text@1'); + expect(() => cipherstashDesc(col)).toThrowError(/cipherstashDesc.*pg\/text@1/); + }); +}); + +describe('cipherstashJsonbPathQueryFirst — AST shape and SQL snapshot', () => { + it('returns an Expression whose returnType is cipherstash/json@1', () => { + const col = columnAccessor(TABLE, 'payload', CIPHERSTASH_JSON_CODEC_ID); + const expr = cipherstashJsonbPathQueryFirst(col, '$.user.email'); + expect(expr.returnType).toEqual({ codecId: CIPHERSTASH_JSON_CODEC_ID, nullable: false }); + const ast = expr.buildAst(); + expect(ast.kind).toBe('operation'); + }); + + it('lowers to 
eql_v2.jsonb_path_query_first("payload", $1) with the path bound as pg/text@1', () => { + const col = columnAccessor(TABLE, 'payload', CIPHERSTASH_JSON_CODEC_ID); + const expr = cipherstashJsonbPathQueryFirst(col, '$.user.email'); + const ast = selectWithProjection('first_email', expr.buildAst()); + const lowered = makeAdapter().lower(ast, { contract }); + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT eql_v2.jsonb_path_query_first("user"."payload", $1) AS "first_email" FROM "user""`, + ); + expect(lowered.params).toEqual(['$.user.email']); + }); +}); + +describe('cipherstashJsonbGet — AST shape and SQL snapshot', () => { + it('returns an Expression whose returnType is cipherstash/json@1', () => { + const col = columnAccessor(TABLE, 'payload', CIPHERSTASH_JSON_CODEC_ID); + const expr = cipherstashJsonbGet(col, 'email'); + expect(expr.returnType).toEqual({ codecId: CIPHERSTASH_JSON_CODEC_ID, nullable: false }); + }); + + it('lowers to eql_v2."->"("payload", $1) with the key bound as pg/text@1', () => { + const col = columnAccessor(TABLE, 'payload', CIPHERSTASH_JSON_CODEC_ID); + const expr = cipherstashJsonbGet(col, 'email'); + const ast = selectWithProjection('email_field', expr.buildAst()); + const lowered = makeAdapter().lower(ast, { contract }); + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT eql_v2."->"("user"."payload", $1) AS "email_field" FROM "user""`, + ); + expect(lowered.params).toEqual(['email']); + }); +}); + +describe('cipherstashJsonbPathQueryFirst / cipherstashJsonbGet — error paths', () => { + it('cipherstashJsonbPathQueryFirst rejects a non-cipherstash column', () => { + const col = columnAccessor(TABLE, 'plain', 'pg/text@1'); + expect(() => cipherstashJsonbPathQueryFirst(col, '$.foo')).toThrowError( + /cipherstashJsonbPathQueryFirst.*pg\/text@1.*cipherstash\/json@1/, + ); + }); + + it('cipherstashJsonbPathQueryFirst rejects a cipherstash-but-non-json column', () => { + const col = columnAccessor(TABLE, 'email', 
CIPHERSTASH_STRING_CODEC_ID); + expect(() => cipherstashJsonbPathQueryFirst(col, '$.foo')).toThrowError( + /cipherstashJsonbPathQueryFirst.*cipherstash\/string@1.*cipherstash\/json@1/, + ); + }); + + it('cipherstashJsonbPathQueryFirst rejects a non-string path', () => { + const col = columnAccessor(TABLE, 'payload', CIPHERSTASH_JSON_CODEC_ID); + expect(() => cipherstashJsonbPathQueryFirst(col, 42 as unknown as string)).toThrowError( + /cipherstashJsonbPathQueryFirst.*string path.*number/, + ); + }); + + it('cipherstashJsonbGet rejects a non-json cipherstash column with a json-specific diagnostic', () => { + const col = columnAccessor(TABLE, 'score', CIPHERSTASH_DOUBLE_CODEC_ID); + expect(() => cipherstashJsonbGet(col, 'foo')).toThrowError( + /cipherstashJsonbGet.*cipherstash\/double@1.*cipherstash\/json@1/, + ); + }); + + it('cipherstashJsonbGet rejects a non-string path', () => { + const col = columnAccessor(TABLE, 'payload', CIPHERSTASH_JSON_CODEC_ID); + expect(() => cipherstashJsonbGet(col, null as unknown as string)).toThrowError( + /cipherstashJsonbGet.*string path.*null/, + ); + }); +}); diff --git a/packages/prisma-next/test/helpers.types.test-d.ts b/packages/prisma-next/test/helpers.types.test-d.ts new file mode 100644 index 00000000..be218b44 --- /dev/null +++ b/packages/prisma-next/test/helpers.types.test-d.ts @@ -0,0 +1,70 @@ +/** + * Type-level tests for the free-standing helpers. + * + * The helpers are typed at their declaration site (no + * `QueryOperationTypes` entry). These assertions pin: + * + * - sort helpers return `OrderByItem` + * - JSON SELECT-expression helpers return + * `Expression<{ codecId: 'cipherstash/json@1'; nullable: false }>` + * + * Argument validation (cipherstash codec id required at runtime) is + * deliberately not type-enforced — the helpers accept + * `Expression<ScopeField>` so the column-bound expression types from + * the model accessor flow through without round-tripping through the + * codec-types augmentation. 
Runtime guard tests live in + * `helpers.test.ts`. + */ + +import type { OrderByItem } from '@prisma-next/sql-relational-core/ast'; +import type { Expression, ScopeField } from '@prisma-next/sql-relational-core/expression'; +import { expectTypeOf } from 'vitest'; +import { + cipherstashAsc, + cipherstashDesc, + cipherstashJsonbGet, + cipherstashJsonbPathQueryFirst, +} from '../src/execution/helpers'; + +declare const anyCol: Expression<ScopeField>; + +expectTypeOf(cipherstashAsc(anyCol)).toEqualTypeOf<OrderByItem>(); +expectTypeOf(cipherstashDesc(anyCol)).toEqualTypeOf<OrderByItem>(); + +type JsonReturn = Expression<{ codecId: 'cipherstash/json@1'; nullable: false }>; + +// Bidirectional assignability check. `JsonReturn` is the +// `Expression<{codecId: 'cipherstash/json@1', nullable: false}>` type +// the JSON helpers commit to producing; the helpers must produce +// something assignable to that slot, and a `JsonReturn` value must +// be assignable back to the helper-return type. Direct `toEqualTypeOf<JsonReturn>` fails +// strict equality because `Expression<R>` is an intersection of +// `QueryOperationReturn` and the narrowed `{returnType: R}` shape; +// the intersection's `returnType` field carries both the broad +// `{codecId: string; nullable: boolean}` quotient and the narrow +// literal at once, which expectTypeOf's strict comparator does not +// collapse. Bidirectional assignability is the exact assertion that +// the helper output is interchangeable with the typed slot — the +// stronger `toEqualTypeOf` shape would not catch any additional +// drift in practice. 
+declare const expectedJson: JsonReturn; +const pathQuery = cipherstashJsonbPathQueryFirst(anyCol, '$.foo'); +const pathGet = cipherstashJsonbGet(anyCol, 'foo'); +const _assignA: JsonReturn = pathQuery; +const _assignB: JsonReturn = pathGet; +const _assignC: typeof pathQuery = expectedJson; +const _assignD: typeof pathGet = expectedJson; +void _assignA; +void _assignB; +void _assignC; +void _assignD; + +// The path must be a string (compile-time error on number / null / +// undefined). `@ts-expect-error` directives keep the negative +// assertion structurally — if the helper signature accidentally +// widens its `path` parameter, the directive becomes a noop and the +// test fails. +// @ts-expect-error path is required to be a string +cipherstashJsonbPathQueryFirst(anyCol, 42); +// @ts-expect-error path is required to be a string +cipherstashJsonbGet(anyCol, null); diff --git a/packages/prisma-next/test/operation-types.types.test-d.ts b/packages/prisma-next/test/operation-types.types.test-d.ts new file mode 100644 index 00000000..33c2cd8e --- /dev/null +++ b/packages/prisma-next/test/operation-types.types.test-d.ts @@ -0,0 +1,253 @@ +/** + * Type-level acceptance tests for `QueryOperationTypes` in + * `src/types/operation-types.ts`. + * + * Mirrors the framework's `OpMatchesField` predicate (defined in + * `packages/3-extensions/sql-orm-client/src/types.ts`) inline so the + * matching behaviour can be exercised against a synthetic + * `CodecTypes` table without pulling in the full ORM model accessor. + * + * The tests pin two surface contracts: + * + * 1. **Codec-id dispatch (positive/negative)** for the legacy and + * single-codec entries (`cipherstashEq`, `cipherstashIlike`, + * `cipherstashNotIlike`, `cipherstashJsonbPathExists`): + * the operator must surface on its target codec id and on no + * other. + * + * 2. 
**Trait dispatch (positive/negative)** for the multi-codec + * entries (`cipherstashNe`, `cipherstashInArray`, + * `cipherstashNotInArray`, `cipherstashGt`, `cipherstashGte`, + * `cipherstashLt`, `cipherstashLte`, `cipherstashBetween`, + * `cipherstashNotBetween`): the operator must surface on every + * cipherstash codec whose trait set carries the gating trait + * and on no codec without it (notably `pg/text@1`, which is the + * regression-pinning negative case for the wrong-SQL `eq` + * footgun). + * + * AGENTS.md permits `@ts-expect-error` exclusively in negative + * type-test files; this is one of them. + */ + +import type { QueryOperationTypes } from '../src/types/operation-types'; + +// -- Synthetic CodecTypes table ---------------------------------------------- +// +// Declares each cipherstash codec id under test with the traits its +// runtime descriptor advertises (per +// `extension-metadata/constants.ts:CIPHERSTASH_CODEC_TRAITS`). The +// trait identifiers use the `cipherstash:` namespace literally, +// matching the runtime widening of `descriptor.traits` to +// `readonly string[]` performed by the ORM model accessor. +// +// `pg/text@1` is included as the regression-pinning negative codec — +// it carries the framework's built-in `textual` / `equality` traits +// but none of the cipherstash-namespaced traits, so trait-dispatched +// cipherstash operators must NOT surface on it. 
+ +type CSEq = 'cipherstash:equality'; +type CSOR = 'cipherstash:order-and-range'; +type CSFTS = 'cipherstash:free-text-search'; +type CSSJ = 'cipherstash:searchable-json'; + +type CT = { + readonly 'cipherstash/string@1': { + readonly input: string; + readonly output: string; + readonly traits: CSEq | CSOR | CSFTS; + }; + readonly 'cipherstash/double@1': { + readonly input: number; + readonly output: number; + readonly traits: CSEq | CSOR; + }; + readonly 'cipherstash/bigint@1': { + readonly input: bigint; + readonly output: bigint; + readonly traits: CSEq | CSOR; + }; + readonly 'cipherstash/date@1': { + readonly input: Date; + readonly output: Date; + readonly traits: CSEq | CSOR; + }; + readonly 'cipherstash/boolean@1': { + readonly input: boolean; + readonly output: boolean; + readonly traits: CSEq; + }; + readonly 'cipherstash/json@1': { + readonly input: unknown; + readonly output: unknown; + readonly traits: CSSJ; + }; + readonly 'pg/text@1': { + readonly input: string; + readonly output: string; + readonly traits: 'textual' | 'equality'; + }; + readonly 'pg/bool@1': { + readonly input: boolean; + readonly output: boolean; + readonly traits: 'boolean'; + }; +}; + +type Ops = QueryOperationTypes<CT>; + +// -- Inline `OpMatchesField` (mirrors the framework definition) -------------- + +type OpMatchesField<Op, C extends string, Cct extends Record<string, unknown>> = Op extends { + readonly self: infer Self; +} + ? Self extends { readonly codecId: C } + ? true + : Self extends { readonly traits: infer R extends readonly string[] } + ? C extends keyof Cct + ? Cct[C] extends { readonly traits: infer FT } + ? [R[number]] extends [FT] + ? 
true + : false + : false + : false + : false + : false; + +type Expect<T extends true> = T; +type M<N extends keyof Ops, C extends string> = OpMatchesField<Ops[N], C, CT>; + +// -- cipherstashEq (string only) -------------------------------------------- + +type _eq_string_pos = Expect<M<'cipherstashEq', 'cipherstash/string@1'>>; +// @ts-expect-error cipherstashEq must not surface on cipherstash/double@1. +type _eq_double_neg = Expect<M<'cipherstashEq', 'cipherstash/double@1'>>; +// @ts-expect-error cipherstashEq must not surface on pg/text@1. +type _eq_text_neg = Expect<M<'cipherstashEq', 'pg/text@1'>>; + +// -- cipherstashIlike (string only) ----------------------------------------- + +type _ilike_string_pos = Expect<M<'cipherstashIlike', 'cipherstash/string@1'>>; +// @ts-expect-error cipherstashIlike must not surface on cipherstash/double@1. +type _ilike_double_neg = Expect<M<'cipherstashIlike', 'cipherstash/double@1'>>; + +// -- cipherstashNotIlike (string only — single-codec dispatch) --------------- + +type _notilike_string_pos = Expect<M<'cipherstashNotIlike', 'cipherstash/string@1'>>; +// @ts-expect-error cipherstashNotIlike must not surface on cipherstash/double@1. +type _notilike_double_neg = Expect<M<'cipherstashNotIlike', 'cipherstash/double@1'>>; +// @ts-expect-error cipherstashNotIlike must not surface on pg/text@1. +type _notilike_text_neg = Expect<M<'cipherstashNotIlike', 'pg/text@1'>>; + +// -- cipherstashNe (equality trait — string/double/bigint/date/boolean) ------ + +type _ne_string_pos = Expect<M<'cipherstashNe', 'cipherstash/string@1'>>; +type _ne_double_pos = Expect<M<'cipherstashNe', 'cipherstash/double@1'>>; +type _ne_bigint_pos = Expect<M<'cipherstashNe', 'cipherstash/bigint@1'>>; +type _ne_date_pos = Expect<M<'cipherstashNe', 'cipherstash/date@1'>>; +type _ne_boolean_pos = Expect<M<'cipherstashNe', 'cipherstash/boolean@1'>>; +// @ts-expect-error cipherstashNe must not surface on cipherstash/json@1 (no equality trait). 
+type _ne_json_neg = Expect<M<'cipherstashNe', 'cipherstash/json@1'>>; +// @ts-expect-error regression: framework `equality` trait must not re-attach cipherstash ops on pg/text@1. +type _ne_text_neg = Expect<M<'cipherstashNe', 'pg/text@1'>>; + +// -- cipherstashInArray (equality trait) ------------------------------------ + +type _ina_string_pos = Expect<M<'cipherstashInArray', 'cipherstash/string@1'>>; +type _ina_boolean_pos = Expect<M<'cipherstashInArray', 'cipherstash/boolean@1'>>; +// @ts-expect-error cipherstashInArray must not surface on cipherstash/json@1. +type _ina_json_neg = Expect<M<'cipherstashInArray', 'cipherstash/json@1'>>; +// @ts-expect-error cipherstashInArray must not surface on pg/text@1. +type _ina_text_neg = Expect<M<'cipherstashInArray', 'pg/text@1'>>; + +// -- cipherstashNotInArray (equality trait) --------------------------------- + +type _nina_double_pos = Expect<M<'cipherstashNotInArray', 'cipherstash/double@1'>>; +// @ts-expect-error cipherstashNotInArray must not surface on cipherstash/json@1. +type _nina_json_neg = Expect<M<'cipherstashNotInArray', 'cipherstash/json@1'>>; + +// -- cipherstashGt (order-and-range trait — string/double/bigint/date) ------- + +type _gt_string_pos = Expect<M<'cipherstashGt', 'cipherstash/string@1'>>; +type _gt_double_pos = Expect<M<'cipherstashGt', 'cipherstash/double@1'>>; +type _gt_bigint_pos = Expect<M<'cipherstashGt', 'cipherstash/bigint@1'>>; +type _gt_date_pos = Expect<M<'cipherstashGt', 'cipherstash/date@1'>>; +// @ts-expect-error cipherstashGt must not surface on cipherstash/boolean@1 (no order-and-range trait). +type _gt_boolean_neg = Expect<M<'cipherstashGt', 'cipherstash/boolean@1'>>; +// @ts-expect-error cipherstashGt must not surface on cipherstash/json@1. +type _gt_json_neg = Expect<M<'cipherstashGt', 'cipherstash/json@1'>>; +// @ts-expect-error cipherstashGt must not surface on pg/text@1. 
+type _gt_text_neg = Expect<M<'cipherstashGt', 'pg/text@1'>>; + +// -- cipherstashGte / cipherstashLt / cipherstashLte (same trait set) ------- + +type _gte_double_pos = Expect<M<'cipherstashGte', 'cipherstash/double@1'>>; +type _lt_bigint_pos = Expect<M<'cipherstashLt', 'cipherstash/bigint@1'>>; +type _lte_date_pos = Expect<M<'cipherstashLte', 'cipherstash/date@1'>>; +// @ts-expect-error cipherstashGte must not surface on cipherstash/boolean@1. +type _gte_boolean_neg = Expect<M<'cipherstashGte', 'cipherstash/boolean@1'>>; +// @ts-expect-error cipherstashLt must not surface on cipherstash/json@1. +type _lt_json_neg = Expect<M<'cipherstashLt', 'cipherstash/json@1'>>; + +// -- cipherstashBetween / cipherstashNotBetween (order-and-range) ----------- + +type _between_string_pos = Expect<M<'cipherstashBetween', 'cipherstash/string@1'>>; +type _between_double_pos = Expect<M<'cipherstashBetween', 'cipherstash/double@1'>>; +type _notbetween_date_pos = Expect<M<'cipherstashNotBetween', 'cipherstash/date@1'>>; +// @ts-expect-error cipherstashBetween must not surface on cipherstash/boolean@1. +type _between_boolean_neg = Expect<M<'cipherstashBetween', 'cipherstash/boolean@1'>>; +// @ts-expect-error cipherstashNotBetween must not surface on pg/text@1. +type _notbetween_text_neg = Expect<M<'cipherstashNotBetween', 'pg/text@1'>>; + +// -- cipherstashJsonbPathExists (json only — single-codec dispatch) --------- + +type _jpe_json_pos = Expect<M<'cipherstashJsonbPathExists', 'cipherstash/json@1'>>; +// @ts-expect-error cipherstashJsonbPathExists must not surface on cipherstash/string@1. +type _jpe_string_neg = Expect<M<'cipherstashJsonbPathExists', 'cipherstash/string@1'>>; +// @ts-expect-error cipherstashJsonbPathExists must not surface on pg/text@1. 
+type _jpe_text_neg = Expect<M<'cipherstashJsonbPathExists', 'pg/text@1'>>; + +// -- Anchor unused type aliases so noUnusedLocals stays happy --------------- + +export type _Anchors = [ + _eq_string_pos, + _eq_double_neg, + _eq_text_neg, + _ilike_string_pos, + _ilike_double_neg, + _notilike_string_pos, + _notilike_double_neg, + _notilike_text_neg, + _ne_string_pos, + _ne_double_pos, + _ne_bigint_pos, + _ne_date_pos, + _ne_boolean_pos, + _ne_json_neg, + _ne_text_neg, + _ina_string_pos, + _ina_boolean_pos, + _ina_json_neg, + _ina_text_neg, + _nina_double_pos, + _nina_json_neg, + _gt_string_pos, + _gt_double_pos, + _gt_bigint_pos, + _gt_date_pos, + _gt_boolean_neg, + _gt_json_neg, + _gt_text_neg, + _gte_double_pos, + _lt_bigint_pos, + _lte_date_pos, + _gte_boolean_neg, + _lt_json_neg, + _between_string_pos, + _between_double_pos, + _notbetween_date_pos, + _between_boolean_neg, + _notbetween_text_neg, + _jpe_json_pos, + _jpe_string_neg, + _jpe_text_neg, +]; diff --git a/packages/prisma-next/test/operator-lowering-equality.test.ts b/packages/prisma-next/test/operator-lowering-equality.test.ts new file mode 100644 index 00000000..bd5d404a --- /dev/null +++ b/packages/prisma-next/test/operator-lowering-equality.test.ts @@ -0,0 +1,165 @@ +/** + * Operator lowering — equality-family operators on + * `cipherstash/string@1` columns: + * + * - `cipherstashEq` (single-codec registration on the string codec) + * - `cipherstashNe` / `cipherstashInArray` / `cipherstashNotInArray` + * (trait-dispatched via `cipherstash:equality`) + * + * The lowered SQL pins the `eql_v2.eq(...)` shape (positive form) and + * the `NOT eql_v2.eq(...)` / OR-of-equalities (variable-arity forms). + * Each bound param is an `EncryptedString` envelope tagged with the + * `(table, column)` routing key — the cipherstash bulk-encrypt + * middleware identifies envelopes via `instanceof` and groups them by + * routing key at the encode-params boundary. 
+ * + * Shared adapter / contract / operator-invocation scaffolding lives in + * `operator-lowering.helpers.ts`. + */ + +import { describe, expect, it } from 'vitest'; +import { EncryptedString } from '../src/execution/envelope-string'; +import { + COLUMN, + callOperator, + columnAccessor, + contract, + getOperator, + makeAdapter, + selectWithWhere, + TABLE, +} from './operator-lowering.helpers'; + +describe('cipherstash operator lowering — cipherstashEq', () => { + it('lowers email.cipherstashEq(plaintext) to eql_v2.eq("email", $1::eql_v2_encrypted)', () => { + const op = getOperator('cipherstashEq'); + const predicate = callOperator(op, columnAccessor(TABLE, COLUMN), 'alice@example.com'); + const ast = selectWithWhere(predicate); + + const lowered = makeAdapter().lower(ast, { contract }); + + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT "user"."id" AS "id" FROM "user" WHERE eql_v2.eq("user"."email", $1::eql_v2_encrypted)"`, + ); + }); + + it('binds the plaintext as an EncryptedString envelope tagged with the cipherstash routing key', () => { + const op = getOperator('cipherstashEq'); + const predicate = callOperator(op, columnAccessor(TABLE, COLUMN), 'alice@example.com'); + const ast = selectWithWhere(predicate); + + const lowered = makeAdapter().lower(ast, { contract }); + + // Single bound param; it is the `EncryptedString` envelope (NOT the + // raw plaintext string) so the bulk-encrypt middleware can identify + // it via `value instanceof EncryptedString` and group it by routing + // key. Stamping `(table, column)` on the envelope at lowering time + // is the mechanism that lets the SELECT-side (which + // `bulk-encrypt.ts:stampRoutingKeysFromAst` does not walk — only + // insert/update) still participate in the routing-key grouping. 
+ expect(lowered.params).toHaveLength(1); + const envelope = lowered.params[0]; + expect(envelope).toBeInstanceOf(EncryptedString); + const handle = (envelope as EncryptedString).expose(); + expect(handle.plaintext).toBe('alice@example.com'); + expect(handle.table).toBe(TABLE); + expect(handle.column).toBe(COLUMN); + }); + + it('passes a pre-built EncryptedString envelope through unchanged (advanced caller path)', () => { + const op = getOperator('cipherstashEq'); + const userEnvelope = EncryptedString.from('alice@example.com'); + const predicate = callOperator(op, columnAccessor(TABLE, COLUMN), userEnvelope); + const ast = selectWithWhere(predicate); + + const lowered = makeAdapter().lower(ast, { contract }); + + // The same envelope object flows through; the operator only + // augments it with the routing key (write-once-wins semantics — + // see `setHandleRoutingKey`). + expect(lowered.params[0]).toBe(userEnvelope); + const handle = userEnvelope.expose(); + expect(handle.table).toBe(TABLE); + expect(handle.column).toBe(COLUMN); + }); +}); + +describe('cipherstash operator lowering — equality extensions', () => { + // `cipherstashNe`, `cipherstashInArray`, `cipherstashNotInArray` + // dispatch via the `cipherstash:equality` trait — visible on + // string, double, bigint, date, boolean codecs. 
+ + it('lowers email.cipherstashNe(plaintext) to NOT eql_v2.eq(...)', () => { + const op = getOperator('cipherstashNe'); + const predicate = callOperator(op, columnAccessor(TABLE, COLUMN), 'alice@example.com'); + const lowered = makeAdapter().lower(selectWithWhere(predicate), { contract }); + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT "user"."id" AS "id" FROM "user" WHERE NOT eql_v2.eq("user"."email", $1::eql_v2_encrypted)"`, + ); + expect(lowered.params).toHaveLength(1); + expect(lowered.params[0]).toBeInstanceOf(EncryptedString); + }); + + it('lowers cipherstashInArray with a single element to a one-term OR', () => { + const op = getOperator('cipherstashInArray'); + const predicate = callOperator(op, columnAccessor(TABLE, COLUMN), ['alice@example.com']); + const lowered = makeAdapter().lower(selectWithWhere(predicate), { contract }); + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT "user"."id" AS "id" FROM "user" WHERE (eql_v2.eq("user"."email", $1::eql_v2_encrypted))"`, + ); + expect(lowered.params).toHaveLength(1); + }); + + it('lowers cipherstashInArray with two elements to a two-term OR', () => { + const op = getOperator('cipherstashInArray'); + const predicate = callOperator(op, columnAccessor(TABLE, COLUMN), ['a@x.com', 'b@x.com']); + const lowered = makeAdapter().lower(selectWithWhere(predicate), { contract }); + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT "user"."id" AS "id" FROM "user" WHERE (eql_v2.eq("user"."email", $1::eql_v2_encrypted) OR eql_v2.eq("user"."email", $2::eql_v2_encrypted))"`, + ); + expect(lowered.params).toHaveLength(2); + }); + + it('lowers cipherstashInArray with three elements to a three-term OR', () => { + const op = getOperator('cipherstashInArray'); + const predicate = callOperator(op, columnAccessor(TABLE, COLUMN), [ + 'a@x.com', + 'b@x.com', + 'c@x.com', + ]); + const lowered = makeAdapter().lower(selectWithWhere(predicate), { contract }); + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT 
"user"."id" AS "id" FROM "user" WHERE (eql_v2.eq("user"."email", $1::eql_v2_encrypted) OR eql_v2.eq("user"."email", $2::eql_v2_encrypted) OR eql_v2.eq("user"."email", $3::eql_v2_encrypted))"`, + ); + expect(lowered.params).toHaveLength(3); + // Every envelope shares the same `(table, column)` routing key — + // the bulk-encrypt grouping invariant for variable-arity ops. + for (const param of lowered.params) { + expect(param).toBeInstanceOf(EncryptedString); + const handle = (param as EncryptedString).expose(); + expect(handle.table).toBe(TABLE); + expect(handle.column).toBe(COLUMN); + } + }); + + it('lowers cipherstashNotInArray to NOT-prefixed OR-of-equalities', () => { + const op = getOperator('cipherstashNotInArray'); + const predicate = callOperator(op, columnAccessor(TABLE, COLUMN), ['a@x.com', 'b@x.com']); + const lowered = makeAdapter().lower(selectWithWhere(predicate), { contract }); + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT "user"."id" AS "id" FROM "user" WHERE NOT (eql_v2.eq("user"."email", $1::eql_v2_encrypted) OR eql_v2.eq("user"."email", $2::eql_v2_encrypted))"`, + ); + }); + + it('cipherstashInArray rejects empty arrays with a descriptive error', () => { + const op = getOperator('cipherstashInArray'); + expect(() => callOperator(op, columnAccessor(TABLE, COLUMN), [])).toThrow(/empty array/); + }); + + it('cipherstashInArray rejects non-array arguments with a descriptive error', () => { + const op = getOperator('cipherstashInArray'); + expect(() => callOperator(op, columnAccessor(TABLE, COLUMN), 'not-an-array')).toThrow( + /expected an array/, + ); + }); +}); diff --git a/packages/prisma-next/test/operator-lowering-order-range.test.ts b/packages/prisma-next/test/operator-lowering-order-range.test.ts new file mode 100644 index 00000000..94c69c38 --- /dev/null +++ b/packages/prisma-next/test/operator-lowering-order-range.test.ts @@ -0,0 +1,96 @@ +/** + * Operator lowering — order-and-range operators trait-dispatched via + * 
`cipherstash:order-and-range`: + * + * - `cipherstashGt` / `cipherstashGte` / `cipherstashLt` / + * `cipherstashLte` + * - `cipherstashBetween` / `cipherstashNotBetween` + * + * The trait is visible on the string, double, bigint, and date + * codecs; this file exercises the lowered SQL shape against the + * string column. Per-codec envelope wrapping (the dispatch table + * picks `EncryptedDouble` / `EncryptedBigInt` / `EncryptedDate` + * subclasses for the matching columns) lives in the keep file + * `operator-lowering.test.ts`. + * + * Shared adapter / contract / operator-invocation scaffolding lives in + * `operator-lowering.helpers.ts`. + */ + +import { describe, expect, it } from 'vitest'; +import { + COLUMN, + callOperator, + columnAccessor, + contract, + getOperator, + makeAdapter, + selectWithWhere, + TABLE, +} from './operator-lowering.helpers'; + +describe('cipherstash operator lowering — order-and-range extensions', () => { + // `cipherstashGt/Gte/Lt/Lte/Between/NotBetween` dispatch via the + // `cipherstash:order-and-range` trait — visible on string, + // double, bigint, date codecs. 
+ + it('lowers cipherstashGt(plaintext) to eql_v2.gt(...)', () => { + const op = getOperator('cipherstashGt'); + const predicate = callOperator(op, columnAccessor(TABLE, COLUMN), 'm'); + const lowered = makeAdapter().lower(selectWithWhere(predicate), { contract }); + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT "user"."id" AS "id" FROM "user" WHERE eql_v2.gt("user"."email", $1::eql_v2_encrypted)"`, + ); + }); + + it('lowers cipherstashGte(plaintext) to eql_v2.gte(...)', () => { + const op = getOperator('cipherstashGte'); + const predicate = callOperator(op, columnAccessor(TABLE, COLUMN), 'm'); + const lowered = makeAdapter().lower(selectWithWhere(predicate), { contract }); + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT "user"."id" AS "id" FROM "user" WHERE eql_v2.gte("user"."email", $1::eql_v2_encrypted)"`, + ); + }); + + it('lowers cipherstashLt(plaintext) to eql_v2.lt(...)', () => { + const op = getOperator('cipherstashLt'); + const predicate = callOperator(op, columnAccessor(TABLE, COLUMN), 'm'); + const lowered = makeAdapter().lower(selectWithWhere(predicate), { contract }); + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT "user"."id" AS "id" FROM "user" WHERE eql_v2.lt("user"."email", $1::eql_v2_encrypted)"`, + ); + }); + + it('lowers cipherstashLte(plaintext) to eql_v2.lte(...)', () => { + const op = getOperator('cipherstashLte'); + const predicate = callOperator(op, columnAccessor(TABLE, COLUMN), 'm'); + const lowered = makeAdapter().lower(selectWithWhere(predicate), { contract }); + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT "user"."id" AS "id" FROM "user" WHERE eql_v2.lte("user"."email", $1::eql_v2_encrypted)"`, + ); + }); + + it('lowers cipherstashBetween(lo, hi) to gte AND lte', () => { + const op = getOperator('cipherstashBetween'); + const predicate = callOperator(op, columnAccessor(TABLE, COLUMN), 'a', 'm'); + const lowered = makeAdapter().lower(selectWithWhere(predicate), { contract }); + 
expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT "user"."id" AS "id" FROM "user" WHERE eql_v2.gte("user"."email", $1::eql_v2_encrypted) AND eql_v2.lte("user"."email", $2::eql_v2_encrypted)"`, + ); + expect(lowered.params).toHaveLength(2); + }); + + it('lowers cipherstashNotBetween(lo, hi) to NOT (gte AND lte)', () => { + const op = getOperator('cipherstashNotBetween'); + const predicate = callOperator(op, columnAccessor(TABLE, COLUMN), 'a', 'm'); + const lowered = makeAdapter().lower(selectWithWhere(predicate), { contract }); + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT "user"."id" AS "id" FROM "user" WHERE NOT (eql_v2.gte("user"."email", $1::eql_v2_encrypted) AND eql_v2.lte("user"."email", $2::eql_v2_encrypted))"`, + ); + }); + + it('cipherstashBetween rejects wrong arity with a descriptive error', () => { + const op = getOperator('cipherstashBetween'); + expect(() => callOperator(op, columnAccessor(TABLE, COLUMN), 'a')).toThrow(/expected 2/); + }); +}); diff --git a/packages/prisma-next/test/operator-lowering-text-search.test.ts b/packages/prisma-next/test/operator-lowering-text-search.test.ts new file mode 100644 index 00000000..0601331a --- /dev/null +++ b/packages/prisma-next/test/operator-lowering-text-search.test.ts @@ -0,0 +1,69 @@ +/** + * Operator lowering — free-text-search operators: + * + * - `cipherstashIlike` (single-codec registration on the string codec) + * - `cipherstashNotIlike` (trait-dispatched via + * `cipherstash:free-text-search`) + * + * EQL's `ilike` function takes an encrypted match-term (the pattern is + * encrypted just like an `eq` value); the bound param is an + * `EncryptedString` envelope tagged with the `(table, column)` routing + * key. + * + * Shared adapter / contract / operator-invocation scaffolding lives in + * `operator-lowering.helpers.ts`. 
+ */ + +import { describe, expect, it } from 'vitest'; +import { EncryptedString } from '../src/execution/envelope-string'; +import { + COLUMN, + callOperator, + columnAccessor, + contract, + getOperator, + makeAdapter, + selectWithWhere, + TABLE, +} from './operator-lowering.helpers'; + +describe('cipherstash operator lowering — cipherstashIlike', () => { + it('lowers email.cipherstashIlike(pattern) to eql_v2.ilike("email", $1::eql_v2_encrypted)', () => { + const op = getOperator('cipherstashIlike'); + const predicate = callOperator(op, columnAccessor(TABLE, COLUMN), '%alice%'); + const ast = selectWithWhere(predicate); + + const lowered = makeAdapter().lower(ast, { contract }); + + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT "user"."id" AS "id" FROM "user" WHERE eql_v2.ilike("user"."email", $1::eql_v2_encrypted)"`, + ); + }); + + it('binds the pattern as an EncryptedString envelope tagged with the cipherstash routing key', () => { + const op = getOperator('cipherstashIlike'); + const predicate = callOperator(op, columnAccessor(TABLE, COLUMN), '%alice%'); + const ast = selectWithWhere(predicate); + + const lowered = makeAdapter().lower(ast, { contract }); + + expect(lowered.params).toHaveLength(1); + const envelope = lowered.params[0]; + expect(envelope).toBeInstanceOf(EncryptedString); + const handle = (envelope as EncryptedString).expose(); + expect(handle.plaintext).toBe('%alice%'); + expect(handle.table).toBe(TABLE); + expect(handle.column).toBe(COLUMN); + }); +}); + +describe('cipherstash operator lowering — free-text-search extensions', () => { + it('lowers cipherstashNotIlike(pattern) to NOT eql_v2.ilike(...)', () => { + const op = getOperator('cipherstashNotIlike'); + const predicate = callOperator(op, columnAccessor(TABLE, COLUMN), '%alice%'); + const lowered = makeAdapter().lower(selectWithWhere(predicate), { contract }); + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT "user"."id" AS "id" FROM "user" WHERE NOT 
eql_v2.ilike("user"."email", $1::eql_v2_encrypted)"`, + ); + }); +}); diff --git a/packages/prisma-next/test/operator-lowering.helpers.ts b/packages/prisma-next/test/operator-lowering.helpers.ts new file mode 100644 index 00000000..cfdf0901 --- /dev/null +++ b/packages/prisma-next/test/operator-lowering.helpers.ts @@ -0,0 +1,216 @@ +/** + * Shared scaffolding for the `operator-lowering*.test.ts` files. + * + * The cipherstash operator-lowering tests all use the same: + * - Postgres runtime adapter composed with the cipherstash runtime + * descriptor (so `cipherstash/*@1` codecs are resolvable at + * lower-time and `renderTypedParam` can emit + * `$N::eql_v2_encrypted`). + * - Contract scaffold with one row of per-codec columns on a `user` + * table so trait-dispatched operators can be exercised against + * each codec. + * - Operator-invocation glue (`getOperator`, `callOperator`, + * `columnAccessor`, `selectWithWhere`). + * + * The lowering shape is verified against the stack-composed Postgres + * runtime adapter (the helper at `packages/3-targets/6-adapters/ + * postgres/test/helpers/composed-adapter.ts` reproduced inline so + * cipherstash does not pick up a postgres-package test export + * dependency) loaded with the cipherstash runtime descriptor. The + * adapter's `lower` is what the runtime's encode pipeline calls before + * driver execution; pinning its output is the strongest unit-level + * assurance available without standing up a real Postgres + EQL + * bundle. 
+ */ + +import postgresRuntimeAdapter from '@prisma-next/adapter-postgres/runtime'; +import type { PostgresContract } from '@prisma-next/adapter-postgres/types'; +import { emptyCodecLookup } from '@prisma-next/framework-components/codec'; +import type { + RuntimeExtensionDescriptor, + RuntimeTargetDescriptor, +} from '@prisma-next/framework-components/execution'; +import { validateContract } from '@prisma-next/sql-contract/validate'; +import type { SqlOperationDescriptor } from '@prisma-next/sql-operations'; +import { + type AnyExpression, + ColumnRef, + ProjectionItem, + SelectAst, + TableSource, +} from '@prisma-next/sql-relational-core/ast'; +import { vi } from 'vitest'; +import { cipherstashQueryOperations } from '../src/execution/operators'; +import type { CipherstashSdk } from '../src/execution/sdk'; +import { createCipherstashRuntimeDescriptor } from '../src/exports/runtime'; +import { + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_BOOLEAN_CODEC_ID, + CIPHERSTASH_DATE_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, + CIPHERSTASH_JSON_CODEC_ID, + CIPHERSTASH_STRING_CODEC_ID, + EQL_V2_ENCRYPTED_TYPE, +} from '../src/extension-metadata/constants'; + +// Minimal SDK stub. Operator lowering doesn't talk to the SDK — the codec +// captures it lazily for the read-side decrypt path — but +// `createCipherstashRuntimeDescriptor({ sdk })` requires one. 
+export function emptySdk(): CipherstashSdk { + return { + decrypt: vi.fn(), + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; +} + +export const TABLE = 'user'; +export const COLUMN = 'email'; + +export const contract = validateContract<PostgresContract>( + { + target: 'postgres', + targetFamily: 'sql', + profileHash: 'sha256:cipherstash-operator-lowering-test', + roots: {}, + capabilities: {}, + extensionPacks: {}, + meta: {}, + storage: { + storageHash: 'sha256:cipherstash-operator-lowering-test-storage', + tables: { + [TABLE]: { + columns: { + id: { codecId: 'pg/text@1', nativeType: 'text', nullable: false }, + [COLUMN]: { + codecId: CIPHERSTASH_STRING_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + nullable: true, + }, + // Per-codec columns so the trait-dispatched operators + // can be exercised against each column type (the + // postgres renderer reads `nativeType` from the codec + // descriptor at lower time; the column is what gives + // the renderer the codec id to look up). + score: { + codecId: CIPHERSTASH_DOUBLE_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + nullable: true, + }, + amount: { + codecId: CIPHERSTASH_BIGINT_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + nullable: true, + }, + birthday: { + codecId: CIPHERSTASH_DATE_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + nullable: true, + }, + enabled: { + codecId: CIPHERSTASH_BOOLEAN_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + nullable: true, + }, + payload: { + codecId: CIPHERSTASH_JSON_CODEC_ID, + nativeType: EQL_V2_ENCRYPTED_TYPE, + nullable: true, + }, + }, + uniques: [], + indexes: [], + foreignKeys: [], + }, + }, + }, + models: {}, + }, + emptyCodecLookup, +); + +// Stub runtime target — the Postgres adapter only consults `familyId` / +// `targetId` on the target during `create`. Replicates the helper at +// `packages/3-targets/6-adapters/postgres/test/helpers/composed-adapter.ts` +// inline so cipherstash does not depend on a postgres-package test export. 
+const stubRuntimeTarget: RuntimeTargetDescriptor<'sql', 'postgres'> = { + kind: 'target', + id: 'postgres', + version: '0.0.1', + familyId: 'sql', + targetId: 'postgres', + create() { + return { familyId: 'sql', targetId: 'postgres' }; + }, +}; + +export function makeAdapter() { + // Compose the Postgres runtime adapter with the cipherstash runtime + // descriptor so the `cipherstash/string@1` codec is resolvable at + // lower-time. `renderTypedParam` reads + // `meta.db.sql.postgres.nativeType` off the registered codec to emit + // `$N::eql_v2_encrypted`; without the cipherstash pack in the stack + // the codec lookup would throw with a "missing extension pack" hint. + const cipherstash: RuntimeExtensionDescriptor<'sql', 'postgres'> = + createCipherstashRuntimeDescriptor({ sdk: emptySdk() }); + return postgresRuntimeAdapter.create({ + target: stubRuntimeTarget, + adapter: postgresRuntimeAdapter, + driver: undefined, + extensionPacks: [cipherstash], + }); +} + +const cipherstashOperatorsByMethod = cipherstashQueryOperations(); + +export function getOperator(method: string): SqlOperationDescriptor { + const op = cipherstashOperatorsByMethod[method]; + if (!op) { + throw new Error(`cipherstash operator descriptor for method "${method}" not found`); + } + return op; +} + +/** + * Invoke an operator's `impl` and return the produced AST node. The + * impl's declared return type is the framework's `QueryOperationReturn` + * (intentionally narrow — `sql-contract` does not depend on + * `relational-core`); the practical shape every `buildOperation`-built + * impl returns is `Expression<...>` whose `buildAst()` yields an + * `AnyExpression`. Mirrors the cast in + * `packages/3-extensions/sql-orm-client/src/model-accessor.ts:170`. 
+ */ +export function callOperator(op: SqlOperationDescriptor, ...args: unknown[]): AnyExpression { + // `op.impl` is typed `(...args: never[]) => QueryOperationReturn` to + // block accidental direct invocation; the practical shape is + // `(self, ...args) => Expression<...>`. Cast through `unknown` to + // bridge the framework's intentionally-narrow declared type. + const impl = op.impl as unknown as (...args: unknown[]) => { buildAst(): AnyExpression }; + return impl(...args).buildAst(); +} + +/** + * Build the same `Expression`-like shape the ORM model accessor + * synthesises for a column field (see + * `packages/3-extensions/sql-orm-client/src/model-accessor.ts:139-150`): + * an object with `buildAst()` returning the underlying `ColumnRef` plus + * the column's return type metadata. The operator impls call + * `toExpr(self)` which destructures `buildAst()` to get the AST node. + */ +export function columnAccessor( + table: string, + column: string, + codecId: string = CIPHERSTASH_STRING_CODEC_ID, +) { + const ref = ColumnRef.of(table, column); + return { + returnType: { codecId, nullable: true }, + buildAst: () => ref, + }; +} + +export function selectWithWhere(whereExpr: AnyExpression) { + return SelectAst.from(TableSource.named(TABLE)) + .withProjection([ProjectionItem.of('id', ColumnRef.of(TABLE, 'id'))]) + .withWhere(whereExpr); +} diff --git a/packages/prisma-next/test/operator-lowering.test.ts b/packages/prisma-next/test/operator-lowering.test.ts new file mode 100644 index 00000000..a48ea02a --- /dev/null +++ b/packages/prisma-next/test/operator-lowering.test.ts @@ -0,0 +1,253 @@ +/** + * Operator lowering — cross-cutting cipherstash predicates that don't + * fit into a single operator-family file: + * + * - `null short-circuit` — `WHERE col IS [NOT] NULL` lowers to a + * plain Postgres null check (no EQL function call); the + * cipherstash extension must not intercept null checks. 
Null-check + * methods construct `NullCheckExpr` directly and never enter the + * operator-registry dispatch path, so cipherstash does not need to + * register an extension handler. The snapshot pins the absence of + * any EQL function call. + * - `per-codec envelope dispatch` — trait-dispatched operators + * (`cipherstashGt`, `cipherstashNe`, …) wrap the user-supplied + * value in the envelope subclass that matches the column's codec + * id at impl time. Each row pins the dispatch is correct for one + * codec (string / double / bigint / date / boolean). + * - `cipherstashJsonbPathExists` — lowers to + * `eql_v2.jsonb_path_exists(col, $1)`. The path is a plain text + * bind, not an envelope. + * - `createCipherstashRuntimeDescriptor — queryOperations + * registration` — exposes the full cipherstash operator surface + * via the runtime descriptor. Names are cipherstash-prefixed so + * they coexist with the framework's built-in `eq` / `ilike` + * registrations rather than overriding them. Two registration + * shapes coexist (see ADR 214): single-codec (`cipherstashEq` / + * `cipherstashIlike` target the string codec by id) and + * trait-namespaced (every other operator targets a `cipherstash:*` + * trait, attached to every codec descriptor whose `traits` list + * contains that identifier). + * + * Single-codec operator families have their own files: + * - `operator-lowering-equality.test.ts` + * - `operator-lowering-text-search.test.ts` + * - `operator-lowering-order-range.test.ts` + * + * The shared adapter / contract / operator-invocation scaffolding + * lives in `operator-lowering.helpers.ts` and is reused across all + * four operator-lowering test files. + * + * Why we do not exercise the bulk-encrypt middleware here. The + * middleware reads `params.entries()` and stamps ciphertexts via + * `replaceValues` — a concern of the runtime's `beforeExecute` chain, + * not of the AST → SQL lowering. 
The middleware's contract is covered + * exhaustively by `bulk-encrypt-middleware.test.ts` and the SDK-call- + * counter assertion of `storage-roundtrip.e2e.integration.test.ts`. + * These snapshot tests assert only that the SQL shape produced by + * lowering would be a valid input to that middleware (a `ParamRef` + * carrying an `EncryptedString` envelope tagged with the cipherstash + * codec id). + */ + +import { ColumnRef, NullCheckExpr } from '@prisma-next/sql-relational-core/ast'; +import { describe, expect, it } from 'vitest'; +import { EncryptedBigInt } from '../src/execution/envelope-bigint'; +import { EncryptedBoolean } from '../src/execution/envelope-boolean'; +import { EncryptedDate } from '../src/execution/envelope-date'; +import { EncryptedDouble } from '../src/execution/envelope-double'; +import { createCipherstashRuntimeDescriptor } from '../src/exports/runtime'; +import { + CIPHERSTASH_BIGINT_CODEC_ID, + CIPHERSTASH_BOOLEAN_CODEC_ID, + CIPHERSTASH_DATE_CODEC_ID, + CIPHERSTASH_DOUBLE_CODEC_ID, + CIPHERSTASH_JSON_CODEC_ID, + CIPHERSTASH_STRING_CODEC_ID, +} from '../src/extension-metadata/constants'; +import { + COLUMN, + callOperator, + columnAccessor, + contract, + emptySdk, + getOperator, + makeAdapter, + selectWithWhere, + TABLE, +} from './operator-lowering.helpers'; + +describe('cipherstash operator lowering — null short-circuit', () => { + // The `isNull` / `isNotNull` ORM column methods construct + // `NullCheckExpr` directly (see + // `packages/3-extensions/sql-orm-client/src/types.ts:374-381`); they + // never enter the operator-registry dispatch path, so cipherstash + // does not need to register an extension handler. The snapshot pins + // the absence of any EQL function call — the lowering is the same + // shape Postgres uses for any other column type. 
+ + it('lowers email IS NULL to "user"."email" IS NULL — no EQL function call', () => { + const ast = selectWithWhere(NullCheckExpr.isNull(ColumnRef.of(TABLE, COLUMN))); + + const lowered = makeAdapter().lower(ast, { contract }); + + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT "user"."id" AS "id" FROM "user" WHERE "user"."email" IS NULL"`, + ); + expect(lowered.sql).not.toContain('eql_v2.'); + expect(lowered.params).toHaveLength(0); + }); + + it('lowers email IS NOT NULL to "user"."email" IS NOT NULL — no EQL function call', () => { + const ast = selectWithWhere(NullCheckExpr.isNotNull(ColumnRef.of(TABLE, COLUMN))); + + const lowered = makeAdapter().lower(ast, { contract }); + + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT "user"."id" AS "id" FROM "user" WHERE "user"."email" IS NOT NULL"`, + ); + expect(lowered.sql).not.toContain('eql_v2.'); + expect(lowered.params).toHaveLength(0); + }); +}); + +describe('cipherstash operator lowering — per-codec envelope dispatch', () => { + // Trait-dispatched operators wrap the user-supplied value in the + // envelope subclass that matches the column's codec id at impl + // time. Each row here pins the dispatch is correct for one codec. 
+ + it('cipherstashGt on a double column wraps the value in EncryptedDouble', () => { + const op = getOperator('cipherstashGt'); + const predicate = callOperator( + op, + columnAccessor(TABLE, 'score', CIPHERSTASH_DOUBLE_CODEC_ID), + 3.14, + ); + const lowered = makeAdapter().lower(selectWithWhere(predicate), { contract }); + expect(lowered.params).toHaveLength(1); + const envelope = lowered.params[0]; + expect(envelope).toBeInstanceOf(EncryptedDouble); + }); + + it('cipherstashGt on a bigint column wraps the value in EncryptedBigInt', () => { + const op = getOperator('cipherstashGt'); + const predicate = callOperator( + op, + columnAccessor(TABLE, 'amount', CIPHERSTASH_BIGINT_CODEC_ID), + 42n, + ); + const lowered = makeAdapter().lower(selectWithWhere(predicate), { contract }); + expect(lowered.params[0]).toBeInstanceOf(EncryptedBigInt); + }); + + it('cipherstashGt on a date column wraps the value in EncryptedDate', () => { + const op = getOperator('cipherstashGt'); + const predicate = callOperator( + op, + columnAccessor(TABLE, 'birthday', CIPHERSTASH_DATE_CODEC_ID), + new Date('2024-01-01'), + ); + const lowered = makeAdapter().lower(selectWithWhere(predicate), { contract }); + expect(lowered.params[0]).toBeInstanceOf(EncryptedDate); + }); + + it('cipherstashNe on a boolean column wraps the value in EncryptedBoolean', () => { + const op = getOperator('cipherstashNe'); + const predicate = callOperator( + op, + columnAccessor(TABLE, 'enabled', CIPHERSTASH_BOOLEAN_CODEC_ID), + true, + ); + const lowered = makeAdapter().lower(selectWithWhere(predicate), { contract }); + expect(lowered.params[0]).toBeInstanceOf(EncryptedBoolean); + }); + + it('cipherstashGt rejects a non-matching plaintext type for the column codec', () => { + const op = getOperator('cipherstashGt'); + // Passing a string to a double column triggers the per-codec + // envelope coercion's diagnostic. 
+ expect(() => + callOperator(op, columnAccessor(TABLE, 'score', CIPHERSTASH_DOUBLE_CODEC_ID), 'not-a-number'), + ).toThrow(/EncryptedDouble/); + }); +}); + +describe('cipherstash operator lowering — JSON path predicate', () => { + it('lowers cipherstashJsonbPathExists(path) to eql_v2.jsonb_path_exists(...)', () => { + const op = getOperator('cipherstashJsonbPathExists'); + const predicate = callOperator( + op, + columnAccessor(TABLE, 'payload', CIPHERSTASH_JSON_CODEC_ID), + '$.k', + ); + const lowered = makeAdapter().lower(selectWithWhere(predicate), { contract }); + expect(lowered.sql).toMatchInlineSnapshot( + `"SELECT "user"."id" AS "id" FROM "user" WHERE eql_v2.jsonb_path_exists("user"."payload", $1)"`, + ); + // Path is a plain text bind — no envelope wrapping. + expect(lowered.params).toEqual(['$.k']); + }); + + it('cipherstashJsonbPathExists rejects non-string path arguments', () => { + const op = getOperator('cipherstashJsonbPathExists'); + expect(() => + callOperator(op, columnAccessor(TABLE, 'payload', CIPHERSTASH_JSON_CODEC_ID), 42), + ).toThrow(/string path/); + }); +}); + +describe('createCipherstashRuntimeDescriptor — queryOperations registration', () => { + it('exposes the full cipherstash operator surface via the runtime descriptor', () => { + // Names are cipherstash-prefixed so they coexist with the + // framework's built-in `eq` / `ilike` registrations rather than + // overriding them. The trade-off is documented in + // `src/execution/operators.ts`'s top-level docblock. + // + // Two registration shapes coexist (see ADR 214): + // - Single-codec: `cipherstashEq` / `cipherstashIlike` (the + // original predicate pair) target the string codec by id. + // - Trait-namespaced: every other operator targets a + // `cipherstash:*` trait. The model accessor attaches the + // operator to every codec descriptor whose `traits` list + // contains that identifier. 
+ const descriptor = createCipherstashRuntimeDescriptor({ sdk: emptySdk() }); + const ops = descriptor.queryOperations?.() ?? {}; + const methods = Object.keys(ops).sort(); + expect(methods).toEqual([ + 'cipherstashBetween', + 'cipherstashEq', + 'cipherstashGt', + 'cipherstashGte', + 'cipherstashIlike', + 'cipherstashInArray', + 'cipherstashJsonbPathExists', + 'cipherstashLt', + 'cipherstashLte', + 'cipherstashNe', + 'cipherstashNotBetween', + 'cipherstashNotIlike', + 'cipherstashNotInArray', + ]); + for (const method of ['cipherstashEq', 'cipherstashIlike']) { + expect(ops[method]?.self).toEqual({ codecId: CIPHERSTASH_STRING_CODEC_ID }); + } + for (const method of ['cipherstashNe', 'cipherstashInArray', 'cipherstashNotInArray']) { + expect(ops[method]?.self).toEqual({ traits: ['cipherstash:equality'] }); + } + expect(ops['cipherstashNotIlike']?.self).toEqual({ + traits: ['cipherstash:free-text-search'], + }); + for (const method of [ + 'cipherstashGt', + 'cipherstashGte', + 'cipherstashLt', + 'cipherstashLte', + 'cipherstashBetween', + 'cipherstashNotBetween', + ]) { + expect(ops[method]?.self).toEqual({ traits: ['cipherstash:order-and-range'] }); + } + expect(ops['cipherstashJsonbPathExists']?.self).toEqual({ + traits: ['cipherstash:searchable-json'], + }); + }); +}); diff --git a/packages/prisma-next/test/psl-interpretation-numeric.test.ts b/packages/prisma-next/test/psl-interpretation-numeric.test.ts new file mode 100644 index 00000000..bf395981 --- /dev/null +++ b/packages/prisma-next/test/psl-interpretation-numeric.test.ts @@ -0,0 +1,175 @@ +/** + * PSL→ColumnTypeDescriptor lowering for the numeric cipherstash + * constructors: `cipherstash.EncryptedDouble` / `cipherstash.EncryptedBigInt`. + * + * Pinned behaviour for numeric codecs (shared by both): + * - Full args lower to `typeParams { equality, orderAndRange }`. + * - Empty `{}` (and the no-args form) defaults both flags to `true`. 
+ * - `freeTextSearch` is rejected with `PSL_INVALID_ATTRIBUTE_ARGUMENT` + * — numeric codecs do not expose the string-only flag. + * - The inline-form lowered descriptor mirrors the TS factory output + * byte-for-byte (PSL/TS parity). + */ + +import { parsePslDocument } from '@prisma-next/psl-parser'; +import { interpretPslDocumentToSqlContract } from '@prisma-next/sql-contract-psl'; +import { describe, expect, it } from 'vitest'; +import cipherstashControl from '../src/exports/control'; +import cipherstashPack from '../src/exports/pack'; + +const postgresTarget = { + kind: 'target' as const, + familyId: 'sql' as const, + targetId: 'postgres' as const, + id: 'postgres', + version: '0.0.1', + capabilities: {}, +}; + +const postgresScalarTypeDescriptors = new Map([ + ['String', { codecId: 'pg/text@1', nativeType: 'text' }], + ['Boolean', { codecId: 'pg/bool@1', nativeType: 'bool' }], + ['Int', { codecId: 'pg/int4@1', nativeType: 'int4' }], +]); + +function interpret(schema: string) { + return interpretPslDocumentToSqlContract({ + document: parsePslDocument({ schema, sourceId: 'schema.prisma' }), + target: postgresTarget, + scalarTypeDescriptors: postgresScalarTypeDescriptors, + composedExtensionPacks: [cipherstashControl.id], + authoringContributions: { type: cipherstashPack.authoring.type, field: {} }, + }); +} + +// The interpreter returns `Result<Contract, ContractSourceDiagnostics>` and +// `Contract.storage` is the opaque `StorageBase<string>`. Tests treat it as +// the structural shape it actually is (tables / types) — same pattern used +// by `packages/2-sql/2-authoring/contract-psl/test/interpreter.relations.test.ts`. 
+type StorageView = { + readonly tables: Record< + string, + { + readonly columns: Record<string, Record<string, unknown>>; + } + >; + readonly types?: Record<string, Record<string, unknown>>; +}; +const asStorage = (storage: unknown): StorageView => storage as StorageView; + +describe('PSL interpretation: cipherstash.EncryptedDouble constructor', () => { + it('lowers full args to a column with cipherstash/double@1 codec, eql_v2_encrypted nativeType', () => { + const result = interpret(`model Metric { + id Int @id + value cipherstash.EncryptedDouble({ equality: true, orderAndRange: true }) +} +`); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(asStorage(result.value.storage).tables['metric']?.columns['value']).toMatchObject({ + codecId: 'cipherstash/double@1', + nativeType: 'eql_v2_encrypted', + typeParams: { equality: true, orderAndRange: true }, + nullable: false, + }); + }); + + it('defaults both flags to true for an empty options literal', () => { + const result = interpret(`model Metric { + id Int @id + value cipherstash.EncryptedDouble({}) +} +`); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(asStorage(result.value.storage).tables['metric']?.columns['value']).toMatchObject({ + codecId: 'cipherstash/double@1', + typeParams: { equality: true, orderAndRange: true }, + }); + }); + + it('rejects unknown argument names with PSL_INVALID_ATTRIBUTE_ARGUMENT', () => { + const result = interpret(`model Metric { + id Int @id + value cipherstash.EncryptedDouble({ freeTextSearch: true }) +} +`); + expect(result.ok).toBe(false); + if (result.ok) return; + expect(result.failure.diagnostics).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + code: 'PSL_INVALID_ATTRIBUTE_ARGUMENT', + message: expect.stringContaining('freeTextSearch'), + }), + ]), + ); + }); + + it('produces an inline-form descriptor structurally identical to the TS factory output', () => { + const result = interpret(`model Metric { + id Int @id + value 
cipherstash.EncryptedDouble({ equality: true, orderAndRange: false }) +} +`); + expect(result.ok).toBe(true); + if (!result.ok) return; + const col = asStorage(result.value.storage).tables['metric']?.columns['value']; + // Stripping `nullable` (PSL-specific) the column descriptor mirrors + // the TS factory's lowered shape byte-for-byte (PSL/TS parity). + expect(col).toMatchObject({ + codecId: 'cipherstash/double@1', + nativeType: 'eql_v2_encrypted', + typeParams: { equality: true, orderAndRange: false }, + }); + }); +}); + +describe('PSL interpretation: cipherstash.EncryptedBigInt constructor', () => { + it('lowers full args to a column with cipherstash/bigint@1 codec, eql_v2_encrypted nativeType', () => { + const result = interpret(`model Ledger { + id Int @id + amount cipherstash.EncryptedBigInt({ equality: true, orderAndRange: true }) +} +`); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(asStorage(result.value.storage).tables['ledger']?.columns['amount']).toMatchObject({ + codecId: 'cipherstash/bigint@1', + nativeType: 'eql_v2_encrypted', + typeParams: { equality: true, orderAndRange: true }, + }); + }); + + it('defaults both flags to true with no arguments', () => { + const result = interpret(`model Ledger { + id Int @id + amount cipherstash.EncryptedBigInt() +} +`); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(asStorage(result.value.storage).tables['ledger']?.columns['amount']).toMatchObject({ + codecId: 'cipherstash/bigint@1', + typeParams: { equality: true, orderAndRange: true }, + }); + }); + + it('rejects unknown argument names with PSL_INVALID_ATTRIBUTE_ARGUMENT', () => { + const result = interpret(`model Ledger { + id Int @id + amount cipherstash.EncryptedBigInt({ freeTextSearch: true }) +} +`); + expect(result.ok).toBe(false); + if (result.ok) return; + expect(result.failure.diagnostics).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + code: 'PSL_INVALID_ATTRIBUTE_ARGUMENT', + message: 
expect.stringContaining('freeTextSearch'), + }), + ]), + ); + }); +}); diff --git a/packages/prisma-next/test/psl-interpretation-other-types.test.ts b/packages/prisma-next/test/psl-interpretation-other-types.test.ts new file mode 100644 index 00000000..e9eca947 --- /dev/null +++ b/packages/prisma-next/test/psl-interpretation-other-types.test.ts @@ -0,0 +1,188 @@ +/** + * PSL→ColumnTypeDescriptor lowering for the date, boolean, and JSON + * cipherstash constructors: + * + * - `cipherstash.EncryptedDate` — `{ equality, orderAndRange }` + * - `cipherstash.EncryptedBoolean` — `{ equality }` only; + * `orderAndRange` is rejected with `PSL_INVALID_ATTRIBUTE_ARGUMENT`. + * - `cipherstash.EncryptedJson` — `{ searchableJson }`; + * `equality` is rejected with `PSL_INVALID_ATTRIBUTE_ARGUMENT`. + * + * Empty `{}` (and the no-args form) defaults the codec's flag(s) to + * `true` in every case. + */ + +import { parsePslDocument } from '@prisma-next/psl-parser'; +import { interpretPslDocumentToSqlContract } from '@prisma-next/sql-contract-psl'; +import { describe, expect, it } from 'vitest'; +import cipherstashControl from '../src/exports/control'; +import cipherstashPack from '../src/exports/pack'; + +const postgresTarget = { + kind: 'target' as const, + familyId: 'sql' as const, + targetId: 'postgres' as const, + id: 'postgres', + version: '0.0.1', + capabilities: {}, +}; + +const postgresScalarTypeDescriptors = new Map([ + ['String', { codecId: 'pg/text@1', nativeType: 'text' }], + ['Boolean', { codecId: 'pg/bool@1', nativeType: 'bool' }], + ['Int', { codecId: 'pg/int4@1', nativeType: 'int4' }], +]); + +function interpret(schema: string) { + return interpretPslDocumentToSqlContract({ + document: parsePslDocument({ schema, sourceId: 'schema.prisma' }), + target: postgresTarget, + scalarTypeDescriptors: postgresScalarTypeDescriptors, + composedExtensionPacks: [cipherstashControl.id], + authoringContributions: { type: cipherstashPack.authoring.type, field: {} }, + }); +} + +// 
The interpreter returns `Result<Contract, ContractSourceDiagnostics>` and +// `Contract.storage` is the opaque `StorageBase<string>`. Tests treat it as +// the structural shape it actually is (tables / types) — same pattern used +// by `packages/2-sql/2-authoring/contract-psl/test/interpreter.relations.test.ts`. +type StorageView = { + readonly tables: Record< + string, + { + readonly columns: Record<string, Record<string, unknown>>; + } + >; + readonly types?: Record<string, Record<string, unknown>>; +}; +const asStorage = (storage: unknown): StorageView => storage as StorageView; + +describe('PSL interpretation: cipherstash.EncryptedDate constructor', () => { + it('lowers full args to a column with cipherstash/date@1 codec, eql_v2_encrypted nativeType', () => { + const result = interpret(`model Event { + id Int @id + occurredOn cipherstash.EncryptedDate({ equality: true, orderAndRange: true }) +} +`); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(asStorage(result.value.storage).tables['event']?.columns['occurredOn']).toMatchObject({ + codecId: 'cipherstash/date@1', + nativeType: 'eql_v2_encrypted', + typeParams: { equality: true, orderAndRange: true }, + }); + }); + + it('defaults both flags to true with no arguments', () => { + const result = interpret(`model Event { + id Int @id + occurredOn cipherstash.EncryptedDate() +} +`); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(asStorage(result.value.storage).tables['event']?.columns['occurredOn']).toMatchObject({ + codecId: 'cipherstash/date@1', + typeParams: { equality: true, orderAndRange: true }, + }); + }); +}); + +describe('PSL interpretation: cipherstash.EncryptedBoolean constructor', () => { + it('lowers full args to a column with cipherstash/boolean@1 codec, equality typeParam', () => { + const result = interpret(`model Feature { + id Int @id + enabled cipherstash.EncryptedBoolean({ equality: true }) +} +`); + expect(result.ok).toBe(true); + if (!result.ok) return; + 
expect(asStorage(result.value.storage).tables['feature']?.columns['enabled']).toMatchObject({ + codecId: 'cipherstash/boolean@1', + nativeType: 'eql_v2_encrypted', + typeParams: { equality: true }, + }); + }); + + it('defaults equality to true with no arguments', () => { + const result = interpret(`model Feature { + id Int @id + enabled cipherstash.EncryptedBoolean() +} +`); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(asStorage(result.value.storage).tables['feature']?.columns['enabled']).toMatchObject({ + codecId: 'cipherstash/boolean@1', + typeParams: { equality: true }, + }); + }); + + it('rejects orderAndRange (not a boolean codec flag)', () => { + const result = interpret(`model Feature { + id Int @id + enabled cipherstash.EncryptedBoolean({ orderAndRange: true }) +} +`); + expect(result.ok).toBe(false); + if (result.ok) return; + expect(result.failure.diagnostics).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + code: 'PSL_INVALID_ATTRIBUTE_ARGUMENT', + message: expect.stringContaining('orderAndRange'), + }), + ]), + ); + }); +}); + +describe('PSL interpretation: cipherstash.EncryptedJson constructor', () => { + it('lowers full args to a column with cipherstash/json@1 codec, searchableJson typeParam', () => { + const result = interpret(`model Audit { + id Int @id + payload cipherstash.EncryptedJson({ searchableJson: true }) +} +`); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(asStorage(result.value.storage).tables['audit']?.columns['payload']).toMatchObject({ + codecId: 'cipherstash/json@1', + nativeType: 'eql_v2_encrypted', + typeParams: { searchableJson: true }, + }); + }); + + it('defaults searchableJson to true with no arguments', () => { + const result = interpret(`model Audit { + id Int @id + payload cipherstash.EncryptedJson() +} +`); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(asStorage(result.value.storage).tables['audit']?.columns['payload']).toMatchObject({ + 
codecId: 'cipherstash/json@1', + typeParams: { searchableJson: true }, + }); + }); + + it('rejects equality (not a json codec flag)', () => { + const result = interpret(`model Audit { + id Int @id + payload cipherstash.EncryptedJson({ equality: true }) +} +`); + expect(result.ok).toBe(false); + if (result.ok) return; + expect(result.failure.diagnostics).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + code: 'PSL_INVALID_ATTRIBUTE_ARGUMENT', + message: expect.stringContaining('equality'), + }), + ]), + ); + }); +}); diff --git a/packages/prisma-next/test/psl-interpretation.test.ts b/packages/prisma-next/test/psl-interpretation.test.ts new file mode 100644 index 00000000..6f696084 --- /dev/null +++ b/packages/prisma-next/test/psl-interpretation.test.ts @@ -0,0 +1,309 @@ +/** + * Full PSL→ColumnTypeDescriptor lowering for the + * `cipherstash.EncryptedString({...})` constructor. + * + * Exercises the interpreter end-to-end (parser → authoring contributions + * → SQL contract IR) so the assertions are about *what users observe* + * in the emitted contract, not about the descriptor template metadata. + * + * Pinned behaviour: + * - Full args lower to `typeParams { equality, freeTextSearch, orderAndRange }`. + * - Empty `{}` (and the no-args form) defaults all three flags to `true` — + * searchable encryption is the legitimate default; users opt out + * explicitly with `equality: false` / `freeTextSearch: false` / + * `orderAndRange: false`. + * - `?` produces `nullable: true` on the column descriptor. + * - Unknown property name → `PSL_INVALID_ATTRIBUTE_ARGUMENT`. + * - Wrong type → `PSL_INVALID_ATTRIBUTE_ARGUMENT` mentioning + * "boolean"; diagnostic span points at the offending value. + * - `types { ... }` alias resolves and is reachable from a model + * field via `typeRef`; the alias's named-type descriptor matches + * the inline-form column's codec/nativeType/typeParams + * byte-for-byte. 
+ * + * Sister files cover the other cipherstash constructors: + * - `psl-interpretation-numeric.test.ts` + * (`EncryptedDouble`, `EncryptedBigInt`) + * - `psl-interpretation-other-types.test.ts` + * (`EncryptedDate`, `EncryptedBoolean`, `EncryptedJson`) + */ + +import { parsePslDocument } from '@prisma-next/psl-parser'; +import { interpretPslDocumentToSqlContract } from '@prisma-next/sql-contract-psl'; +import { describe, expect, it } from 'vitest'; +import cipherstashControl from '../src/exports/control'; +import cipherstashPack from '../src/exports/pack'; + +const postgresTarget = { + kind: 'target' as const, + familyId: 'sql' as const, + targetId: 'postgres' as const, + id: 'postgres', + version: '0.0.1', + capabilities: {}, +}; + +const postgresScalarTypeDescriptors = new Map([ + ['String', { codecId: 'pg/text@1', nativeType: 'text' }], + ['Boolean', { codecId: 'pg/bool@1', nativeType: 'bool' }], + ['Int', { codecId: 'pg/int4@1', nativeType: 'int4' }], +]); + +function interpret(schema: string) { + return interpretPslDocumentToSqlContract({ + document: parsePslDocument({ schema, sourceId: 'schema.prisma' }), + target: postgresTarget, + scalarTypeDescriptors: postgresScalarTypeDescriptors, + composedExtensionPacks: [cipherstashControl.id], + authoringContributions: { type: cipherstashPack.authoring.type, field: {} }, + }); +} + +// The interpreter returns `Result<Contract, ContractSourceDiagnostics>` and +// `Contract.storage` is the opaque `StorageBase<string>`. Tests treat it as +// the structural shape it actually is (tables / types) — same pattern used +// by `packages/2-sql/2-authoring/contract-psl/test/interpreter.relations.test.ts`. 
+type StorageView = { + readonly tables: Record< + string, + { + readonly columns: Record<string, Record<string, unknown>>; + } + >; + readonly types?: Record<string, Record<string, unknown>>; +}; +const asStorage = (storage: unknown): StorageView => storage as StorageView; + +describe('PSL interpretation: cipherstash.EncryptedString constructor', () => { + it('lowers full args to a column with codecId, nativeType, typeParams', () => { + const result = interpret(`model User { + id Int @id + email cipherstash.EncryptedString({ equality: true, freeTextSearch: true, orderAndRange: true }) +} +`); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(asStorage(result.value.storage).tables['user']?.columns['email']).toEqual( + expect.objectContaining({ + codecId: 'cipherstash/string@1', + nativeType: 'eql_v2_encrypted', + typeParams: { equality: true, freeTextSearch: true, orderAndRange: true }, + nullable: false, + }), + ); + }); + + it('defaults all flags to true for an empty options literal', () => { + const result = interpret(`model User { + id Int @id + notes cipherstash.EncryptedString({}) +} +`); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(asStorage(result.value.storage).tables['user']?.columns['notes']).toEqual( + expect.objectContaining({ + codecId: 'cipherstash/string@1', + nativeType: 'eql_v2_encrypted', + typeParams: { equality: true, freeTextSearch: true, orderAndRange: true }, + nullable: false, + }), + ); + }); + + it('defaults all flags to true when called with no arguments', () => { + const result = interpret(`model User { + id Int @id + notes cipherstash.EncryptedString() +} +`); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(asStorage(result.value.storage).tables['user']?.columns['notes']).toEqual( + expect.objectContaining({ + codecId: 'cipherstash/string@1', + nativeType: 'eql_v2_encrypted', + typeParams: { equality: true, freeTextSearch: true, orderAndRange: true }, + nullable: false, + }), + 
); + }); + + it('lets orderAndRange be explicitly disabled', () => { + const result = interpret(`model User { + id Int @id + notes cipherstash.EncryptedString({ orderAndRange: false }) +} +`); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(asStorage(result.value.storage).tables['user']?.columns['notes']).toEqual( + expect.objectContaining({ + codecId: 'cipherstash/string@1', + typeParams: { equality: true, freeTextSearch: true, orderAndRange: false }, + }), + ); + }); + + it('lets equality be explicitly disabled', () => { + const result = interpret(`model User { + id Int @id + notes cipherstash.EncryptedString({ equality: false }) +} +`); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(asStorage(result.value.storage).tables['user']?.columns['notes']).toMatchObject({ + codecId: 'cipherstash/string@1', + nativeType: 'eql_v2_encrypted', + typeParams: { equality: false, freeTextSearch: true }, + nullable: false, + }); + }); + + it('lets both flags be explicitly disabled (storage-only encryption)', () => { + const result = interpret(`model User { + id Int @id + notes cipherstash.EncryptedString({ equality: false, freeTextSearch: false }) +} +`); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(asStorage(result.value.storage).tables['user']?.columns['notes']).toMatchObject({ + codecId: 'cipherstash/string@1', + nativeType: 'eql_v2_encrypted', + typeParams: { equality: false, freeTextSearch: false }, + nullable: false, + }); + }); + + it('marks nullable columns as nullable', () => { + const result = interpret(`model User { + id Int @id + username cipherstash.EncryptedString({ freeTextSearch: false })? 
+} +`); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(asStorage(result.value.storage).tables['user']?.columns['username']).toMatchObject({ + codecId: 'cipherstash/string@1', + nativeType: 'eql_v2_encrypted', + typeParams: { equality: true, freeTextSearch: false }, + nullable: true, + }); + }); + + it('rejects unknown argument names with PSL_INVALID_ATTRIBUTE_ARGUMENT', () => { + const result = interpret(`model User { + id Int @id + email cipherstash.EncryptedString({ unknownFlag: true }) +} +`); + expect(result.ok).toBe(false); + if (result.ok) return; + expect(result.failure.diagnostics).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + code: 'PSL_INVALID_ATTRIBUTE_ARGUMENT', + message: expect.stringContaining('unknownFlag'), + }), + ]), + ); + }); + + it('rejects wrong-typed argument values with PSL_INVALID_ATTRIBUTE_ARGUMENT', () => { + const result = interpret(`model User { + id Int @id + email cipherstash.EncryptedString({ equality: "yes" }) +} +`); + expect(result.ok).toBe(false); + if (result.ok) return; + expect(result.failure.diagnostics).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + code: 'PSL_INVALID_ATTRIBUTE_ARGUMENT', + message: expect.stringContaining('boolean'), + }), + ]), + ); + }); + + it('resolves a named-type alias under types {} and uses it on a model field', () => { + const result = interpret(`types { + SearchableEmail = cipherstash.EncryptedString({ freeTextSearch: false }) +} + +model User { + id Int @id + email SearchableEmail +} +`); + expect(result.ok).toBe(true); + if (!result.ok) return; + const storage = asStorage(result.value.storage); + expect(storage.types?.['SearchableEmail']).toMatchObject({ + codecId: 'cipherstash/string@1', + nativeType: 'eql_v2_encrypted', + typeParams: { equality: true, freeTextSearch: false }, + }); + expect(storage.tables['user']?.columns['email']).toMatchObject({ + codecId: 'cipherstash/string@1', + nativeType: 'eql_v2_encrypted', + nullable: false, 
+ typeRef: 'SearchableEmail', + }); + }); + + it('produces an alias whose typeParams match the inline-constructor form for the same args', () => { + const aliasResult = interpret(`types { + SearchableEmail = cipherstash.EncryptedString({ equality: true, freeTextSearch: true }) +} + +model User { + id Int @id + email SearchableEmail +} +`); + const inlineResult = interpret(`model User { + id Int @id + email cipherstash.EncryptedString({ equality: true, freeTextSearch: true }) +} +`); + expect(aliasResult.ok).toBe(true); + expect(inlineResult.ok).toBe(true); + if (!aliasResult.ok || !inlineResult.ok) return; + + const aliasNamedType = asStorage(aliasResult.value.storage).types?.['SearchableEmail']; + const inlineCol = asStorage(inlineResult.value.storage).tables['user']?.columns['email']; + expect(inlineCol).toBeDefined(); + if (!inlineCol) return; + + // The named type's storage descriptor and the inline column's + // codec/nativeType/typeParams must agree byte-for-byte; the inline + // column carries `nullable` (and may carry `default`/etc.) which the + // named-type descriptor does not. 
+ expect(aliasNamedType).toEqual({ + codecId: inlineCol['codecId'], + nativeType: inlineCol['nativeType'], + typeParams: inlineCol['typeParams'], + }); + }); + + it('reports a span at the offending argument value', () => { + const result = interpret(`model User { + id Int @id + email cipherstash.EncryptedString({ equality: 42 }) +} +`); + expect(result.ok).toBe(false); + if (result.ok) return; + const diag = result.failure.diagnostics.find( + (d) => d.code === 'PSL_INVALID_ATTRIBUTE_ARGUMENT', + ); + expect(diag?.span).toMatchObject({ + start: { line: expect.any(Number), column: expect.any(Number) }, + end: { line: expect.any(Number), column: expect.any(Number) }, + }); + }); +}); diff --git a/packages/prisma-next/test/routing.test.ts b/packages/prisma-next/test/routing.test.ts new file mode 100644 index 00000000..033c4ed5 --- /dev/null +++ b/packages/prisma-next/test/routing.test.ts @@ -0,0 +1,107 @@ +/** + * Routing-key derivation for cipherstash bulk operations. + * + * The routing key is `(table, column)` derived from the envelope + * handle, with no per-column override surface. + * + * Tests cover: + * - `routingKeyId(...)` produces stable, collision-free string keys. + * - `getRoutingKey(envelope)` reads `(table, column)` from the + * envelope handle, throwing a routing-context diagnostic when the + * handle slots are unset (canonical "AST walk did not see this + * envelope" failure mode). + * - `groupByRoutingKey(targets)` collapses a homogeneous batch into + * one group, partitions a heterogeneous batch into per-key groups, + * and preserves within-group order (the canonical ParamRef order + * consumed by the renderer's `$N` index map and the encode-side + * metadata walk). 
+ */ + +import { describe, expect, it } from 'vitest'; +import { EncryptedString, setHandleRoutingKey } from '../src/execution/envelope-string'; +import { + type BulkEncryptTarget, + getRoutingKey, + groupByRoutingKey, + routingKeyId, +} from '../src/execution/routing'; + +function makeTarget(plaintext: string, table: string, column: string): BulkEncryptTarget { + const envelope = EncryptedString.from(plaintext); + setHandleRoutingKey(envelope, table, column); + return { + ref: Symbol(`${table}.${column}`), + plaintext, + envelope, + routingKey: { table, column }, + }; +} + +describe('routingKeyId — stable string identity per (table, column)', () => { + it('produces the same id for equal (table, column) pairs', () => { + expect(routingKeyId({ table: 'user', column: 'email' })).toBe( + routingKeyId({ table: 'user', column: 'email' }), + ); + }); + + it('produces distinct ids when the table or column differs', () => { + expect(routingKeyId({ table: 'user', column: 'email' })).not.toBe( + routingKeyId({ table: 'user', column: 'username' }), + ); + expect(routingKeyId({ table: 'user', column: 'email' })).not.toBe( + routingKeyId({ table: 'admin', column: 'email' }), + ); + }); + + it('does not collide on names that share a literal concatenation', () => { + const a = routingKeyId({ table: 'a', column: 'bc' }); + const b = routingKeyId({ table: 'ab', column: 'c' }); + expect(a).not.toBe(b); + }); +}); + +describe('getRoutingKey — reads (table, column) from envelope handle', () => { + it('returns the handle-stamped routing key', () => { + const envelope = EncryptedString.from('alice@example.com'); + setHandleRoutingKey(envelope, 'user', 'email'); + expect(getRoutingKey(envelope)).toEqual({ table: 'user', column: 'email' }); + }); + + it('throws with a routing-context diagnostic when the handle is unstamped', () => { + const envelope = EncryptedString.from('alice@example.com'); + expect(() => getRoutingKey(envelope)).toThrow(/routing context/); + }); +}); + 
+describe('groupByRoutingKey — one group per (table, column)', () => { + it('collapses N targets with one routing key into a single group', () => { + const targets = Array.from({ length: 5 }, (_, i) => makeTarget(`u${i}@x`, 'user', 'email')); + const groups = groupByRoutingKey(targets); + expect(groups.size).toBe(1); + const only = [...groups.values()][0]; + expect(only).toHaveLength(5); + expect(only?.map((t) => t.plaintext)).toEqual(['u0@x', 'u1@x', 'u2@x', 'u3@x', 'u4@x']); + }); + + it('partitions targets by routing key, preserving within-group order', () => { + const targets: BulkEncryptTarget[] = [ + makeTarget('a@x', 'user', 'email'), + makeTarget('b@y', 'admin', 'email'), + makeTarget('c@x', 'user', 'email'), + makeTarget('d@y', 'admin', 'email'), + makeTarget('e@u', 'user', 'username'), + ]; + const groups = groupByRoutingKey(targets); + expect(groups.size).toBe(3); + const userEmail = groups.get(routingKeyId({ table: 'user', column: 'email' })); + const adminEmail = groups.get(routingKeyId({ table: 'admin', column: 'email' })); + const userUsername = groups.get(routingKeyId({ table: 'user', column: 'username' })); + expect(userEmail?.map((t) => t.plaintext)).toEqual(['a@x', 'c@x']); + expect(adminEmail?.map((t) => t.plaintext)).toEqual(['b@y', 'd@y']); + expect(userUsername?.map((t) => t.plaintext)).toEqual(['e@u']); + }); + + it('returns an empty map for empty input', () => { + expect(groupByRoutingKey([]).size).toBe(0); + }); +}); diff --git a/packages/prisma-next/test/runtime-descriptor.test.ts b/packages/prisma-next/test/runtime-descriptor.test.ts new file mode 100644 index 00000000..03e063a8 --- /dev/null +++ b/packages/prisma-next/test/runtime-descriptor.test.ts @@ -0,0 +1,123 @@ +/** + * `createCipherstashRuntimeDescriptor({ sdk })` — the consumer-facing + * wrapper that composes the SDK-bound parameterized codec descriptor + * into a single `SqlRuntimeExtensionDescriptor<'postgres'>`. 
+ * + * The wrapper exposes the parameterized descriptor on + * `types.codecTypes.codecDescriptors` and through `codecs()`. The + * runtime extracts the descriptor at dispatch time and resolves a + * per-instance codec via `descriptor.factory(params)(ctx)`. The + * bulk-encrypt middleware ships separately under `./middleware`. + * + * Mirrors the pgvector wrapper at + * `packages/3-extensions/pgvector/src/exports/runtime.ts:62-88`. + */ + +import { describe, expect, it, vi } from 'vitest'; +import type { CipherstashSdk } from '../src/execution/sdk'; +import { + CIPHERSTASH_EXTENSION_VERSION, + createCipherstashRuntimeDescriptor, +} from '../src/exports/runtime'; +import { + CIPHERSTASH_SPACE_ID, + CIPHERSTASH_STRING_CODEC_ID, +} from '../src/extension-metadata/constants'; + +function emptySdk(): CipherstashSdk { + return { + decrypt: vi.fn(), + bulkEncrypt: vi.fn(), + bulkDecrypt: vi.fn(), + }; +} + +describe('createCipherstashRuntimeDescriptor — descriptor shape', () => { + it('declares kind=extension with the cipherstash id, version, family, target', () => { + const descriptor = createCipherstashRuntimeDescriptor({ sdk: emptySdk() }); + expect(descriptor.kind).toBe('extension'); + expect(descriptor.id).toBe(CIPHERSTASH_SPACE_ID); + expect(descriptor.version).toBe(CIPHERSTASH_EXTENSION_VERSION); + expect(descriptor.familyId).toBe('sql'); + expect(descriptor.targetId).toBe('postgres'); + }); + + it('exposes the cipherstash codec descriptors under types.codecTypes.codecDescriptors', () => { + // The descriptor wires the full six-codec surface (string + + // double + bigint + date + boolean + json). The current count + + // ordering is pinned here so a missed wiring surfaces in unit + // tests instead of leaking through e2e. + const descriptor = createCipherstashRuntimeDescriptor({ sdk: emptySdk() }); + const codecDescriptors = descriptor.types?.codecTypes?.codecDescriptors ?? 
[]; + expect(codecDescriptors).toHaveLength(6); + expect(codecDescriptors[0]?.codecId).toBe(CIPHERSTASH_STRING_CODEC_ID); + expect(codecDescriptors[1]?.codecId).toBe('cipherstash/double@1'); + expect(codecDescriptors[2]?.codecId).toBe('cipherstash/bigint@1'); + expect(codecDescriptors[3]?.codecId).toBe('cipherstash/date@1'); + expect(codecDescriptors[4]?.codecId).toBe('cipherstash/boolean@1'); + expect(codecDescriptors[5]?.codecId).toBe('cipherstash/json@1'); + }); +}); + +describe('createCipherstashRuntimeDescriptor — codecs()', () => { + it('returns the parameterized codec descriptors in stable order', () => { + const descriptor = createCipherstashRuntimeDescriptor({ sdk: emptySdk() }); + const codecs = descriptor.codecs?.() ?? []; + expect(codecs).toHaveLength(6); + expect(codecs.map((c) => c.codecId)).toEqual([ + CIPHERSTASH_STRING_CODEC_ID, + 'cipherstash/double@1', + 'cipherstash/bigint@1', + 'cipherstash/date@1', + 'cipherstash/boolean@1', + 'cipherstash/json@1', + ]); + for (const c of codecs) { + expect(c.targetTypes).toEqual(['eql_v2_encrypted']); + // Per-codec `cipherstash:*` namespaced traits drive the + // multi-codec operator dispatch (see + // `extension-metadata/constants.ts` → + // `CIPHERSTASH_CODEC_TRAITS`); the framework `'equality'` trait + // is intentionally absent across every cipherstash codec so the + // built-in `eq` does not silently re-attach (see + // `equality-trait-removal.test.ts`). + const traits: ReadonlyArray<string> = c.traits ?? 
[]; + expect(traits.includes('equality')).toBe(false); + expect(traits.length).toBeGreaterThan(0); + for (const trait of traits) { + expect(trait.startsWith('cipherstash:')).toBe(true); + } + } + }); +}); + +describe('createCipherstashRuntimeDescriptor — create() returns a target-bound instance', () => { + it('returns a SqlRuntimeExtensionInstance carrying the SQL family and Postgres target', () => { + const descriptor = createCipherstashRuntimeDescriptor({ sdk: emptySdk() }); + const instance = descriptor.create(); + expect(instance.familyId).toBe('sql'); + expect(instance.targetId).toBe('postgres'); + }); +}); + +describe('createCipherstashRuntimeDescriptor — SDK isolation per descriptor', () => { + it('produces a different codec instance per invocation so per-tenant SDKs do not cross-talk', () => { + const a = createCipherstashRuntimeDescriptor({ sdk: emptySdk() }); + const b = createCipherstashRuntimeDescriptor({ sdk: emptySdk() }); + const codecA = a.codecs?.()[0]?.factory({ + equality: false, + freeTextSearch: false, + orderAndRange: false, + })({ + name: 'x.y', + }); + const codecB = b.codecs?.()[0]?.factory({ + equality: false, + freeTextSearch: false, + orderAndRange: false, + })({ + name: 'x.y', + }); + expect(codecA).not.toBe(codecB); + }); +}); diff --git a/packages/prisma-next/test/sdk-adapter.test.ts b/packages/prisma-next/test/sdk-adapter.test.ts new file mode 100644 index 00000000..6147a808 --- /dev/null +++ b/packages/prisma-next/test/sdk-adapter.test.ts @@ -0,0 +1,244 @@ +/** + * Behaviour pins for `createCipherstashSdk`. + * + * Uses a hand-built fake `EncryptionClient` (no live ZeroKMS) — every + * call returns a deterministic, inspectable result so the adapter's + * routing, coercion, and error-mapping logic can be observed at the + * boundary. 
+ */ + +import { encryptedColumn, encryptedTable } from '@cipherstash/stack/schema' +import type { EncryptionClient } from '@cipherstash/stack/client' +import { describe, expect, it, vi } from 'vitest' + +import { createCipherstashSdk } from '../src/stack/sdk-adapter' + +interface FakeBulkEncryptCall { + readonly plaintexts: ReadonlyArray<unknown> + readonly column: unknown + readonly table: unknown +} + +interface FakeClientHandle { + readonly client: EncryptionClient + readonly bulkEncryptCalls: FakeBulkEncryptCall[] + readonly bulkDecryptCalls: ReadonlyArray<unknown>[] + readonly decryptCalls: unknown[] +} + +function makeFakeClient(): FakeClientHandle { + const bulkEncryptCalls: FakeBulkEncryptCall[] = [] + const bulkDecryptCalls: ReadonlyArray<unknown>[] = [] + const decryptCalls: unknown[] = [] + + const client = { + bulkEncrypt: vi.fn(async (plaintexts: ReadonlyArray<{ plaintext: unknown }>, opts: { column: unknown; table: unknown }) => { + bulkEncryptCalls.push({ + plaintexts: plaintexts.map((p) => p.plaintext), + column: opts.column, + table: opts.table, + }) + return { + failure: null, + data: plaintexts.map((_, i) => ({ data: `ct-${i}` as unknown })), + } as { failure: null; data: ReadonlyArray<{ data: unknown }> } + }), + bulkDecrypt: vi.fn(async (payload: ReadonlyArray<{ data: unknown }>) => { + bulkDecryptCalls.push(payload.map((p) => p.data)) + return { + failure: null, + data: payload.map((p, i) => ({ id: i, data: `pt-${i}` as unknown })), + } as { failure: null; data: ReadonlyArray<{ id?: number; data?: unknown; error?: unknown }> } + }), + decrypt: vi.fn(async (ciphertext: unknown) => { + decryptCalls.push(ciphertext) + return { failure: null, data: 'pt-single' as unknown } + }), + } + + return { + client: client as unknown as EncryptionClient, + bulkEncryptCalls, + bulkDecryptCalls, + decryptCalls, + } +} + +const validEnvelope = { + v: 2, + i: { t: 'users', c: 'email' }, + c: 'ct-blob', +} + +describe('createCipherstashSdk — routing-key lookup', 
() => { + it('resolves a (table, column) routing key to the typed schema objects', async () => { + const users = encryptedTable('users', { + email: encryptedColumn('email').equality(), + }) + const fake = makeFakeClient() + const sdk = createCipherstashSdk(fake.client, [users]) + + await sdk.bulkEncrypt({ + routingKey: { table: 'users', column: 'email' }, + values: ['alice'], + }) + + expect(fake.bulkEncryptCalls).toHaveLength(1) + expect(fake.bulkEncryptCalls[0]?.table).toBe(users) + expect(fake.bulkEncryptCalls[0]?.column).toBe(users.email) + expect(fake.bulkEncryptCalls[0]?.plaintexts).toEqual(['alice']) + }) + + it('throws a clear error when the routing-key table is unknown', async () => { + const users = encryptedTable('users', { + email: encryptedColumn('email').equality(), + }) + const fake = makeFakeClient() + const sdk = createCipherstashSdk(fake.client, [users]) + + await expect( + sdk.bulkEncrypt({ + routingKey: { table: 'audit_log', column: 'message' }, + values: ['x'], + }), + ).rejects.toThrow(/routing-key table "audit_log"/) + }) + + it('throws a clear error when the routing-key column is unknown on a known table', async () => { + const users = encryptedTable('users', { + email: encryptedColumn('email').equality(), + }) + const fake = makeFakeClient() + const sdk = createCipherstashSdk(fake.client, [users]) + + await expect( + sdk.bulkEncrypt({ + routingKey: { table: 'users', column: 'phone' }, + values: ['x'], + }), + ).rejects.toThrow(/column "phone" is not on stack table "users"/) + }) +}) + +describe('createCipherstashSdk — plaintext coercion at the boundary', () => { + it('coerces bigint to Number when in the safe-integer range', async () => { + const accounts = encryptedTable('accounts', { + id: encryptedColumn('id').dataType('bigint').equality(), + }) + const fake = makeFakeClient() + const sdk = createCipherstashSdk(fake.client, [accounts]) + + await sdk.bulkEncrypt({ + routingKey: { table: 'accounts', column: 'id' }, + values: [123_456n, 0n, 
-9_007_199_254_740_991n], + }) + + expect(fake.bulkEncryptCalls[0]?.plaintexts).toEqual([ + 123_456, 0, -9_007_199_254_740_991, + ]) + }) + + it('throws on bigint overflow rather than truncating silently', async () => { + const accounts = encryptedTable('accounts', { + id: encryptedColumn('id').dataType('bigint').equality(), + }) + const fake = makeFakeClient() + const sdk = createCipherstashSdk(fake.client, [accounts]) + + await expect( + sdk.bulkEncrypt({ + routingKey: { table: 'accounts', column: 'id' }, + // 2^54 — one past Number.MAX_SAFE_INTEGER + values: [BigInt(2) ** BigInt(54)], + }), + ).rejects.toThrow(/exceeds Number\.MAX_SAFE_INTEGER/) + }) + + it('coerces Date to an ISO 8601 string', async () => { + const events = encryptedTable('events', { + at: encryptedColumn('at').dataType('date').equality(), + }) + const fake = makeFakeClient() + const sdk = createCipherstashSdk(fake.client, [events]) + + await sdk.bulkEncrypt({ + routingKey: { table: 'events', column: 'at' }, + values: [new Date('2026-05-13T08:00:00.000Z')], + }) + + expect(fake.bulkEncryptCalls[0]?.plaintexts).toEqual([ + '2026-05-13T08:00:00.000Z', + ]) + }) + + it('passes string / number / boolean / object plaintexts through unchanged', async () => { + const t = encryptedTable('t', { + c: encryptedColumn('c').equality(), + }) + const fake = makeFakeClient() + const sdk = createCipherstashSdk(fake.client, [t]) + + await sdk.bulkEncrypt({ + routingKey: { table: 't', column: 'c' }, + values: ['s', 1, true, { k: 'v' }], + }) + + expect(fake.bulkEncryptCalls[0]?.plaintexts).toEqual([ + 's', + 1, + true, + { k: 'v' }, + ]) + }) +}) + +describe('createCipherstashSdk — bulkDecrypt envelope validation', () => { + it('rejects ciphertext values that are not EQL v2 envelopes with a clear error', async () => { + const t = encryptedTable('t', { c: encryptedColumn('c').equality() }) + const fake = makeFakeClient() + const sdk = createCipherstashSdk(fake.client, [t]) + + await expect( + sdk.bulkDecrypt({ + 
routingKey: { table: 't', column: 'c' }, + ciphertexts: [validEnvelope, { not: 'an envelope' }], + }), + ).rejects.toThrow(/at index 1.*not a valid EQL v2 envelope/) + }) + + it('forwards valid envelopes to the underlying client.bulkDecrypt', async () => { + const t = encryptedTable('t', { c: encryptedColumn('c').equality() }) + const fake = makeFakeClient() + const sdk = createCipherstashSdk(fake.client, [t]) + + const result = await sdk.bulkDecrypt({ + routingKey: { table: 't', column: 'c' }, + ciphertexts: [validEnvelope, validEnvelope], + }) + + expect(fake.bulkDecryptCalls).toHaveLength(1) + expect(fake.bulkDecryptCalls[0]).toEqual([validEnvelope, validEnvelope]) + expect(result).toEqual(['pt-0', 'pt-1']) + }) +}) + +describe('createCipherstashSdk — error mapping', () => { + it('propagates underlying client failures as Error with the failure message', async () => { + const t = encryptedTable('t', { c: encryptedColumn('c').equality() }) + const failingClient = { + bulkEncrypt: async () => ({ + failure: { message: 'workspace credentials missing' }, + }), + bulkDecrypt: async () => ({ failure: null, data: [] }), + decrypt: async () => ({ failure: null, data: '' }), + } as unknown as EncryptionClient + const sdk = createCipherstashSdk(failingClient, [t]) + + await expect( + sdk.bulkEncrypt({ + routingKey: { table: 't', column: 'c' }, + values: ['x'], + }), + ).rejects.toThrow(/workspace credentials missing/) + }) +}) diff --git a/packages/prisma-next/test/sdk.types.test-d.ts b/packages/prisma-next/test/sdk.types.test-d.ts new file mode 100644 index 00000000..3b42bc24 --- /dev/null +++ b/packages/prisma-next/test/sdk.types.test-d.ts @@ -0,0 +1,77 @@ +/** + * Type-shape tests pinning the polymorphic `CipherstashSdk` boundary. + * + * Each batch the SDK sees is homogeneously typed by its `(table, column)` + * routing key, so the SDK accepts and returns `ReadonlyArray<unknown>` + * — no per-batch `cast_as` hint is needed at the framework boundary. 
+ * + * Negative cases use `@ts-expect-error` per `AGENTS.md § Typesafety + * rules` (negative type tests are the documented carve-out). + */ + +import type { + CipherstashBulkDecryptArgs, + CipherstashBulkEncryptArgs, + CipherstashSdk, +} from '../src/execution/sdk'; + +declare const sdk: CipherstashSdk; +declare const routingKey: { readonly table: string; readonly column: string }; +declare const unknownValues: ReadonlyArray<unknown>; +declare const unknownCiphertexts: ReadonlyArray<unknown>; +declare const stringValues: ReadonlyArray<string>; +declare const numberValues: ReadonlyArray<number>; +declare const dateValues: ReadonlyArray<Date>; + +// --- Positive: polymorphic in / out ---------------------------------- + +const _encryptUnknown: Promise<ReadonlyArray<unknown>> = sdk.bulkEncrypt({ + routingKey, + values: unknownValues, +}); +void _encryptUnknown; + +const _decryptUnknown: Promise<ReadonlyArray<unknown>> = sdk.bulkDecrypt({ + routingKey, + ciphertexts: unknownCiphertexts, +}); +void _decryptUnknown; + +// Concrete subtypes flow in via natural variance — no per-codec adapter +// is required at the framework boundary. +void sdk.bulkEncrypt({ routingKey, values: stringValues }); +void sdk.bulkEncrypt({ routingKey, values: numberValues }); +void sdk.bulkEncrypt({ routingKey, values: dateValues }); + +// Args expose `values` and `ciphertexts` as `ReadonlyArray<unknown>`. +const _argsAreUnknown: ReadonlyArray<unknown> = (null as unknown as CipherstashBulkEncryptArgs) + .values; +const _ciphertextsAreUnknown: ReadonlyArray<unknown> = ( + null as unknown as CipherstashBulkDecryptArgs +).ciphertexts; +void _argsAreUnknown; +void _ciphertextsAreUnknown; + +// --- Negative: a string-only `bulkEncrypt` rejects `ReadonlyArray<unknown>` + +// A hypothetical narrower contract: `values` typed as `ReadonlyArray<string>`. 
+// Callers who pass a polymorphic batch (the actual contract the SDK +// boundary commits to) no longer compile — proving the polymorphic +// shape is what makes the framework boundary work. +declare const narrowedBulkEncrypt: (args: { + readonly routingKey: { readonly table: string; readonly column: string }; + readonly values: ReadonlyArray<string>; +}) => Promise<ReadonlyArray<unknown>>; + +// @ts-expect-error — `ReadonlyArray<unknown>` is not assignable to +// `ReadonlyArray<string>`. The polymorphic SDK boundary exists +// precisely so non-string codecs (Double, Date, BigInt, ...) can pass +// their batches through without per-codec adapters. +void narrowedBulkEncrypt({ routingKey, values: unknownValues }); + +// `bulkDecrypt` has no symmetric negative case: a `Promise<ReadonlyArray<string>>` +// return is a *refinement* of the polymorphic `Promise<ReadonlyArray<unknown>>` +// return (covariance permits it). The framework boundary still requires the +// wide return so the per-envelope `parseDecryptedValue` hook can narrow each +// codec's plaintext to its own `T` (e.g. `EncryptedDate` returns a `Date`). +// Pin the wide-return shape via the positive `_decryptUnknown` check above. diff --git a/packages/prisma-next/tsconfig.json b/packages/prisma-next/tsconfig.json new file mode 100644 index 00000000..6ce239f2 --- /dev/null +++ b/packages/prisma-next/tsconfig.json @@ -0,0 +1,23 @@ +{ + "compilerOptions": { + "lib": ["ESNext"], + "target": "ESNext", + "module": "ESNext", + "moduleDetection": "force", + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "verbatimModuleSyntax": true, + "resolveJsonModule": true, + "noEmit": true, + "esModuleInterop": true, + "allowJs": true, + "strict": true, + "skipLibCheck": true, + "noFallthroughCasesInSwitch": true, + "noUnusedLocals": false, + "noUnusedParameters": false, + "rootDir": "." 
+ }, + "include": ["src/**/*.ts", "test/**/*.ts"], + "exclude": ["dist", "node_modules"] +} diff --git a/packages/prisma-next/tsup.config.ts b/packages/prisma-next/tsup.config.ts new file mode 100644 index 00000000..8448346a --- /dev/null +++ b/packages/prisma-next/tsup.config.ts @@ -0,0 +1,21 @@ +import { defineConfig } from 'tsup' + +export default defineConfig({ + entry: [ + 'src/exports/codec-types.ts', + 'src/exports/column-types.ts', + 'src/exports/control.ts', + 'src/exports/middleware.ts', + 'src/exports/migration.ts', + 'src/exports/operation-types.ts', + 'src/exports/pack.ts', + 'src/exports/runtime.ts', + 'src/exports/stack.ts', + ], + outDir: 'dist', + format: ['esm'], + sourcemap: true, + dts: true, + clean: true, + splitting: true, +}) diff --git a/packages/prisma-next/vitest.config.ts b/packages/prisma-next/vitest.config.ts new file mode 100644 index 00000000..51b7828b --- /dev/null +++ b/packages/prisma-next/vitest.config.ts @@ -0,0 +1,23 @@ +import { defineConfig } from 'vitest/config'; + +export default defineConfig({ + test: { + globals: true, + environment: 'node', + coverage: { + provider: 'v8', + reporter: ['text', 'json', 'html'], + include: ['src/**/*.ts'], + exclude: [ + 'dist/**', + 'test/**', + '**/*.test.ts', + '**/*.test-d.ts', + '**/*.config.ts', + '**/exports/**', + // Emitted contract artefact (typecheck-only). 
+ 'src/contract.d.ts', + ], + }, + }, +}); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 7484a9c5..7a303074 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -93,6 +93,64 @@ importers: specifier: catalog:repo version: 5.6.3 + examples/prisma: + dependencies: + '@cipherstash/prisma-next': + specifier: workspace:* + version: link:../../packages/prisma-next + '@cipherstash/stack': + specifier: workspace:* + version: link:../../packages/stack + '@prisma-next/adapter-postgres': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8(typanion@3.14.0) + '@prisma-next/contract': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/driver-postgres': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/family-sql': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/framework-components': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/postgres': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8(typanion@3.14.0) + '@prisma-next/sql-contract': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/sql-contract-psl': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/sql-orm-client': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/sql-runtime': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/target-postgres': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8(typanion@3.14.0) + dotenv: + specifier: ^16.4.5 + version: 16.6.1 + devDependencies: + '@prisma-next/cli': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8(typanion@3.14.0) + '@types/node': + specifier: ^22.15.12 + version: 22.19.3 + tsx: + specifier: catalog:repo + version: 4.19.3 + typescript: + specifier: catalog:repo + version: 5.6.3 + packages/bench: dependencies: '@cipherstash/stack': @@ -252,10 +310,6 @@ importers: next: specifier: ^14 || ^15 version: 15.5.10(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - optionalDependencies: - '@rollup/rollup-linux-x64-gnu': - specifier: 4.24.0 - version: 4.24.0 devDependencies: 
'@clerk/nextjs': specifier: catalog:security @@ -272,6 +326,86 @@ importers: vitest: specifier: catalog:repo version: 3.1.3(@types/node@22.19.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.19.3)(yaml@2.8.4) + optionalDependencies: + '@rollup/rollup-linux-x64-gnu': + specifier: 4.24.0 + version: 4.24.0 + + packages/prisma-next: + dependencies: + '@cipherstash/stack': + specifier: workspace:* + version: link:../stack + '@prisma-next/contract': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/family-sql': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/framework-components': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/migration-tools': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/sql-contract': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/sql-operations': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/sql-relational-core': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/sql-runtime': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/ts-render': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/utils': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + arktype: + specifier: ^2.1.29 + version: 2.2.0 + devDependencies: + '@prisma-next/adapter-postgres': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8(typanion@3.14.0) + '@prisma-next/cli': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8(typanion@3.14.0) + '@prisma-next/driver-postgres': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/psl-parser': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/sql-contract-psl': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/sql-contract-ts': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/sql-schema-ir': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8 + '@prisma-next/target-postgres': + specifier: 0.6.0-dev.8 + version: 0.6.0-dev.8(typanion@3.14.0) + pathe: + 
specifier: ^2.0.3 + version: 2.0.3 + tsup: + specifier: catalog:repo + version: 8.4.0(jiti@2.6.1)(postcss@8.5.6)(tsx@4.19.3)(typescript@5.6.3)(yaml@2.8.4) + typescript: + specifier: catalog:repo + version: 5.6.3 + vitest: + specifier: catalog:repo + version: 3.1.3(@types/node@22.19.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.19.3)(yaml@2.8.4) packages/protect: dependencies: @@ -293,10 +427,6 @@ importers: zod: specifier: ^3.24.2 version: 3.24.2 - optionalDependencies: - '@rollup/rollup-linux-x64-gnu': - specifier: 4.24.0 - version: 4.24.0 devDependencies: '@supabase/supabase-js': specifier: ^2.47.10 @@ -322,6 +452,10 @@ importers: vitest: specifier: catalog:repo version: 3.1.3(@types/node@22.19.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.19.3)(yaml@2.8.4) + optionalDependencies: + '@rollup/rollup-linux-x64-gnu': + specifier: 4.24.0 + version: 4.24.0 packages/protect-dynamodb: dependencies: @@ -396,7 +530,7 @@ importers: version: 16.4.7 drizzle-orm: specifier: ^0.45.2 - version: 0.45.2(@types/pg@8.16.0)(gel@2.2.0)(mysql2@3.16.0)(pg@8.16.3)(postgres@3.4.9) + version: 0.45.2(@types/pg@8.16.0)(gel@2.2.0)(mysql2@3.16.0)(pg@8.20.0)(postgres@3.4.9) execa: specifier: ^9.5.2 version: 9.6.1 @@ -483,6 +617,12 @@ packages: resolution: {integrity: sha512-60vepv88RwcJtSHrD6MjIL6Ta3SOYbgfnkHb+ppAVK+o9mXprRtulx7VlRl3lN3bbvysAfCS7WMVfhUYemB0IQ==} engines: {node: '>= 16'} + '@ark/schema@0.56.0': + resolution: {integrity: sha512-ECg3hox/6Z/nLajxXqNhgPtNdHWC9zNsDyskwO28WinoFEnWow4IsERNz9AnXRhTZJnYIlAJ4uGn3nlLk65vZA==} + + '@ark/util@0.56.0': + resolution: {integrity: sha512-BghfRC8b9pNs3vBoDJhcta0/c1J1rsoS1+HgVUreMFPdhz/CRAKReAu57YEllNaSy98rWAdY1gE+gFup7OXpgA==} + '@babel/runtime@7.26.10': resolution: {integrity: sha512-2WJMeRQPHKSPemqk/awGrAiuFfzBmOIPXKizAsVhWH9YJqLZ0H+HS4c8loHGgW6utJ3E/ejXQUsiGaQy2NZ9Fw==} engines: {node: '>=6.9.0'} @@ -509,24 +649,28 @@ packages: engines: {node: '>=14.21.3'} cpu: [arm64] os: [linux] + libc: [musl] 
'@biomejs/cli-linux-arm64@1.9.4': resolution: {integrity: sha512-fJIW0+LYujdjUgJJuwesP4EjIBl/N/TcOX3IvIHJQNsAqvV2CHIogsmA94BPG6jZATS4Hi+xv4SkBBQSt1N4/g==} engines: {node: '>=14.21.3'} cpu: [arm64] os: [linux] + libc: [glibc] '@biomejs/cli-linux-x64-musl@1.9.4': resolution: {integrity: sha512-gEhi/jSBhZ2m6wjV530Yy8+fNqG8PAinM3oV7CyO+6c3CEh16Eizm21uHVsyVBEB6RIM8JHIl6AGYCv6Q6Q9Tg==} engines: {node: '>=14.21.3'} cpu: [x64] os: [linux] + libc: [musl] '@biomejs/cli-linux-x64@1.9.4': resolution: {integrity: sha512-lRCJv/Vi3Vlwmbd6K+oQ0KhLHMAysN8lXoCI7XeHlxaajk06u7G+UsFSO01NAs5iYuWKmVZjmiOzJ0OJmGsMwg==} engines: {node: '>=14.21.3'} cpu: [x64] os: [linux] + libc: [glibc] '@biomejs/cli-win32-arm64@1.9.4': resolution: {integrity: sha512-tlbhLk+WXZmgwoIKwHIHEBZUwxml7bRJgk0X2sPyNR3S93cdRq6XulAZRQJ17FYGGzWne0fgrXBKpl7l4M87Hg==} @@ -612,16 +756,19 @@ packages: resolution: {integrity: sha512-PDpm1EHC1XzVtEDGzcyr0UXNca8IFkfPusqqVJ5CSpzCtlYipIClYui197zQ4NGMHIAQD168IEFOK2TROyb4Tw==} cpu: [arm64] os: [linux] + libc: [glibc] '@cipherstash/auth-linux-x64-gnu@0.36.0': resolution: {integrity: sha512-Gm20ezVlGmNrkMH4s+I+JT13hDRD6vEX3fu3VDQQhWUiYCdgbdVsNJQgOr6QMY1cJkkmGyNlQKfiCPn4zlqtMg==} cpu: [x64] os: [linux] + libc: [glibc] '@cipherstash/auth-linux-x64-musl@0.36.0': resolution: {integrity: sha512-RUQeLc19JnURAMEoemP3+2DyptK+pqNFrVGgiKKOMVql0SZDVMlN2IyFrTKJ2emv1yuf4Gr1+E4jIdKPR0Oh+g==} cpu: [x64] os: [linux] + libc: [musl] '@cipherstash/auth-win32-x64-msvc@0.36.0': resolution: {integrity: sha512-1mQ8E6YFy7frHkvrDmSixpy47EakGPRh4qgoXPgk9lqZnlbMECYZhoKWQEs5wa3tLGgiX5G6jKC3NQZsOOqEfQ==} @@ -667,9 +814,17 @@ packages: '@clack/core@0.4.2': resolution: {integrity: sha512-NYQfcEy8MWIxrT5Fj8nIVchfRFA26yYKJcvBS7WlUIlw2OmQOY9DhGGXMovyI5J5PpxrCPGkgUi207EBrjpBvg==} + '@clack/core@1.3.0': + resolution: {integrity: sha512-xJPHpAmEQUBrXSLx0gF+q5K/IyihXpsHZcha+jB+tyahsKRK3Dxo4D0coZDewHo12NhiuzC3dTtMPbm53GEAAA==} + engines: {node: '>= 20.12.0'} + '@clack/prompts@0.10.1': resolution: {integrity: 
sha512-Q0T02vx8ZM9XSv9/Yde0jTmmBQufZhPJfYAg2XrrrxWWaZgq1rr8nU8Hv710BQ1dhoP8rtY7YUdpGej2Qza/cw==} + '@clack/prompts@1.3.0': + resolution: {integrity: sha512-GgcWwRCs/xPtaqlMy8qRhPnZf9vlWcWZNHAitnVQ3yk7JmSralSiq5q07yaffYE8SogtDm7zFeKccx1QNVARpw==} + engines: {node: '>= 20.12.0'} + '@clerk/backend@2.33.3': resolution: {integrity: sha512-cgkFVEYFG2nZn4QDuYBhiAwPtMdo8Yj7DAtq/SBQ5C/ainh3uxNRDgUj4bFn52qJkWLiCkraYJIw1b8dEUbUBg==} engines: {node: '>=18.17.0'} @@ -705,6 +860,12 @@ packages: resolution: {integrity: sha512-t5ypYYDkT5TPaNIDjLnYk9GpkJgwNTBiS7h6FuUTjoySQtf7amNDS1A1eOu7NOcVpqiSeKg+0wzGxxcre00kMA==} engines: {node: '>=18.17.0'} + '@dagrejs/dagre@3.0.0': + resolution: {integrity: sha512-ZzhnTy1rfuoew9Ez3EIw4L2znPGnYYhfn8vc9c4oB8iw6QAsszbiU0vRhlxWPFnmmNSFAkrYeF1PhM5m4lAN0Q==} + + '@dagrejs/graphlib@4.0.1': + resolution: {integrity: sha512-IvcV6FduIIAmLwnH+yun+QtV36SC7mERqa86aClNqmMN09WhmPPYU8ckHrZBozErf+UvHPWOTJYaGYiIcs0DgA==} + '@drizzle-team/brocli@0.10.2': resolution: {integrity: sha512-z33Il7l5dKjUgGULTqBsQBQwckHh5AbIuxhdsIxDDiZAzBOrZO6q9ogcWC65kU382AfynTfgNumVcNIjuIua6w==} @@ -731,6 +892,12 @@ packages: cpu: [ppc64] os: [aix] + '@esbuild/aix-ppc64@0.28.0': + resolution: {integrity: sha512-lhRUCeuOyJQURhTxl4WkpFTjIsbDayJHih5kZC1giwE+MhIzAb7mEsQMqMf18rHLsrb5qI1tafG20mLxEWcWlA==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [aix] + '@esbuild/android-arm64@0.18.20': resolution: {integrity: sha512-Nz4rJcchGDtENV0eMKUNa6L12zz2zBDXuhj/Vjh18zGqB44Bi7MBMSXjgunJgjRhCmKOjnPuZp4Mb6OKqtMHLQ==} engines: {node: '>=12'} @@ -749,6 +916,12 @@ packages: cpu: [arm64] os: [android] + '@esbuild/android-arm64@0.28.0': + resolution: {integrity: sha512-+WzIXQOSaGs33tLEgYPYe/yQHf0WTU0X42Jca3y8NWMbUVhp7rUnw+vAsRC/QiDrdD31IszMrZy+qwPOPjd+rw==} + engines: {node: '>=18'} + cpu: [arm64] + os: [android] + '@esbuild/android-arm@0.18.20': resolution: {integrity: sha512-fyi7TDI/ijKKNZTUJAQqiG5T7YjJXgnzkURqmGj13C6dCqckZBLdl4h7bkhHt/t0WP+zO9/zwroDvANaOqO5Sw==} engines: {node: '>=12'} @@ -767,6 
+940,12 @@ packages: cpu: [arm] os: [android] + '@esbuild/android-arm@0.28.0': + resolution: {integrity: sha512-wqh0ByljabXLKHeWXYLqoJ5jKC4XBaw6Hk08OfMrCRd2nP2ZQ5eleDZC41XHyCNgktBGYMbqnrJKq/K/lzPMSQ==} + engines: {node: '>=18'} + cpu: [arm] + os: [android] + '@esbuild/android-x64@0.18.20': resolution: {integrity: sha512-8GDdlePJA8D6zlZYJV/jnrRAi6rOiNaCC/JclcXpB+KIuvfBN4owLtgzY2bsxnx666XjJx2kDPUmnTtR8qKQUg==} engines: {node: '>=12'} @@ -785,6 +964,12 @@ packages: cpu: [x64] os: [android] + '@esbuild/android-x64@0.28.0': + resolution: {integrity: sha512-+VJggoaKhk2VNNqVL7f6S189UzShHC/mR9EE8rDdSkdpN0KflSwWY/gWjDrNxxisg8Fp1ZCD9jLMo4m0OUfeUA==} + engines: {node: '>=18'} + cpu: [x64] + os: [android] + '@esbuild/darwin-arm64@0.18.20': resolution: {integrity: sha512-bxRHW5kHU38zS2lPTPOyuyTm+S+eobPUnTNkdJEfAddYgEcll4xkT8DB9d2008DtTbl7uJag2HuE5NZAZgnNEA==} engines: {node: '>=12'} @@ -803,6 +988,12 @@ packages: cpu: [arm64] os: [darwin] + '@esbuild/darwin-arm64@0.28.0': + resolution: {integrity: sha512-0T+A9WZm+bZ84nZBtk1ckYsOvyA3x7e2Acj1KdVfV4/2tdG4fzUp91YHx+GArWLtwqp77pBXVCPn2We7Letr0Q==} + engines: {node: '>=18'} + cpu: [arm64] + os: [darwin] + '@esbuild/darwin-x64@0.18.20': resolution: {integrity: sha512-pc5gxlMDxzm513qPGbCbDukOdsGtKhfxD1zJKXjCCcU7ju50O7MeAZ8c4krSJcOIJGFR+qx21yMMVYwiQvyTyQ==} engines: {node: '>=12'} @@ -821,6 +1012,12 @@ packages: cpu: [x64] os: [darwin] + '@esbuild/darwin-x64@0.28.0': + resolution: {integrity: sha512-fyzLm/DLDl/84OCfp2f/XQ4flmORsjU7VKt8HLjvIXChJoFFOIL6pLJPH4Yhd1n1gGFF9mPwtlN5Wf82DZs+LQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [darwin] + '@esbuild/freebsd-arm64@0.18.20': resolution: {integrity: sha512-yqDQHy4QHevpMAaxhhIwYPMv1NECwOvIpGCZkECn8w2WFHXjEwrBn3CeNIYsibZ/iZEUemj++M26W3cNR5h+Tw==} engines: {node: '>=12'} @@ -839,6 +1036,12 @@ packages: cpu: [arm64] os: [freebsd] + '@esbuild/freebsd-arm64@0.28.0': + resolution: {integrity: sha512-l9GeW5UZBT9k9brBYI+0WDffcRxgHQD8ShN2Ur4xWq/NFzUKm3k5lsH4PdaRgb2w7mI9u61nr2gI2mLI27Nh3Q==} + 
engines: {node: '>=18'} + cpu: [arm64] + os: [freebsd] + '@esbuild/freebsd-x64@0.18.20': resolution: {integrity: sha512-tgWRPPuQsd3RmBZwarGVHZQvtzfEBOreNuxEMKFcd5DaDn2PbBxfwLcj4+aenoh7ctXcbXmOQIn8HI6mCSw5MQ==} engines: {node: '>=12'} @@ -857,6 +1060,12 @@ packages: cpu: [x64] os: [freebsd] + '@esbuild/freebsd-x64@0.28.0': + resolution: {integrity: sha512-BXoQai/A0wPO6Es3yFJ7APCiKGc1tdAEOgeTNy3SsB491S3aHn4S4r3e976eUnPdU+NbdtmBuLncYir2tMU9Nw==} + engines: {node: '>=18'} + cpu: [x64] + os: [freebsd] + '@esbuild/linux-arm64@0.18.20': resolution: {integrity: sha512-2YbscF+UL7SQAVIpnWvYwM+3LskyDmPhe31pE7/aoTMFKKzIc9lLbyGUpmmb8a8AixOL61sQ/mFh3jEjHYFvdA==} engines: {node: '>=12'} @@ -875,6 +1084,12 @@ packages: cpu: [arm64] os: [linux] + '@esbuild/linux-arm64@0.28.0': + resolution: {integrity: sha512-RVyzfb3FWsGA55n6WY0MEIEPURL1FcbhFE6BffZEMEekfCzCIMtB5yyDcFnVbTnwk+CLAgTujmV/Lgvih56W+A==} + engines: {node: '>=18'} + cpu: [arm64] + os: [linux] + '@esbuild/linux-arm@0.18.20': resolution: {integrity: sha512-/5bHkMWnq1EgKr1V+Ybz3s1hWXok7mDFUMQ4cG10AfW3wL02PSZi5kFpYKrptDsgb2WAJIvRcDm+qIvXf/apvg==} engines: {node: '>=12'} @@ -893,6 +1108,12 @@ packages: cpu: [arm] os: [linux] + '@esbuild/linux-arm@0.28.0': + resolution: {integrity: sha512-CjaaREJagqJp7iTaNQjjidaNbCKYcd4IDkzbwwxtSvjI7NZm79qiHc8HqciMddQ6CKvJT6aBd8lO9kN/ZudLlw==} + engines: {node: '>=18'} + cpu: [arm] + os: [linux] + '@esbuild/linux-ia32@0.18.20': resolution: {integrity: sha512-P4etWwq6IsReT0E1KHU40bOnzMHoH73aXp96Fs8TIT6z9Hu8G6+0SHSw9i2isWrD2nbx2qo5yUqACgdfVGx7TA==} engines: {node: '>=12'} @@ -911,6 +1132,12 @@ packages: cpu: [ia32] os: [linux] + '@esbuild/linux-ia32@0.28.0': + resolution: {integrity: sha512-KBnSTt1kxl9x70q+ydterVdl+Cn0H18ngRMRCEQfrbqdUuntQQ0LoMZv47uB97NljZFzY6HcfqEZ2SAyIUTQBQ==} + engines: {node: '>=18'} + cpu: [ia32] + os: [linux] + '@esbuild/linux-loong64@0.18.20': resolution: {integrity: sha512-nXW8nqBTrOpDLPgPY9uV+/1DjxoQ7DoB2N8eocyq8I9XuqJ7BiAMDMf9n1xZM9TgW0J8zrquIb/A7s3BJv7rjg==} engines: 
{node: '>=12'} @@ -929,6 +1156,12 @@ packages: cpu: [loong64] os: [linux] + '@esbuild/linux-loong64@0.28.0': + resolution: {integrity: sha512-zpSlUce1mnxzgBADvxKXX5sl8aYQHo2ezvMNI8I0lbblJtp8V4odlm3Yzlj7gPyt3T8ReksE6bK+pT3WD+aJRg==} + engines: {node: '>=18'} + cpu: [loong64] + os: [linux] + '@esbuild/linux-mips64el@0.18.20': resolution: {integrity: sha512-d5NeaXZcHp8PzYy5VnXV3VSd2D328Zb+9dEq5HE6bw6+N86JVPExrA6O68OPwobntbNJ0pzCpUFZTo3w0GyetQ==} engines: {node: '>=12'} @@ -947,6 +1180,12 @@ packages: cpu: [mips64el] os: [linux] + '@esbuild/linux-mips64el@0.28.0': + resolution: {integrity: sha512-2jIfP6mmjkdmeTlsX/9vmdmhBmKADrWqN7zcdtHIeNSCH1SqIoNI63cYsjQR8J+wGa4Y5izRcSHSm8K3QWmk3w==} + engines: {node: '>=18'} + cpu: [mips64el] + os: [linux] + '@esbuild/linux-ppc64@0.18.20': resolution: {integrity: sha512-WHPyeScRNcmANnLQkq6AfyXRFr5D6N2sKgkFo2FqguP44Nw2eyDlbTdZwd9GYk98DZG9QItIiTlFLHJHjxP3FA==} engines: {node: '>=12'} @@ -965,6 +1204,12 @@ packages: cpu: [ppc64] os: [linux] + '@esbuild/linux-ppc64@0.28.0': + resolution: {integrity: sha512-bc0FE9wWeC0WBm49IQMPSPILRocGTQt3j5KPCA8os6VprfuJ7KD+5PzESSrJ6GmPIPJK965ZJHTUlSA6GNYEhg==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [linux] + '@esbuild/linux-riscv64@0.18.20': resolution: {integrity: sha512-WSxo6h5ecI5XH34KC7w5veNnKkju3zBRLEQNY7mv5mtBmrP/MjNBCAlsM2u5hDBlS3NGcTQpoBvRzqBcRtpq1A==} engines: {node: '>=12'} @@ -983,6 +1228,12 @@ packages: cpu: [riscv64] os: [linux] + '@esbuild/linux-riscv64@0.28.0': + resolution: {integrity: sha512-SQPZOwoTTT/HXFXQJG/vBX8sOFagGqvZyXcgLA3NhIqcBv1BJU1d46c0rGcrij2B56Z2rNiSLaZOYW5cUk7yLQ==} + engines: {node: '>=18'} + cpu: [riscv64] + os: [linux] + '@esbuild/linux-s390x@0.18.20': resolution: {integrity: sha512-+8231GMs3mAEth6Ja1iK0a1sQ3ohfcpzpRLH8uuc5/KVDFneH6jtAJLFGafpzpMRO6DzJ6AvXKze9LfFMrIHVQ==} engines: {node: '>=12'} @@ -1001,6 +1252,12 @@ packages: cpu: [s390x] os: [linux] + '@esbuild/linux-s390x@0.28.0': + resolution: {integrity: 
sha512-SCfR0HN8CEEjnYnySJTd2cw0k9OHB/YFzt5zgJEwa+wL/T/raGWYMBqwDNAC6dqFKmJYZoQBRfHjgwLHGSrn3Q==} + engines: {node: '>=18'} + cpu: [s390x] + os: [linux] + '@esbuild/linux-x64@0.18.20': resolution: {integrity: sha512-UYqiqemphJcNsFEskc73jQ7B9jgwjWrSayxawS6UVFZGWrAAtkzjxSqnoclCXxWtfwLdzU+vTpcNYhpn43uP1w==} engines: {node: '>=12'} @@ -1019,12 +1276,24 @@ packages: cpu: [x64] os: [linux] + '@esbuild/linux-x64@0.28.0': + resolution: {integrity: sha512-us0dSb9iFxIi8srnpl931Nvs65it/Jd2a2K3qs7fz2WfGPHqzfzZTfec7oxZJRNPXPnNYZtanmRc4AL/JwVzHQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [linux] + '@esbuild/netbsd-arm64@0.25.12': resolution: {integrity: sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==} engines: {node: '>=18'} cpu: [arm64] os: [netbsd] + '@esbuild/netbsd-arm64@0.28.0': + resolution: {integrity: sha512-CR/RYotgtCKwtftMwJlUU7xCVNg3lMYZ0RzTmAHSfLCXw3NtZtNpswLEj/Kkf6kEL3Gw+BpOekRX0BYCtklhUw==} + engines: {node: '>=18'} + cpu: [arm64] + os: [netbsd] + '@esbuild/netbsd-x64@0.18.20': resolution: {integrity: sha512-iO1c++VP6xUBUmltHZoMtCUdPlnPGdBom6IrO4gyKPFFVBKioIImVooR5I83nTew5UOYrk3gIJhbZh8X44y06A==} engines: {node: '>=12'} @@ -1043,12 +1312,24 @@ packages: cpu: [x64] os: [netbsd] + '@esbuild/netbsd-x64@0.28.0': + resolution: {integrity: sha512-nU1yhmYutL+fQ71Kxnhg8uEOdC0pwEW9entHykTgEbna2pw2dkbFSMeqjjyHZoCmt8SBkOSvV+yNmm94aUrrqw==} + engines: {node: '>=18'} + cpu: [x64] + os: [netbsd] + '@esbuild/openbsd-arm64@0.25.12': resolution: {integrity: sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==} engines: {node: '>=18'} cpu: [arm64] os: [openbsd] + '@esbuild/openbsd-arm64@0.28.0': + resolution: {integrity: sha512-cXb5vApOsRsxsEl4mcZ1XY3D4DzcoMxR/nnc4IyqYs0rTI8ZKmW6kyyg+11Z8yvgMfAEldKzP7AdP64HnSC/6g==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openbsd] + '@esbuild/openbsd-x64@0.18.20': resolution: {integrity: 
sha512-e5e4YSsuQfX4cxcygw/UCPIEP6wbIL+se3sxPdCiMbFLBWu0eiZOJ7WoD+ptCLrmjZBK1Wk7I6D/I3NglUGOxg==} engines: {node: '>=12'} @@ -1067,12 +1348,24 @@ packages: cpu: [x64] os: [openbsd] + '@esbuild/openbsd-x64@0.28.0': + resolution: {integrity: sha512-8wZM2qqtv9UP3mzy7HiGYNH/zjTA355mpeuA+859TyR+e+Tc08IHYpLJuMsfpDJwoLo1ikIJI8jC3GFjnRClzA==} + engines: {node: '>=18'} + cpu: [x64] + os: [openbsd] + '@esbuild/openharmony-arm64@0.25.12': resolution: {integrity: sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==} engines: {node: '>=18'} cpu: [arm64] os: [openharmony] + '@esbuild/openharmony-arm64@0.28.0': + resolution: {integrity: sha512-FLGfyizszcef5C3YtoyQDACyg95+dndv79i2EekILBofh5wpCa1KuBqOWKrEHZg3zrL3t5ouE5jgr94vA+Wb2w==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openharmony] + '@esbuild/sunos-x64@0.18.20': resolution: {integrity: sha512-kDbFRFp0YpTQVVrqUd5FTYmWo45zGaXe0X8E1G/LKFC0v8x0vWrhOWSLITcCn63lmZIxfOMXtCfti/RxN/0wnQ==} engines: {node: '>=12'} @@ -1091,6 +1384,12 @@ packages: cpu: [x64] os: [sunos] + '@esbuild/sunos-x64@0.28.0': + resolution: {integrity: sha512-1ZgjUoEdHZZl/YlV76TSCz9Hqj9h9YmMGAgAPYd+q4SicWNX3G5GCyx9uhQWSLcbvPW8Ni7lj4gDa1T40akdlw==} + engines: {node: '>=18'} + cpu: [x64] + os: [sunos] + '@esbuild/win32-arm64@0.18.20': resolution: {integrity: sha512-ddYFR6ItYgoaq4v4JmQQaAI5s7npztfV4Ag6NrhiaW0RrnOXqBkgwZLofVTlq1daVTQNhtI5oieTvkRPfZrePg==} engines: {node: '>=12'} @@ -1109,6 +1408,12 @@ packages: cpu: [arm64] os: [win32] + '@esbuild/win32-arm64@0.28.0': + resolution: {integrity: sha512-Q9StnDmQ/enxnpxCCLSg0oo4+34B9TdXpuyPeTedN/6+iXBJ4J+zwfQI28u/Jl40nOYAxGoNi7mFP40RUtkmUA==} + engines: {node: '>=18'} + cpu: [arm64] + os: [win32] + '@esbuild/win32-ia32@0.18.20': resolution: {integrity: sha512-Wv7QBi3ID/rROT08SABTS7eV4hX26sVduqDOTe1MvGMjNd3EjOz4b7zeexIR62GTIEKrfJXKL9LFxTYgkyeu7g==} engines: {node: '>=12'} @@ -1127,6 +1432,12 @@ packages: cpu: [ia32] os: [win32] + '@esbuild/win32-ia32@0.28.0': + resolution: 
{integrity: sha512-zF3ag/gfiCe6U2iczcRzSYJKH1DCI+ByzSENHlM2FcDbEeo5Zd2C86Aq0tKUYAJJ1obRP84ymxIAksZUcdztHA==} + engines: {node: '>=18'} + cpu: [ia32] + os: [win32] + '@esbuild/win32-x64@0.18.20': resolution: {integrity: sha512-kTdfRcSiDfQca/y9QIkng02avJ+NCaQvrMejlsB3RRv5sE9rRoeBPISaZpKxHELzRxZyLvNts1P27W3wV+8geQ==} engines: {node: '>=12'} @@ -1145,6 +1456,12 @@ packages: cpu: [x64] os: [win32] + '@esbuild/win32-x64@0.28.0': + resolution: {integrity: sha512-pEl1bO9mfAmIC+tW5btTmrKaujg3zGtUmWNdCw/xs70FBjwAL3o9OEKNHvNmnyylD6ubxUERiEhdsL0xBQ9efw==} + engines: {node: '>=18'} + cpu: [x64] + os: [win32] + '@hono/node-server@1.19.12': resolution: {integrity: sha512-txsUW4SQ1iilgE0l9/e9VQWmELXifEFvmdA1j6WFh/aFPj99hIntrSsq/if0UWyGVkmrRPKA1wCeP+UCr1B9Uw==} engines: {node: '>=18.14.1'} @@ -1181,89 +1498,105 @@ packages: resolution: {integrity: sha512-excjX8DfsIcJ10x1Kzr4RcWe1edC9PquDRRPx3YVCvQv+U5p7Yin2s32ftzikXojb1PIFc/9Mt28/y+iRklkrw==} cpu: [arm64] os: [linux] + libc: [glibc] '@img/sharp-libvips-linux-arm@1.2.4': resolution: {integrity: sha512-bFI7xcKFELdiNCVov8e44Ia4u2byA+l3XtsAj+Q8tfCwO6BQ8iDojYdvoPMqsKDkuoOo+X6HZA0s0q11ANMQ8A==} cpu: [arm] os: [linux] + libc: [glibc] '@img/sharp-libvips-linux-ppc64@1.2.4': resolution: {integrity: sha512-FMuvGijLDYG6lW+b/UvyilUWu5Ayu+3r2d1S8notiGCIyYU/76eig1UfMmkZ7vwgOrzKzlQbFSuQfgm7GYUPpA==} cpu: [ppc64] os: [linux] + libc: [glibc] '@img/sharp-libvips-linux-riscv64@1.2.4': resolution: {integrity: sha512-oVDbcR4zUC0ce82teubSm+x6ETixtKZBh/qbREIOcI3cULzDyb18Sr/Wcyx7NRQeQzOiHTNbZFF1UwPS2scyGA==} cpu: [riscv64] os: [linux] + libc: [glibc] '@img/sharp-libvips-linux-s390x@1.2.4': resolution: {integrity: sha512-qmp9VrzgPgMoGZyPvrQHqk02uyjA0/QrTO26Tqk6l4ZV0MPWIW6LTkqOIov+J1yEu7MbFQaDpwdwJKhbJvuRxQ==} cpu: [s390x] os: [linux] + libc: [glibc] '@img/sharp-libvips-linux-x64@1.2.4': resolution: {integrity: sha512-tJxiiLsmHc9Ax1bz3oaOYBURTXGIRDODBqhveVHonrHJ9/+k89qbLl0bcJns+e4t4rvaNBxaEZsFtSfAdquPrw==} cpu: [x64] os: [linux] + libc: [glibc] 
'@img/sharp-libvips-linuxmusl-arm64@1.2.4': resolution: {integrity: sha512-FVQHuwx1IIuNow9QAbYUzJ+En8KcVm9Lk5+uGUQJHaZmMECZmOlix9HnH7n1TRkXMS0pGxIJokIVB9SuqZGGXw==} cpu: [arm64] os: [linux] + libc: [musl] '@img/sharp-libvips-linuxmusl-x64@1.2.4': resolution: {integrity: sha512-+LpyBk7L44ZIXwz/VYfglaX/okxezESc6UxDSoyo2Ks6Jxc4Y7sGjpgU9s4PMgqgjj1gZCylTieNamqA1MF7Dg==} cpu: [x64] os: [linux] + libc: [musl] '@img/sharp-linux-arm64@0.34.5': resolution: {integrity: sha512-bKQzaJRY/bkPOXyKx5EVup7qkaojECG6NLYswgktOZjaXecSAeCWiZwwiFf3/Y+O1HrauiE3FVsGxFg8c24rZg==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm64] os: [linux] + libc: [glibc] '@img/sharp-linux-arm@0.34.5': resolution: {integrity: sha512-9dLqsvwtg1uuXBGZKsxem9595+ujv0sJ6Vi8wcTANSFpwV/GONat5eCkzQo/1O6zRIkh0m/8+5BjrRr7jDUSZw==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm] os: [linux] + libc: [glibc] '@img/sharp-linux-ppc64@0.34.5': resolution: {integrity: sha512-7zznwNaqW6YtsfrGGDA6BRkISKAAE1Jo0QdpNYXNMHu2+0dTrPflTLNkpc8l7MUP5M16ZJcUvysVWWrMefZquA==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [ppc64] os: [linux] + libc: [glibc] '@img/sharp-linux-riscv64@0.34.5': resolution: {integrity: sha512-51gJuLPTKa7piYPaVs8GmByo7/U7/7TZOq+cnXJIHZKavIRHAP77e3N2HEl3dgiqdD/w0yUfiJnII77PuDDFdw==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [riscv64] os: [linux] + libc: [glibc] '@img/sharp-linux-s390x@0.34.5': resolution: {integrity: sha512-nQtCk0PdKfho3eC5MrbQoigJ2gd1CgddUMkabUj+rBevs8tZ2cULOx46E7oyX+04WGfABgIwmMC0VqieTiR4jg==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [s390x] os: [linux] + libc: [glibc] '@img/sharp-linux-x64@0.34.5': resolution: {integrity: sha512-MEzd8HPKxVxVenwAa+JRPwEC7QFjoPWuS5NZnBt6B3pu7EG2Ge0id1oLHZpPJdn3OQK+BQDiw9zStiHBTJQQQQ==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [x64] os: [linux] + libc: [glibc] '@img/sharp-linuxmusl-arm64@0.34.5': resolution: {integrity: 
sha512-fprJR6GtRsMt6Kyfq44IsChVZeGN97gTD331weR1ex1c1rypDEABN6Tm2xa1wE6lYb5DdEnk03NZPqA7Id21yg==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm64] os: [linux] + libc: [musl] '@img/sharp-linuxmusl-x64@0.34.5': resolution: {integrity: sha512-Jg8wNT1MUzIvhBFxViqrEhWDGzqymo3sV7z7ZsaWbZNDLXRJZoRGrjulp60YYtV4wfY8VIKcWidjojlLcWrd8Q==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [x64] os: [linux] + libc: [musl] '@img/sharp-wasm32@0.34.5': resolution: {integrity: sha512-OdWTEiVkY2PHwqkbBI8frFxQQFekHaSSkUIJkwzclWZe64O1X4UlUjqqqLaPbUpMOQk6FBu/HtlGXNblIs0huw==} @@ -1355,24 +1688,28 @@ packages: engines: {node: '>= 10'} cpu: [arm64] os: [linux] + libc: [glibc] '@next/swc-linux-arm64-musl@15.5.7': resolution: {integrity: sha512-nfymt+SE5cvtTrG9u1wdoxBr9bVB7mtKTcj0ltRn6gkP/2Nu1zM5ei8rwP9qKQP0Y//umK+TtkKgNtfboBxRrw==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] + libc: [musl] '@next/swc-linux-x64-gnu@15.5.7': resolution: {integrity: sha512-hvXcZvCaaEbCZcVzcY7E1uXN9xWZfFvkNHwbe/n4OkRhFWrs1J1QV+4U1BN06tXLdaS4DazEGXwgqnu/VMcmqw==} engines: {node: '>= 10'} cpu: [x64] os: [linux] + libc: [glibc] '@next/swc-linux-x64-musl@15.5.7': resolution: {integrity: sha512-4IUO539b8FmF0odY6/SqANJdgwn1xs1GkPO5doZugwZ3ETF6JUdckk7RGmsfSf7ws8Qb2YB5It33mvNL/0acqA==} engines: {node: '>= 10'} cpu: [x64] os: [linux] + libc: [musl] '@next/swc-win32-arm64-msvc@15.5.7': resolution: {integrity: sha512-CpJVTkYI3ZajQkC5vajM7/ApKJUOlm6uP4BknM3XKvJ7VXAvCqSjSLmM0LKdYzn6nBJVSjdclx8nYJSa3xlTgQ==} @@ -1386,6 +1723,10 @@ packages: cpu: [x64] os: [win32] + '@noble/hashes@2.2.0': + resolution: {integrity: sha512-IYqDGiTXab6FniAgnSdZwgWbomxpy9FtYvLKs7wCUs2a8RkITG+DFGO1DM9cr+E3/RgADRpFjrKVaJ1z6sjtEg==} + engines: {node: '>= 20.19.0'} + '@nodelib/fs.scandir@2.1.5': resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} engines: {node: '>= 8'} @@ -1404,6 +1745,110 @@ packages: '@posthog/core@1.24.4': resolution: {integrity: 
sha512-S+TolwBHSSJz7WWtgaELQWQqXviSm3uf1e+qorWUts0bZcgPwWzhnmhCUZAhvn0NVpTQHDJ3epv+hHbPLl5dHg==} + '@prisma-next/adapter-postgres@0.6.0-dev.8': + resolution: {integrity: sha512-Y4c3PakfHTE33dJJjKBT4umWOCbo4yYaovsgdNGpm4rAR6D56qxNVDtsl6ILZT/p+Pytpa9anXjs3JYf1Fs5TA==} + + '@prisma-next/cli@0.6.0-dev.8': + resolution: {integrity: sha512-/1/+JPo2FI9K4YapfE2t2yTCDfFpCeW5v/JlMAnCZaH/WkOgHxk10uFwIGkgJQHQ1jKKhhqS4AEFv1eWRKjihw==} + hasBin: true + + '@prisma-next/config@0.6.0-dev.8': + resolution: {integrity: sha512-DdtKwTUQCuRhhLqb8e1icvrtYwpXmrJEgqYFcXPo3cEPzPxW+5M5mxQkoIVCly0yhCt1BKnznK06nwg4R0YSdQ==} + engines: {node: '>=20'} + + '@prisma-next/contract-authoring@0.6.0-dev.8': + resolution: {integrity: sha512-p7yfnCagxv7g7YVihIlGM1r+NSu+vLlrTaMe8FB8Th/jbV5wjYiyKF0FNMsxoGCd7XfA7q46qf6iINaO+26O5A==} + + '@prisma-next/contract@0.6.0-dev.8': + resolution: {integrity: sha512-2IIuFFFNXiPZwvtbysWmMfLfSVT0ehe5DsiCCVUX3sHxC7hqfn65e4dHXJoASp8aqHWmUeKlByDX34RVv2tI2Q==} + engines: {node: '>=20'} + + '@prisma-next/driver-postgres@0.6.0-dev.8': + resolution: {integrity: sha512-rqmuiwy5jUX8OxU/o9w3qxz3trf9URSFtPE5Kz5cHsb9i7fPaLJvAUGuOTew+Y4BOj3zrFuujCZyWLuoNiAlYg==} + + '@prisma-next/emitter@0.6.0-dev.8': + resolution: {integrity: sha512-3BoSGXhxIPumn7M2OdvcrB6JdV5w1ErFUXvVq1QJ4juc1B2/J5ZZO9CwnRUnzPUMZJaZxAsfPcX6mI/JT1m1lg==} + + '@prisma-next/errors@0.6.0-dev.8': + resolution: {integrity: sha512-AjRvbAh/N/MfIW/PqFjCqn3hHNSINyjZNai0A9KL56Sy1pTymapt/m1gt7udxPcLpPh2NsUmjuMQJV6HVERV5w==} + engines: {node: '>=20'} + + '@prisma-next/family-sql@0.6.0-dev.8': + resolution: {integrity: sha512-toFN9MJIWkHJw/Yn2ZFXRfUQSZSq6Hk5inUZeFl80hgQTZ9CvyLsIak8tdPqSA1NF15oDDl1k38x7ZeyZ0VqYw==} + + '@prisma-next/framework-components@0.6.0-dev.8': + resolution: {integrity: sha512-OGXw5/3lPA+OVTB/gb/KHicPfO2CptEc8QVT78cX53U5oTLCmmilU5xJwkmC6uEFbhy2Uk4NYB1xRHjU0PhSCw==} + + '@prisma-next/ids@0.6.0-dev.8': + resolution: {integrity: 
sha512-hmXJmR0ItlulYj2NurjqUkx12pMEgPashf+taltOf6d95mBQb524ESvL+4Pqo26fEx+DoCzS8nkExyiygEU9qw==} + + '@prisma-next/migration-tools@0.6.0-dev.8': + resolution: {integrity: sha512-qJeDnqEriBn0GWWsNuNUHhUQ+ArNLw1UBotkJ9p8rYU6NARM6o2gJtOs91nw05wkvDFIEkvzoagU6dcuhNptZA==} + engines: {node: '>=20'} + + '@prisma-next/operations@0.6.0-dev.8': + resolution: {integrity: sha512-TD7m+Ko6iFVu8wAnm4zVAOyLcqOysdwveZB23EKIQhKxjdRMjfiS3MbBf064AuV1TkD6dLhmCV6spTGgAoA8ug==} + engines: {node: '>=20'} + + '@prisma-next/postgres@0.6.0-dev.8': + resolution: {integrity: sha512-gqAp4e9vpDGAQqI1v5MwnIYvEQrZClZVFk3cfiFmFZw0UDlE7oOdbvPbLSbHYIkI1qeQpOobw9/1wpp30op7Yg==} + engines: {node: '>=20'} + + '@prisma-next/psl-parser@0.6.0-dev.8': + resolution: {integrity: sha512-bxQ9Q44c+LCQ+G+N+X1gKW8d7QzQ2fVCugE4t5IbMLKMFvpVYtLJ/grf3D2PNwEptpK08CX+aAkso9LoR3suYQ==} + + '@prisma-next/psl-printer@0.6.0-dev.8': + resolution: {integrity: sha512-e6Bzey9y3r8xpAHDNR0QegAiWUOVoH+9SP4Yl06ZRgxJBM04pw541hk+OHPkzTJqt9iekrDhVXW7lOQsl3viWQ==} + + '@prisma-next/sql-builder@0.6.0-dev.8': + resolution: {integrity: sha512-C7WSoDKDQZ8U473GBVkjGkVCCIEvwXJO6/ug70IIBG/KH6hjKSyNk5DWHpSrEAI3zVtmwaFfNMxR6Yt/+dUbaw==} + engines: {node: '>=20'} + + '@prisma-next/sql-contract-emitter@0.6.0-dev.8': + resolution: {integrity: sha512-9wrfgxqC+0aHF2I3C73zluiH48WPl5k4NYv7nAaQVEqveSUeZ6lyGK7+AV/46GaV7eKhdD5heBy7XQYbKE0rwQ==} + + '@prisma-next/sql-contract-psl@0.6.0-dev.8': + resolution: {integrity: sha512-JR0izkN3CaIBr5xmXus1wvH1eQcH1GLXLoJMDtuVSpEpJC5S2XjfB+/qKxpgAEyTM5nZvstBNSAT3f+uKufjew==} + + '@prisma-next/sql-contract-ts@0.6.0-dev.8': + resolution: {integrity: sha512-XRqdCEk8ywxct1Pnmw9dRV+Cxlnqzc5TXAaSaH6+kndVyZRrvd6H3w0z2qeoSDEYeeRbGNSAXgUI9a7SCMwYZQ==} + + '@prisma-next/sql-contract@0.6.0-dev.8': + resolution: {integrity: sha512-0wWPTCFpwcRyfmkb3LnHKFSREoVT3AVxwnS4sxxJZ06fXDApHVU54IWeWzpRN9mt1R2hVjBIbBcMhC8apx+Hkg==} + + '@prisma-next/sql-errors@0.6.0-dev.8': + resolution: {integrity: 
sha512-qQ5AAZUNymSB1xZTjocQApfu5PagFJLX3XPQtR5rOINFtgB3a7/3YXiKFHIPkTePRXywcarP8wUjnYBo+sNcGg==} + engines: {node: '>=20'} + + '@prisma-next/sql-operations@0.6.0-dev.8': + resolution: {integrity: sha512-uzxyBKu/9p9OOYIAlayuV5BYTCuOJPDlIeuxRORq6iIkiLSJsScaYFolWGd1brLzefP83+OUl0YosphA9bviHg==} + engines: {node: '>=20'} + + '@prisma-next/sql-orm-client@0.6.0-dev.8': + resolution: {integrity: sha512-zGLgFdn+dVhES4zQ/QFvWJTPZEYr995ulDuJut9oRu3hN/rDMw36S0Lumq3LMBOFWtQQ2IbHeR9SZuCFvuPLlA==} + engines: {node: '>=20'} + + '@prisma-next/sql-relational-core@0.6.0-dev.8': + resolution: {integrity: sha512-Zg/raUomgOzyz7C7MCsieESARfHk5rk3xEVenNlbb3oazR6i2eEIXzINN4icdZU2vQD8PUnfFyzLZw4YFOpDiQ==} + + '@prisma-next/sql-runtime@0.6.0-dev.8': + resolution: {integrity: sha512-5uFwOKJjsKBTWz3Y6yzJyC9au9BwByIRZ4wAE9k1D60sPlE0zgaNmzd7fLzS0pmvgpv05l4A8BKHi3m5kDxK7Q==} + + '@prisma-next/sql-schema-ir@0.6.0-dev.8': + resolution: {integrity: sha512-ggISridMa2om0TgYV1A0RF5ii6d83JmmsyTAoaybqmWwJ0tnjmidE+tRf2FD0DruIquEbVU/emb44v7bRk6F1Q==} + engines: {node: '>=20'} + + '@prisma-next/target-postgres@0.6.0-dev.8': + resolution: {integrity: sha512-+9zoNO53w9XyeeHGhRx5o7oc5axkMrEmp9oFy31pvlgBr10zqz4rwCCZhVLbDxiYBvPOKxKNNwMknIUmiEUB6w==} + + '@prisma-next/ts-render@0.6.0-dev.8': + resolution: {integrity: sha512-qA6bZVH0Q7Puw24cdhaRUtTq/nGRaM9uUo/Gf1lA/ZeRyqVwHyAMNSmRD9jxfO/bYHknpjcpAdd9btfmX+ONeg==} + engines: {node: '>=20'} + + '@prisma-next/utils@0.6.0-dev.8': + resolution: {integrity: sha512-DMf0TiVqQgIZqHnDwt+RTBwAotcyl9OQLXHVQlJhKEf2XMRXSWVCqpKecNsfn8ZdC7hLsv/SXmfRPOoxhAQArw==} + engines: {node: '>=20'} + '@rollup/rollup-android-arm-eabi@4.59.0': resolution: {integrity: sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==} cpu: [arm] @@ -1438,71 +1883,85 @@ packages: resolution: {integrity: sha512-t4ONHboXi/3E0rT6OZl1pKbl2Vgxf9vJfWgmUoCEVQVxhW6Cw/c8I6hbbu7DAvgp82RKiH7TpLwxnJeKv2pbsw==} cpu: [arm] os: [linux] + libc: [glibc] 
'@rollup/rollup-linux-arm-musleabihf@4.59.0': resolution: {integrity: sha512-CikFT7aYPA2ufMD086cVORBYGHffBo4K8MQ4uPS/ZnY54GKj36i196u8U+aDVT2LX4eSMbyHtyOh7D7Zvk2VvA==} cpu: [arm] os: [linux] + libc: [musl] '@rollup/rollup-linux-arm64-gnu@4.59.0': resolution: {integrity: sha512-jYgUGk5aLd1nUb1CtQ8E+t5JhLc9x5WdBKew9ZgAXg7DBk0ZHErLHdXM24rfX+bKrFe+Xp5YuJo54I5HFjGDAA==} cpu: [arm64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-arm64-musl@4.59.0': resolution: {integrity: sha512-peZRVEdnFWZ5Bh2KeumKG9ty7aCXzzEsHShOZEFiCQlDEepP1dpUl/SrUNXNg13UmZl+gzVDPsiCwnV1uI0RUA==} cpu: [arm64] os: [linux] + libc: [musl] '@rollup/rollup-linux-loong64-gnu@4.59.0': resolution: {integrity: sha512-gbUSW/97f7+r4gHy3Jlup8zDG190AuodsWnNiXErp9mT90iCy9NKKU0Xwx5k8VlRAIV2uU9CsMnEFg/xXaOfXg==} cpu: [loong64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-loong64-musl@4.59.0': resolution: {integrity: sha512-yTRONe79E+o0FWFijasoTjtzG9EBedFXJMl888NBEDCDV9I2wGbFFfJQQe63OijbFCUZqxpHz1GzpbtSFikJ4Q==} cpu: [loong64] os: [linux] + libc: [musl] '@rollup/rollup-linux-ppc64-gnu@4.59.0': resolution: {integrity: sha512-sw1o3tfyk12k3OEpRddF68a1unZ5VCN7zoTNtSn2KndUE+ea3m3ROOKRCZxEpmT9nsGnogpFP9x6mnLTCaoLkA==} cpu: [ppc64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-ppc64-musl@4.59.0': resolution: {integrity: sha512-+2kLtQ4xT3AiIxkzFVFXfsmlZiG5FXYW7ZyIIvGA7Bdeuh9Z0aN4hVyXS/G1E9bTP/vqszNIN/pUKCk/BTHsKA==} cpu: [ppc64] os: [linux] + libc: [musl] '@rollup/rollup-linux-riscv64-gnu@4.59.0': resolution: {integrity: sha512-NDYMpsXYJJaj+I7UdwIuHHNxXZ/b/N2hR15NyH3m2qAtb/hHPA4g4SuuvrdxetTdndfj9b1WOmy73kcPRoERUg==} cpu: [riscv64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-riscv64-musl@4.59.0': resolution: {integrity: sha512-nLckB8WOqHIf1bhymk+oHxvM9D3tyPndZH8i8+35p/1YiVoVswPid2yLzgX7ZJP0KQvnkhM4H6QZ5m0LzbyIAg==} cpu: [riscv64] os: [linux] + libc: [musl] '@rollup/rollup-linux-s390x-gnu@4.59.0': resolution: {integrity: 
sha512-oF87Ie3uAIvORFBpwnCvUzdeYUqi2wY6jRFWJAy1qus/udHFYIkplYRW+wo+GRUP4sKzYdmE1Y3+rY5Gc4ZO+w==} cpu: [s390x] os: [linux] + libc: [glibc] '@rollup/rollup-linux-x64-gnu@4.24.0': resolution: {integrity: sha512-ZXFk7M72R0YYFN5q13niV0B7G8/5dcQ9JDp8keJSfr3GoZeXEoMHP/HlvqROA3OMbMdfr19IjCeNAnPUG93b6A==} cpu: [x64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-x64-gnu@4.59.0': resolution: {integrity: sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==} cpu: [x64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-x64-musl@4.59.0': resolution: {integrity: sha512-2UdiwS/9cTAx7qIUZB/fWtToJwvt0Vbo0zmnYt7ED35KPg13Q0ym1g442THLC7VyI6JfYTP4PiSOWyoMdV2/xg==} cpu: [x64] os: [linux] + libc: [musl] '@rollup/rollup-openbsd-x64@4.59.0': resolution: {integrity: sha512-M3bLRAVk6GOwFlPTIxVBSYKUaqfLrn8l0psKinkCFxl4lQvOSz8ZrKDz2gxcBwHFpci0B6rttydI4IpS4IS/jQ==} @@ -1544,6 +2003,9 @@ packages: '@stablelib/base64@1.0.1': resolution: {integrity: sha512-1bnPQqSxSuc3Ii6MhBysoWCg58j97aUjuCSZrGSmDxNqtytIi0k8utUenAwTZN4V5mXXYGsVUI9zeBqy+jBOSQ==} + '@standard-schema/spec@1.1.0': + resolution: {integrity: sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==} + '@stricli/core@1.2.5': resolution: {integrity: sha512-+afyztQW7fwWkqmU2WQZbdc3LjnZThWYdtE0l+hykZ1Rvy7YGxZSvsVCS/wZ/2BNv117pQ9TU1GZZRIcPnB4tw==} @@ -1666,6 +2128,10 @@ packages: resolution: {integrity: sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==} engines: {node: '>=12'} + ansi-styles@6.2.3: + resolution: {integrity: sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==} + engines: {node: '>=12'} + any-promise@1.3.0: resolution: {integrity: sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==} @@ -1675,6 +2141,12 @@ packages: argparse@2.0.1: resolution: {integrity: 
sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + arkregex@0.0.5: + resolution: {integrity: sha512-ncYjBdLlh5/QnVsAA8De16Tc9EqmYM7y/WU9j+236KcyYNUXogpz3sC4ATIZYzzLxwI+0sEOaQLEmLmRleaEXw==} + + arktype@2.2.0: + resolution: {integrity: sha512-t54MZ7ti5BhOEvzEkgKnWvqj+UbDfWig+DHr5I34xatymPusKLS0lQpNJd8M6DzmIto2QGszHfNKoFIT8tMCZQ==} + array-union@2.1.0: resolution: {integrity: sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==} engines: {node: '>=8'} @@ -1720,6 +2192,14 @@ packages: resolution: {integrity: sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==} engines: {node: '>= 0.8'} + c12@3.3.4: + resolution: {integrity: sha512-cM0ApFQSBXuourJejzwv/AuPRvAxordTyParRVcHjjtXirtkzM0uK2L9TTn9s0cXZbG7E55jCivRQzoxYmRAlA==} + peerDependencies: + magicast: '*' + peerDependenciesMeta: + magicast: + optional: true + cac@6.7.14: resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==} engines: {node: '>=8'} @@ -1750,6 +2230,10 @@ packages: resolution: {integrity: sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==} engines: {node: '>= 14.16.0'} + chokidar@5.0.0: + resolution: {integrity: sha512-TQMmc3w+5AxjpL8iIiwebF73dRDF4fBIieAqGn9RGCWaEVwQ6Fb2cGe31Yns0RRIzii5goJ1Y7xbMwo1TxMplw==} + engines: {node: '>= 20.19.0'} + ci-info@3.9.0: resolution: {integrity: sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==} engines: {node: '>=8'} @@ -1757,6 +2241,21 @@ packages: client-only@0.0.1: resolution: {integrity: sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==} + clipanion@4.0.0-rc.4: + resolution: {integrity: sha512-CXkMQxU6s9GklO/1f714dkKBMu1lopS1WFF0B8o4AxPykR1hpozxSiUZ5ZUeBjfPgCWqbcNOtZVFhB8Lkfp1+Q==} + peerDependencies: + typanion: '*' + + 
closest-match@1.3.3: + resolution: {integrity: sha512-RSdHrZwNOvt2uMQgqJDJdM/I+5MlJ1tQJEXYrbRjSMXWiCRo06g2hwObJ7+WKt2J9ySK9/pJ0Q2vbL+BPkofDA==} + + colorette@2.0.20: + resolution: {integrity: sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==} + + commander@14.0.3: + resolution: {integrity: sha512-H+y0Jo/T1RZ9qPP4Eh1pkcQcLRglraJaSLoyOtHxu6AapkjWVCy2Sit1QQ4x3Dng8qDlSsZEet7g5Pq06MvTgw==} + engines: {node: '>=20'} + commander@2.20.3: resolution: {integrity: sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==} @@ -1764,6 +2263,9 @@ packages: resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==} engines: {node: '>= 6'} + confbox@0.2.4: + resolution: {integrity: sha512-ysOGlgTFbN2/Y6Cg3Iye8YKulHw+R2fNXHrgSmXISQdMnomY6eNDprVdW9R5xBguEqI954+S6709UyiO7B+6OQ==} + consola@3.4.2: resolution: {integrity: sha512-5IKcdX0nnYavi6G7TtOhwkYzyjfJlatbjMjuLSfE2kYT5pMDOilZ4OvMhi637CcDICTmz3wARPoyhqyX1Y+XvA==} engines: {node: ^14.18.0 || >=16.10.0} @@ -1808,6 +2310,9 @@ packages: resolution: {integrity: sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==} engines: {node: '>=6'} + defu@6.1.7: + resolution: {integrity: sha512-7z22QmUWiQ/2d0KkdYmANbRUVABpZ9SNYyH5vx6PZ+nE5bcC0l7uFvEfHlyld/HcGBFTL536ClDt3DEcSlEJAQ==} + denque@2.1.0: resolution: {integrity: sha512-HVQE3AAb/pxF8fQAoiqpvg9i3evqug3hoiwakOyZAwJm+6vZehbkYXZ0l4JxS+I3QxM97v5aaRNhj8v5oBhekw==} engines: {node: '>=0.10'} @@ -1820,6 +2325,9 @@ packages: resolution: {integrity: sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==} engines: {node: '>=6'} + destr@2.0.5: + resolution: {integrity: sha512-ugFTXCtDZunbzasqBxrK93Ik/DRYsO6S/fedkWEMKqt04xZ4csmnmwGDBAb07QWNaGMAmnTIemsYZCksjATwsA==} + detect-indent@6.1.0: resolution: {integrity: 
sha512-reYkTUJAZb9gUuZ2RvVCNhVHdg62RHnJ7WJl8ftMi4diZ6NWlciOzQN88pUhSELEwflJht4oQDv0F0BMlwaYtA==} engines: {node: '>=8'} @@ -1840,6 +2348,10 @@ packages: resolution: {integrity: sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==} engines: {node: '>=12'} + dotenv@17.4.2: + resolution: {integrity: sha512-nI4U3TottKAcAD9LLud4Cb7b2QztQMUEfHbvhTH09bqXTxnSie8WnjPALV/WMCrJZ6UV/qHJ6L03OqO3LcdYZw==} + engines: {node: '>=12'} + drizzle-kit@0.30.6: resolution: {integrity: sha512-U4wWit0fyZuGuP7iNmRleQyK2V8wCuv57vf5l3MnG4z4fzNTjY/U13M8owyQ5RavqvqxBifWORaR3wIUzlN64g==} hasBin: true @@ -1990,6 +2502,11 @@ packages: engines: {node: '>=18'} hasBin: true + esbuild@0.28.0: + resolution: {integrity: sha512-sNR9MHpXSUV/XB4zmsFKN+QgVG82Cc7+/aaxJ8Adi8hyOac+EXptIp45QBPaVyX3N70664wRbTcLTOemCAnyqw==} + engines: {node: '>=18'} + hasBin: true + escape-html@1.0.3: resolution: {integrity: sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==} @@ -2051,6 +2568,9 @@ packages: resolution: {integrity: sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw==} engines: {node: '>= 18'} + exsolve@1.0.8: + resolution: {integrity: sha512-LmDxfWXwcTArk8fUEnOfSZpHOJ6zOMUJKOtFLFqJLoKJetuQG874Uc7/Kki7zFLzYybmZhp1M7+98pfMqeX8yA==} + extendable-error@0.1.7: resolution: {integrity: sha512-UOiS2in6/Q0FK0R0q6UY9vYpQ21mr/Qn1KOnte7vsACuNJf514WvCCUHSRCPcgjPT2bAhNIJdlE6bVap1GKmeg==} @@ -2068,9 +2588,18 @@ packages: fast-sha256@1.3.0: resolution: {integrity: sha512-n11RGP/lrWEFI/bWdygLxhI+pVeo1ZYIVwvvPkW7azl/rOy+F3HYRZ2K5zeE9mmkhQppyv9sQFx0JM9UabnpPQ==} + fast-string-truncated-width@3.0.3: + resolution: {integrity: sha512-0jjjIEL6+0jag3l2XWWizO64/aZVtpiGE3t0Zgqxv0DPuxiMjvB3M24fCyhZUO4KomJQPj3LTSUnDP3GpdwC0g==} + + fast-string-width@3.0.2: + resolution: {integrity: sha512-gX8LrtNEI5hq8DVUfRQMbr5lpaS4nMIWV+7XEbXk2b8kiQIizgnlr12B4dA3ZEx3308ze0O4Q1R+cHts8kyUJg==} + fast-uri@3.1.0: resolution: 
{integrity: sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==} + fast-wrap-ansi@0.2.0: + resolution: {integrity: sha512-rLV8JHxTyhVmFYhBJuMujcrHqOT2cnO5Zxj37qROj23CP39GXubJRBUFF0z8KFK77Uc0SukZUf7JZhsVEQ6n8w==} + fastq@1.19.1: resolution: {integrity: sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==} @@ -2131,6 +2660,10 @@ packages: generate-function@2.3.1: resolution: {integrity: sha512-eeB5GfMNeevm/GRYq20ShmsaGcmI81kIX2K9XQx5miC8KdHaC6Jm0qQ8ZNeGOi7wYB8OsdxKs+Y2oVuTFuVwKQ==} + get-east-asian-width@1.5.0: + resolution: {integrity: sha512-CQ+bEO+Tva/qlmw24dCejulK5pMzVnUOFOijVogd3KQs07HnRIgp8TGipvCCRT06xeYEbpbgwaCxglFyiuIcmA==} + engines: {node: '>=18'} + get-intrinsic@1.3.0: resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==} engines: {node: '>= 0.4'} @@ -2146,6 +2679,10 @@ packages: get-tsconfig@4.13.0: resolution: {integrity: sha512-1VKTZJCwBrvbd+Wn3AOgQP/2Av+TfTCOlE4AcRJE72W1ksZXbAx8PPBR9RzgTeSPzlPMHrbANMH3LbltH73wxQ==} + giget@3.2.0: + resolution: {integrity: sha512-GvHTWcykIR/fP8cj8dMpuMMkvaeJfPvYnhq0oW+chSeIr+ldX21ifU2Ms6KBoyKZQZmVaUAAhQ2EZ68KJF8a7A==} + hasBin: true + glob-parent@5.1.2: resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} engines: {node: '>= 6'} @@ -2301,6 +2838,9 @@ packages: json-schema-typed@8.0.2: resolution: {integrity: sha512-fQhoXdcvc3V28x7C7BMs4P5+kNlgUURe2jmUT1T//oBRMDrqy1QPelJimwZGo7Hg9VPV3EQV5Bnq4hbFy2vetA==} + jsonc-parser@3.3.1: + resolution: {integrity: sha512-HUgH65KyejrUFPvHFPbqOY0rsFip3Bo5wb4ngvdi1EpCYWUQDC5V+Y7mZws+DLkr4M//zQJoanu1SP+87Dv1oQ==} + jsonfile@4.0.0: resolution: {integrity: sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==} @@ -2339,24 +2879,28 @@ packages: engines: {node: '>= 12.0.0'} cpu: [arm64] os: [linux] + libc: [glibc] 
lightningcss-linux-arm64-musl@1.30.2: resolution: {integrity: sha512-5Vh9dGeblpTxWHpOx8iauV02popZDsCYMPIgiuw97OJ5uaDsL86cnqSFs5LZkG3ghHoX5isLgWzMs+eD1YzrnA==} engines: {node: '>= 12.0.0'} cpu: [arm64] os: [linux] + libc: [musl] lightningcss-linux-x64-gnu@1.30.2: resolution: {integrity: sha512-Cfd46gdmj1vQ+lR6VRTTadNHu6ALuw2pKR9lYq4FnhvgBc4zWY1EtZcAc6EffShbb1MFrIPfLDXD6Xprbnni4w==} engines: {node: '>= 12.0.0'} cpu: [x64] os: [linux] + libc: [glibc] lightningcss-linux-x64-musl@1.30.2: resolution: {integrity: sha512-XJaLUUFXb6/QG2lGIW6aIk6jKdtjtcffUT0NKvIqhSBY3hh9Ch+1LCeH80dR9q9LBjG3ewbDjnumefsLsP6aiA==} engines: {node: '>= 12.0.0'} cpu: [x64] os: [linux] + libc: [musl] lightningcss-win32-arm64-msvc@1.30.2: resolution: {integrity: sha512-FZn+vaj7zLv//D/192WFFVA0RgHawIcHqLX9xuWiQt7P0PtdFEVaxgF9rjM/IRYHQXNnk61/H/gb2Ei+kUQ4xQ==} @@ -2520,6 +3064,9 @@ packages: resolution: {integrity: sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==} engines: {node: '>= 0.4'} + ohash@2.0.11: + resolution: {integrity: sha512-RdR9FQrFwNBNXAr4GixM8YaRZRJ5PUWbKYbE5eOsrwAjJW0q2REGcf79oYPsLyskQCZG1PLN+S/K1V00joZAoQ==} + on-finished@2.4.1: resolution: {integrity: sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==} engines: {node: '>= 0.8'} @@ -2556,6 +3103,9 @@ packages: package-manager-detector@0.2.11: resolution: {integrity: sha512-BEnLolu+yuz22S56CU1SUKq3XC3PkwD5wv4ikR4MfGvnRVcmzXR9DwSlW2fEamyTPyXHomBJRzgapeuBvRNzJQ==} + package-manager-detector@1.6.0: + resolution: {integrity: sha512-61A5ThoTiDG/C8s8UMZwSorAGwMJ0ERVGj2OjoW5pAalsNOg15+iQiPzrLJ4jhZ1HJzmC2PIHT2oEiH3R5fzNA==} + parse-ms@4.0.0: resolution: {integrity: sha512-TXfryirbmq34y8QBwgqCVLi+8oA3oWx2eAnSn62ITyEhEYaWRlVZ2DvMM9eZbMs/RfxPu/PK/aBLyGj4IrqMHw==} engines: {node: '>=18'} @@ -2594,12 +3144,20 @@ packages: resolution: {integrity: sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==} engines: {node: 
'>= 14.16'} + perfect-debounce@2.1.0: + resolution: {integrity: sha512-LjgdTytVFXeUgtHZr9WYViYSM/g8MkcTPYDlPa3cDqMirHjKiSZPYd6DoL7pK8AJQr+uWkQvCjHNdiMqsrJs+g==} + pg-cloudflare@1.3.0: resolution: {integrity: sha512-6lswVVSztmHiRtD6I8hw4qP/nDm1EJbKMRhf3HCYaqud7frGysPv7FYJ5noZQdhQtN2xJnimfMtvQq21pdbzyQ==} pg-connection-string@2.12.0: resolution: {integrity: sha512-U7qg+bpswf3Cs5xLzRqbXbQl85ng0mfSV/J0nnA31MCLgvEaAo7CIhmeyrmJpOr7o+zm0rXK+hNnT5l9RHkCkQ==} + pg-cursor@2.19.0: + resolution: {integrity: sha512-J5cF1MUz7LRJ9emOqF/06QjabMHMZy587rSPF0UuA8rCwKeeYl2co8Pp+6k5UU9YrAYHMzWkLxilfZB0hqsWWw==} + peerDependencies: + pg: ^8 + pg-int8@1.0.1: resolution: {integrity: sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==} engines: {node: '>=4.0.0'} @@ -2634,6 +3192,15 @@ packages: pg-native: optional: true + pg@8.20.0: + resolution: {integrity: sha512-ldhMxz2r8fl/6QkXnBD3CR9/xg694oT6DZQ2s6c/RI28OjtSOpxnPrUCGOBJ46RCUxcWdx3p6kw/xnDHjKvaRA==} + engines: {node: '>= 16.0.0'} + peerDependencies: + pg-native: '>=3.0.1' + peerDependenciesMeta: + pg-native: + optional: true + pgpass@1.0.5: resolution: {integrity: sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==} @@ -2660,6 +3227,9 @@ packages: resolution: {integrity: sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ==} engines: {node: '>=16.20.0'} + pkg-types@2.3.1: + resolution: {integrity: sha512-y+ichcgc2LrADuhLNAx8DFjVfgz91pRxfZdI3UDhxHvcVEZsenLO+7XaU5vOp0u/7V/wZ+plyuQxtrDlZJ+yeg==} + postcss-load-config@6.0.1: resolution: {integrity: sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g==} engines: {node: '>= 18'} @@ -2725,6 +3295,11 @@ packages: engines: {node: '>=14'} hasBin: true + prettier@3.8.3: + resolution: {integrity: sha512-7igPTM53cGHMW8xWuVTydi2KO233VFiTNyF5hLJqpilHfmn8C8gPf+PS7dUT64YcXFbiMGZxS9pCSxL/Dxm/Jw==} + engines: {node: '>=14'} + hasBin: 
true + pretty-ms@9.3.0: resolution: {integrity: sha512-gjVS5hOP+M3wMm5nmNOucbIrqudzs9v/57bWRHQWLYklXqoXKrVfYW2W9+glfGsqtPgpiz5WwyEEB+ksXIx3gQ==} engines: {node: '>=18'} @@ -2758,6 +3333,9 @@ packages: resolution: {integrity: sha512-K5zQjDllxWkf7Z5xJdV0/B0WTNqx6vxG70zJE4N0kBs4LovmEYWJzQGxC9bS9RAKu3bgM40lrd5zoLJ12MQ5BA==} engines: {node: '>= 0.10'} + rc9@3.0.1: + resolution: {integrity: sha512-gMDyleLWVE+i6Sgtc0QbbY6pEKqYs97NGi6isHQPqYlLemPoO8dxQ3uGi0f4NiP98c+jMW6cG1Kx9dDwfvqARQ==} + react-dom@19.2.3: resolution: {integrity: sha512-yELu4WmLPw5Mr/lmeEpox5rw3RETacE++JgHqQzd2dg+YbJuat3jH4ingc+WPZhxaoFzdv9y33G+F7Nl5O0GBg==} peerDependencies: @@ -2775,6 +3353,10 @@ packages: resolution: {integrity: sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==} engines: {node: '>= 14.18.0'} + readdirp@5.0.0: + resolution: {integrity: sha512-9u/XQ1pvrQtYyMpZe7DXKv2p5CNvyVwzUB6uhLAnQwHMSgKMBR62lc7AHljaeteeHXn11XTAaLLUVZYVZyuRBQ==} + engines: {node: '>= 20.19.0'} + regenerator-runtime@0.14.1: resolution: {integrity: sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==} @@ -2927,6 +3509,10 @@ packages: std-env@3.10.0: resolution: {integrity: sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==} + string-width@8.2.1: + resolution: {integrity: sha512-IIaP0g3iy9Cyy18w3M9YcaDudujEAVHKt3a3QJg1+sr/oX96TbaGUubG0hJyCjCBThFH+tFpcIyoUHUn1ogaLA==} + engines: {node: '>=20'} + strip-ansi@6.0.1: resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} engines: {node: '>=8'} @@ -3025,6 +3611,9 @@ packages: ts-interface-checker@0.1.13: resolution: {integrity: sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==} + ts-toolbelt@9.6.0: + resolution: {integrity: sha512-nsZd8ZeNUzukXPlJmTBwUAuABDe/9qtVDelJeT/qW0ow3ZS3BsQJtNkan1802aM9Uf68/Y8ljw86Hu0h5IUW3w==} + tslib@2.8.1: 
resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} @@ -3086,6 +3675,9 @@ packages: resolution: {integrity: sha512-u9gUDkmR9dFS8b5kAYqIETK4OnzsS4l2ragJ0+soSMHh6VEeNHjTfSjk1tKxCqLyziCrPogadxP680J+v6yGHw==} hasBin: true + typanion@3.14.0: + resolution: {integrity: sha512-ZW/lVMRabETuYCd9O9ZvMhAh8GslSqaUjxmK/JLPCh6l73CvLBiuXswj/+7LdnWOgYsQ130FqLzFz5aGT4I3Ug==} + type-is@2.0.1: resolution: {integrity: sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw==} engines: {node: '>= 0.6'} @@ -3102,6 +3694,10 @@ packages: resolution: {integrity: sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA==} engines: {node: '>=18'} + uniku@0.0.12: + resolution: {integrity: sha512-wqt0D/ZcBTDprQxlFpxDm4jCHlQyHWQ62PMXPF0AfU7oxHm4g5++7EY7/+uwApCMOhfzRE8fTf4WATe6lH1vkg==} + engines: {node: '>=24.13.0'} + universalify@0.1.2: resolution: {integrity: sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==} engines: {node: '>= 4.0.0'} @@ -3217,6 +3813,10 @@ packages: engines: {node: '>=8'} hasBin: true + wrap-ansi@10.0.0: + resolution: {integrity: sha512-SGcvg80f0wUy2/fXES19feHMz8E0JoXv2uNgHOu4Dgi2OrCy1lqwFYEJz1BLbDI0exjPMe/ZdzZ/YpGECBG/aQ==} + engines: {node: '>=20'} + wrappy@1.0.2: resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} @@ -3289,6 +3889,12 @@ snapshots: '@types/json-schema': 7.0.15 js-yaml: 4.1.1 + '@ark/schema@0.56.0': + dependencies: + '@ark/util': 0.56.0 + + '@ark/util@0.56.0': {} + '@babel/runtime@7.26.10': dependencies: regenerator-runtime: 0.14.1 @@ -3535,12 +4141,24 @@ snapshots: picocolors: 1.1.1 sisteransi: 1.0.5 + '@clack/core@1.3.0': + dependencies: + fast-wrap-ansi: 0.2.0 + sisteransi: 1.0.5 + '@clack/prompts@0.10.1': dependencies: '@clack/core': 0.4.2 picocolors: 1.1.1 sisteransi: 1.0.5 + 
'@clack/prompts@1.3.0': + dependencies: + '@clack/core': 1.3.0 + fast-string-width: 3.0.2 + fast-wrap-ansi: 0.2.0 + sisteransi: 1.0.5 + '@clerk/backend@2.33.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': dependencies: '@clerk/shared': 3.47.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) @@ -3589,6 +4207,12 @@ snapshots: - react - react-dom + '@dagrejs/dagre@3.0.0': + dependencies: + '@dagrejs/graphlib': 4.0.1 + + '@dagrejs/graphlib@4.0.1': {} + '@drizzle-team/brocli@0.10.2': {} '@emnapi/runtime@1.7.1': @@ -3612,6 +4236,9 @@ snapshots: '@esbuild/aix-ppc64@0.25.12': optional: true + '@esbuild/aix-ppc64@0.28.0': + optional: true + '@esbuild/android-arm64@0.18.20': optional: true @@ -3621,6 +4248,9 @@ snapshots: '@esbuild/android-arm64@0.25.12': optional: true + '@esbuild/android-arm64@0.28.0': + optional: true + '@esbuild/android-arm@0.18.20': optional: true @@ -3630,6 +4260,9 @@ snapshots: '@esbuild/android-arm@0.25.12': optional: true + '@esbuild/android-arm@0.28.0': + optional: true + '@esbuild/android-x64@0.18.20': optional: true @@ -3639,6 +4272,9 @@ snapshots: '@esbuild/android-x64@0.25.12': optional: true + '@esbuild/android-x64@0.28.0': + optional: true + '@esbuild/darwin-arm64@0.18.20': optional: true @@ -3648,6 +4284,9 @@ snapshots: '@esbuild/darwin-arm64@0.25.12': optional: true + '@esbuild/darwin-arm64@0.28.0': + optional: true + '@esbuild/darwin-x64@0.18.20': optional: true @@ -3657,6 +4296,9 @@ snapshots: '@esbuild/darwin-x64@0.25.12': optional: true + '@esbuild/darwin-x64@0.28.0': + optional: true + '@esbuild/freebsd-arm64@0.18.20': optional: true @@ -3666,6 +4308,9 @@ snapshots: '@esbuild/freebsd-arm64@0.25.12': optional: true + '@esbuild/freebsd-arm64@0.28.0': + optional: true + '@esbuild/freebsd-x64@0.18.20': optional: true @@ -3675,6 +4320,9 @@ snapshots: '@esbuild/freebsd-x64@0.25.12': optional: true + '@esbuild/freebsd-x64@0.28.0': + optional: true + '@esbuild/linux-arm64@0.18.20': optional: true @@ -3684,6 +4332,9 @@ snapshots: 
'@esbuild/linux-arm64@0.25.12': optional: true + '@esbuild/linux-arm64@0.28.0': + optional: true + '@esbuild/linux-arm@0.18.20': optional: true @@ -3693,6 +4344,9 @@ snapshots: '@esbuild/linux-arm@0.25.12': optional: true + '@esbuild/linux-arm@0.28.0': + optional: true + '@esbuild/linux-ia32@0.18.20': optional: true @@ -3702,6 +4356,9 @@ snapshots: '@esbuild/linux-ia32@0.25.12': optional: true + '@esbuild/linux-ia32@0.28.0': + optional: true + '@esbuild/linux-loong64@0.18.20': optional: true @@ -3711,6 +4368,9 @@ snapshots: '@esbuild/linux-loong64@0.25.12': optional: true + '@esbuild/linux-loong64@0.28.0': + optional: true + '@esbuild/linux-mips64el@0.18.20': optional: true @@ -3720,6 +4380,9 @@ snapshots: '@esbuild/linux-mips64el@0.25.12': optional: true + '@esbuild/linux-mips64el@0.28.0': + optional: true + '@esbuild/linux-ppc64@0.18.20': optional: true @@ -3729,6 +4392,9 @@ snapshots: '@esbuild/linux-ppc64@0.25.12': optional: true + '@esbuild/linux-ppc64@0.28.0': + optional: true + '@esbuild/linux-riscv64@0.18.20': optional: true @@ -3738,6 +4404,9 @@ snapshots: '@esbuild/linux-riscv64@0.25.12': optional: true + '@esbuild/linux-riscv64@0.28.0': + optional: true + '@esbuild/linux-s390x@0.18.20': optional: true @@ -3747,6 +4416,9 @@ snapshots: '@esbuild/linux-s390x@0.25.12': optional: true + '@esbuild/linux-s390x@0.28.0': + optional: true + '@esbuild/linux-x64@0.18.20': optional: true @@ -3756,9 +4428,15 @@ snapshots: '@esbuild/linux-x64@0.25.12': optional: true + '@esbuild/linux-x64@0.28.0': + optional: true + '@esbuild/netbsd-arm64@0.25.12': optional: true + '@esbuild/netbsd-arm64@0.28.0': + optional: true + '@esbuild/netbsd-x64@0.18.20': optional: true @@ -3768,9 +4446,15 @@ snapshots: '@esbuild/netbsd-x64@0.25.12': optional: true + '@esbuild/netbsd-x64@0.28.0': + optional: true + '@esbuild/openbsd-arm64@0.25.12': optional: true + '@esbuild/openbsd-arm64@0.28.0': + optional: true + '@esbuild/openbsd-x64@0.18.20': optional: true @@ -3780,9 +4464,15 @@ snapshots: 
'@esbuild/openbsd-x64@0.25.12': optional: true + '@esbuild/openbsd-x64@0.28.0': + optional: true + '@esbuild/openharmony-arm64@0.25.12': optional: true + '@esbuild/openharmony-arm64@0.28.0': + optional: true + '@esbuild/sunos-x64@0.18.20': optional: true @@ -3792,6 +4482,9 @@ snapshots: '@esbuild/sunos-x64@0.25.12': optional: true + '@esbuild/sunos-x64@0.28.0': + optional: true + '@esbuild/win32-arm64@0.18.20': optional: true @@ -3801,6 +4494,9 @@ snapshots: '@esbuild/win32-arm64@0.25.12': optional: true + '@esbuild/win32-arm64@0.28.0': + optional: true + '@esbuild/win32-ia32@0.18.20': optional: true @@ -3810,6 +4506,9 @@ snapshots: '@esbuild/win32-ia32@0.25.12': optional: true + '@esbuild/win32-ia32@0.28.0': + optional: true + '@esbuild/win32-x64@0.18.20': optional: true @@ -3819,6 +4518,9 @@ snapshots: '@esbuild/win32-x64@0.25.12': optional: true + '@esbuild/win32-x64@0.28.0': + optional: true + '@hono/node-server@1.19.12(hono@4.12.9)': dependencies: hono: 4.12.9 @@ -4015,6 +4717,8 @@ snapshots: '@next/swc-win32-x64-msvc@15.5.7': optional: true + '@noble/hashes@2.2.0': {} + '@nodelib/fs.scandir@2.1.5': dependencies: '@nodelib/fs.stat': 2.0.5 @@ -4033,6 +4737,295 @@ snapshots: dependencies: cross-spawn: 7.0.6 + '@prisma-next/adapter-postgres@0.6.0-dev.8(typanion@3.14.0)': + dependencies: + '@prisma-next/contract': 0.6.0-dev.8 + '@prisma-next/contract-authoring': 0.6.0-dev.8 + '@prisma-next/family-sql': 0.6.0-dev.8 + '@prisma-next/framework-components': 0.6.0-dev.8 + '@prisma-next/ids': 0.6.0-dev.8 + '@prisma-next/sql-contract': 0.6.0-dev.8 + '@prisma-next/sql-contract-psl': 0.6.0-dev.8 + '@prisma-next/sql-contract-ts': 0.6.0-dev.8 + '@prisma-next/sql-operations': 0.6.0-dev.8 + '@prisma-next/sql-relational-core': 0.6.0-dev.8 + '@prisma-next/sql-runtime': 0.6.0-dev.8 + '@prisma-next/sql-schema-ir': 0.6.0-dev.8 + '@prisma-next/target-postgres': 0.6.0-dev.8(typanion@3.14.0) + '@prisma-next/utils': 0.6.0-dev.8 + arktype: 2.2.0 + transitivePeerDependencies: + - magicast 
+ - typanion + + '@prisma-next/cli@0.6.0-dev.8(typanion@3.14.0)': + dependencies: + '@clack/prompts': 1.3.0 + '@dagrejs/dagre': 3.0.0 + '@prisma-next/config': 0.6.0-dev.8 + '@prisma-next/contract': 0.6.0-dev.8 + '@prisma-next/emitter': 0.6.0-dev.8 + '@prisma-next/errors': 0.6.0-dev.8 + '@prisma-next/framework-components': 0.6.0-dev.8 + '@prisma-next/migration-tools': 0.6.0-dev.8 + '@prisma-next/psl-printer': 0.6.0-dev.8 + '@prisma-next/utils': 0.6.0-dev.8 + arktype: 2.2.0 + c12: 3.3.4 + clipanion: 4.0.0-rc.4(typanion@3.14.0) + closest-match: 1.3.3 + colorette: 2.0.20 + commander: 14.0.3 + esbuild: 0.28.0 + jsonc-parser: 3.3.1 + package-manager-detector: 1.6.0 + pathe: 2.0.3 + string-width: 8.2.1 + strip-ansi: 7.2.0 + wrap-ansi: 10.0.0 + transitivePeerDependencies: + - magicast + - typanion + + '@prisma-next/config@0.6.0-dev.8': + dependencies: + '@prisma-next/contract': 0.6.0-dev.8 + '@prisma-next/framework-components': 0.6.0-dev.8 + '@prisma-next/utils': 0.6.0-dev.8 + arktype: 2.2.0 + + '@prisma-next/contract-authoring@0.6.0-dev.8': {} + + '@prisma-next/contract@0.6.0-dev.8': + dependencies: + '@prisma-next/utils': 0.6.0-dev.8 + '@standard-schema/spec': 1.1.0 + arktype: 2.2.0 + + '@prisma-next/driver-postgres@0.6.0-dev.8': + dependencies: + '@prisma-next/contract': 0.6.0-dev.8 + '@prisma-next/errors': 0.6.0-dev.8 + '@prisma-next/framework-components': 0.6.0-dev.8 + '@prisma-next/sql-contract': 0.6.0-dev.8 + '@prisma-next/sql-errors': 0.6.0-dev.8 + '@prisma-next/sql-operations': 0.6.0-dev.8 + '@prisma-next/sql-relational-core': 0.6.0-dev.8 + '@prisma-next/utils': 0.6.0-dev.8 + arktype: 2.2.0 + pg: 8.20.0 + pg-cursor: 2.19.0(pg@8.20.0) + transitivePeerDependencies: + - pg-native + + '@prisma-next/emitter@0.6.0-dev.8': + dependencies: + '@prisma-next/contract': 0.6.0-dev.8 + '@prisma-next/framework-components': 0.6.0-dev.8 + '@prisma-next/operations': 0.6.0-dev.8 + '@prisma-next/utils': 0.6.0-dev.8 + arktype: 2.2.0 + prettier: 3.8.3 + + 
'@prisma-next/errors@0.6.0-dev.8': + dependencies: + '@prisma-next/framework-components': 0.6.0-dev.8 + '@prisma-next/utils': 0.6.0-dev.8 + + '@prisma-next/family-sql@0.6.0-dev.8': + dependencies: + '@prisma-next/contract': 0.6.0-dev.8 + '@prisma-next/emitter': 0.6.0-dev.8 + '@prisma-next/framework-components': 0.6.0-dev.8 + '@prisma-next/migration-tools': 0.6.0-dev.8 + '@prisma-next/operations': 0.6.0-dev.8 + '@prisma-next/sql-contract': 0.6.0-dev.8 + '@prisma-next/sql-contract-emitter': 0.6.0-dev.8 + '@prisma-next/sql-contract-ts': 0.6.0-dev.8 + '@prisma-next/sql-operations': 0.6.0-dev.8 + '@prisma-next/sql-relational-core': 0.6.0-dev.8 + '@prisma-next/sql-runtime': 0.6.0-dev.8 + '@prisma-next/sql-schema-ir': 0.6.0-dev.8 + '@prisma-next/utils': 0.6.0-dev.8 + arktype: 2.2.0 + + '@prisma-next/framework-components@0.6.0-dev.8': + dependencies: + '@prisma-next/contract': 0.6.0-dev.8 + '@prisma-next/operations': 0.6.0-dev.8 + '@prisma-next/ts-render': 0.6.0-dev.8 + '@prisma-next/utils': 0.6.0-dev.8 + '@standard-schema/spec': 1.1.0 + + '@prisma-next/ids@0.6.0-dev.8': + dependencies: + '@prisma-next/contract': 0.6.0-dev.8 + '@prisma-next/framework-components': 0.6.0-dev.8 + '@prisma-next/utils': 0.6.0-dev.8 + uniku: 0.0.12 + + '@prisma-next/migration-tools@0.6.0-dev.8': + dependencies: + '@prisma-next/contract': 0.6.0-dev.8 + '@prisma-next/framework-components': 0.6.0-dev.8 + '@prisma-next/utils': 0.6.0-dev.8 + arktype: 2.2.0 + pathe: 2.0.3 + prettier: 3.8.3 + + '@prisma-next/operations@0.6.0-dev.8': {} + + '@prisma-next/postgres@0.6.0-dev.8(typanion@3.14.0)': + dependencies: + '@prisma-next/adapter-postgres': 0.6.0-dev.8(typanion@3.14.0) + '@prisma-next/cli': 0.6.0-dev.8(typanion@3.14.0) + '@prisma-next/config': 0.6.0-dev.8 + '@prisma-next/contract': 0.6.0-dev.8 + '@prisma-next/driver-postgres': 0.6.0-dev.8 + '@prisma-next/family-sql': 0.6.0-dev.8 + '@prisma-next/framework-components': 0.6.0-dev.8 + '@prisma-next/sql-builder': 0.6.0-dev.8 + '@prisma-next/sql-contract': 
0.6.0-dev.8 + '@prisma-next/sql-contract-psl': 0.6.0-dev.8 + '@prisma-next/sql-contract-ts': 0.6.0-dev.8 + '@prisma-next/sql-orm-client': 0.6.0-dev.8 + '@prisma-next/sql-relational-core': 0.6.0-dev.8 + '@prisma-next/sql-runtime': 0.6.0-dev.8 + '@prisma-next/target-postgres': 0.6.0-dev.8(typanion@3.14.0) + '@prisma-next/utils': 0.6.0-dev.8 + pathe: 2.0.3 + pg: 8.20.0 + transitivePeerDependencies: + - magicast + - pg-native + - typanion + + '@prisma-next/psl-parser@0.6.0-dev.8': + dependencies: + '@prisma-next/framework-components': 0.6.0-dev.8 + '@prisma-next/utils': 0.6.0-dev.8 + + '@prisma-next/psl-printer@0.6.0-dev.8': + dependencies: + '@prisma-next/framework-components': 0.6.0-dev.8 + + '@prisma-next/sql-builder@0.6.0-dev.8': + dependencies: + '@prisma-next/contract': 0.6.0-dev.8 + '@prisma-next/framework-components': 0.6.0-dev.8 + '@prisma-next/sql-contract': 0.6.0-dev.8 + '@prisma-next/sql-operations': 0.6.0-dev.8 + '@prisma-next/sql-relational-core': 0.6.0-dev.8 + + '@prisma-next/sql-contract-emitter@0.6.0-dev.8': + dependencies: + '@prisma-next/contract': 0.6.0-dev.8 + '@prisma-next/emitter': 0.6.0-dev.8 + '@prisma-next/sql-contract': 0.6.0-dev.8 + '@prisma-next/utils': 0.6.0-dev.8 + + '@prisma-next/sql-contract-psl@0.6.0-dev.8': + dependencies: + '@prisma-next/config': 0.6.0-dev.8 + '@prisma-next/contract': 0.6.0-dev.8 + '@prisma-next/framework-components': 0.6.0-dev.8 + '@prisma-next/psl-parser': 0.6.0-dev.8 + '@prisma-next/sql-contract': 0.6.0-dev.8 + '@prisma-next/sql-contract-ts': 0.6.0-dev.8 + '@prisma-next/utils': 0.6.0-dev.8 + pathe: 2.0.3 + + '@prisma-next/sql-contract-ts@0.6.0-dev.8': + dependencies: + '@prisma-next/config': 0.6.0-dev.8 + '@prisma-next/contract': 0.6.0-dev.8 + '@prisma-next/contract-authoring': 0.6.0-dev.8 + '@prisma-next/framework-components': 0.6.0-dev.8 + '@prisma-next/sql-contract': 0.6.0-dev.8 + '@prisma-next/utils': 0.6.0-dev.8 + arktype: 2.2.0 + pathe: 2.0.3 + ts-toolbelt: 9.6.0 + + '@prisma-next/sql-contract@0.6.0-dev.8': 
+ dependencies: + '@prisma-next/contract': 0.6.0-dev.8 + '@prisma-next/framework-components': 0.6.0-dev.8 + arktype: 2.2.0 + + '@prisma-next/sql-errors@0.6.0-dev.8': {} + + '@prisma-next/sql-operations@0.6.0-dev.8': + dependencies: + '@prisma-next/operations': 0.6.0-dev.8 + '@prisma-next/sql-contract': 0.6.0-dev.8 + arktype: 2.2.0 + + '@prisma-next/sql-orm-client@0.6.0-dev.8': + dependencies: + '@prisma-next/contract': 0.6.0-dev.8 + '@prisma-next/framework-components': 0.6.0-dev.8 + '@prisma-next/operations': 0.6.0-dev.8 + '@prisma-next/sql-contract': 0.6.0-dev.8 + '@prisma-next/sql-operations': 0.6.0-dev.8 + '@prisma-next/sql-relational-core': 0.6.0-dev.8 + '@prisma-next/sql-runtime': 0.6.0-dev.8 + '@prisma-next/utils': 0.6.0-dev.8 + + '@prisma-next/sql-relational-core@0.6.0-dev.8': + dependencies: + '@prisma-next/contract': 0.6.0-dev.8 + '@prisma-next/framework-components': 0.6.0-dev.8 + '@prisma-next/operations': 0.6.0-dev.8 + '@prisma-next/sql-contract': 0.6.0-dev.8 + '@prisma-next/sql-operations': 0.6.0-dev.8 + '@prisma-next/utils': 0.6.0-dev.8 + '@standard-schema/spec': 1.1.0 + arktype: 2.2.0 + ts-toolbelt: 9.6.0 + + '@prisma-next/sql-runtime@0.6.0-dev.8': + dependencies: + '@prisma-next/contract': 0.6.0-dev.8 + '@prisma-next/framework-components': 0.6.0-dev.8 + '@prisma-next/ids': 0.6.0-dev.8 + '@prisma-next/operations': 0.6.0-dev.8 + '@prisma-next/sql-contract': 0.6.0-dev.8 + '@prisma-next/sql-operations': 0.6.0-dev.8 + '@prisma-next/sql-relational-core': 0.6.0-dev.8 + '@prisma-next/utils': 0.6.0-dev.8 + arktype: 2.2.0 + + '@prisma-next/sql-schema-ir@0.6.0-dev.8': + dependencies: + '@prisma-next/contract': 0.6.0-dev.8 + + '@prisma-next/target-postgres@0.6.0-dev.8(typanion@3.14.0)': + dependencies: + '@prisma-next/cli': 0.6.0-dev.8(typanion@3.14.0) + '@prisma-next/contract': 0.6.0-dev.8 + '@prisma-next/errors': 0.6.0-dev.8 + '@prisma-next/family-sql': 0.6.0-dev.8 + '@prisma-next/framework-components': 0.6.0-dev.8 + '@prisma-next/migration-tools': 0.6.0-dev.8 
+ '@prisma-next/sql-contract': 0.6.0-dev.8 + '@prisma-next/sql-errors': 0.6.0-dev.8 + '@prisma-next/sql-operations': 0.6.0-dev.8 + '@prisma-next/sql-relational-core': 0.6.0-dev.8 + '@prisma-next/sql-schema-ir': 0.6.0-dev.8 + '@prisma-next/ts-render': 0.6.0-dev.8 + '@prisma-next/utils': 0.6.0-dev.8 + '@standard-schema/spec': 1.1.0 + arktype: 2.2.0 + pathe: 2.0.3 + transitivePeerDependencies: + - magicast + - typanion + + '@prisma-next/ts-render@0.6.0-dev.8': {} + + '@prisma-next/utils@0.6.0-dev.8': {} + '@rollup/rollup-android-arm-eabi@4.59.0': optional: true @@ -4117,6 +5110,8 @@ snapshots: '@stablelib/base64@1.0.1': {} + '@standard-schema/spec@1.1.0': {} + '@stricli/core@1.2.5': {} '@supabase/auth-js@2.89.0': @@ -4258,6 +5253,8 @@ snapshots: ansi-regex@6.2.2: {} + ansi-styles@6.2.3: {} + any-promise@1.3.0: {} argparse@1.0.10: @@ -4266,6 +5263,16 @@ snapshots: argparse@2.0.1: {} + arkregex@0.0.5: + dependencies: + '@ark/util': 0.56.0 + + arktype@2.2.0: + dependencies: + '@ark/schema': 0.56.0 + '@ark/util': 0.56.0 + arkregex: 0.0.5 + array-union@2.1.0: {} assertion-error@2.0.1: {} @@ -4310,6 +5317,21 @@ snapshots: bytes@3.1.2: {} + c12@3.3.4: + dependencies: + chokidar: 5.0.0 + confbox: 0.2.4 + defu: 6.1.7 + dotenv: 17.4.2 + exsolve: 1.0.8 + giget: 3.2.0 + jiti: 2.6.1 + ohash: 2.0.11 + pathe: 2.0.3 + perfect-debounce: 2.1.0 + pkg-types: 2.3.1 + rc9: 3.0.1 + cac@6.7.14: {} call-bind-apply-helpers@1.0.2: @@ -4340,15 +5362,31 @@ snapshots: dependencies: readdirp: 4.1.2 + chokidar@5.0.0: + dependencies: + readdirp: 5.0.0 + ci-info@3.9.0: {} client-only@0.0.1: {} + clipanion@4.0.0-rc.4(typanion@3.14.0): + dependencies: + typanion: 3.14.0 + + closest-match@1.3.3: {} + + colorette@2.0.20: {} + + commander@14.0.3: {} + commander@2.20.3: optional: true commander@4.1.1: {} + confbox@0.2.4: {} + consola@3.4.2: {} content-disposition@1.0.1: {} @@ -4378,6 +5416,8 @@ snapshots: deep-eql@5.0.2: {} + defu@6.1.7: {} + denque@2.1.0: optional: true @@ -4385,6 +5425,8 @@ snapshots: 
dequal@2.0.3: {} + destr@2.0.5: {} + detect-indent@6.1.0: {} detect-libc@2.1.2: @@ -4398,6 +5440,8 @@ snapshots: dotenv@16.6.1: {} + dotenv@17.4.2: {} + drizzle-kit@0.30.6: dependencies: '@drizzle-team/brocli': 0.10.2 @@ -4424,6 +5468,14 @@ snapshots: pg: 8.16.3 postgres: 3.4.9 + drizzle-orm@0.45.2(@types/pg@8.16.0)(gel@2.2.0)(mysql2@3.16.0)(pg@8.20.0)(postgres@3.4.9): + optionalDependencies: + '@types/pg': 8.16.0 + gel: 2.2.0 + mysql2: 3.16.0 + pg: 8.20.0 + postgres: 3.4.9 + dunder-proto@1.0.1: dependencies: call-bind-apply-helpers: 1.0.2 @@ -4538,6 +5590,35 @@ snapshots: '@esbuild/win32-ia32': 0.25.12 '@esbuild/win32-x64': 0.25.12 + esbuild@0.28.0: + optionalDependencies: + '@esbuild/aix-ppc64': 0.28.0 + '@esbuild/android-arm': 0.28.0 + '@esbuild/android-arm64': 0.28.0 + '@esbuild/android-x64': 0.28.0 + '@esbuild/darwin-arm64': 0.28.0 + '@esbuild/darwin-x64': 0.28.0 + '@esbuild/freebsd-arm64': 0.28.0 + '@esbuild/freebsd-x64': 0.28.0 + '@esbuild/linux-arm': 0.28.0 + '@esbuild/linux-arm64': 0.28.0 + '@esbuild/linux-ia32': 0.28.0 + '@esbuild/linux-loong64': 0.28.0 + '@esbuild/linux-mips64el': 0.28.0 + '@esbuild/linux-ppc64': 0.28.0 + '@esbuild/linux-riscv64': 0.28.0 + '@esbuild/linux-s390x': 0.28.0 + '@esbuild/linux-x64': 0.28.0 + '@esbuild/netbsd-arm64': 0.28.0 + '@esbuild/netbsd-x64': 0.28.0 + '@esbuild/openbsd-arm64': 0.28.0 + '@esbuild/openbsd-x64': 0.28.0 + '@esbuild/openharmony-arm64': 0.28.0 + '@esbuild/sunos-x64': 0.28.0 + '@esbuild/win32-arm64': 0.28.0 + '@esbuild/win32-ia32': 0.28.0 + '@esbuild/win32-x64': 0.28.0 + escape-html@1.0.3: {} esprima@4.0.1: {} @@ -4611,6 +5692,8 @@ snapshots: transitivePeerDependencies: - supports-color + exsolve@1.0.8: {} + extendable-error@0.1.7: {} fast-check@4.4.0: @@ -4629,8 +5712,18 @@ snapshots: fast-sha256@1.3.0: {} + fast-string-truncated-width@3.0.3: {} + + fast-string-width@3.0.2: + dependencies: + fast-string-truncated-width: 3.0.3 + fast-uri@3.1.0: {} + fast-wrap-ansi@0.2.0: + dependencies: + fast-string-width: 
3.0.2 + fastq@1.19.1: dependencies: reusify: 1.1.0 @@ -4700,6 +5793,8 @@ snapshots: is-property: 1.0.2 optional: true + get-east-asian-width@1.5.0: {} + get-intrinsic@1.3.0: dependencies: call-bind-apply-helpers: 1.0.2 @@ -4727,6 +5822,8 @@ snapshots: dependencies: resolve-pkg-maps: 1.0.0 + giget@3.2.0: {} + glob-parent@5.1.2: dependencies: is-glob: 4.0.3 @@ -4855,6 +5952,8 @@ snapshots: json-schema-typed@8.0.2: {} + jsonc-parser@3.3.1: {} + jsonfile@4.0.0: optionalDependencies: graceful-fs: 4.2.11 @@ -5036,6 +6135,8 @@ snapshots: object-inspect@1.13.4: {} + ohash@2.0.11: {} + on-finished@2.4.1: dependencies: ee-first: 1.1.1 @@ -5068,6 +6169,8 @@ snapshots: dependencies: quansync: 0.2.11 + package-manager-detector@1.6.0: {} + parse-ms@4.0.0: {} parseurl@1.3.3: {} @@ -5091,11 +6194,17 @@ snapshots: pathval@2.0.1: {} + perfect-debounce@2.1.0: {} + pg-cloudflare@1.3.0: optional: true pg-connection-string@2.12.0: {} + pg-cursor@2.19.0(pg@8.20.0): + dependencies: + pg: 8.20.0 + pg-int8@1.0.1: {} pg-pool@3.13.0(pg@8.13.1): @@ -5106,6 +6215,10 @@ snapshots: dependencies: pg: 8.16.3 + pg-pool@3.13.0(pg@8.20.0): + dependencies: + pg: 8.20.0 + pg-protocol@1.13.0: {} pg-types@2.2.0: @@ -5136,6 +6249,16 @@ snapshots: optionalDependencies: pg-cloudflare: 1.3.0 + pg@8.20.0: + dependencies: + pg-connection-string: 2.12.0 + pg-pool: 3.13.0(pg@8.20.0) + pg-protocol: 1.13.0 + pg-types: 2.2.0 + pgpass: 1.0.5 + optionalDependencies: + pg-cloudflare: 1.3.0 + pgpass@1.0.5: dependencies: split2: 4.2.0 @@ -5152,6 +6275,12 @@ snapshots: pkce-challenge@5.0.1: {} + pkg-types@2.3.1: + dependencies: + confbox: 0.2.4 + exsolve: 1.0.8 + pathe: 2.0.3 + postcss-load-config@6.0.1(jiti@2.6.1)(postcss@8.5.6)(tsx@4.19.3)(yaml@2.8.4): dependencies: lilconfig: 3.1.3 @@ -5193,6 +6322,8 @@ snapshots: prettier@3.7.4: {} + prettier@3.8.3: {} + pretty-ms@9.3.0: dependencies: parse-ms: 4.0.0 @@ -5223,6 +6354,11 @@ snapshots: iconv-lite: 0.7.2 unpipe: 1.0.0 + rc9@3.0.1: + dependencies: + defu: 6.1.7 + destr: 
2.0.5 + react-dom@19.2.3(react@19.2.3): dependencies: react: 19.2.3 @@ -5239,6 +6375,8 @@ snapshots: readdirp@4.1.2: {} + readdirp@5.0.0: {} + regenerator-runtime@0.14.1: {} require-from-string@2.0.2: {} @@ -5449,6 +6587,11 @@ snapshots: std-env@3.10.0: {} + string-width@8.2.1: + dependencies: + get-east-asian-width: 1.5.0 + strip-ansi: 7.2.0 + strip-ansi@6.0.1: dependencies: ansi-regex: 5.0.1 @@ -5531,6 +6674,8 @@ snapshots: ts-interface-checker@0.1.13: {} + ts-toolbelt@9.6.0: {} + tslib@2.8.1: {} tsup@8.4.0(jiti@2.6.1)(postcss@8.5.6)(tsx@4.19.3)(typescript@5.6.3)(yaml@2.8.4): @@ -5594,6 +6739,8 @@ snapshots: turbo-windows-64: 2.1.1 turbo-windows-arm64: 2.1.1 + typanion@3.14.0: {} + type-is@2.0.1: dependencies: content-type: 1.0.5 @@ -5606,6 +6753,10 @@ snapshots: unicorn-magic@0.3.0: {} + uniku@0.0.12: + dependencies: + '@noble/hashes': 2.2.0 + universalify@0.1.2: {} unpipe@1.0.0: {} @@ -5716,6 +6867,12 @@ snapshots: siginfo: 2.0.0 stackback: 0.0.2 + wrap-ansi@10.0.0: + dependencies: + ansi-styles: 6.2.3 + string-width: 8.2.1 + strip-ansi: 7.2.0 + wrappy@1.0.2: {} ws@8.18.3: {} diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml index ee3fd6da..a7afc8aa 100644 --- a/pnpm-workspace.yaml +++ b/pnpm-workspace.yaml @@ -21,3 +21,9 @@ catalogs: minimumReleaseAge: 10080 # Forbid git/tarball deps anywhere in the lockfile (pnpm ≥ 10.26). blockExoticSubdeps: true +# Excluded from the 7-day cooldown: first-party packages from Prisma that +# the @cipherstash/prisma-next integration is built against. These ship +# alongside the framework's own release cadence and are not exotic deps. +# Wildcard covers direct + transitive @prisma-next/* dependencies. 
+minimumReleaseAgeExclude: + - '@prisma-next/*' From 74a388f387b4335e34c1ce680e16e4eb0c7eee9b Mon Sep 17 00:00:00 2001 From: CJ Brewer <brewercalvinj@gmail.com> Date: Wed, 13 May 2026 16:14:07 -0600 Subject: [PATCH 2/4] fix: prisma ci tests --- packages/prisma-next/turbo.json | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 packages/prisma-next/turbo.json diff --git a/packages/prisma-next/turbo.json b/packages/prisma-next/turbo.json new file mode 100644 index 00000000..5ff71660 --- /dev/null +++ b/packages/prisma-next/turbo.json @@ -0,0 +1,9 @@ +{ + "$schema": "https://turbo.build/schema.json", + "extends": ["//"], + "tasks": { + "test": { + "dependsOn": ["build"] + } + } +} From 4c2d6d42ec62d56748e37ce941a9867acd8eb370 Mon Sep 17 00:00:00 2001 From: CJ Brewer <brewercalvinj@gmail.com> Date: Thu, 14 May 2026 08:24:12 -0600 Subject: [PATCH 3/4] feat: address feedback and e2e tests --- .github/workflows/prisma-next-e2e.yml | 112 +++++++++ .gitignore | 1 + examples/prisma/.cipherstash/context.json | 17 -- examples/prisma/.env.example | 2 +- examples/prisma/package.json | 5 +- examples/prisma/src/db.ts | 8 +- examples/prisma/test/e2e/README.md | 41 ++++ examples/prisma/test/e2e/bigint.e2e.test.ts | 122 ++++++++++ examples/prisma/test/e2e/bool.e2e.test.ts | 93 ++++++++ examples/prisma/test/e2e/date.e2e.test.ts | 95 ++++++++ examples/prisma/test/e2e/docker-compose.yml | 34 +++ examples/prisma/test/e2e/global-setup.ts | 134 +++++++++++ examples/prisma/test/e2e/harness.ts | 81 +++++++ examples/prisma/test/e2e/json.e2e.test.ts | 131 +++++++++++ examples/prisma/test/e2e/mixed.e2e.test.ts | 213 ++++++++++++++++++ examples/prisma/test/e2e/num.e2e.test.ts | 108 +++++++++ .../prisma/test/e2e/str-range.e2e.test.ts | 88 ++++++++ examples/prisma/test/e2e/vitest.config.ts | 22 ++ packages/prisma-next/DEVELOPING.md | 21 +- packages/prisma-next/README.md | 20 +- packages/prisma-next/package.json | 2 +- .../src/execution/cell-codec-factory.ts | 2 +- 
.../prisma-next/src/execution/operators.ts | 5 +- packages/prisma-next/src/exports/migration.ts | 5 +- packages/prisma-next/src/exports/pack.ts | 2 - .../test/bundling-isolation.test.ts | 8 +- pnpm-lock.yaml | 6 + 27 files changed, 1318 insertions(+), 60 deletions(-) create mode 100644 .github/workflows/prisma-next-e2e.yml delete mode 100644 examples/prisma/.cipherstash/context.json create mode 100644 examples/prisma/test/e2e/README.md create mode 100644 examples/prisma/test/e2e/bigint.e2e.test.ts create mode 100644 examples/prisma/test/e2e/bool.e2e.test.ts create mode 100644 examples/prisma/test/e2e/date.e2e.test.ts create mode 100644 examples/prisma/test/e2e/docker-compose.yml create mode 100644 examples/prisma/test/e2e/global-setup.ts create mode 100644 examples/prisma/test/e2e/harness.ts create mode 100644 examples/prisma/test/e2e/json.e2e.test.ts create mode 100644 examples/prisma/test/e2e/mixed.e2e.test.ts create mode 100644 examples/prisma/test/e2e/num.e2e.test.ts create mode 100644 examples/prisma/test/e2e/str-range.e2e.test.ts create mode 100644 examples/prisma/test/e2e/vitest.config.ts diff --git a/.github/workflows/prisma-next-e2e.yml b/.github/workflows/prisma-next-e2e.yml new file mode 100644 index 00000000..785240ad --- /dev/null +++ b/.github/workflows/prisma-next-e2e.yml @@ -0,0 +1,112 @@ +name: Prisma Next E2E + +# End-to-end tests for `@cipherstash/prisma-next`: spins up a real +# Postgres container, applies the cipherstash baseline migration +# (EQL bundle install) + the example app's schema, then runs the +# suite at `examples/prisma/test/e2e/` against a live ZeroKMS +# workspace. +# +# Triggers only on changes that affect the package or the example +# (the unit-test suite in `tests.yml` covers everything that doesn't +# need a live workspace). 
+ +on: + push: + branches: + - main + paths: + - 'packages/prisma-next/**' + - 'examples/prisma/**' + - '.github/workflows/prisma-next-e2e.yml' + pull_request: + branches: + - '**' + paths: + - 'packages/prisma-next/**' + - 'examples/prisma/**' + - '.github/workflows/prisma-next-e2e.yml' + +jobs: + e2e: + name: Run Prisma Next E2E + runs-on: blacksmith-4vcpu-ubuntu-2404 + + # Skip cleanly on fork PRs where secrets aren't available. The + # global-setup hook in the suite hard-errors when `CS_WORKSPACE_CRN` + # is unset; gating at the job level produces a clean "skipped" + # status instead of a noisy failure. + if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }} + + env: + CS_WORKSPACE_CRN: ${{ secrets.CS_WORKSPACE_CRN }} + CS_CLIENT_ID: ${{ secrets.CS_CLIENT_ID }} + CS_CLIENT_KEY: ${{ secrets.CS_CLIENT_KEY }} + CS_CLIENT_ACCESS_KEY: ${{ secrets.CS_CLIENT_ACCESS_KEY }} + + steps: + - name: Checkout Repo + uses: actions/checkout@v6 + + - uses: pnpm/action-setup@v6.0.3 + name: Install pnpm + with: + run_install: false + + - name: Install Node.js + uses: actions/setup-node@v6 + with: + node-version: 22 + cache: 'pnpm' + + # node-pty's install hook falls back to `node-gyp rebuild` when no + # linux-x64 prebuild matches. pnpm/action-setup v6 no longer ships + # node-gyp on PATH, so install it explicitly. + - name: Install node-gyp + run: npm install -g node-gyp + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + # Write the CS_* credentials and the harness DATABASE_URL into the + # example app's .env so the runtime + the `prisma-next migration + # apply` invocation in global-setup both pick them up. The harness + # also overrides DATABASE_URL inside the test process to point at + # the container, but the migration:apply subprocess relies on + # prisma-next.config.ts → process.env['DATABASE_URL'] being set + # before the test runner spawns it. 
+ - name: Create .env file in examples/prisma + run: | + touch ./examples/prisma/.env + echo "DATABASE_URL=postgres://cipherstash:cipherstash@localhost:54329/cipherstash_e2e" >> ./examples/prisma/.env + echo "CS_WORKSPACE_CRN=${{ secrets.CS_WORKSPACE_CRN }}" >> ./examples/prisma/.env + echo "CS_CLIENT_ID=${{ secrets.CS_CLIENT_ID }}" >> ./examples/prisma/.env + echo "CS_CLIENT_KEY=${{ secrets.CS_CLIENT_KEY }}" >> ./examples/prisma/.env + echo "CS_CLIENT_ACCESS_KEY=${{ secrets.CS_CLIENT_ACCESS_KEY }}" >> ./examples/prisma/.env + + - name: Build @cipherstash/prisma-next + run: pnpm --filter @cipherstash/prisma-next build + + - name: Emit example contract + run: pnpm --filter @cipherstash/prisma-next-example emit + + - name: Start E2E Postgres container + working-directory: examples/prisma + run: | + docker compose -f test/e2e/docker-compose.yml up -d + # Wait for pg_isready before handing off to the suite — the + # global-setup hook expects the container to already be up. + for i in {1..60}; do + if docker exec cipherstash-e2e-postgres pg_isready -U cipherstash -d cipherstash_e2e >/dev/null 2>&1; then + echo "Postgres ready" + break + fi + sleep 1 + done + + - name: Run E2E suite + run: pnpm --filter @cipherstash/prisma-next-example test:e2e + + - name: Stop E2E Postgres container + if: always() + working-directory: examples/prisma + run: docker compose -f test/e2e/docker-compose.yml down -v diff --git a/.gitignore b/.gitignore index c8717622..bdf2c99c 100644 --- a/.gitignore +++ b/.gitignore @@ -66,5 +66,6 @@ mise.local.toml cipherstash.toml cipherstash.secret.toml sql/cipherstash-*.sql +.cipherstash/ notes/ diff --git a/examples/prisma/.cipherstash/context.json b/examples/prisma/.cipherstash/context.json deleted file mode 100644 index c3d1261b..00000000 --- a/examples/prisma/.cipherstash/context.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "cliVersion": "0.14.0", - "integration": "prisma-next", - "encryptionClientPath": "./src/encryption/index.ts", - "packageManager": 
"npm", - "installCommand": "npm install @cipherstash/stack", - "envKeys": [ - "CS_CLIENT_ACCESS_KEY", - "CS_CLIENT_ID", - "CS_CLIENT_KEY", - "CS_WORKSPACE_CRN", - "DATABASE_URL" - ], - "schemas": [], - "installedSkills": [], - "generatedAt": "2026-05-13T18:16:34.922Z" -} diff --git a/examples/prisma/.env.example b/examples/prisma/.env.example index e21c34f9..86442335 100644 --- a/examples/prisma/.env.example +++ b/examples/prisma/.env.example @@ -9,7 +9,7 @@ DATABASE_URL=postgres://postgres:postgres@localhost:5544/cipherstash_prisma_exam # CipherStash workspace credentials — **deployment only**. # -# For local development, run `stash auth login` once. The PKCE flow +# For local development, run `npx stash auth login` once. The PKCE flow # stores per-developer credentials in your OS keychain, and the # `@cipherstash/stack` `EncryptionClient` picks them up automatically. # No CS_* env vars needed. diff --git a/examples/prisma/package.json b/examples/prisma/package.json index 98421f0f..cabd17c9 100644 --- a/examples/prisma/package.json +++ b/examples/prisma/package.json @@ -9,6 +9,7 @@ "migration:plan": "prisma-next migration plan", "migration:apply": "prisma-next migration apply", "start": "tsx src/index.ts", + "test:e2e": "vitest run --config test/e2e/vitest.config.ts", "typecheck": "tsc --project tsconfig.json --noEmit" }, "dependencies": { @@ -30,7 +31,9 @@ "devDependencies": { "@prisma-next/cli": "0.6.0-dev.8", "@types/node": "^22.15.12", + "pathe": "^2.0.3", "tsx": "catalog:repo", - "typescript": "catalog:repo" + "typescript": "catalog:repo", + "vitest": "catalog:repo" } } diff --git a/examples/prisma/src/db.ts b/examples/prisma/src/db.ts index b9f31e68..befbc1c1 100644 --- a/examples/prisma/src/db.ts +++ b/examples/prisma/src/db.ts @@ -4,10 +4,10 @@ * * `cipherstashFromStack({ contractJson })` derives the encryption * schemas from the contract, constructs the `@cipherstash/stack` - * `EncryptionClient` against your `CS_*` env vars, builds the SDK - * adapter, 
and returns ready-to-spread arrays for `extensions` and - * `middleware`. Override `schemas` only if you have additional tables - * the contract does not model. + * `EncryptionClient` against your `CS_*` env vars or local profile, + * builds the SDK adapter, and returns ready-to-spread arrays for + * `extensions` and `middleware`. Override `schemas` only if you have + * additional tables the contract does not model. */ import 'dotenv/config' diff --git a/examples/prisma/test/e2e/README.md b/examples/prisma/test/e2e/README.md new file mode 100644 index 00000000..cce2113b --- /dev/null +++ b/examples/prisma/test/e2e/README.md @@ -0,0 +1,41 @@ +# Live PG + EQL + ZeroKMS e2e harness + +This directory hosts the live-Postgres + EQL bundle + ZeroKMS end-to-end harness for the @cipherstash/prisma-next example app. Seven `*.e2e.test.ts` files cover one codec or scenario each: + +- `num.e2e.test.ts` — `EncryptedDouble` round-trip; `Gt`/`Gte`/`Lt`/`Lte`/`Between`/`Asc`/`Desc`. +- `bigint.e2e.test.ts` — `EncryptedBigInt` round-trip; equality + range + sort. +- `date.e2e.test.ts` — `EncryptedDate` round-trip; date range + sort. +- `bool.e2e.test.ts` — `EncryptedBoolean` round-trip; `Eq` / `Ne` / `InArray`. +- `json.e2e.test.ts` — `EncryptedJson` round-trip + `cipherstashJsonbPathQueryFirst` / `cipherstashJsonbGet` SELECT-expression helpers. The `cipherstashJsonbPathExists` predicate clause is skipped per the known limitation tracked at [TML-2504](https://linear.app/prisma-company/issue/TML-2504). +- `str-range.e2e.test.ts` — `EncryptedString({ orderAndRange: true })` supports `Gt` + `Asc` + `Ilike` coexistence. +- `mixed.e2e.test.ts` — mixed-codec query issues the minimum SDK round-trips (one per `(table, column)`). + +## Local setup + +```bash +pnpm --filter @cipherstash/prisma-next-example test:e2e +``` + +The harness's Vitest global setup (`global-setup.ts`): + +1. Verifies the harness's Postgres container is reachable via `pg_isready` — you bring it up yourself with `docker compose -f test/e2e/docker-compose.yml up -d`; setup fails with a clear actionable error when it is not running, rather than orchestrating Docker itself. +2. Sets `DATABASE_URL` to the harness's local Postgres URL.
+3. Runs `prisma-next migration apply` against the example app (installs the cipherstash baseline migration + the `users` table).
+4. Fails fast with an actionable error when `CS_WORKSPACE_CRN` is unset — populate `.env` (see `.env.example`) with the ZeroKMS workspace CRN and the companion `CS_CLIENT_ID` / `CS_CLIENT_KEY` / `CS_CLIENT_ACCESS_KEY` credentials before running the suite.
+
+`vitest.config.ts` wires the global setup, scopes the run to `*.e2e.test.ts`, and pins `pool: 'threads'` + `maxWorkers: 1` + `isolate: false` + `fileParallelism: false` so every test file shares one Postgres connection and one CipherStash SDK encryption client (and the SDK isn't asked to run encrypts across files concurrently). Each test file truncates `users` in its `beforeAll` for clean-slate isolation.
+
+## Container
+
+The `docker-compose.yml` runs `postgres:16-alpine` on host port `54329` (non-standard to dodge a developer's locally installed Postgres on `5432`). `tmpfs` data volume so every boot starts from an empty cluster. Container name `cipherstash-e2e-postgres` avoids colliding with the workspace-root `docker-compose.yaml` (port `5433`, used by the framework's own e2e suite).
+
+## Known limitations covered by skips
+
+- **`cipherstashJsonbPathExists` predicate clause.** The EQL bundle's `jsonb_path_exists` function expects a hashed STE-VEC selector computed client-side by the CipherStash SDK's `selector(...)` API; the framework currently binds the JSONpath as a plain `pg/text@1` `ParamRef`. Predicate queries return zero rows. Tracked at [TML-2504](https://linear.app/prisma-company/issue/TML-2504); the round-trip and the two SELECT-expression helpers work correctly against the same column.
+- **`EncryptedBigInt` capped at `Number.MAX_SAFE_INTEGER`.** `@cipherstash/stack`'s SDK and ZeroKMS only accept `JsPlaintext = string | number | boolean | object | array` for plaintexts (no `bigint`); the example app's SDK adapter at `src/sdk.ts` converts `bigint → Number` with an eager `Number.MAX_SAFE_INTEGER` bounds check.
Values beyond the safe-integer range cannot be encrypted today. + +## EQL bundle quoted-identifier workaround + +`eql_v2.add_encrypted_constraint(table, column)` interpolates `%I` for both the constraint-name prefix **and** the (already double-quoted) identifier suffix, producing invalid SQL like `CONSTRAINT eql_v2_encrypted_constraint_"users"_"accountId"` whenever either name needs quoting (mixed case, reserved word, etc.). + +Worked around in the example schema by `@map`-ing `accountId` → `accountid` and `emailVerified` → `emailverified` (matching the existing `@@map("users")` workaround for the reserved-word case). File upstream + drop the workaround when the bundle is fixed. diff --git a/examples/prisma/test/e2e/bigint.e2e.test.ts b/examples/prisma/test/e2e/bigint.e2e.test.ts new file mode 100644 index 00000000..07e900a8 --- /dev/null +++ b/examples/prisma/test/e2e/bigint.e2e.test.ts @@ -0,0 +1,122 @@ +/** + * End-to-end round-trip for `EncryptedBigInt` against live + * Postgres + EQL bundle + ZeroKMS. + * + * Pins the cipherstash bigint codec's encrypt + decrypt + range + + * sort behaviour with bigint-specific assertions on top of the + * general numeric coverage in `num.e2e.test.ts`. + * + * # Known limitation: Number.MAX_SAFE_INTEGER cap + * + * The underlying `@cipherstash/stack` SDK accepts only the + * `string | number | boolean | object | array` `JsPlaintext` shape for + * `bulkEncrypt`, and ZeroKMS's `big_int` cast rejects string + * plaintexts (`Cannot convert String to BigInt`). The example SDK + * adapter therefore converts `bigint` → JS `number` and throws + * eagerly above `Number.MAX_SAFE_INTEGER` rather than silently + * truncating. Consequently the live BigInt round-trip is bounded by + * `Number.MAX_SAFE_INTEGER` (2^53 − 1) today; lifting the cap + * requires SDK work documented in `examples/prisma/ + * src/sdk.ts` (`toJsPlaintext`). The negative test below pins the + * boundary explicitly. 
+ */ + +import { + cipherstashAsc, + decryptAll, + EncryptedBigInt, + EncryptedBoolean, + EncryptedDate, + EncryptedDouble, + EncryptedJson, + EncryptedString, +} from '@cipherstash/prisma-next/runtime'; +import { beforeAll, describe, expect, it } from 'vitest'; +import { db, ensureConnected, truncateUsers } from './harness'; + +const SEED = [ + { id: 'e2e-bigint-0', accountId: 1_000_000_000_001n }, + { id: 'e2e-bigint-1', accountId: 1_000_000_000_002n }, + { id: 'e2e-bigint-2', accountId: 9_000_000_000_000_000n }, + { id: 'e2e-bigint-3', accountId: BigInt(Number.MAX_SAFE_INTEGER) }, +] as const; + +function seedRow(s: (typeof SEED)[number]) { + return { + id: s.id, + email: EncryptedString.from(`${s.id}@example.com`), + salary: EncryptedDouble.from(50_000), + accountId: EncryptedBigInt.from(s.accountId), + birthday: EncryptedDate.from(new Date('1990-01-01')), + emailVerified: EncryptedBoolean.from(true), + preferences: EncryptedJson.from({ marker: 'bigint' }), + }; +} + +describe('EncryptedBigInt e2e (live PG + EQL + ZeroKMS)', () => { + beforeAll(async () => { + await ensureConnected(); + await truncateUsers(); + await Promise.all(SEED.map((s) => db.orm.User.create(seedRow(s)))); + }); + + it('round-trips an EncryptedBigInt through bulkEncrypt + bulkDecrypt', async () => { + const rows = await db.orm.User.all(); + expect(rows).toHaveLength(SEED.length); + await decryptAll(rows); + const byId = new Map(rows.map((r) => [r.id, r] as const)); + for (const s of SEED) { + const r = byId.get(s.id); + expect(r, `seed row ${s.id} present`).toBeDefined(); + expect(r ? 
await r.accountId.decrypt() : undefined).toBe(s.accountId); + } + }); + + it('cipherstashGt filters by encrypted bigint numeric order', async () => { + const rows = await db.orm.User.where((u) => + u.accountId.cipherstashGt(1_000_000_000_002n), + ).all(); + expect(rows.map((r) => r.id).sort()).toEqual(['e2e-bigint-2', 'e2e-bigint-3']); + }); + + it('cipherstashLte includes the equality boundary', async () => { + const rows = await db.orm.User.where((u) => + u.accountId.cipherstashLte(1_000_000_000_002n), + ).all(); + expect(rows.map((r) => r.id).sort()).toEqual(['e2e-bigint-0', 'e2e-bigint-1']); + }); + + it('cipherstashBetween filters by encrypted bigint range', async () => { + const rows = await db.orm.User.where((u) => + u.accountId.cipherstashBetween(1_000_000_000_002n, 9_000_000_000_000_000n), + ).all(); + expect(rows.map((r) => r.id).sort()).toEqual(['e2e-bigint-1', 'e2e-bigint-2']); + }); + + it('cipherstashInArray returns rows whose value matches any of the supplied bigints', async () => { + const rows = await db.orm.User.where((u) => + u.accountId.cipherstashInArray([1_000_000_000_001n, BigInt(Number.MAX_SAFE_INTEGER)]), + ).all(); + expect(rows.map((r) => r.id).sort()).toEqual(['e2e-bigint-0', 'e2e-bigint-3']); + }); + + it('cipherstashAsc orders by bigint value (bare-column ORDER BY)', async () => { + const rows = await db.orm.User.orderBy((u) => cipherstashAsc(u.accountId)).all(); + expect(rows.map((r) => r.id)).toEqual([ + 'e2e-bigint-0', + 'e2e-bigint-1', + 'e2e-bigint-2', + 'e2e-bigint-3', + ]); + }); + + it('accepts bigint plaintexts above Number.MAX_SAFE_INTEGER at construction', () => { + expect(() => EncryptedBigInt.from(BigInt(Number.MAX_SAFE_INTEGER) + 1n)).not.toThrow(); + // The construction is fine — the failure surfaces at the SDK + // boundary (`toJsPlaintext`) the moment a bulk-encrypt fires for + // this envelope. 
We pin the boundary in the SDK adapter's unit + // test rather than wire a live-ZeroKMS round-trip we expect to + // fail; surfacing the limit eagerly at the call site keeps test + // signals readable. + }); +}); diff --git a/examples/prisma/test/e2e/bool.e2e.test.ts b/examples/prisma/test/e2e/bool.e2e.test.ts new file mode 100644 index 00000000..57a76c78 --- /dev/null +++ b/examples/prisma/test/e2e/bool.e2e.test.ts @@ -0,0 +1,93 @@ +/** + * End-to-end round-trip for `EncryptedBoolean` against live + * Postgres + EQL bundle + ZeroKMS. + * + * Booleans surface only the equality-trait operators (no + * order-and-range) so this file pins: + * - INSERT + decrypt round-trip recovers `true` / `false`. + * - Equality filters to the matching set. Note: `cipherstashEq` is + * the legacy single-codec operator pinned to `cipherstash/string@1`. + * For non-string equality, the trait-namespaced + * `cipherstashInArray([value])` is the canonical form (see + * `src/index.ts`'s boolean demo). We exercise + * `cipherstashInArray` (the trait-dispatched live form) and + * `cipherstashNe` (the inequality direction). 
+ */ + +import { + decryptAll, + EncryptedBigInt, + EncryptedBoolean, + EncryptedDate, + EncryptedDouble, + EncryptedJson, + EncryptedString, +} from '@cipherstash/prisma-next/runtime'; +import { beforeAll, describe, expect, it } from 'vitest'; +import { db, ensureConnected, truncateUsers } from './harness'; + +const SEED = [ + { id: 'e2e-bool-0', emailVerified: true }, + { id: 'e2e-bool-1', emailVerified: false }, + { id: 'e2e-bool-2', emailVerified: true }, + { id: 'e2e-bool-3', emailVerified: false }, +] as const; + +function seedRow(s: (typeof SEED)[number]) { + return { + id: s.id, + email: EncryptedString.from(`${s.id}@example.com`), + salary: EncryptedDouble.from(50_000), + accountId: EncryptedBigInt.from(1_000_000n), + birthday: EncryptedDate.from(new Date('1990-01-01')), + emailVerified: EncryptedBoolean.from(s.emailVerified), + preferences: EncryptedJson.from({ marker: 'bool' }), + }; +} + +describe('EncryptedBoolean e2e (live PG + EQL + ZeroKMS)', () => { + beforeAll(async () => { + await ensureConnected(); + await truncateUsers(); + await Promise.all(SEED.map((s) => db.orm.User.create(seedRow(s)))); + }); + + it('round-trips an EncryptedBoolean through bulkEncrypt + bulkDecrypt', async () => { + const rows = await db.orm.User.all(); + expect(rows).toHaveLength(SEED.length); + await decryptAll(rows); + const byId = new Map(rows.map((r) => [r.id, r] as const)); + for (const s of SEED) { + const r = byId.get(s.id); + expect(r, `seed row ${s.id} present`).toBeDefined(); + expect(r ? 
await r.emailVerified.decrypt() : undefined).toBe(s.emailVerified); + } + }); + + it('cipherstashInArray([true]) returns the verified subset', async () => { + const rows = await db.orm.User.where((u) => u.emailVerified.cipherstashInArray([true])).all(); + expect(rows.map((r) => r.id).sort()).toEqual(['e2e-bool-0', 'e2e-bool-2']); + }); + + it('cipherstashInArray([false]) returns the unverified subset', async () => { + const rows = await db.orm.User.where((u) => u.emailVerified.cipherstashInArray([false])).all(); + expect(rows.map((r) => r.id).sort()).toEqual(['e2e-bool-1', 'e2e-bool-3']); + }); + + it('cipherstashInArray([true, false]) returns the entire population', async () => { + const rows = await db.orm.User.where((u) => + u.emailVerified.cipherstashInArray([true, false]), + ).all(); + expect(rows.map((r) => r.id).sort()).toEqual([ + 'e2e-bool-0', + 'e2e-bool-1', + 'e2e-bool-2', + 'e2e-bool-3', + ]); + }); + + it('cipherstashNe(true) excludes the equality match', async () => { + const rows = await db.orm.User.where((u) => u.emailVerified.cipherstashNe(true)).all(); + expect(rows.map((r) => r.id).sort()).toEqual(['e2e-bool-1', 'e2e-bool-3']); + }); +}); diff --git a/examples/prisma/test/e2e/date.e2e.test.ts b/examples/prisma/test/e2e/date.e2e.test.ts new file mode 100644 index 00000000..ce9a7993 --- /dev/null +++ b/examples/prisma/test/e2e/date.e2e.test.ts @@ -0,0 +1,95 @@ +/** + * End-to-end round-trip for `EncryptedDate` against live + * Postgres + EQL bundle + ZeroKMS. + * + * Pins: + * - INSERT + decrypt round-trip recovers the source `Date`. + * - `cipherstashGt(<date>)` returns rows whose date is later. + * - `cipherstashAsc` orders by calendar date. + * - `cipherstashBetween` filters a closed interval. + * + * Encoded form is ISO 8601 (`.toISOString()`); both ZeroKMS and the + * EQL bundle accept the textual form for `cast_as: 'date'`.
+ */ + +import { + cipherstashAsc, + cipherstashDesc, + decryptAll, + EncryptedBigInt, + EncryptedBoolean, + EncryptedDate, + EncryptedDouble, + EncryptedJson, + EncryptedString, +} from '@cipherstash/prisma-next/runtime'; +import { beforeAll, describe, expect, it } from 'vitest'; +import { db, ensureConnected, truncateUsers } from './harness'; + +const SEED = [ + { id: 'e2e-date-0', birthday: new Date('1980-05-10') }, + { id: 'e2e-date-1', birthday: new Date('1990-04-12') }, + { id: 'e2e-date-2', birthday: new Date('2000-11-30') }, + { id: 'e2e-date-3', birthday: new Date('2010-01-01') }, +] as const; + +function seedRow(s: (typeof SEED)[number]) { + return { + id: s.id, + email: EncryptedString.from(`${s.id}@example.com`), + salary: EncryptedDouble.from(50_000), + accountId: EncryptedBigInt.from(1_000_000n), + birthday: EncryptedDate.from(s.birthday), + emailVerified: EncryptedBoolean.from(true), + preferences: EncryptedJson.from({ marker: 'date' }), + }; +} + +describe('EncryptedDate e2e (live PG + EQL + ZeroKMS)', () => { + beforeAll(async () => { + await ensureConnected(); + await truncateUsers(); + await Promise.all(SEED.map((s) => db.orm.User.create(seedRow(s)))); + }); + + it('round-trips an EncryptedDate through bulkEncrypt + bulkDecrypt', async () => { + const rows = await db.orm.User.all(); + expect(rows).toHaveLength(SEED.length); + await decryptAll(rows); + const byId = new Map(rows.map((r) => [r.id, r] as const)); + for (const s of SEED) { + const r = byId.get(s.id); + expect(r, `seed row ${s.id} present`).toBeDefined(); + const got = r ? await r.birthday.decrypt() : undefined; + // The cipherstash date codec round-trips through `cast_as: 'date'` + // which is calendar-day-precision; comparing day-equivalence is + // the meaningful assertion. 
+ expect(got).toBeInstanceOf(Date); + expect((got as Date).toISOString().slice(0, 10)).toBe(s.birthday.toISOString().slice(0, 10)); + } + }); + + it('cipherstashGt filters dates after the cutoff', async () => { + const rows = await db.orm.User.where((u) => + u.birthday.cipherstashGt(new Date('1995-01-01')), + ).all(); + expect(rows.map((r) => r.id).sort()).toEqual(['e2e-date-2', 'e2e-date-3']); + }); + + it('cipherstashBetween filters a closed date interval', async () => { + const rows = await db.orm.User.where((u) => + u.birthday.cipherstashBetween(new Date('1985-01-01'), new Date('2005-12-31')), + ).all(); + expect(rows.map((r) => r.id).sort()).toEqual(['e2e-date-1', 'e2e-date-2']); + }); + + it('cipherstashAsc orders by calendar date (bare-column ORDER BY)', async () => { + const rows = await db.orm.User.orderBy((u) => cipherstashAsc(u.birthday)).all(); + expect(rows.map((r) => r.id)).toEqual(['e2e-date-0', 'e2e-date-1', 'e2e-date-2', 'e2e-date-3']); + }); + + it('cipherstashDesc reverses the date order', async () => { + const rows = await db.orm.User.orderBy((u) => cipherstashDesc(u.birthday)).all(); + expect(rows.map((r) => r.id)).toEqual(['e2e-date-3', 'e2e-date-2', 'e2e-date-1', 'e2e-date-0']); + }); +}); diff --git a/examples/prisma/test/e2e/docker-compose.yml b/examples/prisma/test/e2e/docker-compose.yml new file mode 100644 index 00000000..74031a0e --- /dev/null +++ b/examples/prisma/test/e2e/docker-compose.yml @@ -0,0 +1,34 @@ +# Docker compose for the cipherstash live-PG + EQL + ZeroKMS e2e +# harness. Brings up a single Postgres container the test setup +# applies the cipherstash baseline migration (EQL bundle) + the app +# contract-space migration to. Container data lives in a tmpfs so +# every harness boot starts from an empty Postgres. 
+# +# Container name is `cipherstash-e2e-postgres` to avoid colliding +# with the workspace-root `docker-compose.yaml` (which uses port +# 5433 for the framework e2e suite); this harness binds to host +# port 54329 (forwarded to the container's 5432) to avoid clashing +# with a developer's locally installed Postgres on 5432. The +# harness's global setup overrides DATABASE_URL to point at this +# port at runtime. + +services: + postgres: + image: postgres:16-alpine + container_name: cipherstash-e2e-postgres + ports: + # Bind to a non-standard host port to avoid colliding with a developer's + # locally installed Postgres on 5432. The harness sets DATABASE_URL to + # point at this port at runtime. + - '54329:5432' + environment: + POSTGRES_PASSWORD: cipherstash + POSTGRES_USER: cipherstash + POSTGRES_DB: cipherstash_e2e + tmpfs: + - /var/lib/postgresql/data + healthcheck: + test: ['CMD-SHELL', 'pg_isready -U cipherstash -d cipherstash_e2e'] + interval: 1s + timeout: 5s + retries: 30 diff --git a/examples/prisma/test/e2e/global-setup.ts b/examples/prisma/test/e2e/global-setup.ts new file mode 100644 index 00000000..6d4622ee --- /dev/null +++ b/examples/prisma/test/e2e/global-setup.ts @@ -0,0 +1,134 @@ +/** + * Vitest `globalSetup` for the cipherstash live-PG + EQL + ZeroKMS + * e2e harness. Runs once at the start of the entire e2e run. + * + * Responsibilities: + * + * 1. Load the example app's `.env` into `process.env` so test + * workers inherit `CS_*` credentials and (after we override it + * below) `DATABASE_URL`. + * 2. Verify the harness's Postgres container is reachable (the + * developer is responsible for `docker compose up -d`; the + * harness reports a clear actionable error when it's not up + * rather than orchestrating Docker itself). + * 3. Apply the example app's migrations against the harness DB — + * the cipherstash baseline (EQL bundle install + per-column + * search configs) plus the application `users` table. 
The + * apply is idempotent at the marker level so a re-run on a + * warm container is a no-op. + * 4. Truncate the `users` table for a clean slate per harness + * boot. Each test file owns its own seed data with file-scoped + * ID prefixes; the truncate guards against state bleeding + * between full-run iterations of the suite. + * + * No teardown — the container lifecycle is owned by the developer + * (`docker compose down` from `examples/prisma` + * tears it down explicitly). + */ + +import { type SpawnSyncReturns, spawnSync } from 'node:child_process'; +import { fileURLToPath } from 'node:url'; +import { config as loadDotenv } from 'dotenv'; +import { dirname, resolve } from 'pathe'; + +const HARNESS_DATABASE_URL = 'postgres://cipherstash:cipherstash@localhost:54329/cipherstash_e2e'; +const POSTGRES_CONTAINER = 'cipherstash-e2e-postgres'; + +const PG_ISREADY_TIMEOUT_MS = 10_000; +const MIGRATION_APPLY_TIMEOUT_MS = 120_000; +const TRUNCATE_TIMEOUT_MS = 10_000; + +function describeSpawnFailure( + label: string, + result: SpawnSyncReturns<Buffer>, + hint?: string, +): string { + const lines = [`cipherstash e2e harness: ${label} failed.`]; + if (result.error) { + lines.push(` spawn error: ${result.error.message}`); + } + if (result.signal) { + lines.push(` killed by signal: ${result.signal}`); + } + if (typeof result.status === 'number') { + lines.push(` exit status: ${result.status}`); + } else if (!result.error && !result.signal) { + lines.push(' exit status: <unknown>'); + } + const stderr = result.stderr?.toString().trim(); + const stdout = result.stdout?.toString().trim(); + if (stderr) lines.push(`--- stderr ---\n${stderr}`); + if (stdout) lines.push(`--- stdout ---\n${stdout}`); + if (hint) lines.push(hint); + return lines.join('\n'); +} + +export default async function setup(): Promise<() => Promise<void>> { + const exampleDir = resolve(dirname(fileURLToPath(import.meta.url)), '..', '..'); + + loadDotenv({ path: resolve(exampleDir, '.env') }); + + if 
(!process.env['CS_WORKSPACE_CRN']) { + throw new Error( + 'cipherstash e2e harness: `CS_WORKSPACE_CRN` is not set. Populate `.env` ' + + '(see `.env.example`) with a ZeroKMS workspace and the three companion ' + + 'credentials before running `pnpm test:e2e`.', + ); + } + + const pgIsReady = spawnSync( + 'docker', + ['exec', POSTGRES_CONTAINER, 'pg_isready', '-U', 'cipherstash', '-d', 'cipherstash_e2e'], + { stdio: 'pipe', timeout: PG_ISREADY_TIMEOUT_MS }, + ); + if (pgIsReady.error || pgIsReady.signal || pgIsReady.status !== 0) { + throw new Error( + describeSpawnFailure( + `container \`${POSTGRES_CONTAINER}\` is not running or not accepting connections`, + pgIsReady, + 'Bring it up with:\n' + + ' docker compose -f test/e2e/docker-compose.yml up -d\n' + + '(from `examples/prisma`).', + ), + ); + } + + // Override DATABASE_URL so the CLI and the test workers both point + // at the harness container, not the developer's `.env` value (which + // is for the `pnpm start` demo loop). + process.env['DATABASE_URL'] = HARNESS_DATABASE_URL; + + const apply = spawnSync('pnpm', ['exec', 'prisma-next', 'migration', 'apply'], { + cwd: exampleDir, + stdio: 'pipe', + env: process.env, + timeout: MIGRATION_APPLY_TIMEOUT_MS, + }); + if (apply.error || apply.signal || apply.status !== 0) { + throw new Error(describeSpawnFailure('`prisma-next migration apply`', apply)); + } + + // Clean slate for the suite. The `users` table is the only data-bearing + // application table; the EQL bundle tables (`eql_v2_configuration` etc.) + // are state we want to keep. 
+ const truncate = spawnSync( + 'docker', + [ + 'exec', + POSTGRES_CONTAINER, + 'psql', + '-U', + 'cipherstash', + '-d', + 'cipherstash_e2e', + '-c', + 'TRUNCATE TABLE users', + ], + { stdio: 'pipe', timeout: TRUNCATE_TIMEOUT_MS }, + ); + if (truncate.error || truncate.signal || truncate.status !== 0) { + throw new Error(describeSpawnFailure('TRUNCATE TABLE users', truncate)); + } + + return async () => {}; +} diff --git a/examples/prisma/test/e2e/harness.ts b/examples/prisma/test/e2e/harness.ts new file mode 100644 index 00000000..46093ae7 --- /dev/null +++ b/examples/prisma/test/e2e/harness.ts @@ -0,0 +1,81 @@ +/** + * Shared harness module for the cipherstash live PG + EQL + ZeroKMS + * e2e suite. Owns the singleton runtime connection and provides + * tiny conveniences each test file calls in `beforeAll`. + * + * Lifecycle: + * - `globalSetup` (separate process) verifies Docker + applies + * migrations + truncates `users`. + * - Each test file (worker process, shared because `isolate: false` + * + `maxWorkers: 1`) calls `ensureConnected()` in `beforeAll`. + * The first caller awaits `db.connect(...)`; subsequent callers + * await the cached promise. + * + * No `afterAll` cleanup of rows is required: tests use file-scoped + * ID prefixes (`e2e-num-`, `e2e-bool-`, ...) so cross-file + * collisions are impossible. `globalSetup` truncates the table once + * per suite boot, so re-runs start clean. + * + * The harness intentionally does *not* close the runtime in any + * teardown hook. Vitest's `globalSetup` teardown runs in a different + * process, and adding an in-process teardown coordinator + * (`globalThis`-shared latch, last-file detection, ...) is + * disproportionate for a development suite. The pg pool's idle + * timeout retires its connections; the Node process exits when + * vitest is done. 
+ */ + +import { spawnSync } from 'node:child_process'; +import { db } from '../../src/db'; + +let connection: Promise<unknown> | undefined; + +export function ensureConnected(): Promise<unknown> { + if (!connection) { + const url = process.env['DATABASE_URL']; + if (!url) { + throw new Error( + 'cipherstash e2e harness: `DATABASE_URL` is not set; ' + + 'global-setup.ts should have populated it from the harness Postgres URL.', + ); + } + connection = db.connect({ url }); + } + return connection; +} + +/** + * Truncate the `users` table to give a single test file a clean + * slate. Called from `beforeAll` so per-file assertions ("expect + * exactly N rows matching X") don't bleed across files. + * + * Shells out to `docker exec ... psql -c TRUNCATE` rather than going + * through a separate `pg.Pool` to avoid pulling a second postgres + * driver into the example's deps. The container is guaranteed to be + * up — `globalSetup` would have failed the run otherwise. + */ +export function truncateUsers(): void { + const result = spawnSync( + 'docker', + [ + 'exec', + 'cipherstash-e2e-postgres', + 'psql', + '-U', + 'cipherstash', + '-d', + 'cipherstash_e2e', + '-c', + 'TRUNCATE TABLE users', + ], + { stdio: 'pipe' }, + ); + if (result.status !== 0) { + throw new Error( + `cipherstash e2e harness: TRUNCATE failed (exit ${result.status}):\n` + + `${result.stderr?.toString() ?? ''}\n${result.stdout?.toString() ?? ''}`, + ); + } +} + +export { db }; diff --git a/examples/prisma/test/e2e/json.e2e.test.ts b/examples/prisma/test/e2e/json.e2e.test.ts new file mode 100644 index 00000000..9b189edf --- /dev/null +++ b/examples/prisma/test/e2e/json.e2e.test.ts @@ -0,0 +1,131 @@ +/** + * End-to-end round-trip for `EncryptedJson` against live + * Postgres + EQL bundle + ZeroKMS. + * + * Pins: + * - INSERT + decrypt round-trip recovers the source JSON object. + * - `cipherstashJsonbPathExists('$.<key>')` filters rows by + * STE-VEC selector membership. 
+ * + * # Known limitation: STE-VEC selectors require client-side hashing + * + * The cipherstash JSON codec stores values with an STE-VEC index; + * each JSON path is represented in the index as a *hashed* selector + * computed by the CipherStash client at write time. The + * `eql_v2.jsonb_path_exists` function expects that same hashed + * selector at query time — passing a raw JSONpath string + * (`'$.theme'`) probes the index for a path that has not been + * hashed, so the lookup misses every row. + * + * The framework's current operator lowering binds the JSONpath as a + * plain `pg/text@1` `ParamRef`. The wire result is a syntactically + * valid call that the EQL function accepts and runs, but no rows + * match because the encrypted index entries are keyed by hashed + * selectors, not the raw path. Bridging this requires either: + * + * - a client-side hashing step before the SQL fires (a new + * middleware that observes JSON-path arguments and rewrites them + * via the SDK's `selector(...)` API), or + * - an EQL-side overload that accepts plaintext paths and hashes + * them server-side. + * + * Both routes are tracked as a follow-up at + * https://linear.app/prisma-company/issue/TML-2504. The test below + * pins the round-trip + decrypt behaviour (which works today) and + * the JSON SELECT-expression helpers' availability; the predicate + * side is marked as a known limitation with a `.skip` and a pointer + * to this comment, so the regression status is visible at a glance. 
+ */ + +import { + cipherstashJsonbGet, + cipherstashJsonbPathQueryFirst, + decryptAll, + EncryptedBigInt, + EncryptedBoolean, + EncryptedDate, + EncryptedDouble, + EncryptedJson, + EncryptedString, +} from '@cipherstash/prisma-next/runtime'; +import { beforeAll, describe, expect, it } from 'vitest'; +import { db, ensureConnected, truncateUsers } from './harness'; + +const SEED = [ + { + id: 'e2e-json-0', + preferences: { theme: 'dark', notifications: true, locale: 'en-US' }, + }, + { + id: 'e2e-json-1', + preferences: { theme: 'light', notifications: false, locale: 'de-DE' }, + }, + { + id: 'e2e-json-2', + preferences: { theme: 'system', notifications: true }, + }, +] as const; + +function seedRow(s: (typeof SEED)[number]) { + return { + id: s.id, + email: EncryptedString.from(`${s.id}@example.com`), + salary: EncryptedDouble.from(50_000), + accountId: EncryptedBigInt.from(1_000_000n), + birthday: EncryptedDate.from(new Date('1990-01-01')), + emailVerified: EncryptedBoolean.from(true), + preferences: EncryptedJson.from(s.preferences), + }; +} + +describe('EncryptedJson e2e (live PG + EQL + ZeroKMS)', () => { + beforeAll(async () => { + await ensureConnected(); + await truncateUsers(); + await Promise.all(SEED.map((s) => db.orm.User.create(seedRow(s)))); + }); + + it('round-trips an EncryptedJson through bulkEncrypt + bulkDecrypt', async () => { + const rows = await db.orm.User.all(); + expect(rows).toHaveLength(SEED.length); + await decryptAll(rows); + const byId = new Map(rows.map((r) => [r.id, r] as const)); + for (const s of SEED) { + const r = byId.get(s.id); + expect(r, `seed row ${s.id} present`).toBeDefined(); + expect(await r!.preferences.decrypt()).toEqual(s.preferences); + } + }); + + it.skip('cipherstashJsonbPathExists filters by JSON path (KNOWN LIMITATION: needs client-side selector hashing)', async () => { + const rows = await db.orm.User.where((u) => + u.preferences.cipherstashJsonbPathExists('$.locale'), + ).all(); + expect(rows.map((r) => 
r.id).sort()).toEqual(['e2e-json-0', 'e2e-json-1']); + }); + + it('exposes cipherstashJsonbPathQueryFirst as a typed SELECT-expression helper', () => { + // Type-level: the helper accepts an `Expression<ScopeField>` and + // returns an `Expression` typed as `cipherstash/json@1`. Wiring + // it into a `db.sql.users.select(...)` projection exercises the + // typed surface; the live SQL execution is held back until the + // STE-VEC selector hashing gap closes (see file docblock). + const projection = db.sql.users + .select((f) => ({ + id: f.id, + themeNode: cipherstashJsonbPathQueryFirst(f.preferences, '$.theme'), + })) + .build(); + expect(projection).toBeDefined(); + }); + + it('exposes cipherstashJsonbGet as a typed SELECT-expression helper', () => { + const projection = db.sql.users + .select((f) => ({ + id: f.id, + themeNode: cipherstashJsonbGet(f.preferences, 'theme'), + })) + .build(); + expect(projection).toBeDefined(); + }); +}); diff --git a/examples/prisma/test/e2e/mixed.e2e.test.ts b/examples/prisma/test/e2e/mixed.e2e.test.ts new file mode 100644 index 00000000..03c1411e --- /dev/null +++ b/examples/prisma/test/e2e/mixed.e2e.test.ts @@ -0,0 +1,213 @@ +/** + * End-to-end mixed-codec query against live Postgres + EQL bundle + * + ZeroKMS. + * + * Pins the cross-codec invariants: + * - A single query that touches multiple cipherstash columns of + * different types in WHERE + ORDER BY succeeds end-to-end. + * - Bulk-encrypt batches every search-term envelope into the + * minimum number of SDK round-trips — one `bulkEncrypt` per + * `(table, column)` group (also covered by the bulk-encrypt + * middleware unit tests in + * `packages/prisma-next/test/bulk-encrypt-middleware.test.ts`). + * + * Round-trip counts are observed by wrapping a fresh `CipherstashSdk` + * (built from `cipherstashFromStack({ contractJson }).encryptionClient`) + * with a counting decorator and threading the wrapped instance into a + * private `db` runtime. 
Concretely: + * + * - WHERE clause touches `email` (string) + `salary` (double) + + * `birthday` (date) + `emailVerified` (boolean) — four cipher + * columns, so **4 bulkEncrypt calls** for the search terms. + * - The query is a read so no row-write envelopes are encrypted. + * - The result rows carry encrypted values across six columns; a + * follow-up `decryptAll(rows)` produces **6 bulkDecrypt calls** + * (one per `(table, column)` group spanning the result set). + */ + +import { bulkEncryptMiddleware } from '@cipherstash/prisma-next/middleware'; +import { + cipherstashAsc, + createCipherstashRuntimeDescriptor, + decryptAll, + EncryptedBigInt, + EncryptedBoolean, + EncryptedDate, + EncryptedDouble, + EncryptedJson, + EncryptedString, +} from '@cipherstash/prisma-next/runtime'; +import { + cipherstashFromStack, + createCipherstashSdk, + deriveStackSchemas, +} from '@cipherstash/prisma-next/stack'; +import postgres from '@prisma-next/postgres/runtime'; +import { and } from '@prisma-next/sql-orm-client'; +import { afterAll, beforeAll, describe, expect, it } from 'vitest'; +import type { Contract } from '../../src/prisma/contract.d'; +import contractJson from '../../src/prisma/contract.json' with { type: 'json' }; +import { truncateUsers } from './harness'; + +const SEED = [ + { + id: 'e2e-mixed-0', + email: 'alice@example.com', + salary: 50_000, + birthday: new Date('1985-01-01'), + emailVerified: true, + }, + { + id: 'e2e-mixed-1', + email: 'bob@example.com', + salary: 110_000, + birthday: new Date('1990-06-15'), + emailVerified: true, + }, + { + id: 'e2e-mixed-2', + email: 'carol@example.com', + salary: 90_000, + birthday: new Date('1980-03-22'), + emailVerified: false, + }, + { + id: 'e2e-mixed-3', + email: 'dave@otherorg.test', + salary: 145_000, + birthday: new Date('1978-11-30'), + emailVerified: true, + }, +] as const; + +function seedRow(s: (typeof SEED)[number]) { + return { + id: s.id, + email: EncryptedString.from(s.email), + salary: 
EncryptedDouble.from(s.salary), + accountId: EncryptedBigInt.from(1_000_000n), + birthday: EncryptedDate.from(s.birthday), + emailVerified: EncryptedBoolean.from(s.emailVerified), + preferences: EncryptedJson.from({ marker: 'mixed' }), + }; +} + +/** + * Build a counting wrapper around a base SDK so we can observe + * `bulkEncrypt` / `bulkDecrypt` call counts independent of the + * harness's shared `db` instance. + */ +function wrapWithCounting(base: ReturnType<typeof createCipherstashSdk>) { + let bulkEncryptCalls = 0; + let bulkDecryptCalls = 0; + return { + sdk: { + ...base, + async bulkEncrypt(args: Parameters<typeof base.bulkEncrypt>[0]) { + bulkEncryptCalls += 1; + return base.bulkEncrypt(args); + }, + async bulkDecrypt(args: Parameters<typeof base.bulkDecrypt>[0]) { + bulkDecryptCalls += 1; + return base.bulkDecrypt(args); + }, + }, + counts: { + get bulkEncrypt() { + return bulkEncryptCalls; + }, + get bulkDecrypt() { + return bulkDecryptCalls; + }, + reset() { + bulkEncryptCalls = 0; + bulkDecryptCalls = 0; + }, + }, + }; +} + +describe('Mixed-codec e2e (live PG + EQL + ZeroKMS)', () => { + // Use a private `db` instance with a counting SDK so the round-trip + // assertions are insulated from any other test file that may have + // mutated the harness's shared client. + const url = + process.env['DATABASE_URL'] ?? + 'postgres://cipherstash:cipherstash@localhost:54329/cipherstash_e2e'; + let counting: ReturnType<typeof wrapWithCounting>; + let db: ReturnType<typeof postgres<Contract>>; + let runtime: { close(): Promise<void> } | undefined; + + beforeAll(async () => { + // Reuse the encryption client from `cipherstashFromStack` so the + // counting wrapper observes the same ZeroKMS workspace + schema + // surface the example app would in production. Re-derive the stack + // schemas from `contractJson` to satisfy `createCipherstashSdk`'s + // `(client, schemas)` contract. 
+ const { encryptionClient } = await cipherstashFromStack({ contractJson }); + const schemas = deriveStackSchemas(contractJson); + const baseSdk = createCipherstashSdk(encryptionClient, schemas); + counting = wrapWithCounting(baseSdk); + db = postgres<Contract>({ + contractJson, + extensions: [createCipherstashRuntimeDescriptor({ sdk: counting.sdk })], + middleware: [bulkEncryptMiddleware(counting.sdk)], + }); + runtime = (await db.connect({ url })) as { close(): Promise<void> }; + await truncateUsers(); + await Promise.all(SEED.map((s) => db.orm.User.create(seedRow(s)))); + counting.counts.reset(); + }); + + afterAll(async () => { + if (runtime) { + await runtime.close(); + } + }); + + it('executes a four-column WHERE + ordered read end-to-end', async () => { + const rows = await db.orm.User.where((u) => + and( + u.email.cipherstashIlike('%@example.com'), + u.salary.cipherstashGt(75_000), + u.birthday.cipherstashLt(new Date('2000-01-01')), + u.emailVerified.cipherstashInArray([true]), + ), + ) + .orderBy((u) => cipherstashAsc(u.salary)) + .all(); + + // Only bob (e2e-mixed-1) survives all four predicates: alice's + // salary is below the 75k cutoff, carol is unverified, and + // dave's email `dave@otherorg.test` doesn't match `%@example.com`. + expect(rows.map((r) => r.id)).toEqual(['e2e-mixed-1']); + }); + + it('groups search-term encrypts: one bulkEncrypt per (table, column)', async () => { + counting.counts.reset(); + await db.orm.User.where((u) => + and( + u.email.cipherstashIlike('%@example.com'), + u.salary.cipherstashGt(75_000), + u.birthday.cipherstashLt(new Date('2000-01-01')), + u.emailVerified.cipherstashInArray([true]), + ), + ) + .orderBy((u) => cipherstashAsc(u.salary)) + .all(); + // Four distinct (users, <column>) groups in the WHERE — one + // `bulkEncrypt` round-trip per group. ORDER BY is a column ref + // (no envelope to encrypt). No row writes, so no additional + // bulk-encrypt calls beyond the search-term batches. 
+ expect(counting.counts.bulkEncrypt).toBe(4); + }); + + it('groups result decrypts: one bulkDecrypt per (table, column)', async () => { + counting.counts.reset(); + const rows = await db.orm.User.all(); + await decryptAll(rows); + // Six encrypted columns × N rows ⇒ exactly 6 `bulkDecrypt` calls + // (one per `(users, <column>)` group). + expect(counting.counts.bulkDecrypt).toBe(6); + }); +}); diff --git a/examples/prisma/test/e2e/num.e2e.test.ts b/examples/prisma/test/e2e/num.e2e.test.ts new file mode 100644 index 00000000..ca80320c --- /dev/null +++ b/examples/prisma/test/e2e/num.e2e.test.ts @@ -0,0 +1,108 @@ +/** + * End-to-end round-trip for `EncryptedDouble` against live + * Postgres + EQL bundle + ZeroKMS. + * + * Pins: + * - INSERT + decrypt round-trip recovers the source numbers. + * - `cipherstashGt`, `cipherstashGte`, `cipherstashLt`, + * `cipherstashLte`, `cipherstashBetween` each filter rows + * correctly against the IEEE-754-encrypted column. + * - `cipherstashAsc` / `cipherstashDesc` produce numerically- + * sorted results via bare-column `ORDER BY` against the live + * EQL operator family. The cipherstash codec relies on the + * EQL bundle's overloads of `<` / `>` for `eql_v2_encrypted`, + * so an `ORDER BY <col>` clause sorts by the encrypted ORE + * value without requiring a wrapping helper. + * + * Seed: four rows with file-scoped ID prefix `e2e-num-`. The + * `beforeAll` truncates `users` first so the file's assertions + * count exact-match cardinalities (not "at-least-N"). 
+ */ + +import { + cipherstashAsc, + cipherstashDesc, + decryptAll, + EncryptedBigInt, + EncryptedBoolean, + EncryptedDate, + EncryptedDouble, + EncryptedJson, + EncryptedString, +} from '@cipherstash/prisma-next/runtime'; +import { beforeAll, describe, expect, it } from 'vitest'; +import { db, ensureConnected, truncateUsers } from './harness'; + +const SEED = [ + { id: 'e2e-num-0', salary: 50_000 }, + { id: 'e2e-num-1', salary: 95_000 }, + { id: 'e2e-num-2', salary: 120_000 }, + { id: 'e2e-num-3', salary: 200_000 }, +] as const; + +function seedRow(s: (typeof SEED)[number]) { + return { + id: s.id, + email: EncryptedString.from(`${s.id}@example.com`), + salary: EncryptedDouble.from(s.salary), + accountId: EncryptedBigInt.from(1_000_000n), + birthday: EncryptedDate.from(new Date('1990-01-01')), + emailVerified: EncryptedBoolean.from(true), + preferences: EncryptedJson.from({ marker: 'num' }), + }; +} + +describe('EncryptedDouble e2e (live PG + EQL + ZeroKMS)', () => { + beforeAll(async () => { + await ensureConnected(); + await truncateUsers(); + await Promise.all(SEED.map((s) => db.orm.User.create(seedRow(s)))); + }); + + it('round-trips an EncryptedDouble through bulkEncrypt + bulkDecrypt', async () => { + const rows = await db.orm.User.all(); + expect(rows).toHaveLength(SEED.length); + await decryptAll(rows); + const byId = new Map(rows.map((r) => [r.id, r] as const)); + for (const s of SEED) { + const r = byId.get(s.id); + expect(r, `seed row ${s.id} present`).toBeDefined(); + expect(await r!.salary.decrypt()).toBe(s.salary); + } + }); + + it('cipherstashGt filters by encrypted IEEE-754 numeric order', async () => { + const rows = await db.orm.User.where((u) => u.salary.cipherstashGt(95_000)).all(); + expect(rows.map((r) => r.id).sort()).toEqual(['e2e-num-2', 'e2e-num-3']); + }); + + it('cipherstashGte includes the equality boundary', async () => { + const rows = await db.orm.User.where((u) => u.salary.cipherstashGte(95_000)).all(); + expect(rows.map((r) => 
r.id).sort()).toEqual(['e2e-num-1', 'e2e-num-2', 'e2e-num-3']); + }); + + it('cipherstashLt filters strict-less-than', async () => { + const rows = await db.orm.User.where((u) => u.salary.cipherstashLt(120_000)).all(); + expect(rows.map((r) => r.id).sort()).toEqual(['e2e-num-0', 'e2e-num-1']); + }); + + it('cipherstashLte includes the equality boundary', async () => { + const rows = await db.orm.User.where((u) => u.salary.cipherstashLte(120_000)).all(); + expect(rows.map((r) => r.id).sort()).toEqual(['e2e-num-0', 'e2e-num-1', 'e2e-num-2']); + }); + + it('cipherstashBetween bounds inclusively on both sides', async () => { + const rows = await db.orm.User.where((u) => u.salary.cipherstashBetween(95_000, 120_000)).all(); + expect(rows.map((r) => r.id).sort()).toEqual(['e2e-num-1', 'e2e-num-2']); + }); + + it('cipherstashAsc orders by numeric value via bare-column ORDER BY', async () => { + const rows = await db.orm.User.orderBy((u) => cipherstashAsc(u.salary)).all(); + expect(rows.map((r) => r.id)).toEqual(['e2e-num-0', 'e2e-num-1', 'e2e-num-2', 'e2e-num-3']); + }); + + it('cipherstashDesc reverses the ascending order', async () => { + const rows = await db.orm.User.orderBy((u) => cipherstashDesc(u.salary)).all(); + expect(rows.map((r) => r.id)).toEqual(['e2e-num-3', 'e2e-num-2', 'e2e-num-1', 'e2e-num-0']); + }); +}); diff --git a/examples/prisma/test/e2e/str-range.e2e.test.ts b/examples/prisma/test/e2e/str-range.e2e.test.ts new file mode 100644 index 00000000..56d78df5 --- /dev/null +++ b/examples/prisma/test/e2e/str-range.e2e.test.ts @@ -0,0 +1,88 @@ +/** + * End-to-end round-trip for `EncryptedString` authored with + * `orderAndRange: true` against live Postgres + EQL bundle + + * ZeroKMS. + * + * The example schema authors `email` with the default no-args + * constructor (`cipherstash.EncryptedString()`), which opts every + * flag (`equality`, `freeTextSearch`, `orderAndRange`) into `true`. + * Pins: + * - `cipherstashGt('m')` filters lexicographically. 
+ * - `cipherstashAsc(u.email)` orders alphabetically. + * - `cipherstashIlike('%@example.com')` still works alongside the + * range queries (free-text-search trait coexists with + * order-and-range on the same column). + */ + +import { + cipherstashAsc, + cipherstashDesc, + decryptAll, + EncryptedBigInt, + EncryptedBoolean, + EncryptedDate, + EncryptedDouble, + EncryptedJson, + EncryptedString, +} from '@cipherstash/prisma-next/runtime'; +import { beforeAll, describe, expect, it } from 'vitest'; +import { db, ensureConnected, truncateUsers } from './harness'; + +const SEED = [ + { id: 'e2e-str-0', email: 'alice@example.com' }, + { id: 'e2e-str-1', email: 'bob@example.com' }, + { id: 'e2e-str-2', email: 'mallory@example.com' }, + { id: 'e2e-str-3', email: 'zoe@other.test' }, +] as const; + +function seedRow(s: (typeof SEED)[number]) { + return { + id: s.id, + email: EncryptedString.from(s.email), + salary: EncryptedDouble.from(50_000), + accountId: EncryptedBigInt.from(1_000_000n), + birthday: EncryptedDate.from(new Date('1990-01-01')), + emailVerified: EncryptedBoolean.from(true), + preferences: EncryptedJson.from({ marker: 'str-range' }), + }; +} + +describe('EncryptedString orderAndRange e2e (live PG + EQL + ZeroKMS)', () => { + beforeAll(async () => { + await ensureConnected(); + await truncateUsers(); + await Promise.all(SEED.map((s) => db.orm.User.create(seedRow(s)))); + }); + + it('round-trips an EncryptedString through bulkEncrypt + bulkDecrypt', async () => { + const rows = await db.orm.User.all(); + expect(rows).toHaveLength(SEED.length); + await decryptAll(rows); + const byId = new Map(rows.map((r) => [r.id, r] as const)); + for (const s of SEED) { + const r = byId.get(s.id); + expect(r, `seed row ${s.id} present`).toBeDefined(); + expect(r ? 
await r.email.decrypt() : undefined).toBe(s.email); + } + }); + + it('cipherstashGt filters lexicographically', async () => { + const rows = await db.orm.User.where((u) => u.email.cipherstashGt('m')).all(); + expect(rows.map((r) => r.id).sort()).toEqual(['e2e-str-2', 'e2e-str-3']); + }); + + it('cipherstashAsc orders alphabetically (bare-column ORDER BY on string)', async () => { + const rows = await db.orm.User.orderBy((u) => cipherstashAsc(u.email)).all(); + expect(rows.map((r) => r.id)).toEqual(['e2e-str-0', 'e2e-str-1', 'e2e-str-2', 'e2e-str-3']); + }); + + it('cipherstashDesc reverses the alphabetical order', async () => { + const rows = await db.orm.User.orderBy((u) => cipherstashDesc(u.email)).all(); + expect(rows.map((r) => r.id)).toEqual(['e2e-str-3', 'e2e-str-2', 'e2e-str-1', 'e2e-str-0']); + }); + + it('cipherstashIlike coexists with order-and-range on the same column', async () => { + const rows = await db.orm.User.where((u) => u.email.cipherstashIlike('%@example.com')).all(); + expect(rows.map((r) => r.id).sort()).toEqual(['e2e-str-0', 'e2e-str-1', 'e2e-str-2']); + }); +}); diff --git a/examples/prisma/test/e2e/vitest.config.ts b/examples/prisma/test/e2e/vitest.config.ts new file mode 100644 index 00000000..de8c341d --- /dev/null +++ b/examples/prisma/test/e2e/vitest.config.ts @@ -0,0 +1,22 @@ +import { defineConfig } from 'vitest/config'; + +export default defineConfig({ + test: { + include: ['test/e2e/**/*.e2e.test.ts'], + globalSetup: ['./test/e2e/global-setup.ts'], + environment: 'node', + pool: 'threads', + // Single worker, no isolation, no parallelism: every test file shares + // a single postgres client connection (via the harness's + // `ensureConnected` memo) and a single CipherStash SDK encryption + // client. Pre-emptive serialisation also keeps SDK rate-limits from + // surfacing under concurrent envelope encrypts across files. 
+ maxWorkers: 1, + isolate: false, + fileParallelism: false, + // Live SDK round-trips + per-file connect + migration apply (the + // first run on a cold container) need the long fuse. + testTimeout: 60_000, + hookTimeout: 120_000, + }, +}); diff --git a/packages/prisma-next/DEVELOPING.md b/packages/prisma-next/DEVELOPING.md index 9fb45199..efaba11a 100644 --- a/packages/prisma-next/DEVELOPING.md +++ b/packages/prisma-next/DEVELOPING.md @@ -69,7 +69,7 @@ The package centres on a shared substrate that lets every cipherstash codec be o ### `EncryptedEnvelopeBase<T>` — shared envelope superclass -`packages/3-extensions/cipherstash/src/execution/envelope-base.ts` exports an abstract `EncryptedEnvelopeBase<T>` class that holds the `#`-prefixed `EncryptedHandle<T>` slot and ships the five redaction overrides (`toJSON`, `toString`, `valueOf`, `Symbol.toPrimitive`, `Symbol.for('nodejs.util.inspect.custom')`), `expose()`, `decrypt({ signal? })`, and the post-decrypt plaintext cache. +`src/execution/envelope-base.ts` exports an abstract `EncryptedEnvelopeBase<T>` class that holds the `#`-prefixed `EncryptedHandle<T>` slot and ships the five redaction overrides (`toJSON`, `toString`, `valueOf`, `Symbol.toPrimitive`, `Symbol.for('nodejs.util.inspect.custom')`), `expose()`, `decrypt({ signal? })`, and the post-decrypt plaintext cache. Each concrete subclass: @@ -260,11 +260,9 @@ The cross-package convention (source-level discipline + bundling-isolation test, ## Tracked follow-ups -| Linear ticket | Surface | -| --- | --- | -| [TML-2388](https://linear.app/prisma-company/issue/TML-2388) | Codec-SDK binding refactor — pull the per-tenant SDK binding out of the codec factory closure into the descriptor seam so multi-tenant deployments don't re-author the codec per tenant. 
| -| Polymorphic `CipherstashSdk.decrypt` return type | One-line interface widening from `Promise<string>` to `Promise<unknown>` to mirror the bulk shape; removes a narrowing cast in `EncryptedEnvelopeBase.decrypt`. | -| [TML-2504 — Cipherstash JSONB path-exists predicate: STE-VEC selector hashing](https://linear.app/prisma-company/issue/TML-2504) | `cipherstashJsonbPathExists` against the live EQL bundle expects a hashed STE-VEC selector computed via the CipherStash SDK's `selector(...)` API; the framework currently binds the JSONpath as a plain `pg/text@1` `ParamRef`. Round-trip and the two SELECT-expression helpers (`cipherstashJsonbPathQueryFirst`, `cipherstashJsonbGet`) work; the predicate clause returns zero rows. Resolution requires either a client-side path-hashing middleware or an EQL-side plaintext-path overload. | +- **Codec-SDK binding refactor.** Pull the per-tenant SDK binding out of the codec factory closure into the descriptor seam so multi-tenant deployments don't re-author the codec per tenant. +- **Polymorphic `CipherstashSdk.decrypt` return type.** One-line interface widening from `Promise<string>` to `Promise<unknown>` to mirror the bulk shape; removes a narrowing cast in `EncryptedEnvelopeBase.decrypt`. +- **`cipherstashJsonbPathExists` predicate against the live EQL bundle.** The bundle expects a hashed STE-VEC selector computed via the CipherStash SDK's `selector(...)` API; the framework currently binds the JSONpath as a plain `pg/text@1` `ParamRef`. The two SELECT-expression helpers (`cipherstashJsonbPathQueryFirst`, `cipherstashJsonbGet`) work correctly against the same column; the predicate clause returns zero rows. Resolution requires either a client-side path-hashing middleware or an EQL-side plaintext-path overload. 
## Behavioural invariants pinned by tests @@ -331,11 +329,6 @@ The following user-facing behaviours are pinned by on-disk tests in `test/` (pac ## References -- [pgvector extension](../pgvector/README.md) — the structural precedent for codec, parameterized descriptor, and pack-meta layout. -- [ADR 202 — Codec trait system](../../../docs/architecture%20docs/adrs/ADR%20202%20-%20Codec%20trait%20system.md). -- [ADR 207 — Codec call context per-query AbortSignal and column metadata](../../../docs/architecture%20docs/adrs/ADR%20207%20-%20Codec%20call%20context%20per-query%20AbortSignal%20and%20column%20metadata.md). -- [ADR 208 — Higher-order codecs for parameterized types](../../../docs/architecture%20docs/adrs/ADR%20208%20-%20Higher-order%20codecs%20for%20parameterized%20types.md). -- [ADR 212 — Contract spaces](../../../docs/architecture%20docs/adrs/ADR%20212%20-%20Contract%20spaces.md). -- [ADR 213 — Codec lifecycle hooks](../../../docs/architecture%20docs/adrs/ADR%20213%20-%20Codec%20lifecycle%20hooks.md). -- [ADR 214 — Extension operator surface: namespaced replacement operators and the predicate/helper split](../../../docs/architecture%20docs/adrs/ADR%20214%20-%20Extension%20operator%20surface%20namespaced%20replacement%20operators.md). -- [ADR 215 — Runtime middleware lifecycle: `beforeExecute` fires before `encodeParams`](../../../docs/architecture%20docs/adrs/ADR%20215%20-%20Runtime%20middleware%20lifecycle%20beforeExecute%20before%20encodeParams.md). +- [Prisma Next encryption docs](https://cipherstash.com/docs/stack/cipherstash/encryption/prisma-next) — user-facing reference for the extension. +- [`@cipherstash/stack`](../stack/README.md) — encryption SDK and schema DSL this package adapts. +- [CipherStash EQL bundle](https://github.com/cipherstash/encrypt-query-language) — the SQL the baseline migration installs. 
diff --git a/packages/prisma-next/README.md b/packages/prisma-next/README.md index c3484b80..ba82820c 100644 --- a/packages/prisma-next/README.md +++ b/packages/prisma-next/README.md @@ -1,6 +1,6 @@ # @cipherstash/prisma-next -**Searchable field-level encryption for Postgres with [Prisma Next](https://www.npmjs.com/package/@prisma-next/cli)** — via the [EQL bundle](https://cipherstash.com/docs/stack/platform/eql). +**Searchable field-level encryption for Postgres with [Prisma Next](https://www.npmjs.com/package/prisma-next)** — powered by [`@cipherstash/stack`](../stack/README.md) and the [EQL bundle](https://cipherstash.com/docs/stack/platform/eql). Declare encrypted columns directly in `schema.prisma`, and the framework's migration system installs the EQL bundle in the same control-plane sweep that creates your tables. No separate "install EQL" step. @@ -8,8 +8,8 @@ Declare encrypted columns directly in `schema.prisma`, and the framework's migra ## Features -- 🔒 Six encrypted column types — string, double, bigint, date, boolean, JSON -- 🔍 Searchable encryption — equality, free-text search (ILIKE), range, order, JSON path +- 🔒 Six encrypted column types — `string`, `double`, `bigint`, `date`, `boolean`, `json` +- 🔍 Searchable encryption — equality, free-text search, range, order, JSON path and containment - 🎯 17 type-safe query operators (`cipherstashEq`, `cipherstashIlike`, `cipherstashGt`, `cipherstashAsc`, …) - ⚡ Bulk encrypt / bulk decrypt coalescing — one SDK round-trip per `(table, column)` group per query - 🧩 One-call setup via `cipherstashFromStack({ contractJson })` — no duplicate stack schema to maintain @@ -62,7 +62,7 @@ export const db = postgres<Contract>({ ``` ```bash -stash auth login # one-time, per developer +npx stash auth login # one-time, per developer npx prisma-next contract emit npx prisma-next migration plan --name initial npx prisma-next migration apply # installs EQL bundle + your schema @@ -102,9 +102,15 @@ See the [full 
documentation](https://cipherstash.com/docs/stack/cipherstash/encr ## Authentication -`stash auth login` runs a PKCE flow and caches credentials in your OS keychain — each developer ends up with their own identity for every encrypt / decrypt against the workspace. No `CS_*` env vars in local development. +There are 2 main ways to authenticate to CipherStash: -The four `CS_*` env vars (`CS_WORKSPACE_CRN`, `CS_CLIENT_ID`, `CS_CLIENT_KEY`, `CS_CLIENT_ACCESS_KEY`) are reserved for production deployments and CI runners. See the [authentication docs](https://cipherstash.com/docs/stack/cipherstash/encryption/prisma-next#authentication) for the full identity story. +### Local profile (Dev) + +`npx stash auth login` lets you log in via the browser and saves credentials in the CipherStash profile (`~/.cipherstash`). A key is automatically generated and granted access to the default keyset. + +### Env vars (Production) + +The four `CS_*` env vars (`CS_WORKSPACE_CRN`, `CS_CLIENT_ID`, `CS_CLIENT_KEY`, `CS_CLIENT_ACCESS_KEY`) are reserved for production deployments and CI runners. See the [authentication docs](https://cipherstash.com/docs/stack/encryption/prisma-next#authentication) for more information. ## Example @@ -119,7 +125,7 @@ See [`DEVELOPING.md`](./DEVELOPING.md) for the source layout, two-pass codec enc - 📖 [**Full docs**](https://cipherstash.com/docs/stack/cipherstash/encryption/prisma-next) — column types, operator reference, security model, known limitations. - [CipherStash EQL reference](https://cipherstash.com/docs/stack/platform/eql) — encrypted operator semantics and search-config index types. - [`@cipherstash/stack`](../stack/README.md) — encryption SDK and schema DSL. -- [Prisma Next CLI](https://www.npmjs.com/package/@prisma-next/cli) — the framework this extension plugs into. +- [Prisma Next](https://www.npmjs.com/package/prisma-next) — the framework this extension plugs into. 
## License diff --git a/packages/prisma-next/package.json b/packages/prisma-next/package.json index b5db9073..57f9a858 100644 --- a/packages/prisma-next/package.json +++ b/packages/prisma-next/package.json @@ -2,7 +2,7 @@ "name": "@cipherstash/prisma-next", "version": "0.0.0", "license": "MIT", - "author": "CipherStash <hello@cipherstash.com>", + "author": "CipherStash <support@cipherstash.com>", "description": "CipherStash extension for Prisma Next: searchable application-layer field-level encryption for Postgres, with six encrypted column types, 17 query operators, bulk encrypt/decrypt middleware, and a baseline migration that installs the vendored EQL bundle SQL byte-for-byte.", "keywords": [ "encrypted", diff --git a/packages/prisma-next/src/execution/cell-codec-factory.ts b/packages/prisma-next/src/execution/cell-codec-factory.ts index b55cfb86..53526a67 100644 --- a/packages/prisma-next/src/execution/cell-codec-factory.ts +++ b/packages/prisma-next/src/execution/cell-codec-factory.ts @@ -182,7 +182,7 @@ export class CipherstashCellCodec<E extends EncryptedEnvelopeBase<unknown>> exte `cipherstash ${this.descriptor.codecId}: decode invoked on a metadata-only codec instance that has no SDK attached. ` + 'Build a runtime codec via the parameterized descriptors returned by `createParameterizedCodecDescriptors(sdk)`, ' + `or construct the codec directly through the matching \`create*Codec(sdk)\` factory (e.g. 
\`create${this.#typeName}Codec\`) ` + - 'exported from `@prisma-next/extension-cipherstash/runtime`.', + 'exported from `@cipherstash/prisma-next/runtime`.', { codecId: this.descriptor.codecId, reason: 'cipherstash-sdk-required', diff --git a/packages/prisma-next/src/execution/operators.ts b/packages/prisma-next/src/execution/operators.ts index 68427772..33e2e16f 100644 --- a/packages/prisma-next/src/execution/operators.ts +++ b/packages/prisma-next/src/execution/operators.ts @@ -3,10 +3,7 @@ * * `cipherstashEq` and `cipherstashIlike` lower to EQL's encrypted-aware * comparison functions (`eql_v2.eq`, `eql_v2.ilike`) on - * `cipherstash/string@1`-typed columns. The lowering shape mirrors the - * canonical templates in the reference Prisma integration at - * `reference/cipherstash/stack/packages/stack/src/prisma/core/ - * operation-templates.ts`: + * `cipherstash/string@1`-typed columns: * * eql_v2.eq(<self>, <encrypted-arg>) * eql_v2.ilike(<self>, <encrypted-arg>) diff --git a/packages/prisma-next/src/exports/migration.ts b/packages/prisma-next/src/exports/migration.ts index c982daad..62643b6d 100644 --- a/packages/prisma-next/src/exports/migration.ts +++ b/packages/prisma-next/src/exports/migration.ts @@ -7,7 +7,7 @@ * * ```ts * import { Migration, MigrationCLI, createTable } from '@prisma-next/target-postgres/migration'; - * import { cipherstashAddSearchConfig } from '@prisma-next/extension-cipherstash/migration'; + * import { cipherstashAddSearchConfig } from '@cipherstash/prisma-next/migration'; * * export default class M extends Migration { * override get operations() { @@ -28,9 +28,6 @@ * `@prisma-next/target-postgres/migration`. The codec lifecycle hook * for `Encrypted<string>` columns calls these factories automatically * when planning a contract diff. - * - * @see ADR 195 — Planner IR with two renderers. - * @see ADR 213 — Codec lifecycle hooks. 
*/ export type { diff --git a/packages/prisma-next/src/exports/pack.ts b/packages/prisma-next/src/exports/pack.ts index 36080e0b..b36a91a5 100644 --- a/packages/prisma-next/src/exports/pack.ts +++ b/packages/prisma-next/src/exports/pack.ts @@ -6,8 +6,6 @@ * can enable the `cipherstash.*` PSL/TS namespace and the storage type * registration without pulling in any runtime code (envelope, SDK, * codec runtime, middleware). - * - * Mirrors `packages/3-extensions/pgvector/src/exports/pack.ts`. */ export { cipherstashPackMeta as default } from '../extension-metadata/descriptor-meta'; diff --git a/packages/prisma-next/test/bundling-isolation.test.ts b/packages/prisma-next/test/bundling-isolation.test.ts index 9698f644..674b0200 100644 --- a/packages/prisma-next/test/bundling-isolation.test.ts +++ b/packages/prisma-next/test/bundling-isolation.test.ts @@ -30,10 +30,10 @@ * set are disjoint, modulo the shared `constants-*.js` chunk * (pure literal constants — no SDK / codec / migration code). * - * The dist outputs are produced by `tsdown` from `src/exports/*.ts`. - * `@prisma-next/extension-cipherstash#test` is wired in the root - * `turbo.json` to depend on its own `build`, so the assertions below - * always read fresh dist output for the current source. + * The dist outputs are produced by `tsup` from `src/exports/*.ts`. + * The package's `turbo.json` declares `test` depends on its own + * `build`, so the assertions below always read fresh dist output for + * the current source. 
  */
 
 import { existsSync, readFileSync } from 'node:fs';
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 7a303074..2368c2dd 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -144,12 +144,18 @@ importers:
       '@types/node':
         specifier: ^22.15.12
         version: 22.19.3
+      pathe:
+        specifier: ^2.0.3
+        version: 2.0.3
       tsx:
         specifier: catalog:repo
         version: 4.19.3
       typescript:
         specifier: catalog:repo
         version: 5.6.3
+      vitest:
+        specifier: catalog:repo
+        version: 3.1.3(@types/node@22.19.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.19.3)(yaml@2.8.4)
 
 packages/bench:
   dependencies:

From a5199a0f44d2368c7645422966ef18c543d48fe5 Mon Sep 17 00:00:00 2001
From: CJ Brewer <brewercalvinj@gmail.com>
Date: Thu, 14 May 2026 08:35:05 -0600
Subject: [PATCH 4/4] fix: e2e prisma tests

---
 .github/workflows/prisma-next-e2e.yml | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/prisma-next-e2e.yml b/.github/workflows/prisma-next-e2e.yml
index 785240ad..ea778a82 100644
--- a/.github/workflows/prisma-next-e2e.yml
+++ b/.github/workflows/prisma-next-e2e.yml
@@ -83,8 +83,14 @@ jobs:
           echo "CS_CLIENT_KEY=${{ secrets.CS_CLIENT_KEY }}" >> ./examples/prisma/.env
           echo "CS_CLIENT_ACCESS_KEY=${{ secrets.CS_CLIENT_ACCESS_KEY }}" >> ./examples/prisma/.env
 
+      # Build via turbo so the `^build` dependency on
+      # `@cipherstash/stack` (which `@cipherstash/prisma-next` imports
+      # `/schema` from) is honoured. A bare
+      # `pnpm --filter @cipherstash/prisma-next build` bypasses the
+      # task graph and leaves the upstream dist/ empty, surfacing as
+      # `Cannot find module '@cipherstash/stack/schema'` from tsc.
       - name: Build @cipherstash/prisma-next
-        run: pnpm --filter @cipherstash/prisma-next build
+        run: pnpm exec turbo run build --filter @cipherstash/prisma-next
 
       - name: Emit example contract
         run: pnpm --filter @cipherstash/prisma-next-example emit