diff --git a/.env.acceptance.sample b/.env.acceptance.sample new file mode 100644 index 000000000..628cd3c14 --- /dev/null +++ b/.env.acceptance.sample @@ -0,0 +1,37 @@ +####################################### +# Acceptance Runner +####################################### +ACCEPTANCE_TARGET=local +ACCEPTANCE_PROFILE=smoke +ACCEPTANCE_BASE_URL=http://127.0.0.1:5000 +ACCEPTANCE_S3_ENDPOINT=http://127.0.0.1:5000/s3 +ACCEPTANCE_TUS_ENDPOINT=http://127.0.0.1:5000/upload/resumable +ACCEPTANCE_REGION=us-east-1 +ACCEPTANCE_RESOURCE_PREFIX=acc-local +# Matches the default dummy-data tenant; change this for non-default seeds or remote targets. +ACCEPTANCE_TENANT_ID=bjhaohmqunupljrqypxz +ACCEPTANCE_ALLOW_DESTRUCTIVE=false +ACCEPTANCE_ENABLE_ADMIN=false +ACCEPTANCE_ENABLE_CDN=false +ACCEPTANCE_ENABLE_RENDER=false +ACCEPTANCE_ENABLE_RLS_SETUP=false +ACCEPTANCE_ENABLE_VECTOR=false +ACCEPTANCE_ENABLE_ICEBERG=false +ACCEPTANCE_ENABLE_PATH_EDGES=false +ACCEPTANCE_ENABLE_WIRE=false +ACCEPTANCE_ENABLE_TUS=true +ACCEPTANCE_S3_FORCE_PATH_STYLE=true + +####################################### +# Acceptance Credentials +####################################### +ACCEPTANCE_ADMIN_API_KEY=apikey +ACCEPTANCE_AUTHENTICATED_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwic3ViIjoiMzE3ZWFkY2UtNjMxYS00NDI5LWEwYmItZjE5YTdhNTE3YjRhIiwiZW1haWwiOiJpbmlhbit0ZXN0MUBzdXBhYmFzZS5pbyIsImV4cCI6MTkzOTEwNzk4NSwiYXBwX21ldGFkYXRhIjp7InByb3ZpZGVyIjoiZW1haWwifSwidXNlcl9tZXRhZGF0YSI6e30sInJvbGUiOiJhdXRoZW50aWNhdGVkIn0.E-x3oYcHIjFCdUO1M3wKDl1Ln32mik0xdHT2PjrvN70 +ACCEPTANCE_ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoiYW5vbiIsImlhdCI6MTYxMzUzMTk4NSwiZXhwIjoxOTI5MTA3OTg1fQ.mqfi__KnQB4v6PkIjkhzfwWrYyF94MEbSC6LnuvVniE +ACCEPTANCE_SERVICE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoic2VydmljZV9yb2xlIiwiaWF0IjoxNjEzNTMxOTg1LCJleHAiOjE5MjkxMDc5ODV9.th84OKK0Iz8QchDyXZRrojmKSEZ-OuitQm_5DvLiSIc + +####################################### +# S3 Client Credentials +####################################### +ACCEPTANCE_S3_ACCESS_KEY_ID=b585f311d839730f8a980a3457be2787 +ACCEPTANCE_S3_SECRET_ACCESS_KEY=67d161a7a8a46a24a17a75b26e7724f11d56b8d49a119227c66b13b6595601fb diff --git a/.env.test.sample b/.env.test.sample index 09e57619c..6d6b26710 100644 --- a/.env.test.sample +++ b/.env.test.sample @@ -27,6 +27,11 @@ REQUEST_X_FORWARDED_HOST_REGEXP= VECTOR_ENABLED=true VECTOR_S3_BUCKETS=supa-test-local-dev ICEBERG_ENABLED=true +ICEBERG_WAREHOUSE=. 
+ICEBERG_CATALOG_URL=http://127.0.0.1:8181/v1 +ICEBERG_CATALOG_AUTH_TYPE=token +ICEBERG_CATALOG_AUTH_TOKEN=token +ICEBERG_S3_DELETE_ENABLED=true ICEBERG_BUCKET_DETECTION_MODE="BUCKET" OTEL_METRICS_ENABLED=false diff --git a/.github/workflows/acceptance.yml b/.github/workflows/acceptance.yml new file mode 100644 index 000000000..9b87f50ba --- /dev/null +++ b/.github/workflows/acceptance.yml @@ -0,0 +1,146 @@ +name: Acceptance + +on: + pull_request: + push: + branches: + - master + workflow_dispatch: + inputs: + profile: + description: Acceptance profile to run + required: true + default: smoke + type: choice + options: + - smoke + - core + - full + - wire + acceptance_environment: + description: Acceptance target to run + required: true + default: local + type: choice + options: + - local + - acceptance-remote + allow_destructive: + description: Allow destructive tests against remote targets + required: true + default: false + type: boolean + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-${{ inputs.acceptance_environment || 'local' }} + cancel-in-progress: true + +jobs: + acceptance_local: + name: Local + if: ${{ github.event_name != 'workflow_dispatch' || inputs.acceptance_environment == 'local' }} + runs-on: blacksmith-4vcpu-ubuntu-2404 + timeout-minutes: 35 + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + - uses: actions/cache@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 + with: + path: ~/.npm + key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }} + restore-keys: | + ${{ runner.os }}-node- + - name: Set up Node and npm + uses: ./.github/actions/setup-node-npm + with: + node-version: "24" + - name: Install dependencies + run: npm ci + - name: Prepare env files + run: | + cp .env.sample .env + cp .env.test.sample .env.test + cp .env.acceptance.sample .env.acceptance + - name: Typecheck acceptance suite + run: npm run acceptance:typecheck + - name: Run local acceptance profile + env: + ACCEPTANCE_PROFILE: ${{ inputs.profile || 'smoke' }} + run: | + mkdir -p data coverage/acceptance + chmod -R 777 data + npm run acceptance -- --profile "${ACCEPTANCE_PROFILE}" + - name: Upload acceptance artifacts + if: ${{ always() }} + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 + with: + name: acceptance-local + path: coverage/acceptance + if-no-files-found: ignore + + acceptance_remote: + name: Remote + if: ${{ github.event_name == 'workflow_dispatch' && inputs.acceptance_environment != 'local' }} + runs-on: blacksmith-4vcpu-ubuntu-2404 + timeout-minutes: 20 + environment: ${{ inputs.acceptance_environment }} + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + - uses: actions/cache@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 + with: + path: ~/.npm + key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }} + restore-keys: | + ${{ runner.os }}-node- + - name: Set up Node and npm + uses: ./.github/actions/setup-node-npm + with: + node-version: "24" + - name: Install dependencies + run: npm ci + - name: Typecheck acceptance suite + run: npm run acceptance:typecheck + - name: Run acceptance profile + env: + ACCEPTANCE_ALLOW_DESTRUCTIVE: ${{ inputs.allow_destructive }} + ACCEPTANCE_BASE_URL: ${{ vars.ACCEPTANCE_BASE_URL }} + ACCEPTANCE_ENABLE_ADMIN: ${{ vars.ACCEPTANCE_ENABLE_ADMIN }} + ACCEPTANCE_ENABLE_CDN: ${{ vars.ACCEPTANCE_ENABLE_CDN }} + ACCEPTANCE_ENABLE_ICEBERG: ${{ 
vars.ACCEPTANCE_ENABLE_ICEBERG }} + ACCEPTANCE_ENABLE_PATH_EDGES: ${{ vars.ACCEPTANCE_ENABLE_PATH_EDGES }} + ACCEPTANCE_ENABLE_RENDER: ${{ vars.ACCEPTANCE_ENABLE_RENDER }} + ACCEPTANCE_ENABLE_RLS_SETUP: ${{ vars.ACCEPTANCE_ENABLE_RLS_SETUP }} + ACCEPTANCE_ENABLE_TUS: ${{ vars.ACCEPTANCE_ENABLE_TUS }} + ACCEPTANCE_ENABLE_VECTOR: ${{ vars.ACCEPTANCE_ENABLE_VECTOR }} + ACCEPTANCE_ENABLE_WIRE: ${{ vars.ACCEPTANCE_ENABLE_WIRE }} + ACCEPTANCE_PROFILE: ${{ inputs.profile }} + ACCEPTANCE_REGION: ${{ vars.ACCEPTANCE_REGION || secrets.ACCEPTANCE_REGION }} + ACCEPTANCE_RESOURCE_PREFIX: ${{ vars.ACCEPTANCE_RESOURCE_PREFIX || secrets.ACCEPTANCE_RESOURCE_PREFIX }} + ACCEPTANCE_RLS_BUCKET: ${{ vars.ACCEPTANCE_RLS_BUCKET }} + ACCEPTANCE_RLS_READ_OBJECT: ${{ vars.ACCEPTANCE_RLS_READ_OBJECT }} + ACCEPTANCE_RLS_WRITE_PREFIX: ${{ vars.ACCEPTANCE_RLS_WRITE_PREFIX }} + ACCEPTANCE_S3_ENDPOINT: ${{ vars.ACCEPTANCE_S3_ENDPOINT }} + ACCEPTANCE_S3_FORCE_PATH_STYLE: ${{ vars.ACCEPTANCE_S3_FORCE_PATH_STYLE }} + ACCEPTANCE_TLS_REJECT_UNAUTHORIZED: ${{ vars.ACCEPTANCE_TLS_REJECT_UNAUTHORIZED }} + ACCEPTANCE_TENANT_ID: ${{ vars.ACCEPTANCE_TENANT_ID || secrets.ACCEPTANCE_TENANT_ID }} + ACCEPTANCE_TUS_ENDPOINT: ${{ vars.ACCEPTANCE_TUS_ENDPOINT }} + ACCEPTANCE_ADMIN_URL: ${{ vars.ACCEPTANCE_ADMIN_URL || secrets.ACCEPTANCE_ADMIN_URL }} + ACCEPTANCE_ADMIN_API_KEY: ${{ secrets.ACCEPTANCE_ADMIN_API_KEY }} + ACCEPTANCE_ANON_KEY: ${{ secrets.ACCEPTANCE_ANON_KEY }} + ACCEPTANCE_AUTHENTICATED_KEY: ${{ secrets.ACCEPTANCE_AUTHENTICATED_KEY }} + ACCEPTANCE_S3_ACCESS_KEY_ID: ${{ secrets.ACCEPTANCE_S3_ACCESS_KEY_ID }} + ACCEPTANCE_S3_SECRET_ACCESS_KEY: ${{ secrets.ACCEPTANCE_S3_SECRET_ACCESS_KEY }} + ACCEPTANCE_SERVICE_KEY: ${{ secrets.ACCEPTANCE_SERVICE_KEY }} + ACCEPTANCE_TARGET: remote + run: | + mkdir -p coverage/acceptance + npm run acceptance:run + - name: Upload acceptance artifacts + if: ${{ always() }} + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 + with: + name: acceptance-remote + path: coverage/acceptance + if-no-files-found: ignore diff --git a/acceptance/API_COVERAGE.md b/acceptance/API_COVERAGE.md new file mode 100644 index 000000000..8cd8803ca --- /dev/null +++ b/acceptance/API_COVERAGE.md @@ -0,0 +1,50 @@ +# Acceptance API Coverage + +The acceptance suite is black-box by design. Tests only use public HTTP, S3, TUS, and admin +surfaces so the same contracts can be run against the current TypeScript service or a future +Go/Rust rewrite. 
+ +## Default PR Coverage + +These run in `smoke` / `core` profiles and are suitable for pull requests against local CI: + +| Area | Covered APIs / behavior | +| ---------------------- | --------------------------------------------------------------------------------------------------------------------------------- | +| Health | `/status`, `/version` | +| REST buckets | create, get, list/search, update, empty, delete | +| REST objects | upload, update, authenticated read/head/info, public read/head/info, delete | +| REST object operations | list-v1, list-v2, signed URL, batch signed URLs, signed upload URL, copy, move, bulk delete | +| S3 buckets | CreateBucket, HeadBucket, ListBuckets, GetBucketLocation, GetBucketVersioning, DeleteBucket | +| S3 objects | PutObject, HeadObject, GetObject, Range GetObject, CopyObject, DeleteObject, DeleteObjects | +| S3 listing | ListObjectsV2, ListObjects V1 with delimiter/common prefixes | +| S3 multipart | CreateMultipartUpload, UploadPart, ListParts, CompleteMultipartUpload, UploadPartCopy, AbortMultipartUpload, ListMultipartUploads | +| TUS | OPTIONS, POST create, HEAD offset, PATCH resume, DELETE termination, full upload through `tus-js-client`, signed TUS upload | + +## Wire Profile Coverage + +These run in the `wire` profile in addition to smoke coverage: + +| Area | Covered APIs / behavior | +| ---------- | ----------------------------------------------------------------------- | +| Wire/SigV4 | raw `aws-chunked` PutObject and UploadPart, trailer-signature rejection | + +## Opt-In Coverage + +These require target-specific capabilities and are off by default: + +| Capability | Enable with | Covered APIs / behavior | +| ---------- | --------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Admin | `ACCEPTANCE_ENABLE_ADMIN=true` plus admin URL/API key | admin status, API key protection, tenant reads, tenant migration state, metrics config, queue/migration validation, JWKS validation/status, orphan scan validation, S3 credential create/list/delete | +| CDN | `ACCEPTANCE_ENABLE_CDN=true` | `/cdn/:bucket/*` cache purge | +| Render | `ACCEPTANCE_ENABLE_RENDER=true` | public, authenticated, and signed image transformation routes | +| RLS | `ACCEPTANCE_ENABLE_RLS_SETUP=true` plus anon/authenticated keys and RLS resource config | authenticated allow and anon deny for read/write on configured policies | +| Path edges | `ACCEPTANCE_ENABLE_PATH_EDGES=true` | list-v2 preservation for object names with empty path segments, only on targets whose blob backend accepts those names | +| Vector | `ACCEPTANCE_ENABLE_VECTOR=true` | vector bucket, index, put/get/list/query/delete lifecycle | +| Iceberg | `ACCEPTANCE_ENABLE_ICEBERG=true` | analytics bucket, catalog config, namespace, table create/list/load/head/drop | + +## Intentionally Gated + +Remote runs should set `ACCEPTANCE_TARGET=remote`. Destructive tests are blocked on remote targets +unless `ACCEPTANCE_ALLOW_DESTRUCTIVE=true` is set. Admin, CDN, render, vector, Iceberg, and RLS +tests are capability-gated because they depend on tenant features or credentials that are not +universally available. 
diff --git a/acceptance/README.md b/acceptance/README.md new file mode 100644 index 000000000..efc4daae7 --- /dev/null +++ b/acceptance/README.md @@ -0,0 +1,101 @@ +# Acceptance Tests + +This suite verifies Supabase Storage as a black-box service. It is intentionally separate from +`src/test`: acceptance tests must talk to a running target through HTTP, S3, TUS, or admin +endpoints only. Do not import `src/app`, use `app.inject`, or call storage/database classes here. +See [`API_COVERAGE.md`](./API_COVERAGE.md) for the API/feature coverage inventory. + +## Profiles + +- `smoke` - fast target sanity plus REST and S3 lifecycle checks. +- `core` - smoke plus broader protocol behavior such as TUS and multipart. +- `wire` - smoke plus raw HTTP/SigV4 cases such as `aws-chunked` bodies and trailer negatives. +- `full` - all acceptance tests allowed by the configured target and capability gates. + +## Run Against An Existing Target + +The runner loads `.env.acceptance`. To use a different acceptance env file, set +`ACCEPTANCE_ENV_FILE`. + +```bash +ACCEPTANCE_BASE_URL=http://127.0.0.1:5000 \ +ACCEPTANCE_S3_ENDPOINT=http://127.0.0.1:5000/s3 \ +ACCEPTANCE_SERVICE_KEY="..." \ +ACCEPTANCE_S3_ACCESS_KEY_ID="..." \ +ACCEPTANCE_S3_SECRET_ACCESS_KEY="..." \ +npm run acceptance:run -- --profile smoke +``` + +For a hosted target with a path prefix, set `ACCEPTANCE_BASE_URL` to the public storage base, +for example `https://project.supabase.co/storage/v1`. Relative URLs returned by the API are +resolved against that base so prefix behavior is covered. Set `ACCEPTANCE_TARGET=remote` for +hosted targets; destructive tests then require `ACCEPTANCE_ALLOW_DESTRUCTIVE=true`. + +For local targets on a non-default port, set `ACCEPTANCE_BASE_URL` explicitly. + +## Managed Local Run + +```bash +cp .env.sample .env +cp .env.test.sample .env.test +cp .env.acceptance.sample .env.acceptance +npm run acceptance -- --profile smoke +``` + +This restarts local infra, seeds dummy data, starts the TypeScript server from `.env.test` plus +`.env`, waits for `/status`, runs the acceptance profile, and then stops the server. Set +`ACCEPTANCE_SKIP_INFRA=true` to reuse already-running local infra. + +For local backend variants, put server/runtime changes in `.env` or `.env.test`. Keep +`.env.acceptance` limited to acceptance runner inputs such as target URLs, client credentials, +capability gates, and resource naming. + +## GitHub Environments + +The workflow dispatch `acceptance_environment` input uses `local` for the managed local run. Any +other option is treated as a GitHub Environment name and is used to populate `.env.acceptance` at +runtime. Store non-secret values such as `ACCEPTANCE_BASE_URL`, `ACCEPTANCE_REGION`, and capability +flags as environment variables, and store credentials such as `ACCEPTANCE_SERVICE_KEY` and S3 +secrets as environment secrets. + +## Useful Configuration + +| Variable | Meaning | +| --------------------------------- | ---------------------------------------------------------------------------- | +| `ACCEPTANCE_BASE_URL` | REST base URL. Defaults to `http://127.0.0.1:5000`. | +| `ACCEPTANCE_S3_ENDPOINT` | S3 endpoint. Defaults to `$ACCEPTANCE_BASE_URL/s3`. | +| `ACCEPTANCE_TUS_ENDPOINT` | TUS endpoint. Defaults to `$ACCEPTANCE_BASE_URL/upload/resumable`. | +| `ACCEPTANCE_ADMIN_URL` | Admin API base URL for admin tests. | +| `ACCEPTANCE_SERVICE_KEY` | Service role JWT for REST tests. | +| `ACCEPTANCE_S3_ACCESS_KEY_ID` | S3 protocol access key. 
| +| `ACCEPTANCE_S3_SECRET_ACCESS_KEY` | S3 protocol secret. | +| `ACCEPTANCE_REGION` | S3 signing region. Defaults to `us-east-1`. | +| `ACCEPTANCE_RESOURCE_PREFIX` | Prefix for all resources created by this run. | +| `ACCEPTANCE_ENABLE_ADMIN` | Enables admin route tests. Requires admin URL and API key. | +| `ACCEPTANCE_ENABLE_CDN` | Enables CDN purge tests. Requires purge-cache support on the target tenant. | +| `ACCEPTANCE_ENABLE_RENDER` | Enables image transformation tests. | +| `ACCEPTANCE_ENABLE_RLS_SETUP` | Enables RLS tests; requires service, anon, authenticated keys and resources. | +| `ACCEPTANCE_ENABLE_VECTOR` | Enables vector bucket API tests. | +| `ACCEPTANCE_ENABLE_ICEBERG` | Enables Iceberg catalog API tests. | +| `ACCEPTANCE_ENABLE_PATH_EDGES` | Enables object-name edge tests that require a backend accepting `//` names. | +| `ACCEPTANCE_ENABLE_WIRE` | Enables wire-level tests outside the `wire` / `full` profiles. | +| `ACCEPTANCE_RLS_BUCKET` | Bucket used by opt-in RLS tests. Defaults to local dummy `bucket2`. | +| `ACCEPTANCE_RLS_READ_OBJECT` | Existing object readable by authenticated role and denied to anon. | +| `ACCEPTANCE_RLS_WRITE_PREFIX` | Prefix where authenticated role may upload and anon may not. | +| `ACCEPTANCE_ALLOW_DESTRUCTIVE` | Required for destructive tests when `ACCEPTANCE_TARGET=remote`. | + +## HTTPS And Wire Tests + +The `wire` profile includes smoke coverage and uses a raw SigV4 client for `aws-chunked` +payloads. To verify proxy/TLS behavior, point `ACCEPTANCE_S3_ENDPOINT` at an HTTPS URL, for +example: + +```bash +ACCEPTANCE_S3_ENDPOINT=https://storage.localhost/s3 \ +ACCEPTANCE_TLS_REJECT_UNAUTHORIZED=false \ +npm run acceptance:run -- --profile wire +``` + +`ACCEPTANCE_TLS_REJECT_UNAUTHORIZED=false` sets `NODE_TLS_REJECT_UNAUTHORIZED=0` in the runner +for local self-signed certificates. Do not use it for remote runs unless the target is explicitly +provisioned for that. diff --git a/acceptance/acceptance.vitest.config.ts b/acceptance/acceptance.vitest.config.ts new file mode 100644 index 000000000..9fe20b058 --- /dev/null +++ b/acceptance/acceptance.vitest.config.ts @@ -0,0 +1,24 @@ +import path from 'node:path' +import { fileURLToPath } from 'node:url' +import { defineConfig } from 'vitest/config' + +const rootDir = path.dirname(fileURLToPath(import.meta.url)) +const artifactDir = path.resolve(rootDir, '..', 'coverage', 'acceptance') + +export default defineConfig({ + test: { + environment: 'node', + fileParallelism: false, + globals: true, + hookTimeout: 60_000, + include: ['acceptance/specs/**/*.test.ts'], + outputFile: process.env.CI + ? { + json: path.join(artifactDir, 'results.json'), + junit: path.join(artifactDir, 'junit.xml'), + } + : undefined, + reporters: process.env.CI ? ['default', 'junit', 'json'] : ['default'], + testTimeout: 60_000, + }, +}) diff --git a/acceptance/scripts/run-managed-local.ts b/acceptance/scripts/run-managed-local.ts new file mode 100644 index 000000000..843e04c87 --- /dev/null +++ b/acceptance/scripts/run-managed-local.ts @@ -0,0 +1,248 @@ +import { type ChildProcess, spawn } from 'node:child_process' +import fs from 'node:fs' +import path from 'node:path' +import process from 'node:process' +import dotenv from 'dotenv' + +// Snapshot before loading .env.acceptance so server config only comes from .env.test/.env. +const inheritedEnv = { ...process.env } +loadAcceptanceEnvFile() + +const args = process.argv.slice(2) +const profile = readArg('profile') ?? acceptanceEnv('ACCEPTANCE_PROFILE') ?? 
'smoke' +const serverEnv = loadServerEnvFiles(inheritedEnv) +const serverPort = serverEnv.SERVER_PORT || serverEnv.PORT || '5000' +const baseUrl = acceptanceEnv('ACCEPTANCE_BASE_URL') ?? `http://127.0.0.1:${serverPort}` +const acceptanceRunEnv = { + ...process.env, + ACCEPTANCE_BASE_URL: baseUrl, + ACCEPTANCE_PROFILE: profile, + ACCEPTANCE_S3_ENDPOINT: acceptanceEnv('ACCEPTANCE_S3_ENDPOINT') ?? `${baseUrl}/s3`, + ACCEPTANCE_TUS_ENDPOINT: + acceptanceEnv('ACCEPTANCE_TUS_ENDPOINT') ?? `${baseUrl}/upload/resumable`, +} + +let server: ChildProcess | undefined + +main().catch((error) => { + console.error(error) + process.exit(1) +}) + +async function main() { + try { + if (process.env.ACCEPTANCE_SKIP_INFRA !== 'true') { + await run('npm', ['run', 'infra:restart:ci'], serverEnv) + await run('npm', ['run', 'test:dummy-data'], serverEnv) + } + + server = spawn(localBin('tsx'), ['src/start/server.ts'], { + detached: process.platform !== 'win32', + env: serverEnv, + stdio: ['ignore', 'pipe', 'pipe'], + }) + prefixOutput(server.stdout, '[storage] ') + prefixOutput(server.stderr, '[storage] ') + + await waitForStatus(`${baseUrl}/status`, 60_000) + await run('npm', ['run', 'acceptance:run', '--', ...args], acceptanceRunEnv) + } finally { + if (server) { + await stopServer(server) + } + } +} + +function run(cmd: string, runArgs: string[], runEnv: NodeJS.ProcessEnv): Promise { + return new Promise((resolve, reject) => { + const child = spawn(command(cmd), runArgs, { + env: runEnv, + stdio: 'inherit', + }) + + child.on('exit', (code, signal) => { + if (code === 0) { + resolve() + } else if (signal) { + reject(new Error(`${cmd} ${runArgs.join(' ')} terminated with ${signal}`)) + } else { + reject(new Error(`${cmd} ${runArgs.join(' ')} exited with ${code}`)) + } + }) + child.on('error', reject) + }) +} + +async function waitForStatus(url: string, timeoutMs: number) { + const started = Date.now() + let lastError: unknown + + while (Date.now() - started < timeoutMs) { + let response: Response | undefined + + try { + response = await fetch(url) + if (response.status === 200) { + return + } + } catch (error) { + lastError = error + } finally { + await response?.body?.cancel() + } + + await new Promise((resolve) => setTimeout(resolve, 500)) + } + + throw new Error(`Timed out waiting for ${url}: ${String(lastError)}`) +} + +async function stopServer(child: ChildProcess) { + if (hasExited(child)) { + return + } + + if (process.platform === 'win32') { + const exitedAfterKill = waitForExit(child, 5_000) + child.kill() + + if (!(await exitedAfterKill)) { + process.stderr.write('[storage] server did not exit after kill\n') + } + return + } + + const exitedAfterTerminate = waitForExit(child, 5_000) + killProcessTree(child, 'SIGTERM') + + if (await exitedAfterTerminate) { + return + } + + process.stderr.write('[storage] server did not exit after SIGTERM; sending SIGKILL\n') + + const exitedAfterKill = waitForExit(child, 2_000) + killProcessTree(child, 'SIGKILL') + + if (!(await exitedAfterKill)) { + process.stderr.write('[storage] server did not exit after SIGKILL\n') + } +} + +function waitForExit(child: ChildProcess, timeoutMs: number): Promise { + if (hasExited(child)) { + return Promise.resolve(true) + } + + return new Promise((resolve) => { + let timer: NodeJS.Timeout + + function cleanup() { + clearTimeout(timer) + child.off('exit', onExit) + } + + function onExit() { + cleanup() + resolve(true) + } + + child.once('exit', onExit) + timer = setTimeout(() => { + cleanup() + resolve(hasExited(child)) + }, timeoutMs) 
+ }) +} + +function hasExited(child: ChildProcess): boolean { + return child.exitCode !== null || child.signalCode !== null +} + +function killProcessTree(child: ChildProcess, signal: NodeJS.Signals) { + if (process.platform === 'win32' || !child.pid) { + child.kill(signal) + return + } + + try { + process.kill(-child.pid, signal) + } catch (error) { + if (!isNoSuchProcessError(error)) { + throw error + } + } +} + +function isNoSuchProcessError(error: unknown): boolean { + return typeof error === 'object' && error !== null && 'code' in error && error.code === 'ESRCH' +} + +function readArg(name: string) { + const flag = `--${name}` + const equalsPrefix = `${flag}=` + + for (let index = 0; index < args.length; index++) { + if (args[index] === flag) { + return args[index + 1] + } + if (args[index].startsWith(equalsPrefix)) { + return args[index].slice(equalsPrefix.length) + } + } + + return undefined +} + +function prefixOutput(stream: NodeJS.ReadableStream | null, prefix: string) { + stream?.setEncoding('utf8') + stream?.on('data', (chunk: string) => { + for (const line of chunk.split(/\r?\n/)) { + if (line) { + process.stderr.write(`${prefix}${line}\n`) + } + } + }) +} + +function command(cmd: string) { + return process.platform === 'win32' ? `${cmd}.cmd` : cmd +} + +function localBin(cmd: string) { + return path.resolve('node_modules', '.bin', command(cmd)) +} + +function acceptanceEnv(name: string): string | undefined { + const value = process.env[name] + return value === undefined || value === '' ? undefined : value +} + +function loadAcceptanceEnvFile() { + const acceptanceEnvPath = process.env.ACCEPTANCE_ENV_FILE ?? '.env.acceptance' + + dotenv.config({ path: path.resolve(acceptanceEnvPath), override: false }) +} + +function loadServerEnvFiles(baseEnv: NodeJS.ProcessEnv): NodeJS.ProcessEnv { + const env = { ...baseEnv } + + loadEnvFileInto(env, '.env.test') + loadEnvFileInto(env, '.env') + + return env +} + +function loadEnvFileInto(env: NodeJS.ProcessEnv, envPath: string) { + const resolvedPath = path.resolve(envPath) + if (!fs.existsSync(resolvedPath)) { + return + } + + const parsed = dotenv.parse(fs.readFileSync(resolvedPath)) + for (const [name, value] of Object.entries(parsed)) { + if (env[name] === undefined) { + env[name] = value + } + } +} diff --git a/acceptance/scripts/run.ts b/acceptance/scripts/run.ts new file mode 100644 index 000000000..f5e7dac86 --- /dev/null +++ b/acceptance/scripts/run.ts @@ -0,0 +1,84 @@ +import { spawn } from 'node:child_process' +import path from 'node:path' +import dotenv from 'dotenv' + +loadEnvFiles() + +const passthroughArgs: string[] = [] +const env = { ...process.env } + +for (let index = 2; index < process.argv.length; index++) { + const arg = process.argv[index] + const next = process.argv[index + 1] + + if (arg === '--profile' && next) { + env.ACCEPTANCE_PROFILE = next + index++ + continue + } + + if (arg.startsWith('--profile=')) { + env.ACCEPTANCE_PROFILE = arg.slice('--profile='.length) + continue + } + + if (arg === '--target' && next) { + env.ACCEPTANCE_TARGET = next + index++ + continue + } + + if (arg.startsWith('--target=')) { + env.ACCEPTANCE_TARGET = arg.slice('--target='.length) + continue + } + + if (arg === '--base-url' && next) { + env.ACCEPTANCE_BASE_URL = next + index++ + continue + } + + if (arg.startsWith('--base-url=')) { + env.ACCEPTANCE_BASE_URL = arg.slice('--base-url='.length) + continue + } + + passthroughArgs.push(arg) +} + +if (env.ACCEPTANCE_TLS_REJECT_UNAUTHORIZED === 'false') { + 
env.NODE_TLS_REJECT_UNAUTHORIZED = '0' +} + +const child = spawn( + localBin('vitest'), + ['run', '--config', 'acceptance/acceptance.vitest.config.ts', ...passthroughArgs], + { + env, + stdio: 'inherit', + } +) + +child.on('exit', (code, signal) => { + if (signal) { + process.kill(process.pid, signal) + return + } + + process.exit(code ?? 1) +}) + +function loadEnvFiles() { + const acceptanceEnvPath = process.env.ACCEPTANCE_ENV_FILE ?? '.env.acceptance' + + dotenv.config({ path: path.resolve(acceptanceEnvPath), override: false }) +} + +function command(cmd: string) { + return process.platform === 'win32' ? `${cmd}.cmd` : cmd +} + +function localBin(cmd: string) { + return path.resolve('node_modules', '.bin', command(cmd)) +} diff --git a/acceptance/specs/admin-extended.test.ts b/acceptance/specs/admin-extended.test.ts new file mode 100644 index 000000000..41d14c786 --- /dev/null +++ b/acceptance/specs/admin-extended.test.ts @@ -0,0 +1,207 @@ +import { describeAcceptance, getAcceptanceConfig, requireConfigValue } from '../support/config' +import { AcceptanceHttpClient, createAdminClient } from '../support/http' + +interface TenantSummary { + id: string +} + +interface MetricsConfigResponse { + metrics: Array<{ + enabled: boolean + name: string + }> +} + +interface TenantDetailResponse { + capabilities?: Record + features?: Record +} + +interface S3CredentialResponse { + access_key: string + description: string + id: string + secret_key: string +} + +interface S3CredentialListItem { + description?: string + id: string +} + +describeAcceptance( + 'extended admin API contract', + { + destructive: true, + profiles: ['full'], + requires: ['admin'], + }, + () => { + it('covers tenant reads, migration and queue validation, metrics config, JWKS validation, orphan validation, and S3 credential CRUD', async () => { + const config = getAcceptanceConfig() + const client = createAdminClient() + const headers = { + apikey: requireConfigValue(config.adminApiKey, 'ACCEPTANCE_ADMIN_API_KEY'), + } + const tenantId = await resolveTenantId(client, headers) + const credentialDescription = `acceptance-${config.runId}` + const credentialIds = new Set() + + try { + const tenant = await client.request('GET', `/tenants/${tenantId}`, { + expectedStatus: 200, + headers, + }) + expect(tenant.json?.features).toBeTruthy() + expect(tenant.json?.capabilities).toBeTruthy() + + const migrations = await client.request('GET', `/tenants/${tenantId}/migrations`, { + expectedStatus: 200, + headers, + }) + expect(migrations.json).toBeTruthy() + + const metrics = await client.request('GET', '/metrics/config', { + expectedStatus: 200, + headers, + }) + expect(Array.isArray(metrics.json?.metrics)).toBe(true) + + await client.request('GET', '/migrations/failed?cursor=not-a-number', { + expectedStatus: 400, + headers, + }) + + await client.request('POST', '/migrations/reset/fleet', { + body: { + untilMigration: 'not-a-migration', + }, + expectedStatus: 400, + headers, + }) + + await client.request('POST', '/queue/move', { + body: {}, + expectedStatus: 400, + headers, + }) + + await client.request('POST', `/tenants/${tenantId}/jwks`, { + body: { + jwk: { + kty: 'unsupported', + }, + kind: 'acceptance_invalid', + }, + expectedStatus: 400, + headers, + }) + + const jwksStatus = await client.request('GET', '/tenants/jwks/generate-all-missing', { + expectedStatus: 200, + headers, + }) + expect(jwksStatus.json).toBeTruthy() + + await client.request( + 'GET', + 
`/tenants/${tenantId}/buckets/acceptance-nonexistent/orphan-objects?before=not-a-date`, + { + expectedStatus: 400, + headers, + } + ) + + const credential = await client.request( + 'POST', + `/s3/${tenantId}/credentials`, + { + body: { + claims: { + role: 'service_role', + sub: `acceptance-${config.runId}`, + }, + description: credentialDescription, + }, + expectedStatus: 201, + headers, + } + ) + const credentialId = credential.json?.id + expect(credential.json?.access_key).toBeTruthy() + expect(credential.json?.secret_key).toBeTruthy() + expect(credentialId).toBeTruthy() + if (credentialId) { + credentialIds.add(credentialId) + } + + const credentials = await client.request( + 'GET', + `/s3/${tenantId}/credentials`, + { + expectedStatus: 200, + headers, + } + ) + expect(credentials.json?.map((item) => item.id)).toContain(credentialId) + } finally { + await cleanupS3Credentials(client, tenantId, headers, credentialIds, credentialDescription) + } + }) + } +) + +async function resolveTenantId( + client: AcceptanceHttpClient, + headers: Record +): Promise { + const configuredTenantId = getAcceptanceConfig().tenantId + if (configuredTenantId) { + return configuredTenantId + } + + const tenants = await client.request('GET', '/tenants', { + expectedStatus: 200, + headers, + }) + const tenantId = tenants.json?.[0]?.id + if (!tenantId) { + throw new Error('Admin acceptance tests require ACCEPTANCE_TENANT_ID or at least one tenant') + } + + return tenantId +} + +async function cleanupS3Credentials( + client: AcceptanceHttpClient, + tenantId: string, + headers: Record, + credentialIds: Set, + credentialDescription: string +) { + await client + .request('GET', `/s3/${tenantId}/credentials`, { + expectedStatus: 200, + headers, + }) + .then((credentials) => { + for (const credential of credentials.json ?? 
[]) { + if (credential.description === credentialDescription) { + credentialIds.add(credential.id) + } + } + }) + .catch(() => undefined) + + for (const credentialId of credentialIds) { + await client + .request('DELETE', `/s3/${tenantId}/credentials`, { + body: { + id: credentialId, + }, + expectedStatus: [204, 400, 404], + headers, + }) + .catch(() => undefined) + } +} diff --git a/acceptance/specs/cdn-render.test.ts b/acceptance/specs/cdn-render.test.ts new file mode 100644 index 000000000..1c80d7021 --- /dev/null +++ b/acceptance/specs/cdn-render.test.ts @@ -0,0 +1,158 @@ +import { + describeAcceptance, + encodePathSegments, + getAcceptanceConfig, + joinUrl, +} from '../support/config' +import { createRestClient } from '../support/http' +import { + cleanupRestResources, + createRestBucket, + requireServiceKey, + uniqueBucketName, + uniqueObjectKey, + uploadRestObject, +} from '../support/resources' + +interface SignedUrlResponse { + signedURL: string +} + +interface CdnPurgeResponse { + message: string + statusCode: string +} + +const onePixelPng = new Uint8Array( + Buffer.from( + 'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/x8AAwMCAO+/p94AAAAASUVORK5CYII=', + 'base64' + ) +) + +describeAcceptance( + 'CDN cache contract', + { + destructive: true, + profiles: ['full'], + requires: ['cdn'], + }, + () => { + it('purges an object cache entry', async () => { + const client = createRestClient() + const bucketName = uniqueBucketName('cdn') + const objectKey = uniqueObjectKey('cdn') + + try { + await createRestBucket(bucketName, { isPublic: true }) + await uploadRestObject(bucketName, objectKey, 'cdn-purge-target') + + const purge = await client.request( + 'DELETE', + `/cdn/${bucketName}/${encodePathSegments(objectKey)}`, + { + expectedStatus: 200, + token: requireServiceKey(), + } + ) + expect(purge.json).toMatchObject({ + message: 'success', + statusCode: '200', + }) + } finally { + await cleanupRestResources(bucketName, [objectKey], client) + } + }) + } +) + +describeAcceptance( + 'image rendering contract', + { + destructive: true, + profiles: ['full'], + requires: ['render'], + }, + () => { + it('renders public, authenticated, and signed transformed images', async () => { + const config = getAcceptanceConfig() + const client = createRestClient() + const token = requireServiceKey(config) + const bucketName = uniqueBucketName('render') + const objectKey = uniqueObjectKey('render', 'png') + + try { + await createRestBucket(bucketName, { isPublic: true }) + await uploadRestObject(bucketName, objectKey, onePixelPng, { + contentType: 'image/png', + }) + + await expectRenderedImage( + joinUrl( + config.baseUrl, + `/render/image/public/${bucketName}/${encodePathSegments(objectKey)}?width=1&height=1` + ) + ) + + await expectRenderedImage( + joinUrl( + config.baseUrl, + `/render/image/authenticated/${bucketName}/${encodePathSegments(objectKey)}?width=1&height=1` + ), + token + ) + + const signed = await client.request( + 'POST', + `/object/sign/${bucketName}/${encodePathSegments(objectKey)}`, + { + body: { + expiresIn: 60, + transform: { + height: 1, + width: 1, + }, + }, + expectedStatus: 200, + token, + } + ) + const signedUrl = new URL(joinUrl(config.baseUrl, signed.json?.signedURL ?? '')) + const signedToken = signedUrl.searchParams.get('token') + expect(signedToken).toBeTruthy() + + await expectRenderedImage( + joinUrl( + config.baseUrl, + `/render/image/sign/${bucketName}/${encodePathSegments(objectKey)}?token=${encodeURIComponent( + signedToken ?? 
'' + )}` + ) + ) + } finally { + await cleanupRestResources(bucketName, [objectKey], client) + } + }) + } +) + +async function expectRenderedImage(url: string, token?: string) { + let response: Response | undefined + try { + response = await fetch(url, { + headers: token + ? { + authorization: `Bearer ${token}`, + } + : undefined, + }) + + expect(response.status).toBe(200) + expect(response.headers.get('content-type')).toMatch(/^image\//) + expect((await response.arrayBuffer()).byteLength).toBeGreaterThan(0) + } finally { + if (response && !response.bodyUsed) { + await response.body?.cancel() + } + } +} diff --git a/acceptance/specs/health.test.ts b/acceptance/specs/health.test.ts new file mode 100644 index 000000000..387fe803c --- /dev/null +++ b/acceptance/specs/health.test.ts @@ -0,0 +1,42 @@ +import { describeAcceptance, getAcceptanceConfig, requireConfigValue } from '../support/config' +import { createAdminClient, createRestClient } from '../support/http' + +describeAcceptance( + 'target health', + { + profiles: ['smoke'], + }, + () => { + it('serves status and version endpoints', async () => { + const client = createRestClient() + + await client.request('GET', '/status', { expectedStatus: 200 }) + const version = await client.request('GET', '/version', { expectedStatus: 200 }) + + expect(version.body.trim().length).toBeGreaterThan(0) + }) + } +) + +describeAcceptance( + 'admin target health', + { + profiles: ['smoke', 'core', 'full'], + requires: ['admin'], + }, + () => { + it('serves admin status and protects admin routes with an API key', async () => { + const config = getAcceptanceConfig() + const client = createAdminClient() + + await client.request('GET', '/status', { expectedStatus: 200 }) + await client.request('GET', '/tenants', { expectedStatus: 401 }) + await client.request('GET', '/tenants', { + expectedStatus: 200, + headers: { + apikey: requireConfigValue(config.adminApiKey, 'ACCEPTANCE_ADMIN_API_KEY'), + }, + }) + }) + } +) diff --git a/acceptance/specs/iceberg.test.ts b/acceptance/specs/iceberg.test.ts new file mode 100644 index 000000000..31da7ce1b --- /dev/null +++ b/acceptance/specs/iceberg.test.ts @@ -0,0 +1,230 @@ +import { randomUUID } from 'node:crypto' +import { describeAcceptance, getAcceptanceConfig } from '../support/config' +import { createRestClient } from '../support/http' +import { requireServiceKey, uniqueBucketName } from '../support/resources' + +interface IcebergBucket { + id: string + name: string +} + +interface IcebergNamespaceList { + namespaces?: string[][] +} + +interface IcebergTableList { + identifiers?: Array<{ + name: string + namespace: string[] + }> +} + +interface IcebergCatalogConfig { + defaults?: { + prefix?: string + } + overrides?: { + prefix?: string + } +} + +interface IcebergNamespaceResponse { + namespace?: string[] + properties?: Record +} + +interface IcebergTableResponse { + metadata?: Record + 'metadata-location'?: string +} + +describeAcceptance( + 'Iceberg catalog contract', + { + destructive: true, + profiles: ['full'], + requires: ['iceberg'], + }, + () => { + it('covers analytics buckets, catalog config, namespaces, and table lifecycle', async () => { + const client = createRestClient() + const token = requireServiceKey(getAcceptanceConfig()) + const bucketName = uniqueBucketName('ice') + const suffix = randomUUID().replace(/-/g, '').slice(0, 12) + const namespaceName = `ns${suffix}` + const tableName = `tbl${suffix}` + let namespaceCreated = false + let tableCreated = false + + try { + const createdBucket = await 
client.request('POST', '/iceberg/bucket', { + body: { + name: bucketName, + }, + expectedStatus: 200, + token, + }) + expect(createdBucket.json?.name).toBe(bucketName) + + const buckets = await client.request( + 'GET', + `/iceberg/bucket?search=${bucketName}`, + { + expectedStatus: 200, + token, + } + ) + expect(buckets.json?.map((bucket) => bucket.name)).toContain(bucketName) + + const catalogConfig = await client.request( + 'GET', + `/iceberg/v1/config?warehouse=${bucketName}`, + { + expectedStatus: 200, + token, + } + ) + expect(catalogConfig.json?.defaults?.prefix).toBe(bucketName) + expect(catalogConfig.json?.overrides?.prefix).toBe(bucketName) + + const createdNamespace = await client.request( + 'POST', + `/iceberg/v1/${bucketName}/namespaces`, + { + body: { + namespace: namespaceName, + properties: { + purpose: 'acceptance', + }, + }, + expectedStatus: 200, + token, + } + ) + namespaceCreated = true + expect(createdNamespace.json?.namespace).toEqual([namespaceName]) + + await client.request('HEAD', `/iceberg/v1/${bucketName}/namespaces/${namespaceName}`, { + expectedStatus: 204, + token, + }) + + const namespace = await client.request( + 'GET', + `/iceberg/v1/${bucketName}/namespaces/${namespaceName}`, + { + expectedStatus: 200, + token, + } + ) + expect(namespace.json?.namespace).toEqual([namespaceName]) + + const namespaces = await client.request( + 'GET', + `/iceberg/v1/${bucketName}/namespaces`, + { + expectedStatus: 200, + token, + } + ) + expect(namespaces.json?.namespaces?.flat()).toContain(namespaceName) + + const createdTable = await client.request( + 'POST', + `/iceberg/v1/${bucketName}/namespaces/${namespaceName}/tables`, + { + body: { + name: tableName, + properties: { + purpose: 'acceptance', + }, + schema: { + fields: [ + { + id: 1, + name: 'id', + required: false, + type: 'long', + }, + { + id: 2, + name: 'name', + required: false, + type: 'string', + }, + ], + 'identifier-field-ids': [], + 'schema-id': 0, + type: 'struct', + }, + spec: { + fields: [], + 'spec-id': 0, + }, + 'stage-create': false, + 'write-order': { + fields: [], + 'order-id': 0, + }, + }, + expectedStatus: 200, + token, + } + ) + tableCreated = true + expect(createdTable.json?.metadata).toBeTruthy() + expect(createdTable.json?.['metadata-location']).toBeTruthy() + + const tables = await client.request( + 'GET', + `/iceberg/v1/${bucketName}/namespaces/${namespaceName}/tables`, + { + expectedStatus: 200, + token, + } + ) + expect(tables.json?.identifiers?.map((table) => table.name)).toContain(tableName) + + const table = await client.request( + 'GET', + `/iceberg/v1/${bucketName}/namespaces/${namespaceName}/tables/${tableName}`, + { + expectedStatus: 200, + token, + } + ) + expect(table.json?.metadata).toBeTruthy() + expect(table.json?.['metadata-location']).toBeTruthy() + } finally { + if (tableCreated) { + await client + .request( + 'DELETE', + `/iceberg/v1/${bucketName}/namespaces/${namespaceName}/tables/${tableName}`, + { + expectedStatus: [204, 400, 404], + token, + } + ) + .catch(() => undefined) + } + + if (namespaceCreated) { + await client + .request('DELETE', `/iceberg/v1/${bucketName}/namespaces/${namespaceName}`, { + expectedStatus: [204, 400, 404], + token, + }) + .catch(() => undefined) + } + + await client + .request('DELETE', `/iceberg/bucket/${bucketName}`, { + expectedStatus: [200, 400, 404], + token, + }) + .catch(() => undefined) + } + }) + } +) diff --git a/acceptance/specs/rest-extended.test.ts b/acceptance/specs/rest-extended.test.ts new file mode 100644 index 
000000000..394af9d8c --- /dev/null +++ b/acceptance/specs/rest-extended.test.ts @@ -0,0 +1,287 @@ +import { + describeAcceptance, + encodePathSegments, + getAcceptanceConfig, + joinUrl, +} from '../support/config' +import { createRestClient } from '../support/http' +import { + cleanupRestResources, + createRestBucket, + requireServiceKey, + uniqueBucketName, + uniqueObjectKey, + uploadRestObject, +} from '../support/resources' + +interface BucketResponse { + allowed_mime_types?: string[] | null + file_size_limit?: number | string | null + id: string + name: string + public: boolean +} + +interface ObjectInfoResponse { + bucket_id?: string + name?: string +} + +interface SignedUrlBatchItem { + error: string | null + path: string + signedURL: string | null +} + +interface SignedUploadResponse { + token: string + url: string +} + +interface ListObjectsV1Item { + name: string +} + +describeAcceptance( + 'extended REST bucket and object contract', + { + destructive: true, + profiles: ['core'], + }, + () => { + it('empties and deletes an empty bucket', async () => { + const client = createRestClient() + const token = requireServiceKey() + const bucketName = uniqueBucketName('empty') + + try { + await createRestBucket(bucketName) + await client.request('POST', `/bucket/${bucketName}/empty`, { + expectedStatus: 200, + token, + }) + await client.request('DELETE', `/bucket/${bucketName}`, { + expectedStatus: 200, + token, + }) + } finally { + await cleanupRestResources(bucketName, [], client) + } + }) + + it('covers bucket metadata, public/info reads, signed upload, copy, move, update, bulk delete, and list-v1', async () => { + const config = getAcceptanceConfig() + const client = createRestClient() + const token = requireServiceKey(config) + const bucketName = uniqueBucketName('restx') + const objectKey = uniqueObjectKey('restx') + const signedUploadKey = uniqueObjectKey('signed-upload') + const copyKey = uniqueObjectKey('copy') + const movedKey = uniqueObjectKey('moved') + const payload = `acceptance-rest-extended-${config.runId}` + const updatedPayload = `${payload}-updated` + const signedPayload = `${payload}-signed` + const trackedKeys = [objectKey, signedUploadKey, copyKey, movedKey] + + try { + await createRestBucket(bucketName, { isPublic: false }) + + const bucket = await client.request('GET', `/bucket/${bucketName}`, { + expectedStatus: 200, + token, + }) + expect(bucket.json?.id).toBe(bucketName) + expect(bucket.json?.public).toBe(false) + + const listedBuckets = await client.request( + 'GET', + `/bucket?search=${bucketName}`, + { + expectedStatus: 200, + token, + } + ) + expect(listedBuckets.json?.map((listed) => listed.id)).toContain(bucketName) + + await client.request('PUT', `/bucket/${bucketName}`, { + body: { + allowed_mime_types: ['text/plain'], + file_size_limit: 1_000_000, + public: true, + }, + expectedStatus: 200, + token, + }) + + await uploadRestObject(bucketName, objectKey, payload) + + const publicRead = await client.request( + 'GET', + `/object/public/${bucketName}/${encodePathSegments(objectKey)}`, + { expectedStatus: 200 } + ) + expect(publicRead.body).toBe(payload) + + const publicHead = await client.request( + 'HEAD', + `/object/public/${bucketName}/${encodePathSegments(objectKey)}`, + { expectedStatus: 200 } + ) + expect(Number(publicHead.headers.get('content-length'))).toBe(payload.length) + + const publicInfo = await client.request( + 'GET', + `/object/info/public/${bucketName}/${encodePathSegments(objectKey)}`, + { expectedStatus: 200 } + ) + 
expect(publicInfo.json?.name).toBe(objectKey) + + const authInfo = await client.request( + 'GET', + `/object/info/authenticated/${bucketName}/${encodePathSegments(objectKey)}`, + { expectedStatus: 200, token } + ) + expect(authInfo.json?.bucket_id).toBe(bucketName) + + await client.request('HEAD', `/object/${bucketName}/${encodePathSegments(objectKey)}`, { + expectedStatus: 200, + token, + }) + + const objectInfo = await client.request( + 'GET', + `/object/info/${bucketName}/${encodePathSegments(objectKey)}`, + { expectedStatus: 200, token } + ) + expect(objectInfo.json?.name).toBe(objectKey) + + const legacyList = await client.request( + 'POST', + `/object/list/${bucketName}`, + { + body: { + limit: 100, + prefix: `${config.resourcePrefix}/`, + sortBy: { + column: 'name', + order: 'asc', + }, + }, + expectedStatus: 200, + token, + } + ) + expect(legacyList.json?.map((object) => object.name)).toContain( + objectKey.replace(`${config.resourcePrefix}/`, '') + ) + + const batchSigned = await client.request( + 'POST', + `/object/sign/${bucketName}`, + { + body: { + expiresIn: 60, + paths: [objectKey, `${objectKey}.missing`], + }, + expectedStatus: 200, + token, + } + ) + expect(batchSigned.json?.find((item) => item.path === objectKey)?.signedURL).toBeTruthy() + + const signedUpload = await client.request( + 'POST', + `/object/upload/sign/${bucketName}/${encodePathSegments(signedUploadKey)}`, + { + expectedStatus: 200, + headers: { + 'content-type': 'text/plain', + 'x-upsert': 'true', + }, + token, + } + ) + expect(signedUpload.json?.token).toBeTruthy() + + let signedUploadResponse: Response | undefined + try { + signedUploadResponse = await fetch( + joinUrl(config.baseUrl, signedUpload.json?.url ?? ''), + { + body: signedPayload, + headers: { + 'content-type': 'text/plain', + }, + method: 'PUT', + } + ) + expect(signedUploadResponse.status).toBe(200) + } finally { + await signedUploadResponse?.body?.cancel() + } + + await client.request('PUT', `/object/${bucketName}/${encodePathSegments(objectKey)}`, { + body: updatedPayload, + expectedStatus: 200, + headers: { + 'content-type': 'text/plain', + }, + token, + }) + + const updated = await client.request( + 'GET', + `/object/authenticated/${bucketName}/${encodePathSegments(objectKey)}`, + { expectedStatus: 200, token } + ) + expect(updated.body).toBe(updatedPayload) + + await client.request('POST', '/object/copy', { + body: { + bucketId: bucketName, + destinationKey: copyKey, + sourceKey: objectKey, + }, + expectedStatus: 200, + headers: { + 'x-upsert': 'true', + }, + token, + }) + + await client.request('POST', '/object/move', { + body: { + bucketId: bucketName, + destinationKey: movedKey, + sourceKey: copyKey, + }, + expectedStatus: 200, + token, + }) + + const moved = await client.request( + 'GET', + `/object/authenticated/${bucketName}/${encodePathSegments(movedKey)}`, + { expectedStatus: 200, token } + ) + expect(moved.body).toBe(updatedPayload) + + const deleted = await client.request('DELETE', `/object/${bucketName}`, { + body: { + prefixes: [signedUploadKey, movedKey], + }, + expectedStatus: 200, + token, + }) + expect(deleted.json?.length).toBe(2) + + await client.request('DELETE', `/object/${bucketName}/${encodePathSegments(objectKey)}`, { + expectedStatus: 200, + token, + }) + } finally { + await cleanupRestResources(bucketName, trackedKeys, client) + } + }) + } +) diff --git a/acceptance/specs/rest-object.test.ts b/acceptance/specs/rest-object.test.ts new file mode 100644 index 000000000..b4d78500d --- /dev/null +++ 
b/acceptance/specs/rest-object.test.ts @@ -0,0 +1,164 @@ +import { + describeAcceptance, + encodePathSegments, + getAcceptanceConfig, + joinUrl, +} from '../support/config' +import { createRestClient } from '../support/http' +import { + cleanupRestResources, + createRestBucket, + requireServiceKey, + uniqueBucketName, + uniqueObjectKey, + uploadRestObject, +} from '../support/resources' + +interface CreateObjectResponse { + Id?: string + Key: string +} + +interface ListObjectsV2Response { + folders: Array<{ name: string }> + hasNext?: boolean + nextCursor?: string + objects: Array<{ name: string }> +} + +interface SignedUrlResponse { + signedURL: string +} + +describeAcceptance( + 'REST bucket and object contract', + { + destructive: true, + profiles: ['smoke', 'core'], + }, + () => { + it('creates a bucket, uploads, reads, lists, signs, and deletes an object', async () => { + const config = getAcceptanceConfig() + const client = createRestClient() + const bucketName = uniqueBucketName('rest') + const objectKey = uniqueObjectKey('rest') + const encodedObjectKey = encodePathSegments(objectKey) + const payload = `acceptance-rest-${config.runId}` + const objectKeys = [objectKey] + + try { + await createRestBucket(bucketName, { isPublic: false }) + + const upload = await client.request( + 'POST', + `/object/${bucketName}/${encodedObjectKey}`, + { + body: payload, + expectedStatus: 200, + headers: { + 'content-type': 'text/plain', + 'x-upsert': 'true', + }, + token: requireServiceKey(config), + } + ) + + expect(upload.json?.Key).toBe(`${bucketName}/${objectKey}`) + expect(upload.json?.Id).toBeTruthy() + + const downloaded = await client.request( + 'GET', + `/object/authenticated/${bucketName}/${encodedObjectKey}`, + { + expectedStatus: 200, + token: requireServiceKey(config), + } + ) + expect(downloaded.body).toBe(payload) + + const listed = await client.request( + 'POST', + `/object/list-v2/${bucketName}`, + { + body: { + limit: 100, + prefix: `${config.resourcePrefix}/`, + with_delimiter: false, + }, + expectedStatus: 200, + token: requireServiceKey(config), + } + ) + expect(listed.json?.objects.map((object) => object.name)).toContain(objectKey) + expect(listed.json?.folders).toEqual([]) + + const signed = await client.request( + 'POST', + `/object/sign/${bucketName}/${encodedObjectKey}`, + { + body: { + expiresIn: 60, + }, + expectedStatus: 200, + token: requireServiceKey(config), + } + ) + const signedUrl = joinUrl(config.baseUrl, signed.json?.signedURL ?? 
'') + let signedResponse: Response | undefined + try { + signedResponse = await fetch(signedUrl) + expect(signedResponse.status).toBe(200) + expect(await signedResponse.text()).toBe(payload) + } finally { + if (signedResponse && !signedResponse.bodyUsed) { + await signedResponse.body?.cancel() + } + } + } finally { + await cleanupRestResources(bucketName, objectKeys, client) + } + }) + } +) + +describeAcceptance( + 'REST path edge contract', + { + destructive: true, + profiles: ['core'], + requires: ['pathEdges'], + }, + () => { + it('preserves exact empty-segment object names in list-v2 when the backend accepts them', async () => { + const bucketName = uniqueBucketName('path') + const keys = [`${uniqueObjectKey('a')}/child.txt`, `${uniqueObjectKey('a')}///child.txt`] + const client = createRestClient() + + try { + await createRestBucket(bucketName) + await uploadRestObject(bucketName, keys[0], 'one') + await uploadRestObject(bucketName, keys[1], 'two') + + const listed = await client.request( + 'POST', + `/object/list-v2/${bucketName}`, + { + body: { + limit: 100, + prefix: '', + with_delimiter: false, + }, + expectedStatus: 200, + token: requireServiceKey(), + } + ) + + expect(listed.json?.objects.map((object) => object.name)).toEqual( + expect.arrayContaining(keys) + ) + } finally { + await cleanupRestResources(bucketName, keys, client) + } + }) + } +) diff --git a/acceptance/specs/rls.test.ts b/acceptance/specs/rls.test.ts new file mode 100644 index 000000000..e516ca513 --- /dev/null +++ b/acceptance/specs/rls.test.ts @@ -0,0 +1,87 @@ +import { randomUUID } from 'node:crypto' +import { + describeAcceptance, + encodePathSegments, + getAcceptanceConfig, + requireConfigValue, +} from '../support/config' +import { createRestClient } from '../support/http' +import { cleanupRestObjects } from '../support/resources' + +describeAcceptance( + 'RLS authorization contract', + { + destructive: true, + profiles: ['full'], + requires: ['rlsSetup'], + }, + () => { + it('allows authenticated access and rejects anon access for configured RLS resources', async () => { + const config = getAcceptanceConfig() + const client = createRestClient() + const bucketName = requireConfigValue(config.rlsBucket, 'ACCEPTANCE_RLS_BUCKET') + const readObject = requireConfigValue(config.rlsReadObject, 'ACCEPTANCE_RLS_READ_OBJECT') + const writePrefix = requireConfigValue( + config.rlsWritePrefix, + 'ACCEPTANCE_RLS_WRITE_PREFIX' + ).replace(/\/+$/, '') + const authenticatedKey = requireConfigValue( + config.authenticatedKey, + 'ACCEPTANCE_AUTHENTICATED_KEY' + ) + const anonKey = requireConfigValue(config.anonKey, 'ACCEPTANCE_ANON_KEY') + const writeKey = `${writePrefix}/acceptance-${config.runId}-${randomUUID() + .replace(/-/g, '') + .slice(0, 12)}.txt` + const deniedWriteKey = `${writePrefix}/acceptance-denied-${config.runId}-${randomUUID() + .replace(/-/g, '') + .slice(0, 12)}.txt` + + try { + await client.request( + 'GET', + `/object/authenticated/${bucketName}/${encodePathSegments(readObject)}`, + { + expectedStatus: 200, + token: authenticatedKey, + } + ) + + const deniedRead = await client.request( + 'GET', + `/object/authenticated/${bucketName}/${encodePathSegments(readObject)}`, + { + token: anonKey, + } + ) + expect(deniedRead.status).toBeGreaterThanOrEqual(400) + + await client.request('POST', `/object/${bucketName}/${encodePathSegments(writeKey)}`, { + body: `acceptance-rls-${config.runId}`, + expectedStatus: 200, + headers: { + 'content-type': 'text/plain', + 'x-upsert': 'true', + }, + token: authenticatedKey, 
+ }) + + const deniedWrite = await client.request( + 'POST', + `/object/${bucketName}/${encodePathSegments(deniedWriteKey)}`, + { + body: `acceptance-rls-denied-${config.runId}`, + headers: { + 'content-type': 'text/plain', + 'x-upsert': 'true', + }, + token: anonKey, + } + ) + expect(deniedWrite.status).toBeGreaterThanOrEqual(400) + } finally { + await cleanupRestObjects(bucketName, [writeKey, deniedWriteKey], client) + } + }) + } +) diff --git a/acceptance/specs/s3-wire.test.ts b/acceptance/specs/s3-wire.test.ts new file mode 100644 index 000000000..76d05f586 --- /dev/null +++ b/acceptance/specs/s3-wire.test.ts @@ -0,0 +1,120 @@ +import { + CreateBucketCommand, + CreateMultipartUploadCommand, + HeadObjectCommand, + ListPartsCommand, +} from '@aws-sdk/client-s3' +import { describeAcceptance } from '../support/config' +import { uniqueBucketName, uniqueObjectKey } from '../support/resources' +import { cleanupS3Bucket, createAcceptanceS3Client } from '../support/s3' +import { + sendAwsChunkedPutObject, + sendAwsChunkedTrailerModeWithoutTrailer, + sendAwsChunkedUploadPart, +} from '../support/sigv4' + +describeAcceptance( + 'S3 SigV4 wire contract', + { + destructive: true, + profiles: ['wire'], + requires: ['wire'], + }, + () => { + it('accepts aws-chunked PutObject and persists decoded content length', async () => { + const client = createAcceptanceS3Client() + const bucketName = uniqueBucketName('wire') + const key = uniqueObjectKey('chunked', 'bin') + const payload = Buffer.alloc(123, 1) + + try { + await client.send(new CreateBucketCommand({ Bucket: bucketName })) + const response = await sendAwsChunkedPutObject({ bucketName, key, payload }) + + expect(response.status).toBe(200) + + const head = await client.send(new HeadObjectCommand({ Bucket: bucketName, Key: key })) + expect(head.ContentLength).toBe(payload.length) + } finally { + await cleanupS3Bucket(client, bucketName) + client.destroy() + } + }) + + it('accepts aws-chunked UploadPart and lists the uploaded part', async () => { + const client = createAcceptanceS3Client() + const bucketName = uniqueBucketName('wiremp') + const key = uniqueObjectKey('chunked-part', 'bin') + const payload = Buffer.alloc(123, 2) + + try { + await client.send(new CreateBucketCommand({ Bucket: bucketName })) + const multipart = await client.send( + new CreateMultipartUploadCommand({ + Bucket: bucketName, + Key: key, + }) + ) + const uploadId = expectUploadId(multipart.UploadId) + + const response = await sendAwsChunkedUploadPart({ + bucketName, + key, + partNumber: 1, + payload, + uploadId, + }) + + expect(response.status).toBe(200) + + const parts = await client.send( + new ListPartsCommand({ + Bucket: bucketName, + Key: key, + UploadId: uploadId, + }) + ) + expect(parts.Parts?.map((part) => part.PartNumber)).toEqual([1]) + } finally { + await cleanupS3Bucket(client, bucketName) + client.destroy() + } + }) + + it('rejects trailer-mode aws-chunked PutObject when the trailer block is missing', async () => { + const client = createAcceptanceS3Client() + const bucketName = uniqueBucketName('wiretr') + const key = uniqueObjectKey('trailer', 'bin') + const payload = Buffer.alloc(123, 3) + + try { + await client.send(new CreateBucketCommand({ Bucket: bucketName })) + const response = await sendAwsChunkedTrailerModeWithoutTrailer({ + bucketName, + key, + payload, + }) + + expect(response.status).toBeGreaterThanOrEqual(400) + await expect( + client.send(new HeadObjectCommand({ Bucket: bucketName, Key: key })) + ).rejects.toMatchObject({ + $metadata: { + 
httpStatusCode: 404, + }, + }) + } finally { + await cleanupS3Bucket(client, bucketName) + client.destroy() + } + }) + } +) + +function expectUploadId(uploadId: string | undefined): string { + if (!uploadId) { + throw new Error('CreateMultipartUpload did not return UploadId') + } + + return uploadId +} diff --git a/acceptance/specs/s3.test.ts b/acceptance/specs/s3.test.ts new file mode 100644 index 000000000..25328f56e --- /dev/null +++ b/acceptance/specs/s3.test.ts @@ -0,0 +1,299 @@ +import { + AbortMultipartUploadCommand, + CompleteMultipartUploadCommand, + CopyObjectCommand, + CreateBucketCommand, + CreateMultipartUploadCommand, + DeleteBucketCommand, + DeleteObjectCommand, + DeleteObjectsCommand, + GetBucketLocationCommand, + GetBucketVersioningCommand, + GetObjectCommand, + HeadBucketCommand, + HeadObjectCommand, + ListBucketsCommand, + ListMultipartUploadsCommand, + ListObjectsCommand, + ListObjectsV2Command, + ListPartsCommand, + PutObjectCommand, + UploadPartCommand, + UploadPartCopyCommand, +} from '@aws-sdk/client-s3' +import { describeAcceptance } from '../support/config' +import { uniqueBucketName, uniqueObjectKey } from '../support/resources' +import { cleanupS3Bucket, createAcceptanceS3Client } from '../support/s3' + +describeAcceptance( + 'S3 protocol contract', + { + destructive: true, + profiles: ['smoke', 'core'], + }, + () => { + it('creates a bucket, writes, reads, lists, copies, and deletes an object', async () => { + const client = createAcceptanceS3Client() + const bucketName = uniqueBucketName('s3') + const key = uniqueObjectKey('s3') + const copyKey = uniqueObjectKey('s3-copy') + const payload = Buffer.from('acceptance-s3-object') + + try { + const created = await client.send( + new CreateBucketCommand({ + ACL: 'public-read', + Bucket: bucketName, + }) + ) + expect(created.Location).toBeTruthy() + + await client.send( + new PutObjectCommand({ + Body: payload, + Bucket: bucketName, + ContentType: 'text/plain', + Key: key, + }) + ) + + const head = await client.send( + new HeadObjectCommand({ + Bucket: bucketName, + Key: key, + }) + ) + expect(head.ContentLength).toBe(payload.length) + expect(head.ContentType).toBe('text/plain') + + const downloaded = await client.send( + new GetObjectCommand({ + Bucket: bucketName, + Key: key, + }) + ) + expect(await downloaded.Body?.transformToString()).toBe(payload.toString()) + + const listed = await client.send( + new ListObjectsV2Command({ + Bucket: bucketName, + Prefix: key.split('/')[0], + }) + ) + expect(listed.Contents?.map((object) => object.Key)).toContain(key) + + await client.send( + new CopyObjectCommand({ + Bucket: bucketName, + CopySource: `${bucketName}/${key}`, + Key: copyKey, + }) + ) + const copied = await client.send( + new HeadObjectCommand({ + Bucket: bucketName, + Key: copyKey, + }) + ) + expect(copied.ContentLength).toBe(payload.length) + + await client.send(new DeleteObjectCommand({ Bucket: bucketName, Key: key })) + await client.send(new DeleteObjectCommand({ Bucket: bucketName, Key: copyKey })) + } finally { + await cleanupS3Bucket(client, bucketName) + client.destroy() + } + }) + + it('completes a multipart upload and exposes uploaded parts', async () => { + const client = createAcceptanceS3Client() + const bucketName = uniqueBucketName('s3mp') + const key = uniqueObjectKey('multipart', 'bin') + const payload = Buffer.alloc(5 * 1024 * 1024, 7) + + try { + await client.send(new CreateBucketCommand({ Bucket: bucketName })) + const multipart = await client.send( + new CreateMultipartUploadCommand({ + 
Bucket: bucketName, + ContentType: 'application/octet-stream', + Key: key, + }) + ) + expect(multipart.UploadId).toBeTruthy() + + const part = await client.send( + new UploadPartCommand({ + Body: payload, + Bucket: bucketName, + ContentLength: payload.length, + Key: key, + PartNumber: 1, + UploadId: multipart.UploadId, + }) + ) + expect(part.ETag).toBeTruthy() + + const parts = await client.send( + new ListPartsCommand({ + Bucket: bucketName, + Key: key, + UploadId: multipart.UploadId, + }) + ) + expect(parts.Parts?.map((listedPart) => listedPart.PartNumber)).toEqual([1]) + + await client.send( + new CompleteMultipartUploadCommand({ + Bucket: bucketName, + Key: key, + MultipartUpload: { + Parts: [{ ETag: part.ETag, PartNumber: 1 }], + }, + UploadId: multipart.UploadId, + }) + ) + + const head = await client.send(new HeadObjectCommand({ Bucket: bucketName, Key: key })) + expect(head.ContentLength).toBe(payload.length) + } finally { + await cleanupS3Bucket(client, bucketName) + client.destroy() + } + }) + } +) + +describeAcceptance( + 'extended S3 protocol contract', + { + destructive: true, + profiles: ['core'], + }, + () => { + it('covers bucket metadata, list-v1, range reads, bulk delete, empty-bucket deletion, upload-part-copy, and abort', async () => { + const client = createAcceptanceS3Client() + const bucketName = uniqueBucketName('s3x') + const prefix = uniqueObjectKey('s3-prefix').replace(/\.txt$/, '') + const keyA = `${prefix}/a.txt` + const keyB = `${prefix}/b.txt` + const keyC = `${prefix}/nested/c.txt` + const copiedPartKey = `${prefix}/copied-part.txt` + const abortedKey = `${prefix}/aborted.txt` + const payload = Buffer.from('acceptance-s3-extended-object') + + try { + await client.send(new CreateBucketCommand({ Bucket: bucketName })) + + await client.send(new HeadBucketCommand({ Bucket: bucketName })) + const buckets = await client.send(new ListBucketsCommand({})) + expect(buckets.Buckets?.map((bucket) => bucket.Name)).toContain(bucketName) + + const location = await client.send(new GetBucketLocationCommand({ Bucket: bucketName })) + expect(location.LocationConstraint ?? 
'').toBeTypeOf('string') + + const versioning = await client.send(new GetBucketVersioningCommand({ Bucket: bucketName })) + expect([undefined, 'Suspended']).toContain(versioning.Status) + + await client.send( + new PutObjectCommand({ + Body: payload, + Bucket: bucketName, + ContentType: 'text/plain', + Key: keyA, + }) + ) + await client.send(new PutObjectCommand({ Body: 'b', Bucket: bucketName, Key: keyB })) + await client.send(new PutObjectCommand({ Body: 'c', Bucket: bucketName, Key: keyC })) + + const ranged = await client.send( + new GetObjectCommand({ + Bucket: bucketName, + Key: keyA, + Range: 'bytes=0-9', + }) + ) + expect(await ranged.Body?.transformToString()).toBe(payload.subarray(0, 10).toString()) + expect(ranged.ContentRange).toBe(`bytes 0-9/${payload.length}`) + + const listedV1 = await client.send( + new ListObjectsCommand({ + Bucket: bucketName, + Delimiter: '/', + Prefix: `${prefix}/`, + }) + ) + expect(listedV1.Contents?.map((object) => object.Key)).toEqual( + expect.arrayContaining([keyA, keyB]) + ) + expect(listedV1.CommonPrefixes?.map((object) => object.Prefix)).toContain( + `${prefix}/nested/` + ) + + const multipartCopy = await client.send( + new CreateMultipartUploadCommand({ + Bucket: bucketName, + Key: copiedPartKey, + }) + ) + const copiedPart = await client.send( + new UploadPartCopyCommand({ + Bucket: bucketName, + CopySource: `${bucketName}/${keyA}`, + CopySourceRange: `bytes=0-${payload.length - 1}`, + Key: copiedPartKey, + PartNumber: 1, + UploadId: multipartCopy.UploadId, + }) + ) + expect(copiedPart.CopyPartResult?.ETag).toBeTruthy() + await client.send( + new CompleteMultipartUploadCommand({ + Bucket: bucketName, + Key: copiedPartKey, + MultipartUpload: { + Parts: [{ ETag: copiedPart.CopyPartResult?.ETag, PartNumber: 1 }], + }, + UploadId: multipartCopy.UploadId, + }) + ) + const copiedPartHead = await client.send( + new HeadObjectCommand({ Bucket: bucketName, Key: copiedPartKey }) + ) + expect(copiedPartHead.ContentLength).toBe(payload.length) + + const aborted = await client.send( + new CreateMultipartUploadCommand({ + Bucket: bucketName, + Key: abortedKey, + }) + ) + await client.send( + new AbortMultipartUploadCommand({ + Bucket: bucketName, + Key: abortedKey, + UploadId: aborted.UploadId, + }) + ) + const activeUploads = await client.send( + new ListMultipartUploadsCommand({ Bucket: bucketName, Prefix: abortedKey }) + ) + expect(activeUploads.Uploads ?? 
[]).toHaveLength(0)
+
+        await client.send(
+          new DeleteObjectsCommand({
+            Bucket: bucketName,
+            Delete: {
+              Objects: [keyA, keyB, keyC, copiedPartKey].map((Key) => ({ Key })),
+            },
+          })
+        )
+
+        await client.send(new DeleteBucketCommand({ Bucket: bucketName }))
+      } finally {
+        await cleanupS3Bucket(client, bucketName)
+        client.destroy()
+      }
+    })
+  }
+)
diff --git a/acceptance/specs/tus.test.ts b/acceptance/specs/tus.test.ts
new file mode 100644
index 000000000..832085e2e
--- /dev/null
+++ b/acceptance/specs/tus.test.ts
@@ -0,0 +1,344 @@
+import * as tus from 'tus-js-client'
+import {
+  describeAcceptance,
+  encodePathSegments,
+  getAcceptanceConfig,
+  joinUrl,
+} from '../support/config'
+import { createRestClient } from '../support/http'
+import {
+  cleanupRestResources,
+  createRestBucket,
+  requireServiceKey,
+  uniqueBucketName,
+  uniqueObjectKey,
+} from '../support/resources'
+
+interface SignedUploadResponse {
+  token: string
+  url: string
+}
+
+const tusVersion = '1.0.0'
+
+describeAcceptance(
+  'TUS resumable upload contract',
+  {
+    destructive: true,
+    profiles: ['core'],
+    requires: ['tus'],
+  },
+  () => {
+    it('uploads an object through the resumable endpoint', async () => {
+      const config = getAcceptanceConfig()
+      const bucketName = uniqueBucketName('tus')
+      const objectKey = uniqueObjectKey('tus')
+      const payload = Buffer.from(`acceptance-tus-${config.runId}`)
+      const uploadPayload = new Blob([payload])
+
+      try {
+        await createRestBucket(bucketName)
+        await new Promise<void>((resolve, reject) => {
+          const upload = new tus.Upload(uploadPayload, {
+            chunkSize: uploadPayload.size,
+            endpoint: config.tusEndpoint,
+            headers: {
+              authorization: `Bearer ${requireServiceKey(config)}`,
+              'x-upsert': 'true',
+            },
+            metadata: {
+              bucketName,
+              cacheControl: '60',
+              contentType: 'text/plain',
+              objectName: objectKey,
+            },
+            onError: reject,
+            onShouldRetry: () => false,
+            onSuccess: () => resolve(),
+            uploadDataDuringCreation: false,
+          })
+
+          upload.start()
+        })
+
+        const client = createRestClient()
+        const downloaded = await client.request(
+          'GET',
+          `/object/authenticated/${bucketName}/${encodePathSegments(objectKey)}`,
+          {
+            expectedStatus: 200,
+            token: requireServiceKey(config),
+          }
+        )
+        expect(downloaded.body).toBe(payload.toString())
+      } finally {
+        await cleanupRestResources(bucketName, [objectKey])
+      }
+    })
+
+    it('resumes chunked uploads through POST, HEAD, and PATCH', async () => {
+      const config = getAcceptanceConfig()
+      const bucketName = uniqueBucketName('tusresume')
+      const objectKey = uniqueObjectKey('tus-resume')
+      const payload = Buffer.from(`acceptance-tus-resume-${config.runId}`)
+
+      try {
+        await createRestBucket(bucketName)
+
+        await uploadTusInChunks({
+          chunks: [payload.subarray(0, 10), payload.subarray(10)],
+          endpoint: config.tusEndpoint,
+          headers: {
+            authorization: `Bearer ${requireServiceKey(config)}`,
+            'x-upsert': 'true',
+          },
+          metadata: {
+            bucketName,
+            cacheControl: '60',
+            contentType: 'text/plain',
+            objectName: objectKey,
+          },
+          totalLength: payload.length,
+        })
+
+        const client = createRestClient()
+        const downloaded = await client.request(
+          'GET',
+          `/object/authenticated/${bucketName}/${encodePathSegments(objectKey)}`,
+          {
+            expectedStatus: 200,
+            token: requireServiceKey(config),
+          }
+        )
+        expect(downloaded.body).toBe(payload.toString())
+      } finally {
+        await cleanupRestResources(bucketName, [objectKey])
+      }
+    })
+
+    it('terminates abandoned uploads through DELETE', async () => {
+      const config = getAcceptanceConfig()
+      const bucketName = uniqueBucketName('tusdelete')
+      const objectKey = uniqueObjectKey('tus-delete')
+      const headers = {
+        authorization: `Bearer ${requireServiceKey(config)}`,
+        'x-upsert': 'true',
+      }
+
+      try {
+        await createRestBucket(bucketName)
+
+        const uploadUrl = await createTusUpload({
+          endpoint: config.tusEndpoint,
+          headers,
+          metadata: {
+            bucketName,
+            cacheControl: '60',
+            contentType: 'text/plain',
+            objectName: objectKey,
+          },
+          totalLength: 32,
+        })
+
+        let deleted: Response | undefined
+        try {
+          deleted = await fetch(uploadUrl, {
+            headers: {
+              ...headers,
+              'tus-resumable': tusVersion,
+            },
+            method: 'DELETE',
+          })
+          expect(deleted.status).toBe(204)
+        } finally {
+          await deleted?.body?.cancel()
+        }
+
+        let afterDelete: Response | undefined
+        try {
+          afterDelete = await fetch(uploadUrl, {
+            headers: {
+              ...headers,
+              'tus-resumable': tusVersion,
+            },
+            method: 'HEAD',
+          })
+          expect([404, 410]).toContain(afterDelete.status)
+        } finally {
+          await afterDelete?.body?.cancel()
+        }
+      } finally {
+        await cleanupRestResources(bucketName, [objectKey])
+      }
+    })
+
+    it('uploads through the signed TUS endpoint without an authorization header', async () => {
+      const config = getAcceptanceConfig()
+      const client = createRestClient()
+      const bucketName = uniqueBucketName('tussign')
+      const objectKey = uniqueObjectKey('tus-signed')
+      const payload = Buffer.from(`acceptance-tus-signed-${config.runId}`)
+
+      try {
+        await createRestBucket(bucketName)
+
+        const signedUpload = await client.request<SignedUploadResponse>(
+          'POST',
+          `/object/upload/sign/${bucketName}/${encodePathSegments(objectKey)}`,
+          {
+            expectedStatus: 200,
+            headers: {
+              'content-type': 'text/plain',
+              'x-upsert': 'true',
+            },
+            token: requireServiceKey(config),
+          }
+        )
+
+        await uploadTusInChunks({
+          chunks: [payload],
+          endpoint: joinUrl(config.tusEndpoint, 'sign'),
+          headers: {
+            'x-signature': signedUpload.json?.token ?? '',
+          },
+          metadata: {
+            bucketName,
+            cacheControl: '60',
+            contentType: 'text/plain',
+            objectName: objectKey,
+          },
+          totalLength: payload.length,
+        })
+
+        const downloaded = await client.request(
+          'GET',
+          `/object/authenticated/${bucketName}/${encodePathSegments(objectKey)}`,
+          {
+            expectedStatus: 200,
+            token: requireServiceKey(config),
+          }
+        )
+        expect(downloaded.body).toBe(payload.toString())
+      } finally {
+        await cleanupRestResources(bucketName, [objectKey])
+      }
+    })
+  }
+)
+
+async function uploadTusInChunks({
+  chunks,
+  endpoint,
+  headers,
+  metadata,
+  totalLength,
+}: {
+  chunks: Uint8Array[]
+  endpoint: string
+  headers: Record<string, string>
+  metadata: Record<string, string>
+  totalLength: number
+}) {
+  const uploadUrl = await createTusUpload({
+    endpoint,
+    headers,
+    metadata,
+    totalLength,
+  })
+  let offset = await getTusOffset(uploadUrl, headers)
+  expect(offset).toBe(0)
+
+  for (const chunk of chunks) {
+    let patched: Response | undefined
+    try {
+      patched = await fetch(uploadUrl, {
+        body: chunk as unknown as BodyInit,
+        headers: {
+          ...headers,
+          'content-type': 'application/offset+octet-stream',
+          'tus-resumable': tusVersion,
+          'upload-offset': offset.toString(),
+        },
+        method: 'PATCH',
+      })
+      expect(patched.status).toBe(204)
+      offset = Number(patched.headers.get('upload-offset') ?? offset + chunk.byteLength)
+      expect(offset).toBeGreaterThan(0)
+    } finally {
+      await patched?.body?.cancel()
+    }
+  }
+
+  expect(offset).toBe(totalLength)
+}
+
+async function createTusUpload({
+  endpoint,
+  headers,
+  metadata,
+  totalLength,
+}: {
+  endpoint: string
+  headers: Record<string, string>
+  metadata: Record<string, string>
+  totalLength: number
+}) {
+  let options: Response | undefined
+  try {
+    options = await fetch(endpoint, {
+      headers: {
+        'tus-resumable': tusVersion,
+      },
+      method: 'OPTIONS',
+    })
+    expect([200, 204]).toContain(options.status)
+    expect(options.headers.get('tus-version')).toContain(tusVersion)
+  } finally {
+    await options?.body?.cancel()
+  }
+
+  let created: Response | undefined
+  try {
+    created = await fetch(endpoint, {
+      headers: {
+        ...headers,
+        'tus-resumable': tusVersion,
+        'upload-length': totalLength.toString(),
+        'upload-metadata': encodeTusMetadata(metadata),
+      },
+      method: 'POST',
+    })
+    expect(created.status).toBe(201)
+
+    const location = created.headers.get('location')
+    expect(location).toBeTruthy()
+
+    return new URL(location ?? '', endpoint).toString()
+  } finally {
+    await created?.body?.cancel()
+  }
+}
+
+async function getTusOffset(uploadUrl: string, headers: Record<string, string>) {
+  let head: Response | undefined
+  try {
+    head = await fetch(uploadUrl, {
+      headers: {
+        ...headers,
+        'tus-resumable': tusVersion,
+      },
+      method: 'HEAD',
+    })
+    expect(head.status).toBe(200)
+
+    return Number(head.headers.get('upload-offset') ?? 0)
+  } finally {
+    await head?.body?.cancel()
+  }
+}
+
+function encodeTusMetadata(metadata: Record<string, string>) {
+  return Object.entries(metadata)
+    .map(([key, value]) => `${key} ${Buffer.from(value).toString('base64')}`)
+    .join(',')
+}
diff --git a/acceptance/specs/vector.test.ts b/acceptance/specs/vector.test.ts
new file mode 100644
index 000000000..ac0990e64
--- /dev/null
+++ b/acceptance/specs/vector.test.ts
@@ -0,0 +1,210 @@
+import { randomUUID } from 'node:crypto'
+import { describeAcceptance, getAcceptanceConfig } from '../support/config'
+import { createRestClient } from '../support/http'
+import { requireServiceKey } from '../support/resources'
+
+interface VectorListBucketsResponse {
+  vectorBuckets?: Array<{ vectorBucketName?: string }>
+}
+
+interface VectorListIndexesResponse {
+  indexes?: Array<{ indexName?: string }>
+}
+
+interface VectorListResponse {
+  vectors?: Array<{ key?: string }>
+}
+
+describeAcceptance(
+  'vector API contract',
+  {
+    destructive: true,
+    profiles: ['full'],
+    requires: ['vector'],
+  },
+  () => {
+    it('covers vector bucket, index, put/get/list/query/delete lifecycle', async () => {
+      const config = getAcceptanceConfig()
+      const client = createRestClient()
+      const token = requireServiceKey(config)
+      const suffix = randomUUID().replace(/-/g, '').slice(0, 12)
+      const vectorBucketName = `${config.resourcePrefix}-vec-${suffix}`.slice(0, 45)
+      const indexName = `idx-${suffix}`
+      const vectorKeys = [`vec-a-${suffix}`, `vec-b-${suffix}`]
+
+      try {
+        await client.request('POST', '/vector/CreateVectorBucket', {
+          body: {
+            vectorBucketName,
+          },
+          expectedStatus: 200,
+          token,
+        })
+
+        const buckets = await client.request<VectorListBucketsResponse>(
+          'POST',
+          '/vector/ListVectorBuckets',
+          {
+            body: {
+              prefix: vectorBucketName,
+            },
+            expectedStatus: 200,
+            token,
+          }
+        )
+        expect(buckets.json?.vectorBuckets?.map((bucket) => bucket.vectorBucketName)).toContain(
+          vectorBucketName
+        )
+
+        await client.request('POST', '/vector/GetVectorBucket', {
+          body: {
+            vectorBucketName,
+          },
+          expectedStatus: 200,
+          token,
+        })
+
+        await client.request('POST', '/vector/CreateIndex', {
+          body: {
+            dataType: 'float32',
+            dimension: 2,
+            distanceMetric: 'cosine',
+            indexName,
+            vectorBucketName,
+          },
+          expectedStatus: 200,
+          token,
+        })
+
+        const indexes = await client.request<VectorListIndexesResponse>(
+          'POST',
+          '/vector/ListIndexes',
+          {
+            body: {
+              vectorBucketName,
+            },
+            expectedStatus: 200,
+            token,
+          }
+        )
+        expect(indexes.json?.indexes?.map((index) => index.indexName)).toContain(indexName)
+
+        await client.request('POST', '/vector/GetIndex', {
+          body: {
+            indexName,
+            vectorBucketName,
+          },
+          expectedStatus: 200,
+          token,
+        })
+
+        await client.request('POST', '/vector/PutVectors', {
+          body: {
+            indexName,
+            vectorBucketName,
+            vectors: [
+              {
+                data: {
+                  float32: [1, 0],
+                },
+                key: vectorKeys[0],
+                metadata: {
+                  group: 'acceptance',
+                },
+              },
+              {
+                data: {
+                  float32: [0, 1],
+                },
+                key: vectorKeys[1],
+                metadata: {
+                  group: 'acceptance',
+                },
+              },
+            ],
+          },
+          expectedStatus: 200,
+          token,
+        })
+
+        const vectors = await client.request<VectorListResponse>('POST', '/vector/ListVectors', {
+          body: {
+            indexName,
+            returnData: true,
+            returnMetadata: true,
+            vectorBucketName,
+          },
+          expectedStatus: 200,
+          token,
+        })
+        expect(vectors.json?.vectors?.map((vector) => vector.key)).toEqual(
+          expect.arrayContaining(vectorKeys)
+        )
+
+        const fetched = await client.request<VectorListResponse>('POST', '/vector/GetVectors', {
+          body: {
+            indexName,
+            keys: vectorKeys,
+            returnData: true,
+            returnMetadata: true,
+            vectorBucketName,
+          },
+          expectedStatus: 200,
+          token,
+        })
+        expect(fetched.json?.vectors?.map((vector) => vector.key)).toEqual(
+          expect.arrayContaining(vectorKeys)
+        )
+
+        const query = await client.request<VectorListResponse>('POST', '/vector/QueryVectors', {
+          body: {
+            filter: {
+              group: 'acceptance',
+            },
+            indexName,
+            queryVector: {
+              float32: [1, 0],
+            },
+            returnDistance: true,
+            returnMetadata: true,
+            topK: 2,
+            vectorBucketName,
+          },
+          expectedStatus: 200,
+          token,
+        })
+        expect(query.json?.vectors?.map((vector) => vector.key)).toContain(vectorKeys[0])
+
+        await client.request('POST', '/vector/DeleteVectors', {
+          body: {
+            indexName,
+            keys: vectorKeys,
+            vectorBucketName,
+          },
+          expectedStatus: 200,
+          token,
+        })
+      } finally {
+        await client
+          .request('POST', '/vector/DeleteIndex', {
+            body: {
+              indexName,
+              vectorBucketName,
+            },
+            expectedStatus: [200, 400, 404],
+            token,
+          })
+          .catch(() => undefined)
+        await client
+          .request('POST', '/vector/DeleteVectorBucket', {
+            body: {
+              vectorBucketName,
+            },
+            expectedStatus: [200, 400, 404],
+            token,
+          })
+          .catch(() => undefined)
+      }
+    })
+  }
+)
diff --git a/acceptance/support/config.ts b/acceptance/support/config.ts
new file mode 100644
index 000000000..248c56e1b
--- /dev/null
+++ b/acceptance/support/config.ts
@@ -0,0 +1,302 @@
+const acceptanceProfiles = ['smoke', 'core', 'full', 'wire'] as const
+
+export type AcceptanceProfile = (typeof acceptanceProfiles)[number]
+
+export type AcceptanceCapability =
+  | 'admin'
+  | 'cdn'
+  | 'iceberg'
+  | 'pathEdges'
+  | 'render'
+  | 'rlsSetup'
+  | 'tus'
+  | 'vector'
+  | 'wire'
+
+export interface AcceptanceConfig {
+  adminApiKey?: string
+  adminUrl?: string
+  allowDestructive: boolean
+  anonKey?: string
+  authenticatedKey?: string
+  baseUrl: string
+  capabilities: Record<AcceptanceCapability, boolean>
+  forcePathStyle: boolean
+  profile: AcceptanceProfile
+  region: string
+  resourcePrefix: string
+  rlsBucket?: string
+  rlsReadObject?: string
+  rlsWritePrefix?: string
+  runId: string
+  s3AccessKeyId?: string
+  s3Endpoint: string
+  s3SecretAccessKey?: string
+  serviceKey?: string
+  target: 'local' | 'remote'
+  tenantId?: string
+  tlsRejectUnauthorized:
boolean + tusEndpoint: string +} + +export interface AcceptanceSelection { + destructive?: boolean + profiles: AcceptanceProfile[] + requires?: AcceptanceCapability[] +} + +let cachedConfig: AcceptanceConfig | undefined + +export function getAcceptanceConfig(): AcceptanceConfig { + if (!cachedConfig) { + cachedConfig = buildAcceptanceConfig() + } + + return cachedConfig +} + +export function resetAcceptanceConfigForTests() { + cachedConfig = undefined +} + +export function shouldRunAcceptance(selection: AcceptanceSelection): boolean { + const config = getAcceptanceConfig() + + if (!profileIncludes(config.profile, selection.profiles)) { + return false + } + + if (selection.destructive && config.target === 'remote' && !config.allowDestructive) { + return false + } + + return (selection.requires ?? []).every((capability) => config.capabilities[capability]) +} + +export function describeAcceptance( + name: string, + selection: AcceptanceSelection, + factory: () => void +) { + const runner = shouldRunAcceptance(selection) ? describe : describe.skip + runner(name, factory) +} + +export function requireConfigValue(value: string | undefined, name: string): string { + if (!value) { + throw new Error(`Missing required acceptance configuration: ${name}`) + } + + return value +} + +export function joinUrl(baseUrl: string, route: string): string { + const normalizedBase = baseUrl.endsWith('/') ? baseUrl : `${baseUrl}/` + const normalizedRoute = route.replace(/^\/+/, '') + return new URL(normalizedRoute, normalizedBase).toString() +} + +export function encodePathSegments(value: string): string { + return value + .split('/') + .map((segment) => encodeURIComponent(segment)) + .join('/') +} + +function buildAcceptanceConfig(): AcceptanceConfig { + const profile = normalizeProfile(readOption('profile') ?? envOption('ACCEPTANCE_PROFILE')) + const target = normalizeTarget(readOption('target') ?? envOption('ACCEPTANCE_TARGET')) + const explicitBaseUrl = readOption('base-url') ?? envOption('ACCEPTANCE_BASE_URL') + const defaultBaseUrl = target === 'local' ? 'http://127.0.0.1:5000' : undefined + if (!explicitBaseUrl && !defaultBaseUrl) { + throw new Error('ACCEPTANCE_BASE_URL is required for non-local acceptance targets') + } + + const baseUrl = trimTrailingSlash(explicitBaseUrl ?? defaultBaseUrl ?? '') + const s3Endpoint = trimTrailingSlash( + readOption('s3-endpoint') ?? envOption('ACCEPTANCE_S3_ENDPOINT') ?? joinUrl(baseUrl, '/s3') + ) + const tusEndpoint = trimTrailingSlash( + readOption('tus-endpoint') ?? + envOption('ACCEPTANCE_TUS_ENDPOINT') ?? + joinUrl(baseUrl, '/upload/resumable') + ) + const adminUrl = optionalTrim(readOption('admin-url') ?? envOption('ACCEPTANCE_ADMIN_URL')) + const adminCapabilityRequested = boolOption('enable-admin', envOption('ACCEPTANCE_ENABLE_ADMIN')) + const runId = sanitizeRunId( + readOption('run-id') ?? + envOption('ACCEPTANCE_RUN_ID') ?? + new Date() + .toISOString() + .replace(/[-:.TZ]/g, '') + .slice(0, 14) + ) + const resourcePrefix = sanitizeResourcePrefix( + readOption('resource-prefix') ?? envOption('ACCEPTANCE_RESOURCE_PREFIX') ?? `acc-${runId}` + ) + const adminApiKey = readOption('admin-api-key') ?? 
envOption('ACCEPTANCE_ADMIN_API_KEY') + const hasAdminConfig = Boolean(adminUrl && adminApiKey) + + const config: AcceptanceConfig = { + adminApiKey, + adminUrl, + allowDestructive: boolOption('allow-destructive', envOption('ACCEPTANCE_ALLOW_DESTRUCTIVE')), + anonKey: envOption('ACCEPTANCE_ANON_KEY'), + authenticatedKey: envOption('ACCEPTANCE_AUTHENTICATED_KEY'), + baseUrl, + capabilities: { + admin: (adminCapabilityRequested || hasAdminConfig) && hasAdminConfig, + cdn: boolOption('enable-cdn', process.env.ACCEPTANCE_ENABLE_CDN), + iceberg: boolOption('enable-iceberg', process.env.ACCEPTANCE_ENABLE_ICEBERG), + pathEdges: boolOption('enable-path-edges', process.env.ACCEPTANCE_ENABLE_PATH_EDGES), + render: boolOption('enable-render', process.env.ACCEPTANCE_ENABLE_RENDER), + rlsSetup: boolOption('enable-rls-setup', process.env.ACCEPTANCE_ENABLE_RLS_SETUP), + tus: boolOptionDefaultTrue('enable-tus', envOption('ACCEPTANCE_ENABLE_TUS')), + vector: boolOption('enable-vector', process.env.ACCEPTANCE_ENABLE_VECTOR), + wire: + boolOption('enable-wire', process.env.ACCEPTANCE_ENABLE_WIRE) || + profile === 'wire' || + profile === 'full', + }, + forcePathStyle: boolOptionDefaultTrue( + 's3-force-path-style', + envOption('ACCEPTANCE_S3_FORCE_PATH_STYLE') + ), + profile, + region: readOption('region') ?? envOption('ACCEPTANCE_REGION') ?? 'us-east-1', + resourcePrefix, + rlsBucket: envOption('ACCEPTANCE_RLS_BUCKET') ?? (target === 'local' ? 'bucket2' : undefined), + rlsReadObject: + envOption('ACCEPTANCE_RLS_READ_OBJECT') ?? + (target === 'local' ? 'authenticated/casestudy.png' : undefined), + rlsWritePrefix: + envOption('ACCEPTANCE_RLS_WRITE_PREFIX') ?? + (target === 'local' ? 'authenticated' : undefined), + runId, + s3AccessKeyId: envOption('ACCEPTANCE_S3_ACCESS_KEY_ID'), + s3Endpoint, + s3SecretAccessKey: envOption('ACCEPTANCE_S3_SECRET_ACCESS_KEY'), + serviceKey: envOption('ACCEPTANCE_SERVICE_KEY'), + target, + tenantId: envOption('ACCEPTANCE_TENANT_ID'), + tlsRejectUnauthorized: boolOptionDefaultTrue( + 'tls-reject-unauthorized', + envOption('ACCEPTANCE_TLS_REJECT_UNAUTHORIZED') + ), + tusEndpoint, + } + + if (config.target === 'remote' && config.allowDestructive && !config.resourcePrefix) { + throw new Error('Remote destructive acceptance runs require ACCEPTANCE_RESOURCE_PREFIX') + } + + return config +} + +function profileIncludes(current: AcceptanceProfile, requested: AcceptanceProfile[]) { + if (current === 'full') { + return true + } + + if (current === 'core') { + return requested.includes('smoke') || requested.includes('core') + } + + if (current === 'wire') { + return requested.includes('smoke') || requested.includes('wire') + } + + return requested.includes('smoke') +} + +function normalizeProfile(value: string | undefined): AcceptanceProfile { + if (isAcceptanceProfile(value)) { + return value + } + + return 'smoke' +} + +function isAcceptanceProfile(value: string | undefined): value is AcceptanceProfile { + return acceptanceProfiles.includes(value as AcceptanceProfile) +} + +function normalizeTarget(value: string | undefined): AcceptanceConfig['target'] { + return value === 'remote' ? 'remote' : 'local' +} + +function boolOption(cliName: string, envValue: string | undefined): boolean { + const cliValue = readOption(cliName) + const value = cliValue ?? envValue + + return value === '1' || value === 'true' || value === 'yes' +} + +function boolOptionDefaultTrue(cliName: string, envValue: string | undefined): boolean { + const cliValue = readOption(cliName) + const value = cliValue ?? 
envValue
+
+  if (value === undefined) {
+    return true
+  }
+
+  return value === '1' || value === 'true' || value === 'yes'
+}
+
+function envOption(name: string): string | undefined {
+  const value = process.env[name]
+  if (value === undefined || value === '') {
+    return undefined
+  }
+
+  return value
+}
+
+function readOption(name: string): string | undefined {
+  const flag = `--${name}`
+  const argv = process.argv
+  const equalsPrefix = `${flag}=`
+
+  for (let index = 0; index < argv.length; index++) {
+    const item = argv[index]
+    if (item === flag) {
+      return argv[index + 1]
+    }
+    if (item.startsWith(equalsPrefix)) {
+      return item.slice(equalsPrefix.length)
+    }
+  }
+
+  return undefined
+}
+
+function trimTrailingSlash(value: string): string {
+  return value.replace(/\/+$/, '')
+}
+
+function optionalTrim(value: string | undefined): string | undefined {
+  if (!value) {
+    return undefined
+  }
+
+  return trimTrailingSlash(value)
+}
+
+function sanitizeRunId(value: string): string {
+  return (
+    value
+      .toLowerCase()
+      .replace(/[^a-z0-9-]/g, '')
+      .slice(0, 24) || 'run'
+  )
+}
+
+function sanitizeResourcePrefix(value: string): string {
+  return (
+    value
+      .toLowerCase()
+      .replace(/[^a-z0-9-]/g, '-')
+      .replace(/^-+|-+$/g, '')
+      .slice(0, 32) || 'resource'
+  )
+}
diff --git a/acceptance/support/http.ts b/acceptance/support/http.ts
new file mode 100644
index 000000000..80d5b6647
--- /dev/null
+++ b/acceptance/support/http.ts
@@ -0,0 +1,105 @@
+import { getAcceptanceConfig, joinUrl } from './config'
+
+export interface HttpRequestOptions {
+  body?: BodyInit | Record<string, unknown>
+  expectedStatus?: number | number[]
+  headers?: Record<string, string>
+  token?: string
+}
+
+export interface HttpResponse<T = unknown> {
+  body: string
+  headers: Headers
+  json: T | undefined
+  status: number
+  url: string
+}
+
+export class AcceptanceHttpClient {
+  constructor(private readonly baseUrl: string) {}
+
+  async request<T = unknown>(
+    method: string,
+    route: string,
+    options: HttpRequestOptions = {}
+  ): Promise<HttpResponse<T>> {
+    const url = joinUrl(this.baseUrl, route)
+    const headers = new Headers(options.headers)
+
+    if (options.token) {
+      headers.set('authorization', `Bearer ${options.token}`)
+    }
+
+    let body = options.body
+    if (body && isPlainObject(body)) {
+      headers.set('content-type', headers.get('content-type') ?? 'application/json')
+      body = JSON.stringify(body)
+    }
+
+    const response = await fetch(url, {
+      body,
+      headers,
+      method,
+    })
+    const text = await response.text()
+    const parsed = parseJson<T>(text)
+    const expected = options.expectedStatus
+
+    if (expected !== undefined && !statusMatches(response.status, expected)) {
+      throw new Error(
+        [
+          `Unexpected HTTP status for ${method} ${url}`,
+          `expected: ${Array.isArray(expected) ? expected.join(', ') : expected}`,
+          `received: ${response.status}`,
+          `body: ${text}`,
+        ].join('\n')
+      )
+    }
+
+    return {
+      body: text,
+      headers: response.headers,
+      json: parsed,
+      status: response.status,
+      url,
+    }
+  }
+}
+
+export function createRestClient() {
+  return new AcceptanceHttpClient(getAcceptanceConfig().baseUrl)
+}
+
+export function createAdminClient() {
+  const { adminUrl } = getAcceptanceConfig()
+  if (!adminUrl) {
+    throw new Error('ACCEPTANCE_ADMIN_URL is required for admin acceptance tests')
+  }
+
+  return new AcceptanceHttpClient(adminUrl)
+}
+
+function statusMatches(status: number, expected: number | number[]) {
+  return Array.isArray(expected) ? expected.includes(status) : status === expected
+}
+
+function parseJson<T>(text: string): T | undefined {
+  if (!text) {
+    return undefined
+  }
+
+  try {
+    return JSON.parse(text) as T
+  } catch {
+    return undefined
+  }
+}
+
+function isPlainObject(value: unknown): value is Record<string, unknown> {
+  if (typeof value !== 'object' || value === null) {
+    return false
+  }
+
+  const prototype = Object.getPrototypeOf(value)
+  return prototype === Object.prototype || prototype === null
+}
diff --git a/acceptance/support/resources.ts b/acceptance/support/resources.ts
new file mode 100644
index 000000000..a663636af
--- /dev/null
+++ b/acceptance/support/resources.ts
@@ -0,0 +1,97 @@
+import { randomUUID } from 'node:crypto'
+import { AcceptanceConfig, encodePathSegments, getAcceptanceConfig } from './config'
+import { AcceptanceHttpClient, createRestClient } from './http'
+
+export function uniqueBucketName(kind: string): string {
+  const config = getAcceptanceConfig()
+  const suffix = randomUUID().replace(/-/g, '').slice(0, 16)
+  return `${config.resourcePrefix}-${kind}-${suffix}`
+    .toLowerCase()
+    .replace(/[^a-z0-9-]/g, '-')
+    .slice(0, 63)
+}
+
+export function uniqueObjectKey(kind: string, extension = 'txt'): string {
+  const config = getAcceptanceConfig()
+  const suffix = randomUUID().replace(/-/g, '').slice(0, 16)
+  return `${config.resourcePrefix}/${kind}-${suffix}.${extension}`
+}
+
+export async function createRestBucket(bucketName: string, options: { isPublic?: boolean } = {}) {
+  const config = getAcceptanceConfig()
+  const client = createRestClient()
+
+  await client.request('POST', '/bucket', {
+    body: {
+      id: bucketName,
+      name: bucketName,
+      public: options.isPublic ?? false,
+    },
+    expectedStatus: 200,
+    token: requireServiceKey(config),
+  })
+}
+
+export async function uploadRestObject(
+  bucketName: string,
+  objectKey: string,
+  body: BodyInit,
+  options: { contentType?: string } = {}
+) {
+  const config = getAcceptanceConfig()
+  const client = createRestClient()
+
+  await client.request('POST', `/object/${bucketName}/${encodePathSegments(objectKey)}`, {
+    body,
+    expectedStatus: 200,
+    headers: {
+      'content-type': options.contentType ?? 'text/plain',
+      'x-upsert': 'true',
+    },
+    token: requireServiceKey(config),
+  })
+}
+
+export async function cleanupRestResources(
+  bucketName: string,
+  objectKeys: string[],
+  client: AcceptanceHttpClient = createRestClient()
+) {
+  await cleanupRestObjects(bucketName, objectKeys, client)
+
+  await client
+    .request('DELETE', `/bucket/${bucketName}`, {
+      expectedStatus: [200, 400, 404],
+      token: requireServiceKey(),
+    })
+    .catch(() => undefined)
+}
+
+export async function cleanupRestObjects(
+  bucketName: string,
+  objectKeys: string[],
+  client: AcceptanceHttpClient = createRestClient()
+) {
+  const config = getAcceptanceConfig()
+  const token = requireServiceKey(config)
+
+  const deletionOrder = [...objectKeys].reverse()
+
+  // Callers pass object keys in creation order; delete in LIFO order for cleanup.
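+  // Each delete below is best-effort: 400/404 count as expected statuses and any other
+  // failure is swallowed, so cleanup can never mask the assertion that failed the test.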
+ for (const objectKey of deletionOrder) { + await client + .request('DELETE', `/object/${bucketName}/${encodePathSegments(objectKey)}`, { + expectedStatus: [200, 400, 404], + token, + }) + .catch(() => undefined) + } +} + +export function requireServiceKey(config: AcceptanceConfig = getAcceptanceConfig()) { + if (!config.serviceKey) { + throw new Error('ACCEPTANCE_SERVICE_KEY is required for this acceptance test') + } + + return config.serviceKey +} diff --git a/acceptance/support/s3.ts b/acceptance/support/s3.ts new file mode 100644 index 000000000..9a33f7ffe --- /dev/null +++ b/acceptance/support/s3.ts @@ -0,0 +1,99 @@ +import { + AbortMultipartUploadCommand, + DeleteBucketCommand, + DeleteObjectCommand, + DeleteObjectsCommand, + ListMultipartUploadsCommand, + ListObjectsV2Command, + S3Client, +} from '@aws-sdk/client-s3' +import { getAcceptanceConfig, requireConfigValue } from './config' + +export function createAcceptanceS3Client() { + const config = getAcceptanceConfig() + + return new S3Client({ + credentials: { + accessKeyId: requireConfigValue(config.s3AccessKeyId, 'ACCEPTANCE_S3_ACCESS_KEY_ID'), + secretAccessKey: requireConfigValue( + config.s3SecretAccessKey, + 'ACCEPTANCE_S3_SECRET_ACCESS_KEY' + ), + }, + endpoint: config.s3Endpoint, + forcePathStyle: config.forcePathStyle, + region: config.region, + }) +} + +export async function cleanupS3Bucket(client: S3Client, bucketName: string) { + await abortMultipartUploads(client, bucketName).catch(() => undefined) + await deleteAllObjects(client, bucketName).catch(() => undefined) + await client.send(new DeleteBucketCommand({ Bucket: bucketName })).catch(() => undefined) +} + +export async function cleanupS3Object(client: S3Client, bucketName: string, key: string) { + await client + .send(new DeleteObjectCommand({ Bucket: bucketName, Key: key })) + .catch(() => undefined) +} + +async function deleteAllObjects(client: S3Client, bucketName: string) { + let continuationToken: string | undefined + + do { + const page = await client.send( + new ListObjectsV2Command({ + Bucket: bucketName, + ContinuationToken: continuationToken, + }) + ) + const keys = page.Contents?.map((object) => object.Key).filter(Boolean) as string[] | undefined + + if (keys?.length) { + await client.send( + new DeleteObjectsCommand({ + Bucket: bucketName, + Delete: { + Objects: keys.map((Key) => ({ Key })), + Quiet: true, + }, + }) + ) + } + + continuationToken = page.NextContinuationToken + } while (continuationToken) +} + +async function abortMultipartUploads(client: S3Client, bucketName: string) { + let keyMarker: string | undefined + let uploadIdMarker: string | undefined + + do { + const page = await client.send( + new ListMultipartUploadsCommand({ + Bucket: bucketName, + KeyMarker: keyMarker, + UploadIdMarker: uploadIdMarker, + }) + ) + + for (const upload of page.Uploads ?? 
[]) {
+      if (upload.Key && upload.UploadId) {
+        await client
+          .send(
+            new AbortMultipartUploadCommand({
+              Bucket: bucketName,
+              Key: upload.Key,
+              UploadId: upload.UploadId,
+            })
+          )
+          .catch(() => undefined)
+      }
+    }
+
+    keyMarker = page.NextKeyMarker
+    uploadIdMarker = page.NextUploadIdMarker
+  } while (keyMarker || uploadIdMarker)
+}
diff --git a/acceptance/support/sigv4.ts b/acceptance/support/sigv4.ts
new file mode 100644
index 000000000..7b09337ec
--- /dev/null
+++ b/acceptance/support/sigv4.ts
@@ -0,0 +1,274 @@
+import { createHash, createHmac } from 'node:crypto'
+import { type AcceptanceConfig, getAcceptanceConfig, joinUrl, requireConfigValue } from './config'
+
+const EMPTY_SHA256_HASH = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
+const STREAMING_PAYLOAD_ALGORITHM = 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD'
+const STREAMING_TRAILER_PAYLOAD_ALGORITHM = 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER'
+
+export async function sendAwsChunkedPutObject(options: {
+  bucketName: string
+  key: string
+  payload: Buffer
+}) {
+  const config = getAcceptanceConfig()
+  return sendAwsChunkedRequest({
+    config,
+    path: `/${options.bucketName}/${options.key}`,
+    payload: options.payload,
+  })
+}
+
+export async function sendAwsChunkedUploadPart(options: {
+  bucketName: string
+  key: string
+  partNumber: number
+  payload: Buffer
+  uploadId: string
+}) {
+  const config = getAcceptanceConfig()
+  return sendAwsChunkedRequest({
+    config,
+    path: `/${options.bucketName}/${options.key}`,
+    payload: options.payload,
+    query: {
+      partNumber: String(options.partNumber),
+      uploadId: options.uploadId,
+    },
+  })
+}
+
+export async function sendAwsChunkedTrailerModeWithoutTrailer(options: {
+  bucketName: string
+  key: string
+  payload: Buffer
+}) {
+  const config = getAcceptanceConfig()
+  return sendAwsChunkedRequest({
+    config,
+    contentSha: STREAMING_TRAILER_PAYLOAD_ALGORITHM,
+    headers: {
+      'x-amz-trailer': 'x-amz-checksum-crc32',
+    },
+    path: `/${options.bucketName}/${options.key}`,
+    payload: options.payload,
+  })
+}
+
+async function sendAwsChunkedRequest(options: {
+  config: AcceptanceConfig
+  contentSha?: string
+  headers?: Record<string, string>
+  path: string
+  payload: Buffer
+  query?: Record<string, string>
+}) {
+  const requestUrl = buildS3RequestUrl(options.config, options.path, options.query)
+  const signedRequest = signS3Request({
+    config: options.config,
+    contentSha: options.contentSha ?? STREAMING_PAYLOAD_ALGORITHM,
+    headers: {
+      'content-encoding': 'aws-chunked',
+      'x-amz-decoded-content-length': options.payload.length.toString(),
+      ...options.headers,
+    },
+    method: 'PUT',
+    url: requestUrl,
+  })
+  const chunk = createSignedChunk(options.payload, signedRequest.signature, signedRequest)
+  const endChunk = createSignedChunk(Buffer.alloc(0), chunk.signature, signedRequest)
+  const encodedBody = Buffer.concat([chunk.encoded, endChunk.encoded])
+
+  const response = await fetch(requestUrl, {
+    body: encodedBody,
+    headers: {
+      ...signedRequest.headers,
+      'content-length': encodedBody.length.toString(),
+    },
+    method: 'PUT',
+  })
+
+  return {
+    body: await response.text(),
+    headers: response.headers,
+    status: response.status,
+  }
+}
+
+function signS3Request(options: {
+  config: AcceptanceConfig
+  contentSha: string
+  headers?: Record<string, string>
+  method: string
+  url: URL
+}) {
+  const longDate = formatAwsDate()
+  const shortDate = longDate.slice(0, 8)
+  const service = 's3'
+  const credentialScope = `${shortDate}/${options.config.region}/${service}/aws4_request`
+  const headers = normalizeHeaders({
+    host: options.url.host,
+    'x-amz-content-sha256': options.contentSha,
+    'x-amz-date': longDate,
+    ...options.headers,
+  })
+  const signedHeaders = Object.keys(headers).sort()
+  const canonicalHeaders = signedHeaders.map((key) => `${key}:${headers[key]}`).join('\n')
+  const canonicalRequest = [
+    options.method.toUpperCase(),
+    canonicalPath(options.url.pathname),
+    canonicalQuery(options.url),
+    `${canonicalHeaders}\n`,
+    signedHeaders.join(';'),
+    options.contentSha,
+  ].join('\n')
+  const stringToSign = [
+    'AWS4-HMAC-SHA256',
+    longDate,
+    credentialScope,
+    sha256Hex(Buffer.from(canonicalRequest)),
+  ].join('\n')
+  const signingKey = deriveSigningKey(
+    requireConfigValue(options.config.s3SecretAccessKey, 'ACCEPTANCE_S3_SECRET_ACCESS_KEY'),
+    shortDate,
+    options.config.region,
+    service
+  )
+  const signature = createHmac('sha256', signingKey).update(stringToSign).digest('hex')
+  const accessKeyId = requireConfigValue(
+    options.config.s3AccessKeyId,
+    'ACCEPTANCE_S3_ACCESS_KEY_ID'
+  )
+
+  return {
+    contentSha: options.contentSha,
+    headers: {
+      ...headers,
+      authorization:
+        `AWS4-HMAC-SHA256 Credential=${accessKeyId}/${credentialScope}, ` +
+        `SignedHeaders=${signedHeaders.join(';')}, Signature=${signature}`,
+    },
+    longDate,
+    region: options.config.region,
+    secretKey: requireConfigValue(
+      options.config.s3SecretAccessKey,
+      'ACCEPTANCE_S3_SECRET_ACCESS_KEY'
+    ),
+    service,
+    shortDate,
+    signature,
+  }
+}
+
+function createSignedChunk(
+  payload: Buffer,
+  previousSignature: string,
+  options: {
+    longDate: string
+    region: string
+    secretKey: string
+    service: string
+    shortDate: string
+  }
+) {
+  const signingKey = deriveSigningKey(
+    options.secretKey,
+    options.shortDate,
+    options.region,
+    options.service
+  )
+  const scope = `${options.shortDate}/${options.region}/${options.service}/aws4_request`
+  const stringToSign = [
+    'AWS4-HMAC-SHA256-PAYLOAD',
+    options.longDate,
+    scope,
+    previousSignature,
+    EMPTY_SHA256_HASH,
+    sha256Hex(payload),
+  ].join('\n')
+  const signature = createHmac('sha256', signingKey).update(stringToSign).digest('hex')
+
+  return {
+    encoded: Buffer.concat([
+      Buffer.from(`${payload.length.toString(16)};chunk-signature=${signature}\r\n`),
+      payload,
+      Buffer.from('\r\n'),
+    ]),
+    signature,
+  }
+}
+
+function buildS3RequestUrl(
+  config: AcceptanceConfig,
+  path: string,
+  query?: Record<string, string>
+): URL {
+  const url = new URL(joinUrl(config.s3Endpoint, path))
+
+  for (const [key, value] of Object.entries(query ?? {})) {
+    url.searchParams.set(key, value)
+  }
+
+  return url
+}
+
+function deriveSigningKey(secretKey: string, shortDate: string, region: string, service: string) {
+  const dateKey = hmacSha256(`AWS4${secretKey}`, shortDate)
+  const regionKey = hmacSha256(dateKey, region)
+  const serviceKey = hmacSha256(regionKey, service)
+  return hmacSha256(serviceKey, 'aws4_request')
+}
+
+function hmacSha256(key: string | Buffer, value: string) {
+  return createHmac('sha256', key).update(value).digest()
+}
+
+function sha256Hex(value: Buffer) {
+  return createHash('sha256').update(value).digest('hex')
+}
+
+function formatAwsDate(date = new Date()) {
+  return date.toISOString().replace(/[:-]|\.\d{3}/g, '')
+}
+
+function normalizeHeaders(headers: Record<string, string>) {
+  return Object.fromEntries(
+    Object.entries(headers).map(([key, value]) => [
+      key.toLowerCase(),
+      value.trim().replace(/\s+/g, ' '),
+    ])
+  )
+}
+
+function canonicalPath(pathname: string) {
+  return pathname
+    .split('/')
+    .map((segment) => encodeRfc3986(decodeURIComponentSafe(segment)))
+    .join('/')
+}
+
+function canonicalQuery(url: URL) {
+  return Array.from(url.searchParams.entries())
+    .sort(([leftKey, leftValue], [rightKey, rightValue]) => {
+      if (leftKey === rightKey) {
+        return leftValue.localeCompare(rightValue)
+      }
+      return leftKey.localeCompare(rightKey)
+    })
+    .map(([key, value]) => `${encodeRfc3986(key)}=${encodeRfc3986(value)}`)
+    .join('&')
+}
+
+function encodeRfc3986(value: string) {
+  return encodeURIComponent(value).replace(
+    /[!'()*]/g,
+    (char) => `%${char.charCodeAt(0).toString(16).toUpperCase()}`
+  )
+}
+
+function decodeURIComponentSafe(value: string) {
+  try {
+    return decodeURIComponent(value)
+  } catch {
+    return value
+  }
+}
diff --git a/acceptance/tsconfig.json b/acceptance/tsconfig.json
new file mode 100644
index 000000000..68d179f68
--- /dev/null
+++ b/acceptance/tsconfig.json
@@ -0,0 +1,10 @@
+{
+  "extends": "../tsconfig.json",
+  "compilerOptions": {
+    "module": "ESNext",
+    "noEmit": true,
+    "rootDir": "."
+  },
+  "include": ["./**/*.ts"],
+  "exclude": ["../node_modules", "../dist"]
+}
diff --git a/docker-compose.yml b/docker-compose.yml
index 9bded87b1..873714d72 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -47,6 +47,7 @@ services:
       S3_PROTOCOL_ACCESS_KEY_SECRET: 850181e4652dd023b7a98c58ae0d2d34bd487ee0cc3254aed6eda37307425907
       # Iceberg Protocol
       ICEBERG_BUCKET_DETECTION_MODE: "FULL_PATH"
+      ICEBERG_WAREHOUSE: .
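+      # NOTE: "." is assumed here to select the local rest-catalog's default warehouse; set a real warehouse identifier for other catalogs.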
ICEBERG_CATALOG_URL: http://rest-catalog:8181/v1 ICEBERG_CATALOG_AUTH_TYPE: token ICEBERG_CATALOG_AUTH_TOKEN: token diff --git a/package.json b/package.json index f0bdb9b23..230cab8f4 100644 --- a/package.json +++ b/package.json @@ -20,7 +20,10 @@ "test:integration": "vitest run --config vitest.integration.config.ts", "test:integration:watch": "vitest --config vitest.integration.config.ts", "test:integration:coverage": "vitest run --coverage --config vitest.integration.config.ts", - "test": "npm run infra:restart && npm run test:dummy-data && npm run test:unit && npm run test:integration", + "acceptance": "tsx acceptance/scripts/run-managed-local.ts", + "acceptance:run": "tsx acceptance/scripts/run.ts", + "acceptance:typecheck": "tsc -p acceptance/tsconfig.json --noEmit", + "test": "npm run test:unit && npm run infra:restart && npm run test:dummy-data && npm run test:integration", "test:oriole": "npm run infra:restart:oriole && npm run test:dummy-data && npm run test:integration", "test:coverage": "npm run infra:restart && npm run test:dummy-data && npm run test:integration:coverage", "test:coverage:ci": "npm run infra:restart:ci && npm run test:dummy-data && npm run test:integration:coverage",
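
Usage sketch for the scripts added above, assuming acceptance/scripts/run.ts and run-managed-local.ts pass process.argv through to the flag parser in acceptance/support/config.ts (the remote URL below is a placeholder):

    npm run acceptance:typecheck
    npm run acceptance -- --profile core
    npm run acceptance:run -- --target remote --base-url https://storage.example.com --allow-destructive true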