From 15d3cac30d159cfe3feaff8cca01ffef83c83b4e Mon Sep 17 00:00:00 2001 From: jamals86 Date: Sat, 28 Mar 2026 23:59:42 +0300 Subject: [PATCH 01/12] Updated dart sdk version --- link/sdks/dart/pubspec.lock | 8 ++++---- link/sdks/dart/pubspec.yaml | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/link/sdks/dart/pubspec.lock b/link/sdks/dart/pubspec.lock index 68a50053b..989190c1f 100644 --- a/link/sdks/dart/pubspec.lock +++ b/link/sdks/dart/pubspec.lock @@ -77,10 +77,10 @@ packages: dependency: "direct dev" description: name: build_runner - sha256: "7981eb922842c77033026eb4341d5af651562008cdb116bdfa31fc46516b6462" + sha256: "521daf8d189deb79ba474e43a696b41c49fb3987818dbacf3308f1e03673a75e" url: "https://pub.dev" source: hosted - version: "2.12.2" + version: "2.13.1" built_collection: dependency: transitive description: @@ -311,10 +311,10 @@ packages: dependency: "direct main" description: name: logger - sha256: a7967e31b703831a893bbc3c3dd11db08126fe5f369b5c648a36f821979f5be3 + sha256: "25aee487596a6257655a1e091ec2ae66bc30e7af663592cc3a27e6591e05035c" url: "https://pub.dev" source: hosted - version: "2.6.2" + version: "2.7.0" logging: dependency: transitive description: diff --git a/link/sdks/dart/pubspec.yaml b/link/sdks/dart/pubspec.yaml index 004050a37..fdbd72240 100644 --- a/link/sdks/dart/pubspec.yaml +++ b/link/sdks/dart/pubspec.yaml @@ -1,6 +1,6 @@ name: kalam_link description: KalamDB client SDK for Dart and Flutter — queries, live subscriptions, and authentication powered by flutter_rust_bridge. 
-version: 0.4.1-beta.3 +version: 0.4.2-beta.1 homepage: https://github.com/jamals86/KalamDB repository: https://github.com/jamals86/KalamDB @@ -13,7 +13,7 @@ dependencies: sdk: flutter flutter_rust_bridge: ^2.11.1 freezed_annotation: ^3.1.0 - logger: ^2.6.2 + logger: ^2.7.0 flutter: plugin: @@ -27,5 +27,5 @@ dev_dependencies: flutter_test: sdk: flutter flutter_lints: ^6.0.0 - build_runner: ^2.12.2 + build_runner: ^2.13.1 freezed: ^3.2.5 From f93904171b82184225debb0bcdac56db85a12c08 Mon Sep 17 00:00:00 2001 From: jamals86 Date: Sun, 29 Mar 2026 00:28:43 +0300 Subject: [PATCH 02/12] Add PG native CI job; tweak tests & logs Add a new GitHub Actions job (pg_extension_tests_native) that runs native e2e tests for the PG extension against a live KalamDB + pgrx PostgreSQL. The job downloads the built server binary, installs system deps and Rust tools, initializes pgrx, starts the server, runs cargo nextest for the kalam-pg-extension e2e tests, stops services, collects logs and uploads artifacts. Wire the new job into the release workflow dependencies. Also: update a few workflow curl invocations to use -fsS and remove redundant KALAMDB_ROOT_PASSWORD exports in some steps. Change JobsManager logging for fetched jobs from info to debug. Update docker/build/test-docker-image.sh by adding a container_get helper and switching health/version requests to use busybox wget via docker exec for more reliable in-container checks. 
--- .github/workflows/release.yml | 180 +++++++++++++++++- .../src/jobs/jobs_manager/runner.rs | 2 +- docker/build/test-docker-image.sh | 11 +- 3 files changed, 183 insertions(+), 10 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index d5744bc9a..f25c512dd 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1204,6 +1204,174 @@ jobs: echo "Smoke tests failed" >&2 exit 1 + # ═══════════════════════════════════════════════════════════════════════════ + # PG EXTENSION TESTS - Native e2e suite against live KalamDB + pgrx PostgreSQL + # ═══════════════════════════════════════════════════════════════════════════ + pg_extension_tests_native: + name: PG Extension Tests (native) + runs-on: ubuntu-latest + needs: + - build_linux_x86_64 + - read_version + + steps: + - name: Checkout + uses: actions/checkout@v6 + + - name: Download server binary + uses: actions/download-artifact@v4 + with: + name: dist-linux-x86_64 + path: dist/ + + - name: Install system dependencies + shell: bash + run: | + set -euo pipefail + sudo apt-get update + sudo apt-get install -y --no-install-recommends \ + clang \ + libclang-dev \ + llvm-dev \ + pkg-config \ + libssl-dev + + - name: Setup Rust + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ env.RUST_VERSION }} + + - name: Cache Rust + uses: Swatinem/rust-cache@v2 + with: + shared-key: pg-extension-tests-native + cache-on-failure: true + + - name: Install cargo-nextest and cargo-pgrx + shell: bash + run: | + set -euo pipefail + curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin + cargo install cargo-pgrx --version "${PGRX_VERSION}" --locked --force + cargo nextest --version + cargo pgrx --version + + - name: Initialize pgrx PostgreSQL + shell: bash + run: | + set -euo pipefail + cargo pgrx init "--pg${PG_EXTENSION_MAJOR}" download + + - name: Prepare server binary + shell: bash + run: | + set -euo pipefail + find dist -name 
"*.tar.gz" -type f -exec tar -xzf {} -C dist/ \; + SERVER_BIN=$(find dist -name "kalamdb-server-*-linux-x86_64" -type f ! -name "*.tar.gz" | head -1) + [[ -z "$SERVER_BIN" ]] && { echo "Server binary not found"; exit 1; } + chmod +x "$SERVER_BIN" + cp "$SERVER_BIN" ./kalamdb-server + ls -la ./kalamdb-server + + - name: Create server config + shell: bash + run: | + set -euo pipefail + cp backend/server.example.toml server.toml + sed -i 's|rocksdb_path = "./data/rocksdb"|rocksdb_path = "./test-data/rocksdb"|g' server.toml + sed -i 's|default_storage_path = "./data/storage"|default_storage_path = "./test-data/storage"|g' server.toml + sed -i 's|logs_path = "./logs"|logs_path = "./test-data/logs"|g' server.toml + sed -i 's|jwt_secret = ".*"|jwt_secret = "pg-native-test-secret-key-minimum-32-characters-long"|g' server.toml + + - name: Start server + shell: bash + env: + KALAMDB_SERVER_HOST: "0.0.0.0" + KALAMDB_ROOT_PASSWORD: "kalamdb123" + KALAMDB_JWT_SECRET: "pg-native-test-secret-key-minimum-32-characters-long" + KALAMDB_NODE_ID: "1" + KALAMDB_CLUSTER_RPC_ADDR: "0.0.0.0:9188" + KALAMDB_CLUSTER_API_ADDR: "http://127.0.0.1:8080" + run: | + set -euo pipefail + mkdir -p test-data/rocksdb test-data/storage test-data/logs + ./kalamdb-server > pg-server.log 2>&1 & + SERVER_PID=$! + echo "SERVER_PID=$SERVER_PID" >> "$GITHUB_ENV" + for i in {1..60}; do + if curl -sf http://127.0.0.1:8080/health >/dev/null 2>&1; then + echo "✅ Server is ready (${i}s)" + exit 0 + fi + if ! kill -0 "$SERVER_PID" 2>/dev/null; then + echo "❌ Server process died" + cat pg-server.log || true + exit 1 + fi + echo " Waiting... 
($i/60)" + sleep 1 + done + echo "❌ Timed out waiting for server" + cat pg-server.log || true + exit 1 + + - name: Setup pgrx PostgreSQL and install extension + shell: bash + env: + PG_MAJOR: ${{ env.PG_EXTENSION_MAJOR }} + PG_EXTENSION_FLAVOR: ${{ env.PG_EXTENSION_FLAVOR }} + run: | + set -euo pipefail + ./pg/scripts/pgrx-test-setup.sh 2>&1 | tee pg-pgrx-setup-output.txt + + - name: Run native PG extension e2e tests + shell: bash + env: + KALAMDB_SERVER_URL: "http://127.0.0.1:8080" + KALAMDB_ROOT_PASSWORD: "kalamdb123" + run: | + set -euo pipefail + cargo nextest run \ + -p kalam-pg-extension \ + --features e2e \ + -E 'test(e2e)' \ + --test-threads 1 \ + 2>&1 | tee pg-native-test-output.txt + + - name: Stop pgrx PostgreSQL + if: always() + shell: bash + env: + PG_MAJOR: ${{ env.PG_EXTENSION_MAJOR }} + PG_EXTENSION_FLAVOR: ${{ env.PG_EXTENSION_FLAVOR }} + run: | + ./pg/scripts/pgrx-test-setup.sh --stop || true + + - name: Stop server + if: always() + shell: bash + run: | + [[ -n "${SERVER_PID:-}" ]] && kill "$SERVER_PID" 2>/dev/null || true + + - name: Collect PG native test logs + if: always() + shell: bash + run: | + set -euo pipefail + mkdir -p pg-native-test-results + cp pg-server.log pg-native-test-results/ 2>/dev/null || true + cp pg-pgrx-setup-output.txt pg-native-test-results/ 2>/dev/null || true + cp pg-native-test-output.txt pg-native-test-results/ 2>/dev/null || true + cp "$HOME/.pgrx/data-${PG_EXTENSION_MAJOR}/pgrx.log" pg-native-test-results/pgrx.log 2>/dev/null || true + + - name: Upload PG native test results + if: always() + uses: actions/upload-artifact@v6 + with: + name: pg-native-test-results + path: pg-native-test-results/* + if-no-files-found: ignore + release: name: Publish GitHub Release runs-on: ubuntu-latest @@ -1220,6 +1388,7 @@ jobs: - build_pg_extension_x86_64 - read_version - smoke_tests + - pg_extension_tests_native - sdk_tests_typescript - sdk_tests_dart - docker @@ -1613,6 +1782,7 @@ jobs: - build_pg_extension_x86_64 - docker - 
smoke_tests + - pg_extension_tests_native - read_version if: ${{ github.event_name == 'push' || github.event.inputs.docker_push == 'true' }} @@ -1877,7 +2047,6 @@ jobs: shell: bash run: | set -euo pipefail - export KALAMDB_ROOT_PASSWORD="kalamdb123" cp backend/server.example.toml server.toml sed -i 's|data_path = "./data"|data_path = "./test-data"|g' server.toml sed -i 's|logs_path = "./logs"|logs_path = "./test-data/logs"|g' server.toml @@ -1897,10 +2066,10 @@ jobs: shell: bash run: | set -euo pipefail - curl -sf http://localhost:8080/v1/api/auth/setup \ + curl -fsS http://localhost:8080/v1/api/auth/setup \ -H "Content-Type: application/json" \ -d '{"username":"admin","password":"kalamdb123","root_password":"kalamdb123"}' \ - >/dev/null || true + >/dev/null - name: Run TypeScript SDK tests (offline + serial e2e) id: run_typescript_tests @@ -2035,7 +2204,6 @@ jobs: shell: bash run: | set -euo pipefail - export KALAMDB_ROOT_PASSWORD="kalamdb123" cp backend/server.example.toml server.toml sed -i 's|data_path = "./data"|data_path = "./test-data"|g' server.toml sed -i 's|logs_path = "./logs"|logs_path = "./test-data/logs"|g' server.toml @@ -2055,10 +2223,10 @@ jobs: shell: bash run: | set -euo pipefail - curl -sf http://localhost:8080/v1/api/auth/setup \ + curl -fsS http://localhost:8080/v1/api/auth/setup \ -H "Content-Type: application/json" \ -d '{"username":"admin","password":"kalamdb123","root_password":"kalamdb123"}' \ - >/dev/null || true + >/dev/null - name: Run Dart SDK tests id: run_dart_tests diff --git a/backend/crates/kalamdb-core/src/jobs/jobs_manager/runner.rs b/backend/crates/kalamdb-core/src/jobs/jobs_manager/runner.rs index e71e1f392..70e8a6320 100644 --- a/backend/crates/kalamdb-core/src/jobs/jobs_manager/runner.rs +++ b/backend/crates/kalamdb-core/src/jobs/jobs_manager/runner.rs @@ -398,7 +398,7 @@ impl JobsManager { match job_result { Ok(Some((job, job_node))) => { - log::info!( + log::debug!( "[{}] Job fetched for execution: type={:?}, status={:?}, 
is_leader={}", job.job_id, job.job_type, diff --git a/docker/build/test-docker-image.sh b/docker/build/test-docker-image.sh index 2679e9710..dd015f6c9 100755 --- a/docker/build/test-docker-image.sh +++ b/docker/build/test-docker-image.sh @@ -33,6 +33,11 @@ log_warn() { echo -e "${YELLOW}[WARN]${NC} $1" } +container_get() { + local path="$1" + docker exec "$CONTAINER_NAME" /usr/local/bin/busybox wget -qO- "http://127.0.0.1:8080${path}" +} + cleanup() { log_info "Cleaning up test container..." docker stop "$CONTAINER_NAME" 2>/dev/null || true @@ -115,7 +120,7 @@ main() { exit 1 fi - if curl -sf "http://localhost:$TEST_PORT/health" &>/dev/null; then + if docker exec "$CONTAINER_NAME" /usr/local/bin/busybox wget --spider -q "http://127.0.0.1:8080/health" &>/dev/null; then log_info "Server is ready! (took ${ELAPSED}s)" break fi @@ -125,7 +130,7 @@ main() { # Test 1: Health check log_info "Test 1: Health check endpoint..." - HEALTH_RESPONSE=$(curl -sf "http://localhost:$TEST_PORT/health") + HEALTH_RESPONSE=$(container_get "/health") if [ $? -eq 0 ]; then log_info "✓ Health check passed: $HEALTH_RESPONSE" else @@ -135,7 +140,7 @@ main() { # Test 2: Version info (healthcheck) log_info "Test 2: Version info (healthcheck)..." - VERSION_RESPONSE=$(curl -sf "http://localhost:$TEST_PORT/v1/api/healthcheck" || echo "FAILED") + VERSION_RESPONSE=$(container_get "/v1/api/healthcheck" || echo "FAILED") if [ "$VERSION_RESPONSE" != "FAILED" ]; then log_info "✓ Version info passed: $VERSION_RESPONSE" else From 00a57b830aa65ced91cad0a9b983a92402f6b8c1 Mon Sep 17 00:00:00 2001 From: jamals86 Date: Sun, 29 Mar 2026 14:46:44 +0300 Subject: [PATCH 03/12] Use release script for TypeScript SDK tests Replace inline server start/bootstrap in the release workflow with a dedicated scripts/test-typescript-sdk-release.sh runner and pass KALAMDB_SERVER_BIN into the job. Add the test runner, example server.toml and captured test output, and ignore a local ts-sdk-repro/server.toml in .gitignore. 
Improve scripts: make pgrx-version resolution robust in pg/scripts/pgrx-test-setup.sh; strengthen scripts/cluster.sh PID handling, free leftover HTTP/RPC listeners, add get_access_token and use the e2e-tests feature flag for cargo tests, and have detect_leader_url authenticate with the server. These changes centralize TS SDK release testing, reduce CI flakiness, and harden local cluster/test tooling. --- .github/workflows/release.yml | 42 +- .gitignore | 1 + pg/scripts/pgrx-test-setup.sh | 21 +- scripts/cluster.sh | 72 +- scripts/test-typescript-sdk-release.sh | 80 ++ ts-sdk-release/server.toml | 654 ++++++++++++ ts-sdk-test-output.txt | 1350 ++++++++++++++++++++++++ 7 files changed, 2171 insertions(+), 49 deletions(-) create mode 100755 scripts/test-typescript-sdk-release.sh create mode 100644 ts-sdk-release/server.toml create mode 100644 ts-sdk-test-output.txt diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f25c512dd..8a2fefe28 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -2043,50 +2043,18 @@ jobs: [[ -z "$SERVER_BIN" ]] && { echo "Server binary not found"; exit 1; } chmod +x "$SERVER_BIN" && cp "$SERVER_BIN" ./kalamdb-server - - name: Start server - shell: bash - run: | - set -euo pipefail - cp backend/server.example.toml server.toml - sed -i 's|data_path = "./data"|data_path = "./test-data"|g' server.toml - sed -i 's|logs_path = "./logs"|logs_path = "./test-data/logs"|g' server.toml - sed -i 's|jwt_secret = ".*"|jwt_secret = "sdk-test-secret-key-minimum-32-characters-long"|g' server.toml - mkdir -p test-data/rocksdb test-data/storage test-data/logs - ./kalamdb-server > ts-sdk-server.log 2>&1 & - SERVER_PID=$! - echo "SERVER_PID=$SERVER_PID" >> "$GITHUB_ENV" - for i in {1..60}; do - curl -sf http://localhost:8080/health > /dev/null 2>&1 && echo "✅ Server ready (${i}s)" && exit 0 - kill -0 "$SERVER_PID" 2>/dev/null || { echo "❌ Server died"; cat ts-sdk-server.log; exit 1; } - echo " Waiting... 
($i/60)"; sleep 1 - done - echo "❌ Timed out"; cat ts-sdk-server.log; exit 1 - - - name: Bootstrap SDK test admin user - shell: bash - run: | - set -euo pipefail - curl -fsS http://localhost:8080/v1/api/auth/setup \ - -H "Content-Type: application/json" \ - -d '{"username":"admin","password":"kalamdb123","root_password":"kalamdb123"}' \ - >/dev/null - - name: Run TypeScript SDK tests (offline + serial e2e) id: run_typescript_tests continue-on-error: true shell: bash - working-directory: link/sdks/typescript env: KALAMDB_URL: "http://localhost:8080" KALAMDB_USER: "admin" KALAMDB_PASSWORD: "kalamdb123" + KALAMDB_SERVER_BIN: "${{ github.workspace }}/kalamdb-server" run: | set -euo pipefail - npm install --no-audit --no-fund - # test.sh builds the SDK, runs the offline Node suite, then runs the - # live e2e files serially to avoid auth rate-limit flakes in CI. - chmod +x ./test.sh - ./test.sh 2>&1 | tee ../../../ts-sdk-test-output.txt + ./scripts/test-typescript-sdk-release.sh - name: Parse TypeScript SDK test counts if: always() @@ -2127,12 +2095,6 @@ jobs: handle.write(f"color={color}\n") PYTHON - - name: Stop server - if: always() - shell: bash - run: | - [[ -n "${SERVER_PID:-}" ]] && kill "$SERVER_PID" 2>/dev/null || true - - name: Upload server log if: always() uses: actions/upload-artifact@v6 diff --git a/.gitignore b/.gitignore index a06722e53..296149f4c 100644 --- a/.gitignore +++ b/.gitignore @@ -106,3 +106,4 @@ ui/package-lock.json /vendor /link/sdks/typescript/.wasm-cargo-home-test1 /link/sdks/typescript/.wasm-cargo-home-test2 +ts-sdk-repro/server.toml diff --git a/pg/scripts/pgrx-test-setup.sh b/pg/scripts/pgrx-test-setup.sh index 6a6759ba9..514b676b2 100755 --- a/pg/scripts/pgrx-test-setup.sh +++ b/pg/scripts/pgrx-test-setup.sh @@ -26,7 +26,26 @@ PG_CRATE="$SCRIPT_DIR/.." 
PGRX_HOME="$HOME/.pgrx" PG_MAJOR="${PG_MAJOR:-16}" PG_EXTENSION_FLAVOR="${PG_EXTENSION_FLAVOR:-pg${PG_MAJOR}}" -PGRX_VERSION_DIR="${PGRX_VERSION_DIR:-$(find "$PGRX_HOME" -maxdepth 1 -type d -name "${PG_MAJOR}.*" 2>/dev/null | sort -V | tail -n 1)}" + +resolve_pgrx_version_dir() { + local candidate + + if [[ -n "${PGRX_VERSION_DIR:-}" ]]; then + printf '%s\n' "$PGRX_VERSION_DIR" + return 0 + fi + + while IFS= read -r candidate; do + [[ -z "$candidate" ]] && continue + [[ "$candidate" == *_unpack ]] && continue + if [[ -x "$candidate/pgrx-install/bin/pg_config" ]]; then + printf '%s\n' "$candidate" + return 0 + fi + done < <(find "$PGRX_HOME" -maxdepth 1 -type d -name "${PG_MAJOR}.*" 2>/dev/null | sort -V -r) +} + +PGRX_VERSION_DIR="$(resolve_pgrx_version_dir)" PGRX_INSTALL_BIN_DIR="$PGRX_VERSION_DIR/pgrx-install/bin" PG_CONFIG="$PGRX_INSTALL_BIN_DIR/pg_config" PSQL="$PGRX_INSTALL_BIN_DIR/psql" diff --git a/scripts/cluster.sh b/scripts/cluster.sh index e37d28f94..53b56b026 100755 --- a/scripts/cluster.sh +++ b/scripts/cluster.sh @@ -437,9 +437,12 @@ start_node() { exit 1 fi - (cd "$data_dir" && KALAMDB_ROOT_PASSWORD="$ROOT_PASSWORD" nohup "$binary" > "$log_file" 2>&1) & - local pid=$! - echo $pid > "$pid_file" + ( + cd "$data_dir" + KALAMDB_ROOT_PASSWORD="$ROOT_PASSWORD" nohup "$binary" > "$log_file" 2>&1 & + echo $! 
> "$pid_file" + ) + local pid=$(cat "$pid_file") # Wait for server to start sleep 2 @@ -456,6 +459,9 @@ stop_node() { local node_id=$1 local data_dir="$CLUSTER_DATA_DIR/node$node_id" local pid_file="$data_dir/server.pid" + local http_port=$((8080 + node_id)) + local rpc_port=$((9080 + node_id)) + local listener_pids="" if [ -f "$pid_file" ]; then local pid=$(cat "$pid_file") @@ -473,6 +479,24 @@ stop_node() { else echo -e "${YELLOW} Node $node_id not running${NC}" fi + + listener_pids=$( ( + lsof -tiTCP:"$http_port" -sTCP:LISTEN 2>/dev/null || true + lsof -tiTCP:"$rpc_port" -sTCP:LISTEN 2>/dev/null || true + ) | sort -u ) + + if [ -n "$listener_pids" ]; then + echo -e "${YELLOW} Cleaning up leftover listener processes for node $node_id...${NC}" + while IFS= read -r listener_pid; do + [ -z "$listener_pid" ] && continue + kill "$listener_pid" 2>/dev/null || true + sleep 1 + if kill -0 "$listener_pid" 2>/dev/null; then + kill -9 "$listener_pid" 2>/dev/null || true + fi + done <<< "$listener_pids" + echo -e "${GREEN} ✓ Freed ports $http_port and $rpc_port${NC}" + fi } check_node_health() { @@ -650,6 +674,35 @@ ensure_cluster_healthy() { fi } +get_access_token() { + local base_url="${1:-http://127.0.0.1:$NODE1_HTTP}" + local response + + response=$(curl -fsS -X POST "$base_url/v1/api/auth/login" \ + -H "Content-Type: application/json" \ + -d "{\"username\":\"root\",\"password\":\"$ROOT_PASSWORD\"}") || return 1 + + python3 - "$response" << 'PY' +import json +import sys + +if len(sys.argv) < 2: + sys.exit(1) + +try: + data = json.loads(sys.argv[1]) +except json.JSONDecodeError: + sys.exit(1) + +token = data.get("access_token") +if token: + print(token) + sys.exit(0) + +sys.exit(1) +PY +} + run_cluster_tests() { print_header ensure_cluster_healthy @@ -683,7 +736,7 @@ run_smoke_tests() { KALAMDB_SERVER_URL="$leader_url" \ KALAMDB_ROOT_PASSWORD="$ROOT_PASSWORD" \ RUST_TEST_THREADS=1 \ - cargo test --test smoke -- --nocapture + cargo test --features e2e-tests --test smoke -- 
--nocapture } run_smoke_tests_all_nodes() { @@ -708,7 +761,7 @@ run_smoke_tests_all_nodes() { if KALAMDB_SERVER_URL="$node_url" \ KALAMDB_ROOT_PASSWORD="$ROOT_PASSWORD" \ RUST_TEST_THREADS=1 \ - cargo test --test smoke smoke_test_core_operations -- --nocapture; then + cargo test --features e2e-tests --test smoke smoke_test_core_operations -- --nocapture; then echo -e "${GREEN}✓ Core operations passed on $node_url${NC}" ((passed++)) else @@ -817,14 +870,17 @@ run_full_test_suite() { detect_leader_url() { local query="SELECT api_addr, is_leader FROM system.cluster" local response + local access_token + + access_token=$(get_access_token) || return 1 response=$( - curl -s \ - -u "root:$ROOT_PASSWORD" \ + curl -fsS \ -H "Content-Type: application/json" \ + -H "Authorization: Bearer $access_token" \ -d "{\"sql\":\"$query\"}" \ "http://127.0.0.1:$NODE1_HTTP/v1/api/sql" - ) + ) || return 1 if [ -z "$response" ]; then return 1 diff --git a/scripts/test-typescript-sdk-release.sh b/scripts/test-typescript-sdk-release.sh new file mode 100755 index 000000000..ca300c6dd --- /dev/null +++ b/scripts/test-typescript-sdk-release.sh @@ -0,0 +1,80 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ROOT_DIR="$(cd "$SCRIPT_DIR/.." 
&& pwd)" +WORK_DIR="${TS_SDK_RELEASE_TMP_DIR:-$ROOT_DIR/ts-sdk-release}" +SERVER_URL="${KALAMDB_URL:-http://localhost:8080}" +SERVER_USER="${KALAMDB_USER:-admin}" +SERVER_PASSWORD="${KALAMDB_PASSWORD:-kalamdb123}" +ROOT_PASSWORD="${KALAMDB_ROOT_PASSWORD:-kalamdb123}" +JWT_SECRET="sdk-test-secret-key-minimum-32-characters-long" +SERVER_LOG="${TS_SDK_SERVER_LOG:-$ROOT_DIR/ts-sdk-server.log}" +TEST_OUTPUT="${TS_SDK_TEST_OUTPUT:-$ROOT_DIR/ts-sdk-test-output.txt}" +SERVER_BIN="${KALAMDB_SERVER_BIN:-}" +SERVER_PID="" + +cleanup() { + if [[ -n "$SERVER_PID" ]]; then + kill "$SERVER_PID" 2>/dev/null || true + wait "$SERVER_PID" 2>/dev/null || true + fi +} + +trap cleanup EXIT + +rm -rf "$WORK_DIR" +mkdir -p "$WORK_DIR/data" "$WORK_DIR/logs" +cp "$ROOT_DIR/backend/server.example.toml" "$WORK_DIR/server.toml" + +perl -0pi -e 's|data_path = "\./data"|data_path = "'"$WORK_DIR"'/data"|g; s|logs_path = "\./logs"|logs_path = "'"$WORK_DIR"'/logs"|g; s|jwt_secret = ".*"|jwt_secret = "'"$JWT_SECRET"'"|g' "$WORK_DIR/server.toml" + +if [[ -n "$SERVER_BIN" ]]; then + SERVER_CMD=("$SERVER_BIN" "$WORK_DIR/server.toml") +else + SERVER_CMD=(cargo run --manifest-path "$ROOT_DIR/backend/Cargo.toml" --bin kalamdb-server -- "$WORK_DIR/server.toml") +fi + +: > "$SERVER_LOG" + +( + cd "$ROOT_DIR" + KALAMDB_SERVER_HOST=0.0.0.0 \ + KALAMDB_JWT_SECRET="$JWT_SECRET" \ + "${SERVER_CMD[@]}" > "$SERVER_LOG" 2>&1 +) & +SERVER_PID=$! + +for i in {1..60}; do + if curl -sf "$SERVER_URL/health" > /dev/null 2>&1; then + echo "✅ TypeScript SDK test server ready (${i}s)" + break + fi + if ! kill -0 "$SERVER_PID" 2>/dev/null; then + echo "❌ TypeScript SDK test server died" + cat "$SERVER_LOG" || true + exit 1 + fi + echo " Waiting for TypeScript SDK test server... ($i/60)" + sleep 1 +done + +if ! 
curl -sf "$SERVER_URL/health" > /dev/null 2>&1; then + echo "❌ Timed out waiting for TypeScript SDK test server" + cat "$SERVER_LOG" || true + exit 1 +fi + +curl -fsS "$SERVER_URL/v1/api/auth/setup" \ + -H "Content-Type: application/json" \ + -d "{\"username\":\"$SERVER_USER\",\"password\":\"$SERVER_PASSWORD\",\"root_password\":\"$ROOT_PASSWORD\"}" \ + >/dev/null + +( + cd "$ROOT_DIR/link/sdks/typescript" + chmod +x ./test.sh + KALAMDB_URL="$SERVER_URL" \ + KALAMDB_USER="$SERVER_USER" \ + KALAMDB_PASSWORD="$SERVER_PASSWORD" \ + ./test.sh +) 2>&1 | tee "$TEST_OUTPUT" \ No newline at end of file diff --git a/ts-sdk-release/server.toml b/ts-sdk-release/server.toml new file mode 100644 index 000000000..cf71ee058 --- /dev/null +++ b/ts-sdk-release/server.toml @@ -0,0 +1,654 @@ +# KalamDB Server Configuration +# This is an example configuration file with all available settings. +# Copy this file to config.toml and adjust values for your environment. +# +# NOTE: Runtime configuration only! +# - Namespace and storage location configuration is stored in system tables (via kalamdb-sql) +# - This file contains only server runtime settings (ports, paths, limits, etc.) 
+ +[server] +# Server bind address (default: 127.0.0.1) +host = "0.0.0.0" + +# Server port (default: 8080) +port = 8080 + +# Number of worker threads (0 = number of CPU cores) +workers = 0 + +# Enable HTTP/2 protocol support (default: true) +# When true, server uses automatic HTTP/1.1 and HTTP/2 cleartext (h2c) negotiation +# When false, server only supports HTTP/1.1 +# HTTP/2 offers: +# - Multiplexed requests (multiple requests on single connection) +# - Header compression (HPACK) +# - Binary protocol (more efficient parsing) +# - Server push support (for future features) +enable_http2 = true + +# API version (default: "v1") +# Controls the versioned endpoint prefix (e.g., /v1/api/sql) +api_version = "v1" + +[storage] +# Base data directory for all KalamDB storage +# Subdirectories are automatically created: +# ./data/rocksdb - RocksDB hot storage (write buffer) +# ./data/storage - Parquet cold storage (flushed segments) +# ./data/snapshots - Raft snapshots (consensus state) +data_path = "/Users/jamal/git/KalamDB/ts-sdk-release/data" + +# Templates for table storage paths (used by 'local' storage) +# Available placeholders: {namespace}, {tableName}, {userId} +# Final paths: {default_storage_path}/{template} +# Examples: +# Shared table: ./data/storage/myapp/products +# User table: ./data/storage/myapp/preferences/user123 +shared_tables_template = "{namespace}/{tableName}" +user_tables_template = "{namespace}/{tableName}/{userId}" + +# Remote storage timeout settings (S3, GCS, Azure) +# These timeouts apply programmatically to all remote storage operations +[storage.remote_timeouts] +# Request timeout in seconds for all remote storage operations (default: 60s) +request_timeout_secs = 60 +# Connect timeout in seconds for establishing connections (default: 10s) +connect_timeout_secs = 10 + + +[storage.rocksdb] +# Block cache size for reads in bytes (default: 2MB) +# IMPORTANT: This cache is SHARED across ALL column families +# Adding more column families does NOT 
increase memory proportionally +block_cache_size = 2097152 + +# Maximum number of background compaction/flush jobs (default: 4) +max_background_jobs = 4 + +[storage.rocksdb.cf_profiles.system_meta] +# Low-write system metadata tables and compatibility partitions. +write_buffer_size = 32768 +max_write_buffers = 2 + +[storage.rocksdb.cf_profiles.system_index] +# Secondary indexes for system tables. +write_buffer_size = 32768 +max_write_buffers = 2 + +[storage.rocksdb.cf_profiles.hot_data] +# User/shared/stream data CFs and topic message payloads. +# These stay warmer because they sit on the main read/write path. +write_buffer_size = 131072 +max_write_buffers = 2 + +[storage.rocksdb.cf_profiles.hot_index] +# PK indexes and vector PK indexes. These are latency-sensitive, but smaller than data CFs. +write_buffer_size = 65536 +max_write_buffers = 2 + +[storage.rocksdb.cf_profiles.raft] +# The single raft_data CF is append-heavy and benefits from a larger memtable. +write_buffer_size = 262144 +max_write_buffers = 2 + +[datafusion] +# Memory limit for query execution in bytes (default: 64MB) +# KalamDB is optimised for mobile/OLTP workloads, not heavy analytics. +# 64 MB is sufficient for key-based lookups and small aggregates. +# Queries that exceed this limit are terminated immediately. +memory_limit = 67108864 + +# Number of parallel threads for query execution (default: 2) +# Low fixed value: mobile app queries are short-lived key lookups; +# high parallelism just increases context-switch overhead. +# Set to 0 to auto-detect CPU count (not recommended for shared servers). +query_parallelism = 2 + +# Maximum number of partitions per query (default: 4) +# Caps concurrent partition scans; reduces memory and CPU burst per query. +max_partitions = 4 + +# Batch size for record processing (default: 1024 rows) +# Smaller batches limit peak arrow allocation per operator. 
+batch_size = 1024 + +[flush] +# Default row limit for flush policies (default: 10000 rows) +# Tables without explicit flush policy will use this value +default_row_limit = 10000 + +# Default time interval for flush in seconds (default: 300s = 5 minutes) +# Tables will flush to Parquet after this duration +default_time_interval = 300 + +# How often the background scheduler checks for tables with pending writes +# and creates flush jobs (default: 60 seconds). Set to 0 to disable. +check_interval_seconds = 60 + +[retention] +# Default retention hours for soft-deleted rows (default: 168 hours = 7 days) +# Rows with _deleted=true will be kept in Parquet files for this duration +default_deleted_retention_hours = 168 + +# Enable periodic dba.stats collection (default: true) +# Set to false to disable the background stats recorder, saving memory and CPU +# in resource-constrained environments (e.g. Docker containers). +enable_dba_stats = false + +# Number of days to preserve samples in dba.stats (default: 7 days) +# Set to 0 to disable automatic cleanup of historical DBA metrics +dba_stats_retention_days = 7 + +[stream] +# Default TTL for stream table rows in seconds (default: 10 seconds) +# Stream tables are ephemeral - rows expire after this duration +default_ttl_seconds = 10 + +# Default maximum buffer size for stream tables (default: 10000 rows) +# Oldest rows are evicted when buffer exceeds this limit +default_max_buffer = 10000 + +# Stream eviction interval in seconds (default: 60 seconds = 1 minute) +# How often the background task checks and evicts expired events +eviction_interval_seconds = 60 + +[manifest_cache] +# Eviction job interval in seconds (default: 300s = 5 minutes) +# How often the eviction job runs to clean up stale manifest entries +eviction_interval_seconds = 300 + +# Maximum number of manifest entries in hot cache (default: 500) +# Older entries are evicted when this limit is reached (LRU policy) +max_entries = 500 + +# TTL in days for manifest 
eviction (default: 7 days) +# Manifests not accessed for this many days will be removed from both +# hot cache (RAM) and RocksDB persistent cache +# Set to 0 to disable manifest eviction (not recommended for long-running servers) +eviction_ttl_days = 7 + +# Weight factor for user table manifests (default: 10) +# User tables are evicted N times faster than shared tables. +user_table_weight_factor = 10 + +[limits] +# Maximum message size for REST API requests in bytes (default: 1MB) +max_message_size = 1048576 + +# Maximum rows that can be returned in a single query (default: 1000) +max_query_limit = 1000 + +# Default LIMIT for queries without explicit LIMIT clause (default: 50) +default_query_limit = 50 + +[logging] +# Log level: error, warn, info, debug, trace (default: info) +level = "info" + +# Directory for all log files (default: "./logs") +# Server will create server.log (or server.jsonl for JSON format), slow.log, etc. +logs_path = "/Users/jamal/git/KalamDB/ts-sdk-release/logs" + +# Also log to console/stdout (default: true) +log_to_console = true + +# Log format: compact, json (default: compact) +# - compact: Human-readable text format -> server.log +# Format: [timestamp] [LEVEL] [thread - target:line] - message +# - json: JSON Lines format -> server.jsonl (queryable via system.server_logs) +# Each line is a valid JSON object with timestamp, level, thread, target, line, message +format = "compact" + +# Slow query logging threshold in milliseconds (default: 1000ms = 1 second) +# Queries taking longer than this threshold will be logged to slow.log +# AND displayed as WARN in the console +# Set to a high value (e.g., 999999) to disable slow query logging +slow_query_threshold_ms = 1000 + +[logging.otlp] +# Export tracing spans to an OTLP collector (Jaeger all-in-one supports this) +enabled = false +# gRPC endpoint for Jaeger OTLP receiver (port 4317) +endpoint = "http://127.0.0.1:4317" +# Protocol: "grpc" or "http" +protocol = "grpc" +# Service name shown in 
Jaeger UI +service_name = "kalamdb-server" +# Export timeout in milliseconds +timeout_ms = 3000 + +[performance] +# Request timeout in seconds (default: 30s) +# Requests exceeding this duration will be terminated +request_timeout = 30 + +# Keep-alive timeout in seconds (default: 75s) +# HTTP keep-alive allows connection reuse, reducing TCP handshake overhead +keepalive_timeout = 75 + +# Maximum concurrent connections per worker (default: 25000) +# Includes both REST API and WebSocket connections +# For testing environments with high concurrency, consider 50000 +max_connections = 25000 + +# TCP listen backlog - pending connections queue size (default: 4096) +# Controls how many connections can wait in the kernel queue before being accepted +# Increase for burst traffic or high-concurrency scenarios +# Recommended values: +# - Development/Testing: 4096-8192 (handles burst test loads) +# - Production: 4096-8192 (handles traffic spikes) +# - High traffic: 8192+ (enterprise scale) +# Industry standards: Nginx (511), Apache (511), Caddy (1024), Actix (2048) +backlog = 4096 + +# Max blocking threads per worker for CPU-intensive operations (default: 32) +# Used for RocksDB I/O and synchronous operations +# Increase for high-concurrency workloads or test environments +worker_max_blocking_threads = 32 + +# Number of tokio runtime worker threads (default: 0 = auto, num_cpus capped at 4) +# Lower values reduce idle RSS from thread stacks (~2MB per thread). +# Set to 0 for auto-detection, or an explicit count for Docker/constrained environments. +# Can also be overridden via KALAMDB_TOKIO_WORKER_THREADS env var. 
+tokio_worker_threads = 0 + +# Client request timeout in seconds (default: 5) +# Time allowed for client to send complete request headers +client_request_timeout = 5 + +# Client disconnect timeout in seconds (default: 2) +# Time allowed for graceful connection shutdown +client_disconnect_timeout = 2 + +# Maximum HTTP header size in bytes (default: 16384 = 16KB) +# Increase if you have large JWT tokens or custom headers +max_header_size = 16384 + +[rate_limit] +# Maximum SQL queries per second per user (default: 100) +# Prevents query flooding from a single user +# For testing/development environments with high load, increase to 10000-100000 +max_queries_per_sec = 100 + +# Maximum WebSocket messages per second per connection (default: 50) +# Prevents message flooding on WebSocket connections +# For testing/development environments with high load, increase to 500-1000 +max_messages_per_sec = 50 + +# Maximum concurrent live query subscriptions per user (default: 10) +# Limits total active subscriptions to prevent resource exhaustion +# For testing/development environments, increase to 100-1000 +max_subscriptions_per_user = 10 + +# Maximum authentication requests per IP per second (default: 20) +# Prevents brute force attacks and login flooding +# Applies to /auth/login, /auth/refresh, /setup endpoints +max_auth_requests_per_ip_per_sec = 20 + +# Maximum concurrent connections per IP address (default: 100) +# Prevents a single IP from exhausting all server connections +max_connections_per_ip = 100 + +# Maximum requests per second per IP BEFORE authentication (default: 200) +# ⚠️ CRITICAL: This is the main rate limit that triggers IP BANS +# Applied before auth to protect against unauthenticated floods +# If exceeded repeatedly, IP will be banned for ban_duration_seconds +# For testing/development with high request rates, set to 100000+ +max_requests_per_ip_per_sec = 200 + +# Maximum request body size in bytes (default: 10MB) +# Prevents memory exhaustion from huge 
request payloads +request_body_limit_bytes = 10485760 + +# Duration in seconds to ban abusive IPs (default: 300 = 5 minutes) +# IPs that violate max_requests_per_ip_per_sec 10+ times are banned +ban_duration_seconds = 300 + +# Enable connection protection middleware (default: true) +# Set to false to completely disable rate limiting (NOT recommended for production) +enable_connection_protection = true + +# Maximum cached entries for rate limiting state (default: 1,000) +# MEMORY OPTIMIZATION: Reduced from 100k. Moka internal bookkeeping scales +# with max_capacity. 1k handles typical mobile-app deployments. +cache_max_entries = 1000 + +# Time-to-idle for cached entries in seconds (default: 600 = 10 minutes) +cache_ttl_seconds = 600 + +# ============================================================================ +# Security Settings +# ============================================================================ +# CORS, WebSocket, and request limit configuration + +[security] +# Maximum request body size in bytes (default: 10MB) +# Prevents memory exhaustion from large payloads +max_request_body_size = 10485760 + +# Maximum WebSocket message size in bytes (default: 1MB) +# Prevents memory exhaustion from large WebSocket messages +max_ws_message_size = 1048576 + +# Allowed WebSocket origins (if different from CORS origins) +# Leave empty to use CORS allowed_origins for WebSocket validation +allowed_ws_origins = [] + +# Strict WebSocket origin checking (default: false) +# If true, rejects WebSocket connections without Origin header +strict_ws_origin_check = false + +# Trusted reverse proxy source IPs or CIDR ranges for forwarded client IP headers. +# Only peers in this list may supply X-Forwarded-For / X-Real-IP. 
+# Examples: ["10.0.1.9", "10.0.0.0/8", "192.168.0.0/24"] +trusted_proxy_ranges = [] + +# CORS Configuration (uses actix-cors) +# See: https://docs.rs/actix-cors +[security.cors] +# Allowed origins for CORS requests +# Use ["*"] or empty [] for any origin (development mode) +# For production, specify exact origins: ["https://app.example.com", "https://admin.example.com"] +allowed_origins = [] + +# Allowed HTTP methods (default: common REST methods) +allowed_methods = ["GET", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"] + +# Allowed HTTP headers +# Use ["*"] to allow any header +allowed_headers = ["Authorization", "Content-Type", "Accept", "Origin", "X-Requested-With"] + +# Headers to expose to the browser (default: none) +# Example: ["X-Custom-Header", "X-Request-Id"] +expose_headers = [] + +# Allow credentials (cookies, authorization headers) (default: true) +# Note: If true, allowed_origins cannot be ["*"] in browsers +allow_credentials = true + +# Preflight request cache max age in seconds (default: 3600 = 1 hour) +max_age = 3600 + +# Allow private network requests (default: false) +# Enables Access-Control-Request-Private-Network header support +allow_private_network = false + +[websocket] +# Client heartbeat timeout in seconds (default: 10) +# How long to wait for client pong/activity before disconnecting. +# Increase for high connection counts (>10K) to avoid false timeouts +# caused by scheduling contention and TCP buffer pressure. +client_timeout_secs = 10 + +# Authentication timeout in seconds (default: 3) +# How long to wait for auth message after WebSocket connect +auth_timeout_secs = 3 + +# Heartbeat check interval in seconds (default: 5) +# How often the background heartbeat task iterates all connections. +# Pings are staggered across 4 groups, so each connection is pinged +# once every heartbeat_interval × 4 seconds. 
+heartbeat_interval_secs = 5 + +[authentication] +# Bcrypt cost factor for password hashing (default: 12, range: 10-14) +# Higher values = more secure but slower +# Changing this only affects NEW passwords +bcrypt_cost = 12 + +# Minimum password length (default: 8) +min_password_length = 8 + +# Maximum password length (default: 72, bcrypt limit) +# Note: Passwords longer than 72 bytes are truncated by bcrypt +max_password_length = 72 + +# Disable common password checking (default: false) +# If true, allows passwords like "password", "123456", etc. +# WARNING: Only disable for testing/development environments! +disable_common_password_check = false + +# JWT configuration (for JWT Bearer token authentication) +# Secret key for JWT signature validation (minimum 32 characters recommended) +# IMPORTANT: Change this in production! Use a strong, random secret. +jwt_secret = "sdk-test-secret-key-minimum-32-characters-long" + +# Allow initial server setup from non-localhost clients (default: false) +# Useful for Docker or remote hosts in trusted networks. +# WARNING: Only enable in trusted environments. 
+allow_remote_setup = false + +# Comma-separated list of trusted JWT issuers (leave empty to accept any issuer) +# Add your OAuth provider domains here +# Example for Google OAuth: "https://accounts.google.com" +# Example for GitHub OAuth: "https://github.com" +# Example for Firebase Auth: "https://securetoken.google.com/YOUR_PROJECT_ID" +# Multiple issuers (comma-separated): "https://accounts.google.com,https://securetoken.google.com/my-app" +jwt_trusted_issuers = "" + +# Auto-create local OAuth users from trusted provider subject/issuer when not found (default: false) +auto_create_users_from_provider = false + +# ============================================================================ +# OAuth / OIDC Provider Configuration +# ============================================================================ +# Each provider section adds its issuer to the trusted-issuers list and +# registers the audience (client_id) for token validation. +# You still need to add the provider issuer to [authentication].jwt_trusted_issuers above. 
+ +# [oauth] +# # Enable OAuth / OIDC authentication globally (default: false) +# enabled = true +# # Auto-provision a KalamDB user on first successful login (default: false) +# auto_provision = true +# # Default role assigned to auto-provisioned users (default: "user") +# default_role = "user" + +# ── Google / Google Workspace ────────────────────────────────────────────── +# [oauth.providers.google] +# enabled = true +# issuer = "https://accounts.google.com" +# jwks_uri = "https://www.googleapis.com/oauth2/v3/certs" +# client_id = "your-google-client-id.apps.googleusercontent.com" + +# ── GitHub OAuth ─────────────────────────────────────────────────────────── +# [oauth.providers.github] +# enabled = true +# issuer = "https://token.actions.githubusercontent.com" +# jwks_uri = "https://token.actions.githubusercontent.com/.well-known/jwks" +# client_id = "your-github-oauth-app-client-id" + +# ── Microsoft Azure AD / Entra ID ───────────────────────────────────────── +# [oauth.providers.azure] +# enabled = true +# issuer = "https://login.microsoftonline.com/YOUR_TENANT_ID/v2.0" +# jwks_uri = "https://login.microsoftonline.com/YOUR_TENANT_ID/discovery/v2.0/keys" +# client_id = "your-azure-client-id" +# tenant = "your-azure-tenant-id" + +# ── Firebase Authentication ──────────────────────────────────────────────── +# Firebase issues RS256-signed ID tokens with issuer: +# https://securetoken.google.com/{PROJECT_ID} +# JWKS endpoint (static, no discovery needed): +# https://www.googleapis.com/service_accounts/v1/jwk/securetoken@system.gserviceaccount.com +# +# Steps: +# 1. Set enabled = true and fill in your Firebase project ID. +# 2. Add the issuer to [authentication].jwt_trusted_issuers. +# 3. Optionally enable auto_provision so first-time Firebase users get a +# KalamDB account created automatically. +# 4. 
On the client, obtain a Firebase ID token and pass it as: +# Authorization: Bearer +# +# [oauth.providers.firebase] +# enabled = true +# issuer = "https://securetoken.google.com/YOUR_PROJECT_ID" +# jwks_uri = "https://www.googleapis.com/service_accounts/v1/jwk/securetoken@system.gserviceaccount.com" +# # client_id must match the Firebase project ID (aud claim in the token) +# client_id = "YOUR_PROJECT_ID" + +[shutdown] +# Timeout settings for graceful shutdown + +[shutdown.flush] +# Timeout in seconds to wait for flush jobs to complete during graceful shutdown (default: 300) +timeout = 300 + +# Maximum number of concurrent jobs (default: 10) +# Controls how many jobs can execute simultaneously +max_concurrent = 10 + +# Maximum number of retry attempts per job (default: 3) +# Jobs will be retried this many times before being marked as permanently failed +max_retries = 3 + +# Initial retry backoff delay in milliseconds (default: 100ms) +# Delay increases exponentially with each retry (100ms, 200ms, 400ms, etc.) 
+retry_backoff_ms = 100 + +# Phase 11, T026: SQL Handler Execution Configuration +[execution] +# Handler execution timeout in seconds (default: 30) +# Maximum time allowed for a single SQL statement to execute +# Prevents hung requests from blocking resources +handler_timeout_seconds = 30 + +# Maximum number of parameters per statement (default: 50) +# Prevents memory exhaustion from excessive parameter arrays +max_parameters = 50 + +# Maximum size per parameter in bytes (default: 524288 = 512KB) +# Prevents memory exhaustion from individual large parameters +max_parameter_size_bytes = 524288 + +# Maximum number of cached SQL logical plans (default: 200) +# Bound memory used by SQL plan cache +sql_plan_cache_max_entries = 200 + +# Time-to-idle TTL for SQL cached plans in seconds (default: 900 = 15 minutes) +# Unused plans are evicted automatically after this idle period +sql_plan_cache_ttl_seconds = 900 + +# ============================================================================ +# RPC TLS / mTLS Configuration +# ============================================================================ +# Secures the shared gRPC listener used by Raft replication, cluster RPCs, +# and the PostgreSQL extension. +# +# Both cluster nodes and PG extension clients present a certificate signed by +# the same CA. The server identifies the caller from the certificate CN: +# kalamdb-node-{node_id} → cluster node +# kalamdb-pg-{name} → PG extension client +# +# All cert values accept EITHER a file path OR an inline PEM string. +# Inline detection: if the value starts with "-----BEGIN", it is used directly. +# Otherwise the value is treated as a file path and read from disk. 
+ +# [rpc_tls] +# enabled = true +# # CA cert — validates ALL incoming client certs (cluster nodes + PG extension) +# ca_cert = "/etc/kalamdb/certs/ca.pem" +# # This server's identity cert and key +# server_cert = "/etc/kalamdb/certs/node1.pem" +# server_key = "/etc/kalamdb/certs/node1.key" +# # Require clients to present a cert (full mTLS). Set false for server-only TLS. +# require_client_cert = true + +# ============================================================================ +# Cluster Configuration — Multi-Node Raft Replication +# ============================================================================ +# When [cluster] is present the server joins a distributed cluster. +# When absent (default) the server runs standalone with no clustering overhead. +# +# All nodes in a cluster MUST have: +# - Matching cluster_id values +# - The same peers list (this node omitted) +# - Matching sharding configuration (user_shards, shared_shards) +# - Unique node_id values +# +# Node with node_id=1 is the bootstrap node (no explicit flag needed). + +# [cluster] +# # Unique cluster identifier - all nodes must share this +# cluster_id = "prod-cluster" +# +# # This node's unique ID within the cluster (must be >= 1) +# # Node with node_id=1 is the designated bootstrap node +# node_id = 1 +# +# # RPC address for Raft inter-node communication +# rpc_addr = "0.0.0.0:9188" +# +# # API address for client HTTP requests (should match server.host:server.port) +# api_addr = "http://192.168.1.10:8080" +# # +# # Optional mTLS for inter-node gRPC (Raft + cluster RPC) +# # When enabled, all three paths are required. +# # Note: TLS for this node's identity lives in the top-level [rpc_tls] section, not here. 
+# +# # Number of user data shards (default: 8) +# # MEMORY OPTIMIZATION: Reduced from 32 (saves ~5-8 MB) +# # Each shard is a separate Raft group for user table data +# # Trade-off: Lower write parallelism (acceptable for dev/testing) +# user_shards = 8 +# +# # Number of shared data shards (default: 1) +# # Each shard is a separate Raft group for shared table data +# shared_shards = 1 +# +# # Raft heartbeat interval in milliseconds (default: 50) +# heartbeat_interval_ms = 50 +# +# # Raft election timeout range [min, max] in milliseconds (default: [150, 300]) +# election_timeout_ms = [150, 300] +# +# # Maximum entries per Raft snapshot (default: 10000) +# snapshot_threshold = 10000 +# +# # Minimum number of nodes that must acknowledge writes (default: 1) +# # Set to 2 or 3 for strong consistency in a 3-node cluster +# # This ensures data is replicated to multiple nodes before acknowledging success +# min_replication_nodes = 3 +# +# # Peer nodes (list all OTHER nodes in the cluster) +# [[cluster.peers]] +# node_id = 2 +# rpc_addr = "192.168.1.11:9188" +# api_addr = "http://192.168.1.11:8080" +# # Optional TLS server-name override for this peer (SNI/hostname verification) +# # rpc_server_name = "node2.cluster.local" +# +# [[cluster.peers]] +# node_id = 3 +# rpc_addr = "192.168.1.12:9188" +# api_addr = "http://192.168.1.12:8080" +# # rpc_server_name = "node3.cluster.local" + +# ============================================================================ +# Example: 3-Node Production Cluster Configuration +# ============================================================================ +# For production, use an odd number of nodes (3 or 5) for optimal fault tolerance: +# - 3 nodes: tolerates 1 node failure +# - 5 nodes: tolerates 2 node failures +# +# Node 1 configuration (server1.toml): +# [cluster] +# cluster_id = "prod" +# node_id = 1 +# rpc_addr = "node1.example.com:9188" +# api_addr = "http://node1.example.com:8080" +# user_shards = 32 +# shared_shards = 1 +# 
min_replication_nodes = 3 +# +# [[cluster.peers]] +# node_id = 2 +# rpc_addr = "node2.example.com:9090" +# api_addr = "http://node2.example.com:8080" +# rpc_server_name = "node2.example.com" +# +# [[cluster.peers]] +# node_id = 3 +# rpc_addr = "node3.example.com:9090" +# api_addr = "http://node3.example.com:8080" +# rpc_server_name = "node3.example.com" diff --git a/ts-sdk-test-output.txt b/ts-sdk-test-output.txt new file mode 100644 index 000000000..44e2b3234 --- /dev/null +++ b/ts-sdk-test-output.txt @@ -0,0 +1,1350 @@ +🧪 Testing KalamDB TypeScript SDK... +📦 Building SDK... + +> kalam-link@0.4.1-beta.2 build +> npm run clean && npm run build:wasm && npm run build:fix-types && npm run build:ts && npm run build:copy-wasm + + +> kalam-link@0.4.1-beta.2 clean +> node -e "const fs=require('fs');const p=require('path');const rm=d=>{try{fs.rmSync(d,{recursive:true,force:true})}catch{}};rm('dist');rm('wasm')" + + +> kalam-link@0.4.1-beta.2 build:wasm +> node -e "const {execSync,spawnSync}=require('child_process');process.chdir('../..');const env={...process.env,CARGO_ENCODED_RUSTFLAGS:'',RUSTFLAGS:'',RUSTC_WRAPPER:'',CARGO_BUILD_RUSTC_WRAPPER:'',CARGO_TARGET_DIR:'sdks/typescript/.wasm-target',CARGO_HOME:'sdks/typescript/.wasm-cargo-home'};execSync('wasm-pack build --target web --out-dir sdks/typescript/wasm --no-opt --profile release-dist --features wasm --no-default-features',{stdio:'inherit',env});const wasmPath='sdks/typescript/wasm/kalam_link_bg.wasm';const opt=spawnSync('wasm-opt',['-Oz','--all-features','-o',wasmPath,wasmPath],{stdio:'inherit',env});if(opt.error&&opt.error.code==='ENOENT'){console.warn('wasm-opt not found; skipping post-build size optimization')}else if(opt.status!==0){process.exit(opt.status||1)}" + +[INFO]: 🎯 Checking for the Wasm target... +[INFO]: 🌀 Compiling to Wasm... + Finished `release-dist` profile [optimized] target(s) in 0.20s +[INFO]: ⬇️ Installing wasm-bindgen... 
+[INFO]: License key is set in Cargo.toml but no LICENSE file(s) were found; Please add the LICENSE file(s) to your project directory +[INFO]: ✨ Done in 0.40s +[INFO]: 📦 Your wasm pkg is ready to publish at /Users/jamal/git/KalamDB/link/sdks/typescript/wasm. + +> kalam-link@0.4.1-beta.2 build:fix-types +> node -e "const fs=require('fs');const p=require('path');const dts=p.join('wasm','kalam_link.d.ts');let c=fs.readFileSync(dts,'utf8');if(!c.includes('type JsonValue')){c='type JsonValue=null|boolean|number|string|JsonValue[]|{[key:string]:JsonValue};\n'+c;fs.writeFileSync(dts,c)}" + + +> kalam-link@0.4.1-beta.2 build:ts +> tsc + + +> kalam-link@0.4.1-beta.2 build:copy-wasm +> node -e "const fs=require('fs');const p=require('path');fs.mkdirSync('dist/wasm',{recursive:true});fs.readdirSync('wasm').filter(f=>!f.includes('package.json')&&!f.includes('.gitignore')).forEach(f=>fs.copyFileSync(p.join('wasm',f),p.join('dist/wasm',f)))" + + +🔬 Running unit tests (no server)... +✔ runAgent retries and acks once after success (1.211084ms) +✔ runAgent calls onFailed and then acks when configured (0.272792ms) +✔ runAgent does not ack when onFailed throws (0.409333ms) +✔ runAgent exposes llm context with system prompt metadata (0.250875ms) +✔ runConsumer delegates to runAgent and processes messages (0.169959ms) +✔ createLangChainAdapter normalizes completion and stream outputs (0.283417ms) +✔ resolveAuthProviderWithRetry retries transient errors then succeeds (0.902167ms) +✔ resolveAuthProviderWithRetry fails fast for non-transient errors (0.281958ms) +✔ isLikelyTransientAuthProviderError detects network-like messages (0.073584ms) +🧪 Running Basic WASM Module Tests + +Test 1: WASM module loads... + ✓ init function exists + ✓ KalamClient class exists + +Test 2: WASM initializes... +using deprecated parameters for the initialization function; pass a single object instead + ✓ WASM initialized successfully + +Test 3: KalamClient construction... 
+ ✓ KalamClient instance created + ✓ isConnected method exists + ✓ connect method exists + ✓ disconnect method exists + ✓ query method exists + ✓ insert method exists + ✓ delete method exists + ✓ subscribe method exists + ✓ unsubscribe method exists + +Test 4: Constructor parameter validation... + ✓ Empty URL throws error + ✓ Empty username throws error + +================================================== +Results: 14 passed, 0 failed +================================================== + +✅ All tests passed! +✔ tests/basic.test.mjs (62.330208ms) +cell-value.test.mjs: all tests registered +[FileRef] Failed to parse JSON: SyntaxError: Unexpected token 'j', "just a string" is not valid JSON + at JSON.parse () + at FileRef.fromJson (file:///Users/jamal/git/KalamDB/link/sdks/typescript/dist/src/file_ref.js:71:58) + at FileRef.from (file:///Users/jamal/git/KalamDB/link/sdks/typescript/dist/src/file_ref.js:97:28) + at KalamCellValue.asFile (file:///Users/jamal/git/KalamDB/link/sdks/typescript/dist/src/cell_value.js:345:24) + at TestContext. (file:///Users/jamal/git/KalamDB/link/sdks/typescript/tests/cell-value.test.mjs:329:55) + at Test.runInAsyncScope (node:async_hooks:214:14) + at Test.run (node:internal/test_runner/test:1103:25) + at Suite.processPendingSubtests (node:internal/test_runner/test:785:18) + at Test.postRun (node:internal/test_runner/test:1232:19) + at Test.run (node:internal/test_runner/test:1160:12) +[FileRef] Failed to parse JSON: SyntaxError: Unexpected token 'e', "text" is not valid JSON + at JSON.parse () + at FileRef.fromJson (file:///Users/jamal/git/KalamDB/link/sdks/typescript/dist/src/file_ref.js:71:58) + at FileRef.from (file:///Users/jamal/git/KalamDB/link/sdks/typescript/dist/src/file_ref.js:97:28) + at KalamCellValue.asFile (file:///Users/jamal/git/KalamDB/link/sdks/typescript/dist/src/cell_value.js:345:24) + at KalamCellValue.asFileUrl (file:///Users/jamal/git/KalamDB/link/sdks/typescript/dist/src/cell_value.js:364:26) + at TestContext. 
(file:///Users/jamal/git/KalamDB/link/sdks/typescript/tests/cell-value.test.mjs:355:46) + at Test.runInAsyncScope (node:async_hooks:214:14) + at Test.run (node:internal/test_runner/test:1103:25) + at Suite.processPendingSubtests (node:internal/test_runner/test:785:18) + at Test.postRun (node:internal/test_runner/test:1232:19) +▶ KalamCellValue.from() + ✔ wraps null (0.414875ms) + ✔ wraps undefined as null (0.071875ms) + ✔ wraps string (0.068208ms) + ✔ wraps number (0.063625ms) + ✔ wraps boolean (0.083042ms) + ✔ wraps object (0.066791ms) + ✔ wraps array (0.055583ms) +✔ KalamCellValue.from() (1.392542ms) +▶ KalamCellValue.asString() + ✔ returns string as-is (0.131458ms) + ✔ converts number to string (0.074583ms) + ✔ converts boolean to string (0.102833ms) + ✔ handles Utf8 envelope (0.058166ms) + ✔ handles String envelope (0.047666ms) + ✔ returns null for SQL NULL (0.044958ms) +✔ KalamCellValue.asString() (0.611958ms) +▶ KalamCellValue.asInt() + ✔ returns integer (0.085125ms) + ✔ truncates float (0.042541ms) + ✔ parses string integer (0.058667ms) + ✔ converts boolean (0.03675ms) + ✔ returns null for non-numeric string (0.043291ms) + ✔ returns null for null (0.028041ms) +✔ KalamCellValue.asInt() (0.418ms) +▶ KalamCellValue.asBigInt() + ✔ converts number (0.074791ms) + ✔ parses string bigint (0.033958ms) + ✔ returns null for non-numeric string (0.041542ms) +✔ KalamCellValue.asBigInt() (0.209333ms) +▶ KalamCellValue.asFloat() + ✔ returns float (0.068291ms) + ✔ converts integer to float (0.028125ms) + ✔ parses string float (0.029917ms) + ✔ converts boolean (0.030125ms) + ✔ returns null for NaN string (0.035125ms) +✔ KalamCellValue.asFloat() (0.253167ms) +▶ KalamCellValue.asBool() + ✔ returns boolean (0.063208ms) + ✔ converts number (0.032167ms) + ✔ handles string true/false (0.038ms) + ✔ returns null for unrecognized string (0.029417ms) +✔ KalamCellValue.asBool() (0.2215ms) +▶ KalamCellValue.asDate() + ✔ converts unix millis (0.129667ms) + ✔ parses ISO 8601 string 
(0.040875ms) + ✔ parses numeric timestamp string (0.031166ms) + ✔ normalizes microsecond timestamp strings (2.339ms) + ✔ returns null for bad date (0.085041ms) + ✔ returns null for null (0.0425ms) +✔ KalamCellValue.asDate() (3.057167ms) +▶ KalamCellValue.asObject() + ✔ returns object (0.425459ms) + ✔ returns null for non-object (0.033958ms) + ✔ returns null for array (not considered object) (0.029375ms) +✔ KalamCellValue.asObject() (0.532291ms) +▶ KalamCellValue.asArray() + ✔ returns array (0.58025ms) + ✔ returns null for non-array (0.030334ms) +✔ KalamCellValue.asArray() (0.64225ms) +▶ KalamCellValue.toString() + ✔ NULL for null (0.060958ms) + ✔ string as-is (0.023917ms) + ✔ number stringified (0.0225ms) + ✔ object as JSON (0.037625ms) +✔ KalamCellValue.toString() (0.193041ms) +▶ wrapRowMap() + ✔ wraps each value in KalamCellValue (0.289375ms) + ✔ handles null values (0.051458ms) +✔ wrapRowMap() (0.481625ms) +▶ KalamRow.cell() + ✔ returns KalamCellValue for each column (0.086375ms) + ✔ handles null values (0.038417ms) +✔ KalamRow.cell() (0.155542ms) +▶ KalamRow.typedData + ✔ returns all cells as KalamCellValue (RowData) (0.092208ms) + ✔ caches the result on repeated access (0.034584ms) +✔ KalamRow.typedData (0.153042ms) +▶ KalamRow — unified query & subscribe access pattern + ✔ cell() works the same whether data is raw or pre-wrapped (0.044209ms) +✔ KalamRow — unified query & subscribe access pattern (0.063ms) +▶ KalamCellValue.asFile() + ✔ parses valid FILE column JSON (0.080041ms) + ✔ returns null for non-file values (0.322417ms) + ✔ returns null for null (0.02575ms) +✔ KalamCellValue.asFile() (0.468042ms) +▶ KalamCellValue.asFileUrl() + ✔ builds download URL from FILE reference (0.095333ms) + ✔ returns null for non-file values (0.092708ms) +✔ KalamCellValue.asFileUrl() (0.213292ms) +▶ edge cases + ✔ KalamCellValue wraps nested objects correctly (0.042625ms) +✔ edge cases (0.061958ms) +▶ SeqId + ✔ creates from number (0.132917ms) + ✔ creates from bigint 
(0.033125ms) + ✔ creates from string (0.027542ms) + ✔ zero() returns value 0 (0.031458ms) + ✔ extracts Snowflake fields (0.061ms) + ✔ toDate returns a Date (0.036625ms) + ✔ equals compares values (0.03825ms) + ✔ compareTo orders correctly (0.039917ms) + ✔ toJSON returns number (0.035958ms) + ✔ throws on invalid input (0.149791ms) +✔ SeqId (0.677958ms) +▶ KalamCellValue.asSeqId() + ✔ converts number to SeqId (0.058291ms) + ✔ converts string to SeqId (0.026ms) + ✔ returns null for non-numeric string (0.038292ms) + ✔ returns null for null (0.023333ms) +✔ KalamCellValue.asSeqId() (0.190875ms) +normalize.test.mjs passed +✔ tests/normalize.test.mjs (45.590959ms) +✔ README live resume example passes options and exposes typed checkpoints (1.654667ms) +✔ README executeAsUser example wraps SQL for tenant-safe writes (0.2485ms) +✔ README queryWithFiles example posts multipart data with auth header (12.080666ms) +✔ README runAgent example writes back through executeAsUser inside the user tenant (0.482625ms) +✔ queryRows wraps named_rows into KalamRow with typed cell access (3.548541ms) +✔ subscribeRows wraps change rows and oldValues as KalamRow instances (0.626375ms) +✔ liveTableRows delegates to live using SELECT * sugar (0.939167ms) +✔ live passes key columns through to Rust materialization (0.251708ms) +✔ login refresh and reconnect helpers delegate to wasm client (0.351875ms) +✔ consumer one-shot batch and ack preserve all consume options (0.186667ms) +✔ consumer run supports latest start manual ack and auto ack flows (0.269791ms) +✔ documented topic publish path uses SQL query calls only (0.194083ms) +✔ multiple subscriptions on one client share one websocket connection (12.104625ms) +✔ failed subscriptions do not leak local subscription state (11.96575ms) +✔ getSubscriptions trusts wasm empty snapshots over stale local metadata (11.636167ms) +✔ disconnect clears local subscription metadata even when wasm disconnect fails (11.746209ms) +✔ subscribeWithSql normalizes 
websocket rows into RowData cells (10.75725ms) +✔ live delegates materialized rows to the Rust/WASM layer (11.130625ms) +✔ parallel subscribe storms connect once and keep sibling subscriptions isolated (11.426667ms) +ℹ tests 103 +ℹ suites 19 +ℹ pass 103 +ℹ fail 0 +ℹ cancelled 0 +ℹ skipped 0 +ℹ todo 0 +ℹ duration_ms 147.728666 + +🔗 Checking server at http://localhost:8080 ... +✅ Server is reachable + +🧪 Running e2e tests... +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +▶ Auth + ✔ login with basic auth returns tokens and user info (892.391334ms) +KalamClient: Token refreshed, updated JWT authentication +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected + ✔ refreshToken returns a valid access token (864.192708ms) +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected + ✔ JWT-only client can query after login (887.019875ms) +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= + ✔ authProvider callback is used for authentication (866.030333ms) + ✔ Auth.none client can be created (anonymous mode) (0.109041ms) + ✔ wrong password rejects login (861.410833ms) + ✔ constructor requires url (0.144583ms) + ✔ constructor requires authProvider (0.058958ms) +✔ Auth (4372.100167ms) +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... 
+KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +▶ DDL + ✔ CREATE TABLE creates a new table (93.901541ms) + ✔ DROP TABLE IF EXISTS succeeds for existing table (163.832541ms) + ✔ DROP TABLE IF EXISTS succeeds for nonexistent table (81.214792ms) + ✔ table with multiple column types (113.006292ms) +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected + ✔ CREATE NAMESPACE IF NOT EXISTS is idempotent (83.114375ms) +KalamClient: WebSocket closed: code=1005, reason= +✔ DDL (1447.981833ms) +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +▶ Client Lifecycle + ✔ eager initialize then disconnect toggles isConnected (898.97675ms) +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected + ✔ setAutoReconnect / setReconnectDelay / setMaxReconnectAttempts (5.502584ms) +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... 
+KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= + ✔ disableCompression: true still connects and queries (14.95625ms) +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected + ✔ wsLazyConnect: true keeps query-only usage disconnected (6.116083ms) +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected + ✔ onConnect callback fires (505.824959ms) +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= + ✔ calling initialize() twice is safe (9.775584ms) +✔ Client Lifecycle (1442.381583ms) +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... 
+KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +▶ DML Helpers + ✔ insert() adds a row and returns response (7.3395ms) + ✔ update() modifies an existing row (12.359792ms) + ✔ delete() removes a row by id (13.382791ms) +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= +✔ DML Helpers (1034.281ms) +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +▶ Query + ✔ SELECT literal returns result (5.884041ms) + ✔ INSERT then SELECT returns inserted row (11.182958ms) + ✔ parameterised INSERT + SELECT with $1 $2 $3 (12.641125ms) + ✔ UPDATE modifies existing row (7.280459ms) + ✔ DELETE removes row (11.347166ms) + ✔ CREATE NAMESPACE IF NOT EXISTS succeeds (2.730375ms) + ✔ SELECT from nonexistent table returns error (5.671334ms) + ✔ queryOne returns first row or null (12.057583ms) + ✔ queryAll returns array of rows (6.794667ms) + ✔ multiple inserts in single call (11.166209ms) +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +✔ Query (1087.217334ms) +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... 
+KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-e5cc9806d8f2dc6e, sql: SELECT * FROM ts_recon_1774783832653_37iss4.events +KalamClient: Received WebSocket message (444 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-e5cc9806d8f2dc6e, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) +KalamClient: Parsed InitialDataBatch - id: sub-e5cc9806d8f2dc6e, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) +KalamClient: Subscribed with ID: sub-e5cc9806d8f2dc6e +KalamClient: Unsubscribed from: sub-e5cc9806d8f2dc6e +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +▶ Reconnect & Resume + ✔ disconnect then subscribe reconnects automatically (29.292333ms) +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... 
+KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-e5cc9806d8f2dc6e, sql: SELECT * FROM ts_recon_1774783832653_37iss4.events +KalamClient: Received WebSocket message (444 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-e5cc9806d8f2dc6e, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) +KalamClient: Parsed InitialDataBatch - id: sub-e5cc9806d8f2dc6e, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) +KalamClient: Subscribed with ID: sub-e5cc9806d8f2dc6e +KalamClient: Unsubscribed from: sub-e5cc9806d8f2dc6e +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected + ✔ onConnect and onDisconnect fire during reconnect cycle (20.070041ms) +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Disconnecting from WebSocket... 
+KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= + ✔ onError handler can be set without throwing (5.303875ms) +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: Sending subscribe request - id: sub-e5cc9806d8f2dc6e, sql: SELECT * FROM ts_recon_1774783832653_37iss4.events + ✔ setAutoReconnect / setReconnectDelay / setMaxReconnectAttempts (3.428375ms) +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Received WebSocket message (444 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-e5cc9806d8f2dc6e, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) +KalamClient: Parsed InitialDataBatch - id: sub-e5cc9806d8f2dc6e, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) +KalamClient: Subscribed with ID: sub-e5cc9806d8f2dc6e +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Parsed Change - id: sub-e5cc9806d8f2dc6e, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... 
+KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-e5cc9806d8f2dc6e, sql: SELECT * FROM ts_recon_1774783832653_37iss4.events +KalamClient: Received WebSocket message (475 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-e5cc9806d8f2dc6e, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) +KalamClient: Received WebSocket message (384 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-e5cc9806d8f2dc6e, rows: 2, status: Ready +KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) +KalamClient: Subscribed with ID: sub-e5cc9806d8f2dc6e +KalamClient: Parsed Change - id: sub-e5cc9806d8f2dc6e, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) +KalamClient: Unsubscribed from: sub-e5cc9806d8f2dc6e +KalamClient: Unsubscribed from: sub-e5cc9806d8f2dc6e +KalamClient: Disconnecting from WebSocket... 
+KalamClient: Disconnected + ✔ subscription resumes after disconnect/reconnect (real-world) (243.091542ms) +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Sending subscribe request - id: sub-e5cc9806d8f2dc6e, sql: SELECT * FROM ts_recon_1774783832653_37iss4.events +KalamClient: Received WebSocket message (475 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-e5cc9806d8f2dc6e, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) +KalamClient: Received WebSocket message (466 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-e5cc9806d8f2dc6e, rows: 3, status: Ready +KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) +KalamClient: Subscribed with ID: sub-e5cc9806d8f2dc6e +KalamClient: Sending subscribe request - id: sub-ff5801b70f7a3ac4, sql: SELECT * FROM ts_recon_1774783832653_37iss4.events2 +KalamClient: Received WebSocket message (442 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-ff5801b70f7a3ac4, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-ff5801b70f7a3ac4 (registered subs: 2) +KalamClient: Parsed InitialDataBatch - id: sub-ff5801b70f7a3ac4, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-ff5801b70f7a3ac4 (registered subs: 2) +KalamClient: Subscribed with ID: sub-ff5801b70f7a3ac4 +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... 
+KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-e5cc9806d8f2dc6e, sql: SELECT * FROM ts_recon_1774783832653_37iss4.events +KalamClient: Received WebSocket message (475 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-e5cc9806d8f2dc6e, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) +KalamClient: Received WebSocket message (466 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-e5cc9806d8f2dc6e, rows: 3, status: Ready +KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) +KalamClient: Subscribed with ID: sub-e5cc9806d8f2dc6e +KalamClient: Sending subscribe request - id: sub-ff5801b70f7a3ac4, sql: SELECT * FROM ts_recon_1774783832653_37iss4.events2 +KalamClient: Received WebSocket message (442 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-ff5801b70f7a3ac4, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-ff5801b70f7a3ac4 (registered subs: 2) +KalamClient: Parsed InitialDataBatch - id: sub-ff5801b70f7a3ac4, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-ff5801b70f7a3ac4 (registered subs: 2) +KalamClient: Subscribed with ID: sub-ff5801b70f7a3ac4 +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... 
+KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Parsed Change - id: sub-e5cc9806d8f2dc6e, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 2) +KalamClient: Parsed Change - id: sub-ff5801b70f7a3ac4, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-ff5801b70f7a3ac4 (registered subs: 2) +KalamClient: Unsubscribed from: sub-ff5801b70f7a3ac4 +KalamClient: Unsubscribed from: sub-e5cc9806d8f2dc6e +KalamClient: Unsubscribed from: sub-ff5801b70f7a3ac4 +KalamClient: Unsubscribed from: sub-e5cc9806d8f2dc6e +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected + ✔ multiple subscriptions work after reconnect (783.89975ms) +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... 
+KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-9923aad38b931ff9, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.from_seq_boundary WHERE id >= 41001 +KalamClient: Received WebSocket message (346 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-9923aad38b931ff9, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-9923aad38b931ff9 (registered subs: 1) +KalamClient: Received WebSocket message (344 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-9923aad38b931ff9, rows: 2, status: Ready +KalamClient: Looking for callback for subscription_id: sub-9923aad38b931ff9 (registered subs: 1) +KalamClient: Subscribed with ID: sub-9923aad38b931ff9 +KalamClient: Unsubscribed from: sub-9923aad38b931ff9 +KalamClient: Sending subscribe request - id: sub-9923aad38b931ff9, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.from_seq_boundary WHERE id >= 41001 +KalamClient: Received WebSocket message (346 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-9923aad38b931ff9, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-9923aad38b931ff9 (registered subs: 1) +KalamClient: Received WebSocket message (292 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-9923aad38b931ff9, rows: 1, status: Ready +KalamClient: Looking for callback for subscription_id: sub-9923aad38b931ff9 (registered subs: 1) +KalamClient: Subscribed with ID: sub-9923aad38b931ff9 +KalamClient: Unsubscribed from: sub-9923aad38b931ff9 +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected + ✔ subscribeWithSql from resumes with only seqs greater than checkpoint (217.899958ms) +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... 
+KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-fc9cba3cc5d6a202, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.resume_a +KalamClient: Received WebSocket message (318 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-fc9cba3cc5d6a202, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-fc9cba3cc5d6a202 (registered subs: 1) +KalamClient: Parsed InitialDataBatch - id: sub-fc9cba3cc5d6a202, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-fc9cba3cc5d6a202 (registered subs: 1) +KalamClient: Subscribed with ID: sub-fc9cba3cc5d6a202 +KalamClient: Sending subscribe request - id: sub-ec33ae1b4df076ae, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.resume_b +KalamClient: Received WebSocket message (318 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-ec33ae1b4df076ae, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-ec33ae1b4df076ae (registered subs: 2) +KalamClient: Parsed InitialDataBatch - id: sub-ec33ae1b4df076ae, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-ec33ae1b4df076ae (registered subs: 2) +KalamClient: Subscribed with ID: sub-ec33ae1b4df076ae +KalamClient: Sending subscribe request - id: sub-a04167b425bfbbdf, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.resume_c +KalamClient: Received WebSocket message (318 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-a04167b425bfbbdf, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-a04167b425bfbbdf (registered subs: 3) +KalamClient: Parsed InitialDataBatch - id: sub-a04167b425bfbbdf, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: 
sub-a04167b425bfbbdf (registered subs: 3) +KalamClient: Subscribed with ID: sub-a04167b425bfbbdf +KalamClient: Parsed Change - id: sub-fc9cba3cc5d6a202, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-fc9cba3cc5d6a202 (registered subs: 3) +KalamClient: Parsed Change - id: sub-ec33ae1b4df076ae, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-ec33ae1b4df076ae (registered subs: 3) +KalamClient: Parsed Change - id: sub-a04167b425bfbbdf, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-a04167b425bfbbdf (registered subs: 3) +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-e3b9e4a71f291d60, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.resume_a WHERE id >= 1002 +KalamClient: Received WebSocket message (346 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-e3b9e4a71f291d60, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-e3b9e4a71f291d60 (registered subs: 1) +KalamClient: Received WebSocket message (274 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-e3b9e4a71f291d60, rows: 1, status: Ready +KalamClient: Looking for callback for subscription_id: sub-e3b9e4a71f291d60 (registered subs: 1) +KalamClient: Subscribed with ID: sub-e3b9e4a71f291d60 +KalamClient: Sending subscribe request - id: sub-9e06c088b5e9eba7, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.resume_b WHERE id >= 2002 +KalamClient: Received WebSocket message (346 
bytes) +KalamClient: Parsed SubscriptionAck - id: sub-9e06c088b5e9eba7, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-9e06c088b5e9eba7 (registered subs: 2) +KalamClient: Received WebSocket message (274 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-9e06c088b5e9eba7, rows: 1, status: Ready +KalamClient: Looking for callback for subscription_id: sub-9e06c088b5e9eba7 (registered subs: 2) +KalamClient: Subscribed with ID: sub-9e06c088b5e9eba7 +KalamClient: Sending subscribe request - id: sub-b62d3aab9f46eaff, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.resume_c WHERE id >= 3002 +KalamClient: Received WebSocket message (346 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-b62d3aab9f46eaff, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-b62d3aab9f46eaff (registered subs: 3) +KalamClient: Received WebSocket message (274 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-b62d3aab9f46eaff, rows: 1, status: Ready +KalamClient: Looking for callback for subscription_id: sub-b62d3aab9f46eaff (registered subs: 3) +KalamClient: Subscribed with ID: sub-b62d3aab9f46eaff +KalamClient: Parsed Change - id: sub-e3b9e4a71f291d60, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-e3b9e4a71f291d60 (registered subs: 3) +KalamClient: Parsed Change - id: sub-9e06c088b5e9eba7, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-9e06c088b5e9eba7 (registered subs: 3) +KalamClient: Parsed Change - id: sub-b62d3aab9f46eaff, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-b62d3aab9f46eaff (registered subs: 3) +KalamClient: Unsubscribed from: sub-b62d3aab9f46eaff +KalamClient: Unsubscribed from: sub-9e06c088b5e9eba7 +KalamClient: Unsubscribed from: sub-e3b9e4a71f291d60 +KalamClient: Unsubscribed from: sub-a04167b425bfbbdf +KalamClient: Unsubscribed from: sub-ec33ae1b4df076ae +KalamClient: Unsubscribed 
from: sub-fc9cba3cc5d6a202 +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= + ✔ three active subscriptions resume without replaying old rows (862.630709ms) +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-75eb230c63e8cc14, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_a +KalamClient: Received WebSocket message (318 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-75eb230c63e8cc14, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-75eb230c63e8cc14 (registered subs: 1) +KalamClient: Parsed InitialDataBatch - id: sub-75eb230c63e8cc14, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-75eb230c63e8cc14 (registered subs: 1) +KalamClient: Subscribed with ID: sub-75eb230c63e8cc14 +KalamClient: Sending subscribe request - id: sub-58098cca70872354, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_b +KalamClient: Received WebSocket message (318 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-58098cca70872354, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-58098cca70872354 (registered subs: 2) +KalamClient: Parsed InitialDataBatch - id: sub-58098cca70872354, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-58098cca70872354 (registered subs: 2) +KalamClient: Subscribed with ID: sub-58098cca70872354 +KalamClient: Sending subscribe request - id: sub-e98d3d7451b0c749, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_c +KalamClient: Received WebSocket message (318 bytes) 
+KalamClient: Parsed SubscriptionAck - id: sub-e98d3d7451b0c749, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-e98d3d7451b0c749 (registered subs: 3) +KalamClient: Parsed InitialDataBatch - id: sub-e98d3d7451b0c749, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-e98d3d7451b0c749 (registered subs: 3) +KalamClient: Subscribed with ID: sub-e98d3d7451b0c749 +KalamClient: Parsed Change - id: sub-75eb230c63e8cc14, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-75eb230c63e8cc14 (registered subs: 3) +KalamClient: Parsed Change - id: sub-58098cca70872354, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-58098cca70872354 (registered subs: 3) +KalamClient: Parsed Change - id: sub-e98d3d7451b0c749, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-e98d3d7451b0c749 (registered subs: 3) +KalamClient: Unsubscribed from: sub-e98d3d7451b0c749 +KalamClient: Unsubscribed from: sub-58098cca70872354 +KalamClient: Unsubscribed from: sub-75eb230c63e8cc14 +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... 
+KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-c5458ea9f72d2fa9, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_a WHERE id >= 11012 +KalamClient: Received WebSocket message (346 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-c5458ea9f72d2fa9, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-c5458ea9f72d2fa9 (registered subs: 1) +KalamClient: Received WebSocket message (283 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-c5458ea9f72d2fa9, rows: 1, status: Ready +KalamClient: Looking for callback for subscription_id: sub-c5458ea9f72d2fa9 (registered subs: 1) +KalamClient: Subscribed with ID: sub-c5458ea9f72d2fa9 +KalamClient: Sending subscribe request - id: sub-9f9922586e8b1335, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_b WHERE id >= 12012 +KalamClient: Received WebSocket message (346 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-9f9922586e8b1335, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-9f9922586e8b1335 (registered subs: 2) +KalamClient: Received WebSocket message (283 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-9f9922586e8b1335, rows: 1, status: Ready +KalamClient: Looking for callback for subscription_id: sub-9f9922586e8b1335 (registered subs: 2) +KalamClient: Subscribed with ID: sub-9f9922586e8b1335 +KalamClient: Sending subscribe request - id: sub-3d33407948652b23, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_c WHERE id >= 13012 +KalamClient: Received WebSocket message (346 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-3d33407948652b23, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-3d33407948652b23 (registered subs: 3) +KalamClient: Received WebSocket message (283 bytes) +KalamClient: Parsed InitialDataBatch - id: 
sub-3d33407948652b23, rows: 1, status: Ready +KalamClient: Looking for callback for subscription_id: sub-3d33407948652b23 (registered subs: 3) +KalamClient: Subscribed with ID: sub-3d33407948652b23 +KalamClient: Parsed Change - id: sub-c5458ea9f72d2fa9, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-c5458ea9f72d2fa9 (registered subs: 3) +KalamClient: Parsed Change - id: sub-9f9922586e8b1335, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-9f9922586e8b1335 (registered subs: 3) +KalamClient: Parsed Change - id: sub-3d33407948652b23, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3d33407948652b23 (registered subs: 3) +KalamClient: Unsubscribed from: sub-3d33407948652b23 +KalamClient: Unsubscribed from: sub-9f9922586e8b1335 +KalamClient: Unsubscribed from: sub-c5458ea9f72d2fa9 +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... 
+KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-d276b34d70dfbabd, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_a WHERE id >= 11022 +KalamClient: Received WebSocket message (346 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-d276b34d70dfbabd, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-d276b34d70dfbabd (registered subs: 1) +KalamClient: Received WebSocket message (283 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-d276b34d70dfbabd, rows: 1, status: Ready +KalamClient: Looking for callback for subscription_id: sub-d276b34d70dfbabd (registered subs: 1) +KalamClient: Subscribed with ID: sub-d276b34d70dfbabd +KalamClient: Sending subscribe request - id: sub-62ddce9fb02d8cbd, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_b WHERE id >= 12022 +KalamClient: Received WebSocket message (346 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-62ddce9fb02d8cbd, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-62ddce9fb02d8cbd (registered subs: 2) +KalamClient: Received WebSocket message (283 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-62ddce9fb02d8cbd, rows: 1, status: Ready +KalamClient: Looking for callback for subscription_id: sub-62ddce9fb02d8cbd (registered subs: 2) +KalamClient: Subscribed with ID: sub-62ddce9fb02d8cbd +KalamClient: Sending subscribe request - id: sub-ff9bfa1e43cd64a4, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_c WHERE id >= 13022 +KalamClient: Received WebSocket message (346 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-ff9bfa1e43cd64a4, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-ff9bfa1e43cd64a4 (registered subs: 3) +KalamClient: Received WebSocket message (283 bytes) +KalamClient: Parsed InitialDataBatch - id: 
sub-ff9bfa1e43cd64a4, rows: 1, status: Ready +KalamClient: Looking for callback for subscription_id: sub-ff9bfa1e43cd64a4 (registered subs: 3) +KalamClient: Subscribed with ID: sub-ff9bfa1e43cd64a4 +KalamClient: Parsed Change - id: sub-d276b34d70dfbabd, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-d276b34d70dfbabd (registered subs: 3) +KalamClient: Parsed Change - id: sub-62ddce9fb02d8cbd, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-62ddce9fb02d8cbd (registered subs: 3) +KalamClient: Parsed Change - id: sub-ff9bfa1e43cd64a4, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-ff9bfa1e43cd64a4 (registered subs: 3) +KalamClient: Unsubscribed from: sub-ff9bfa1e43cd64a4 +KalamClient: Unsubscribed from: sub-62ddce9fb02d8cbd +KalamClient: Unsubscribed from: sub-d276b34d70dfbabd +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... 
+KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-54695f484d8f1e6, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_a WHERE id >= 11032 +KalamClient: Received WebSocket message (345 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-54695f484d8f1e6, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-54695f484d8f1e6 (registered subs: 1) +KalamClient: Received WebSocket message (282 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-54695f484d8f1e6, rows: 1, status: Ready +KalamClient: Looking for callback for subscription_id: sub-54695f484d8f1e6 (registered subs: 1) +KalamClient: Subscribed with ID: sub-54695f484d8f1e6 +KalamClient: Sending subscribe request - id: sub-6958e4c1d3c0d07c, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_b WHERE id >= 12032 +KalamClient: Received WebSocket message (346 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-6958e4c1d3c0d07c, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-6958e4c1d3c0d07c (registered subs: 2) +KalamClient: Received WebSocket message (283 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-6958e4c1d3c0d07c, rows: 1, status: Ready +KalamClient: Looking for callback for subscription_id: sub-6958e4c1d3c0d07c (registered subs: 2) +KalamClient: Subscribed with ID: sub-6958e4c1d3c0d07c +KalamClient: Sending subscribe request - id: sub-fd4975a26d0f88a8, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_c WHERE id >= 13032 +KalamClient: Received WebSocket message (346 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-fd4975a26d0f88a8, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-fd4975a26d0f88a8 (registered subs: 3) +KalamClient: Received WebSocket message (283 bytes) +KalamClient: Parsed InitialDataBatch - id: 
sub-fd4975a26d0f88a8, rows: 1, status: Ready +KalamClient: Looking for callback for subscription_id: sub-fd4975a26d0f88a8 (registered subs: 3) +KalamClient: Subscribed with ID: sub-fd4975a26d0f88a8 +KalamClient: Parsed Change - id: sub-54695f484d8f1e6, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-54695f484d8f1e6 (registered subs: 3) +KalamClient: Parsed Change - id: sub-6958e4c1d3c0d07c, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-6958e4c1d3c0d07c (registered subs: 3) +KalamClient: Parsed Change - id: sub-fd4975a26d0f88a8, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-fd4975a26d0f88a8 (registered subs: 3) +KalamClient: Unsubscribed from: sub-fd4975a26d0f88a8 +KalamClient: Unsubscribed from: sub-6958e4c1d3c0d07c +KalamClient: Unsubscribed from: sub-54695f484d8f1e6 +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected + ✔ chaos: repeated reconnect cycles with 3 subscriptions stay consistent (1077.164459ms) +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected + ✔ queries work correctly after WebSocket disconnect cycle (13.240292ms) +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= +✔ Reconnect & Resume (4341.056208ms) +KalamClient: Disconnecting from WebSocket... 
+KalamClient: Disconnected +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-c26b6a8ee07bd9e5, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume_live +KalamClient: Received WebSocket message (316 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-c26b6a8ee07bd9e5, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-c26b6a8ee07bd9e5 (registered subs: 1) +KalamClient: Parsed InitialDataBatch - id: sub-c26b6a8ee07bd9e5, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-c26b6a8ee07bd9e5 (registered subs: 1) +KalamClient: Subscribed with ID: sub-c26b6a8ee07bd9e5 +KalamClient: Parsed Change - id: sub-c26b6a8ee07bd9e5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-c26b6a8ee07bd9e5 (registered subs: 1) +KalamClient: Unsubscribed from: sub-c26b6a8ee07bd9e5 +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... 
+KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-c26b6a8ee07bd9e5, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume_live +KalamClient: Received WebSocket message (344 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-c26b6a8ee07bd9e5, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-c26b6a8ee07bd9e5 (registered subs: 1) +KalamClient: Received WebSocket message (271 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-c26b6a8ee07bd9e5, rows: 1, status: Ready +KalamClient: Looking for callback for subscription_id: sub-c26b6a8ee07bd9e5 (registered subs: 1) +KalamClient: Subscribed with ID: sub-c26b6a8ee07bd9e5 +KalamClient: Parsed Change - id: sub-c26b6a8ee07bd9e5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-c26b6a8ee07bd9e5 (registered subs: 1) +KalamClient: Unsubscribed from: sub-c26b6a8ee07bd9e5 +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= +▶ Resume from checkpoint after disconnect + ✔ subscription resumes from checkpoint after disconnect — no replay (342.499292ms) +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... 
+KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-46abb2c4ba616a96, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume3_a +KalamClient: Received WebSocket message (316 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-46abb2c4ba616a96, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-46abb2c4ba616a96 (registered subs: 1) +KalamClient: Parsed InitialDataBatch - id: sub-46abb2c4ba616a96, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-46abb2c4ba616a96 (registered subs: 1) +KalamClient: Subscribed with ID: sub-46abb2c4ba616a96 +KalamClient: Sending subscribe request - id: sub-ee0dce3a832017dc, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume3_b +KalamClient: Received WebSocket message (316 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-ee0dce3a832017dc, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-ee0dce3a832017dc (registered subs: 2) +KalamClient: Parsed InitialDataBatch - id: sub-ee0dce3a832017dc, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-ee0dce3a832017dc (registered subs: 2) +KalamClient: Subscribed with ID: sub-ee0dce3a832017dc +KalamClient: Sending subscribe request - id: sub-6f4a46b5d9938168, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume3_c +KalamClient: Received WebSocket message (316 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-6f4a46b5d9938168, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-6f4a46b5d9938168 (registered subs: 3) +KalamClient: Parsed InitialDataBatch - id: sub-6f4a46b5d9938168, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-6f4a46b5d9938168 (registered subs: 3) +KalamClient: Subscribed with ID: sub-6f4a46b5d9938168 +KalamClient: 
Parsed Change - id: sub-46abb2c4ba616a96, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-46abb2c4ba616a96 (registered subs: 3) +KalamClient: Parsed Change - id: sub-ee0dce3a832017dc, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-ee0dce3a832017dc (registered subs: 3) +KalamClient: Parsed Change - id: sub-6f4a46b5d9938168, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-6f4a46b5d9938168 (registered subs: 3) +KalamClient: Unsubscribed from: sub-46abb2c4ba616a96 +KalamClient: Unsubscribed from: sub-ee0dce3a832017dc +KalamClient: Unsubscribed from: sub-6f4a46b5d9938168 +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... 
+KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-46abb2c4ba616a96, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume3_a +KalamClient: Received WebSocket message (344 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-46abb2c4ba616a96, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-46abb2c4ba616a96 (registered subs: 1) +KalamClient: Received WebSocket message (273 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-46abb2c4ba616a96, rows: 1, status: Ready +KalamClient: Looking for callback for subscription_id: sub-46abb2c4ba616a96 (registered subs: 1) +KalamClient: Subscribed with ID: sub-46abb2c4ba616a96 +KalamClient: Sending subscribe request - id: sub-ee0dce3a832017dc, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume3_b +KalamClient: Received WebSocket message (344 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-ee0dce3a832017dc, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-ee0dce3a832017dc (registered subs: 2) +KalamClient: Received WebSocket message (273 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-ee0dce3a832017dc, rows: 1, status: Ready +KalamClient: Looking for callback for subscription_id: sub-ee0dce3a832017dc (registered subs: 2) +KalamClient: Subscribed with ID: sub-ee0dce3a832017dc +KalamClient: Sending subscribe request - id: sub-6f4a46b5d9938168, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume3_c +KalamClient: Received WebSocket message (344 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-6f4a46b5d9938168, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-6f4a46b5d9938168 (registered subs: 3) +KalamClient: Received WebSocket message (273 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-6f4a46b5d9938168, rows: 1, status: Ready 
+KalamClient: Looking for callback for subscription_id: sub-6f4a46b5d9938168 (registered subs: 3) +KalamClient: Subscribed with ID: sub-6f4a46b5d9938168 +KalamClient: Parsed Change - id: sub-46abb2c4ba616a96, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-46abb2c4ba616a96 (registered subs: 3) +KalamClient: Parsed Change - id: sub-ee0dce3a832017dc, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-ee0dce3a832017dc (registered subs: 3) +KalamClient: Parsed Change - id: sub-6f4a46b5d9938168, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-6f4a46b5d9938168 (registered subs: 3) +KalamClient: Unsubscribed from: sub-46abb2c4ba616a96 +KalamClient: Unsubscribed from: sub-ee0dce3a832017dc +KalamClient: Unsubscribed from: sub-6f4a46b5d9938168 +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected + ✔ three subscriptions resume from their checkpoints after disconnect (831.419416ms) +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... 
+KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-604589b6e4371951, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume_double +KalamClient: Received WebSocket message (316 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-604589b6e4371951, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-604589b6e4371951 (registered subs: 1) +KalamClient: Parsed InitialDataBatch - id: sub-604589b6e4371951, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-604589b6e4371951 (registered subs: 1) +KalamClient: Subscribed with ID: sub-604589b6e4371951 +KalamClient: Parsed Change - id: sub-604589b6e4371951, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-604589b6e4371951 (registered subs: 1) +KalamClient: Unsubscribed from: sub-604589b6e4371951 +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... 
+KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-604589b6e4371951, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume_double +KalamClient: Received WebSocket message (344 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-604589b6e4371951, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-604589b6e4371951 (registered subs: 1) +KalamClient: Received WebSocket message (271 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-604589b6e4371951, rows: 1, status: Ready +KalamClient: Looking for callback for subscription_id: sub-604589b6e4371951 (registered subs: 1) +KalamClient: Subscribed with ID: sub-604589b6e4371951 +KalamClient: Unsubscribed from: sub-604589b6e4371951 +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... 
+KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-604589b6e4371951, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume_double +KalamClient: Received WebSocket message (344 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-604589b6e4371951, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-604589b6e4371951 (registered subs: 1) +KalamClient: Received WebSocket message (271 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-604589b6e4371951, rows: 1, status: Ready +KalamClient: Looking for callback for subscription_id: sub-604589b6e4371951 (registered subs: 1) +KalamClient: Subscribed with ID: sub-604589b6e4371951 +KalamClient: Parsed Change - id: sub-604589b6e4371951, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-604589b6e4371951 (registered subs: 1) +KalamClient: Unsubscribed from: sub-604589b6e4371951 +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected + ✔ double disconnect recovers and resumes from checkpoint (729.587042ms) +✔ Resume from checkpoint after disconnect (2816.711875ms) +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... 
+KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-71c1b12e1c371229, sql: SELECT * FROM ts_sub_1774783839914_i687ql.messages +KalamClient: Received WebSocket message (441 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-71c1b12e1c371229, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) +KalamClient: Parsed InitialDataBatch - id: sub-71c1b12e1c371229, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) +KalamClient: Subscribed with ID: sub-71c1b12e1c371229 +KalamClient: Unsubscribed from: sub-71c1b12e1c371229 +KalamClient: Sending subscribe request - id: sub-71c1b12e1c371229, sql: SELECT * FROM ts_sub_1774783839914_i687ql.messages +▶ Subscription + ✔ subscribe returns unsubscribe function (15.809167ms) +KalamClient: Received WebSocket message (441 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-71c1b12e1c371229, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) +KalamClient: Parsed InitialDataBatch - id: sub-71c1b12e1c371229, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) +KalamClient: Subscribed with ID: sub-71c1b12e1c371229 +KalamClient: Unsubscribed from: sub-71c1b12e1c371229 +KalamClient: Sending subscribe request - id: sub-71c1b12e1c371229, sql: SELECT * FROM ts_sub_1774783839914_i687ql.messages + ✔ subscribe receives subscription_ack event (1514.21875ms) +KalamClient: Received WebSocket message (441 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-71c1b12e1c371229, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) +KalamClient: Parsed InitialDataBatch - id: 
sub-71c1b12e1c371229, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) +KalamClient: Subscribed with ID: sub-71c1b12e1c371229 +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Parsed Change - id: sub-71c1b12e1c371229, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) +KalamClient: Unsubscribed from: sub-71c1b12e1c371229 +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: Sending subscribe request - id: sub-fc42cebeda3f7cdf, sql: SELECT * FROM ts_sub_1774783839914_i687ql.messages WHERE id = 600 + ✔ insert triggers change event on subscriber (4538.228792ms) +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Received WebSocket message (441 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-fc42cebeda3f7cdf, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-fc42cebeda3f7cdf (registered subs: 1) +KalamClient: Parsed InitialDataBatch - id: sub-fc42cebeda3f7cdf, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-fc42cebeda3f7cdf (registered subs: 1) +KalamClient: Subscribed with ID: sub-fc42cebeda3f7cdf +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... 
+KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Parsed Change - id: sub-fc42cebeda3f7cdf, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-fc42cebeda3f7cdf (registered subs: 1) +KalamClient: Unsubscribed from: sub-fc42cebeda3f7cdf +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: Sending subscribe request - id: sub-71c1b12e1c371229, sql: SELECT * FROM ts_sub_1774783839914_i687ql.messages + ✔ subscribeWithSql with WHERE clause works (4548.798083ms) +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Received WebSocket message (472 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-71c1b12e1c371229, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) +KalamClient: Received WebSocket message (373 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-71c1b12e1c371229, rows: 2, status: Ready +KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) +KalamClient: Subscribed with ID: sub-71c1b12e1c371229 +KalamClient: Unsubscribed from: sub-71c1b12e1c371229 +KalamClient: Sending subscribe request - id: sub-71c1b12e1c371229, sql: SELECT * FROM ts_sub_1774783839914_i687ql.messages + ✔ getSubscriptions / isSubscribedTo track subscriptions (1528.354584ms) +KalamClient: Received WebSocket message (472 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-71c1b12e1c371229, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) +KalamClient: Received WebSocket message (373 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-71c1b12e1c371229, rows: 2, status: Ready +KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) +KalamClient: Subscribed with ID: 
sub-71c1b12e1c371229 +KalamClient: Sending subscribe request - id: sub-e90e1009e874b6aa, sql: SELECT * FROM ts_sub_1774783839914_i687ql.messages WHERE id > 0 +KalamClient: Received WebSocket message (472 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-e90e1009e874b6aa, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-e90e1009e874b6aa (registered subs: 2) +KalamClient: Received WebSocket message (373 bytes) +KalamClient: Parsed InitialDataBatch - id: sub-e90e1009e874b6aa, rows: 2, status: Ready +KalamClient: Looking for callback for subscription_id: sub-e90e1009e874b6aa (registered subs: 2) +KalamClient: Subscribed with ID: sub-e90e1009e874b6aa +KalamClient: Unsubscribed from: sub-71c1b12e1c371229 +KalamClient: Unsubscribed from: sub-e90e1009e874b6aa + ✔ unsubscribeAll clears all subscriptions (2059.333541ms) +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Connecting to WebSocket... 
+KalamClient: Waiting for WebSocket to open... +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-3cb8ac26d0fefcc5, sql: SELECT id, body FROM ts_sub_1774783839914_i687ql.messages WHERE id >= 85508900 +KalamClient: Received WebSocket message (315 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-3cb8ac26d0fefcc5, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed InitialDataBatch - id: sub-3cb8ac26d0fefcc5, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Subscribed with ID: sub-3cb8ac26d0fefcc5 +KalamClient: Sending subscribe request - id: sub-3cb8ac26d0fefcc5, sql: SELECT id, body FROM ts_sub_1774783839914_i687ql.messages WHERE id >= 85508900 +KalamClient: Received WebSocket message (315 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-3cb8ac26d0fefcc5, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed InitialDataBatch - id: sub-3cb8ac26d0fefcc5, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Subscribed with ID: sub-3cb8ac26d0fefcc5 +KalamClient: Sending subscribe request - id: sub-3cb8ac26d0fefcc5, sql: SELECT 
id, body FROM ts_sub_1774783839914_i687ql.messages WHERE id >= 85508900 +KalamClient: Received WebSocket message (315 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-3cb8ac26d0fefcc5, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed InitialDataBatch - id: sub-3cb8ac26d0fefcc5, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Subscribed with ID: sub-3cb8ac26d0fefcc5 +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) 
+KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 
1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) +KalamClient: Unsubscribed from: sub-3cb8ac26d0fefcc5 +KalamClient: Unsubscribed from: sub-3cb8ac26d0fefcc5 +KalamClient: Unsubscribed from: sub-3cb8ac26d0fefcc5 +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: WebSocket closed: code=1005, reason= + ✔ concurrent writers fan out inserts to every subscriber client (63.665041ms) +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Connecting to WebSocket... +KalamClient: Waiting for WebSocket to open... +KalamClient: WebSocket connected, sending authentication... +KalamClient: Waiting for authentication... 
+KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba +KalamClient: WebSocket connection established and authenticated +KalamClient: Sending subscribe request - id: sub-94368d5193c0ab23, sql: SELECT id, body FROM ts_sub_1774783839914_i687ql.messages WHERE id = 85514301 +KalamClient: Received WebSocket message (315 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-94368d5193c0ab23, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-94368d5193c0ab23 (registered subs: 1) +KalamClient: Parsed InitialDataBatch - id: sub-94368d5193c0ab23, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-94368d5193c0ab23 (registered subs: 1) +KalamClient: Subscribed with ID: sub-94368d5193c0ab23 +KalamClient: Sending subscribe request - id: sub-1336eef26f07c71d, sql: SELECT id, body FROM ts_sub_1774783839914_i687ql.messages WHERE id = 85514302 +KalamClient: Received WebSocket message (315 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-1336eef26f07c71d, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-1336eef26f07c71d (registered subs: 2) +KalamClient: Parsed InitialDataBatch - id: sub-1336eef26f07c71d, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-1336eef26f07c71d (registered subs: 2) +KalamClient: Subscribed with ID: sub-1336eef26f07c71d +KalamClient: Sending subscribe request - id: sub-c0900fac94513724, sql: SELECT id, body FROM ts_sub_1774783839914_i687ql.messages WHERE id = 85514303 +KalamClient: Received WebSocket message (315 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-c0900fac94513724, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-c0900fac94513724 (registered subs: 3) +KalamClient: Parsed InitialDataBatch - id: sub-c0900fac94513724, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-c0900fac94513724 (registered subs: 3) +KalamClient: 
Subscribed with ID: sub-c0900fac94513724 +KalamClient: Sending subscribe request - id: sub-99f183e3749b404f, sql: SELECT id, body FROM ts_sub_1774783839914_i687ql.messages WHERE id = 85514304 +KalamClient: Received WebSocket message (315 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-99f183e3749b404f, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-99f183e3749b404f (registered subs: 4) +KalamClient: Parsed InitialDataBatch - id: sub-99f183e3749b404f, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-99f183e3749b404f (registered subs: 4) +KalamClient: Subscribed with ID: sub-99f183e3749b404f +KalamClient: Sending subscribe request - id: sub-7f78694a241dd6df, sql: SELECT id, body FROM ts_sub_1774783839914_i687ql.messages WHERE id = 85514305 +KalamClient: Received WebSocket message (315 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-7f78694a241dd6df, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-7f78694a241dd6df (registered subs: 5) +KalamClient: Parsed InitialDataBatch - id: sub-7f78694a241dd6df, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-7f78694a241dd6df (registered subs: 5) +KalamClient: Subscribed with ID: sub-7f78694a241dd6df +KalamClient: Sending subscribe request - id: sub-70dcf50a59a522ed, sql: SELECT id, body FROM ts_sub_1774783839914_i687ql.messages WHERE id = 85514306 +KalamClient: Received WebSocket message (315 bytes) +KalamClient: Parsed SubscriptionAck - id: sub-70dcf50a59a522ed, total_rows: 0 +KalamClient: Looking for callback for subscription_id: sub-70dcf50a59a522ed (registered subs: 6) +KalamClient: Parsed InitialDataBatch - id: sub-70dcf50a59a522ed, rows: 0, status: Ready +KalamClient: Looking for callback for subscription_id: sub-70dcf50a59a522ed (registered subs: 6) +KalamClient: Subscribed with ID: sub-70dcf50a59a522ed +KalamClient: Parsed Change - id: sub-1336eef26f07c71d, type: Insert, rows: Some(1) 
+KalamClient: Looking for callback for subscription_id: sub-1336eef26f07c71d (registered subs: 6) +KalamClient: Parsed Change - id: sub-99f183e3749b404f, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-99f183e3749b404f (registered subs: 6) +KalamClient: Parsed Change - id: sub-70dcf50a59a522ed, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-70dcf50a59a522ed (registered subs: 6) +KalamClient: Parsed Change - id: sub-c0900fac94513724, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-c0900fac94513724 (registered subs: 6) +KalamClient: Parsed Change - id: sub-7f78694a241dd6df, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-7f78694a241dd6df (registered subs: 6) +KalamClient: Parsed Change - id: sub-94368d5193c0ab23, type: Insert, rows: Some(1) +KalamClient: Looking for callback for subscription_id: sub-94368d5193c0ab23 (registered subs: 6) +KalamClient: Unsubscribed from: sub-70dcf50a59a522ed +KalamClient: Unsubscribed from: sub-7f78694a241dd6df +KalamClient: Unsubscribed from: sub-99f183e3749b404f +KalamClient: Unsubscribed from: sub-c0900fac94513724 +KalamClient: Unsubscribed from: sub-1336eef26f07c71d +KalamClient: Unsubscribed from: sub-94368d5193c0ab23 +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected + ✔ one client keeps many simultaneous subscriptions isolated (55.725792ms) +KalamClient: WebSocket closed: code=1005, reason= +KalamClient: Disconnecting from WebSocket... +KalamClient: Disconnected +✔ Subscription (15321.963666ms) +KalamClient: WebSocket closed: code=1005, reason= +ℹ tests 53 +ℹ suites 8 +ℹ pass 53 +ℹ fail 0 +ℹ cancelled 0 +ℹ skipped 0 +ℹ todo 0 +ℹ duration_ms 32284.714875 + +✅ All TypeScript SDK tests passed! 
From 952aa756e71ff16dc2ee7159f56bfbb22a51fee4 Mon Sep 17 00:00:00 2001 From: jamals86 Date: Sun, 29 Mar 2026 14:50:16 +0300 Subject: [PATCH 04/12] Delete server.toml --- ts-sdk-release/server.toml | 654 ------------------------------------- 1 file changed, 654 deletions(-) delete mode 100644 ts-sdk-release/server.toml diff --git a/ts-sdk-release/server.toml b/ts-sdk-release/server.toml deleted file mode 100644 index cf71ee058..000000000 --- a/ts-sdk-release/server.toml +++ /dev/null @@ -1,654 +0,0 @@ -# KalamDB Server Configuration -# This is an example configuration file with all available settings. -# Copy this file to config.toml and adjust values for your environment. -# -# NOTE: Runtime configuration only! -# - Namespace and storage location configuration is stored in system tables (via kalamdb-sql) -# - This file contains only server runtime settings (ports, paths, limits, etc.) - -[server] -# Server bind address (default: 127.0.0.1) -host = "0.0.0.0" - -# Server port (default: 8080) -port = 8080 - -# Number of worker threads (0 = number of CPU cores) -workers = 0 - -# Enable HTTP/2 protocol support (default: true) -# When true, server uses automatic HTTP/1.1 and HTTP/2 cleartext (h2c) negotiation -# When false, server only supports HTTP/1.1 -# HTTP/2 offers: -# - Multiplexed requests (multiple requests on single connection) -# - Header compression (HPACK) -# - Binary protocol (more efficient parsing) -# - Server push support (for future features) -enable_http2 = true - -# API version (default: "v1") -# Controls the versioned endpoint prefix (e.g., /v1/api/sql) -api_version = "v1" - -[storage] -# Base data directory for all KalamDB storage -# Subdirectories are automatically created: -# ./data/rocksdb - RocksDB hot storage (write buffer) -# ./data/storage - Parquet cold storage (flushed segments) -# ./data/snapshots - Raft snapshots (consensus state) -data_path = "/Users/jamal/git/KalamDB/ts-sdk-release/data" - -# Templates for table storage paths (used by 
'local' storage) -# Available placeholders: {namespace}, {tableName}, {userId} -# Final paths: {default_storage_path}/{template} -# Examples: -# Shared table: ./data/storage/myapp/products -# User table: ./data/storage/myapp/preferences/user123 -shared_tables_template = "{namespace}/{tableName}" -user_tables_template = "{namespace}/{tableName}/{userId}" - -# Remote storage timeout settings (S3, GCS, Azure) -# These timeouts apply programmatically to all remote storage operations -[storage.remote_timeouts] -# Request timeout in seconds for all remote storage operations (default: 60s) -request_timeout_secs = 60 -# Connect timeout in seconds for establishing connections (default: 10s) -connect_timeout_secs = 10 - - -[storage.rocksdb] -# Block cache size for reads in bytes (default: 2MB) -# IMPORTANT: This cache is SHARED across ALL column families -# Adding more column families does NOT increase memory proportionally -block_cache_size = 2097152 - -# Maximum number of background compaction/flush jobs (default: 4) -max_background_jobs = 4 - -[storage.rocksdb.cf_profiles.system_meta] -# Low-write system metadata tables and compatibility partitions. -write_buffer_size = 32768 -max_write_buffers = 2 - -[storage.rocksdb.cf_profiles.system_index] -# Secondary indexes for system tables. -write_buffer_size = 32768 -max_write_buffers = 2 - -[storage.rocksdb.cf_profiles.hot_data] -# User/shared/stream data CFs and topic message payloads. -# These stay warmer because they sit on the main read/write path. -write_buffer_size = 131072 -max_write_buffers = 2 - -[storage.rocksdb.cf_profiles.hot_index] -# PK indexes and vector PK indexes. These are latency-sensitive, but smaller than data CFs. -write_buffer_size = 65536 -max_write_buffers = 2 - -[storage.rocksdb.cf_profiles.raft] -# The single raft_data CF is append-heavy and benefits from a larger memtable. 
-write_buffer_size = 262144 -max_write_buffers = 2 - -[datafusion] -# Memory limit for query execution in bytes (default: 64MB) -# KalamDB is optimised for mobile/OLTP workloads, not heavy analytics. -# 64 MB is sufficient for key-based lookups and small aggregates. -# Queries that exceed this limit are terminated immediately. -memory_limit = 67108864 - -# Number of parallel threads for query execution (default: 2) -# Low fixed value: mobile app queries are short-lived key lookups; -# high parallelism just increases context-switch overhead. -# Set to 0 to auto-detect CPU count (not recommended for shared servers). -query_parallelism = 2 - -# Maximum number of partitions per query (default: 4) -# Caps concurrent partition scans; reduces memory and CPU burst per query. -max_partitions = 4 - -# Batch size for record processing (default: 1024 rows) -# Smaller batches limit peak arrow allocation per operator. -batch_size = 1024 - -[flush] -# Default row limit for flush policies (default: 10000 rows) -# Tables without explicit flush policy will use this value -default_row_limit = 10000 - -# Default time interval for flush in seconds (default: 300s = 5 minutes) -# Tables will flush to Parquet after this duration -default_time_interval = 300 - -# How often the background scheduler checks for tables with pending writes -# and creates flush jobs (default: 60 seconds). Set to 0 to disable. -check_interval_seconds = 60 - -[retention] -# Default retention hours for soft-deleted rows (default: 168 hours = 7 days) -# Rows with _deleted=true will be kept in Parquet files for this duration -default_deleted_retention_hours = 168 - -# Enable periodic dba.stats collection (default: true) -# Set to false to disable the background stats recorder, saving memory and CPU -# in resource-constrained environments (e.g. Docker containers). 
-enable_dba_stats = false - -# Number of days to preserve samples in dba.stats (default: 7 days) -# Set to 0 to disable automatic cleanup of historical DBA metrics -dba_stats_retention_days = 7 - -[stream] -# Default TTL for stream table rows in seconds (default: 10 seconds) -# Stream tables are ephemeral - rows expire after this duration -default_ttl_seconds = 10 - -# Default maximum buffer size for stream tables (default: 10000 rows) -# Oldest rows are evicted when buffer exceeds this limit -default_max_buffer = 10000 - -# Stream eviction interval in seconds (default: 60 seconds = 1 minute) -# How often the background task checks and evicts expired events -eviction_interval_seconds = 60 - -[manifest_cache] -# Eviction job interval in seconds (default: 300s = 5 minutes) -# How often the eviction job runs to clean up stale manifest entries -eviction_interval_seconds = 300 - -# Maximum number of manifest entries in hot cache (default: 500) -# Older entries are evicted when this limit is reached (LRU policy) -max_entries = 500 - -# TTL in days for manifest eviction (default: 7 days) -# Manifests not accessed for this many days will be removed from both -# hot cache (RAM) and RocksDB persistent cache -# Set to 0 to disable manifest eviction (not recommended for long-running servers) -eviction_ttl_days = 7 - -# Weight factor for user table manifests (default: 10) -# User tables are evicted N times faster than shared tables. 
-user_table_weight_factor = 10 - -[limits] -# Maximum message size for REST API requests in bytes (default: 1MB) -max_message_size = 1048576 - -# Maximum rows that can be returned in a single query (default: 1000) -max_query_limit = 1000 - -# Default LIMIT for queries without explicit LIMIT clause (default: 50) -default_query_limit = 50 - -[logging] -# Log level: error, warn, info, debug, trace (default: info) -level = "info" - -# Directory for all log files (default: "./logs") -# Server will create server.log (or server.jsonl for JSON format), slow.log, etc. -logs_path = "/Users/jamal/git/KalamDB/ts-sdk-release/logs" - -# Also log to console/stdout (default: true) -log_to_console = true - -# Log format: compact, json (default: compact) -# - compact: Human-readable text format -> server.log -# Format: [timestamp] [LEVEL] [thread - target:line] - message -# - json: JSON Lines format -> server.jsonl (queryable via system.server_logs) -# Each line is a valid JSON object with timestamp, level, thread, target, line, message -format = "compact" - -# Slow query logging threshold in milliseconds (default: 1000ms = 1 second) -# Queries taking longer than this threshold will be logged to slow.log -# AND displayed as WARN in the console -# Set to a high value (e.g., 999999) to disable slow query logging -slow_query_threshold_ms = 1000 - -[logging.otlp] -# Export tracing spans to an OTLP collector (Jaeger all-in-one supports this) -enabled = false -# gRPC endpoint for Jaeger OTLP receiver (port 4317) -endpoint = "http://127.0.0.1:4317" -# Protocol: "grpc" or "http" -protocol = "grpc" -# Service name shown in Jaeger UI -service_name = "kalamdb-server" -# Export timeout in milliseconds -timeout_ms = 3000 - -[performance] -# Request timeout in seconds (default: 30s) -# Requests exceeding this duration will be terminated -request_timeout = 30 - -# Keep-alive timeout in seconds (default: 75s) -# HTTP keep-alive allows connection reuse, reducing TCP handshake overhead 
-keepalive_timeout = 75 - -# Maximum concurrent connections per worker (default: 25000) -# Includes both REST API and WebSocket connections -# For testing environments with high concurrency, consider 50000 -max_connections = 25000 - -# TCP listen backlog - pending connections queue size (default: 4096) -# Controls how many connections can wait in the kernel queue before being accepted -# Increase for burst traffic or high-concurrency scenarios -# Recommended values: -# - Development/Testing: 4096-8192 (handles burst test loads) -# - Production: 4096-8192 (handles traffic spikes) -# - High traffic: 8192+ (enterprise scale) -# Industry standards: Nginx (511), Apache (511), Caddy (1024), Actix (2048) -backlog = 4096 - -# Max blocking threads per worker for CPU-intensive operations (default: 32) -# Used for RocksDB I/O and synchronous operations -# Increase for high-concurrency workloads or test environments -worker_max_blocking_threads = 32 - -# Number of tokio runtime worker threads (default: 0 = auto, num_cpus capped at 4) -# Lower values reduce idle RSS from thread stacks (~2MB per thread). -# Set to 0 for auto-detection, or an explicit count for Docker/constrained environments. -# Can also be overridden via KALAMDB_TOKIO_WORKER_THREADS env var. 
-tokio_worker_threads = 0 - -# Client request timeout in seconds (default: 5) -# Time allowed for client to send complete request headers -client_request_timeout = 5 - -# Client disconnect timeout in seconds (default: 2) -# Time allowed for graceful connection shutdown -client_disconnect_timeout = 2 - -# Maximum HTTP header size in bytes (default: 16384 = 16KB) -# Increase if you have large JWT tokens or custom headers -max_header_size = 16384 - -[rate_limit] -# Maximum SQL queries per second per user (default: 100) -# Prevents query flooding from a single user -# For testing/development environments with high load, increase to 10000-100000 -max_queries_per_sec = 100 - -# Maximum WebSocket messages per second per connection (default: 50) -# Prevents message flooding on WebSocket connections -# For testing/development environments with high load, increase to 500-1000 -max_messages_per_sec = 50 - -# Maximum concurrent live query subscriptions per user (default: 10) -# Limits total active subscriptions to prevent resource exhaustion -# For testing/development environments, increase to 100-1000 -max_subscriptions_per_user = 10 - -# Maximum authentication requests per IP per second (default: 20) -# Prevents brute force attacks and login flooding -# Applies to /auth/login, /auth/refresh, /setup endpoints -max_auth_requests_per_ip_per_sec = 20 - -# Maximum concurrent connections per IP address (default: 100) -# Prevents a single IP from exhausting all server connections -max_connections_per_ip = 100 - -# Maximum requests per second per IP BEFORE authentication (default: 200) -# ⚠️ CRITICAL: This is the main rate limit that triggers IP BANS -# Applied before auth to protect against unauthenticated floods -# If exceeded repeatedly, IP will be banned for ban_duration_seconds -# For testing/development with high request rates, set to 100000+ -max_requests_per_ip_per_sec = 200 - -# Maximum request body size in bytes (default: 10MB) -# Prevents memory exhaustion from huge 
request payloads -request_body_limit_bytes = 10485760 - -# Duration in seconds to ban abusive IPs (default: 300 = 5 minutes) -# IPs that violate max_requests_per_ip_per_sec 10+ times are banned -ban_duration_seconds = 300 - -# Enable connection protection middleware (default: true) -# Set to false to completely disable rate limiting (NOT recommended for production) -enable_connection_protection = true - -# Maximum cached entries for rate limiting state (default: 1,000) -# MEMORY OPTIMIZATION: Reduced from 100k. Moka internal bookkeeping scales -# with max_capacity. 1k handles typical mobile-app deployments. -cache_max_entries = 1000 - -# Time-to-idle for cached entries in seconds (default: 600 = 10 minutes) -cache_ttl_seconds = 600 - -# ============================================================================ -# Security Settings -# ============================================================================ -# CORS, WebSocket, and request limit configuration - -[security] -# Maximum request body size in bytes (default: 10MB) -# Prevents memory exhaustion from large payloads -max_request_body_size = 10485760 - -# Maximum WebSocket message size in bytes (default: 1MB) -# Prevents memory exhaustion from large WebSocket messages -max_ws_message_size = 1048576 - -# Allowed WebSocket origins (if different from CORS origins) -# Leave empty to use CORS allowed_origins for WebSocket validation -allowed_ws_origins = [] - -# Strict WebSocket origin checking (default: false) -# If true, rejects WebSocket connections without Origin header -strict_ws_origin_check = false - -# Trusted reverse proxy source IPs or CIDR ranges for forwarded client IP headers. -# Only peers in this list may supply X-Forwarded-For / X-Real-IP. 
-# Examples: ["10.0.1.9", "10.0.0.0/8", "192.168.0.0/24"] -trusted_proxy_ranges = [] - -# CORS Configuration (uses actix-cors) -# See: https://docs.rs/actix-cors -[security.cors] -# Allowed origins for CORS requests -# Use ["*"] or empty [] for any origin (development mode) -# For production, specify exact origins: ["https://app.example.com", "https://admin.example.com"] -allowed_origins = [] - -# Allowed HTTP methods (default: common REST methods) -allowed_methods = ["GET", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"] - -# Allowed HTTP headers -# Use ["*"] to allow any header -allowed_headers = ["Authorization", "Content-Type", "Accept", "Origin", "X-Requested-With"] - -# Headers to expose to the browser (default: none) -# Example: ["X-Custom-Header", "X-Request-Id"] -expose_headers = [] - -# Allow credentials (cookies, authorization headers) (default: true) -# Note: If true, allowed_origins cannot be ["*"] in browsers -allow_credentials = true - -# Preflight request cache max age in seconds (default: 3600 = 1 hour) -max_age = 3600 - -# Allow private network requests (default: false) -# Enables Access-Control-Request-Private-Network header support -allow_private_network = false - -[websocket] -# Client heartbeat timeout in seconds (default: 10) -# How long to wait for client pong/activity before disconnecting. -# Increase for high connection counts (>10K) to avoid false timeouts -# caused by scheduling contention and TCP buffer pressure. -client_timeout_secs = 10 - -# Authentication timeout in seconds (default: 3) -# How long to wait for auth message after WebSocket connect -auth_timeout_secs = 3 - -# Heartbeat check interval in seconds (default: 5) -# How often the background heartbeat task iterates all connections. -# Pings are staggered across 4 groups, so each connection is pinged -# once every heartbeat_interval × 4 seconds. 
-heartbeat_interval_secs = 5 - -[authentication] -# Bcrypt cost factor for password hashing (default: 12, range: 10-14) -# Higher values = more secure but slower -# Changing this only affects NEW passwords -bcrypt_cost = 12 - -# Minimum password length (default: 8) -min_password_length = 8 - -# Maximum password length (default: 72, bcrypt limit) -# Note: Passwords longer than 72 bytes are truncated by bcrypt -max_password_length = 72 - -# Disable common password checking (default: false) -# If true, allows passwords like "password", "123456", etc. -# WARNING: Only disable for testing/development environments! -disable_common_password_check = false - -# JWT configuration (for JWT Bearer token authentication) -# Secret key for JWT signature validation (minimum 32 characters recommended) -# IMPORTANT: Change this in production! Use a strong, random secret. -jwt_secret = "sdk-test-secret-key-minimum-32-characters-long" - -# Allow initial server setup from non-localhost clients (default: false) -# Useful for Docker or remote hosts in trusted networks. -# WARNING: Only enable in trusted environments. 
-allow_remote_setup = false - -# Comma-separated list of trusted JWT issuers (leave empty to accept any issuer) -# Add your OAuth provider domains here -# Example for Google OAuth: "https://accounts.google.com" -# Example for GitHub OAuth: "https://github.com" -# Example for Firebase Auth: "https://securetoken.google.com/YOUR_PROJECT_ID" -# Multiple issuers (comma-separated): "https://accounts.google.com,https://securetoken.google.com/my-app" -jwt_trusted_issuers = "" - -# Auto-create local OAuth users from trusted provider subject/issuer when not found (default: false) -auto_create_users_from_provider = false - -# ============================================================================ -# OAuth / OIDC Provider Configuration -# ============================================================================ -# Each provider section adds its issuer to the trusted-issuers list and -# registers the audience (client_id) for token validation. -# You still need to add the provider issuer to [authentication].jwt_trusted_issuers above. 
- -# [oauth] -# # Enable OAuth / OIDC authentication globally (default: false) -# enabled = true -# # Auto-provision a KalamDB user on first successful login (default: false) -# auto_provision = true -# # Default role assigned to auto-provisioned users (default: "user") -# default_role = "user" - -# ── Google / Google Workspace ────────────────────────────────────────────── -# [oauth.providers.google] -# enabled = true -# issuer = "https://accounts.google.com" -# jwks_uri = "https://www.googleapis.com/oauth2/v3/certs" -# client_id = "your-google-client-id.apps.googleusercontent.com" - -# ── GitHub OAuth ─────────────────────────────────────────────────────────── -# [oauth.providers.github] -# enabled = true -# issuer = "https://token.actions.githubusercontent.com" -# jwks_uri = "https://token.actions.githubusercontent.com/.well-known/jwks" -# client_id = "your-github-oauth-app-client-id" - -# ── Microsoft Azure AD / Entra ID ───────────────────────────────────────── -# [oauth.providers.azure] -# enabled = true -# issuer = "https://login.microsoftonline.com/YOUR_TENANT_ID/v2.0" -# jwks_uri = "https://login.microsoftonline.com/YOUR_TENANT_ID/discovery/v2.0/keys" -# client_id = "your-azure-client-id" -# tenant = "your-azure-tenant-id" - -# ── Firebase Authentication ──────────────────────────────────────────────── -# Firebase issues RS256-signed ID tokens with issuer: -# https://securetoken.google.com/{PROJECT_ID} -# JWKS endpoint (static, no discovery needed): -# https://www.googleapis.com/service_accounts/v1/jwk/securetoken@system.gserviceaccount.com -# -# Steps: -# 1. Set enabled = true and fill in your Firebase project ID. -# 2. Add the issuer to [authentication].jwt_trusted_issuers. -# 3. Optionally enable auto_provision so first-time Firebase users get a -# KalamDB account created automatically. -# 4. 
On the client, obtain a Firebase ID token and pass it as: -# Authorization: Bearer -# -# [oauth.providers.firebase] -# enabled = true -# issuer = "https://securetoken.google.com/YOUR_PROJECT_ID" -# jwks_uri = "https://www.googleapis.com/service_accounts/v1/jwk/securetoken@system.gserviceaccount.com" -# # client_id must match the Firebase project ID (aud claim in the token) -# client_id = "YOUR_PROJECT_ID" - -[shutdown] -# Timeout settings for graceful shutdown - -[shutdown.flush] -# Timeout in seconds to wait for flush jobs to complete during graceful shutdown (default: 300) -timeout = 300 - -# Maximum number of concurrent jobs (default: 10) -# Controls how many jobs can execute simultaneously -max_concurrent = 10 - -# Maximum number of retry attempts per job (default: 3) -# Jobs will be retried this many times before being marked as permanently failed -max_retries = 3 - -# Initial retry backoff delay in milliseconds (default: 100ms) -# Delay increases exponentially with each retry (100ms, 200ms, 400ms, etc.) 
-retry_backoff_ms = 100 - -# Phase 11, T026: SQL Handler Execution Configuration -[execution] -# Handler execution timeout in seconds (default: 30) -# Maximum time allowed for a single SQL statement to execute -# Prevents hung requests from blocking resources -handler_timeout_seconds = 30 - -# Maximum number of parameters per statement (default: 50) -# Prevents memory exhaustion from excessive parameter arrays -max_parameters = 50 - -# Maximum size per parameter in bytes (default: 524288 = 512KB) -# Prevents memory exhaustion from individual large parameters -max_parameter_size_bytes = 524288 - -# Maximum number of cached SQL logical plans (default: 200) -# Bound memory used by SQL plan cache -sql_plan_cache_max_entries = 200 - -# Time-to-idle TTL for SQL cached plans in seconds (default: 900 = 15 minutes) -# Unused plans are evicted automatically after this idle period -sql_plan_cache_ttl_seconds = 900 - -# ============================================================================ -# RPC TLS / mTLS Configuration -# ============================================================================ -# Secures the shared gRPC listener used by Raft replication, cluster RPCs, -# and the PostgreSQL extension. -# -# Both cluster nodes and PG extension clients present a certificate signed by -# the same CA. The server identifies the caller from the certificate CN: -# kalamdb-node-{node_id} → cluster node -# kalamdb-pg-{name} → PG extension client -# -# All cert values accept EITHER a file path OR an inline PEM string. -# Inline detection: if the value starts with "-----BEGIN", it is used directly. -# Otherwise the value is treated as a file path and read from disk. 
- -# [rpc_tls] -# enabled = true -# # CA cert — validates ALL incoming client certs (cluster nodes + PG extension) -# ca_cert = "/etc/kalamdb/certs/ca.pem" -# # This server's identity cert and key -# server_cert = "/etc/kalamdb/certs/node1.pem" -# server_key = "/etc/kalamdb/certs/node1.key" -# # Require clients to present a cert (full mTLS). Set false for server-only TLS. -# require_client_cert = true - -# ============================================================================ -# Cluster Configuration — Multi-Node Raft Replication -# ============================================================================ -# When [cluster] is present the server joins a distributed cluster. -# When absent (default) the server runs standalone with no clustering overhead. -# -# All nodes in a cluster MUST have: -# - Matching cluster_id values -# - The same peers list (this node omitted) -# - Matching sharding configuration (user_shards, shared_shards) -# - Unique node_id values -# -# Node with node_id=1 is the bootstrap node (no explicit flag needed). - -# [cluster] -# # Unique cluster identifier - all nodes must share this -# cluster_id = "prod-cluster" -# -# # This node's unique ID within the cluster (must be >= 1) -# # Node with node_id=1 is the designated bootstrap node -# node_id = 1 -# -# # RPC address for Raft inter-node communication -# rpc_addr = "0.0.0.0:9188" -# -# # API address for client HTTP requests (should match server.host:server.port) -# api_addr = "http://192.168.1.10:8080" -# # -# # Optional mTLS for inter-node gRPC (Raft + cluster RPC) -# # When enabled, all three paths are required. -# # Note: TLS for this node's identity lives in the top-level [rpc_tls] section, not here. 
-# -# # Number of user data shards (default: 8) -# # MEMORY OPTIMIZATION: Reduced from 32 (saves ~5-8 MB) -# # Each shard is a separate Raft group for user table data -# # Trade-off: Lower write parallelism (acceptable for dev/testing) -# user_shards = 8 -# -# # Number of shared data shards (default: 1) -# # Each shard is a separate Raft group for shared table data -# shared_shards = 1 -# -# # Raft heartbeat interval in milliseconds (default: 50) -# heartbeat_interval_ms = 50 -# -# # Raft election timeout range [min, max] in milliseconds (default: [150, 300]) -# election_timeout_ms = [150, 300] -# -# # Maximum entries per Raft snapshot (default: 10000) -# snapshot_threshold = 10000 -# -# # Minimum number of nodes that must acknowledge writes (default: 1) -# # Set to 2 or 3 for strong consistency in a 3-node cluster -# # This ensures data is replicated to multiple nodes before acknowledging success -# min_replication_nodes = 3 -# -# # Peer nodes (list all OTHER nodes in the cluster) -# [[cluster.peers]] -# node_id = 2 -# rpc_addr = "192.168.1.11:9188" -# api_addr = "http://192.168.1.11:8080" -# # Optional TLS server-name override for this peer (SNI/hostname verification) -# # rpc_server_name = "node2.cluster.local" -# -# [[cluster.peers]] -# node_id = 3 -# rpc_addr = "192.168.1.12:9188" -# api_addr = "http://192.168.1.12:8080" -# # rpc_server_name = "node3.cluster.local" - -# ============================================================================ -# Example: 3-Node Production Cluster Configuration -# ============================================================================ -# For production, use an odd number of nodes (3 or 5) for optimal fault tolerance: -# - 3 nodes: tolerates 1 node failure -# - 5 nodes: tolerates 2 node failures -# -# Node 1 configuration (server1.toml): -# [cluster] -# cluster_id = "prod" -# node_id = 1 -# rpc_addr = "node1.example.com:9188" -# api_addr = "http://node1.example.com:8080" -# user_shards = 32 -# shared_shards = 1 -# 
min_replication_nodes = 3 -# -# [[cluster.peers]] -# node_id = 2 -# rpc_addr = "node2.example.com:9090" -# api_addr = "http://node2.example.com:8080" -# rpc_server_name = "node2.example.com" -# -# [[cluster.peers]] -# node_id = 3 -# rpc_addr = "node3.example.com:9090" -# api_addr = "http://node3.example.com:8080" -# rpc_server_name = "node3.example.com" From 2d414e2ff4b8f1aab9164c80b58b28f03382fd95 Mon Sep 17 00:00:00 2001 From: jamals86 Date: Sun, 29 Mar 2026 14:50:45 +0300 Subject: [PATCH 05/12] Delete ts-sdk-test-output.txt --- ts-sdk-test-output.txt | 1350 ---------------------------------------- 1 file changed, 1350 deletions(-) delete mode 100644 ts-sdk-test-output.txt diff --git a/ts-sdk-test-output.txt b/ts-sdk-test-output.txt deleted file mode 100644 index 44e2b3234..000000000 --- a/ts-sdk-test-output.txt +++ /dev/null @@ -1,1350 +0,0 @@ -🧪 Testing KalamDB TypeScript SDK... -📦 Building SDK... - -> kalam-link@0.4.1-beta.2 build -> npm run clean && npm run build:wasm && npm run build:fix-types && npm run build:ts && npm run build:copy-wasm - - -> kalam-link@0.4.1-beta.2 clean -> node -e "const fs=require('fs');const p=require('path');const rm=d=>{try{fs.rmSync(d,{recursive:true,force:true})}catch{}};rm('dist');rm('wasm')" - - -> kalam-link@0.4.1-beta.2 build:wasm -> node -e "const {execSync,spawnSync}=require('child_process');process.chdir('../..');const env={...process.env,CARGO_ENCODED_RUSTFLAGS:'',RUSTFLAGS:'',RUSTC_WRAPPER:'',CARGO_BUILD_RUSTC_WRAPPER:'',CARGO_TARGET_DIR:'sdks/typescript/.wasm-target',CARGO_HOME:'sdks/typescript/.wasm-cargo-home'};execSync('wasm-pack build --target web --out-dir sdks/typescript/wasm --no-opt --profile release-dist --features wasm --no-default-features',{stdio:'inherit',env});const wasmPath='sdks/typescript/wasm/kalam_link_bg.wasm';const opt=spawnSync('wasm-opt',['-Oz','--all-features','-o',wasmPath,wasmPath],{stdio:'inherit',env});if(opt.error&&opt.error.code==='ENOENT'){console.warn('wasm-opt not found; skipping 
post-build size optimization')}else if(opt.status!==0){process.exit(opt.status||1)}" - -[INFO]: 🎯 Checking for the Wasm target... -[INFO]: 🌀 Compiling to Wasm... - Finished `release-dist` profile [optimized] target(s) in 0.20s -[INFO]: ⬇️ Installing wasm-bindgen... -[INFO]: License key is set in Cargo.toml but no LICENSE file(s) were found; Please add the LICENSE file(s) to your project directory -[INFO]: ✨ Done in 0.40s -[INFO]: 📦 Your wasm pkg is ready to publish at /Users/jamal/git/KalamDB/link/sdks/typescript/wasm. - -> kalam-link@0.4.1-beta.2 build:fix-types -> node -e "const fs=require('fs');const p=require('path');const dts=p.join('wasm','kalam_link.d.ts');let c=fs.readFileSync(dts,'utf8');if(!c.includes('type JsonValue')){c='type JsonValue=null|boolean|number|string|JsonValue[]|{[key:string]:JsonValue};\n'+c;fs.writeFileSync(dts,c)}" - - -> kalam-link@0.4.1-beta.2 build:ts -> tsc - - -> kalam-link@0.4.1-beta.2 build:copy-wasm -> node -e "const fs=require('fs');const p=require('path');fs.mkdirSync('dist/wasm',{recursive:true});fs.readdirSync('wasm').filter(f=>!f.includes('package.json')&&!f.includes('.gitignore')).forEach(f=>fs.copyFileSync(p.join('wasm',f),p.join('dist/wasm',f)))" - - -🔬 Running unit tests (no server)... -✔ runAgent retries and acks once after success (1.211084ms) -✔ runAgent calls onFailed and then acks when configured (0.272792ms) -✔ runAgent does not ack when onFailed throws (0.409333ms) -✔ runAgent exposes llm context with system prompt metadata (0.250875ms) -✔ runConsumer delegates to runAgent and processes messages (0.169959ms) -✔ createLangChainAdapter normalizes completion and stream outputs (0.283417ms) -✔ resolveAuthProviderWithRetry retries transient errors then succeeds (0.902167ms) -✔ resolveAuthProviderWithRetry fails fast for non-transient errors (0.281958ms) -✔ isLikelyTransientAuthProviderError detects network-like messages (0.073584ms) -🧪 Running Basic WASM Module Tests - -Test 1: WASM module loads... 
- ✓ init function exists - ✓ KalamClient class exists - -Test 2: WASM initializes... -using deprecated parameters for the initialization function; pass a single object instead - ✓ WASM initialized successfully - -Test 3: KalamClient construction... - ✓ KalamClient instance created - ✓ isConnected method exists - ✓ connect method exists - ✓ disconnect method exists - ✓ query method exists - ✓ insert method exists - ✓ delete method exists - ✓ subscribe method exists - ✓ unsubscribe method exists - -Test 4: Constructor parameter validation... - ✓ Empty URL throws error - ✓ Empty username throws error - -================================================== -Results: 14 passed, 0 failed -================================================== - -✅ All tests passed! -✔ tests/basic.test.mjs (62.330208ms) -cell-value.test.mjs: all tests registered -[FileRef] Failed to parse JSON: SyntaxError: Unexpected token 'j', "just a string" is not valid JSON - at JSON.parse () - at FileRef.fromJson (file:///Users/jamal/git/KalamDB/link/sdks/typescript/dist/src/file_ref.js:71:58) - at FileRef.from (file:///Users/jamal/git/KalamDB/link/sdks/typescript/dist/src/file_ref.js:97:28) - at KalamCellValue.asFile (file:///Users/jamal/git/KalamDB/link/sdks/typescript/dist/src/cell_value.js:345:24) - at TestContext. 
(file:///Users/jamal/git/KalamDB/link/sdks/typescript/tests/cell-value.test.mjs:329:55) - at Test.runInAsyncScope (node:async_hooks:214:14) - at Test.run (node:internal/test_runner/test:1103:25) - at Suite.processPendingSubtests (node:internal/test_runner/test:785:18) - at Test.postRun (node:internal/test_runner/test:1232:19) - at Test.run (node:internal/test_runner/test:1160:12) -[FileRef] Failed to parse JSON: SyntaxError: Unexpected token 'e', "text" is not valid JSON - at JSON.parse () - at FileRef.fromJson (file:///Users/jamal/git/KalamDB/link/sdks/typescript/dist/src/file_ref.js:71:58) - at FileRef.from (file:///Users/jamal/git/KalamDB/link/sdks/typescript/dist/src/file_ref.js:97:28) - at KalamCellValue.asFile (file:///Users/jamal/git/KalamDB/link/sdks/typescript/dist/src/cell_value.js:345:24) - at KalamCellValue.asFileUrl (file:///Users/jamal/git/KalamDB/link/sdks/typescript/dist/src/cell_value.js:364:26) - at TestContext. (file:///Users/jamal/git/KalamDB/link/sdks/typescript/tests/cell-value.test.mjs:355:46) - at Test.runInAsyncScope (node:async_hooks:214:14) - at Test.run (node:internal/test_runner/test:1103:25) - at Suite.processPendingSubtests (node:internal/test_runner/test:785:18) - at Test.postRun (node:internal/test_runner/test:1232:19) -▶ KalamCellValue.from() - ✔ wraps null (0.414875ms) - ✔ wraps undefined as null (0.071875ms) - ✔ wraps string (0.068208ms) - ✔ wraps number (0.063625ms) - ✔ wraps boolean (0.083042ms) - ✔ wraps object (0.066791ms) - ✔ wraps array (0.055583ms) -✔ KalamCellValue.from() (1.392542ms) -▶ KalamCellValue.asString() - ✔ returns string as-is (0.131458ms) - ✔ converts number to string (0.074583ms) - ✔ converts boolean to string (0.102833ms) - ✔ handles Utf8 envelope (0.058166ms) - ✔ handles String envelope (0.047666ms) - ✔ returns null for SQL NULL (0.044958ms) -✔ KalamCellValue.asString() (0.611958ms) -▶ KalamCellValue.asInt() - ✔ returns integer (0.085125ms) - ✔ truncates float (0.042541ms) - ✔ parses string integer 
(0.058667ms) - ✔ converts boolean (0.03675ms) - ✔ returns null for non-numeric string (0.043291ms) - ✔ returns null for null (0.028041ms) -✔ KalamCellValue.asInt() (0.418ms) -▶ KalamCellValue.asBigInt() - ✔ converts number (0.074791ms) - ✔ parses string bigint (0.033958ms) - ✔ returns null for non-numeric string (0.041542ms) -✔ KalamCellValue.asBigInt() (0.209333ms) -▶ KalamCellValue.asFloat() - ✔ returns float (0.068291ms) - ✔ converts integer to float (0.028125ms) - ✔ parses string float (0.029917ms) - ✔ converts boolean (0.030125ms) - ✔ returns null for NaN string (0.035125ms) -✔ KalamCellValue.asFloat() (0.253167ms) -▶ KalamCellValue.asBool() - ✔ returns boolean (0.063208ms) - ✔ converts number (0.032167ms) - ✔ handles string true/false (0.038ms) - ✔ returns null for unrecognized string (0.029417ms) -✔ KalamCellValue.asBool() (0.2215ms) -▶ KalamCellValue.asDate() - ✔ converts unix millis (0.129667ms) - ✔ parses ISO 8601 string (0.040875ms) - ✔ parses numeric timestamp string (0.031166ms) - ✔ normalizes microsecond timestamp strings (2.339ms) - ✔ returns null for bad date (0.085041ms) - ✔ returns null for null (0.0425ms) -✔ KalamCellValue.asDate() (3.057167ms) -▶ KalamCellValue.asObject() - ✔ returns object (0.425459ms) - ✔ returns null for non-object (0.033958ms) - ✔ returns null for array (not considered object) (0.029375ms) -✔ KalamCellValue.asObject() (0.532291ms) -▶ KalamCellValue.asArray() - ✔ returns array (0.58025ms) - ✔ returns null for non-array (0.030334ms) -✔ KalamCellValue.asArray() (0.64225ms) -▶ KalamCellValue.toString() - ✔ NULL for null (0.060958ms) - ✔ string as-is (0.023917ms) - ✔ number stringified (0.0225ms) - ✔ object as JSON (0.037625ms) -✔ KalamCellValue.toString() (0.193041ms) -▶ wrapRowMap() - ✔ wraps each value in KalamCellValue (0.289375ms) - ✔ handles null values (0.051458ms) -✔ wrapRowMap() (0.481625ms) -▶ KalamRow.cell() - ✔ returns KalamCellValue for each column (0.086375ms) - ✔ handles null values (0.038417ms) -✔ KalamRow.cell() 
(0.155542ms) -▶ KalamRow.typedData - ✔ returns all cells as KalamCellValue (RowData) (0.092208ms) - ✔ caches the result on repeated access (0.034584ms) -✔ KalamRow.typedData (0.153042ms) -▶ KalamRow — unified query & subscribe access pattern - ✔ cell() works the same whether data is raw or pre-wrapped (0.044209ms) -✔ KalamRow — unified query & subscribe access pattern (0.063ms) -▶ KalamCellValue.asFile() - ✔ parses valid FILE column JSON (0.080041ms) - ✔ returns null for non-file values (0.322417ms) - ✔ returns null for null (0.02575ms) -✔ KalamCellValue.asFile() (0.468042ms) -▶ KalamCellValue.asFileUrl() - ✔ builds download URL from FILE reference (0.095333ms) - ✔ returns null for non-file values (0.092708ms) -✔ KalamCellValue.asFileUrl() (0.213292ms) -▶ edge cases - ✔ KalamCellValue wraps nested objects correctly (0.042625ms) -✔ edge cases (0.061958ms) -▶ SeqId - ✔ creates from number (0.132917ms) - ✔ creates from bigint (0.033125ms) - ✔ creates from string (0.027542ms) - ✔ zero() returns value 0 (0.031458ms) - ✔ extracts Snowflake fields (0.061ms) - ✔ toDate returns a Date (0.036625ms) - ✔ equals compares values (0.03825ms) - ✔ compareTo orders correctly (0.039917ms) - ✔ toJSON returns number (0.035958ms) - ✔ throws on invalid input (0.149791ms) -✔ SeqId (0.677958ms) -▶ KalamCellValue.asSeqId() - ✔ converts number to SeqId (0.058291ms) - ✔ converts string to SeqId (0.026ms) - ✔ returns null for non-numeric string (0.038292ms) - ✔ returns null for null (0.023333ms) -✔ KalamCellValue.asSeqId() (0.190875ms) -normalize.test.mjs passed -✔ tests/normalize.test.mjs (45.590959ms) -✔ README live resume example passes options and exposes typed checkpoints (1.654667ms) -✔ README executeAsUser example wraps SQL for tenant-safe writes (0.2485ms) -✔ README queryWithFiles example posts multipart data with auth header (12.080666ms) -✔ README runAgent example writes back through executeAsUser inside the user tenant (0.482625ms) -✔ queryRows wraps named_rows into KalamRow with 
typed cell access (3.548541ms) -✔ subscribeRows wraps change rows and oldValues as KalamRow instances (0.626375ms) -✔ liveTableRows delegates to live using SELECT * sugar (0.939167ms) -✔ live passes key columns through to Rust materialization (0.251708ms) -✔ login refresh and reconnect helpers delegate to wasm client (0.351875ms) -✔ consumer one-shot batch and ack preserve all consume options (0.186667ms) -✔ consumer run supports latest start manual ack and auto ack flows (0.269791ms) -✔ documented topic publish path uses SQL query calls only (0.194083ms) -✔ multiple subscriptions on one client share one websocket connection (12.104625ms) -✔ failed subscriptions do not leak local subscription state (11.96575ms) -✔ getSubscriptions trusts wasm empty snapshots over stale local metadata (11.636167ms) -✔ disconnect clears local subscription metadata even when wasm disconnect fails (11.746209ms) -✔ subscribeWithSql normalizes websocket rows into RowData cells (10.75725ms) -✔ live delegates materialized rows to the Rust/WASM layer (11.130625ms) -✔ parallel subscribe storms connect once and keep sibling subscriptions isolated (11.426667ms) -ℹ tests 103 -ℹ suites 19 -ℹ pass 103 -ℹ fail 0 -ℹ cancelled 0 -ℹ skipped 0 -ℹ todo 0 -ℹ duration_ms 147.728666 - -🔗 Checking server at http://localhost:8080 ... -✅ Server is reachable - -🧪 Running e2e tests... -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -▶ Auth - ✔ login with basic auth returns tokens and user info (892.391334ms) -KalamClient: Token refreshed, updated JWT authentication -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected - ✔ refreshToken returns a valid access token (864.192708ms) -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected - ✔ JWT-only client can query after login (887.019875ms) -KalamClient: Disconnecting from WebSocket... 
-KalamClient: Disconnected -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= - ✔ authProvider callback is used for authentication (866.030333ms) - ✔ Auth.none client can be created (anonymous mode) (0.109041ms) - ✔ wrong password rejects login (861.410833ms) - ✔ constructor requires url (0.144583ms) - ✔ constructor requires authProvider (0.058958ms) -✔ Auth (4372.100167ms) -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -▶ DDL - ✔ CREATE TABLE creates a new table (93.901541ms) - ✔ DROP TABLE IF EXISTS succeeds for existing table (163.832541ms) - ✔ DROP TABLE IF EXISTS succeeds for nonexistent table (81.214792ms) - ✔ table with multiple column types (113.006292ms) -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected - ✔ CREATE NAMESPACE IF NOT EXISTS is idempotent (83.114375ms) -KalamClient: WebSocket closed: code=1005, reason= -✔ DDL (1447.981833ms) -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -▶ Client Lifecycle - ✔ eager initialize then disconnect toggles isConnected (898.97675ms) -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected - ✔ setAutoReconnect / setReconnectDelay / setMaxReconnectAttempts (5.502584ms) -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= - ✔ disableCompression: true still connects and queries (14.95625ms) -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected - ✔ wsLazyConnect: true keeps query-only usage disconnected (6.116083ms) -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected - ✔ onConnect callback fires (505.824959ms) -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= - ✔ calling initialize() twice is safe (9.775584ms) -✔ Client Lifecycle (1442.381583ms) -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -▶ DML Helpers - ✔ insert() adds a row and returns response (7.3395ms) - ✔ update() modifies an existing row (12.359792ms) - ✔ delete() removes a row by id (13.382791ms) -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= -✔ DML Helpers (1034.281ms) -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -▶ Query - ✔ SELECT literal returns result (5.884041ms) - ✔ INSERT then SELECT returns inserted row (11.182958ms) - ✔ parameterised INSERT + SELECT with $1 $2 $3 (12.641125ms) - ✔ UPDATE modifies existing row (7.280459ms) - ✔ DELETE removes row (11.347166ms) - ✔ CREATE NAMESPACE IF NOT EXISTS succeeds (2.730375ms) - ✔ SELECT from nonexistent table returns error (5.671334ms) - ✔ queryOne returns first row or null (12.057583ms) - ✔ queryAll returns array of rows (6.794667ms) - ✔ multiple inserts in single call (11.166209ms) -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -✔ Query (1087.217334ms) -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-e5cc9806d8f2dc6e, sql: SELECT * FROM ts_recon_1774783832653_37iss4.events -KalamClient: Received WebSocket message (444 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-e5cc9806d8f2dc6e, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) -KalamClient: Parsed InitialDataBatch - id: sub-e5cc9806d8f2dc6e, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) -KalamClient: Subscribed with ID: sub-e5cc9806d8f2dc6e -KalamClient: Unsubscribed from: sub-e5cc9806d8f2dc6e -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -▶ Reconnect & Resume - ✔ disconnect then subscribe reconnects automatically (29.292333ms) -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-e5cc9806d8f2dc6e, sql: SELECT * FROM ts_recon_1774783832653_37iss4.events -KalamClient: Received WebSocket message (444 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-e5cc9806d8f2dc6e, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) -KalamClient: Parsed InitialDataBatch - id: sub-e5cc9806d8f2dc6e, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) -KalamClient: Subscribed with ID: sub-e5cc9806d8f2dc6e -KalamClient: Unsubscribed from: sub-e5cc9806d8f2dc6e -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected - ✔ onConnect and onDisconnect fire during reconnect cycle (20.070041ms) -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= - ✔ onError handler can be set without throwing (5.303875ms) -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Disconnecting from WebSocket... 
-KalamClient: Disconnected -KalamClient: Sending subscribe request - id: sub-e5cc9806d8f2dc6e, sql: SELECT * FROM ts_recon_1774783832653_37iss4.events - ✔ setAutoReconnect / setReconnectDelay / setMaxReconnectAttempts (3.428375ms) -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Received WebSocket message (444 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-e5cc9806d8f2dc6e, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) -KalamClient: Parsed InitialDataBatch - id: sub-e5cc9806d8f2dc6e, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) -KalamClient: Subscribed with ID: sub-e5cc9806d8f2dc6e -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Parsed Change - id: sub-e5cc9806d8f2dc6e, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-e5cc9806d8f2dc6e, sql: SELECT * FROM ts_recon_1774783832653_37iss4.events -KalamClient: Received WebSocket message (475 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-e5cc9806d8f2dc6e, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) -KalamClient: Received WebSocket message (384 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-e5cc9806d8f2dc6e, rows: 2, status: Ready -KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) -KalamClient: Subscribed with ID: sub-e5cc9806d8f2dc6e -KalamClient: Parsed Change - id: sub-e5cc9806d8f2dc6e, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) -KalamClient: Unsubscribed from: sub-e5cc9806d8f2dc6e -KalamClient: Unsubscribed from: sub-e5cc9806d8f2dc6e -KalamClient: Disconnecting from WebSocket... 
-KalamClient: Disconnected - ✔ subscription resumes after disconnect/reconnect (real-world) (243.091542ms) -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Sending subscribe request - id: sub-e5cc9806d8f2dc6e, sql: SELECT * FROM ts_recon_1774783832653_37iss4.events -KalamClient: Received WebSocket message (475 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-e5cc9806d8f2dc6e, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) -KalamClient: Received WebSocket message (466 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-e5cc9806d8f2dc6e, rows: 3, status: Ready -KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) -KalamClient: Subscribed with ID: sub-e5cc9806d8f2dc6e -KalamClient: Sending subscribe request - id: sub-ff5801b70f7a3ac4, sql: SELECT * FROM ts_recon_1774783832653_37iss4.events2 -KalamClient: Received WebSocket message (442 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-ff5801b70f7a3ac4, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-ff5801b70f7a3ac4 (registered subs: 2) -KalamClient: Parsed InitialDataBatch - id: sub-ff5801b70f7a3ac4, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-ff5801b70f7a3ac4 (registered subs: 2) -KalamClient: Subscribed with ID: sub-ff5801b70f7a3ac4 -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-e5cc9806d8f2dc6e, sql: SELECT * FROM ts_recon_1774783832653_37iss4.events -KalamClient: Received WebSocket message (475 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-e5cc9806d8f2dc6e, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) -KalamClient: Received WebSocket message (466 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-e5cc9806d8f2dc6e, rows: 3, status: Ready -KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 1) -KalamClient: Subscribed with ID: sub-e5cc9806d8f2dc6e -KalamClient: Sending subscribe request - id: sub-ff5801b70f7a3ac4, sql: SELECT * FROM ts_recon_1774783832653_37iss4.events2 -KalamClient: Received WebSocket message (442 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-ff5801b70f7a3ac4, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-ff5801b70f7a3ac4 (registered subs: 2) -KalamClient: Parsed InitialDataBatch - id: sub-ff5801b70f7a3ac4, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-ff5801b70f7a3ac4 (registered subs: 2) -KalamClient: Subscribed with ID: sub-ff5801b70f7a3ac4 -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Parsed Change - id: sub-e5cc9806d8f2dc6e, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-e5cc9806d8f2dc6e (registered subs: 2) -KalamClient: Parsed Change - id: sub-ff5801b70f7a3ac4, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-ff5801b70f7a3ac4 (registered subs: 2) -KalamClient: Unsubscribed from: sub-ff5801b70f7a3ac4 -KalamClient: Unsubscribed from: sub-e5cc9806d8f2dc6e -KalamClient: Unsubscribed from: sub-ff5801b70f7a3ac4 -KalamClient: Unsubscribed from: sub-e5cc9806d8f2dc6e -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected - ✔ multiple subscriptions work after reconnect (783.89975ms) -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-9923aad38b931ff9, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.from_seq_boundary WHERE id >= 41001 -KalamClient: Received WebSocket message (346 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-9923aad38b931ff9, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-9923aad38b931ff9 (registered subs: 1) -KalamClient: Received WebSocket message (344 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-9923aad38b931ff9, rows: 2, status: Ready -KalamClient: Looking for callback for subscription_id: sub-9923aad38b931ff9 (registered subs: 1) -KalamClient: Subscribed with ID: sub-9923aad38b931ff9 -KalamClient: Unsubscribed from: sub-9923aad38b931ff9 -KalamClient: Sending subscribe request - id: sub-9923aad38b931ff9, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.from_seq_boundary WHERE id >= 41001 -KalamClient: Received WebSocket message (346 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-9923aad38b931ff9, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-9923aad38b931ff9 (registered subs: 1) -KalamClient: Received WebSocket message (292 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-9923aad38b931ff9, rows: 1, status: Ready -KalamClient: Looking for callback for subscription_id: sub-9923aad38b931ff9 (registered subs: 1) -KalamClient: Subscribed with ID: sub-9923aad38b931ff9 -KalamClient: Unsubscribed from: sub-9923aad38b931ff9 -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected - ✔ subscribeWithSql from resumes with only seqs greater than checkpoint (217.899958ms) -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... 
-KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-fc9cba3cc5d6a202, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.resume_a -KalamClient: Received WebSocket message (318 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-fc9cba3cc5d6a202, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-fc9cba3cc5d6a202 (registered subs: 1) -KalamClient: Parsed InitialDataBatch - id: sub-fc9cba3cc5d6a202, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-fc9cba3cc5d6a202 (registered subs: 1) -KalamClient: Subscribed with ID: sub-fc9cba3cc5d6a202 -KalamClient: Sending subscribe request - id: sub-ec33ae1b4df076ae, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.resume_b -KalamClient: Received WebSocket message (318 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-ec33ae1b4df076ae, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-ec33ae1b4df076ae (registered subs: 2) -KalamClient: Parsed InitialDataBatch - id: sub-ec33ae1b4df076ae, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-ec33ae1b4df076ae (registered subs: 2) -KalamClient: Subscribed with ID: sub-ec33ae1b4df076ae -KalamClient: Sending subscribe request - id: sub-a04167b425bfbbdf, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.resume_c -KalamClient: Received WebSocket message (318 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-a04167b425bfbbdf, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-a04167b425bfbbdf (registered subs: 3) -KalamClient: Parsed InitialDataBatch - id: sub-a04167b425bfbbdf, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: 
sub-a04167b425bfbbdf (registered subs: 3) -KalamClient: Subscribed with ID: sub-a04167b425bfbbdf -KalamClient: Parsed Change - id: sub-fc9cba3cc5d6a202, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-fc9cba3cc5d6a202 (registered subs: 3) -KalamClient: Parsed Change - id: sub-ec33ae1b4df076ae, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-ec33ae1b4df076ae (registered subs: 3) -KalamClient: Parsed Change - id: sub-a04167b425bfbbdf, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-a04167b425bfbbdf (registered subs: 3) -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-e3b9e4a71f291d60, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.resume_a WHERE id >= 1002 -KalamClient: Received WebSocket message (346 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-e3b9e4a71f291d60, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-e3b9e4a71f291d60 (registered subs: 1) -KalamClient: Received WebSocket message (274 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-e3b9e4a71f291d60, rows: 1, status: Ready -KalamClient: Looking for callback for subscription_id: sub-e3b9e4a71f291d60 (registered subs: 1) -KalamClient: Subscribed with ID: sub-e3b9e4a71f291d60 -KalamClient: Sending subscribe request - id: sub-9e06c088b5e9eba7, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.resume_b WHERE id >= 2002 -KalamClient: Received WebSocket message (346 
bytes) -KalamClient: Parsed SubscriptionAck - id: sub-9e06c088b5e9eba7, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-9e06c088b5e9eba7 (registered subs: 2) -KalamClient: Received WebSocket message (274 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-9e06c088b5e9eba7, rows: 1, status: Ready -KalamClient: Looking for callback for subscription_id: sub-9e06c088b5e9eba7 (registered subs: 2) -KalamClient: Subscribed with ID: sub-9e06c088b5e9eba7 -KalamClient: Sending subscribe request - id: sub-b62d3aab9f46eaff, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.resume_c WHERE id >= 3002 -KalamClient: Received WebSocket message (346 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-b62d3aab9f46eaff, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-b62d3aab9f46eaff (registered subs: 3) -KalamClient: Received WebSocket message (274 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-b62d3aab9f46eaff, rows: 1, status: Ready -KalamClient: Looking for callback for subscription_id: sub-b62d3aab9f46eaff (registered subs: 3) -KalamClient: Subscribed with ID: sub-b62d3aab9f46eaff -KalamClient: Parsed Change - id: sub-e3b9e4a71f291d60, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-e3b9e4a71f291d60 (registered subs: 3) -KalamClient: Parsed Change - id: sub-9e06c088b5e9eba7, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-9e06c088b5e9eba7 (registered subs: 3) -KalamClient: Parsed Change - id: sub-b62d3aab9f46eaff, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-b62d3aab9f46eaff (registered subs: 3) -KalamClient: Unsubscribed from: sub-b62d3aab9f46eaff -KalamClient: Unsubscribed from: sub-9e06c088b5e9eba7 -KalamClient: Unsubscribed from: sub-e3b9e4a71f291d60 -KalamClient: Unsubscribed from: sub-a04167b425bfbbdf -KalamClient: Unsubscribed from: sub-ec33ae1b4df076ae -KalamClient: Unsubscribed 
from: sub-fc9cba3cc5d6a202 -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= - ✔ three active subscriptions resume without replaying old rows (862.630709ms) -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-75eb230c63e8cc14, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_a -KalamClient: Received WebSocket message (318 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-75eb230c63e8cc14, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-75eb230c63e8cc14 (registered subs: 1) -KalamClient: Parsed InitialDataBatch - id: sub-75eb230c63e8cc14, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-75eb230c63e8cc14 (registered subs: 1) -KalamClient: Subscribed with ID: sub-75eb230c63e8cc14 -KalamClient: Sending subscribe request - id: sub-58098cca70872354, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_b -KalamClient: Received WebSocket message (318 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-58098cca70872354, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-58098cca70872354 (registered subs: 2) -KalamClient: Parsed InitialDataBatch - id: sub-58098cca70872354, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-58098cca70872354 (registered subs: 2) -KalamClient: Subscribed with ID: sub-58098cca70872354 -KalamClient: Sending subscribe request - id: sub-e98d3d7451b0c749, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_c -KalamClient: Received WebSocket message (318 bytes) 
-KalamClient: Parsed SubscriptionAck - id: sub-e98d3d7451b0c749, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-e98d3d7451b0c749 (registered subs: 3) -KalamClient: Parsed InitialDataBatch - id: sub-e98d3d7451b0c749, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-e98d3d7451b0c749 (registered subs: 3) -KalamClient: Subscribed with ID: sub-e98d3d7451b0c749 -KalamClient: Parsed Change - id: sub-75eb230c63e8cc14, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-75eb230c63e8cc14 (registered subs: 3) -KalamClient: Parsed Change - id: sub-58098cca70872354, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-58098cca70872354 (registered subs: 3) -KalamClient: Parsed Change - id: sub-e98d3d7451b0c749, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-e98d3d7451b0c749 (registered subs: 3) -KalamClient: Unsubscribed from: sub-e98d3d7451b0c749 -KalamClient: Unsubscribed from: sub-58098cca70872354 -KalamClient: Unsubscribed from: sub-75eb230c63e8cc14 -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-c5458ea9f72d2fa9, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_a WHERE id >= 11012 -KalamClient: Received WebSocket message (346 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-c5458ea9f72d2fa9, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-c5458ea9f72d2fa9 (registered subs: 1) -KalamClient: Received WebSocket message (283 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-c5458ea9f72d2fa9, rows: 1, status: Ready -KalamClient: Looking for callback for subscription_id: sub-c5458ea9f72d2fa9 (registered subs: 1) -KalamClient: Subscribed with ID: sub-c5458ea9f72d2fa9 -KalamClient: Sending subscribe request - id: sub-9f9922586e8b1335, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_b WHERE id >= 12012 -KalamClient: Received WebSocket message (346 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-9f9922586e8b1335, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-9f9922586e8b1335 (registered subs: 2) -KalamClient: Received WebSocket message (283 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-9f9922586e8b1335, rows: 1, status: Ready -KalamClient: Looking for callback for subscription_id: sub-9f9922586e8b1335 (registered subs: 2) -KalamClient: Subscribed with ID: sub-9f9922586e8b1335 -KalamClient: Sending subscribe request - id: sub-3d33407948652b23, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_c WHERE id >= 13012 -KalamClient: Received WebSocket message (346 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-3d33407948652b23, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-3d33407948652b23 (registered subs: 3) -KalamClient: Received WebSocket message (283 bytes) -KalamClient: Parsed InitialDataBatch - id: 
sub-3d33407948652b23, rows: 1, status: Ready -KalamClient: Looking for callback for subscription_id: sub-3d33407948652b23 (registered subs: 3) -KalamClient: Subscribed with ID: sub-3d33407948652b23 -KalamClient: Parsed Change - id: sub-c5458ea9f72d2fa9, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-c5458ea9f72d2fa9 (registered subs: 3) -KalamClient: Parsed Change - id: sub-9f9922586e8b1335, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-9f9922586e8b1335 (registered subs: 3) -KalamClient: Parsed Change - id: sub-3d33407948652b23, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3d33407948652b23 (registered subs: 3) -KalamClient: Unsubscribed from: sub-3d33407948652b23 -KalamClient: Unsubscribed from: sub-9f9922586e8b1335 -KalamClient: Unsubscribed from: sub-c5458ea9f72d2fa9 -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-d276b34d70dfbabd, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_a WHERE id >= 11022 -KalamClient: Received WebSocket message (346 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-d276b34d70dfbabd, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-d276b34d70dfbabd (registered subs: 1) -KalamClient: Received WebSocket message (283 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-d276b34d70dfbabd, rows: 1, status: Ready -KalamClient: Looking for callback for subscription_id: sub-d276b34d70dfbabd (registered subs: 1) -KalamClient: Subscribed with ID: sub-d276b34d70dfbabd -KalamClient: Sending subscribe request - id: sub-62ddce9fb02d8cbd, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_b WHERE id >= 12022 -KalamClient: Received WebSocket message (346 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-62ddce9fb02d8cbd, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-62ddce9fb02d8cbd (registered subs: 2) -KalamClient: Received WebSocket message (283 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-62ddce9fb02d8cbd, rows: 1, status: Ready -KalamClient: Looking for callback for subscription_id: sub-62ddce9fb02d8cbd (registered subs: 2) -KalamClient: Subscribed with ID: sub-62ddce9fb02d8cbd -KalamClient: Sending subscribe request - id: sub-ff9bfa1e43cd64a4, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_c WHERE id >= 13022 -KalamClient: Received WebSocket message (346 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-ff9bfa1e43cd64a4, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-ff9bfa1e43cd64a4 (registered subs: 3) -KalamClient: Received WebSocket message (283 bytes) -KalamClient: Parsed InitialDataBatch - id: 
sub-ff9bfa1e43cd64a4, rows: 1, status: Ready -KalamClient: Looking for callback for subscription_id: sub-ff9bfa1e43cd64a4 (registered subs: 3) -KalamClient: Subscribed with ID: sub-ff9bfa1e43cd64a4 -KalamClient: Parsed Change - id: sub-d276b34d70dfbabd, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-d276b34d70dfbabd (registered subs: 3) -KalamClient: Parsed Change - id: sub-62ddce9fb02d8cbd, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-62ddce9fb02d8cbd (registered subs: 3) -KalamClient: Parsed Change - id: sub-ff9bfa1e43cd64a4, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-ff9bfa1e43cd64a4 (registered subs: 3) -KalamClient: Unsubscribed from: sub-ff9bfa1e43cd64a4 -KalamClient: Unsubscribed from: sub-62ddce9fb02d8cbd -KalamClient: Unsubscribed from: sub-d276b34d70dfbabd -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-54695f484d8f1e6, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_a WHERE id >= 11032 -KalamClient: Received WebSocket message (345 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-54695f484d8f1e6, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-54695f484d8f1e6 (registered subs: 1) -KalamClient: Received WebSocket message (282 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-54695f484d8f1e6, rows: 1, status: Ready -KalamClient: Looking for callback for subscription_id: sub-54695f484d8f1e6 (registered subs: 1) -KalamClient: Subscribed with ID: sub-54695f484d8f1e6 -KalamClient: Sending subscribe request - id: sub-6958e4c1d3c0d07c, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_b WHERE id >= 12032 -KalamClient: Received WebSocket message (346 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-6958e4c1d3c0d07c, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-6958e4c1d3c0d07c (registered subs: 2) -KalamClient: Received WebSocket message (283 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-6958e4c1d3c0d07c, rows: 1, status: Ready -KalamClient: Looking for callback for subscription_id: sub-6958e4c1d3c0d07c (registered subs: 2) -KalamClient: Subscribed with ID: sub-6958e4c1d3c0d07c -KalamClient: Sending subscribe request - id: sub-fd4975a26d0f88a8, sql: SELECT id, payload FROM ts_recon_1774783832653_37iss4.chaos_c WHERE id >= 13032 -KalamClient: Received WebSocket message (346 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-fd4975a26d0f88a8, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-fd4975a26d0f88a8 (registered subs: 3) -KalamClient: Received WebSocket message (283 bytes) -KalamClient: Parsed InitialDataBatch - id: 
sub-fd4975a26d0f88a8, rows: 1, status: Ready -KalamClient: Looking for callback for subscription_id: sub-fd4975a26d0f88a8 (registered subs: 3) -KalamClient: Subscribed with ID: sub-fd4975a26d0f88a8 -KalamClient: Parsed Change - id: sub-54695f484d8f1e6, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-54695f484d8f1e6 (registered subs: 3) -KalamClient: Parsed Change - id: sub-6958e4c1d3c0d07c, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-6958e4c1d3c0d07c (registered subs: 3) -KalamClient: Parsed Change - id: sub-fd4975a26d0f88a8, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-fd4975a26d0f88a8 (registered subs: 3) -KalamClient: Unsubscribed from: sub-fd4975a26d0f88a8 -KalamClient: Unsubscribed from: sub-6958e4c1d3c0d07c -KalamClient: Unsubscribed from: sub-54695f484d8f1e6 -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected - ✔ chaos: repeated reconnect cycles with 3 subscriptions stay consistent (1077.164459ms) -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected - ✔ queries work correctly after WebSocket disconnect cycle (13.240292ms) -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= -✔ Reconnect & Resume (4341.056208ms) -KalamClient: Disconnecting from WebSocket... 
-KalamClient: Disconnected -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-c26b6a8ee07bd9e5, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume_live -KalamClient: Received WebSocket message (316 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-c26b6a8ee07bd9e5, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-c26b6a8ee07bd9e5 (registered subs: 1) -KalamClient: Parsed InitialDataBatch - id: sub-c26b6a8ee07bd9e5, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-c26b6a8ee07bd9e5 (registered subs: 1) -KalamClient: Subscribed with ID: sub-c26b6a8ee07bd9e5 -KalamClient: Parsed Change - id: sub-c26b6a8ee07bd9e5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-c26b6a8ee07bd9e5 (registered subs: 1) -KalamClient: Unsubscribed from: sub-c26b6a8ee07bd9e5 -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-c26b6a8ee07bd9e5, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume_live -KalamClient: Received WebSocket message (344 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-c26b6a8ee07bd9e5, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-c26b6a8ee07bd9e5 (registered subs: 1) -KalamClient: Received WebSocket message (271 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-c26b6a8ee07bd9e5, rows: 1, status: Ready -KalamClient: Looking for callback for subscription_id: sub-c26b6a8ee07bd9e5 (registered subs: 1) -KalamClient: Subscribed with ID: sub-c26b6a8ee07bd9e5 -KalamClient: Parsed Change - id: sub-c26b6a8ee07bd9e5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-c26b6a8ee07bd9e5 (registered subs: 1) -KalamClient: Unsubscribed from: sub-c26b6a8ee07bd9e5 -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= -▶ Resume from checkpoint after disconnect - ✔ subscription resumes from checkpoint after disconnect — no replay (342.499292ms) -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-46abb2c4ba616a96, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume3_a -KalamClient: Received WebSocket message (316 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-46abb2c4ba616a96, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-46abb2c4ba616a96 (registered subs: 1) -KalamClient: Parsed InitialDataBatch - id: sub-46abb2c4ba616a96, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-46abb2c4ba616a96 (registered subs: 1) -KalamClient: Subscribed with ID: sub-46abb2c4ba616a96 -KalamClient: Sending subscribe request - id: sub-ee0dce3a832017dc, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume3_b -KalamClient: Received WebSocket message (316 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-ee0dce3a832017dc, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-ee0dce3a832017dc (registered subs: 2) -KalamClient: Parsed InitialDataBatch - id: sub-ee0dce3a832017dc, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-ee0dce3a832017dc (registered subs: 2) -KalamClient: Subscribed with ID: sub-ee0dce3a832017dc -KalamClient: Sending subscribe request - id: sub-6f4a46b5d9938168, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume3_c -KalamClient: Received WebSocket message (316 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-6f4a46b5d9938168, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-6f4a46b5d9938168 (registered subs: 3) -KalamClient: Parsed InitialDataBatch - id: sub-6f4a46b5d9938168, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-6f4a46b5d9938168 (registered subs: 3) -KalamClient: Subscribed with ID: sub-6f4a46b5d9938168 -KalamClient: 
Parsed Change - id: sub-46abb2c4ba616a96, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-46abb2c4ba616a96 (registered subs: 3) -KalamClient: Parsed Change - id: sub-ee0dce3a832017dc, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-ee0dce3a832017dc (registered subs: 3) -KalamClient: Parsed Change - id: sub-6f4a46b5d9938168, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-6f4a46b5d9938168 (registered subs: 3) -KalamClient: Unsubscribed from: sub-46abb2c4ba616a96 -KalamClient: Unsubscribed from: sub-ee0dce3a832017dc -KalamClient: Unsubscribed from: sub-6f4a46b5d9938168 -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-46abb2c4ba616a96, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume3_a -KalamClient: Received WebSocket message (344 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-46abb2c4ba616a96, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-46abb2c4ba616a96 (registered subs: 1) -KalamClient: Received WebSocket message (273 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-46abb2c4ba616a96, rows: 1, status: Ready -KalamClient: Looking for callback for subscription_id: sub-46abb2c4ba616a96 (registered subs: 1) -KalamClient: Subscribed with ID: sub-46abb2c4ba616a96 -KalamClient: Sending subscribe request - id: sub-ee0dce3a832017dc, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume3_b -KalamClient: Received WebSocket message (344 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-ee0dce3a832017dc, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-ee0dce3a832017dc (registered subs: 2) -KalamClient: Received WebSocket message (273 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-ee0dce3a832017dc, rows: 1, status: Ready -KalamClient: Looking for callback for subscription_id: sub-ee0dce3a832017dc (registered subs: 2) -KalamClient: Subscribed with ID: sub-ee0dce3a832017dc -KalamClient: Sending subscribe request - id: sub-6f4a46b5d9938168, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume3_c -KalamClient: Received WebSocket message (344 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-6f4a46b5d9938168, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-6f4a46b5d9938168 (registered subs: 3) -KalamClient: Received WebSocket message (273 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-6f4a46b5d9938168, rows: 1, status: Ready 
-KalamClient: Looking for callback for subscription_id: sub-6f4a46b5d9938168 (registered subs: 3) -KalamClient: Subscribed with ID: sub-6f4a46b5d9938168 -KalamClient: Parsed Change - id: sub-46abb2c4ba616a96, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-46abb2c4ba616a96 (registered subs: 3) -KalamClient: Parsed Change - id: sub-ee0dce3a832017dc, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-ee0dce3a832017dc (registered subs: 3) -KalamClient: Parsed Change - id: sub-6f4a46b5d9938168, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-6f4a46b5d9938168 (registered subs: 3) -KalamClient: Unsubscribed from: sub-46abb2c4ba616a96 -KalamClient: Unsubscribed from: sub-ee0dce3a832017dc -KalamClient: Unsubscribed from: sub-6f4a46b5d9938168 -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected - ✔ three subscriptions resume from their checkpoints after disconnect (831.419416ms) -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-604589b6e4371951, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume_double -KalamClient: Received WebSocket message (316 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-604589b6e4371951, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-604589b6e4371951 (registered subs: 1) -KalamClient: Parsed InitialDataBatch - id: sub-604589b6e4371951, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-604589b6e4371951 (registered subs: 1) -KalamClient: Subscribed with ID: sub-604589b6e4371951 -KalamClient: Parsed Change - id: sub-604589b6e4371951, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-604589b6e4371951 (registered subs: 1) -KalamClient: Unsubscribed from: sub-604589b6e4371951 -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-604589b6e4371951, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume_double -KalamClient: Received WebSocket message (344 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-604589b6e4371951, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-604589b6e4371951 (registered subs: 1) -KalamClient: Received WebSocket message (271 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-604589b6e4371951, rows: 1, status: Ready -KalamClient: Looking for callback for subscription_id: sub-604589b6e4371951 (registered subs: 1) -KalamClient: Subscribed with ID: sub-604589b6e4371951 -KalamClient: Unsubscribed from: sub-604589b6e4371951 -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-604589b6e4371951, sql: SELECT id, value FROM ts_resume_1774783837046_l2m6h6.resume_double -KalamClient: Received WebSocket message (344 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-604589b6e4371951, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-604589b6e4371951 (registered subs: 1) -KalamClient: Received WebSocket message (271 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-604589b6e4371951, rows: 1, status: Ready -KalamClient: Looking for callback for subscription_id: sub-604589b6e4371951 (registered subs: 1) -KalamClient: Subscribed with ID: sub-604589b6e4371951 -KalamClient: Parsed Change - id: sub-604589b6e4371951, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-604589b6e4371951 (registered subs: 1) -KalamClient: Unsubscribed from: sub-604589b6e4371951 -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected - ✔ double disconnect recovers and resumes from checkpoint (729.587042ms) -✔ Resume from checkpoint after disconnect (2816.711875ms) -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-71c1b12e1c371229, sql: SELECT * FROM ts_sub_1774783839914_i687ql.messages -KalamClient: Received WebSocket message (441 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-71c1b12e1c371229, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) -KalamClient: Parsed InitialDataBatch - id: sub-71c1b12e1c371229, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) -KalamClient: Subscribed with ID: sub-71c1b12e1c371229 -KalamClient: Unsubscribed from: sub-71c1b12e1c371229 -KalamClient: Sending subscribe request - id: sub-71c1b12e1c371229, sql: SELECT * FROM ts_sub_1774783839914_i687ql.messages -▶ Subscription - ✔ subscribe returns unsubscribe function (15.809167ms) -KalamClient: Received WebSocket message (441 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-71c1b12e1c371229, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) -KalamClient: Parsed InitialDataBatch - id: sub-71c1b12e1c371229, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) -KalamClient: Subscribed with ID: sub-71c1b12e1c371229 -KalamClient: Unsubscribed from: sub-71c1b12e1c371229 -KalamClient: Sending subscribe request - id: sub-71c1b12e1c371229, sql: SELECT * FROM ts_sub_1774783839914_i687ql.messages - ✔ subscribe receives subscription_ack event (1514.21875ms) -KalamClient: Received WebSocket message (441 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-71c1b12e1c371229, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) -KalamClient: Parsed InitialDataBatch - id: 
sub-71c1b12e1c371229, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) -KalamClient: Subscribed with ID: sub-71c1b12e1c371229 -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Parsed Change - id: sub-71c1b12e1c371229, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) -KalamClient: Unsubscribed from: sub-71c1b12e1c371229 -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: Sending subscribe request - id: sub-fc42cebeda3f7cdf, sql: SELECT * FROM ts_sub_1774783839914_i687ql.messages WHERE id = 600 - ✔ insert triggers change event on subscriber (4538.228792ms) -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Received WebSocket message (441 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-fc42cebeda3f7cdf, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-fc42cebeda3f7cdf (registered subs: 1) -KalamClient: Parsed InitialDataBatch - id: sub-fc42cebeda3f7cdf, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-fc42cebeda3f7cdf (registered subs: 1) -KalamClient: Subscribed with ID: sub-fc42cebeda3f7cdf -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Parsed Change - id: sub-fc42cebeda3f7cdf, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-fc42cebeda3f7cdf (registered subs: 1) -KalamClient: Unsubscribed from: sub-fc42cebeda3f7cdf -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: Sending subscribe request - id: sub-71c1b12e1c371229, sql: SELECT * FROM ts_sub_1774783839914_i687ql.messages - ✔ subscribeWithSql with WHERE clause works (4548.798083ms) -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Received WebSocket message (472 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-71c1b12e1c371229, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) -KalamClient: Received WebSocket message (373 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-71c1b12e1c371229, rows: 2, status: Ready -KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) -KalamClient: Subscribed with ID: sub-71c1b12e1c371229 -KalamClient: Unsubscribed from: sub-71c1b12e1c371229 -KalamClient: Sending subscribe request - id: sub-71c1b12e1c371229, sql: SELECT * FROM ts_sub_1774783839914_i687ql.messages - ✔ getSubscriptions / isSubscribedTo track subscriptions (1528.354584ms) -KalamClient: Received WebSocket message (472 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-71c1b12e1c371229, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) -KalamClient: Received WebSocket message (373 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-71c1b12e1c371229, rows: 2, status: Ready -KalamClient: Looking for callback for subscription_id: sub-71c1b12e1c371229 (registered subs: 1) -KalamClient: Subscribed with ID: 
sub-71c1b12e1c371229 -KalamClient: Sending subscribe request - id: sub-e90e1009e874b6aa, sql: SELECT * FROM ts_sub_1774783839914_i687ql.messages WHERE id > 0 -KalamClient: Received WebSocket message (472 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-e90e1009e874b6aa, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-e90e1009e874b6aa (registered subs: 2) -KalamClient: Received WebSocket message (373 bytes) -KalamClient: Parsed InitialDataBatch - id: sub-e90e1009e874b6aa, rows: 2, status: Ready -KalamClient: Looking for callback for subscription_id: sub-e90e1009e874b6aa (registered subs: 2) -KalamClient: Subscribed with ID: sub-e90e1009e874b6aa -KalamClient: Unsubscribed from: sub-71c1b12e1c371229 -KalamClient: Unsubscribed from: sub-e90e1009e874b6aa - ✔ unsubscribeAll clears all subscriptions (2059.333541ms) -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Connecting to WebSocket... 
-KalamClient: Waiting for WebSocket to open... -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-3cb8ac26d0fefcc5, sql: SELECT id, body FROM ts_sub_1774783839914_i687ql.messages WHERE id >= 85508900 -KalamClient: Received WebSocket message (315 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-3cb8ac26d0fefcc5, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed InitialDataBatch - id: sub-3cb8ac26d0fefcc5, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Subscribed with ID: sub-3cb8ac26d0fefcc5 -KalamClient: Sending subscribe request - id: sub-3cb8ac26d0fefcc5, sql: SELECT id, body FROM ts_sub_1774783839914_i687ql.messages WHERE id >= 85508900 -KalamClient: Received WebSocket message (315 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-3cb8ac26d0fefcc5, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed InitialDataBatch - id: sub-3cb8ac26d0fefcc5, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Subscribed with ID: sub-3cb8ac26d0fefcc5 -KalamClient: Sending subscribe request - id: sub-3cb8ac26d0fefcc5, sql: SELECT 
id, body FROM ts_sub_1774783839914_i687ql.messages WHERE id >= 85508900 -KalamClient: Received WebSocket message (315 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-3cb8ac26d0fefcc5, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed InitialDataBatch - id: sub-3cb8ac26d0fefcc5, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Subscribed with ID: sub-3cb8ac26d0fefcc5 -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) 
-KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 
1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Parsed Change - id: sub-3cb8ac26d0fefcc5, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-3cb8ac26d0fefcc5 (registered subs: 1) -KalamClient: Unsubscribed from: sub-3cb8ac26d0fefcc5 -KalamClient: Unsubscribed from: sub-3cb8ac26d0fefcc5 -KalamClient: Unsubscribed from: sub-3cb8ac26d0fefcc5 -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: WebSocket closed: code=1005, reason= - ✔ concurrent writers fan out inserts to every subscriber client (63.665041ms) -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Connecting to WebSocket... -KalamClient: Waiting for WebSocket to open... -KalamClient: WebSocket connected, sending authentication... -KalamClient: Waiting for authentication... 
-KalamClient: Authentication successful - user_id: u_e7b9f077d2744ab5ade7882c6cde68fa, role: Dba -KalamClient: WebSocket connection established and authenticated -KalamClient: Sending subscribe request - id: sub-94368d5193c0ab23, sql: SELECT id, body FROM ts_sub_1774783839914_i687ql.messages WHERE id = 85514301 -KalamClient: Received WebSocket message (315 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-94368d5193c0ab23, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-94368d5193c0ab23 (registered subs: 1) -KalamClient: Parsed InitialDataBatch - id: sub-94368d5193c0ab23, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-94368d5193c0ab23 (registered subs: 1) -KalamClient: Subscribed with ID: sub-94368d5193c0ab23 -KalamClient: Sending subscribe request - id: sub-1336eef26f07c71d, sql: SELECT id, body FROM ts_sub_1774783839914_i687ql.messages WHERE id = 85514302 -KalamClient: Received WebSocket message (315 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-1336eef26f07c71d, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-1336eef26f07c71d (registered subs: 2) -KalamClient: Parsed InitialDataBatch - id: sub-1336eef26f07c71d, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-1336eef26f07c71d (registered subs: 2) -KalamClient: Subscribed with ID: sub-1336eef26f07c71d -KalamClient: Sending subscribe request - id: sub-c0900fac94513724, sql: SELECT id, body FROM ts_sub_1774783839914_i687ql.messages WHERE id = 85514303 -KalamClient: Received WebSocket message (315 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-c0900fac94513724, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-c0900fac94513724 (registered subs: 3) -KalamClient: Parsed InitialDataBatch - id: sub-c0900fac94513724, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-c0900fac94513724 (registered subs: 3) -KalamClient: 
Subscribed with ID: sub-c0900fac94513724 -KalamClient: Sending subscribe request - id: sub-99f183e3749b404f, sql: SELECT id, body FROM ts_sub_1774783839914_i687ql.messages WHERE id = 85514304 -KalamClient: Received WebSocket message (315 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-99f183e3749b404f, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-99f183e3749b404f (registered subs: 4) -KalamClient: Parsed InitialDataBatch - id: sub-99f183e3749b404f, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-99f183e3749b404f (registered subs: 4) -KalamClient: Subscribed with ID: sub-99f183e3749b404f -KalamClient: Sending subscribe request - id: sub-7f78694a241dd6df, sql: SELECT id, body FROM ts_sub_1774783839914_i687ql.messages WHERE id = 85514305 -KalamClient: Received WebSocket message (315 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-7f78694a241dd6df, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-7f78694a241dd6df (registered subs: 5) -KalamClient: Parsed InitialDataBatch - id: sub-7f78694a241dd6df, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-7f78694a241dd6df (registered subs: 5) -KalamClient: Subscribed with ID: sub-7f78694a241dd6df -KalamClient: Sending subscribe request - id: sub-70dcf50a59a522ed, sql: SELECT id, body FROM ts_sub_1774783839914_i687ql.messages WHERE id = 85514306 -KalamClient: Received WebSocket message (315 bytes) -KalamClient: Parsed SubscriptionAck - id: sub-70dcf50a59a522ed, total_rows: 0 -KalamClient: Looking for callback for subscription_id: sub-70dcf50a59a522ed (registered subs: 6) -KalamClient: Parsed InitialDataBatch - id: sub-70dcf50a59a522ed, rows: 0, status: Ready -KalamClient: Looking for callback for subscription_id: sub-70dcf50a59a522ed (registered subs: 6) -KalamClient: Subscribed with ID: sub-70dcf50a59a522ed -KalamClient: Parsed Change - id: sub-1336eef26f07c71d, type: Insert, rows: Some(1) 
-KalamClient: Looking for callback for subscription_id: sub-1336eef26f07c71d (registered subs: 6) -KalamClient: Parsed Change - id: sub-99f183e3749b404f, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-99f183e3749b404f (registered subs: 6) -KalamClient: Parsed Change - id: sub-70dcf50a59a522ed, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-70dcf50a59a522ed (registered subs: 6) -KalamClient: Parsed Change - id: sub-c0900fac94513724, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-c0900fac94513724 (registered subs: 6) -KalamClient: Parsed Change - id: sub-7f78694a241dd6df, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-7f78694a241dd6df (registered subs: 6) -KalamClient: Parsed Change - id: sub-94368d5193c0ab23, type: Insert, rows: Some(1) -KalamClient: Looking for callback for subscription_id: sub-94368d5193c0ab23 (registered subs: 6) -KalamClient: Unsubscribed from: sub-70dcf50a59a522ed -KalamClient: Unsubscribed from: sub-7f78694a241dd6df -KalamClient: Unsubscribed from: sub-99f183e3749b404f -KalamClient: Unsubscribed from: sub-c0900fac94513724 -KalamClient: Unsubscribed from: sub-1336eef26f07c71d -KalamClient: Unsubscribed from: sub-94368d5193c0ab23 -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected - ✔ one client keeps many simultaneous subscriptions isolated (55.725792ms) -KalamClient: WebSocket closed: code=1005, reason= -KalamClient: Disconnecting from WebSocket... -KalamClient: Disconnected -✔ Subscription (15321.963666ms) -KalamClient: WebSocket closed: code=1005, reason= -ℹ tests 53 -ℹ suites 8 -ℹ pass 53 -ℹ fail 0 -ℹ cancelled 0 -ℹ skipped 0 -ℹ todo 0 -ℹ duration_ms 32284.714875 - -✅ All TypeScript SDK tests passed! 
From 0d1d43f42e9a55849093fc28cbbd1bce9d7944cd Mon Sep 17 00:00:00 2001 From: jamals86 Date: Sun, 29 Mar 2026 15:40:41 +0300 Subject: [PATCH 06/12] Fixed issue in compiling sdk ts and gr extension --- .github/workflows/release.yml | 3 ++- link/sdks/typescript/tests/readme-examples.test.mjs | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8a2fefe28..0b336b5b3 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1234,7 +1234,8 @@ jobs: libclang-dev \ llvm-dev \ pkg-config \ - libssl-dev + libssl-dev \ + libreadline-dev - name: Setup Rust uses: dtolnay/rust-toolchain@stable diff --git a/link/sdks/typescript/tests/readme-examples.test.mjs b/link/sdks/typescript/tests/readme-examples.test.mjs index 0f15b6c68..965ef37b5 100644 --- a/link/sdks/typescript/tests/readme-examples.test.mjs +++ b/link/sdks/typescript/tests/readme-examples.test.mjs @@ -251,7 +251,7 @@ test('README queryWithFiles example posts multipart data with auth header', asyn assert.equal(fetchCall.options.body.get('params'), JSON.stringify(['att_1'])); const uploaded = fetchCall.options.body.get('file:upload'); - assert.ok(uploaded instanceof TestFile); + assert.ok(uploaded instanceof Blob, 'uploaded file should be a Blob'); assert.equal(uploaded.name, 'note.txt'); } finally { globalThis.fetch = originalFetch; From 73b331d2209aca515e418589e6382c816947b653 Mon Sep 17 00:00:00 2001 From: jamals86 Date: Sun, 29 Mar 2026 16:41:58 +0300 Subject: [PATCH 07/12] Bump CI/node, optimize WASM & timestamp code Upgrade CI workflows (actions/cache -> v5, download-artifact -> v6, setup-node -> Node 22) and add a Rust cache step for the dart bridge. Improve Docker healthcheck and test scripts to use a robust GET (-O /dev/null / curl) to avoid h2c/HEAD issues. 
Optimize link crate: pre-serialize WASM ping payload, use zero-copy gzip helper (decompress_if_gzip) for WS messages, add inline annotations and helper functions for validation to avoid allocations, and rewrite timestamp formatting to use write! into pre-sized buffers (with added regression tests). Bump WASM-related dependencies and update Cargo.toml accordingly. --- .github/workflows/ci.yml | 22 ++--- .github/workflows/release.yml | 56 +++++++----- Cargo.lock | 34 ++++---- Cargo.toml | 12 +-- docker/build/Dockerfile.prebuilt | 4 +- docker/build/test-docker-image.sh | 2 +- docker/build/test-docker-startup.sh | 2 +- link/src/compression.rs | 15 ++++ link/src/timestamp.rs | 130 ++++++++++++++++++++++++---- link/src/wasm/client.rs | 13 ++- link/src/wasm/helpers.rs | 41 +++++---- link/src/wasm/reconnect.rs | 6 +- link/src/wasm/state.rs | 3 + link/src/wasm/validation.rs | 50 +++++++++-- 14 files changed, 282 insertions(+), 108 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 56faf9177..b3e1fe744 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -58,7 +58,7 @@ jobs: libssl-dev - name: Cache cargo registry - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: ~/.cargo/registry key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }} @@ -66,7 +66,7 @@ jobs: ${{ runner.os }}-cargo-registry- - name: Cache cargo index - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: ~/.cargo/git key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }} @@ -74,7 +74,7 @@ jobs: ${{ runner.os }}-cargo-index- - name: Cache cargo build - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: target key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('**/Cargo.lock') }} @@ -106,7 +106,7 @@ jobs: # libssl-dev # - name: Cache cargo registry - # uses: actions/cache@v4 + # uses: actions/cache@v5 # with: # path: ~/.cargo/registry # key: ${{ runner.os }}-cargo-registry-${{ 
hashFiles('**/Cargo.lock') }} @@ -114,7 +114,7 @@ jobs: # ${{ runner.os }}-cargo-registry- # - name: Cache cargo index - # uses: actions/cache@v4 + # uses: actions/cache@v5 # with: # path: ~/.cargo/git # key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }} @@ -122,7 +122,7 @@ jobs: # ${{ runner.os }}-cargo-index- # - name: Cache cargo build - # uses: actions/cache@v4 + # uses: actions/cache@v5 # with: # path: target # key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('**/Cargo.lock') }} @@ -154,7 +154,7 @@ jobs: libssl-dev - name: Cache cargo registry - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: ~/.cargo/registry key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }} @@ -162,7 +162,7 @@ jobs: ${{ runner.os }}-cargo-registry- - name: Cache cargo index - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: ~/.cargo/git key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }} @@ -170,7 +170,7 @@ jobs: ${{ runner.os }}-cargo-index- - name: Cache cargo build - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: target key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('**/Cargo.lock') }} @@ -310,10 +310,10 @@ jobs: with: toolchain: ${{ env.RUST_VERSION }} - - name: Setup Node 20 + - name: Setup Node 22 uses: actions/setup-node@v6 with: - node-version: 20 + node-version: 22 - name: Install cargo-license shell: bash diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 0b336b5b3..d0137b4cc 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -116,10 +116,10 @@ jobs: with: toolchain: ${{ env.RUST_VERSION }} - - name: Setup Node 20 + - name: Setup Node 22 uses: actions/setup-node@v6 with: - node-version: 20 + node-version: 22 - name: Install cargo-license shell: bash @@ -213,10 +213,10 @@ jobs: shared-key: wasm-build cache-on-failure: true - - name: Setup Node 20 + - name: Setup Node 22 uses: actions/setup-node@v6 with: - node-version: 
20 + node-version: 22 - name: Install wasm-pack shell: bash @@ -472,7 +472,7 @@ jobs: - name: Download UI dist if: ${{ steps.vars.outputs.build_linux == 'true' }} - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v6 with: name: ui-dist path: ui/dist @@ -563,7 +563,7 @@ jobs: - name: Download UI dist if: ${{ steps.vars.outputs.build_linux_arm == 'true' }} - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v6 with: name: ui-dist path: ui/dist @@ -720,7 +720,7 @@ jobs: - name: Download UI dist if: ${{ steps.vars.outputs.build_windows == 'true' }} - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v6 with: name: ui-dist path: ui/dist @@ -837,7 +837,7 @@ jobs: - name: Download UI dist if: ${{ steps.vars.outputs.build_macos == 'true' }} - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v6 with: name: ui-dist path: ui/dist @@ -1048,13 +1048,13 @@ jobs: uses: actions/checkout@v6 - name: Download server binary - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v6 with: name: dist-linux-x86_64 path: dist/ - name: Download CLI binary - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v6 with: name: dist-cli-linux-x86_64 path: dist/ @@ -1219,7 +1219,7 @@ jobs: uses: actions/checkout@v6 - name: Download server binary - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v6 with: name: dist-linux-x86_64 path: dist/ @@ -1443,7 +1443,7 @@ jobs: git push origin main - name: Download all artifacts - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v6 with: path: release-assets pattern: 'dist-*' @@ -1535,25 +1535,25 @@ jobs: echo "docker_image_created=$DOCKER_IMAGE_CREATED" >> "$GITHUB_OUTPUT" - name: Download pre-built artifacts (x86_64) - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v6 with: name: dist-linux-x86_64 path: artifacts/server-amd64 - name: Download pre-built CLI artifact (x86_64) - uses: 
actions/download-artifact@v4 + uses: actions/download-artifact@v6 with: name: dist-cli-linux-x86_64 path: artifacts/cli-amd64 - name: Download pre-built artifacts (aarch64) - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v6 with: name: dist-linux-aarch64 path: artifacts/server-arm64 - name: Download pre-built CLI artifact (aarch64) - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v6 with: name: dist-cli-linux-aarch64 path: artifacts/cli-arm64 @@ -1818,7 +1818,7 @@ jobs: echo "value=$DESCRIPTION" >> "$GITHUB_OUTPUT" - name: Download PG extension artifact (x86_64) - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v6 with: name: dist-pg-extension-linux-x86_64 path: pg-artifacts-amd64/ @@ -1999,7 +1999,7 @@ jobs: uses: actions/checkout@v6 - name: Download server binary - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v6 with: name: dist-linux-x86_64 path: dist/ @@ -2023,10 +2023,10 @@ jobs: shared-key: sdk-tests-typescript cache-on-failure: true - - name: Setup Node 20 + - name: Setup Node 22 uses: actions/setup-node@v6 with: - node-version: 20 + node-version: 22 - name: Install wasm-pack shell: bash @@ -2137,7 +2137,7 @@ jobs: uses: actions/checkout@v6 - name: Download server binary - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v6 with: name: dist-linux-x86_64 path: dist/ @@ -2149,6 +2149,18 @@ jobs: sudo apt-get install -y --no-install-recommends \ clang libclang-dev pkg-config libssl-dev + - name: Setup Rust + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ env.RUST_VERSION }} + + - name: Cache Rust (dart bridge) + uses: Swatinem/rust-cache@v2 + with: + shared-key: dart-bridge + cache-on-failure: true + workspaces: link -> link/target + - name: Setup Flutter uses: subosito/flutter-action@v2 with: @@ -2348,7 +2360,7 @@ jobs: - name: Setup Node.js uses: actions/setup-node@v6 with: - node-version: '20' + node-version: '22' registry-url: 
'https://registry.npmjs.org' - name: Setup Rust diff --git a/Cargo.lock b/Cargo.lock index 0adda9079..c2d9002fa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3711,10 +3711,12 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.91" +version = "0.3.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c" +checksum = "cc4c90f45aa2e6eacbe8645f77fdea542ac97a494bcd117a67df9ff4d611f995" dependencies = [ + "cfg-if", + "futures-util", "once_cell", "wasm-bindgen", ] @@ -7885,9 +7887,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.114" +version = "0.2.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e" +checksum = "6523d69017b7633e396a89c5efab138161ed5aafcbc8d3e5c5a42ae38f50495a" dependencies = [ "cfg-if", "once_cell", @@ -7898,23 +7900,19 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.64" +version = "0.4.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9c5522b3a28661442748e09d40924dfb9ca614b21c00d3fd135720e48b67db8" +checksum = "2d1faf851e778dfa54db7cd438b70758eba9755cb47403f3496edd7c8fc212f0" dependencies = [ - "cfg-if", - "futures-util", "js-sys", - "once_cell", "wasm-bindgen", - "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.114" +version = "0.2.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6" +checksum = "4e3a6c758eb2f701ed3d052ff5737f5bfe6614326ea7f3bbac7156192dc32e67" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7922,9 +7920,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.114" +version = "0.2.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3" +checksum = "921de2737904886b52bcbb237301552d05969a6f9c40d261eb0533c8b055fedf" dependencies = [ "bumpalo", "proc-macro2", @@ -7935,9 +7933,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.114" +version = "0.2.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16" +checksum = "a93e946af942b58934c604527337bad9ae33ba1d5c6900bbb41c2c07c2364a93" dependencies = [ "unicode-ident", ] @@ -8004,9 +8002,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.91" +version = "0.3.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9" +checksum = "84cde8507f4d7cfcb1185b8cb5890c494ffea65edbe1ba82cfd63661c805ed94" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/Cargo.toml b/Cargo.toml index 573388ac5..82d0b2335 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -191,13 +191,13 @@ storekey = "0.11" moka = { version = "0.12.15", features = ["future", "sync"] } ntest = "0.9.5" ipnet = "2.11.0" -wasm-bindgen = { version = "0.2.114" } -wasm-bindgen-futures = { version = "0.4.64" } -js-sys = { version = "0.3.91" } +wasm-bindgen = { version = "0.2.115" } +wasm-bindgen-futures = { version = "0.4.65" } +js-sys = { version = "0.3.92" } libc = "0.2.183" -web-sys = { version = "0.3.91" } -tsify-next = { version = "0.5", default-features = false, features = ["js"] } -serde-wasm-bindgen = "0.6" +web-sys = { version = "0.3.92" } +tsify-next = { version = "0.5.6", default-features = false, features = ["js"] } +serde-wasm-bindgen = "0.6.5" flate2 = "1.1.9" tar = "0.4" zip = { version = "8.2.0", default-features = false, features = ["deflate"] } diff --git a/docker/build/Dockerfile.prebuilt b/docker/build/Dockerfile.prebuilt index eefc3108d..67266330e 100644 --- a/docker/build/Dockerfile.prebuilt +++ 
b/docker/build/Dockerfile.prebuilt @@ -92,9 +92,9 @@ WORKDIR /data # Expose default port EXPOSE 8080 -# Health check uses busybox wget to avoid pulling extra packages into the runtime image. +# Health check: use wget GET to /dev/null (--spider sends HEAD which is incompatible with h2c auto-detect). HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \ - CMD ["/usr/local/bin/busybox", "wget", "--spider", "-q", "http://127.0.0.1:8080/health"] + CMD ["/usr/local/bin/busybox", "wget", "-q", "-O", "/dev/null", "http://127.0.0.1:8080/health"] # Default command: run server (looks for server.toml in current directory) CMD ["/usr/local/bin/kalamdb-server", "/config/server.toml"] diff --git a/docker/build/test-docker-image.sh b/docker/build/test-docker-image.sh index dd015f6c9..f159a4b97 100755 --- a/docker/build/test-docker-image.sh +++ b/docker/build/test-docker-image.sh @@ -120,7 +120,7 @@ main() { exit 1 fi - if docker exec "$CONTAINER_NAME" /usr/local/bin/busybox wget --spider -q "http://127.0.0.1:8080/health" &>/dev/null; then + if curl -sf "http://localhost:$TEST_PORT/health" > /dev/null 2>&1; then log_info "Server is ready! 
(took ${ELAPSED}s)" break fi diff --git a/docker/build/test-docker-startup.sh b/docker/build/test-docker-startup.sh index f8478caed..ee3216385 100755 --- a/docker/build/test-docker-startup.sh +++ b/docker/build/test-docker-startup.sh @@ -45,7 +45,7 @@ while true; do exit 1 fi - if docker exec "$CONTAINER_NAME" /usr/local/bin/busybox wget --spider -q http://127.0.0.1:8080/health >/dev/null 2>&1; then + if docker exec "$CONTAINER_NAME" /usr/local/bin/busybox wget -q -O /dev/null http://127.0.0.1:8080/health >/dev/null 2>&1; then echo "Docker startup test passed in ${ELAPSED}s" break fi diff --git a/link/src/compression.rs b/link/src/compression.rs index 072f2e8ea..754dbbc51 100644 --- a/link/src/compression.rs +++ b/link/src/compression.rs @@ -171,6 +171,21 @@ mod tests { assert_eq!(&*result, plain); } + #[test] + fn test_decompress_if_gzip_plain_returns_borrowed() { + // Non-gzip data should return a Cow::Borrowed (zero-copy). + let plain = b"not compressed"; + let result = decompress_if_gzip(plain); + assert!(matches!(result, std::borrow::Cow::Borrowed(_))); + } + + #[test] + fn test_decompress_if_gzip_empty() { + let result = decompress_if_gzip(&[]); + assert!(result.is_empty()); + assert!(matches!(result, std::borrow::Cow::Borrowed(_))); + } + #[test] fn test_decompress_gzip_with_limit_rejects_large_advertised_size() { // Minimal gzip payload with empty deflate body and a forged trailer diff --git a/link/src/timestamp.rs b/link/src/timestamp.rs index 6a4862dd4..a8380fc69 100644 --- a/link/src/timestamp.rs +++ b/link/src/timestamp.rs @@ -173,10 +173,19 @@ fn parse_offset_millis(bytes: &[u8], start: usize) -> Result { } } +/// Format timestamp parts to ISO 8601 string. +/// +/// Uses `write!` into a pre-sized `String` to avoid the formatting overhead +/// of `format!()` which goes through `fmt::Arguments` heap allocation. 
+#[inline] fn format_parts_iso8601(parts: UtcDateTimeParts, include_millis: bool, zulu: bool) -> String { + use std::fmt::Write; + // Max length: "YYYY-MM-DDTHH:MM:SS.mmm+00:00" = 29 chars + let mut buf = String::with_capacity(32); if include_millis { if zulu { - format!( + let _ = write!( + buf, "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}.{:03}Z", parts.year, parts.month, @@ -185,9 +194,10 @@ fn format_parts_iso8601(parts: UtcDateTimeParts, include_millis: bool, zulu: boo parts.minute, parts.second, parts.millisecond - ) + ); } else { - format!( + let _ = write!( + buf, "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}.{:03}+00:00", parts.year, parts.month, @@ -196,14 +206,16 @@ fn format_parts_iso8601(parts: UtcDateTimeParts, include_millis: bool, zulu: boo parts.minute, parts.second, parts.millisecond - ) + ); } } else { - format!( + let _ = write!( + buf, "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}Z", parts.year, parts.month, parts.day, parts.hour, parts.minute, parts.second - ) + ); } + buf } /// Timestamp format options. 
@@ -323,7 +335,12 @@ impl TimestampFormatter { /// Format as ISO 8601 date only: `2024-12-14` fn format_iso8601_date(&self, ms: i64) -> String { match split_timestamp(ms) { - Some(parts) => format!("{:04}-{:02}-{:02}", parts.year, parts.month, parts.day), + Some(parts) => { + use std::fmt::Write; + let mut buf = String::with_capacity(10); + let _ = write!(buf, "{:04}-{:02}-{:02}", parts.year, parts.month, parts.day); + buf + }, None => format!("Invalid timestamp: {}", ms), } } @@ -339,16 +356,22 @@ impl TimestampFormatter { /// Format as RFC 2822: `Fri, 14 Dec 2024 15:30:45 +0000` fn format_rfc2822(&self, ms: i64) -> String { match split_timestamp(ms) { - Some(parts) => format!( - "{}, {:02} {} {:04} {:02}:{:02}:{:02} +0000", - WEEKDAY_NAMES[usize::from(parts.weekday)], - parts.day, - MONTH_NAMES[usize::from(parts.month - 1)], - parts.year, - parts.hour, - parts.minute, - parts.second - ), + Some(parts) => { + use std::fmt::Write; + let mut buf = String::with_capacity(32); + let _ = write!( + buf, + "{}, {:02} {} {:04} {:02}:{:02}:{:02} +0000", + WEEKDAY_NAMES[usize::from(parts.weekday)], + parts.day, + MONTH_NAMES[usize::from(parts.month - 1)], + parts.year, + parts.hour, + parts.minute, + parts.second + ); + buf + }, None => format!("Invalid timestamp: {}", ms), } } @@ -569,4 +592,77 @@ mod tests { let ms = now(); assert!(ms > 1700000000000); } + + // ── Regression tests for write!-based formatting (zero-alloc rewrite) ── + + #[test] + fn test_format_iso8601_exact_output() { + let formatter = TimestampFormatter::new(TimestampFormat::Iso8601); + assert_eq!( + formatter.format(Some(1734211234567)), + "2024-12-14T21:20:34.567Z" + ); + } + + #[test] + fn test_format_iso8601_date_exact() { + let formatter = TimestampFormatter::new(TimestampFormat::Iso8601Date); + // Known epoch: 2024-01-01T00:00:00.000Z + assert_eq!(formatter.format(Some(1704067200000)), "2024-01-01"); + // End-of-year rollover + assert_eq!(formatter.format(Some(1735689599999)), "2024-12-31"); + } + + 
#[test] + fn test_format_rfc2822_exact() { + let formatter = TimestampFormatter::new(TimestampFormat::Rfc2822); + // Mon, 01 Jan 2024 00:00:00 +0000 + assert_eq!( + formatter.format(Some(1704067200000)), + "Mon, 01 Jan 2024 00:00:00 +0000" + ); + } + + #[test] + fn test_format_rfc3339_exact() { + let formatter = TimestampFormatter::new(TimestampFormat::Rfc3339); + assert_eq!( + formatter.format(Some(1734211234567)), + "2024-12-14T21:20:34.567+00:00" + ); + } + + #[test] + fn test_format_iso8601_datetime_no_millis() { + let formatter = TimestampFormatter::new(TimestampFormat::Iso8601DateTime); + assert_eq!( + formatter.format(Some(1734211234567)), + "2024-12-14T21:20:34Z" + ); + } + + #[test] + fn test_format_epoch_zero() { + let iso = TimestampFormatter::new(TimestampFormat::Iso8601); + assert_eq!(iso.format(Some(0)), "1970-01-01T00:00:00.000Z"); + + let date = TimestampFormatter::new(TimestampFormat::Iso8601Date); + assert_eq!(date.format(Some(0)), "1970-01-01"); + + let rfc = TimestampFormatter::new(TimestampFormat::Rfc2822); + assert_eq!(rfc.format(Some(0)), "Thu, 01 Jan 1970 00:00:00 +0000"); + } + + #[test] + fn test_format_consistency_across_formats() { + // All formats should agree on the date portion for the same timestamp + let ms = 1734211234567_i64; + let iso = TimestampFormatter::new(TimestampFormat::Iso8601).format(Some(ms)); + let date = TimestampFormatter::new(TimestampFormat::Iso8601Date).format(Some(ms)); + let rfc = TimestampFormatter::new(TimestampFormat::Rfc3339).format(Some(ms)); + + assert!(iso.starts_with("2024-12-14")); + assert_eq!(date, "2024-12-14"); + assert!(rfc.starts_with("2024-12-14")); + } } diff --git a/link/src/wasm/client.rs b/link/src/wasm/client.rs index e139b1d90..6e5d9ab31 100644 --- a/link/src/wasm/client.rs +++ b/link/src/wasm/client.rs @@ -26,6 +26,13 @@ use super::validation::{ quote_table_name, validate_column_name, validate_row_id, validate_sql_identifier, }; +// Pre-serialized ping message to avoid re-serializing 
`ClientMessage::Ping` +// on every keepalive tick. WASM is single-threaded, so a thread-local is safe. +thread_local! { + pub(crate) static PING_PAYLOAD: String = serde_json::to_string(&ClientMessage::Ping) + .expect("ClientMessage::Ping serialization is infallible"); +} + /// WASM-compatible KalamDB client with auto-reconnection support /// /// Supports multiple authentication methods: @@ -1212,9 +1219,9 @@ impl KalamClient { let ping_cb = Closure::wrap(Box::new(move || { if let Some(ws) = ws_ref.borrow().as_ref() { if ws.ready_state() == WebSocket::OPEN { - if let Ok(payload) = serde_json::to_string(&ClientMessage::Ping) { - let _ = ws.send_with_str(&payload); - } + PING_PAYLOAD.with(|payload| { + let _ = ws.send_with_str(payload); + }); } } }) as Box); diff --git a/link/src/wasm/helpers.rs b/link/src/wasm/helpers.rs index 2fe346203..173dc99fe 100644 --- a/link/src/wasm/helpers.rs +++ b/link/src/wasm/helpers.rs @@ -9,6 +9,7 @@ use web_sys::{Headers, MessageEvent, Request, RequestInit, RequestMode, Response use super::console_log; use crate::compression; +#[inline] pub(crate) fn ws_url_from_http_opts( base_url: &str, disable_compression: bool, @@ -33,6 +34,7 @@ pub(crate) fn ws_url_from_http_opts( /// /// Uses `DefaultHasher` (SipHash) — fast and collision-resistant enough for /// subscription deduplication. Not a cryptographic hash. +#[inline] pub(crate) fn subscription_hash(s: &str) -> u64 { let mut hasher = DefaultHasher::new(); s.hash(&mut hasher); @@ -133,38 +135,41 @@ pub(crate) async fn wasm_fetch( /// Handles text (`JsString`), binary (`ArrayBuffer` — decompressed via gzip), /// `Blob` fallback, and unknown types. Returns `None` when the payload cannot /// be decoded (with a diagnostic logged to the JS console). +/// +/// Optimized to use `decompress_if_gzip` which returns `Cow<[u8]>` — avoiding +/// a heap allocation when the payload is not gzip-compressed. 
+#[inline] pub(crate) fn decode_ws_message(e: &MessageEvent) -> Option { - if let Ok(txt) = e.data().dyn_into::() { + let data = e.data(); + + if let Ok(txt) = data.dyn_into::() { return Some(String::from(txt)); } - if let Ok(array_buffer) = e.data().dyn_into::() { + let data = e.data(); + if let Ok(array_buffer) = data.dyn_into::() { let uint8_array = js_sys::Uint8Array::new(&array_buffer); - let data = uint8_array.to_vec(); - - return match compression::decompress_gzip(&data) { - Ok(decompressed) => match String::from_utf8(decompressed) { - Ok(s) => Some(s), - Err(e) => { - console_log(&format!( - "KalamClient: Invalid UTF-8 in decompressed message: {}", - e - )); - None - }, - }, + let raw = uint8_array.to_vec(); + + let decompressed = compression::decompress_if_gzip(&raw); + return match std::str::from_utf8(&decompressed) { + Ok(s) => Some(s.to_owned()), Err(e) => { - console_log(&format!("KalamClient: Failed to decompress message: {}", e)); + console_log(&format!( + "KalamClient: Invalid UTF-8 in message: {}", + e + )); None }, }; } - if e.data().is_instance_of::() { + let data = e.data(); + if data.is_instance_of::() { console_log( "KalamClient: Received Blob message - binary mode may be misconfigured. 
Attempting to read as text.", ); - return e.data().as_string(); + return data.as_string(); } // Unknown message type — log diagnostics diff --git a/link/src/wasm/reconnect.rs b/link/src/wasm/reconnect.rs index 708b61594..bfd361950 100644 --- a/link/src/wasm/reconnect.rs +++ b/link/src/wasm/reconnect.rs @@ -172,9 +172,9 @@ pub(crate) fn restart_ping_timer( let ping_cb = Closure::wrap(Box::new(move || { if let Some(ws) = ws_clone.borrow().as_ref() { if ws.ready_state() == WebSocket::OPEN { - if let Ok(payload) = serde_json::to_string(&ClientMessage::Ping) { - let _ = ws.send_with_str(&payload); - } + super::client::PING_PAYLOAD.with(|payload| { + let _ = ws.send_with_str(payload); + }); } } }) as Box); diff --git a/link/src/wasm/state.rs b/link/src/wasm/state.rs index b4d21259b..067d51990 100644 --- a/link/src/wasm/state.rs +++ b/link/src/wasm/state.rs @@ -64,6 +64,7 @@ pub(crate) struct WasmLiveRowsOptions { pub(crate) subscription_options: Option, } +#[inline] pub(crate) fn track_subscription_checkpoint(last_seq_id: &mut Option, event: &ChangeEvent) { match event { ChangeEvent::Ack { batch_control, .. } => { @@ -95,6 +96,7 @@ pub(crate) fn track_subscription_checkpoint(last_seq_id: &mut Option, eve } } +#[inline] pub(crate) fn filter_subscription_event( options: &SubscriptionOptions, event: &ServerMessage, @@ -103,6 +105,7 @@ pub(crate) fn filter_subscription_event( crate::subscription::filter_replayed_event(change_event, options.from) } +#[inline] pub(crate) fn callback_payload( mode: &mut SubscriptionCallbackMode, event: &ChangeEvent, diff --git a/link/src/wasm/validation.rs b/link/src/wasm/validation.rs index bd6bc3cde..0e22f58cd 100644 --- a/link/src/wasm/validation.rs +++ b/link/src/wasm/validation.rs @@ -3,6 +3,7 @@ use wasm_bindgen::prelude::JsValue; /// Validate a SQL identifier (table name, column name) to prevent SQL injection. /// Only allows: letters, numbers, underscores, and dots (for namespace.table format). 
/// Must start with a letter or underscore. +#[inline] pub(crate) fn validate_sql_identifier(name: &str, context: &str) -> Result<(), JsValue> { if name.is_empty() { return Err(JsValue::from_str(&format!("{} cannot be empty", context))); @@ -39,6 +40,9 @@ pub(crate) fn validate_sql_identifier(name: &str, context: &str) -> Result<(), J /// Validate a row ID to prevent SQL injection. /// Accepts: UUIDs, integers, or alphanumeric strings with underscores/hyphens. +/// +/// Optimized to avoid heap allocation: uses case-insensitive byte matching +/// instead of `.to_uppercase()`. pub(crate) fn validate_row_id(row_id: &str) -> Result<(), JsValue> { if row_id.is_empty() { return Err(JsValue::from_str("Row ID cannot be empty")); @@ -47,16 +51,30 @@ pub(crate) fn validate_row_id(row_id: &str) -> Result<(), JsValue> { return Err(JsValue::from_str("Row ID too long (max 128 chars)")); } - // Check for SQL injection patterns - let dangerous_patterns = [ - ";", "--", "/*", "*/", "'", "\"", "DROP", "DELETE", "UPDATE", "INSERT", "UNION", "SELECT", + // Check for SQL injection patterns using case-insensitive matching + // without allocating a new uppercase string. 
+ let dangerous_patterns: &[&[u8]] = &[ + b";", b"--", b"/*", b"*/", b"'", b"\"", ]; - let upper = row_id.to_uppercase(); + let dangerous_keywords: &[&[u8]] = &[ + b"DROP", b"DELETE", b"UPDATE", b"INSERT", b"UNION", b"SELECT", + ]; + let bytes = row_id.as_bytes(); + for pattern in dangerous_patterns { - if upper.contains(pattern) { + if contains_bytes(bytes, pattern) { return Err(JsValue::from_str(&format!( "Row ID contains forbidden pattern '{}'", - pattern + std::str::from_utf8(pattern).unwrap_or("?") + ))); + } + } + + for keyword in dangerous_keywords { + if contains_bytes_case_insensitive(bytes, keyword) { + return Err(JsValue::from_str(&format!( + "Row ID contains forbidden pattern '{}'", + std::str::from_utf8(keyword).unwrap_or("?") ))); } } @@ -71,13 +89,33 @@ pub(crate) fn validate_row_id(row_id: &str) -> Result<(), JsValue> { Ok(()) } +/// Check if `haystack` contains `needle` (exact byte match). +#[inline] +fn contains_bytes(haystack: &[u8], needle: &[u8]) -> bool { + haystack.windows(needle.len()).any(|w| w == needle) +} + +/// Check if `haystack` contains `needle` using ASCII case-insensitive matching. +/// `needle` MUST be all-uppercase ASCII. +#[inline] +fn contains_bytes_case_insensitive(haystack: &[u8], needle: &[u8]) -> bool { + if needle.len() > haystack.len() { + return false; + } + haystack.windows(needle.len()).any(|w| { + w.iter().zip(needle.iter()).all(|(h, n)| h.to_ascii_uppercase() == *n) + }) +} + /// Validate a column name for INSERT operations +#[inline] pub(crate) fn validate_column_name(name: &str) -> Result<(), JsValue> { validate_sql_identifier(name, "Column name") } /// Quote a table name properly, handling namespace.table format. /// Converts "namespace.table" to "namespace"."table" for correct SQL parsing. 
+#[inline] pub(crate) fn quote_table_name(table_name: &str) -> String { if let Some(dot_pos) = table_name.find('.') { let namespace = &table_name[..dot_pos]; From 84c97ec060d790335a6a90f67dd3f635e5a76315 Mon Sep 17 00:00:00 2001 From: jamals86 Date: Sun, 29 Mar 2026 17:21:54 +0300 Subject: [PATCH 08/12] Fixes to release workflow --- docker/build/test-docker-image.sh | 2 +- link/sdks/typescript/tests/basic.test.mjs | 4 ++-- .../typescript/tests/readme-examples.test.mjs | 18 ++++++------------ 3 files changed, 9 insertions(+), 15 deletions(-) diff --git a/docker/build/test-docker-image.sh b/docker/build/test-docker-image.sh index f159a4b97..40514305b 100755 --- a/docker/build/test-docker-image.sh +++ b/docker/build/test-docker-image.sh @@ -120,7 +120,7 @@ main() { exit 1 fi - if curl -sf "http://localhost:$TEST_PORT/health" > /dev/null 2>&1; then + if docker exec "$CONTAINER_NAME" /usr/local/bin/busybox wget -q -O /dev/null "http://127.0.0.1:8080/health" > /dev/null 2>&1; then log_info "Server is ready! 
(took ${ELAPSED}s)" break fi diff --git a/link/sdks/typescript/tests/basic.test.mjs b/link/sdks/typescript/tests/basic.test.mjs index 0ed7961fc..f1eb19ed5 100755 --- a/link/sdks/typescript/tests/basic.test.mjs +++ b/link/sdks/typescript/tests/basic.test.mjs @@ -58,8 +58,8 @@ async function runTests() { // For Node.js, we need to pass the WASM file path explicitly const wasmPath = join(wasmOutPath, 'kalam_link_bg.wasm'); const wasmBuffer = await readFile(wasmPath); - - await init(wasmBuffer); + + await init({ module_or_path: wasmBuffer }); console.log(' ✓ WASM initialized successfully'); passed++; } catch (error) { diff --git a/link/sdks/typescript/tests/readme-examples.test.mjs b/link/sdks/typescript/tests/readme-examples.test.mjs index 965ef37b5..ce7e7740d 100644 --- a/link/sdks/typescript/tests/readme-examples.test.mjs +++ b/link/sdks/typescript/tests/readme-examples.test.mjs @@ -207,16 +207,6 @@ test('README executeAsUser example wraps SQL for tenant-safe writes', async () = test('README queryWithFiles example posts multipart data with auth header', async () => { const originalFetch = globalThis.fetch; - const originalFile = globalThis.File; - - class TestFile extends Blob { - constructor(parts, name, options) { - super(parts, options); - this.name = name; - } - } - - globalThis.File = TestFile; let fetchCall; globalThis.fetch = async (url, options) => { @@ -238,9 +228,14 @@ test('README queryWithFiles example posts multipart data with auth header', asyn client.initialized = true; client.auth = Auth.jwt('token-123'); + // Use the native File class (available in Node.js 20+) so that FormData + // stores and returns a proper File/Blob without undici wrapping it in an + // internal class that is not instanceof globalThis.Blob. 
+ const testFile = new File(['hello world'], 'note.txt', { type: 'text/plain' }); + await client.queryWithFiles( 'INSERT INTO support.attachments (id, file_data) VALUES ($1, FILE("upload"))', - { upload: new TestFile(['hello world'], 'note.txt', { type: 'text/plain' }) }, + { upload: testFile }, ['att_1'], ); @@ -255,7 +250,6 @@ test('README queryWithFiles example posts multipart data with auth header', asyn assert.equal(uploaded.name, 'note.txt'); } finally { globalThis.fetch = originalFetch; - globalThis.File = originalFile; } }); From 4e7c0051114f34c7b187c7d28ccbbc9e6f0e2a1c Mon Sep 17 00:00:00 2001 From: jamals86 Date: Sun, 29 Mar 2026 18:08:02 +0300 Subject: [PATCH 09/12] Fixes to pg extension pushing --- pg/docker/docker-compose.test.yml | 2 +- pg/docker/docker-compose.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pg/docker/docker-compose.test.yml b/pg/docker/docker-compose.test.yml index 209e22fe6..a1b344c1e 100644 --- a/pg/docker/docker-compose.test.yml +++ b/pg/docker/docker-compose.test.yml @@ -35,7 +35,7 @@ services: restart: "no" command: ["/usr/local/bin/kalamdb-server", "/config/server.toml"] healthcheck: - test: ["CMD-SHELL", "curl -sf http://127.0.0.1:8080/v1/api/healthcheck || exit 1"] + test: ["CMD", "/usr/local/bin/busybox", "wget", "-q", "-O", "/dev/null", "http://127.0.0.1:8080/v1/api/healthcheck"] interval: 5s timeout: 3s start_period: 8s diff --git a/pg/docker/docker-compose.yml b/pg/docker/docker-compose.yml index fdb5c0abe..bffd49e28 100644 --- a/pg/docker/docker-compose.yml +++ b/pg/docker/docker-compose.yml @@ -39,7 +39,7 @@ services: restart: unless-stopped command: ["/usr/local/bin/kalamdb-server", "/config/server.toml"] healthcheck: - test: ["CMD", "/usr/local/bin/busybox", "wget", "--spider", "-q", "http://127.0.0.1:8080/v1/api/healthcheck"] + test: ["CMD", "/usr/local/bin/busybox", "wget", "-q", "-O", "/dev/null", "http://127.0.0.1:8080/v1/api/healthcheck"] interval: 10s timeout: 5s start_period: 10s From 
0229b28aca17e41ba3f5f944c3a59ccd674426b8 Mon Sep 17 00:00:00 2001 From: jamals86 Date: Mon, 30 Mar 2026 14:32:59 +0300 Subject: [PATCH 10/12] Update Docker image description & add README Refine the OCI image description string in pg/docker/Dockerfile, Dockerfile.release-pg, and Dockerfile.runtime to improve clarity (changed "for connecting PostgreSQL foreign tables to a remote KalamDB server" to "so you can connect PostgreSQL foreign tables to a remote KalamDB server"). Add pg/docker/README.md documenting the pg-kalam Docker image, quick start, and usage examples. Remove the now-obsolete pg/docker/image-description.txt file. --- pg/docker/Dockerfile | 2 +- pg/docker/Dockerfile.release-pg | 2 +- pg/docker/Dockerfile.runtime | 2 +- pg/docker/README.md | 42 +++++++++++++++++++++++++++++++++ pg/docker/image-description.txt | 1 - 5 files changed, 45 insertions(+), 4 deletions(-) create mode 100644 pg/docker/README.md delete mode 100644 pg/docker/image-description.txt diff --git a/pg/docker/Dockerfile b/pg/docker/Dockerfile index f023df689..7318ebbb7 100644 --- a/pg/docker/Dockerfile +++ b/pg/docker/Dockerfile @@ -15,7 +15,7 @@ ARG POSTGRES_BASE_IMAGE=public.ecr.aws/docker/library/postgres:16-bookworm ARG PG_MAJOR=16 ARG PG_EXTENSION_FLAVOR=pg16 ARG PGRX_VERSION=0.17.0 -ARG OCI_IMAGE_DESCRIPTION="PostgreSQL with the pg_kalam extension preinstalled for connecting PostgreSQL foreign tables to a remote KalamDB server." +ARG OCI_IMAGE_DESCRIPTION="PostgreSQL with the pg_kalam extension preinstalled so you can connect PostgreSQL foreign tables to a remote KalamDB server." 
# --------------------------------------------------------------------------- # Stage 1: Builder — Rust toolchain + selected PG dev headers → .so + .sql diff --git a/pg/docker/Dockerfile.release-pg b/pg/docker/Dockerfile.release-pg index 3fe16d171..e740b13f8 100644 --- a/pg/docker/Dockerfile.release-pg +++ b/pg/docker/Dockerfile.release-pg @@ -6,7 +6,7 @@ # docker build -f pg/docker/Dockerfile.release-pg -t pg-kalam:latest ARG POSTGRES_BASE_IMAGE=public.ecr.aws/docker/library/postgres:16-bookworm ARG PG_MAJOR=16 -ARG OCI_IMAGE_DESCRIPTION="PostgreSQL with the pg_kalam extension preinstalled for connecting PostgreSQL foreign tables to a remote KalamDB server." +ARG OCI_IMAGE_DESCRIPTION="PostgreSQL with the pg_kalam extension preinstalled so you can connect PostgreSQL foreign tables to a remote KalamDB server." FROM ${POSTGRES_BASE_IMAGE} ARG PG_MAJOR diff --git a/pg/docker/Dockerfile.runtime b/pg/docker/Dockerfile.runtime index ee883c5a8..4296646f9 100644 --- a/pg/docker/Dockerfile.runtime +++ b/pg/docker/Dockerfile.runtime @@ -5,7 +5,7 @@ # docker build -f pg/docker/Dockerfile.runtime -t pg-kalam:latest pg/docker ARG POSTGRES_BASE_IMAGE=public.ecr.aws/docker/library/postgres:16-bookworm ARG PG_MAJOR=16 -ARG OCI_IMAGE_DESCRIPTION="PostgreSQL with the pg_kalam extension preinstalled for connecting PostgreSQL foreign tables to a remote KalamDB server." +ARG OCI_IMAGE_DESCRIPTION="PostgreSQL with the pg_kalam extension preinstalled so you can connect PostgreSQL foreign tables to a remote KalamDB server." FROM ${POSTGRES_BASE_IMAGE} ARG PG_MAJOR diff --git a/pg/docker/README.md b/pg/docker/README.md new file mode 100644 index 000000000..a1315a3cb --- /dev/null +++ b/pg/docker/README.md @@ -0,0 +1,42 @@ +# pg-kalam on Docker Hub + +PostgreSQL with the pg_kalam extension preinstalled so you can connect PostgreSQL foreign tables to a remote KalamDB server. + +This image packages stock PostgreSQL with the `pg_kalam` foreign data wrapper extension already installed. 
+ +## What you get + +- A PostgreSQL runtime image with `pg_kalam.so`, `pg_kalam.control`, and the extension SQL files already installed +- `CREATE EXTENSION pg_kalam;` support without a separate extension build step in the container +- A ready-to-run base for local testing, CI, and production images that need PostgreSQL to talk to KalamDB + +## Quick start + +Pull the image: + +```bash +docker pull jamals86/pg-kalam:latest +``` + +Run PostgreSQL with the extension available: + +```bash +docker run --name pg-kalam \ + -e POSTGRES_DB=kalamdb \ + -e POSTGRES_USER=kalamdb \ + -e POSTGRES_PASSWORD=kalamdb123 \ + -p 5432:5432 \ + -d jamals86/pg-kalam:latest +``` + +Then enable the extension: + +```sql +CREATE EXTENSION IF NOT EXISTS pg_kalam; +``` + +## Connect to KalamDB + +After the extension is enabled, create a foreign server that points at your running KalamDB instance. + +For a full local stack with PostgreSQL and KalamDB together, use the compose setup documented in [pg/README.md](../README.md). \ No newline at end of file diff --git a/pg/docker/image-description.txt b/pg/docker/image-description.txt deleted file mode 100644 index 1aff90d51..000000000 --- a/pg/docker/image-description.txt +++ /dev/null @@ -1 +0,0 @@ -PostgreSQL with the pg_kalam extension preinstalled for connecting PostgreSQL foreign tables to a remote KalamDB server. \ No newline at end of file From e6e5f8ab4bc004527afedc2bfbe9c67858f7eb86 Mon Sep 17 00:00:00 2001 From: jamals86 Date: Mon, 30 Mar 2026 23:44:53 +0300 Subject: [PATCH 11/12] Add MessagePack WS, header pre-auth, and new crates Introduce protocol negotiation and MessagePack support for WebSocket connections: add send_message/send_data_binary helpers, handle msgpack binary frames (with optional gzip), and respect per-connection serialization/compression options parsed from the upgrade query string. 
Add a header-auth fast path to authenticate JWTs during the HTTP upgrade and consolidate post-validation steps into a common complete_ws_auth/send_current_auth_success flow. Split handler and job code into new workspace crates (kalamdb-handlers, kalamdb-jobs) and wire them into the workspace/backend Cargo.toml. Update websocket handler to use protocol-aware messaging, support msgpack deserialization, and add tests for protocol parsing. Also update CI workflow to wait for the KalamDB container to become healthy before starting Postgres. --- .github/workflows/release.yml | 36 +- Cargo.lock | 70 ++ Cargo.toml | 2 + backend/Cargo.toml | 2 + backend/crates/kalamdb-api/Cargo.toml | 2 + .../src/handlers/ws/events/auth.rs | 109 ++- .../src/handlers/ws/events/batch.rs | 5 +- .../kalamdb-api/src/handlers/ws/events/mod.rs | 41 ++ .../src/handlers/ws/events/subscription.rs | 7 +- .../kalamdb-api/src/handlers/ws/handler.rs | 269 ++++++- backend/crates/kalamdb-commons/Cargo.toml | 5 +- .../crates/kalamdb-commons/src/websocket.rs | 217 ++++++ backend/crates/kalamdb-core/Cargo.toml | 4 + .../crates/kalamdb-core/src/app_context.rs | 92 ++- .../kalamdb-core/src/applier/executor/ddl.rs | 5 +- .../src/applier/raft/provider_meta_applier.rs | 2 +- backend/crates/kalamdb-core/src/job_waker.rs | 12 + backend/crates/kalamdb-core/src/jobs/mod.rs | 75 -- backend/crates/kalamdb-core/src/lib.rs | 8 +- .../src/live/helpers/initial_data.rs | 16 +- .../kalamdb-core/src/live/manager/tests.rs | 5 +- .../src/live/models/connection.rs | 31 + .../crates/kalamdb-core/src/operations/mod.rs | 1 + .../kalamdb-core/src/operations/service.rs | 8 +- .../src/operations/table_cleanup.rs | 193 +++++ .../kalamdb-core/src/slow_query_logger.rs | 2 +- .../src/sql/executor/handler_adapter.rs | 76 +- .../src/sql/executor/handler_registry.rs | 676 ++---------------- .../src/sql/executor/handlers/mod.rs | 16 +- .../src/sql/executor/helpers/mod.rs | 8 - .../src/sql/executor/sql_executor.rs | 28 +- 
backend/crates/kalamdb-core/src/sql/mod.rs | 2 +- .../crates/kalamdb-core/src/test_helpers.rs | 50 +- .../tests/test_context_functions.rs | 9 +- .../kalamdb-core/tests/test_cte_support.rs | 24 +- .../kalamdb-core/tests/test_typed_handlers.rs | 45 +- .../tests/test_vector_search_sql.rs | 53 +- backend/crates/kalamdb-handlers/Cargo.toml | 69 ++ .../src}/backup/backup_database.rs | 11 +- .../src}/backup/mod.rs | 0 .../src}/backup/restore_database.rs | 11 +- .../src}/cluster/clear.rs | 6 +- .../src}/cluster/list.rs | 6 +- .../src}/cluster/mod.rs | 0 .../src}/cluster/purge.rs | 6 +- .../src}/cluster/snapshot.rs | 6 +- .../src}/cluster/stepdown.rs | 6 +- .../src}/cluster/transfer_leader.rs | 6 +- .../src}/cluster/trigger_election.rs | 6 +- .../src}/compact/compact_all.rs | 11 +- .../src}/compact/compact_table.rs | 11 +- .../src}/compact/mod.rs | 0 .../src}/export/export_user_data.rs | 11 +- .../src}/export/mod.rs | 0 .../src}/export/show_export.rs | 9 +- .../src}/flush/flush_all.rs | 11 +- .../src}/flush/flush_table.rs | 11 +- .../src}/flush/mod.rs | 0 .../src/helpers/ast_parsing.rs | 143 ++++ .../src}/helpers/audit.rs | 6 +- .../src}/helpers/guards.rs | 4 +- .../kalamdb-handlers/src/helpers/mod.rs | 9 + .../src}/helpers/namespace_helpers.rs | 8 +- .../src}/helpers/storage.rs | 10 +- .../src}/helpers/table_creation.rs | 6 +- .../src}/helpers/tables.rs | 6 +- .../src}/jobs/kill_job.rs | 11 +- .../src}/jobs/kill_live_query.rs | 10 +- .../src}/jobs/mod.rs | 0 backend/crates/kalamdb-handlers/src/lib.rs | 627 ++++++++++++++++ .../src}/namespace/alter.rs | 16 +- .../src}/namespace/create.rs | 16 +- .../src}/namespace/drop.rs | 18 +- .../src}/namespace/mod.rs | 0 .../src}/namespace/show.rs | 12 +- .../src}/namespace/use_namespace.rs | 10 +- .../src}/storage/alter.rs | 14 +- .../src}/storage/check.rs | 14 +- .../src}/storage/create.rs | 16 +- .../src}/storage/drop.rs | 12 +- .../src}/storage/mod.rs | 0 .../src}/storage/show.rs | 10 +- .../src}/subscription/mod.rs | 0 
.../src}/subscription/subscribe.rs | 10 +- .../src}/system/mod.rs | 0 .../src}/system/show_manifest_cache.rs | 18 +- .../src}/table/alter.rs | 16 +- .../src}/table/create.rs | 16 +- .../src}/table/describe.rs | 12 +- .../src}/table/drop.rs | 212 +----- .../src}/table/mod.rs | 0 .../src}/table/show.rs | 12 +- .../src}/table/show_stats.rs | 12 +- .../src}/topics/ack.rs | 8 +- .../src}/topics/add_source.rs | 8 +- .../src}/topics/clear.rs | 11 +- .../src}/topics/consume.rs | 8 +- .../src}/topics/create.rs | 10 +- .../src}/topics/drop.rs | 11 +- .../src}/topics/mod.rs | 0 .../src}/user/alter.rs | 10 +- .../src}/user/create.rs | 12 +- .../src}/user/drop.rs | 10 +- .../src}/user/mod.rs | 0 .../src}/view/create.rs | 12 +- .../src}/view/mod.rs | 0 backend/crates/kalamdb-jobs/Cargo.toml | 56 ++ .../src}/executors/backup.rs | 4 +- .../src}/executors/cleanup.rs | 32 +- .../src}/executors/compact.rs | 6 +- .../src}/executors/executor_trait.rs | 6 +- .../src}/executors/flush.rs | 20 +- .../src}/executors/job_cleanup.rs | 4 +- .../src}/executors/manifest_eviction.rs | 6 +- .../src}/executors/mod.rs | 0 .../src}/executors/registry.rs | 10 +- .../src}/executors/restore.rs | 4 +- .../src}/executors/retention.rs | 4 +- .../src}/executors/shared_table_cleanup.rs | 6 +- .../src}/executors/stream_eviction.rs | 22 +- .../src}/executors/topic_cleanup.rs | 4 +- .../src}/executors/topic_retention.rs | 4 +- .../src}/executors/user_cleanup.rs | 4 +- .../src}/executors/user_export.rs | 9 +- .../src}/executors/vector_index.rs | 6 +- .../src}/flush_scheduler.rs | 8 +- .../src}/health_monitor.rs | 4 +- .../src}/jobs_manager/actions.rs | 6 +- .../src}/jobs_manager/mod.rs | 0 .../src}/jobs_manager/queries.rs | 4 +- .../src}/jobs_manager/runner.rs | 9 +- .../src}/jobs_manager/types.rs | 4 +- .../src}/jobs_manager/utils.rs | 4 +- .../src}/leader_failover.rs | 4 +- .../jobs => kalamdb-jobs/src}/leader_guard.rs | 2 +- backend/crates/kalamdb-jobs/src/lib.rs | 94 +++ .../src}/stream_eviction.rs | 8 +- 
backend/src/lifecycle.rs | 14 +- backend/tests/common/testserver/flush.rs | 5 +- .../misc/auth/test_password_complexity.rs | 5 +- .../misc/system/test_topic_cleanup_job.rs | 5 +- .../test_flush_policy_verification_http.rs | 3 +- cli/full_smoke_output.txt | 201 ++++++ .../usecases/smoke_test_websocket_capacity.rs | 80 ++- docs/Notes.md | 1 + link/Cargo.toml | 1 + link/kalam-link-dart/src/api.rs | 63 +- link/sdks/dart/README.md | 29 + link/sdks/dart/lib/kalam_link.dart | 2 +- link/sdks/typescript/README.md | 6 +- link/src/connection/mod.rs | 7 +- link/src/connection/models/client_message.rs | 5 + .../connection/models/connection_options.rs | 21 + link/src/connection/models/mod.rs | 2 + link/src/connection/models/protocol.rs | 95 +++ link/src/connection/models/server_message.rs | 4 + link/src/connection/shared.rs | 150 ++-- link/src/connection/websocket.rs | 189 ++++- link/src/models/mod.rs | 3 +- link/src/models/tests.rs | 1 + link/src/wasm/auth.rs | 8 +- link/src/wasm/client.rs | 243 ++++--- link/src/wasm/helpers.rs | 39 + link/src/wasm/reconnect.rs | 27 +- link/tests/proxied/rapid_flap.rs | 180 +++++ pg/crates/kalam-pg-client/Cargo.toml | 1 + .../kalam-pg-client/tests/connectivity.rs | 30 +- pg/docker/docker-compose.test.yml | 4 +- pg/docker/image-description.txt | 1 + pg/docker/test.sh | 20 +- 170 files changed, 3776 insertions(+), 1812 deletions(-) create mode 100644 backend/crates/kalamdb-core/src/job_waker.rs delete mode 100644 backend/crates/kalamdb-core/src/jobs/mod.rs create mode 100644 backend/crates/kalamdb-core/src/operations/table_cleanup.rs create mode 100644 backend/crates/kalamdb-handlers/Cargo.toml rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/backup/backup_database.rs (86%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/backup/mod.rs (100%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/backup/restore_database.rs (89%) rename 
backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/cluster/clear.rs (97%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/cluster/list.rs (99%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/cluster/mod.rs (100%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/cluster/purge.rs (94%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/cluster/snapshot.rs (96%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/cluster/stepdown.rs (94%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/cluster/transfer_leader.rs (95%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/cluster/trigger_election.rs (95%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/compact/compact_all.rs (91%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/compact/compact_table.rs (90%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/compact/mod.rs (100%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/export/export_user_data.rs (86%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/export/mod.rs (100%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/export/show_export.rs (95%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/flush/flush_all.rs (94%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/flush/flush_table.rs (92%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/flush/mod.rs (100%) create mode 100644 backend/crates/kalamdb-handlers/src/helpers/ast_parsing.rs 
rename backend/crates/{kalamdb-core/src/sql/executor => kalamdb-handlers/src}/helpers/audit.rs (98%) rename backend/crates/{kalamdb-core/src/sql/executor => kalamdb-handlers/src}/helpers/guards.rs (98%) create mode 100644 backend/crates/kalamdb-handlers/src/helpers/mod.rs rename backend/crates/{kalamdb-core/src/sql/executor => kalamdb-handlers/src}/helpers/namespace_helpers.rs (96%) rename backend/crates/{kalamdb-core/src/sql/executor => kalamdb-handlers/src}/helpers/storage.rs (97%) rename backend/crates/{kalamdb-core/src/sql/executor => kalamdb-handlers/src}/helpers/table_creation.rs (99%) rename backend/crates/{kalamdb-core/src/sql/executor => kalamdb-handlers/src}/helpers/tables.rs (98%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/jobs/kill_job.rs (84%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/jobs/kill_live_query.rs (85%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/jobs/mod.rs (100%) create mode 100644 backend/crates/kalamdb-handlers/src/lib.rs rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/namespace/alter.rs (91%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/namespace/create.rs (95%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/namespace/drop.rs (93%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/namespace/mod.rs (100%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/namespace/show.rs (89%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/namespace/use_namespace.rs (91%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/storage/alter.rs (95%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/storage/check.rs (95%) 
rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/storage/create.rs (95%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/storage/drop.rs (95%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/storage/mod.rs (100%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/storage/show.rs (89%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/subscription/mod.rs (100%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/subscription/subscribe.rs (91%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/system/mod.rs (100%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/system/show_manifest_cache.rs (87%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/table/alter.rs (98%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/table/create.rs (96%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/table/describe.rs (88%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/table/drop.rs (69%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/table/mod.rs (100%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/table/show.rs (93%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/table/show_stats.rs (92%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/topics/ack.rs (94%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/topics/add_source.rs (93%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/topics/clear.rs (90%) 
rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/topics/consume.rs (96%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/topics/create.rs (95%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/topics/drop.rs (89%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/topics/mod.rs (100%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/user/alter.rs (94%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/user/create.rs (95%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/user/drop.rs (90%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/user/mod.rs (100%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/view/create.rs (94%) rename backend/crates/{kalamdb-core/src/sql/executor/handlers => kalamdb-handlers/src}/view/mod.rs (100%) create mode 100644 backend/crates/kalamdb-jobs/Cargo.toml rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/executors/backup.rs (98%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/executors/cleanup.rs (86%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/executors/compact.rs (95%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/executors/executor_trait.rs (98%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/executors/flush.rs (95%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/executors/job_cleanup.rs (96%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/executors/manifest_eviction.rs (96%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/executors/mod.rs (100%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/executors/registry.rs (98%) rename 
backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/executors/restore.rs (98%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/executors/retention.rs (98%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/executors/shared_table_cleanup.rs (88%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/executors/stream_eviction.rs (96%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/executors/topic_cleanup.rs (98%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/executors/topic_retention.rs (98%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/executors/user_cleanup.rs (96%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/executors/user_export.rs (98%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/executors/vector_index.rs (95%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/flush_scheduler.rs (97%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/health_monitor.rs (91%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/jobs_manager/actions.rs (98%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/jobs_manager/mod.rs (100%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/jobs_manager/queries.rs (95%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/jobs_manager/runner.rs (99%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/jobs_manager/types.rs (97%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/jobs_manager/utils.rs (98%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/leader_failover.rs (99%) rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/leader_guard.rs (99%) create mode 100644 backend/crates/kalamdb-jobs/src/lib.rs rename backend/crates/{kalamdb-core/src/jobs => kalamdb-jobs/src}/stream_eviction.rs (95%) create mode 100644 
cli/full_smoke_output.txt create mode 100644 link/src/connection/models/protocol.rs create mode 100644 link/tests/proxied/rapid_flap.rs create mode 100644 pg/docker/image-description.txt diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index d0137b4cc..6a942aaec 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1894,7 +1894,41 @@ jobs: docker pull "$PG_IMAGE" docker compose -f "$COMPOSE_FILE" -p "$COMPOSE_PROJECT" down -v --remove-orphans 2>/dev/null || true - docker compose -f "$COMPOSE_FILE" -p "$COMPOSE_PROJECT" up -d + + docker compose -f "$COMPOSE_FILE" -p "$COMPOSE_PROJECT" up -d kalamdb + + KALAMDB_CONTAINER_ID="$(docker compose -f "$COMPOSE_FILE" -p "$COMPOSE_PROJECT" ps -q kalamdb)" + if [[ -z "$KALAMDB_CONTAINER_ID" ]]; then + echo "Failed to resolve kalamdb container id" >&2 + docker compose -f "$COMPOSE_FILE" -p "$COMPOSE_PROJECT" ps || true + echo "pg_docker_tests_passed=false" >> "$GITHUB_OUTPUT" + exit 1 + fi + + KALAMDB_READY=false + for i in {1..90}; do + if docker exec "$KALAMDB_CONTAINER_ID" /usr/local/bin/busybox wget -q -O /dev/null http://127.0.0.1:8080/health >/dev/null 2>&1; then + KALAMDB_READY=true + break + fi + + if [[ "$(docker inspect --format '{{.State.Status}}' "$KALAMDB_CONTAINER_ID")" != "running" ]]; then + echo "KalamDB container stopped before becoming ready" >&2 + docker logs "$KALAMDB_CONTAINER_ID" || true + break + fi + + sleep 1 + done + + if [[ "$KALAMDB_READY" != "true" ]]; then + echo "KalamDB container did not become ready in time" >&2 + docker logs "$KALAMDB_CONTAINER_ID" || true + echo "pg_docker_tests_passed=false" >> "$GITHUB_OUTPUT" + exit 1 + fi + + docker compose -f "$COMPOSE_FILE" -p "$COMPOSE_PROJECT" up -d postgres if PGHOST=127.0.0.1 \ PGPORT="$KALAMDB_TEST_PG_PORT" \ diff --git a/Cargo.lock b/Cargo.lock index c2d9002fa..d012395f4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3792,6 +3792,7 @@ dependencies = [ "log", "miniz_oxide 0.9.1", "reqwest 
0.13.2", + "rmp-serde", "serde", "serde-wasm-bindgen", "serde_json", @@ -3842,6 +3843,7 @@ dependencies = [ "ntest", "serde_json", "tokio", + "tokio-stream", "tonic", ] @@ -3917,6 +3919,7 @@ dependencies = [ "kalamdb-configs", "kalamdb-core", "kalamdb-filestore", + "kalamdb-jobs", "kalamdb-raft", "kalamdb-session", "kalamdb-sql", @@ -3926,6 +3929,7 @@ dependencies = [ "mime_guess", "moka", "parking_lot", + "rmp-serde", "rust-embed", "serde", "serde_json", @@ -3979,6 +3983,7 @@ dependencies = [ "nanoid", "once_cell", "parking_lot", + "rmp-serde", "serde", "serde_json", "sha2 0.11.0", @@ -4016,6 +4021,8 @@ dependencies = [ "kalamdb-commons", "kalamdb-configs", "kalamdb-filestore", + "kalamdb-handlers", + "kalamdb-jobs", "kalamdb-observability", "kalamdb-pg", "kalamdb-publisher", @@ -4098,6 +4105,67 @@ dependencies = [ "tracing", ] +[[package]] +name = "kalamdb-handlers" +version = "0.4.2-dev" +dependencies = [ + "arrow", + "async-trait", + "chrono", + "dashmap 6.1.0", + "datafusion", + "kalamdb-auth", + "kalamdb-commons", + "kalamdb-configs", + "kalamdb-core", + "kalamdb-filestore", + "kalamdb-jobs", + "kalamdb-publisher", + "kalamdb-raft", + "kalamdb-session", + "kalamdb-sql", + "kalamdb-store", + "kalamdb-system", + "kalamdb-tables", + "kalamdb-vector", + "kalamdb-views", + "log", + "serde", + "serde_json", + "sqlparser", + "thiserror 2.0.18", + "tokio", + "tracing", + "uuid", + "zip", +] + +[[package]] +name = "kalamdb-jobs" +version = "0.4.2-dev" +dependencies = [ + "async-trait", + "chrono", + "datafusion", + "kalamdb-commons", + "kalamdb-core", + "kalamdb-observability", + "kalamdb-raft", + "kalamdb-sharding", + "kalamdb-store", + "kalamdb-system", + "kalamdb-tables", + "log", + "parking_lot", + "serde", + "serde_json", + "tempfile", + "tokio", + "tracing", + "uuid", + "zip", +] + [[package]] name = "kalamdb-macros" version = "0.4.2-dev" @@ -4227,6 +4295,8 @@ dependencies = [ "kalamdb-configs", "kalamdb-core", "kalamdb-dba", + "kalamdb-handlers", + 
"kalamdb-jobs", "kalamdb-raft", "kalamdb-sharding", "kalamdb-sql", diff --git a/Cargo.toml b/Cargo.toml index 82d0b2335..122bc6504 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,6 +20,8 @@ members = [ "backend/crates/kalamdb-observability", "backend/crates/kalamdb-vector", "backend/crates/kalamdb-core", + "backend/crates/kalamdb-handlers", + "backend/crates/kalamdb-jobs", "backend/crates/kalamdb-publisher", "backend/crates/kalamdb-sql", "backend/crates/kalamdb-store", diff --git a/backend/Cargo.toml b/backend/Cargo.toml index af4c694a5..fb3e0da76 100644 --- a/backend/Cargo.toml +++ b/backend/Cargo.toml @@ -14,6 +14,8 @@ mimalloc = { version = "0.1", default-features = false, optional = true } # Core library kalamdb-core = { path = "crates/kalamdb-core" } +kalamdb-handlers = { path = "crates/kalamdb-handlers" } +kalamdb-jobs = { path = "crates/kalamdb-jobs" } kalamdb-api = { path = "crates/kalamdb-api", default-features = false } kalamdb-store = { path = "crates/kalamdb-store" } kalamdb-auth = { path = "crates/kalamdb-auth" } diff --git a/backend/crates/kalamdb-api/Cargo.toml b/backend/crates/kalamdb-api/Cargo.toml index 14c220393..3343c8482 100644 --- a/backend/crates/kalamdb-api/Cargo.toml +++ b/backend/crates/kalamdb-api/Cargo.toml @@ -13,6 +13,7 @@ build = "../../build.rs" kalamdb-commons = { path = "../kalamdb-commons" } kalamdb-configs = { path = "../kalamdb-configs" } kalamdb-core = { path = "../kalamdb-core" } +kalamdb-jobs = { path = "../kalamdb-jobs" } kalamdb-raft = { path = "../kalamdb-raft" } kalamdb-sql = { path = "../kalamdb-sql" } kalamdb-auth = { path = "../kalamdb-auth" } @@ -32,6 +33,7 @@ actix-multipart = { workspace = true } # Serialization serde = { workspace = true } serde_json = { workspace = true } +rmp-serde = { workspace = true } # Bytes diff --git a/backend/crates/kalamdb-api/src/handlers/ws/events/auth.rs b/backend/crates/kalamdb-api/src/handlers/ws/events/auth.rs index 7f05c981a..c641f2b09 100644 --- 
a/backend/crates/kalamdb-api/src/handlers/ws/events/auth.rs +++ b/backend/crates/kalamdb-api/src/handlers/ws/events/auth.rs @@ -1,16 +1,17 @@ -//! WebSocket authentication handler +//! WebSocket authentication service //! -//! Handles the Authenticate message for WebSocket connections. -//! Uses the unified authentication module from kalamdb-auth. +//! Provides a single code path for completing WebSocket authentication +//! regardless of how the user was validated (header-auth fast path or +//! explicit Authenticate message). //! //! Only JWT token authentication is accepted for WebSocket connections. //! This keeps username/password auth limited to the login endpoint. use actix_ws::Session; use kalamdb_auth::{authenticate, extract_username_for_audit, AuthRequest, UserRepository}; -use kalamdb_commons::models::ConnectionInfo; -use kalamdb_commons::websocket::WsAuthCredentials; -use kalamdb_commons::WebSocketMessage; +use kalamdb_commons::models::{ConnectionInfo, UserId}; +use kalamdb_commons::websocket::{ProtocolOptions, WsAuthCredentials}; +use kalamdb_commons::{Role, WebSocketMessage}; use kalamdb_core::app_context::AppContext; use kalamdb_core::live::{ConnectionsManager, SharedConnectionState}; use log::debug; @@ -32,11 +33,13 @@ pub async fn handle_authenticate( connection_state: &SharedConnectionState, client_ip: &ConnectionInfo, credentials: WsAuthCredentials, + protocol: ProtocolOptions, session: &mut Session, registry: &Arc, _app_context: &Arc, rate_limiter: &Arc, user_repo: &Arc, + compression: bool, ) -> Result<(), String> { // SECURITY: Rate limit auth attempts per IP to prevent brute-force via WebSocket. // This mirrors the rate limiting applied to the HTTP login endpoint. @@ -58,21 +61,88 @@ pub async fn handle_authenticate( connection_state, client_ip, auth_request, + protocol, session, registry, user_repo, + compression, ) .await } +/// Complete WebSocket authentication after a user has been validated. 
+/// +/// This is the single source of truth for post-validation auth steps. +/// Called from both the header-auth fast path (handler.rs) and the +/// message-auth path (authenticate_with_request). Consolidates: +/// - Marking the connection as authenticated +/// - Notifying the registry +/// - Setting the negotiated protocol +/// - Sending the AuthSuccess response +pub async fn complete_ws_auth( + connection_state: &SharedConnectionState, + registry: &Arc, + user_id: UserId, + role: Role, + protocol: ProtocolOptions, + session: &mut Session, + compression: bool, +) -> Result<(), String> { + let connection_id = connection_state.connection_id().clone(); + + connection_state.mark_authenticated(user_id.clone(), role); + registry.on_authenticated(&connection_id, user_id.clone()); + connection_state.set_protocol(protocol); + + let msg = WebSocketMessage::AuthSuccess { + user_id: user_id.clone(), + role: format!("{:?}", role), + protocol, + }; + let _ = send_json(session, &msg, compression).await; + + debug!( + "WebSocket authenticated: {} as {} ({:?})", + connection_id, + user_id.as_str(), + role + ); + + Ok(()) +} + +/// Send an AuthSuccess response for an already-authenticated connection. +/// +/// Used when a client sends an Authenticate message on a connection that +/// was already authenticated (e.g. header-auth). Idempotent — simply echoes +/// the current auth state without re-validating. 
+pub async fn send_current_auth_success( + connection_state: &SharedConnectionState, + session: &mut Session, + compression: bool, +) -> Result<(), String> { + if let (Some(user_id), Some(role)) = (connection_state.user_id(), connection_state.user_role()) + { + let msg = WebSocketMessage::AuthSuccess { + user_id: user_id.clone(), + role: format!("{:?}", role), + protocol: connection_state.protocol(), + }; + let _ = send_json(session, &msg, compression).await; + } + Ok(()) +} + /// Internal function that handles authentication for any AuthRequest type async fn authenticate_with_request( connection_state: &SharedConnectionState, connection_info: &ConnectionInfo, auth_request: AuthRequest, + protocol: ProtocolOptions, session: &mut Session, registry: &Arc, user_repo: &Arc, + compression: bool, ) -> Result<(), String> { let connection_id = connection_state.connection_id().clone(); @@ -104,23 +174,16 @@ async fn authenticate_with_request( tracing::Span::current().record("user_id", auth_result.user_id.as_str()); tracing::Span::current().record("role", format!("{:?}", auth_result.role).as_str()); - connection_state.mark_authenticated(auth_result.user_id.clone(), auth_result.role); - registry.on_authenticated(&connection_id, auth_result.user_id.clone()); - - let msg = WebSocketMessage::AuthSuccess { - user_id: auth_result.user_id.clone(), - role: format!("{:?}", auth_result.role), - }; - let _ = send_json(session, &msg, true).await; - - debug!( - "WebSocket authenticated: {} as {} ({:?})", - connection_id, - auth_result.user_id.as_str(), - auth_result.role - ); - - Ok(()) + complete_ws_auth( + connection_state, + registry, + auth_result.user_id, + auth_result.role, + protocol, + session, + compression, + ) + .await } .instrument(auth_span) .await diff --git a/backend/crates/kalamdb-api/src/handlers/ws/events/batch.rs b/backend/crates/kalamdb-api/src/handlers/ws/events/batch.rs index e0618219f..8eedfbbf4 100644 --- 
a/backend/crates/kalamdb-api/src/handlers/ws/events/batch.rs +++ b/backend/crates/kalamdb-api/src/handlers/ws/events/batch.rs @@ -14,7 +14,7 @@ use tracing::debug; use crate::handlers::ws::models::WsErrorCode; -use super::{send_error, send_json}; +use super::{send_error, send_message}; /// Handle next batch request /// @@ -82,7 +82,8 @@ pub async fn handle_next_batch( rows_json, batch_control, ); - let _ = send_json(session, &msg, compression_enabled).await; + let ser = connection_state.serialization_type(); + let _ = send_message(session, &msg, ser, compression_enabled).await; if !result.has_more { let flushed = connection_state.complete_initial_load(subscription_id); diff --git a/backend/crates/kalamdb-api/src/handlers/ws/events/mod.rs b/backend/crates/kalamdb-api/src/handlers/ws/events/mod.rs index d3398741f..788d8730f 100644 --- a/backend/crates/kalamdb-api/src/handlers/ws/events/mod.rs +++ b/backend/crates/kalamdb-api/src/handlers/ws/events/mod.rs @@ -10,6 +10,7 @@ //! eliminating the need to pass connection_id as a separate parameter. //! //! Messages are compressed with gzip when they exceed 512 bytes. +//! When the client negotiates MessagePack, payloads are sent as binary frames. pub mod auth; pub mod batch; @@ -18,6 +19,7 @@ pub mod subscription; pub mod unsubscribe; use actix_ws::{CloseCode, CloseReason, Session}; +use kalamdb_commons::websocket::SerializationType; use kalamdb_commons::WebSocketMessage; use crate::compression::{is_gzip, maybe_compress}; @@ -68,6 +70,29 @@ pub async fn send_json( } } +/// Protocol-aware message sender. +/// +/// Serializes `msg` using the connection's negotiated serialization type: +/// - **Json**: serialized to JSON text, optionally gzip-compressed. +/// - **MessagePack**: serialized to msgpack binary, optionally gzip-compressed. +/// +/// The `compress` flag respects the `?compress=false` query-parameter override +/// and the negotiated `CompressionType`. 
+pub async fn send_message( + session: &mut Session, + msg: &T, + serialization: SerializationType, + compress: bool, +) -> Result<(), ()> { + match serialization { + SerializationType::Json => send_json(session, msg, compress).await, + SerializationType::MessagePack => { + let bytes = rmp_serde::to_vec_named(msg).map_err(|_| ())?; + send_data_binary(session, &bytes, compress).await + }, + } +} + /// Send raw data with optional compression. /// /// When `compress` is `true`, messages over 512 bytes are gzip compressed and @@ -91,3 +116,19 @@ async fn send_data(session: &mut Session, data: &[u8], compress: bool) -> Result session.text(text.into_owned()).await.map_err(|_| ()) } } + +/// Send binary data (msgpack or already-binary) with optional gzip compression. +/// +/// Always sends as a binary WebSocket frame (never text). +async fn send_data_binary( + session: &mut Session, + data: &[u8], + compress: bool, +) -> Result<(), ()> { + if !compress { + return session.binary(data.to_vec()).await.map_err(|_| ()); + } + + let (payload, _compressed) = maybe_compress(data); + session.binary(payload).await.map_err(|_| ()) +} diff --git a/backend/crates/kalamdb-api/src/handlers/ws/events/subscription.rs b/backend/crates/kalamdb-api/src/handlers/ws/events/subscription.rs index 26b3b69ef..680d98b7b 100644 --- a/backend/crates/kalamdb-api/src/handlers/ws/events/subscription.rs +++ b/backend/crates/kalamdb-api/src/handlers/ws/events/subscription.rs @@ -13,7 +13,7 @@ use std::sync::Arc; use crate::handlers::ws::models::WsErrorCode; use crate::limiter::RateLimiter; -use super::{send_error, send_json}; +use super::{send_error, send_message}; /// Handle subscription request /// @@ -121,7 +121,8 @@ pub async fn handle_subscribe( batch_control.clone(), result.schema.clone(), ); - let _ = send_json(session, &ack, compression_enabled).await; + let ser = connection_state.serialization_type(); + let _ = send_message(session, &ack, ser, compression_enabled).await; if let Some(initial) = 
result.initial_data { // Convert Row objects to HashMap (always using simple JSON format) @@ -165,7 +166,7 @@ pub async fn handle_subscribe( rows_json, batch_control, ); - let _ = send_json(session, &batch_msg, compression_enabled).await; + let _ = send_message(session, &batch_msg, ser, compression_enabled).await; if !initial.has_more { let flushed = connection_state.complete_initial_load(&subscription_id); diff --git a/backend/crates/kalamdb-api/src/handlers/ws/handler.rs b/backend/crates/kalamdb-api/src/handlers/ws/handler.rs index df50bc7c7..a8143f907 100644 --- a/backend/crates/kalamdb-api/src/handlers/ws/handler.rs +++ b/backend/crates/kalamdb-api/src/handlers/ws/handler.rs @@ -14,12 +14,12 @@ use actix_web::{get, web, Error, HttpRequest, HttpResponse}; use actix_ws::{CloseCode, CloseReason, Message, ProtocolError, Session}; use futures_util::StreamExt; -use kalamdb_auth::UserRepository; -use kalamdb_commons::models::ConnectionInfo; -use kalamdb_commons::websocket::ClientMessage; -use kalamdb_commons::WebSocketMessage; +use kalamdb_auth::{authenticate, AuthRequest, UserRepository}; +use kalamdb_commons::models::{ConnectionInfo, UserId}; +use kalamdb_commons::websocket::{ClientMessage, CompressionType, ProtocolOptions, SerializationType}; +use kalamdb_commons::{Role, WebSocketMessage}; use kalamdb_core::app_context::AppContext; -use kalamdb_core::jobs::health_monitor::{ +use kalamdb_jobs::health_monitor::{ decrement_websocket_sessions, increment_websocket_sessions, }; use kalamdb_core::live::{ @@ -28,6 +28,39 @@ use kalamdb_core::live::{ use log::{debug, error, info, warn}; use std::sync::Arc; +/// Pre-authenticated connection information extracted from the HTTP upgrade +/// request's `Authorization: Bearer` header. +struct UpgradeAuth { + user_id: UserId, + role: Role, + protocol: ProtocolOptions, +} + +/// Parse protocol options from the WebSocket upgrade query string. 
+/// +/// Supports: `serialization=msgpack` (default json), `compression=none` (default gzip). +fn parse_protocol_from_query(query: &str) -> ProtocolOptions { + let mut protocol = ProtocolOptions::default(); + for kv in query.split('&') { + if let Some((key, value)) = kv.split_once('=') { + match key { + "serialization" => { + if value.eq_ignore_ascii_case("msgpack") { + protocol.serialization = SerializationType::MessagePack; + } + }, + "compression" => { + if value.eq_ignore_ascii_case("none") { + protocol.compression = CompressionType::None; + } + }, + _ => {}, + } + } + } + protocol +} + /// Returns `true` for WebSocket errors that represent a normal client /// disconnect rather than a server-side fault. /// @@ -64,8 +97,9 @@ fn is_expected_ws_disconnect(e: &ProtocolError) -> bool { } use super::events::{ - auth::handle_authenticate, batch::handle_next_batch, cleanup::cleanup_connection, send_error, - send_json, subscription::handle_subscribe, unsubscribe::handle_unsubscribe, + auth::{complete_ws_auth, handle_authenticate, send_current_auth_success}, + batch::handle_next_batch, cleanup::cleanup_connection, send_error, + send_json, send_message, subscription::handle_subscribe, unsubscribe::handle_unsubscribe, }; use crate::handlers::ws::models::WsErrorCode; use crate::limiter::RateLimiter; @@ -123,6 +157,39 @@ pub async fn websocket_handler( .split('&') .any(|kv| kv.eq_ignore_ascii_case("compress=false")); + // --- Header-auth fast path: validate JWT from upgrade request headers --- + // If the Authorization header carries a valid Bearer token, authenticate + // during the HTTP upgrade so the client can skip the explicit Authenticate + // message round-trip. Invalid tokens are rejected immediately with 401. 
+ let pre_auth = if let Some(auth_header) = req.headers().get("Authorization") { + if let Ok(auth_str) = auth_header.to_str() { + if let Some(token) = auth_str.strip_prefix("Bearer ") { + let client_ip_for_auth = kalamdb_auth::extract_client_ip_secure(&req); + let auth_request = AuthRequest::Jwt { token: token.to_string() }; + match authenticate(auth_request, &client_ip_for_auth, user_repo.get_ref()).await { + Ok(result) => { + let protocol = parse_protocol_from_query(req.query_string()); + Some(UpgradeAuth { + user_id: result.user.user_id, + role: result.user.role, + protocol, + }) + }, + Err(_) => { + warn!("WebSocket upgrade rejected: invalid Bearer token"); + return Ok(HttpResponse::Unauthorized().body("Invalid token")); + }, + } + } else { + None + } + } else { + None + } + } else { + None + }; + // Generate unique connection ID let connection_id = ConnectionId::new(uuid::Uuid::new_v4().simple().to_string()); @@ -136,7 +203,11 @@ pub async fn websocket_handler( ) .entered(); - debug!("New WebSocket connection: {} (auth required within 3s)", connection_id); + debug!( + "New WebSocket connection: {} (pre_auth={})", + connection_id, + if pre_auth.is_some() { "header" } else { "pending" } + ); // Register connection with unified registry (handles heartbeat tracking) let registration = @@ -175,6 +246,7 @@ pub async fn websocket_handler( user_repository, max_message_size, compression_enabled, + pre_auth, ) .await; @@ -201,6 +273,7 @@ async fn handle_websocket( user_repo: Arc, max_message_size: usize, compression_enabled: bool, + pre_auth: Option, ) { let mut event_rx = registration.event_rx; let mut notification_rx = registration.notification_rx; @@ -212,6 +285,23 @@ async fn handle_websocket( let mut session = session; let mut msg_stream = msg_stream; + // Header-auth fast path: if JWT was validated during HTTP upgrade, + // mark authenticated immediately and send AuthSuccess as the first frame. 
+ if let Some(auth) = pre_auth { + connection_state.mark_auth_started(); + let _ = complete_ws_auth( + &connection_state, + ®istry, + auth.user_id, + auth.role, + auth.protocol, + &mut session, + compression_enabled, + ) + .await; + debug!("WebSocket pre-authenticated from header: {}", connection_id); + } + loop { tokio::select! { biased; @@ -307,9 +397,66 @@ async fn handle_websocket( break; } } - Some(Ok(Message::Binary(_))) => { - warn!("Binary messages not supported: {}", connection_id); - let _ = send_error(&mut session, "protocol", WsErrorCode::UnsupportedData, "Binary not supported", compression_enabled).await; + Some(Ok(Message::Binary(data))) => { + connection_state.update_heartbeat(); + + // After msgpack negotiation, binary frames carry msgpack payloads. + // With JSON negotiation, binary frames are gzip-compressed JSON (existing behaviour). + let serialization = connection_state.serialization_type(); + + match serialization { + SerializationType::MessagePack => { + // Decompress if gzip, then deserialize msgpack + let raw = if data.len() >= 2 && data[0] == 0x1f && data[1] == 0x8b { + use flate2::read::GzDecoder; + use std::io::Read; + let mut decoder = GzDecoder::new(&data[..]); + let mut buf = Vec::new(); + if decoder.read_to_end(&mut buf).is_err() { + warn!("Failed to decompress binary msgpack from {}", connection_id); + continue; + } + buf + } else { + data.to_vec() + }; + match rmp_serde::from_slice::(&raw) { + Ok(client_msg) => { + if let Err(e) = handle_client_message( + client_msg, + &connection_state, + &client_ip, + &mut session, + ®istry, + &app_context, + &rate_limiter, + &live_query_manager, + &user_repo, + compression_enabled, + ).await { + error!("Error handling msgpack message: {}", e); + let _ = session.close(Some(CloseReason { + code: CloseCode::Error, + description: Some(e), + })).await; + break; + } + }, + Err(e) => { + warn!("Invalid msgpack from {}: {}", connection_id, e); + let _ = send_error(&mut session, "protocol", + 
WsErrorCode::UnsupportedData, + &format!("Invalid MessagePack: {}", e), + compression_enabled).await; + }, + } + }, + SerializationType::Json => { + // Legacy: binary = gzip-compressed JSON + warn!("Binary messages not supported for JSON protocol: {}", connection_id); + let _ = send_error(&mut session, "protocol", WsErrorCode::UnsupportedData, "Binary not supported", compression_enabled).await; + }, + } } Some(Ok(Message::Close(reason))) => { debug!("Client requested close: {:?}", reason); @@ -339,8 +486,9 @@ async fn handle_websocket( notification = notification_rx.recv() => { match notification { Some(notif) => { - // Use send_json for automatic compression of large payloads - if send_json(&mut session, notif.as_ref(), compression_enabled).await.is_err() { + // Use protocol-aware send for automatic format selection + let ser = connection_state.serialization_type(); + if send_message(&mut session, notif.as_ref(), ser, compression_enabled).await.is_err() { break; } } @@ -377,18 +525,57 @@ async fn handle_text_message( let msg: ClientMessage = serde_json::from_str(text).map_err(|e| format!("Invalid message: {}", e))?; + handle_client_message( + msg, + connection_state, + client_ip, + session, + registry, + app_context, + rate_limiter, + live_query_manager, + user_repo, + compression_enabled, + ) + .await +} + +/// Dispatch a parsed `ClientMessage` to the appropriate handler. +#[allow(clippy::too_many_arguments)] +async fn handle_client_message( + msg: ClientMessage, + connection_state: &SharedConnectionState, + client_ip: &ConnectionInfo, + session: &mut Session, + registry: &Arc, + app_context: &Arc, + rate_limiter: &Arc, + live_query_manager: &Arc, + user_repo: &Arc, + compression_enabled: bool, +) -> Result<(), String> { match msg { - ClientMessage::Authenticate { credentials } => { + ClientMessage::Authenticate { + credentials, + protocol, + } => { + // If already authenticated (header-auth), just re-send AuthSuccess. 
+ if connection_state.is_authenticated() { + return send_current_auth_success(connection_state, session, compression_enabled) + .await; + } connection_state.mark_auth_started(); handle_authenticate( connection_state, client_ip, credentials, + protocol, session, registry, app_context, rate_limiter, user_repo, + compression_enabled, ) .await }, @@ -445,3 +632,57 @@ async fn handle_text_message( }, } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_protocol_defaults_when_empty() { + let proto = parse_protocol_from_query(""); + assert_eq!(proto.serialization, SerializationType::Json); + assert_eq!(proto.compression, CompressionType::Gzip); + } + + #[test] + fn parse_protocol_msgpack_serialization() { + let proto = parse_protocol_from_query("serialization=msgpack"); + assert_eq!(proto.serialization, SerializationType::MessagePack); + assert_eq!(proto.compression, CompressionType::Gzip); + } + + #[test] + fn parse_protocol_compression_none() { + let proto = parse_protocol_from_query("compression=none"); + assert_eq!(proto.serialization, SerializationType::Json); + assert_eq!(proto.compression, CompressionType::None); + } + + #[test] + fn parse_protocol_both_options() { + let proto = parse_protocol_from_query("serialization=msgpack&compression=none"); + assert_eq!(proto.serialization, SerializationType::MessagePack); + assert_eq!(proto.compression, CompressionType::None); + } + + #[test] + fn parse_protocol_mixed_with_compress_false() { + let proto = parse_protocol_from_query("compress=false&serialization=msgpack"); + assert_eq!(proto.serialization, SerializationType::MessagePack); + assert_eq!(proto.compression, CompressionType::Gzip); + } + + #[test] + fn parse_protocol_case_insensitive() { + let proto = parse_protocol_from_query("serialization=MSGPACK&compression=NONE"); + assert_eq!(proto.serialization, SerializationType::MessagePack); + assert_eq!(proto.compression, CompressionType::None); + } + + #[test] + fn 
parse_protocol_unknown_values_keep_defaults() { + let proto = parse_protocol_from_query("serialization=avro&compression=lz4"); + assert_eq!(proto.serialization, SerializationType::Json); + assert_eq!(proto.compression, CompressionType::Gzip); + } +} diff --git a/backend/crates/kalamdb-commons/Cargo.toml b/backend/crates/kalamdb-commons/Cargo.toml index c9008e95f..140a21884 100644 --- a/backend/crates/kalamdb-commons/Cargo.toml +++ b/backend/crates/kalamdb-commons/Cargo.toml @@ -29,6 +29,7 @@ flexbuffers = { workspace = true, optional = true } chrono = { workspace = true, optional = true } arrow = { workspace = true, optional = true } uuid = { workspace = true, optional = true } +rmp-serde = { workspace = true, optional = true } nanoid = { workspace = true, optional = true } log = { workspace = true, optional = true } tracing = { workspace = true } @@ -71,6 +72,7 @@ arrow-utils = [ "rows", ] websocket-auth = ["serde"] +msgpack = ["serde", "dep:rmp-serde"] # Full feature set including Arrow/DataFusion-backed utilities (for server use) full = [ "serde", @@ -85,9 +87,10 @@ full = [ "dep:log", "websocket-auth", "dep:datafusion-common", + "msgpack", ] # WASM-compatible subset (no datafusion but has serialization) -wasm = ["serde"] +wasm = ["serde", "msgpack"] default = ["full"] [lib] diff --git a/backend/crates/kalamdb-commons/src/websocket.rs b/backend/crates/kalamdb-commons/src/websocket.rs index 2e7ed1698..314cc0347 100644 --- a/backend/crates/kalamdb-commons/src/websocket.rs +++ b/backend/crates/kalamdb-commons/src/websocket.rs @@ -107,6 +107,56 @@ use datafusion_common::ScalarValue; use serde::{Deserialize, Serialize}; use std::collections::{BTreeMap, HashMap}; +/// Wire-format serialization type negotiated during authentication. +/// +/// The client sends its preferred serialization in the `Authenticate` message. +/// After a successful auth response (always JSON), all subsequent frames use +/// the negotiated format. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum SerializationType { + /// JSON text frames (default, backward-compatible). + #[default] + Json, + /// MessagePack binary frames — compact and fast with the same Serde model. + #[serde(rename = "msgpack")] + MessagePack, +} + +/// Wire-format compression negotiated during authentication. +/// +/// Applied independently of serialization type. Large payloads may still +/// benefit from gzip even when using MessagePack. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum CompressionType { + /// No compression. + None, + /// Gzip compression for payloads above the server threshold (default). + #[default] + Gzip, +} + +/// Protocol options negotiated once per connection during authentication. +/// +/// Always sent in `ClientMessage::Authenticate` and echoed in `AuthSuccess`. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub struct ProtocolOptions { + /// Serialization format for messages after auth. + pub serialization: SerializationType, + /// Compression policy. + pub compression: CompressionType, +} + +impl Default for ProtocolOptions { + fn default() -> Self { + Self { + serialization: SerializationType::Json, + compression: CompressionType::Gzip, + } + } +} + /// Type alias for row data in WebSocket messages (column_name -> cell value) pub type RowData = HashMap; @@ -124,11 +174,15 @@ pub enum WebSocketMessage { /// /// Sent after client sends Authenticate message with valid credentials. /// Client can now send Subscribe/Unsubscribe messages. + /// Always sent as JSON text, even when msgpack was negotiated; the + /// negotiated protocol takes effect for all *subsequent* frames. AuthSuccess { /// Authenticated user ID user_id: UserId, /// User role role: String, + /// Negotiated protocol echoed back to the client. 
+ protocol: ProtocolOptions, }, /// Authentication failed response @@ -187,10 +241,13 @@ pub enum ClientMessage { /// Server responds with AuthSuccess or AuthError. /// /// Supports token-based authentication via the `credentials` field. + /// Optionally negotiates wire format via `protocol`. Authenticate { /// Authentication credentials (jwt or future token-based methods) #[serde(flatten)] credentials: WsAuthCredentials, + /// Protocol negotiation (serialization + compression). + protocol: ProtocolOptions, }, /// Subscribe to live query updates @@ -802,4 +859,164 @@ mod tests { assert!(json.contains("error")); assert!(json.contains("INVALID_QUERY")); } + + #[test] + fn test_serialization_type_default() { + assert_eq!(SerializationType::default(), SerializationType::Json); + } + + #[test] + fn test_compression_type_default() { + assert_eq!(CompressionType::default(), CompressionType::Gzip); + } + + #[test] + fn test_protocol_options_default() { + let opts = ProtocolOptions::default(); + assert_eq!(opts.serialization, SerializationType::Json); + assert_eq!(opts.compression, CompressionType::Gzip); + } + + #[test] + fn test_protocol_options_json_roundtrip() { + let opts = ProtocolOptions { + serialization: SerializationType::MessagePack, + compression: CompressionType::None, + }; + let json = serde_json::to_string(&opts).unwrap(); + assert!(json.contains("\"msgpack\"")); + assert!(json.contains("\"none\"")); + let parsed: ProtocolOptions = serde_json::from_str(&json).unwrap(); + assert_eq!(parsed.serialization, SerializationType::MessagePack); + assert_eq!(parsed.compression, CompressionType::None); + } + + #[test] + fn test_protocol_options_default_omitted() { + // Default protocol should serialize cleanly and round-trip + let opts = ProtocolOptions::default(); + let json = serde_json::to_string(&opts).unwrap(); + let parsed: ProtocolOptions = serde_json::from_str(&json).unwrap(); + assert_eq!(parsed, opts); + } + + #[test] + fn test_authenticate_with_protocol() { + 
let msg = ClientMessage::Authenticate { + credentials: crate::websocket::WsAuthCredentials::Jwt { + token: "test_token".to_string(), + }, + protocol: ProtocolOptions { + serialization: SerializationType::MessagePack, + compression: CompressionType::Gzip, + }, + }; + let json = serde_json::to_string(&msg).unwrap(); + assert!(json.contains("\"protocol\"")); + assert!(json.contains("\"msgpack\"")); + let parsed: ClientMessage = serde_json::from_str(&json).unwrap(); + match parsed { + ClientMessage::Authenticate { protocol, .. } => { + assert_eq!(protocol.serialization, SerializationType::MessagePack); + }, + _ => panic!("Expected Authenticate"), + } + } + + #[test] + fn test_authenticate_default_protocol() { + let msg = ClientMessage::Authenticate { + credentials: crate::websocket::WsAuthCredentials::Jwt { + token: "test_token".to_string(), + }, + protocol: ProtocolOptions::default(), + }; + let json = serde_json::to_string(&msg).unwrap(); + assert!(json.contains("\"protocol\"")); + let parsed: ClientMessage = serde_json::from_str(&json).unwrap(); + match parsed { + ClientMessage::Authenticate { protocol, .. } => { + assert_eq!(protocol.serialization, SerializationType::Json); + assert_eq!(protocol.compression, CompressionType::Gzip); + }, + _ => panic!("Expected Authenticate"), + } + } + + #[test] + fn test_auth_success_with_protocol() { + let msg = WebSocketMessage::AuthSuccess { + user_id: UserId::from("user-1"), + role: "admin".to_string(), + protocol: ProtocolOptions { + serialization: SerializationType::MessagePack, + compression: CompressionType::Gzip, + }, + }; + let json = serde_json::to_string(&msg).unwrap(); + assert!(json.contains("\"protocol\"")); + let parsed: WebSocketMessage = serde_json::from_str(&json).unwrap(); + match parsed { + WebSocketMessage::AuthSuccess { protocol, .. 
} => { + assert_eq!(protocol.serialization, SerializationType::MessagePack); + }, + _ => panic!("Expected AuthSuccess"), + } + } + + #[cfg(feature = "msgpack")] + #[test] + fn test_client_message_msgpack_roundtrip() { + let msg = ClientMessage::subscribe(SubscriptionRequest { + id: "sub-1".to_string(), + sql: "SELECT * FROM test".to_string(), + options: SubscriptionOptions::default(), + }); + let bytes = rmp_serde::to_vec_named(&msg).unwrap(); + let parsed: ClientMessage = rmp_serde::from_slice(&bytes).unwrap(); + match parsed { + ClientMessage::Subscribe { subscription } => { + assert_eq!(subscription.id, "sub-1"); + assert_eq!(subscription.sql, "SELECT * FROM test"); + }, + _ => panic!("Expected Subscribe"), + } + } + + #[cfg(feature = "msgpack")] + #[test] + fn test_websocket_message_msgpack_roundtrip() { + let msg = WebSocketMessage::AuthSuccess { + user_id: UserId::from("user-1"), + role: "admin".to_string(), + protocol: ProtocolOptions { + serialization: SerializationType::MessagePack, + compression: CompressionType::None, + }, + }; + let bytes = rmp_serde::to_vec_named(&msg).unwrap(); + let parsed: WebSocketMessage = rmp_serde::from_slice(&bytes).unwrap(); + match parsed { + WebSocketMessage::AuthSuccess { user_id, role, protocol } => { + assert_eq!(user_id, UserId::from("user-1")); + assert_eq!(role, "admin"); + assert_eq!(protocol.serialization, SerializationType::MessagePack); + }, + _ => panic!("Expected AuthSuccess"), + } + } + + #[cfg(feature = "msgpack")] + #[test] + fn test_notification_msgpack_roundtrip() { + let notification = Notification::insert("sub-1".to_string(), vec![]); + let bytes = rmp_serde::to_vec_named(¬ification).unwrap(); + let parsed: Notification = rmp_serde::from_slice(&bytes).unwrap(); + match parsed { + Notification::Change { subscription_id, .. 
} => { + assert_eq!(subscription_id, "sub-1"); + } + _ => panic!("Expected Change"), + } + } } diff --git a/backend/crates/kalamdb-core/Cargo.toml b/backend/crates/kalamdb-core/Cargo.toml index eb4a617bf..36ab2c91a 100644 --- a/backend/crates/kalamdb-core/Cargo.toml +++ b/backend/crates/kalamdb-core/Cargo.toml @@ -90,10 +90,14 @@ tempfile = { workspace = true } tokio = { workspace = true, features = ["test-util"] } parquet = { workspace = true } ntest = { workspace = true } +kalamdb-handlers = { path = "../kalamdb-handlers" } +kalamdb-jobs = { path = "../kalamdb-jobs" } [features] # Feature to run expensive/slow tests (disabled by default) expensive_tests = [] +# Expose test helpers to other crates' test code (dev-dependencies) +test-helpers = [] [lib] doctest = false diff --git a/backend/crates/kalamdb-core/src/app_context.rs b/backend/crates/kalamdb-core/src/app_context.rs index c7e9cffbd..e43725f83 100644 --- a/backend/crates/kalamdb-core/src/app_context.rs +++ b/backend/crates/kalamdb-core/src/app_context.rs @@ -5,11 +5,7 @@ use crate::applier::UnifiedApplier; use crate::error_extensions::KalamDbResultExt; -use crate::jobs::executors::{ - BackupExecutor, CleanupExecutor, CompactExecutor, FlushExecutor, JobCleanupExecutor, - JobRegistry, RestoreExecutor, RetentionExecutor, StreamEvictionExecutor, TopicCleanupExecutor, - TopicRetentionExecutor, UserCleanupExecutor, UserExportExecutor, VectorIndexExecutor, -}; +use crate::job_waker::JobWaker; use crate::live::notification::NotificationService; use crate::live::ConnectionsManager; use crate::live::TopicPublisherService; @@ -70,7 +66,11 @@ pub struct AppContext { storage_backend: Arc, // ===== Managers ===== - job_manager: OnceCell>, + /// Type-erased job manager (concrete type is `kalamdb_jobs::JobsManager`). + /// Consumers downcast via the `kalamdb_jobs::AppContextJobsExt` trait. + job_manager: OnceCell>, + /// Lightweight waker so the Raft applier can notify the job loop. 
+    job_waker: OnceCell>,
     live_query_manager: OnceCell>,
 
     // ===== Notification Service =====
@@ -128,7 +128,8 @@ impl std::fmt::Debug for AppContext {
             .field("user_table_store", &"Arc")
             .field("shared_table_store", &"Arc")
             .field("storage_backend", &"Arc")
-            .field("job_manager", &"OnceCell>")
+            .field("job_manager", &"OnceCell>")
+            .field("job_waker", &"OnceCell>")
             .field("live_query_manager", &"OnceCell>")
             .field("connection_registry", &"Arc")
             .field("storage_registry", &"Arc")
@@ -299,25 +300,10 @@ impl AppContext {
         // Note: information_schema.tables and information_schema.columns are provided
         // by DataFusion's built-in support (enabled via .with_information_schema(true))
 
-        // Create job registry and register all 8 executors (Phase 9, T154)
-        let job_registry = Arc::new(JobRegistry::new());
-        job_registry.register(Arc::new(FlushExecutor::new()));
-        job_registry.register(Arc::new(CleanupExecutor::new()));
-        job_registry.register(Arc::new(JobCleanupExecutor::new()));
-        job_registry.register(Arc::new(RetentionExecutor::new()));
-        job_registry.register(Arc::new(StreamEvictionExecutor::new()));
-        job_registry.register(Arc::new(UserCleanupExecutor::new()));
-        job_registry.register(Arc::new(CompactExecutor::new()));
-        job_registry.register(Arc::new(BackupExecutor::new()));
-        job_registry.register(Arc::new(RestoreExecutor::new()));
-        job_registry.register(Arc::new(TopicRetentionExecutor::new()));
-        job_registry.register(Arc::new(TopicCleanupExecutor::new()));
-        job_registry.register(Arc::new(UserExportExecutor::new()));
-        job_registry.register(Arc::new(VectorIndexExecutor::new()));
-
-        // Create unified job manager (Phase 9, T154)
-        let jobs_provider = system_tables.jobs();
-        let job_nodes_provider = system_tables.job_nodes();
+        // Job registry and its 13 executors (Phase 9, T154) have moved to the
+        // kalamdb-jobs crate; callers wire them in via AppContext::set_job_manager().
+        let job_registry_placeholder: Option<()> = None;
+        let _ = job_registry_placeholder;
 
         // Create 
connections manager (unified WebSocket connection management) // Timeouts from config or defaults @@ -428,6 +414,7 @@ impl AppContext { shared_table_store, storage_backend, job_manager: OnceCell::new(), + job_waker: OnceCell::new(), live_query_manager: OnceCell::new(), notification_service: Arc::clone(¬ification_service), connection_registry, @@ -513,15 +500,7 @@ impl AppContext { log::info!("Wired gRPC ClusterClient and CoreClusterHandler for cluster RPC"); } - let job_manager = Arc::new(crate::jobs::JobsManager::new( - jobs_provider, - job_nodes_provider, - job_registry, - Arc::clone(&app_ctx), - )); - if app_ctx.job_manager.set(job_manager).is_err() { - panic!("JobsManager already initialized"); - } + // Job manager is initialized externally by kalamdb-jobs (via set_job_manager) let applier = crate::applier::create_applier(Arc::clone(&app_ctx)); if app_ctx.applier.set(applier).is_err() { @@ -662,7 +641,7 @@ impl AppContext { /// let sys_cols = app_context.system_columns_service(); /// let (snowflake_id, updated_ns, deleted) = sys_cols.handle_insert(None).unwrap(); /// ``` - #[cfg(test)] + #[cfg(any(test, feature = "test-helpers"))] pub fn new_test() -> Arc { use kalamdb_store::test_utils::InMemoryBackend; @@ -698,10 +677,7 @@ impl AppContext { ); let base_session_context = Arc::new(session_factory.create_session()); - // Create minimal job manager registry - let job_registry = Arc::new(JobRegistry::new()); - let jobs_provider = system_tables.jobs(); - let job_nodes_provider = system_tables.job_nodes(); + // Job registry creation is deferred to kalamdb-jobs (set_job_manager) // Create test NodeId let node_id = Arc::new(NodeId::new(22)); @@ -765,6 +741,7 @@ impl AppContext { shared_table_store, storage_backend, job_manager: OnceCell::new(), + job_waker: OnceCell::new(), live_query_manager: OnceCell::new(), notification_service: Arc::clone(¬ification_service), connection_registry, @@ -797,15 +774,7 @@ impl AppContext { // Wire AppContext into notification service 
for leadership checks (tests) notification_service.set_app_context(Arc::downgrade(&app_ctx)); - let job_manager = Arc::new(crate::jobs::JobsManager::new( - jobs_provider, - job_nodes_provider, - job_registry, - Arc::clone(&app_ctx), - )); - if app_ctx.job_manager.set(job_manager).is_err() { - panic!("JobsManager already initialized"); - } + // Job manager is initialized externally by kalamdb-jobs (via set_job_manager) let applier = crate::applier::create_applier(Arc::clone(&app_ctx)); if app_ctx.applier.set(applier).is_err() { @@ -852,8 +821,29 @@ impl AppContext { self.storage_backend.clone() } - pub fn job_manager(&self) -> Arc { - self.job_manager.get().expect("JobsManager not initialized").clone() + pub fn job_manager_raw(&self) -> &Arc { + self.job_manager.get().expect("JobsManager not initialized") + } + + /// Set the type-erased job manager and its waker. + /// + /// Called once from `kalamdb-jobs` initialization (typically in lifecycle.rs). + pub fn set_job_manager( + &self, + mgr: Arc, + waker: Arc, + ) { + if self.job_manager.set(mgr).is_err() { + panic!("JobsManager already initialized"); + } + if self.job_waker.set(waker).is_err() { + panic!("JobWaker already initialized"); + } + } + + /// Get the job waker (used by the Raft applier to notify the job loop). + pub fn job_waker(&self) -> &Arc { + self.job_waker.get().expect("JobWaker not initialized") } pub fn live_query_manager(&self) -> Arc { diff --git a/backend/crates/kalamdb-core/src/applier/executor/ddl.rs b/backend/crates/kalamdb-core/src/applier/executor/ddl.rs index 111005000..0287837eb 100644 --- a/backend/crates/kalamdb-core/src/applier/executor/ddl.rs +++ b/backend/crates/kalamdb-core/src/applier/executor/ddl.rs @@ -164,7 +164,10 @@ mod tests { let app_ctx = test_app_context_simple(); // Register SqlExecutor into AppContext so DdlExecutor can clear plan cache. 
- let sql_executor = Arc::new(SqlExecutor::new(app_ctx.clone(), false)); + let sql_executor = Arc::new(SqlExecutor::new( + app_ctx.clone(), + Arc::new(crate::sql::executor::handler_registry::HandlerRegistry::new(app_ctx.clone())), + )); app_ctx.set_sql_executor(sql_executor.clone()); // Build a minimal STREAM table definition (avoids storage registry requirements). diff --git a/backend/crates/kalamdb-core/src/applier/raft/provider_meta_applier.rs b/backend/crates/kalamdb-core/src/applier/raft/provider_meta_applier.rs index d83ef3f9c..9d6106e50 100644 --- a/backend/crates/kalamdb-core/src/applier/raft/provider_meta_applier.rs +++ b/backend/crates/kalamdb-core/src/applier/raft/provider_meta_applier.rs @@ -289,7 +289,7 @@ impl MetaApplier for ProviderMetaApplier { // This provides instant dispatch rather than relying on polling let this_node_id = *self.app_context.node_id().as_ref(); if node_id == this_node_id { - self.app_context.job_manager().awake_job(job_id.clone()); + self.app_context.job_waker().awake_job(job_id.clone()); } Ok(format!("Job node {} created and awakened", node_id)) diff --git a/backend/crates/kalamdb-core/src/job_waker.rs b/backend/crates/kalamdb-core/src/job_waker.rs new file mode 100644 index 000000000..104fc793b --- /dev/null +++ b/backend/crates/kalamdb-core/src/job_waker.rs @@ -0,0 +1,12 @@ +//! Lightweight trait for awakening jobs from within kalamdb-core (e.g., the Raft applier). +//! +//! The concrete `JobsManager` implementation lives in `kalamdb-jobs`; this trait +//! allows kalamdb-core to notify it without a direct dependency. + +use kalamdb_commons::JobId; + +/// Minimal interface so kalamdb-core can wake up a job without depending on +/// kalamdb-jobs. 
+pub trait JobWaker: Send + Sync { + fn awake_job(&self, job_id: JobId); +} diff --git a/backend/crates/kalamdb-core/src/jobs/mod.rs b/backend/crates/kalamdb-core/src/jobs/mod.rs deleted file mode 100644 index d1d4186ec..000000000 --- a/backend/crates/kalamdb-core/src/jobs/mod.rs +++ /dev/null @@ -1,75 +0,0 @@ -//! # Job Management System -//! -//! **Phase 9 Status**: JobsManager with 8 concrete executors is production-ready. -//! Legacy components (JobExecutor, old schedulers) are DEPRECATED and pending migration. -//! -//! ## Examples -//! -//! ```rust,no_run -//! // Phase 9: Unified Job Management with typed JobIds -//! use kalamdb_core::jobs::{JobsManager, JobRegistry}; -//! use kalamdb_core::app_context::AppContext; -//! use kalamdb_core::jobs::executors::*; -//! use kalamdb_core::tables::system::JobsTableProvider; -//! use kalamdb_system::JobNodesTableProvider; -//! use kalamdb_store::test_utils::InMemoryBackend; -//! use kalamdb_store::StorageBackend; -//! use std::sync::Arc; -//! use kalamdb_system::JobType; -//! -//! # fn example() { -//! let backend: Arc = Arc::new(InMemoryBackend::new()); -//! let jobs_provider = Arc::new(JobsTableProvider::new(backend)); -//! # // app_ctx is injected by the caller -//! let job_registry = Arc::new(JobRegistry::new()); -//! let job_nodes_provider = Arc::new(JobNodesTableProvider::new(backend)); -//! let job_manager = Arc::new(JobsManager::new( -//! jobs_provider, -//! job_nodes_provider, -//! job_registry, -//! app_ctx, -//! )); -//! -//! // Register executors (8 concrete implementations) -//! job_registry.register("flush", Arc::new(FlushExecutor::new(/* ... */))); -//! job_registry.register("cleanup", Arc::new(CleanupExecutor::new(/* ... */))); -//! // ... register remaining 6 executors -//! -//! // Create job with typed JobId (idempotency enforced) -//! let job_id = job_manager.create_job( -//! JobType::Flush, -//! serde_json::json!({"namespace_id": "default", "table_name": "xyz"}), -//! 
Some("flush-table-xyz".to_string()), // Idempotency key -//! None, // JobOptions -//! ).await.unwrap(); -//! -//! // Job processing loop (spawned in lifecycle.rs) -//! // job_manager.run_loop(max_concurrent).await?; -//! # } -//! ``` - -// ============================================================================ -// PHASE 9: UNIFIED JOB MANAGEMENT (PRODUCTION-READY) -// ============================================================================ -pub mod executors; -pub mod flush_scheduler; -pub mod health_monitor; -pub mod jobs_manager; -pub mod stream_eviction; - -// ============================================================================ -// PHASE 16: LEADER-ONLY JOB EXECUTION (CLUSTER MODE) -// ============================================================================ -pub mod leader_failover; -pub mod leader_guard; - -// Phase 9 exports (production API) -pub use executors::{JobContext, JobDecision, JobExecutor as JobExecutorTrait, JobRegistry}; -pub use flush_scheduler::FlushScheduler; -pub use health_monitor::HealthMonitor; -pub use jobs_manager::JobsManager; -pub use stream_eviction::StreamEvictionScheduler; - -// Phase 16 exports (cluster mode) -pub use leader_failover::{JobRecoveryAction, LeaderFailoverHandler, RecoveryReport}; -pub use leader_guard::{LeaderOnlyJobGuard, LeadershipStatus}; diff --git a/backend/crates/kalamdb-core/src/lib.rs b/backend/crates/kalamdb-core/src/lib.rs index 0698f2ea2..40256d91e 100644 --- a/backend/crates/kalamdb-core/src/lib.rs +++ b/backend/crates/kalamdb-core/src/lib.rs @@ -39,7 +39,7 @@ pub mod app_context; pub mod applier; pub mod error; pub mod error_extensions; -pub mod jobs; +pub mod job_waker; pub mod live; pub mod manifest; pub mod metrics; @@ -67,7 +67,7 @@ pub mod system_columns { pub use crate::schema_registry::SystemColumnsService; } -// Test helpers module for unit tests inside this crate. -// Integration tests should include their own helpers or use path-based modules. 
-#[cfg(test)] +// Test helpers module — compiled for internal unit tests and for other crates' +// dev-dependencies that enable the "test-helpers" feature. +#[cfg(any(test, feature = "test-helpers"))] pub mod test_helpers; diff --git a/backend/crates/kalamdb-core/src/live/helpers/initial_data.rs b/backend/crates/kalamdb-core/src/live/helpers/initial_data.rs index 3a50ffc93..127dd7358 100644 --- a/backend/crates/kalamdb-core/src/live/helpers/initial_data.rs +++ b/backend/crates/kalamdb-core/src/live/helpers/initial_data.rs @@ -454,6 +454,7 @@ mod tests { use crate::providers::UserTableProvider; use crate::schema_registry::CachedTableData; use crate::schema_registry::TablesSchemaRegistryAdapter; + use crate::sql::executor::handler_registry::HandlerRegistry; use crate::sql::executor::SqlExecutor; use kalamdb_commons::ids::{SeqId, UserTableRowId}; use kalamdb_commons::models::datatypes::KalamDataType; @@ -626,7 +627,10 @@ mod tests { let fetcher = InitialDataFetcher::new(app_context.base_session_context(), schema_registry.clone()); - let sql_executor = Arc::new(SqlExecutor::new(app_context.clone(), false)); + let sql_executor = Arc::new(SqlExecutor::new( + app_context.clone(), + Arc::new(HandlerRegistry::new(app_context.clone())), + )); fetcher.set_sql_executor(sql_executor); // LiveId for connection user 'userA' (RLS enforced) @@ -770,7 +774,10 @@ mod tests { let fetcher = InitialDataFetcher::new(app_context.base_session_context(), schema_registry.clone()); - let sql_executor = Arc::new(SqlExecutor::new(app_context.clone(), false)); + let sql_executor = Arc::new(SqlExecutor::new( + app_context.clone(), + Arc::new(HandlerRegistry::new(app_context.clone())), + )); fetcher.set_sql_executor(sql_executor); let user_id = UserId::new("userb"); let conn = ConnId::new("conn2"); @@ -948,7 +955,10 @@ mod tests { let fetcher = InitialDataFetcher::new(app_context.base_session_context(), schema_registry.clone()); - let sql_executor = Arc::new(SqlExecutor::new(app_context.clone(), 
false)); + let sql_executor = Arc::new(SqlExecutor::new( + app_context.clone(), + Arc::new(HandlerRegistry::new(app_context.clone())), + )); fetcher.set_sql_executor(sql_executor); let user_id = UserId::new("userc"); let conn = ConnId::new("conn3"); diff --git a/backend/crates/kalamdb-core/src/live/manager/tests.rs b/backend/crates/kalamdb-core/src/live/manager/tests.rs index 021fc2bdd..e43e9e79d 100644 --- a/backend/crates/kalamdb-core/src/live/manager/tests.rs +++ b/backend/crates/kalamdb-core/src/live/manager/tests.rs @@ -155,7 +155,10 @@ async fn create_test_manager() -> (Arc, LiveQueryManager, Te base_session_context, Arc::clone(&app_ctx), ); - let sql_executor = Arc::new(SqlExecutor::new(app_ctx, false)); + let sql_executor = Arc::new(SqlExecutor::new( + app_ctx.clone(), + Arc::new(crate::sql::executor::handler_registry::HandlerRegistry::new(app_ctx.clone())), + )); manager.set_sql_executor(sql_executor); (connection_registry, manager, test_db) } diff --git a/backend/crates/kalamdb-core/src/live/models/connection.rs b/backend/crates/kalamdb-core/src/live/models/connection.rs index ab23e340f..8be993279 100644 --- a/backend/crates/kalamdb-core/src/live/models/connection.rs +++ b/backend/crates/kalamdb-core/src/live/models/connection.rs @@ -6,6 +6,7 @@ use dashmap::DashMap; use datafusion::sql::sqlparser::ast::Expr; use kalamdb_commons::ids::SeqId; use kalamdb_commons::models::{ConnectionId, ConnectionInfo, LiveQueryId, TableId, UserId}; +use kalamdb_commons::websocket::{CompressionType, ProtocolOptions, SerializationType}; use kalamdb_commons::Notification; use kalamdb_commons::Role; use parking_lot::Mutex; @@ -217,6 +218,9 @@ pub struct ConnectionState { user_id: OnceLock, user_role: OnceLock, + // === Protocol negotiation (set-once at auth time) === + protocol: OnceLock, + // === Heartbeat (atomic) === last_heartbeat_ms: AtomicU64, @@ -244,6 +248,7 @@ impl ConnectionState { auth_started: AtomicBool::new(false), user_id: OnceLock::new(), user_role: 
OnceLock::new(), + protocol: OnceLock::new(), last_heartbeat_ms: AtomicU64::new(epoch_millis()), subscriptions: DashMap::new(), notification_tx, @@ -305,6 +310,32 @@ impl ConnectionState { self.user_role.get().copied() } + // === Protocol === + + /// Store negotiated protocol options (set-once at auth time). + #[inline] + pub fn set_protocol(&self, opts: ProtocolOptions) { + let _ = self.protocol.set(opts); + } + + /// Negotiated serialization type (defaults to Json if not yet set). + #[inline] + pub fn serialization_type(&self) -> SerializationType { + self.protocol.get().map_or(SerializationType::Json, |p| p.serialization) + } + + /// Negotiated compression type (defaults to Gzip if not yet set). + #[inline] + pub fn compression_type(&self) -> CompressionType { + self.protocol.get().map_or(CompressionType::Gzip, |p| p.compression) + } + + /// Negotiated protocol options (defaults if not yet set). + #[inline] + pub fn protocol(&self) -> ProtocolOptions { + self.protocol.get().copied().unwrap_or_default() + } + // === Heartbeat === /// Update heartbeat timestamp — lock-free atomic store. 
diff --git a/backend/crates/kalamdb-core/src/operations/mod.rs b/backend/crates/kalamdb-core/src/operations/mod.rs index a47b2e400..e7e262f72 100644 --- a/backend/crates/kalamdb-core/src/operations/mod.rs +++ b/backend/crates/kalamdb-core/src/operations/mod.rs @@ -1,6 +1,7 @@ pub mod error; pub mod scan; pub mod service; +pub mod table_cleanup; pub mod types; pub use service::OperationService; diff --git a/backend/crates/kalamdb-core/src/operations/service.rs b/backend/crates/kalamdb-core/src/operations/service.rs index 951bfc4b2..ca523db79 100644 --- a/backend/crates/kalamdb-core/src/operations/service.rs +++ b/backend/crates/kalamdb-core/src/operations/service.rs @@ -175,10 +175,7 @@ impl OperationExecutor for OperationService { base, ); - let sql_executor = crate::sql::executor::SqlExecutor::new( - Arc::clone(&self.app_context), - false, // no password-complexity enforcement for DDL - ); + let sql_executor = self.app_context.sql_executor(); let result = sql_executor .execute(sql, &exec_ctx, Vec::new()) .await @@ -200,8 +197,7 @@ impl OperationExecutor for OperationService { base, ); - let sql_executor = - crate::sql::executor::SqlExecutor::new(Arc::clone(&self.app_context), false); + let sql_executor = self.app_context.sql_executor(); let result = sql_executor .execute(sql, &exec_ctx, Vec::new()) .await diff --git a/backend/crates/kalamdb-core/src/operations/table_cleanup.rs b/backend/crates/kalamdb-core/src/operations/table_cleanup.rs new file mode 100644 index 000000000..e8104d199 --- /dev/null +++ b/backend/crates/kalamdb-core/src/operations/table_cleanup.rs @@ -0,0 +1,193 @@ +//! Reusable table cleanup helpers. +//! +//! These functions are shared by the `DropTableHandler` (in `kalamdb-handlers`) +//! and the `CleanupExecutor` (in `kalamdb-jobs`). 
+ +use crate::app_context::AppContext; +use crate::error::KalamDbError; +use crate::error_extensions::KalamDbResultExt; +use crate::schema_registry::SchemaRegistry; +use kalamdb_commons::models::{StorageId, TableId}; +use kalamdb_commons::schemas::TableType; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; + +/// Cleanup operation types +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum CleanupOperation { + /// Drop table (delete all data and metadata) + DropTable, + /// Truncate table (delete all data, keep schema) + Truncate, + /// Remove orphaned files + RemoveOrphaned, +} + +/// Storage details needed to delete Parquet trees after metadata removal. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct StorageCleanupDetails { + /// Storage identifier + pub storage_id: StorageId, + /// Base directory resolved for this storage + pub base_directory: String, + /// Relative path template with static placeholders substituted + pub relative_path_template: String, +} + +/// Delete table data from RocksDB partitions. +/// +/// Returns the number of rows deleted (currently always 0 — partitions are +/// dropped wholesale, not row-by-row). 
+pub async fn cleanup_table_data_internal( + app_context: &Arc, + table_id: &TableId, + table_type: TableType, +) -> Result { + log::debug!( + "[CleanupHelper] Cleaning up table data for {:?} (type: {:?})", + table_id, + table_type + ); + + let rows_deleted = match table_type { + TableType::User => { + use kalamdb_tables::new_indexed_user_table_store; + + new_indexed_user_table_store(app_context.storage_backend(), table_id, "_pk") + .drop_all_partitions() + .map_err(|e| { + KalamDbError::Other(format!( + "Failed to drop user table partitions for {}: {}", + table_id, e + )) + })?; + + log::debug!("[CleanupHelper] Dropped all partitions for user table {:?}", table_id); + 0usize + }, + TableType::Shared => { + use kalamdb_tables::new_indexed_shared_table_store; + + new_indexed_shared_table_store(app_context.storage_backend(), table_id, "_pk") + .drop_all_partitions() + .map_err(|e| { + KalamDbError::Other(format!( + "Failed to drop shared table partitions for {}: {}", + table_id, e + )) + })?; + + log::debug!("[CleanupHelper] Dropped all partitions for shared table {:?}", table_id); + 0usize + }, + TableType::Stream => { + use kalamdb_commons::constants::ColumnFamilyNames; + use kalamdb_store::storage_trait::Partition as StorePartition; + + let partition_name = format!( + "{}{}", + ColumnFamilyNames::STREAM_TABLE_PREFIX, + table_id + ); + + let backend = app_context.storage_backend(); + let partition = StorePartition::new(partition_name.clone()); + + match backend.drop_partition(&partition) { + Ok(_) => { + log::debug!( + "[CleanupHelper] Dropped partition '{}' for stream table {:?}", + partition_name, + table_id + ); + 0usize + }, + Err(e) => { + let msg = e.to_string(); + if msg.to_lowercase().contains("not found") { + log::debug!( + "[CleanupHelper] Stream partition '{}' not found (likely in-memory)", + partition_name + ); + 0usize + } else { + return Err(KalamDbError::Other(format!( + "Failed to drop partition '{}' for stream table {}: {}", + partition_name, 
table_id, e + ))); + } + }, + } + }, + TableType::System => { + return Err(KalamDbError::InvalidOperation( + "Cannot cleanup system table data".to_string(), + )); + }, + }; + + log::debug!("[CleanupHelper] Deleted {} rows from table data", rows_deleted); + Ok(rows_deleted) +} + +/// Delete Parquet files from the storage backend for a given table. +/// +/// Returns the number of bytes freed (currently 0 — `delete_prefix` does not +/// report byte counts). +pub async fn cleanup_parquet_files_internal( + app_context: &Arc, + table_id: &TableId, + table_type: TableType, + storage: &StorageCleanupDetails, +) -> Result { + log::debug!( + "[CleanupHelper] Cleaning up Parquet files for {:?} using storage {}", + table_id, + storage.storage_id.as_str() + ); + + let storage_cached = + app_context.storage_registry().get_cached(&storage.storage_id)?.ok_or_else(|| { + KalamDbError::InvalidOperation(format!( + "Storage '{}' not found during cleanup", + storage.storage_id.as_str() + )) + })?; + + let files_deleted = storage_cached + .delete_prefix( + table_type, table_id, None, + ) + .await + .into_kalamdb_error("Failed to delete Parquet tree")? + .files_deleted; + + log::debug!("[CleanupHelper] Freed {} files from Parquet storage", files_deleted); + Ok(0) +} + +/// Remove table metadata from system tables (schema registry). +/// +/// If the table has been re-created since the drop, the cleanup is skipped to +/// avoid deleting the new definition. 
+pub async fn cleanup_metadata_internal( + _app_ctx: &AppContext, + schema_registry: &Arc, + table_id: &TableId, +) -> Result<(), KalamDbError> { + log::debug!("[CleanupHelper] Cleaning up metadata for {:?}", table_id); + + if schema_registry.get_table_if_exists(table_id)?.is_some() { + log::debug!( + "[CleanupHelper] Metadata present for {:?} (table re-created) - skipping cleanup", + table_id + ); + return Ok(()); + } + + schema_registry.delete_table_definition(table_id)?; + + log::debug!("[CleanupHelper] Metadata cleanup complete"); + Ok(()) +} diff --git a/backend/crates/kalamdb-core/src/slow_query_logger.rs b/backend/crates/kalamdb-core/src/slow_query_logger.rs index a2a710433..a3a2be234 100644 --- a/backend/crates/kalamdb-core/src/slow_query_logger.rs +++ b/backend/crates/kalamdb-core/src/slow_query_logger.rs @@ -94,7 +94,7 @@ impl SlowQueryLogger { /// This version is safe to use in unit tests that don't have a Tokio runtime. /// It creates a channel but doesn't spawn the background task, so logs are /// simply dropped (tests don't typically check slow query logs anyway). - #[cfg(test)] + #[cfg(any(test, feature = "test-helpers"))] pub fn new_test() -> Self { let (sender, _receiver) = mpsc::unbounded_channel::(); // Note: We drop the receiver immediately, so logs will just be discarded diff --git a/backend/crates/kalamdb-core/src/sql/executor/handler_adapter.rs b/backend/crates/kalamdb-core/src/sql/executor/handler_adapter.rs index fd112ef72..d51446818 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handler_adapter.rs +++ b/backend/crates/kalamdb-core/src/sql/executor/handler_adapter.rs @@ -167,78 +167,4 @@ macro_rules! 
extract_statement { }; } -#[cfg(test)] -mod tests { - use super::*; - use crate::sql::executor::handlers::namespace::CreateNamespaceHandler; - use crate::test_helpers::{create_test_session_simple, test_app_context_simple}; - use kalamdb_commons::models::{NamespaceId, UserId}; - use kalamdb_commons::Role; - use kalamdb_sql::ddl::CreateNamespaceStatement; - - #[ignore = "Requires Raft for CREATE NAMESPACE"] - #[tokio::test] - async fn test_generic_adapter() { - let app_ctx = test_app_context_simple(); - let handler = CreateNamespaceHandler::new(app_ctx); - - let adapter = TypedHandlerAdapter::new(handler, |stmt| match stmt.kind() { - kalamdb_sql::classifier::SqlStatementKind::CreateNamespace(s) => Some(s.clone()), - _ => None, - }); - let ctx = ExecutionContext::new( - UserId::from("test_user"), - Role::Dba, - create_test_session_simple(), - ); - - let stmt = kalamdb_sql::classifier::SqlStatement::new( - "CREATE NAMESPACE test_adapter_ns".to_string(), - kalamdb_sql::classifier::SqlStatementKind::CreateNamespace(CreateNamespaceStatement { - name: NamespaceId::new("test_adapter_ns"), - if_not_exists: false, - }), - ); - - let result = adapter.execute(stmt, vec![], &ctx).await; - assert!(result.is_ok()); - } - - #[ignore = "Requires Raft for CREATE NAMESPACE"] - #[tokio::test] - async fn test_wrong_statement_type() { - let app_ctx = test_app_context_simple(); - let handler = CreateNamespaceHandler::new(app_ctx); - - let adapter = TypedHandlerAdapter::new(handler, |stmt| match stmt.kind() { - kalamdb_sql::classifier::SqlStatementKind::CreateNamespace(s) => Some(s.clone()), - _ => None, - }); - let ctx = ExecutionContext::new( - UserId::from("test_user"), - Role::Dba, - create_test_session_simple(), - ); - - // Pass wrong statement type (ShowNamespaces instead of CreateNamespace) - let stmt = kalamdb_sql::classifier::SqlStatement::new( - "SHOW NAMESPACES".to_string(), - kalamdb_sql::classifier::SqlStatementKind::ShowNamespaces( - kalamdb_sql::ddl::ShowNamespacesStatement, 
- ), - ); - - let result = adapter.execute(stmt, vec![], &ctx).await; - assert!(result.is_err()); - match result { - Err(KalamDbError::InvalidOperation(msg)) => { - assert!(msg.contains("wrong statement type")); - }, - _ => panic!("Expected InvalidOperation error"), - } - } - - // NOTE: DynamicHandlerAdapter test removed - DML handlers now use TypedStatementHandler pattern - // and no longer implement StatementHandler trait. The test_generic_adapter test above demonstrates - // the TypedHandlerAdapter pattern which is the new approach. -} +// Tests for TypedHandlerAdapter live in kalamdb-handlers where the concrete handlers reside. diff --git a/backend/crates/kalamdb-core/src/sql/executor/handler_registry.rs b/backend/crates/kalamdb-core/src/sql/executor/handler_registry.rs index ea2bcf7ac..ef3242579 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handler_registry.rs +++ b/backend/crates/kalamdb-core/src/sql/executor/handler_registry.rs @@ -16,42 +16,10 @@ use crate::error::KalamDbError; use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; use crate::sql::executor::handler_adapter::{DynamicHandlerAdapter, TypedHandlerAdapter}; use dashmap::DashMap; -use kalamdb_commons::models::TableId; use kalamdb_sql::classifier::SqlStatement; use std::sync::Arc; use tracing::Instrument; -// Import all typed handlers -use crate::sql::executor::handlers::backup::{BackupDatabaseHandler, RestoreDatabaseHandler}; -use crate::sql::executor::handlers::cluster::{ - ClusterClearHandler, ClusterListHandler, ClusterPurgeHandler, ClusterSnapshotHandler, - ClusterStepdownHandler, ClusterTransferLeaderHandler, ClusterTriggerElectionHandler, -}; -use crate::sql::executor::handlers::compact::{CompactAllTablesHandler, CompactTableHandler}; -use crate::sql::executor::handlers::export::{ExportUserDataHandler, ShowExportHandler}; -use crate::sql::executor::handlers::flush::{FlushAllTablesHandler, FlushTableHandler}; -use 
crate::sql::executor::handlers::jobs::{KillJobHandler, KillLiveQueryHandler}; -use crate::sql::executor::handlers::namespace::{ - AlterNamespaceHandler, CreateNamespaceHandler, DropNamespaceHandler, ShowNamespacesHandler, - UseNamespaceHandler, -}; -use crate::sql::executor::handlers::storage::{ - AlterStorageHandler, CheckStorageHandler, CreateStorageHandler, DropStorageHandler, - ShowStoragesHandler, -}; -use crate::sql::executor::handlers::subscription::SubscribeHandler; -use crate::sql::executor::handlers::system::ShowManifestCacheHandler; -use crate::sql::executor::handlers::table::{ - AlterTableHandler, CreateTableHandler, DescribeTableHandler, DropTableHandler, - ShowStatsHandler, ShowTablesHandler, -}; -use crate::sql::executor::handlers::topics::{ - AckHandler, AddTopicSourceHandler, ClearTopicHandler, ConsumeHandler, CreateTopicHandler, - DropTopicHandler, -}; -use crate::sql::executor::handlers::user::{AlterUserHandler, CreateUserHandler, DropUserHandler}; -use crate::sql::executor::handlers::view::CreateViewHandler; - /// Trait for handlers that can process any SqlStatement variant /// /// This allows polymorphic handler dispatch without boxing every handler. @@ -116,597 +84,14 @@ pub struct HandlerRegistry { } impl HandlerRegistry { - /// Create a new handler registry with all handlers pre-registered + /// Create an empty handler registry. /// - /// This is called once during SqlExecutor initialization. - pub fn new(app_context: Arc, enforce_password_complexity: bool) -> Self { - use kalamdb_commons::models::{NamespaceId, StorageId}; - use kalamdb_commons::TableType; - use kalamdb_sql::classifier::SqlStatementKind; // Role not needed here - - let registry = Self { + /// Handler registration is performed externally via `kalamdb_handlers::register_all_handlers()`. 
+ pub fn new(app_context: Arc) -> Self { + Self { handlers: DashMap::new(), - app_context: app_context.clone(), - }; - - // ============================================================================ - // NAMESPACE HANDLERS - // ============================================================================ - - registry.register_typed( - SqlStatementKind::CreateNamespace(kalamdb_sql::ddl::CreateNamespaceStatement { - name: NamespaceId::new("_placeholder"), - if_not_exists: false, - }), - CreateNamespaceHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::CreateNamespace(s) => Some(s.clone()), - _ => None, - }, - ); - - registry.register_typed( - SqlStatementKind::AlterNamespace(kalamdb_sql::ddl::AlterNamespaceStatement { - name: NamespaceId::new("_placeholder"), - options: std::collections::HashMap::new(), - }), - AlterNamespaceHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::AlterNamespace(s) => Some(s.clone()), - _ => None, - }, - ); - - registry.register_typed( - SqlStatementKind::DropNamespace(kalamdb_sql::ddl::DropNamespaceStatement { - name: NamespaceId::new("_placeholder"), - if_exists: false, - cascade: false, - }), - DropNamespaceHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::DropNamespace(s) => Some(s.clone()), - _ => None, - }, - ); - - registry.register_typed( - SqlStatementKind::ShowNamespaces(kalamdb_sql::ddl::ShowNamespacesStatement), - ShowNamespacesHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::ShowNamespaces(s) => Some(s.clone()), - _ => None, - }, - ); - - registry.register_typed( - SqlStatementKind::UseNamespace(kalamdb_sql::ddl::UseNamespaceStatement { - namespace: NamespaceId::new("_placeholder"), - }), - UseNamespaceHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::UseNamespace(s) => Some(s.clone()), - _ => None, - }, - ); - - // 
============================================================================ - // STORAGE HANDLERS - // ============================================================================ - - registry.register_typed( - SqlStatementKind::CreateStorage(kalamdb_sql::ddl::CreateStorageStatement { - storage_id: StorageId::new("_placeholder"), - storage_type: kalamdb_system::providers::storages::models::StorageType::Filesystem, - storage_name: String::new(), - description: None, - base_directory: String::new(), - shared_tables_template: String::new(), - user_tables_template: String::new(), - credentials: None, - config_json: None, - }), - CreateStorageHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::CreateStorage(s) => Some(s.clone()), - _ => None, - }, - ); - - registry.register_typed( - SqlStatementKind::AlterStorage(kalamdb_sql::ddl::AlterStorageStatement { - storage_id: StorageId::new("_placeholder"), - storage_name: None, - description: None, - shared_tables_template: None, - user_tables_template: None, - config_json: None, - }), - AlterStorageHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::AlterStorage(s) => Some(s.clone()), - _ => None, - }, - ); - - registry.register_typed( - SqlStatementKind::DropStorage(kalamdb_sql::ddl::DropStorageStatement { - storage_id: kalamdb_commons::StorageId::from(""), - if_exists: false, - }), - DropStorageHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::DropStorage(s) => Some(s.clone()), - _ => None, - }, - ); - - registry.register_typed( - SqlStatementKind::ShowStorages(kalamdb_sql::ddl::ShowStoragesStatement), - ShowStoragesHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::ShowStorages(s) => Some(s.clone()), - _ => None, - }, - ); - - registry.register_typed( - SqlStatementKind::CheckStorage(kalamdb_sql::ddl::CheckStorageStatement { - storage_id: kalamdb_commons::StorageId::from(""), - extended: 
false, - }), - CheckStorageHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::CheckStorage(s) => Some(s.clone()), - _ => None, - }, - ); - - // ============================================================================ - // TABLE HANDLERS - // ============================================================================ - - // TABLE HANDLERS (Phase 8) - fully implemented - // Register table handlers with minimal placeholder instances for discriminant extraction - use kalamdb_sql::ddl::{ - AlterTableStatement, CreateTableStatement, CreateViewStatement, DescribeTableStatement, - DropTableStatement, ShowTableStatsStatement, ShowTablesStatement, - }; - // TableType already imported above; avoid duplicate imports - use datafusion::arrow::datatypes::Schema as ArrowSchema; - use kalamdb_commons::models::TableName; - use std::collections::HashMap; - - registry.register_typed( - SqlStatementKind::CreateTable(CreateTableStatement { - table_name: TableName::new("_placeholder"), - namespace_id: NamespaceId::new("_placeholder"), - table_type: TableType::Shared, - schema: Arc::new(ArrowSchema::empty()), - column_defaults: HashMap::new(), - primary_key_column: None, - storage_id: None, - use_user_storage: false, - flush_policy: None, - deleted_retention_hours: None, - ttl_seconds: None, - if_not_exists: false, - access_level: None, - }), - CreateTableHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::CreateTable(s) => Some(s.clone()), - _ => None, - }, - ); - registry.register_typed( - SqlStatementKind::CreateView(CreateViewStatement { - namespace_id: NamespaceId::new("_placeholder"), - view_name: TableName::new("_placeholder"), - or_replace: false, - if_not_exists: false, - columns: Vec::new(), - query_sql: String::new(), - original_sql: String::new(), - }), - CreateViewHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::CreateView(s) => Some(s.clone()), - _ => None, - }, - ); - 
registry.register_typed( - SqlStatementKind::AlterTable(AlterTableStatement { - table_name: TableName::new("_placeholder"), - namespace_id: NamespaceId::new("_placeholder"), - operation: kalamdb_sql::ddl::ColumnOperation::Drop { - column_name: "_placeholder".to_string(), - }, - }), - AlterTableHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::AlterTable(s) => Some(s.clone()), - _ => None, - }, - ); - registry.register_typed( - SqlStatementKind::DropTable(DropTableStatement { - table_name: TableName::new("_placeholder"), - namespace_id: NamespaceId::new("_placeholder"), - table_type: kalamdb_sql::ddl::TableKind::Shared, - if_exists: false, - }), - DropTableHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::DropTable(s) => Some(s.clone()), - _ => None, - }, - ); - registry.register_typed( - SqlStatementKind::ShowTables(ShowTablesStatement { namespace_id: None }), - ShowTablesHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::ShowTables(s) => Some(s.clone()), - _ => None, - }, - ); - registry.register_typed( - SqlStatementKind::DescribeTable(DescribeTableStatement { - namespace_id: None, - table_name: TableName::new("_placeholder"), - show_history: false, - }), - DescribeTableHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::DescribeTable(s) => Some(s.clone()), - _ => None, - }, - ); - registry.register_typed( - SqlStatementKind::ShowStats(ShowTableStatsStatement { - namespace_id: None, - table_name: TableName::new("_placeholder"), - }), - ShowStatsHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::ShowStats(s) => Some(s.clone()), - _ => None, - }, - ); - - // ============================================================================ - // SYSTEM HANDLERS - // ============================================================================ - registry.register_typed( - 
SqlStatementKind::ShowManifest(kalamdb_sql::ddl::ShowManifestStatement), - ShowManifestCacheHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::ShowManifest(s) => Some(s.clone()), - _ => None, - }, - ); - - // ============================================================================ - // FLUSH HANDLERS - // ============================================================================ - registry.register_typed( - SqlStatementKind::FlushTable(kalamdb_sql::ddl::FlushTableStatement { - namespace: NamespaceId::new("_placeholder"), - table_name: TableName::new("_placeholder"), - }), - FlushTableHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::FlushTable(s) => Some(s.clone()), - _ => None, - }, - ); - - registry.register_typed( - SqlStatementKind::FlushAllTables(kalamdb_sql::ddl::FlushAllTablesStatement { - namespace: NamespaceId::new("_placeholder"), - }), - FlushAllTablesHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::FlushAllTables(s) => Some(s.clone()), - _ => None, - }, - ); - - registry.register_typed( - SqlStatementKind::CompactTable(kalamdb_sql::ddl::CompactTableStatement { - namespace: NamespaceId::new("_placeholder"), - table_name: TableName::new("_placeholder"), - }), - CompactTableHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::CompactTable(s) => Some(s.clone()), - _ => None, - }, - ); - - registry.register_typed( - SqlStatementKind::CompactAllTables(kalamdb_sql::ddl::CompactAllTablesStatement { - namespace: NamespaceId::new("_placeholder"), - }), - CompactAllTablesHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::CompactAllTables(s) => Some(s.clone()), - _ => None, - }, - ); - - // ============================================================================ - // CLUSTER HANDLERS - // ============================================================================ - registry.register_dynamic( 
- SqlStatementKind::ClusterSnapshot, - ClusterSnapshotHandler::new(app_context.clone()), - ); - - registry.register_dynamic( - SqlStatementKind::ClusterPurge(0), - ClusterPurgeHandler::new(app_context.clone()), - ); - - registry.register_dynamic( - SqlStatementKind::ClusterTriggerElection, - ClusterTriggerElectionHandler::new(app_context.clone()), - ); - - registry.register_dynamic( - SqlStatementKind::ClusterTransferLeader(0), - ClusterTransferLeaderHandler::new(app_context.clone()), - ); - - registry.register_dynamic( - SqlStatementKind::ClusterStepdown, - ClusterStepdownHandler::new(app_context.clone()), - ); - - registry.register_dynamic( - SqlStatementKind::ClusterClear, - ClusterClearHandler::new(app_context.clone()), - ); - - registry.register_dynamic( - SqlStatementKind::ClusterList, - ClusterListHandler::new(app_context.clone()), - ); - - // ============================================================================ - // JOB HANDLERS - // ============================================================================ - registry.register_typed( - SqlStatementKind::KillJob(kalamdb_sql::ddl::JobCommand::Kill { - job_id: String::new(), - }), - KillJobHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::KillJob(s) => Some(s.clone()), - _ => None, - }, - ); - - // For discriminant extraction, we only need a placeholder instance. Use LiveQueryId::from_string. 
- let placeholder_live = - kalamdb_commons::models::LiveQueryId::from_string("user123-conn_abc-q1") - .unwrap_or_else(|_| { - kalamdb_commons::models::LiveQueryId::new( - kalamdb_commons::models::UserId::new("user123"), - kalamdb_commons::models::ConnectionId::new("conn_abc"), - "q1".to_string(), - ) - }); - registry.register_typed( - SqlStatementKind::KillLiveQuery(kalamdb_sql::ddl::KillLiveQueryStatement { - live_id: placeholder_live, - }), - KillLiveQueryHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::KillLiveQuery(s) => Some(s.clone()), - _ => None, - }, - ); - - // ============================================================================ - // USER HANDLERS - // ============================================================================ - use kalamdb_commons::AuthType; - use kalamdb_sql::ddl::{ - AlterUserStatement, CreateUserStatement, DropUserStatement, UserModification, - }; // Role not needed in placeholder (defaults handled by statement parsing) - registry.register_typed( - SqlStatementKind::CreateUser(CreateUserStatement { - username: "_placeholder".to_string(), - auth_type: AuthType::Internal, - role: kalamdb_commons::Role::User, - email: None, - password: None, - }), - CreateUserHandler::new(app_context.clone(), enforce_password_complexity), - |stmt| match stmt.kind() { - SqlStatementKind::CreateUser(s) => Some(s.clone()), - _ => None, - }, - ); - registry.register_typed( - SqlStatementKind::AlterUser(AlterUserStatement { - username: "_placeholder".to_string(), - modification: UserModification::SetEmail("_placeholder".to_string()), - }), - AlterUserHandler::new(app_context.clone(), enforce_password_complexity), - |stmt| match stmt.kind() { - SqlStatementKind::AlterUser(s) => Some(s.clone()), - _ => None, - }, - ); - registry.register_typed( - SqlStatementKind::DropUser(DropUserStatement { - username: "_placeholder".to_string(), - if_exists: false, - }), - DropUserHandler::new(app_context.clone()), - |stmt| match 
stmt.kind() { - SqlStatementKind::DropUser(s) => Some(s.clone()), - _ => None, - }, - ); - - // ============================================================================ - // SUBSCRIPTION HANDLER - // ============================================================================ - use kalamdb_sql::ddl::{SubscribeStatement, SubscriptionOptions}; - registry.register_typed( - SqlStatementKind::Subscribe(SubscribeStatement { - select_query: "SELECT * FROM _placeholder._placeholder".to_string(), - namespace: NamespaceId::new("_placeholder"), - table_name: TableName::new("_placeholder"), - options: SubscriptionOptions::default(), - }), - SubscribeHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::Subscribe(s) => Some(s.clone()), - _ => None, - }, - ); - - // ============================================================================ - // TOPIC PUB/SUB HANDLERS - // ============================================================================ - use kalamdb_commons::models::PayloadMode; - use kalamdb_sql::ddl::{ - AckStatement, AddTopicSourceStatement, ClearTopicStatement, ConsumePosition, - ConsumeStatement, CreateTopicStatement, DropTopicStatement, - }; - - registry.register_typed( - SqlStatementKind::CreateTopic(CreateTopicStatement { - topic_name: "_placeholder".to_string(), - partitions: None, - }), - CreateTopicHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::CreateTopic(s) => Some(s.clone()), - _ => None, - }, - ); - - registry.register_typed( - SqlStatementKind::DropTopic(DropTopicStatement { - topic_name: "_placeholder".to_string(), - }), - DropTopicHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::DropTopic(s) => Some(s.clone()), - _ => None, - }, - ); - - registry.register_typed( - SqlStatementKind::ClearTopic(ClearTopicStatement { - topic_id: kalamdb_commons::models::TopicId::new("_placeholder"), - }), - ClearTopicHandler::new(app_context.clone()), - |stmt| 
match stmt.kind() { - SqlStatementKind::ClearTopic(s) => Some(s.clone()), - _ => None, - }, - ); - - registry.register_typed( - SqlStatementKind::AddTopicSource(AddTopicSourceStatement { - topic_name: "_placeholder".to_string(), - table_id: TableId::from_strings("_placeholder", "_placeholder"), - operation: kalamdb_commons::models::TopicOp::Insert, - filter_expr: None, - payload_mode: PayloadMode::Full, - }), - AddTopicSourceHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::AddTopicSource(s) => Some(s.clone()), - _ => None, - }, - ); - - registry.register_typed( - SqlStatementKind::ConsumeTopic(ConsumeStatement { - topic_name: "_placeholder".to_string(), - group_id: None, - position: ConsumePosition::Latest, - limit: None, - }), - ConsumeHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::ConsumeTopic(s) => Some(s.clone()), - _ => None, - }, - ); - - registry.register_typed( - SqlStatementKind::AckTopic(AckStatement { - topic_name: "_placeholder".to_string(), - group_id: "_placeholder".to_string(), - partition_id: 0, - upto_offset: 0, - }), - AckHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::AckTopic(s) => Some(s.clone()), - _ => None, - }, - ); - - // ============================================================================ - // BACKUP & RESTORE HANDLERS - // ============================================================================ - use kalamdb_sql::ddl::{BackupDatabaseStatement, RestoreDatabaseStatement}; - - registry.register_typed( - SqlStatementKind::BackupDatabase(BackupDatabaseStatement { - backup_path: "_placeholder".to_string(), - }), - BackupDatabaseHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::BackupDatabase(s) => Some(s.clone()), - _ => None, - }, - ); - - registry.register_typed( - SqlStatementKind::RestoreDatabase(RestoreDatabaseStatement { - backup_path: "_placeholder".to_string(), - }), - 
RestoreDatabaseHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::RestoreDatabase(s) => Some(s.clone()), - _ => None, - }, - ); - - // ============================================================================ - // USER DATA EXPORT HANDLERS - // ============================================================================ - use kalamdb_sql::ddl::{ExportUserDataStatement, ShowExportStatement}; - - registry.register_typed( - SqlStatementKind::ExportUserData(ExportUserDataStatement), - ExportUserDataHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::ExportUserData(s) => Some(s.clone()), - _ => None, - }, - ); - - registry.register_typed( - SqlStatementKind::ShowExport(ShowExportStatement), - ShowExportHandler::new(app_context.clone()), - |stmt| match stmt.kind() { - SqlStatementKind::ShowExport(s) => Some(s.clone()), - _ => None, - }, - ); - - registry + app_context, + } } /// Register a typed handler for a specific statement variant @@ -722,7 +107,7 @@ impl HandlerRegistry { /// - `placeholder`: An instance of the SqlStatement variant (for discriminant extraction) /// - `handler`: The handler instance to register /// - `extractor`: Function to extract T from SqlStatement - fn register_typed( + pub fn register_typed( &self, placeholder: kalamdb_sql::classifier::SqlStatementKind, handler: H, @@ -756,7 +141,7 @@ impl HandlerRegistry { /// InsertHandler::new(app_context.clone()), /// ); /// ``` - fn register_dynamic( + pub fn register_dynamic( &self, placeholder: kalamdb_sql::classifier::SqlStatementKind, handler: H, @@ -845,7 +230,7 @@ mod tests { use kalamdb_sql::classifier::SqlStatementKind; let app_ctx = test_app_context_simple(); - let registry = HandlerRegistry::new(app_ctx, false); + let registry = HandlerRegistry::new(app_ctx); let ctx = test_context(); let stmt = SqlStatement::new( @@ -876,7 +261,7 @@ mod tests { use kalamdb_sql::classifier::SqlStatementKind; let app_ctx = 
test_app_context_simple(); - let registry = HandlerRegistry::new(app_ctx, false); + let registry = HandlerRegistry::new(app_ctx); let ctx = test_context(); // Use an unclassified statement (not in SqlStatementKind) @@ -905,8 +290,47 @@ mod tests { async fn test_registry_authorization_check() { use kalamdb_sql::classifier::SqlStatementKind; + // A minimal handler stub that requires DBA role (simulates DDL handler auth). + struct DbaOnlyStub; + + #[async_trait::async_trait] + impl SqlStatementHandler for DbaOnlyStub { + async fn execute( + &self, + _statement: SqlStatement, + _params: Vec, + _context: &ExecutionContext, + ) -> Result { + Ok(ExecutionResult::Success { message: "ok".to_string() }) + } + + async fn check_authorization( + &self, + _statement: &SqlStatement, + context: &ExecutionContext, + ) -> Result<(), KalamDbError> { + if !matches!(context.user_role(), kalamdb_commons::Role::Dba | kalamdb_commons::Role::System) { + Err(KalamDbError::Unauthorized( + "Requires DBA role".to_string(), + )) + } else { + Ok(()) + } + } + } + let app_ctx = test_app_context_simple(); - let registry = HandlerRegistry::new(app_ctx, false); + let registry = HandlerRegistry::new(app_ctx); + + // Register stub handler for CreateNamespace + let key = std::mem::discriminant(&SqlStatementKind::CreateNamespace( + kalamdb_sql::ddl::CreateNamespaceStatement { + name: kalamdb_commons::models::NamespaceId::new("_"), + if_not_exists: false, + }, + )); + registry.handlers.insert(key, Arc::new(DbaOnlyStub)); + let user_ctx = ExecutionContext::new( UserId::from("regular_user"), Role::User, diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/mod.rs b/backend/crates/kalamdb-core/src/sql/executor/handlers/mod.rs index 8ab0ec3d0..bab57ae8e 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/mod.rs +++ b/backend/crates/kalamdb-core/src/sql/executor/handlers/mod.rs @@ -18,22 +18,8 @@ use crate::error::KalamDbError; use kalamdb_sql::classifier::SqlStatement; -// Typed 
handlers organized by category -pub mod backup; -pub mod cluster; -pub mod compact; -pub mod export; -pub mod flush; -pub mod jobs; -pub mod namespace; -pub mod storage; -pub mod subscription; -pub mod system; -pub mod table; -pub mod topics; +// Typed handler trait (stays in core; handler impls are in kalamdb-handlers) pub mod typed; -pub mod user; -pub mod view; // Re-export core types from executor/models for convenience pub use crate::sql::context::{ExecutionContext, ExecutionMetadata, ExecutionResult, ScalarValue}; diff --git a/backend/crates/kalamdb-core/src/sql/executor/helpers/mod.rs b/backend/crates/kalamdb-core/src/sql/executor/helpers/mod.rs index 07c7ae901..b059e7b48 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/helpers/mod.rs +++ b/backend/crates/kalamdb-core/src/sql/executor/helpers/mod.rs @@ -1,9 +1 @@ -//! Shared helper utilities for SQL execution (moved from handlers/) - pub mod ast_parsing; -pub mod audit; -pub mod guards; -pub mod namespace_helpers; -pub mod storage; -pub mod table_creation; -pub mod tables; diff --git a/backend/crates/kalamdb-core/src/sql/executor/sql_executor.rs b/backend/crates/kalamdb-core/src/sql/executor/sql_executor.rs index 57583b0e9..91cd11554 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/sql_executor.rs +++ b/backend/crates/kalamdb-core/src/sql/executor/sql_executor.rs @@ -1,6 +1,6 @@ use super::{PreparedExecutionStatement, SqlExecutor}; use crate::error::KalamDbError; -use crate::sql::executor::helpers::guards::block_system_namespace_modification; +use crate::sql::executor::handler_registry::HandlerRegistry; use crate::sql::plan_cache::PlanCacheKey; use crate::sql::{ExecutionContext, ExecutionResult}; use arrow::array::RecordBatch; @@ -8,6 +8,7 @@ use datafusion::scalar::ScalarValue; use kalamdb_commons::conversions::arrow_json_conversion::arrow_value_to_scalar; use kalamdb_commons::models::TableId; use kalamdb_sql::classifier::{SqlStatement, SqlStatementKind}; +use std::sync::Arc; use 
std::time::Duration; use tracing::Instrument; @@ -74,12 +75,16 @@ impl SqlExecutor { return Ok(()); }; - block_system_namespace_modification( - table_id.namespace_id(), - Self::dml_operation_name(dml_kind), - "TABLE", - Some(table_id.table_name().as_str()), - ) + if table_id.namespace_id().is_system_namespace() { + let op = Self::dml_operation_name(dml_kind); + return Err(KalamDbError::InvalidOperation(format!( + "Cannot {} system table '{}.{}'", + op.to_lowercase(), + table_id.namespace_id().as_str(), + table_id.table_name().as_str(), + ))); + } + Ok(()) } /// Try to extract a typed `KalamDbError::NotLeader` from a `DataFusionError`. @@ -128,16 +133,11 @@ impl SqlExecutor { } } - /// Construct a new executor hooked into the shared `AppContext`. + /// Construct a new executor with a pre-built handler registry. pub fn new( app_context: std::sync::Arc, - enforce_password_complexity: bool, + handler_registry: Arc, ) -> Self { - let handler_registry = - std::sync::Arc::new(crate::sql::executor::handler_registry::HandlerRegistry::new( - app_context.clone(), - enforce_password_complexity, - )); let plan_cache = std::sync::Arc::new(crate::sql::plan_cache::PlanCache::with_config( app_context.config().execution.sql_plan_cache_max_entries, Duration::from_secs(app_context.config().execution.sql_plan_cache_ttl_seconds), diff --git a/backend/crates/kalamdb-core/src/sql/mod.rs b/backend/crates/kalamdb-core/src/sql/mod.rs index e7b8b8cde..d1ae07f5f 100644 --- a/backend/crates/kalamdb-core/src/sql/mod.rs +++ b/backend/crates/kalamdb-core/src/sql/mod.rs @@ -23,7 +23,7 @@ //! ```rust,ignore //! use kalamdb_core::sql::SqlExecutor; //! -//! let executor = SqlExecutor::new(app_context, false); +//! let executor = SqlExecutor::new(app_context, handler_registry); //! let result = executor.execute("SELECT * FROM users", None, None).await?; //! 
``` diff --git a/backend/crates/kalamdb-core/src/test_helpers.rs b/backend/crates/kalamdb-core/src/test_helpers.rs index 5645e4691..0a66ba0ca 100644 --- a/backend/crates/kalamdb-core/src/test_helpers.rs +++ b/backend/crates/kalamdb-core/src/test_helpers.rs @@ -1,28 +1,42 @@ -//! Test helpers compiled only for kalamdb-core unit tests. +//! Test helpers for kalamdb-core. +//! +//! All helpers are compiled when the `test-helpers` feature (or `cfg(test)`) is active. +//! They have no dependency on `kalamdb-jobs`, so they are safe to use from other crates' +//! test code via `kalamdb-core = { ..., features = ["test-helpers"] }` in dev-dependencies. use crate::app_context::AppContext; -use crate::jobs::executors::{ - BackupExecutor, CleanupExecutor, CompactExecutor, FlushExecutor, JobRegistry, RestoreExecutor, - RetentionExecutor, StreamEvictionExecutor, UserCleanupExecutor, VectorIndexExecutor, -}; use datafusion::prelude::SessionContext; -use kalamdb_commons::models::{NamespaceId, NodeId, StorageId}; +use kalamdb_commons::models::{NodeId, StorageId}; use kalamdb_store::test_utils::TestDb; use kalamdb_store::StorageBackend; use kalamdb_system::{StoragePartition, SystemTable}; -use once_cell::sync::OnceCell; use std::sync::Arc; + +// ── Imports needed by init_test_app_context / test_app_context ───────────────── +#[cfg(any(test, feature = "test-helpers"))] +use kalamdb_commons::models::NamespaceId; +#[cfg(any(test, feature = "test-helpers"))] +use once_cell::sync::OnceCell; +#[cfg(any(test, feature = "test-helpers"))] use std::sync::Once; +#[cfg(any(test, feature = "test-helpers"))] static TEST_DB: OnceCell> = OnceCell::new(); +#[cfg(any(test, feature = "test-helpers"))] static TEST_RUNTIME: OnceCell> = OnceCell::new(); +#[cfg(any(test, feature = "test-helpers"))] static TEST_APP_CONTEXT: OnceCell> = OnceCell::new(); +#[cfg(any(test, feature = "test-helpers"))] static INIT: Once = Once::new(); +#[cfg(any(test, feature = "test-helpers"))] static BOOTSTRAP_INIT: Once = 
Once::new(); +// ── Full helpers (no kalamdb-jobs dep) ───────────────────────────────────────── + /// Initialize AppContext with minimal test dependencies. /// /// This is used by unit tests inside `kalamdb-core/src/**` that run with `cfg(test)`. +#[cfg(any(test, feature = "test-helpers"))] pub fn init_test_app_context() -> Arc { INIT.call_once(|| { let mut column_families: Vec<&'static str> = SystemTable::all_tables() @@ -128,6 +142,7 @@ pub fn init_test_app_context() -> Arc { TEST_DB.get().expect("TEST_DB should be initialized").clone() } +#[cfg(any(test, feature = "test-helpers"))] pub fn test_app_context() -> Arc { init_test_app_context(); TEST_APP_CONTEXT.get().expect("TEST_APP_CONTEXT should be initialized").clone() @@ -192,27 +207,6 @@ pub fn test_app_context_simple() -> Arc { app_ctx } -pub fn create_test_job_registry() -> JobRegistry { - let registry = JobRegistry::new(); - - registry.register(Arc::new(FlushExecutor::new())); - registry.register(Arc::new(CleanupExecutor::new())); - registry.register(Arc::new(RetentionExecutor::new())); - registry.register(Arc::new(StreamEvictionExecutor::new())); - registry.register(Arc::new(UserCleanupExecutor::new())); - registry.register(Arc::new(CompactExecutor::new())); - registry.register(Arc::new(BackupExecutor::new())); - registry.register(Arc::new(RestoreExecutor::new())); - registry.register(Arc::new(VectorIndexExecutor::new())); - - registry -} - -pub fn create_test_session() -> Arc { - let app_ctx = test_app_context(); - Arc::new(app_ctx.session_factory().create_session()) -} - /// Creates a SessionContext using test_app_context_simple() (no Raft bootstrap). 
pub fn create_test_session_simple() -> Arc { let app_ctx = test_app_context_simple(); diff --git a/backend/crates/kalamdb-core/tests/test_context_functions.rs b/backend/crates/kalamdb-core/tests/test_context_functions.rs index 8bb6bc560..980c5c2ea 100644 --- a/backend/crates/kalamdb-core/tests/test_context_functions.rs +++ b/backend/crates/kalamdb-core/tests/test_context_functions.rs @@ -9,11 +9,18 @@ use kalamdb_core::app_context::AppContext; use kalamdb_core::sql::context::ExecutionContext; use kalamdb_core::sql::context::ExecutionResult; use kalamdb_core::sql::datafusion_session::DataFusionSessionFactory; +use kalamdb_core::sql::executor::handler_registry::HandlerRegistry; use kalamdb_core::sql::executor::SqlExecutor; use kalamdb_session::AuthSession; use kalamdb_store::test_utils::TestDb; use std::sync::Arc; +fn create_executor(app_context: Arc) -> SqlExecutor { + let registry = Arc::new(HandlerRegistry::new(app_context.clone())); + kalamdb_handlers::register_all_handlers(®istry, app_context.clone(), false); + SqlExecutor::new(app_context, registry) +} + /// Helper to create a simple test session with custom functions registered fn create_test_session() -> Arc { // Use DataFusionSessionFactory to get a session with all custom functions registered @@ -382,7 +389,7 @@ async fn test_sql_standard_context_function_aliases() { ); let exec_ctx = ExecutionContext::from_session(auth_session, app_context.base_session_context()); - let executor = SqlExecutor::new(app_context, false); + let executor = create_executor(app_context); let result = executor .execute( diff --git a/backend/crates/kalamdb-core/tests/test_cte_support.rs b/backend/crates/kalamdb-core/tests/test_cte_support.rs index 13db936e2..c0e61fbfe 100644 --- a/backend/crates/kalamdb-core/tests/test_cte_support.rs +++ b/backend/crates/kalamdb-core/tests/test_cte_support.rs @@ -14,12 +14,20 @@ use kalamdb_commons::{NodeId, Role, UserId}; use kalamdb_configs::ServerConfig; use 
kalamdb_core::app_context::AppContext; use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult}; +use kalamdb_core::sql::executor::handler_registry::HandlerRegistry; use kalamdb_core::sql::executor::SqlExecutor; use kalamdb_store::test_utils::TestDb; use kalamdb_system::providers::storages::models::StorageType; use kalamdb_system::Storage; use std::sync::Arc; +/// Helper to create a fully-wired SqlExecutor with all handlers registered. +fn create_executor(app_context: Arc) -> SqlExecutor { + let registry = Arc::new(HandlerRegistry::new(app_context.clone())); + kalamdb_handlers::register_all_handlers(®istry, app_context.clone(), false); + SqlExecutor::new(app_context, registry) +} + /// Helper to create AppContext with temporary RocksDB for testing async fn create_test_app_context() -> (Arc, TestDb) { let test_db = TestDb::with_system_tables().expect("Failed to create test database"); @@ -128,7 +136,7 @@ async fn setup_test_table( async fn test_simple_cte() { let (app_context, _temp_dir) = create_test_app_context().await; let exec_ctx = create_exec_context_with_app_context(app_context.clone(), "u_admin", Role::Dba); - let executor = SqlExecutor::new(app_context.clone(), false); + let executor = create_executor(app_context.clone()); // Setup test data setup_test_table(&executor, &exec_ctx).await.unwrap(); @@ -167,7 +175,7 @@ async fn test_simple_cte() { async fn test_cte_with_aggregation() { let (app_context, _temp_dir) = create_test_app_context().await; let exec_ctx = create_exec_context_with_app_context(app_context.clone(), "u_admin", Role::Dba); - let executor = SqlExecutor::new(app_context.clone(), false); + let executor = create_executor(app_context.clone()); // Setup test data setup_test_table(&executor, &exec_ctx).await.unwrap(); @@ -213,7 +221,7 @@ async fn test_cte_with_aggregation() { async fn test_multiple_ctes() { let (app_context, _temp_dir) = create_test_app_context().await; let exec_ctx = 
create_exec_context_with_app_context(app_context.clone(), "u_admin", Role::Dba); - let executor = SqlExecutor::new(app_context.clone(), false); + let executor = create_executor(app_context.clone()); // Setup test data setup_test_table(&executor, &exec_ctx).await.unwrap(); @@ -261,7 +269,7 @@ async fn test_multiple_ctes() { async fn test_chained_ctes() { let (app_context, _temp_dir) = create_test_app_context().await; let exec_ctx = create_exec_context_with_app_context(app_context.clone(), "u_admin", Role::Dba); - let executor = SqlExecutor::new(app_context.clone(), false); + let executor = create_executor(app_context.clone()); // Setup test data setup_test_table(&executor, &exec_ctx).await.unwrap(); @@ -308,7 +316,7 @@ async fn test_chained_ctes() { async fn test_cte_with_where_clause() { let (app_context, _temp_dir) = create_test_app_context().await; let exec_ctx = create_exec_context_with_app_context(app_context.clone(), "u_admin", Role::Dba); - let executor = SqlExecutor::new(app_context.clone(), false); + let executor = create_executor(app_context.clone()); // Setup test data setup_test_table(&executor, &exec_ctx).await.unwrap(); @@ -349,7 +357,7 @@ async fn test_cte_with_where_clause() { async fn test_cte_with_limit() { let (app_context, _temp_dir) = create_test_app_context().await; let exec_ctx = create_exec_context_with_app_context(app_context.clone(), "u_admin", Role::Dba); - let executor = SqlExecutor::new(app_context.clone(), false); + let executor = create_executor(app_context.clone()); // Setup test data setup_test_table(&executor, &exec_ctx).await.unwrap(); @@ -392,7 +400,7 @@ async fn test_cte_with_limit() { async fn test_cte_syntax_error() { let (app_context, _temp_dir) = create_test_app_context().await; let exec_ctx = create_exec_context_with_app_context(app_context.clone(), "u_admin", Role::Dba); - let executor = SqlExecutor::new(app_context.clone(), false); + let executor = create_executor(app_context.clone()); // Setup test data 
setup_test_table(&executor, &exec_ctx).await.unwrap(); @@ -421,7 +429,7 @@ async fn test_cte_syntax_error() { async fn test_cte_undefined_table() { let (app_context, _temp_dir) = create_test_app_context().await; let exec_ctx = create_exec_context_with_app_context(app_context.clone(), "u_admin", Role::Dba); - let executor = SqlExecutor::new(app_context.clone(), false); + let executor = create_executor(app_context.clone()); // No setup - testing undefined table diff --git a/backend/crates/kalamdb-core/tests/test_typed_handlers.rs b/backend/crates/kalamdb-core/tests/test_typed_handlers.rs index 533f53786..fd7bcd67a 100644 --- a/backend/crates/kalamdb-core/tests/test_typed_handlers.rs +++ b/backend/crates/kalamdb-core/tests/test_typed_handlers.rs @@ -11,12 +11,46 @@ use kalamdb_configs::ServerConfig; use kalamdb_core::app_context::AppContext; use kalamdb_core::sql::context::ExecutionContext; use kalamdb_core::sql::context::ExecutionResult; +use kalamdb_core::sql::executor::handler_registry::HandlerRegistry; use kalamdb_core::sql::executor::SqlExecutor; +use kalamdb_jobs::executors::{ + BackupExecutor, CleanupExecutor, CompactExecutor, FlushExecutor, JobRegistry, RestoreExecutor, + RetentionExecutor, StreamEvictionExecutor, UserCleanupExecutor, VectorIndexExecutor, +}; +use kalamdb_jobs::JobsManager; use kalamdb_store::test_utils::TestDb; use kalamdb_system::providers::storages::models::StorageType; use kalamdb_system::Storage; use std::sync::Arc; +fn create_executor(app_context: Arc) -> SqlExecutor { + let registry = Arc::new(HandlerRegistry::new(app_context.clone())); + kalamdb_handlers::register_all_handlers(®istry, app_context.clone(), false); + SqlExecutor::new(app_context, registry) +} + +fn init_job_manager(app_context: &Arc) { + let registry = Arc::new(JobRegistry::new()); + registry.register(Arc::new(FlushExecutor::new())); + registry.register(Arc::new(CleanupExecutor::new())); + registry.register(Arc::new(RetentionExecutor::new())); + 
registry.register(Arc::new(StreamEvictionExecutor::new())); + registry.register(Arc::new(UserCleanupExecutor::new())); + registry.register(Arc::new(CompactExecutor::new())); + registry.register(Arc::new(BackupExecutor::new())); + registry.register(Arc::new(RestoreExecutor::new())); + registry.register(Arc::new(VectorIndexExecutor::new())); + let jobs_provider = app_context.system_tables().jobs(); + let job_nodes_provider = app_context.system_tables().job_nodes(); + let job_manager = Arc::new(JobsManager::new( + jobs_provider, + job_nodes_provider, + registry, + Arc::clone(app_context), + )); + app_context.set_job_manager(job_manager.clone(), job_manager); +} + /// Helper to create AppContext with temporary RocksDB for testing async fn create_test_app_context() -> (Arc, TestDb) { let test_db = TestDb::with_system_tables().expect("Failed to create test database"); @@ -58,6 +92,7 @@ async fn create_test_app_context() -> (Arc, TestDb) { .expect("Failed to create default local storage"); } + init_job_manager(&app_context); (app_context, test_db) } @@ -65,7 +100,7 @@ async fn create_test_app_context() -> (Arc, TestDb) { #[ignore = "Requires Raft for CREATE NAMESPACE"] async fn test_typed_handler_create_namespace() { let (app_ctx, _test_db) = create_test_app_context().await; - let executor = SqlExecutor::new(Arc::clone(&app_ctx), false); + let executor = create_executor(Arc::clone(&app_ctx)); let exec_ctx = ExecutionContext::new(UserId::from("admin"), Role::Dba, app_ctx.base_session_context()); @@ -88,7 +123,7 @@ async fn test_typed_handler_create_namespace() { #[tokio::test] async fn test_typed_handler_authorization() { let (app_ctx, _temp_dir) = create_test_app_context().await; - let executor = SqlExecutor::new(Arc::clone(&app_ctx), false); + let executor = create_executor(Arc::clone(&app_ctx)); let user_ctx = ExecutionContext::new( UserId::from("regular_user"), Role::User, @@ -107,7 +142,7 @@ async fn test_classifier_prioritizes_select() { // This test verifies that 
SELECT queries go through the fast path // without attempting DDL parsing let (app_ctx, _temp_dir) = create_test_app_context().await; - let executor = SqlExecutor::new(Arc::clone(&app_ctx), false); + let executor = create_executor(Arc::clone(&app_ctx)); let exec_ctx = ExecutionContext::new(UserId::from("user"), Role::User, app_ctx.base_session_context()); @@ -123,7 +158,7 @@ async fn test_classifier_prioritizes_select() { #[ntest::timeout(90000)] async fn test_storage_flush_table_returns_noop_when_no_pending_writes() { let (app_ctx, _temp_dir) = create_test_app_context().await; - let executor = SqlExecutor::new(Arc::clone(&app_ctx), false); + let executor = create_executor(Arc::clone(&app_ctx)); let exec_ctx = ExecutionContext::new(UserId::from("admin"), Role::Dba, app_ctx.base_session_context()); @@ -156,7 +191,7 @@ async fn test_storage_flush_table_returns_noop_when_no_pending_writes() { #[ntest::timeout(90000)] async fn test_storage_flush_table_returns_noop_when_flush_already_in_progress() { let (app_ctx, _temp_dir) = create_test_app_context().await; - let executor = SqlExecutor::new(Arc::clone(&app_ctx), false); + let executor = create_executor(Arc::clone(&app_ctx)); let exec_ctx = ExecutionContext::new(UserId::from("admin"), Role::Dba, app_ctx.base_session_context()); diff --git a/backend/crates/kalamdb-core/tests/test_vector_search_sql.rs b/backend/crates/kalamdb-core/tests/test_vector_search_sql.rs index 1f32e00a5..0a2a7db55 100644 --- a/backend/crates/kalamdb-core/tests/test_vector_search_sql.rs +++ b/backend/crates/kalamdb-core/tests/test_vector_search_sql.rs @@ -8,8 +8,14 @@ use kalamdb_commons::{NodeId, Role, UserId}; use kalamdb_configs::ServerConfig; use kalamdb_core::app_context::AppContext; use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult}; +use kalamdb_core::sql::executor::handler_registry::HandlerRegistry; use kalamdb_core::sql::executor::SqlExecutor; use kalamdb_core::vector::flush_shared_scope_vectors; +use 
kalamdb_jobs::executors::{ + BackupExecutor, CleanupExecutor, CompactExecutor, FlushExecutor, JobRegistry, RestoreExecutor, + RetentionExecutor, StreamEvictionExecutor, UserCleanupExecutor, VectorIndexExecutor, +}; +use kalamdb_jobs::JobsManager; use kalamdb_store::test_utils::TestDb; use kalamdb_store::EntityStore; use kalamdb_store::Partition; @@ -21,6 +27,34 @@ use kalamdb_vector::{ }; use std::sync::Arc; +fn create_executor(app_context: Arc) -> SqlExecutor { + let registry = Arc::new(HandlerRegistry::new(app_context.clone())); + kalamdb_handlers::register_all_handlers(®istry, app_context.clone(), false); + SqlExecutor::new(app_context, registry) +} + +fn init_job_manager(app_context: &Arc) { + let registry = Arc::new(JobRegistry::new()); + registry.register(Arc::new(FlushExecutor::new())); + registry.register(Arc::new(CleanupExecutor::new())); + registry.register(Arc::new(RetentionExecutor::new())); + registry.register(Arc::new(StreamEvictionExecutor::new())); + registry.register(Arc::new(UserCleanupExecutor::new())); + registry.register(Arc::new(CompactExecutor::new())); + registry.register(Arc::new(BackupExecutor::new())); + registry.register(Arc::new(RestoreExecutor::new())); + registry.register(Arc::new(VectorIndexExecutor::new())); + let jobs_provider = app_context.system_tables().jobs(); + let job_nodes_provider = app_context.system_tables().job_nodes(); + let job_manager = Arc::new(JobsManager::new( + jobs_provider, + job_nodes_provider, + registry, + Arc::clone(app_context), + )); + app_context.set_job_manager(job_manager.clone(), job_manager); +} + async fn create_test_app_context() -> (Arc, TestDb) { let test_db = TestDb::with_system_tables().expect("Failed to create test database"); let storage_base_path = test_db.storage_dir().expect("Failed to create storage directory"); @@ -61,6 +95,7 @@ async fn create_test_app_context() -> (Arc, TestDb) { .expect("Failed to create default local storage"); } + init_job_manager(&app_context); (app_context, 
test_db) } @@ -171,7 +206,7 @@ async fn setup_vector_table(executor: &SqlExecutor, exec_ctx: &ExecutionContext) async fn test_cosine_distance_order_by_syntax_on_table() { let (app_context, _temp_dir) = create_test_app_context().await; let exec_ctx = create_exec_context(app_context.clone()); - let executor = SqlExecutor::new(app_context.clone(), false); + let executor = create_executor(app_context.clone()); setup_vector_table(&executor, &exec_ctx).await; @@ -200,7 +235,7 @@ async fn test_cosine_distance_order_by_syntax_on_table() { async fn test_create_and_drop_vector_index_lifecycle() { let (app_context, _temp_dir) = create_test_app_context().await; let exec_ctx = create_exec_context(app_context.clone()); - let executor = SqlExecutor::new(app_context.clone(), false); + let executor = create_executor(app_context.clone()); setup_vector_table(&executor, &exec_ctx).await; @@ -344,7 +379,7 @@ async fn test_create_and_drop_vector_index_lifecycle() { async fn test_multiple_vector_indexes_flush_and_selection() { let (app_context, _temp_dir) = create_test_app_context().await; let exec_ctx = create_exec_context(app_context.clone()); - let executor = SqlExecutor::new(app_context.clone(), false); + let executor = create_executor(app_context.clone()); setup_vector_table(&executor, &exec_ctx).await; @@ -452,7 +487,7 @@ async fn test_multiple_vector_indexes_flush_and_selection() { async fn test_vector_search_with_hundred_rows_real_embeddings() { let (app_context, _temp_dir) = create_test_app_context().await; let exec_ctx = create_exec_context(app_context.clone()); - let executor = SqlExecutor::new(app_context.clone(), false); + let executor = create_executor(app_context.clone()); execute_sql(&executor, &exec_ctx, "CREATE NAMESPACE test_ns").await; execute_sql( @@ -560,7 +595,7 @@ async fn test_vector_search_with_hundred_rows_real_embeddings() { async fn test_cosine_distance_query_combines_hot_and_cold_rows_after_index_flush() { let (app_context, _temp_dir) = 
create_test_app_context().await; let exec_ctx = create_exec_context(app_context.clone()); - let executor = SqlExecutor::new(app_context.clone(), false); + let executor = create_executor(app_context.clone()); execute_sql(&executor, &exec_ctx, "CREATE NAMESPACE test_ns").await; execute_sql( @@ -674,7 +709,7 @@ async fn test_cosine_distance_query_combines_hot_and_cold_rows_after_index_flush async fn test_vector_index_multiple_flush_cycles_and_watermark() { let (app_context, _temp_dir) = create_test_app_context().await; let exec_ctx = create_exec_context(app_context.clone()); - let executor = SqlExecutor::new(app_context.clone(), false); + let executor = create_executor(app_context.clone()); execute_sql(&executor, &exec_ctx, "CREATE NAMESPACE test_ns").await; execute_sql( @@ -823,7 +858,7 @@ async fn test_vector_index_multiple_flush_cycles_and_watermark() { async fn test_cosine_distance_query_combines_hot_and_cold_rows() { let (app_context, _temp_dir) = create_test_app_context().await; let exec_ctx = create_exec_context(app_context.clone()); - let executor = SqlExecutor::new(app_context.clone(), false); + let executor = create_executor(app_context.clone()); execute_sql(&executor, &exec_ctx, "CREATE NAMESPACE test_ns").await; execute_sql( @@ -872,7 +907,7 @@ async fn test_cosine_distance_query_combines_hot_and_cold_rows() { async fn test_vector_delete_stages_hot_tombstone_and_flushes_cleanup() { let (app_context, _temp_dir) = create_test_app_context().await; let exec_ctx = create_exec_context(app_context.clone()); - let executor = SqlExecutor::new(app_context.clone(), false); + let executor = create_executor(app_context.clone()); execute_sql(&executor, &exec_ctx, "CREATE NAMESPACE test_ns").await; execute_sql( @@ -1022,7 +1057,7 @@ async fn test_vector_delete_stages_hot_tombstone_and_flushes_cleanup() { async fn test_multiple_vector_indexes_with_mixed_rows_and_null_embeddings() { let (app_context, _temp_dir) = create_test_app_context().await; let exec_ctx = 
create_exec_context(app_context.clone()); - let executor = SqlExecutor::new(app_context.clone(), false); + let executor = create_executor(app_context.clone()); execute_sql(&executor, &exec_ctx, "CREATE NAMESPACE test_ns").await; execute_sql( diff --git a/backend/crates/kalamdb-handlers/Cargo.toml b/backend/crates/kalamdb-handlers/Cargo.toml new file mode 100644 index 000000000..6f2b6825d --- /dev/null +++ b/backend/crates/kalamdb-handlers/Cargo.toml @@ -0,0 +1,69 @@ +[package] +name = "kalamdb-handlers" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +authors.workspace = true +license.workspace = true +repository.workspace = true +description = "SQL statement handler implementations for KalamDB" + +[dependencies] +# Internal crates +kalamdb-auth = { path = "../kalamdb-auth", default-features = false } +kalamdb-commons = { path = "../kalamdb-commons", features = ["full"] } +kalamdb-configs = { path = "../kalamdb-configs" } +kalamdb-core = { path = "../kalamdb-core" } +kalamdb-filestore = { path = "../kalamdb-filestore" } +kalamdb-jobs = { path = "../kalamdb-jobs" } +kalamdb-publisher = { path = "../kalamdb-publisher" } +kalamdb-raft = { path = "../kalamdb-raft" } +kalamdb-session = { path = "../kalamdb-session" } +kalamdb-sql = { path = "../kalamdb-sql" } +kalamdb-store = { path = "../kalamdb-store" } +kalamdb-system = { path = "../kalamdb-system" } +kalamdb-tables = { path = "../kalamdb-tables" } +kalamdb-views = { path = "../kalamdb-views" } +kalamdb-vector = { path = "../kalamdb-vector" } + +# Apache Arrow ecosystem +arrow = { workspace = true } +datafusion = { workspace = true } + +# SQL parsing (used in handler helpers) +sqlparser = { workspace = true } + +# Concurrent data structures +dashmap = { workspace = true } + +# Async +async-trait = { workspace = true } +tokio = { workspace = true, features = ["sync", "time", "rt"] } + +# Serialization +serde = { workspace = true } +serde_json = { workspace = true } + +# Error 
handling +thiserror = { workspace = true } + +# Time handling +chrono = { workspace = true } + +# Logging +log = { workspace = true } + +# Tracing +tracing = { workspace = true } + +# UUID generation +uuid = { workspace = true } + +# ZIP archive creation +zip = { workspace = true } + +[lib] +doctest = false + +[dev-dependencies] +kalamdb-core = { path = "../kalamdb-core", features = ["test-helpers"] } diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/backup/backup_database.rs b/backend/crates/kalamdb-handlers/src/backup/backup_database.rs similarity index 86% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/backup/backup_database.rs rename to backend/crates/kalamdb-handlers/src/backup/backup_database.rs index 7fb431325..44a38b967 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/backup/backup_database.rs +++ b/backend/crates/kalamdb-handlers/src/backup/backup_database.rs @@ -1,10 +1,11 @@ //! Typed handler for BACKUP DATABASE statement -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::jobs::executors::backup::BackupParams; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_jobs::executors::backup::BackupParams; +use kalamdb_jobs::AppContextJobsExt; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_commons::JobId; use kalamdb_sql::ddl::BackupDatabaseStatement; use kalamdb_system::JobType; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/backup/mod.rs b/backend/crates/kalamdb-handlers/src/backup/mod.rs similarity index 100% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/backup/mod.rs rename to backend/crates/kalamdb-handlers/src/backup/mod.rs diff --git 
a/backend/crates/kalamdb-core/src/sql/executor/handlers/backup/restore_database.rs b/backend/crates/kalamdb-handlers/src/backup/restore_database.rs similarity index 89% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/backup/restore_database.rs rename to backend/crates/kalamdb-handlers/src/backup/restore_database.rs index 33c8c048b..21c052336 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/backup/restore_database.rs +++ b/backend/crates/kalamdb-handlers/src/backup/restore_database.rs @@ -1,10 +1,11 @@ //! Typed handler for RESTORE DATABASE statement -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::jobs::executors::restore::RestoreParams; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_jobs::executors::restore::RestoreParams; +use kalamdb_jobs::AppContextJobsExt; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_commons::JobId; use kalamdb_sql::ddl::RestoreDatabaseStatement; use kalamdb_system::JobType; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/clear.rs b/backend/crates/kalamdb-handlers/src/cluster/clear.rs similarity index 97% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/clear.rs rename to backend/crates/kalamdb-handlers/src/cluster/clear.rs index cfb90e03f..80422269f 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/clear.rs +++ b/backend/crates/kalamdb-handlers/src/cluster/clear.rs @@ -2,9 +2,9 @@ //! //! 
Clears old snapshots from the cluster storage -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::executor::handlers::{ +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::executor::handlers::{ ExecutionContext, ExecutionResult, ScalarValue, StatementHandler, }; use kalamdb_sql::classifier::{SqlStatement, SqlStatementKind}; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/list.rs b/backend/crates/kalamdb-handlers/src/cluster/list.rs similarity index 99% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/list.rs rename to backend/crates/kalamdb-handlers/src/cluster/list.rs index 407eaf982..954858642 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/list.rs +++ b/backend/crates/kalamdb-handlers/src/cluster/list.rs @@ -3,9 +3,9 @@ //! Lists all nodes in the cluster with their groups and health status //! Provides a formatted display for debugging and cluster overview -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::executor::handlers::{ +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::executor::handlers::{ ExecutionContext, ExecutionResult, ScalarValue, StatementHandler, }; use kalamdb_raft::{GroupId, NodeRole, RaftExecutor}; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/mod.rs b/backend/crates/kalamdb-handlers/src/cluster/mod.rs similarity index 100% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/mod.rs rename to backend/crates/kalamdb-handlers/src/cluster/mod.rs diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/purge.rs b/backend/crates/kalamdb-handlers/src/cluster/purge.rs similarity index 94% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/purge.rs rename to 
backend/crates/kalamdb-handlers/src/cluster/purge.rs index e439a713e..925d90ffb 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/purge.rs +++ b/backend/crates/kalamdb-handlers/src/cluster/purge.rs @@ -2,9 +2,9 @@ //! //! Purges Raft logs up to the specified index across all groups. -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::executor::handlers::{ +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::executor::handlers::{ ExecutionContext, ExecutionResult, ScalarValue, StatementHandler, }; use kalamdb_raft::RaftExecutor; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/snapshot.rs b/backend/crates/kalamdb-handlers/src/cluster/snapshot.rs similarity index 96% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/snapshot.rs rename to backend/crates/kalamdb-handlers/src/cluster/snapshot.rs index 4de05a528..51cfe9b1f 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/snapshot.rs +++ b/backend/crates/kalamdb-handlers/src/cluster/snapshot.rs @@ -2,9 +2,9 @@ //! //! 
Forces all Raft logs to be written to snapshots across the cluster -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::executor::handlers::{ +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::executor::handlers::{ ExecutionContext, ExecutionResult, ScalarValue, StatementHandler, }; use kalamdb_raft::RaftExecutor; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/stepdown.rs b/backend/crates/kalamdb-handlers/src/cluster/stepdown.rs similarity index 94% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/stepdown.rs rename to backend/crates/kalamdb-handlers/src/cluster/stepdown.rs index 6ed114e39..c4f58fc1c 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/stepdown.rs +++ b/backend/crates/kalamdb-handlers/src/cluster/stepdown.rs @@ -2,9 +2,9 @@ //! //! Attempts to step down leaders for all Raft groups. -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::executor::handlers::{ +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::executor::handlers::{ ExecutionContext, ExecutionResult, ScalarValue, StatementHandler, }; use kalamdb_raft::RaftExecutor; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/transfer_leader.rs b/backend/crates/kalamdb-handlers/src/cluster/transfer_leader.rs similarity index 95% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/transfer_leader.rs rename to backend/crates/kalamdb-handlers/src/cluster/transfer_leader.rs index c3e3b9fa7..e080d077b 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/transfer_leader.rs +++ b/backend/crates/kalamdb-handlers/src/cluster/transfer_leader.rs @@ -2,9 +2,9 @@ //! //! Attempts to transfer leadership for all Raft groups to the specified node. 
-use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::executor::handlers::{ +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::executor::handlers::{ ExecutionContext, ExecutionResult, ScalarValue, StatementHandler, }; use kalamdb_commons::models::NodeId; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/trigger_election.rs b/backend/crates/kalamdb-handlers/src/cluster/trigger_election.rs similarity index 95% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/trigger_election.rs rename to backend/crates/kalamdb-handlers/src/cluster/trigger_election.rs index b5fd25136..08acabc74 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/cluster/trigger_election.rs +++ b/backend/crates/kalamdb-handlers/src/cluster/trigger_election.rs @@ -2,9 +2,9 @@ //! //! Triggers leader election for all Raft groups. -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::executor::handlers::{ +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::executor::handlers::{ ExecutionContext, ExecutionResult, ScalarValue, StatementHandler, }; use kalamdb_raft::RaftExecutor; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/compact/compact_all.rs b/backend/crates/kalamdb-handlers/src/compact/compact_all.rs similarity index 91% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/compact/compact_all.rs rename to backend/crates/kalamdb-handlers/src/compact/compact_all.rs index 5f64a5977..81d87e2e5 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/compact/compact_all.rs +++ b/backend/crates/kalamdb-handlers/src/compact/compact_all.rs @@ -1,10 +1,11 @@ //! 
Typed handler for STORAGE COMPACT ALL statement -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::jobs::executors::compact::CompactParams; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_jobs::executors::compact::CompactParams; +use kalamdb_jobs::AppContextJobsExt; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_commons::models::{TableId, TableName}; use kalamdb_commons::schemas::TableType; use kalamdb_commons::JobId; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/compact/compact_table.rs b/backend/crates/kalamdb-handlers/src/compact/compact_table.rs similarity index 90% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/compact/compact_table.rs rename to backend/crates/kalamdb-handlers/src/compact/compact_table.rs index 592fe42c4..163c36d75 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/compact/compact_table.rs +++ b/backend/crates/kalamdb-handlers/src/compact/compact_table.rs @@ -1,10 +1,11 @@ //! 
Typed handler for STORAGE COMPACT TABLE statement -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::jobs::executors::compact::CompactParams; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_jobs::executors::compact::CompactParams; +use kalamdb_jobs::AppContextJobsExt; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_commons::models::TableId; use kalamdb_commons::schemas::TableType; use kalamdb_commons::JobId; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/compact/mod.rs b/backend/crates/kalamdb-handlers/src/compact/mod.rs similarity index 100% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/compact/mod.rs rename to backend/crates/kalamdb-handlers/src/compact/mod.rs diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/export/export_user_data.rs b/backend/crates/kalamdb-handlers/src/export/export_user_data.rs similarity index 86% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/export/export_user_data.rs rename to backend/crates/kalamdb-handlers/src/export/export_user_data.rs index aeb445330..1e325612d 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/export/export_user_data.rs +++ b/backend/crates/kalamdb-handlers/src/export/export_user_data.rs @@ -1,10 +1,11 @@ //! 
Typed handler for EXPORT USER DATA statement -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::jobs::executors::user_export::UserExportParams; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_jobs::executors::user_export::UserExportParams; +use kalamdb_jobs::AppContextJobsExt; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_commons::JobId; use kalamdb_sql::ddl::ExportUserDataStatement; use kalamdb_system::JobType; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/export/mod.rs b/backend/crates/kalamdb-handlers/src/export/mod.rs similarity index 100% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/export/mod.rs rename to backend/crates/kalamdb-handlers/src/export/mod.rs diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/export/show_export.rs b/backend/crates/kalamdb-handlers/src/export/show_export.rs similarity index 95% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/export/show_export.rs rename to backend/crates/kalamdb-handlers/src/export/show_export.rs index 35b415379..b3e64aa5a 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/export/show_export.rs +++ b/backend/crates/kalamdb-handlers/src/export/show_export.rs @@ -1,11 +1,12 @@ //! 
Typed handler for SHOW EXPORT statement -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use arrow::array::{RecordBatch, StringArray, TimestampMicrosecondArray}; use arrow::datatypes::{DataType, Field, Schema, TimeUnit}; +use kalamdb_jobs::AppContextJobsExt; use kalamdb_sql::ddl::ShowExportStatement; use kalamdb_system::providers::jobs::models::{Job, JobFilter, JobSortField, SortOrder}; use kalamdb_system::JobType; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/flush/flush_all.rs b/backend/crates/kalamdb-handlers/src/flush/flush_all.rs similarity index 94% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/flush/flush_all.rs rename to backend/crates/kalamdb-handlers/src/flush/flush_all.rs index a244c2747..4f3c8f782 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/flush/flush_all.rs +++ b/backend/crates/kalamdb-handlers/src/flush/flush_all.rs @@ -1,10 +1,11 @@ //! 
Typed handler for STORAGE FLUSH ALL statement -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::jobs::executors::flush::FlushParams; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_jobs::executors::flush::FlushParams; +use kalamdb_jobs::AppContextJobsExt; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_commons::JobId; use kalamdb_sql::ddl::FlushAllTablesStatement; use kalamdb_system::JobType; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/flush/flush_table.rs b/backend/crates/kalamdb-handlers/src/flush/flush_table.rs similarity index 92% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/flush/flush_table.rs rename to backend/crates/kalamdb-handlers/src/flush/flush_table.rs index 939f91178..125a55465 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/flush/flush_table.rs +++ b/backend/crates/kalamdb-handlers/src/flush/flush_table.rs @@ -1,10 +1,11 @@ //! 
Typed handler for STORAGE FLUSH TABLE statement -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::jobs::executors::flush::FlushParams; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_jobs::executors::flush::FlushParams; +use kalamdb_jobs::AppContextJobsExt; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_commons::models::TableId; use kalamdb_commons::{JobId, TableType}; use kalamdb_sql::ddl::FlushTableStatement; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/flush/mod.rs b/backend/crates/kalamdb-handlers/src/flush/mod.rs similarity index 100% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/flush/mod.rs rename to backend/crates/kalamdb-handlers/src/flush/mod.rs diff --git a/backend/crates/kalamdb-handlers/src/helpers/ast_parsing.rs b/backend/crates/kalamdb-handlers/src/helpers/ast_parsing.rs new file mode 100644 index 000000000..4da6ca646 --- /dev/null +++ b/backend/crates/kalamdb-handlers/src/helpers/ast_parsing.rs @@ -0,0 +1,143 @@ +//! Shared AST-level parsing utilities for fast-path DML. +//! +//! Consolidates `expr_to_scalar`, `sql_value_to_scalar`, and table-ID +//! extraction logic that was previously duplicated across `fast_insert.rs` +//! and `fast_point_dml.rs`. 
+ +use datafusion::scalar::ScalarValue; +use kalamdb_commons::TableId; +use sqlparser::ast::{ + Expr, Insert, ObjectNamePart, TableFactor, TableObject, TableWithJoins, UnaryOperator, Value, +}; + +// ────────────────────────────────────────────────────────────────────── +// Scalar / Value conversion +// ────────────────────────────────────────────────────────────────────── + +/// Convert a sqlparser `Expr` to a DataFusion `ScalarValue`. +/// +/// Only handles literal values and simple negation. Returns `Err` for +/// anything more complex (functions, casts, subqueries, etc.) so the +/// caller can fall back to DataFusion. +pub fn expr_to_scalar(expr: &Expr) -> Result { + match expr { + Expr::Value(val) => sql_value_to_scalar(&val.value), + Expr::UnaryOp { + op: UnaryOperator::Minus, + expr, + } => match strip_nested_expr(expr.as_ref()) { + Expr::Value(val) => match &val.value { + Value::Number(n, _) => { + if let Ok(i) = n.parse::() { + Ok(ScalarValue::Int64(Some(-i))) + } else if let Ok(f) = n.parse::() { + Ok(ScalarValue::Float64(Some(-f))) + } else { + Err("unsupported numeric literal") + } + }, + _ => Err("unsupported unary literal"), + }, + _ => Err("unsupported unary expression"), + }, + _ => Err("unsupported expression"), + } +} + +/// Convert a sqlparser `Value` to a DataFusion `ScalarValue`. +pub fn sql_value_to_scalar(value: &Value) -> Result { + match value { + Value::SingleQuotedString(s) | Value::DoubleQuotedString(s) => { + Ok(ScalarValue::Utf8(Some(s.clone()))) + }, + Value::Number(n, _) => { + if let Ok(i) = n.parse::() { + Ok(ScalarValue::Int64(Some(i))) + } else if let Ok(f) = n.parse::() { + Ok(ScalarValue::Float64(Some(f))) + } else { + Err("unsupported numeric literal") + } + }, + Value::Boolean(b) => Ok(ScalarValue::Boolean(Some(*b))), + Value::Null => Ok(ScalarValue::Null), + _ => Err("unsupported literal"), + } +} + +/// Recursively unwrap `Expr::Nested(inner)` parentheses. 
+pub fn strip_nested_expr(expr: &Expr) -> &Expr { + match expr { + Expr::Nested(inner) => strip_nested_expr(inner), + _ => expr, + } +} + +// ────────────────────────────────────────────────────────────────────── +// Table-ID extraction from AST nodes +// ────────────────────────────────────────────────────────────────────── + +/// Resolve a 1- or 2-part identifier list into a `TableId`, +/// applying `default_namespace` when only one part is present. +fn resolve_table_parts(parts: &[String], default_namespace: &str) -> Option { + match parts.len() { + 1 => Some(TableId::from_strings(default_namespace, &parts[0])), + 2 => Some(TableId::from_strings(&parts[0], &parts[1])), + _ => None, + } +} + +/// Collect identifier parts from an `ObjectName`. +fn object_name_parts(name: &sqlparser::ast::ObjectName) -> Vec { + name.0 + .iter() + .filter_map(|part| match part { + ObjectNamePart::Identifier(ident) => Some(ident.value.clone()), + _ => None, + }) + .collect() +} + +/// Extract `TableId` from an INSERT statement's table reference. +pub fn extract_table_id_from_insert(insert: &Insert, default_namespace: &str) -> Option { + match &insert.table { + TableObject::TableName(obj_name) => { + resolve_table_parts(&object_name_parts(obj_name), default_namespace) + }, + _ => None, + } +} + +/// Extract `TableId` from a `TableWithJoins` node (UPDATE / DELETE). +pub fn extract_table_id_from_table_with_joins( + table: &TableWithJoins, + default_namespace: &str, +) -> Option { + if !table.joins.is_empty() { + return None; + } + match &table.relation { + TableFactor::Table { name, .. } => { + resolve_table_parts(&object_name_parts(name), default_namespace) + }, + _ => None, + } +} + +/// Extract `TableId` from a DELETE statement (handles both +/// `FROM ` and bare `
` syntax). +pub fn extract_table_id_from_delete( + delete: &sqlparser::ast::Delete, + default_namespace: &str, +) -> Option { + let table = match &delete.from { + sqlparser::ast::FromTable::WithFromKeyword(tables) + | sqlparser::ast::FromTable::WithoutKeyword(tables) => { + if tables.len() != 1 { + return None; + } + tables.first()? + }, + }; + extract_table_id_from_table_with_joins(table, default_namespace) +} diff --git a/backend/crates/kalamdb-core/src/sql/executor/helpers/audit.rs b/backend/crates/kalamdb-handlers/src/helpers/audit.rs similarity index 98% rename from backend/crates/kalamdb-core/src/sql/executor/helpers/audit.rs rename to backend/crates/kalamdb-handlers/src/helpers/audit.rs index 950cadeab..8a3cfc8b4 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/helpers/audit.rs +++ b/backend/crates/kalamdb-handlers/src/helpers/audit.rs @@ -3,8 +3,8 @@ //! Helper functions for creating and managing audit log entries. //! **Phase 2 Task T018**: Centralized audit logging for SQL operations. -use crate::error::KalamDbError; -use crate::sql::context::ExecutionContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::ExecutionContext; use chrono::Utc; use kalamdb_commons::models::AuditLogId; use kalamdb_commons::{UserId, UserName}; @@ -172,7 +172,7 @@ pub fn log_auth_event_with_username( } } -use crate::app_context::AppContext; +use kalamdb_core::app_context::AppContext; use std::sync::Arc; /// Persist an audit entry to the system.audit_logs table diff --git a/backend/crates/kalamdb-core/src/sql/executor/helpers/guards.rs b/backend/crates/kalamdb-handlers/src/helpers/guards.rs similarity index 98% rename from backend/crates/kalamdb-core/src/sql/executor/helpers/guards.rs rename to backend/crates/kalamdb-handlers/src/helpers/guards.rs index 9691f83e7..766284422 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/helpers/guards.rs +++ b/backend/crates/kalamdb-handlers/src/helpers/guards.rs @@ -3,8 +3,8 @@ //! 
Common authorization and validation guards for DDL operations. //! These helpers consolidate repeated validation patterns across handlers. -use crate::error::KalamDbError; -use crate::sql::context::ExecutionContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::ExecutionContext; use kalamdb_commons::models::NamespaceId; /// Block modifications (ALTER, DROP, CREATE) on system namespaces. diff --git a/backend/crates/kalamdb-handlers/src/helpers/mod.rs b/backend/crates/kalamdb-handlers/src/helpers/mod.rs new file mode 100644 index 000000000..07c7ae901 --- /dev/null +++ b/backend/crates/kalamdb-handlers/src/helpers/mod.rs @@ -0,0 +1,9 @@ +//! Shared helper utilities for SQL execution (moved from handlers/) + +pub mod ast_parsing; +pub mod audit; +pub mod guards; +pub mod namespace_helpers; +pub mod storage; +pub mod table_creation; +pub mod tables; diff --git a/backend/crates/kalamdb-core/src/sql/executor/helpers/namespace_helpers.rs b/backend/crates/kalamdb-handlers/src/helpers/namespace_helpers.rs similarity index 96% rename from backend/crates/kalamdb-core/src/sql/executor/helpers/namespace_helpers.rs rename to backend/crates/kalamdb-handlers/src/helpers/namespace_helpers.rs index 6c1823d86..9907aa48d 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/helpers/namespace_helpers.rs +++ b/backend/crates/kalamdb-handlers/src/helpers/namespace_helpers.rs @@ -2,9 +2,9 @@ //! //! Handlers for CREATE NAMESPACE and DROP NAMESPACE statements. 
-use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult}; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult}; use datafusion::execution::context::SessionContext; use kalamdb_commons::models::NamespaceId; use kalamdb_sql::ddl::{CreateNamespaceStatement, DropNamespaceStatement}; @@ -137,7 +137,7 @@ pub async fn execute_drop_namespace( #[cfg(test)] mod tests { use super::*; - use crate::test_helpers::{create_test_session_simple, test_app_context_simple}; + use kalamdb_core::test_helpers::{create_test_session_simple, test_app_context_simple}; use datafusion::prelude::SessionContext; use kalamdb_commons::models::UserId; use kalamdb_commons::Role; diff --git a/backend/crates/kalamdb-core/src/sql/executor/helpers/storage.rs b/backend/crates/kalamdb-handlers/src/helpers/storage.rs similarity index 97% rename from backend/crates/kalamdb-core/src/sql/executor/helpers/storage.rs rename to backend/crates/kalamdb-handlers/src/helpers/storage.rs index bd2e5582d..fdd002912 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/helpers/storage.rs +++ b/backend/crates/kalamdb-handlers/src/helpers/storage.rs @@ -2,10 +2,10 @@ //! //! Handlers for CREATE STORAGE statements. 
-use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::error_extensions::KalamDbResultExt; -use crate::sql::context::{ExecutionContext, ExecutionResult}; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::error_extensions::KalamDbResultExt; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult}; use datafusion::execution::context::SessionContext; use kalamdb_commons::models::StorageId; use kalamdb_filestore::StorageHealthService; @@ -227,7 +227,7 @@ pub fn ensure_filesystem_directory(path: &str) -> Result<(), KalamDbError> { #[cfg(test)] mod tests { use super::*; - use crate::test_helpers::{create_test_session_simple, test_app_context_simple}; + use kalamdb_core::test_helpers::{create_test_session_simple, test_app_context_simple}; use datafusion::prelude::SessionContext; use kalamdb_commons::models::UserId; use kalamdb_commons::Role; diff --git a/backend/crates/kalamdb-core/src/sql/executor/helpers/table_creation.rs b/backend/crates/kalamdb-handlers/src/helpers/table_creation.rs similarity index 99% rename from backend/crates/kalamdb-core/src/sql/executor/helpers/table_creation.rs rename to backend/crates/kalamdb-handlers/src/helpers/table_creation.rs index 35bcd3712..b350121d2 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/helpers/table_creation.rs +++ b/backend/crates/kalamdb-handlers/src/helpers/table_creation.rs @@ -2,9 +2,9 @@ //! //! 
Provides unified logic for creating all table types (USER/SHARED/STREAM) -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::error_extensions::KalamDbResultExt; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::error_extensions::KalamDbResultExt; use kalamdb_commons::models::{NamespaceId, StorageId, TableAccess, TableId, UserId}; use kalamdb_commons::schemas::TableType; use kalamdb_commons::Role; diff --git a/backend/crates/kalamdb-core/src/sql/executor/helpers/tables.rs b/backend/crates/kalamdb-handlers/src/helpers/tables.rs similarity index 98% rename from backend/crates/kalamdb-core/src/sql/executor/helpers/tables.rs rename to backend/crates/kalamdb-handlers/src/helpers/tables.rs index 4544a3bd2..dfb29b18d 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/helpers/tables.rs +++ b/backend/crates/kalamdb-handlers/src/helpers/tables.rs @@ -3,9 +3,9 @@ //! Common utilities for DDL operations including schema transformations, //! table validation, and metadata storage. -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::error_extensions::KalamDbResultExt; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::error_extensions::KalamDbResultExt; use arrow::datatypes::Schema; use kalamdb_commons::schemas::{ColumnDefault, TableType}; use kalamdb_commons::StorageId; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/jobs/kill_job.rs b/backend/crates/kalamdb-handlers/src/jobs/kill_job.rs similarity index 84% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/jobs/kill_job.rs rename to backend/crates/kalamdb-handlers/src/jobs/kill_job.rs index 5ea6bd31b..dbe524d73 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/jobs/kill_job.rs +++ b/backend/crates/kalamdb-handlers/src/jobs/kill_job.rs @@ -1,10 +1,11 @@ //! 
Typed handler for KILL JOB statement -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_commons::JobId; +use kalamdb_jobs::AppContextJobsExt; use kalamdb_sql::ddl::JobCommand; use std::sync::Arc; @@ -34,7 +35,7 @@ impl TypedStatementHandler for KillJobHandler { job_manager.cancel_job(&job_id_typed).await?; // Log DDL operation (treating KILL JOB as an admin operation) - use crate::sql::executor::helpers::audit; + use crate::helpers::audit; let audit_entry = audit::log_ddl_operation(context, "KILL", "JOB", &job_id, None, None); audit::persist_audit_entry(&self.app_context, &audit_entry).await?; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/jobs/kill_live_query.rs b/backend/crates/kalamdb-handlers/src/jobs/kill_live_query.rs similarity index 85% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/jobs/kill_live_query.rs rename to backend/crates/kalamdb-handlers/src/jobs/kill_live_query.rs index 10b80654f..923ca7c10 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/jobs/kill_live_query.rs +++ b/backend/crates/kalamdb-handlers/src/jobs/kill_live_query.rs @@ -1,9 +1,9 @@ //! 
Typed handler for KILL LIVE QUERY statement -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_sql::ddl::KillLiveQueryStatement; use std::sync::Arc; @@ -30,7 +30,7 @@ impl TypedStatementHandler for KillLiveQueryHandler { manager.unregister_subscription_by_id(&statement.live_id).await?; // Log DDL operation - use crate::sql::executor::helpers::audit; + use crate::helpers::audit; let audit_entry = audit::log_ddl_operation( context, "KILL", diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/jobs/mod.rs b/backend/crates/kalamdb-handlers/src/jobs/mod.rs similarity index 100% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/jobs/mod.rs rename to backend/crates/kalamdb-handlers/src/jobs/mod.rs diff --git a/backend/crates/kalamdb-handlers/src/lib.rs b/backend/crates/kalamdb-handlers/src/lib.rs new file mode 100644 index 000000000..ba983b0f0 --- /dev/null +++ b/backend/crates/kalamdb-handlers/src/lib.rs @@ -0,0 +1,627 @@ +//! SQL statement handler implementations for KalamDB +//! +//! This crate contains all SQL handler implementations (CREATE TABLE, DROP NAMESPACE, etc.) +//! and shared helpers used by those handlers. Extracting these from `kalamdb-core` enables +//! parallel compilation, significantly reducing build times. +//! +//! # Architecture +//! +//! Handlers implement [`TypedStatementHandler`] or [`StatementHandler`] traits defined +//! in `kalamdb-core`. Registration into the [`HandlerRegistry`] happens via +//! [`register_all_handlers`], which is called from the server lifecycle code. 
+ +// Handler modules (organized by SQL statement category) +pub mod backup; +pub mod cluster; +pub mod compact; +pub mod export; +pub mod flush; +pub mod helpers; +pub mod jobs; +pub mod namespace; +pub mod storage; +pub mod subscription; +pub mod system; +pub mod table; +pub mod topics; +pub mod user; +pub mod view; + +use kalamdb_core::app_context::AppContext; +use kalamdb_core::sql::executor::handler_registry::HandlerRegistry; +use std::sync::Arc; + +/// Register all SQL statement handlers into the given registry. +/// +/// This function is called once during server startup from lifecycle code. +/// It populates the `HandlerRegistry` with concrete handler implementations +/// for every supported SQL statement type. +pub fn register_all_handlers( + registry: &HandlerRegistry, + app_context: Arc, + enforce_password_complexity: bool, +) { + use kalamdb_commons::models::{NamespaceId, StorageId}; + use kalamdb_commons::TableType; + use kalamdb_sql::classifier::SqlStatementKind; + + // ============================================================================ + // NAMESPACE HANDLERS + // ============================================================================ + + registry.register_typed( + SqlStatementKind::CreateNamespace(kalamdb_sql::ddl::CreateNamespaceStatement { + name: NamespaceId::new("_placeholder"), + if_not_exists: false, + }), + namespace::CreateNamespaceHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::CreateNamespace(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::AlterNamespace(kalamdb_sql::ddl::AlterNamespaceStatement { + name: NamespaceId::new("_placeholder"), + options: std::collections::HashMap::new(), + }), + namespace::AlterNamespaceHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::AlterNamespace(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + 
SqlStatementKind::DropNamespace(kalamdb_sql::ddl::DropNamespaceStatement { + name: NamespaceId::new("_placeholder"), + if_exists: false, + cascade: false, + }), + namespace::DropNamespaceHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::DropNamespace(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::ShowNamespaces(kalamdb_sql::ddl::ShowNamespacesStatement), + namespace::ShowNamespacesHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::ShowNamespaces(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::UseNamespace(kalamdb_sql::ddl::UseNamespaceStatement { + namespace: NamespaceId::new("_placeholder"), + }), + namespace::UseNamespaceHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::UseNamespace(s) => Some(s.clone()), + _ => None, + }, + ); + + // ============================================================================ + // STORAGE HANDLERS + // ============================================================================ + + registry.register_typed( + SqlStatementKind::CreateStorage(kalamdb_sql::ddl::CreateStorageStatement { + storage_id: StorageId::new("_placeholder"), + storage_type: kalamdb_system::providers::storages::models::StorageType::Filesystem, + storage_name: String::new(), + description: None, + base_directory: String::new(), + shared_tables_template: String::new(), + user_tables_template: String::new(), + credentials: None, + config_json: None, + }), + storage::CreateStorageHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::CreateStorage(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::AlterStorage(kalamdb_sql::ddl::AlterStorageStatement { + storage_id: StorageId::new("_placeholder"), + storage_name: None, + description: None, + shared_tables_template: None, + user_tables_template: None, + 
config_json: None, + }), + storage::AlterStorageHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::AlterStorage(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::DropStorage(kalamdb_sql::ddl::DropStorageStatement { + storage_id: kalamdb_commons::StorageId::from(""), + if_exists: false, + }), + storage::DropStorageHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::DropStorage(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::ShowStorages(kalamdb_sql::ddl::ShowStoragesStatement), + storage::ShowStoragesHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::ShowStorages(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::CheckStorage(kalamdb_sql::ddl::CheckStorageStatement { + storage_id: kalamdb_commons::StorageId::from(""), + extended: false, + }), + storage::CheckStorageHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::CheckStorage(s) => Some(s.clone()), + _ => None, + }, + ); + + // ============================================================================ + // TABLE HANDLERS + // ============================================================================ + use datafusion::arrow::datatypes::Schema as ArrowSchema; + use kalamdb_commons::models::TableName; + use kalamdb_sql::ddl::{ + AlterTableStatement, CreateTableStatement, CreateViewStatement, DescribeTableStatement, + DropTableStatement, ShowTableStatsStatement, ShowTablesStatement, + }; + use std::collections::HashMap; + + registry.register_typed( + SqlStatementKind::CreateTable(CreateTableStatement { + table_name: TableName::new("_placeholder"), + namespace_id: NamespaceId::new("_placeholder"), + table_type: TableType::Shared, + schema: Arc::new(ArrowSchema::empty()), + column_defaults: HashMap::new(), + primary_key_column: None, + storage_id: None, 
+ use_user_storage: false, + flush_policy: None, + deleted_retention_hours: None, + ttl_seconds: None, + if_not_exists: false, + access_level: None, + }), + table::CreateTableHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::CreateTable(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::CreateView(CreateViewStatement { + namespace_id: NamespaceId::new("_placeholder"), + view_name: TableName::new("_placeholder"), + or_replace: false, + if_not_exists: false, + columns: Vec::new(), + query_sql: String::new(), + original_sql: String::new(), + }), + view::CreateViewHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::CreateView(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::AlterTable(AlterTableStatement { + table_name: TableName::new("_placeholder"), + namespace_id: NamespaceId::new("_placeholder"), + operation: kalamdb_sql::ddl::ColumnOperation::Drop { + column_name: "_placeholder".to_string(), + }, + }), + table::AlterTableHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::AlterTable(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::DropTable(DropTableStatement { + table_name: TableName::new("_placeholder"), + namespace_id: NamespaceId::new("_placeholder"), + table_type: kalamdb_sql::ddl::TableKind::Shared, + if_exists: false, + }), + table::DropTableHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::DropTable(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::ShowTables(ShowTablesStatement { namespace_id: None }), + table::ShowTablesHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::ShowTables(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::DescribeTable(DescribeTableStatement { + 
namespace_id: None, + table_name: TableName::new("_placeholder"), + show_history: false, + }), + table::DescribeTableHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::DescribeTable(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::ShowStats(ShowTableStatsStatement { + namespace_id: None, + table_name: TableName::new("_placeholder"), + }), + table::ShowStatsHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::ShowStats(s) => Some(s.clone()), + _ => None, + }, + ); + + // ============================================================================ + // SYSTEM HANDLERS + // ============================================================================ + registry.register_typed( + SqlStatementKind::ShowManifest(kalamdb_sql::ddl::ShowManifestStatement), + system::ShowManifestCacheHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::ShowManifest(s) => Some(s.clone()), + _ => None, + }, + ); + + // ============================================================================ + // FLUSH HANDLERS + // ============================================================================ + registry.register_typed( + SqlStatementKind::FlushTable(kalamdb_sql::ddl::FlushTableStatement { + namespace: NamespaceId::new("_placeholder"), + table_name: TableName::new("_placeholder"), + }), + flush::FlushTableHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::FlushTable(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::FlushAllTables(kalamdb_sql::ddl::FlushAllTablesStatement { + namespace: NamespaceId::new("_placeholder"), + }), + flush::FlushAllTablesHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::FlushAllTables(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + 
SqlStatementKind::CompactTable(kalamdb_sql::ddl::CompactTableStatement { + namespace: NamespaceId::new("_placeholder"), + table_name: TableName::new("_placeholder"), + }), + compact::CompactTableHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::CompactTable(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::CompactAllTables(kalamdb_sql::ddl::CompactAllTablesStatement { + namespace: NamespaceId::new("_placeholder"), + }), + compact::CompactAllTablesHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::CompactAllTables(s) => Some(s.clone()), + _ => None, + }, + ); + + // ============================================================================ + // CLUSTER HANDLERS + // ============================================================================ + registry.register_dynamic( + SqlStatementKind::ClusterSnapshot, + cluster::ClusterSnapshotHandler::new(app_context.clone()), + ); + + registry.register_dynamic( + SqlStatementKind::ClusterPurge(0), + cluster::ClusterPurgeHandler::new(app_context.clone()), + ); + + registry.register_dynamic( + SqlStatementKind::ClusterTriggerElection, + cluster::ClusterTriggerElectionHandler::new(app_context.clone()), + ); + + registry.register_dynamic( + SqlStatementKind::ClusterTransferLeader(0), + cluster::ClusterTransferLeaderHandler::new(app_context.clone()), + ); + + registry.register_dynamic( + SqlStatementKind::ClusterStepdown, + cluster::ClusterStepdownHandler::new(app_context.clone()), + ); + + registry.register_dynamic( + SqlStatementKind::ClusterClear, + cluster::ClusterClearHandler::new(app_context.clone()), + ); + + registry.register_dynamic( + SqlStatementKind::ClusterList, + cluster::ClusterListHandler::new(app_context.clone()), + ); + + // ============================================================================ + // JOB HANDLERS + // ============================================================================ + 
registry.register_typed( + SqlStatementKind::KillJob(kalamdb_sql::ddl::JobCommand::Kill { + job_id: String::new(), + }), + jobs::KillJobHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::KillJob(s) => Some(s.clone()), + _ => None, + }, + ); + + let placeholder_live = + kalamdb_commons::models::LiveQueryId::from_string("user123-conn_abc-q1") + .unwrap_or_else(|_| { + kalamdb_commons::models::LiveQueryId::new( + kalamdb_commons::models::UserId::new("user123"), + kalamdb_commons::models::ConnectionId::new("conn_abc"), + "q1".to_string(), + ) + }); + registry.register_typed( + SqlStatementKind::KillLiveQuery(kalamdb_sql::ddl::KillLiveQueryStatement { + live_id: placeholder_live, + }), + jobs::KillLiveQueryHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::KillLiveQuery(s) => Some(s.clone()), + _ => None, + }, + ); + + // ============================================================================ + // USER HANDLERS + // ============================================================================ + use kalamdb_commons::AuthType; + use kalamdb_sql::ddl::{AlterUserStatement, CreateUserStatement, DropUserStatement, UserModification}; + + registry.register_typed( + SqlStatementKind::CreateUser(CreateUserStatement { + username: "_placeholder".to_string(), + auth_type: AuthType::Internal, + role: kalamdb_commons::Role::User, + email: None, + password: None, + }), + user::CreateUserHandler::new(app_context.clone(), enforce_password_complexity), + |stmt| match stmt.kind() { + SqlStatementKind::CreateUser(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::AlterUser(AlterUserStatement { + username: "_placeholder".to_string(), + modification: UserModification::SetEmail("_placeholder".to_string()), + }), + user::AlterUserHandler::new(app_context.clone(), enforce_password_complexity), + |stmt| match stmt.kind() { + SqlStatementKind::AlterUser(s) => Some(s.clone()), + _ => 
None, + }, + ); + + registry.register_typed( + SqlStatementKind::DropUser(DropUserStatement { + username: "_placeholder".to_string(), + if_exists: false, + }), + user::DropUserHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::DropUser(s) => Some(s.clone()), + _ => None, + }, + ); + + // ============================================================================ + // SUBSCRIPTION HANDLER + // ============================================================================ + use kalamdb_sql::ddl::{SubscribeStatement, SubscriptionOptions}; + + registry.register_typed( + SqlStatementKind::Subscribe(SubscribeStatement { + select_query: "SELECT * FROM _placeholder._placeholder".to_string(), + namespace: NamespaceId::new("_placeholder"), + table_name: TableName::new("_placeholder"), + options: SubscriptionOptions::default(), + }), + subscription::SubscribeHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::Subscribe(s) => Some(s.clone()), + _ => None, + }, + ); + + // ============================================================================ + // TOPIC PUB/SUB HANDLERS + // ============================================================================ + use kalamdb_commons::models::{PayloadMode, TableId, TopicId}; + use kalamdb_sql::ddl::{ + AckStatement, AddTopicSourceStatement, ClearTopicStatement, ConsumePosition, + ConsumeStatement, CreateTopicStatement, DropTopicStatement, + }; + + registry.register_typed( + SqlStatementKind::CreateTopic(CreateTopicStatement { + topic_name: "_placeholder".to_string(), + partitions: None, + }), + topics::CreateTopicHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::CreateTopic(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::DropTopic(DropTopicStatement { + topic_name: "_placeholder".to_string(), + }), + topics::DropTopicHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + 
SqlStatementKind::DropTopic(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::ClearTopic(ClearTopicStatement { + topic_id: TopicId::new("_placeholder"), + }), + topics::ClearTopicHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::ClearTopic(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::AddTopicSource(AddTopicSourceStatement { + topic_name: "_placeholder".to_string(), + table_id: TableId::from_strings("_placeholder", "_placeholder"), + operation: kalamdb_commons::models::TopicOp::Insert, + filter_expr: None, + payload_mode: PayloadMode::Full, + }), + topics::AddTopicSourceHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::AddTopicSource(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::ConsumeTopic(ConsumeStatement { + topic_name: "_placeholder".to_string(), + group_id: None, + position: ConsumePosition::Latest, + limit: None, + }), + topics::ConsumeHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::ConsumeTopic(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::AckTopic(AckStatement { + topic_name: "_placeholder".to_string(), + group_id: "_placeholder".to_string(), + partition_id: 0, + upto_offset: 0, + }), + topics::AckHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::AckTopic(s) => Some(s.clone()), + _ => None, + }, + ); + + // ============================================================================ + // BACKUP & RESTORE HANDLERS + // ============================================================================ + use kalamdb_sql::ddl::{BackupDatabaseStatement, RestoreDatabaseStatement}; + + registry.register_typed( + SqlStatementKind::BackupDatabase(BackupDatabaseStatement { + backup_path: "_placeholder".to_string(), + }), + 
backup::BackupDatabaseHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::BackupDatabase(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::RestoreDatabase(RestoreDatabaseStatement { + backup_path: "_placeholder".to_string(), + }), + backup::RestoreDatabaseHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::RestoreDatabase(s) => Some(s.clone()), + _ => None, + }, + ); + + // ============================================================================ + // USER DATA EXPORT HANDLERS + // ============================================================================ + use kalamdb_sql::ddl::{ExportUserDataStatement, ShowExportStatement}; + + registry.register_typed( + SqlStatementKind::ExportUserData(ExportUserDataStatement), + export::ExportUserDataHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::ExportUserData(s) => Some(s.clone()), + _ => None, + }, + ); + + registry.register_typed( + SqlStatementKind::ShowExport(ShowExportStatement), + export::ShowExportHandler::new(app_context.clone()), + |stmt| match stmt.kind() { + SqlStatementKind::ShowExport(s) => Some(s.clone()), + _ => None, + }, + ); +} diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/namespace/alter.rs b/backend/crates/kalamdb-handlers/src/namespace/alter.rs similarity index 91% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/namespace/alter.rs rename to backend/crates/kalamdb-handlers/src/namespace/alter.rs index a1f71b986..d3f7911cc 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/namespace/alter.rs +++ b/backend/crates/kalamdb-handlers/src/namespace/alter.rs @@ -1,10 +1,10 @@ //! 
Typed DDL handler for ALTER NAMESPACE statements -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; -use crate::sql::executor::helpers::guards::require_admin; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; +use crate::helpers::guards::require_admin; use kalamdb_commons::models::NamespaceId; use kalamdb_sql::ddl::AlterNamespaceStatement; use std::sync::Arc; @@ -69,7 +69,7 @@ impl TypedStatementHandler for AlterNamespaceHandler { .map_err(|e| KalamDbError::ExecutionError(format!("Task join error: {}", e)))??; // Log DDL operation - use crate::sql::executor::helpers::audit; + use crate::helpers::audit; let audit_entry = audit::log_ddl_operation( _context, "ALTER", @@ -89,7 +89,7 @@ impl TypedStatementHandler for AlterNamespaceHandler { _statement: &AlterNamespaceStatement, context: &ExecutionContext, ) -> Result<(), KalamDbError> { - use crate::sql::executor::helpers::guards::block_anonymous_write; + use crate::helpers::guards::block_anonymous_write; // T050: Block anonymous users from DDL operations block_anonymous_write(context, "ALTER NAMESPACE")?; @@ -101,7 +101,7 @@ impl TypedStatementHandler for AlterNamespaceHandler { #[cfg(test)] mod tests { use super::*; - use crate::test_helpers::{create_test_session_simple, test_app_context_simple}; + use kalamdb_core::test_helpers::{create_test_session_simple, test_app_context_simple}; use kalamdb_commons::models::UserId; use kalamdb_commons::Role; use std::sync::Arc; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/namespace/create.rs b/backend/crates/kalamdb-handlers/src/namespace/create.rs similarity index 95% rename from 
backend/crates/kalamdb-core/src/sql/executor/handlers/namespace/create.rs rename to backend/crates/kalamdb-handlers/src/namespace/create.rs index cfef25269..d02b7c819 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/namespace/create.rs +++ b/backend/crates/kalamdb-handlers/src/namespace/create.rs @@ -6,11 +6,11 @@ //! When a namespace is created, it is also registered as a DataFusion schema //! so that queries like `SELECT * FROM namespace.table` work correctly. -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; -use crate::sql::executor::helpers::guards::require_admin; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; +use crate::helpers::guards::require_admin; use datafusion::catalog::MemorySchemaProvider; use kalamdb_commons::models::{NamespaceId, UserId}; use kalamdb_sql::ddl::CreateNamespaceStatement; @@ -116,7 +116,7 @@ impl TypedStatementHandler for CreateNamespaceHandler self.register_namespace_schema(&namespace_id)?; // Log DDL operation - use crate::sql::executor::helpers::audit; + use crate::helpers::audit; let audit_entry = audit::log_ddl_operation(context, "CREATE", "NAMESPACE", name, None, None); audit::persist_audit_entry(&self.app_context, &audit_entry).await?; @@ -130,7 +130,7 @@ impl TypedStatementHandler for CreateNamespaceHandler _statement: &CreateNamespaceStatement, context: &ExecutionContext, ) -> Result<(), KalamDbError> { - use crate::sql::executor::helpers::guards::block_anonymous_write; + use crate::helpers::guards::block_anonymous_write; // T050: Block anonymous users from DDL operations block_anonymous_write(context, "CREATE NAMESPACE")?; @@ -142,7 +142,7 @@ impl 
TypedStatementHandler for CreateNamespaceHandler #[cfg(test)] mod tests { use super::*; - use crate::test_helpers::{create_test_session_simple, test_app_context_simple}; + use kalamdb_core::test_helpers::{create_test_session_simple, test_app_context_simple}; use kalamdb_commons::models::UserId; use kalamdb_commons::Role; use kalamdb_system::Namespace; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/namespace/drop.rs b/backend/crates/kalamdb-handlers/src/namespace/drop.rs similarity index 93% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/namespace/drop.rs rename to backend/crates/kalamdb-handlers/src/namespace/drop.rs index 541f32b72..0b130ee02 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/namespace/drop.rs +++ b/backend/crates/kalamdb-handlers/src/namespace/drop.rs @@ -3,11 +3,11 @@ //! When a namespace is dropped, its DataFusion schema becomes unavailable. //! Any queries referencing tables in the dropped namespace will fail. -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; -use crate::sql::executor::helpers::guards::require_admin; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; +use crate::helpers::guards::require_admin; use kalamdb_commons::models::{NamespaceId, TableId}; use kalamdb_sql::ddl::DropNamespaceStatement; use std::sync::Arc; @@ -86,7 +86,7 @@ impl TypedStatementHandler for DropNamespaceHandler { .await .map_err(|e| KalamDbError::ExecutionError(format!("DROP TABLE failed: {}", e)))?; - use crate::sql::executor::helpers::audit; + use crate::helpers::audit; let audit_entry = audit::log_ddl_operation( context, "DROP", @@ -109,7 +109,7 @@ impl 
TypedStatementHandler for DropNamespaceHandler { self.deregister_namespace_schema(&namespace_id); // Log DDL operation - use crate::sql::executor::helpers::audit; + use crate::helpers::audit; let audit_entry = audit::log_ddl_operation( context, "DROP", @@ -129,7 +129,7 @@ impl TypedStatementHandler for DropNamespaceHandler { _statement: &DropNamespaceStatement, context: &ExecutionContext, ) -> Result<(), KalamDbError> { - use crate::sql::executor::helpers::guards::block_anonymous_write; + use crate::helpers::guards::block_anonymous_write; // T050: Block anonymous users from DDL operations block_anonymous_write(context, "DROP NAMESPACE")?; @@ -141,7 +141,7 @@ impl TypedStatementHandler for DropNamespaceHandler { #[cfg(test)] mod tests { use super::*; - use crate::test_helpers::{create_test_session_simple, test_app_context_simple}; + use kalamdb_core::test_helpers::{create_test_session_simple, test_app_context_simple}; use kalamdb_commons::models::UserId; use kalamdb_commons::Role; use std::sync::Arc; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/namespace/mod.rs b/backend/crates/kalamdb-handlers/src/namespace/mod.rs similarity index 100% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/namespace/mod.rs rename to backend/crates/kalamdb-handlers/src/namespace/mod.rs diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/namespace/show.rs b/backend/crates/kalamdb-handlers/src/namespace/show.rs similarity index 89% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/namespace/show.rs rename to backend/crates/kalamdb-handlers/src/namespace/show.rs index d53b6e153..212657be8 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/namespace/show.rs +++ b/backend/crates/kalamdb-handlers/src/namespace/show.rs @@ -1,9 +1,9 @@ //! 
Typed DDL handler for SHOW NAMESPACES statements -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_sql::ddl::ShowNamespacesStatement; use std::sync::Arc; @@ -38,7 +38,7 @@ impl TypedStatementHandler for ShowNamespacesHandler { // Log query operation let duration = start_time.elapsed().as_secs_f64() * 1000.0; - use crate::sql::executor::helpers::audit; + use crate::helpers::audit; let audit_entry = audit::log_query_operation(context, "SHOW", "NAMESPACES", duration, None); audit::persist_audit_entry(&self.app_context, &audit_entry).await?; @@ -64,7 +64,7 @@ impl TypedStatementHandler for ShowNamespacesHandler { #[cfg(test)] mod tests { use super::*; - use crate::test_helpers::{create_test_session_simple, test_app_context_simple}; + use kalamdb_core::test_helpers::{create_test_session_simple, test_app_context_simple}; use kalamdb_commons::models::UserId; use kalamdb_commons::Role; use std::sync::Arc; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/namespace/use_namespace.rs b/backend/crates/kalamdb-handlers/src/namespace/use_namespace.rs similarity index 91% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/namespace/use_namespace.rs rename to backend/crates/kalamdb-handlers/src/namespace/use_namespace.rs index 20cd52db9..a1f47e1da 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/namespace/use_namespace.rs +++ b/backend/crates/kalamdb-handlers/src/namespace/use_namespace.rs @@ -6,10 +6,10 @@ //! After executing `USE namespace1`, queries like `SELECT * FROM users` //! will resolve to `kalam.namespace1.users`. 
-use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_sql::ddl::UseNamespaceStatement; use std::sync::Arc; @@ -86,7 +86,7 @@ impl TypedStatementHandler for UseNamespaceHandler { #[cfg(test)] mod tests { use super::*; - use crate::test_helpers::{create_test_session_simple, test_app_context_simple}; + use kalamdb_core::test_helpers::{create_test_session_simple, test_app_context_simple}; use kalamdb_commons::models::UserId; use kalamdb_commons::Role; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/storage/alter.rs b/backend/crates/kalamdb-handlers/src/storage/alter.rs similarity index 95% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/storage/alter.rs rename to backend/crates/kalamdb-handlers/src/storage/alter.rs index 63ece74e7..717ef22ef 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/storage/alter.rs +++ b/backend/crates/kalamdb-handlers/src/storage/alter.rs @@ -1,11 +1,11 @@ //! 
Typed DDL handler for ALTER STORAGE statements -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::error_extensions::KalamDbResultExt; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; -use crate::sql::executor::helpers::guards::require_admin; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::error_extensions::KalamDbResultExt; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; +use crate::helpers::guards::require_admin; use kalamdb_filestore::StorageHealthService; use kalamdb_sql::ddl::AlterStorageStatement; use std::sync::Arc; @@ -154,7 +154,7 @@ impl TypedStatementHandler for AlterStorageHandler { #[cfg(test)] mod tests { use super::*; - use crate::test_helpers::{create_test_session_simple, test_app_context_simple}; + use kalamdb_core::test_helpers::{create_test_session_simple, test_app_context_simple}; use kalamdb_commons::models::UserId; use kalamdb_commons::{Role, StorageId}; use kalamdb_system::Storage; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/storage/check.rs b/backend/crates/kalamdb-handlers/src/storage/check.rs similarity index 95% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/storage/check.rs rename to backend/crates/kalamdb-handlers/src/storage/check.rs index 022cf4502..e91f7f80e 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/storage/check.rs +++ b/backend/crates/kalamdb-handlers/src/storage/check.rs @@ -1,11 +1,11 @@ //! 
Typed DDL handler for STORAGE CHECK statements -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::error_extensions::KalamDbResultExt; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; -use crate::sql::executor::helpers::guards::require_admin; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::error_extensions::KalamDbResultExt; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; +use crate::helpers::guards::require_admin; use arrow::array::{ ArrayRef, BooleanBuilder, Int64Builder, StringBuilder, TimestampMillisecondBuilder, }; @@ -151,7 +151,7 @@ impl TypedStatementHandler for CheckStorageHandler { #[cfg(test)] mod tests { use super::*; - use crate::test_helpers::{create_test_session_simple, test_app_context_simple}; + use kalamdb_core::test_helpers::{create_test_session_simple, test_app_context_simple}; use kalamdb_commons::models::UserId; use kalamdb_commons::{Role, StorageId}; use std::sync::Arc; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/storage/create.rs b/backend/crates/kalamdb-handlers/src/storage/create.rs similarity index 95% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/storage/create.rs rename to backend/crates/kalamdb-handlers/src/storage/create.rs index 4a5849f10..9820a0674 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/storage/create.rs +++ b/backend/crates/kalamdb-handlers/src/storage/create.rs @@ -1,12 +1,12 @@ //! 
Typed DDL handler for CREATE STORAGE statements -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::error_extensions::KalamDbResultExt; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; -use crate::sql::executor::helpers::guards::require_admin; -use crate::sql::executor::helpers::storage::ensure_filesystem_directory; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::error_extensions::KalamDbResultExt; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; +use crate::helpers::guards::require_admin; +use crate::helpers::storage::ensure_filesystem_directory; use kalamdb_commons::models::StorageId; use kalamdb_filestore::StorageHealthService; use kalamdb_sql::ddl::CreateStorageStatement; @@ -175,7 +175,7 @@ impl TypedStatementHandler for CreateStorageHandler { #[cfg(test)] mod tests { use super::*; - use crate::test_helpers::{create_test_session_simple, test_app_context_simple}; + use kalamdb_core::test_helpers::{create_test_session_simple, test_app_context_simple}; use kalamdb_commons::models::UserId; use kalamdb_commons::{Role, StorageId}; use std::sync::Arc; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/storage/drop.rs b/backend/crates/kalamdb-handlers/src/storage/drop.rs similarity index 95% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/storage/drop.rs rename to backend/crates/kalamdb-handlers/src/storage/drop.rs index 29ec50179..8b7b66071 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/storage/drop.rs +++ b/backend/crates/kalamdb-handlers/src/storage/drop.rs @@ -1,10 +1,10 @@ //! 
Typed DDL handler for DROP STORAGE statements -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; -use crate::sql::executor::helpers::guards::require_admin; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; +use crate::helpers::guards::require_admin; use kalamdb_sql::ddl::DropStorageStatement; use std::sync::Arc; @@ -114,7 +114,7 @@ impl TypedStatementHandler for DropStorageHandler { #[cfg(test)] mod tests { use super::*; - use crate::test_helpers::{create_test_session_simple, test_app_context_simple}; + use kalamdb_core::test_helpers::{create_test_session_simple, test_app_context_simple}; use kalamdb_commons::models::UserId; use kalamdb_commons::{Role, StorageId}; use std::sync::Arc; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/storage/mod.rs b/backend/crates/kalamdb-handlers/src/storage/mod.rs similarity index 100% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/storage/mod.rs rename to backend/crates/kalamdb-handlers/src/storage/mod.rs diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/storage/show.rs b/backend/crates/kalamdb-handlers/src/storage/show.rs similarity index 89% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/storage/show.rs rename to backend/crates/kalamdb-handlers/src/storage/show.rs index 7ad79e850..21b4b7aba 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/storage/show.rs +++ b/backend/crates/kalamdb-handlers/src/storage/show.rs @@ -1,9 +1,9 @@ //! 
Typed DDL handler for SHOW STORAGES statements -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_sql::ddl::ShowStoragesStatement; use std::sync::Arc; @@ -56,7 +56,7 @@ impl TypedStatementHandler for ShowStoragesHandler { #[cfg(test)] mod tests { use super::*; - use crate::test_helpers::{create_test_session_simple, test_app_context_simple}; + use kalamdb_core::test_helpers::{create_test_session_simple, test_app_context_simple}; use kalamdb_commons::models::UserId; use kalamdb_commons::Role; use std::sync::Arc; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/subscription/mod.rs b/backend/crates/kalamdb-handlers/src/subscription/mod.rs similarity index 100% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/subscription/mod.rs rename to backend/crates/kalamdb-handlers/src/subscription/mod.rs diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/subscription/subscribe.rs b/backend/crates/kalamdb-handlers/src/subscription/subscribe.rs similarity index 91% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/subscription/subscribe.rs rename to backend/crates/kalamdb-handlers/src/subscription/subscribe.rs index 2c256fb88..4c75be13f 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/subscription/subscribe.rs +++ b/backend/crates/kalamdb-handlers/src/subscription/subscribe.rs @@ -1,9 +1,9 @@ //! 
Typed handler for SUBSCRIBE statement -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_sql::ddl::SubscribeStatement; use std::sync::Arc; use uuid::Uuid; @@ -59,7 +59,7 @@ impl TypedStatementHandler for SubscribeHandler { }; // Log query operation - use crate::sql::executor::helpers::audit; + use crate::helpers::audit; let audit_entry = audit::log_query_operation( context, "SUBSCRIBE", diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/system/mod.rs b/backend/crates/kalamdb-handlers/src/system/mod.rs similarity index 100% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/system/mod.rs rename to backend/crates/kalamdb-handlers/src/system/mod.rs diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/system/show_manifest_cache.rs b/backend/crates/kalamdb-handlers/src/system/show_manifest_cache.rs similarity index 87% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/system/show_manifest_cache.rs rename to backend/crates/kalamdb-handlers/src/system/show_manifest_cache.rs index 0e33dcbb8..f8f2e01fb 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/system/show_manifest_cache.rs +++ b/backend/crates/kalamdb-handlers/src/system/show_manifest_cache.rs @@ -3,10 +3,10 @@ //! Returns all manifest cache entries with their metadata. //! Uses ManifestTableProvider from kalamdb-system for consistent schema. 
-use crate::error::KalamDbError; -use crate::error_extensions::KalamDbResultExt; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::TypedStatementHandler; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::error_extensions::KalamDbResultExt; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use async_trait::async_trait; use kalamdb_sql::ShowManifestStatement; use kalamdb_system::providers::ManifestTableProvider; @@ -16,12 +16,12 @@ use std::sync::Arc; /// /// Delegates to ManifestTableProvider for consistent schema and implementation. pub struct ShowManifestCacheHandler { - app_context: Arc, + app_context: Arc, } impl ShowManifestCacheHandler { /// Create a new ShowManifestCacheHandler - pub fn new(app_context: Arc) -> Self { + pub fn new(app_context: Arc) -> Self { Self { app_context } } } @@ -48,7 +48,7 @@ impl TypedStatementHandler for ShowManifestCacheHandler { // Log query operation let duration = start_time.elapsed().as_secs_f64() * 1000.0; - use crate::sql::executor::helpers::audit; + use crate::helpers::audit; let audit_entry = audit::log_query_operation(context, "SHOW", "MANIFEST CACHE", duration, None); audit::persist_audit_entry(&self.app_context, &audit_entry).await?; @@ -64,11 +64,11 @@ impl TypedStatementHandler for ShowManifestCacheHandler { #[cfg(test)] mod tests { use super::*; - use crate::app_context::AppContext; + use kalamdb_core::app_context::AppContext; #[tokio::test] async fn test_show_manifest_cache_empty() { - use crate::sql::context::ExecutionContext; + use kalamdb_core::sql::context::ExecutionContext; use datafusion::prelude::SessionContext; use kalamdb_commons::models::{Role, UserId}; use std::sync::Arc; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/table/alter.rs b/backend/crates/kalamdb-handlers/src/table/alter.rs similarity index 98% rename from 
backend/crates/kalamdb-core/src/sql/executor/handlers/table/alter.rs rename to backend/crates/kalamdb-handlers/src/table/alter.rs index 36ac67adc..6b1b61dda 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/table/alter.rs +++ b/backend/crates/kalamdb-handlers/src/table/alter.rs @@ -1,11 +1,11 @@ //! Typed DDL handler for ALTER TABLE statements -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::executor::handlers::typed::TypedStatementHandler; -use crate::sql::executor::helpers::guards::block_system_namespace_modification; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; +use crate::helpers::guards::block_system_namespace_modification; // Note: table_registration moved to unified applier commands -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; use kalamdb_commons::constants::SystemColumnNames; use kalamdb_commons::models::schemas::{ColumnDefinition, TableDefinition}; use kalamdb_commons::models::{NamespaceId, TableId, UserId}; @@ -214,7 +214,7 @@ impl AlterTableHandler { statement: &AlterTableStatement, context: &ExecutionContext, ) -> Result { - use crate::sql::executor::helpers::audit; + use crate::helpers::audit; let table_id = TableId::from_strings(statement.namespace_id.as_str(), statement.table_name.as_str()); @@ -463,7 +463,7 @@ impl TypedStatementHandler for AlterTableHandler { return self.execute_vector_index_operation(&statement, context).await; } - use crate::sql::executor::helpers::audit; + use crate::helpers::audit; let namespace_id: NamespaceId = statement.namespace_id.clone(); let table_id = TableId::from_strings(namespace_id.as_str(), statement.table_name.as_str()); @@ -539,7 +539,7 @@ impl TypedStatementHandler for AlterTableHandler { statement: &AlterTableStatement, context: 
&ExecutionContext, ) -> Result<(), KalamDbError> { - use crate::sql::executor::helpers::guards::block_anonymous_write; + use crate::helpers::guards::block_anonymous_write; // Block anonymous users from DDL operations block_anonymous_write(context, "ALTER TABLE")?; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/table/create.rs b/backend/crates/kalamdb-handlers/src/table/create.rs similarity index 96% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/table/create.rs rename to backend/crates/kalamdb-handlers/src/table/create.rs index ee68c8210..07bbf1612 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/table/create.rs +++ b/backend/crates/kalamdb-handlers/src/table/create.rs @@ -1,9 +1,9 @@ //! Typed DDL handler for CREATE TABLE statements -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_commons::models::TableId; use kalamdb_commons::schemas::TableType; use kalamdb_sql::ddl::CreateTableStatement; @@ -44,8 +44,8 @@ impl TypedStatementHandler for CreateTableHandler { _params: Vec, context: &ExecutionContext, ) -> Result { - use crate::sql::executor::helpers::audit; - use crate::sql::executor::helpers::table_creation; + use crate::helpers::audit; + use crate::helpers::table_creation; let mut statement = statement; let effective_type = Self::resolve_table_type(&statement, context); @@ -108,7 +108,7 @@ impl TypedStatementHandler for CreateTableHandler { statement: &CreateTableStatement, context: &ExecutionContext, ) -> Result<(), KalamDbError> { - use crate::sql::executor::helpers::guards::block_anonymous_write; + use 
crate::helpers::guards::block_anonymous_write; // T050: Block anonymous users from DDL operations block_anonymous_write(context, "CREATE TABLE")?; @@ -134,7 +134,7 @@ impl TypedStatementHandler for CreateTableHandler { #[cfg(test)] mod tests { use super::*; - use crate::test_helpers::{create_test_session_simple, test_app_context_simple}; + use kalamdb_core::test_helpers::{create_test_session_simple, test_app_context_simple}; use arrow::datatypes::{DataType, Field, Schema}; use kalamdb_commons::models::{NamespaceId, UserId}; use kalamdb_commons::schemas::TableType; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/table/describe.rs b/backend/crates/kalamdb-handlers/src/table/describe.rs similarity index 88% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/table/describe.rs rename to backend/crates/kalamdb-handlers/src/table/describe.rs index 21800102e..6232486c1 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/table/describe.rs +++ b/backend/crates/kalamdb-handlers/src/table/describe.rs @@ -1,10 +1,10 @@ //! 
Typed DDL handler for DESCRIBE TABLE statements -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; -use crate::views::DescribeView; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; +use kalamdb_core::views::DescribeView; use kalamdb_commons::models::{NamespaceId, TableId}; use kalamdb_sql::ddl::DescribeTableStatement; use std::sync::Arc; @@ -47,7 +47,7 @@ impl TypedStatementHandler for DescribeTableHandler { // Log query operation let duration = start_time.elapsed().as_secs_f64() * 1000.0; - use crate::sql::executor::helpers::audit; + use crate::helpers::audit; let audit_entry = audit::log_query_operation(context, "DESCRIBE", &table_id.full_name(), duration, None); audit::persist_audit_entry(&self.app_context, &audit_entry).await?; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/table/drop.rs b/backend/crates/kalamdb-handlers/src/table/drop.rs similarity index 69% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/table/drop.rs rename to backend/crates/kalamdb-handlers/src/table/drop.rs index d80ed00ac..22b530de5 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/table/drop.rs +++ b/backend/crates/kalamdb-handlers/src/table/drop.rs @@ -3,204 +3,20 @@ //! This module provides both the DROP TABLE handler and reusable cleanup functions //! for table deletion operations (used by both DDL handler and CleanupExecutor). 
-use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::error_extensions::KalamDbResultExt; -use crate::jobs::executors::cleanup::{CleanupOperation, CleanupParams, StorageCleanupDetails}; -use crate::schema_registry::SchemaRegistry; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; -use crate::sql::executor::helpers::guards::block_system_namespace_modification; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_jobs::executors::cleanup::{CleanupOperation, CleanupParams, StorageCleanupDetails}; +use kalamdb_jobs::AppContextJobsExt; +use kalamdb_core::operations::table_cleanup::cleanup_table_data_internal; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; +use crate::helpers::guards::block_system_namespace_modification; use kalamdb_commons::models::TableId; use kalamdb_commons::schemas::TableType; use kalamdb_sql::ddl::DropTableStatement; use kalamdb_system::JobType; use std::sync::Arc; -/// Cleanup helper function: Delete table data from RocksDB -/// -/// **Phase 8.5 (T146a)**: Public helper for CleanupExecutor -/// -/// # Arguments -/// * `_app_context` - Application context for accessing stores (reserved for future use) -/// * `table_id` - Table identifier (namespace:table_name) -/// * `table_type` - Table type (User/Shared/Stream) -/// -/// # Returns -/// Number of rows deleted -pub async fn cleanup_table_data_internal( - _app_context: &Arc, - table_id: &TableId, - table_type: TableType, -) -> Result { - log::debug!( - "[CleanupHelper] Cleaning up table data for {:?} (type: {:?})", - table_id, - table_type - ); - - let rows_deleted = match table_type { - TableType::User => { - use kalamdb_tables::new_indexed_user_table_store; - - new_indexed_user_table_store(_app_context.storage_backend(), table_id, 
"_pk") - .drop_all_partitions() - .map_err(|e| { - KalamDbError::Other(format!( - "Failed to drop user table partitions for {}: {}", - table_id, e - )) - })?; - - log::debug!("[CleanupHelper] Dropped all partitions for user table {:?}", table_id); - 0usize - }, - TableType::Shared => { - use kalamdb_tables::new_indexed_shared_table_store; - - new_indexed_shared_table_store(_app_context.storage_backend(), table_id, "_pk") - .drop_all_partitions() - .map_err(|e| { - KalamDbError::Other(format!( - "Failed to drop shared table partitions for {}: {}", - table_id, e - )) - })?; - - log::debug!("[CleanupHelper] Dropped all partitions for shared table {:?}", table_id); - 0usize - }, - TableType::Stream => { - // Stream tables are in-memory by design. However, if a persistent - // backend is configured, attempt to drop the partition best-effort. - use kalamdb_commons::constants::ColumnFamilyNames; - use kalamdb_store::storage_trait::Partition as StorePartition; - - let partition_name = format!( - "{}{}", - ColumnFamilyNames::STREAM_TABLE_PREFIX, - table_id // TableId Display: "namespace:table" - ); - - let backend = _app_context.storage_backend(); - let partition = StorePartition::new(partition_name.clone()); - - match backend.drop_partition(&partition) { - Ok(_) => { - log::debug!( - "[CleanupHelper] Dropped partition '{}' for stream table {:?}", - partition_name, - table_id - ); - 0usize - }, - Err(e) => { - let msg = e.to_string(); - if msg.to_lowercase().contains("not found") { - log::debug!( - "[CleanupHelper] Stream partition '{}' not found (likely in-memory)", - partition_name - ); - 0usize - } else { - return Err(KalamDbError::Other(format!( - "Failed to drop partition '{}' for stream table {}: {}", - partition_name, table_id, e - ))); - } - }, - } - }, - TableType::System => { - // System tables cannot be dropped via DDL - return Err(KalamDbError::InvalidOperation( - "Cannot cleanup system table data".to_string(), - )); - }, - }; - - log::debug!("[CleanupHelper] 
Deleted {} rows from table data", rows_deleted); - Ok(rows_deleted) -} - -/// Cleanup helper function: Delete Parquet files from storage backend -/// -/// **Phase 8.5 (T146a)**: Public helper for CleanupExecutor -/// -/// # Arguments -/// * `app_context` - Application context for accessing storage backend -/// * `table_id` - Table identifier (namespace:table_name) -/// -/// # Returns -/// Number of bytes freed (sum of deleted file sizes) -pub async fn cleanup_parquet_files_internal( - app_context: &Arc, - table_id: &TableId, - table_type: TableType, - storage: &StorageCleanupDetails, -) -> Result { - log::debug!( - "[CleanupHelper] Cleaning up Parquet files for {:?} using storage {}", - table_id, - storage.storage_id.as_str() - ); - - // Get StorageCached from registry - let storage_cached = - app_context.storage_registry().get_cached(&storage.storage_id)?.ok_or_else(|| { - KalamDbError::InvalidOperation(format!( - "Storage '{}' not found during cleanup", - storage.storage_id.as_str() - )) - })?; - - // Delete Parquet files using StorageCached (async to avoid blocking runtime) - // Note: For user tables, we'd need user_id, but cleanup is table-wide - let files_deleted = storage_cached - .delete_prefix( - table_type, table_id, None, // user_id - cleanup is table-wide - ) - .await - .into_kalamdb_error("Failed to delete Parquet tree")? 
- .files_deleted; - - log::debug!("[CleanupHelper] Freed {} files from Parquet storage", files_deleted); - Ok(0) // Bytes freed not returned by delete_prefix_sync -} - -/// Cleanup helper function: Remove table metadata from system tables -/// -/// **Phase 8.5 (T146a)**: Public helper for CleanupExecutor -/// -/// # Arguments -/// * `schema_registry` - Schema registry for metadata removal -/// * `table_id` - Table identifier (namespace:table_name) -/// -/// # Returns -/// Ok(()) on success -pub async fn cleanup_metadata_internal( - _app_ctx: &AppContext, - schema_registry: &Arc, - table_id: &TableId, -) -> Result<(), KalamDbError> { - log::debug!("[CleanupHelper] Cleaning up metadata for {:?}", table_id); - - if schema_registry.get_table_if_exists(table_id)?.is_some() { - log::debug!( - "[CleanupHelper] Metadata present for {:?} (table re-created) - skipping cleanup", - table_id - ); - return Ok(()); - } - - // Delete table definition from SchemaRegistry - // This removes from both cache and persistent store (delete-through pattern) - schema_registry.delete_table_definition(table_id)?; - - log::debug!("[CleanupHelper] Metadata cleanup complete"); - Ok(()) -} - /// Typed handler for DROP TABLE statements pub struct DropTableHandler { app_context: Arc, @@ -474,7 +290,7 @@ impl TypedStatementHandler for DropTableHandler { .await?; // Log DDL operation - use crate::sql::executor::helpers::audit; + use crate::helpers::audit; let audit_entry = audit::log_ddl_operation( context, "DROP", @@ -508,7 +324,7 @@ impl TypedStatementHandler for DropTableHandler { _statement: &DropTableStatement, context: &ExecutionContext, ) -> Result<(), KalamDbError> { - use crate::sql::executor::helpers::guards::block_anonymous_write; + use crate::helpers::guards::block_anonymous_write; // T050: Block anonymous users from DDL operations block_anonymous_write(context, "DROP TABLE")?; @@ -525,10 +341,10 @@ impl TypedStatementHandler for DropTableHandler { #[cfg(test)] mod tests { use 
super::{cleanup_table_data_internal, DropTableHandler}; - use crate::sql::context::{ExecutionContext, ExecutionResult}; - use crate::sql::executor::handlers::table::create::CreateTableHandler; - use crate::sql::executor::handlers::typed::TypedStatementHandler; - use crate::test_helpers::{ + use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult}; + use crate::table::create::CreateTableHandler; + use kalamdb_core::sql::executor::handlers::TypedStatementHandler; + use kalamdb_core::test_helpers::{ create_test_session_simple, test_app_context, test_app_context_simple, }; use arrow::datatypes::{DataType, Field, Schema}; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/table/mod.rs b/backend/crates/kalamdb-handlers/src/table/mod.rs similarity index 100% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/table/mod.rs rename to backend/crates/kalamdb-handlers/src/table/mod.rs diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/table/show.rs b/backend/crates/kalamdb-handlers/src/table/show.rs similarity index 93% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/table/show.rs rename to backend/crates/kalamdb-handlers/src/table/show.rs index 10f3d0811..f914b5ca3 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/table/show.rs +++ b/backend/crates/kalamdb-handlers/src/table/show.rs @@ -1,10 +1,10 @@ //! 
Typed DDL handler for SHOW TABLES statements -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::error_extensions::KalamDbResultExt; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::error_extensions::KalamDbResultExt; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use datafusion::arrow::array::{ ArrayRef, Int32Array, RecordBatch, StringBuilder, TimestampMicrosecondArray, }; @@ -66,7 +66,7 @@ impl TypedStatementHandler for ShowTablesHandler { // Log query operation let duration = start_time.elapsed().as_secs_f64() * 1000.0; - use crate::sql::executor::helpers::audit; + use crate::helpers::audit; let audit_entry = audit::log_query_operation(context, "SHOW", "TABLES", duration, None); audit::persist_audit_entry(&self.app_context, &audit_entry).await?; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/table/show_stats.rs b/backend/crates/kalamdb-handlers/src/table/show_stats.rs similarity index 92% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/table/show_stats.rs rename to backend/crates/kalamdb-handlers/src/table/show_stats.rs index 0b29fd8d4..fe2445595 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/table/show_stats.rs +++ b/backend/crates/kalamdb-handlers/src/table/show_stats.rs @@ -1,10 +1,10 @@ //! 
Typed DDL handler for SHOW STATS statements -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::error_extensions::KalamDbResultExt; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::error_extensions::KalamDbResultExt; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use datafusion::arrow::array::{ArrayRef, RecordBatch, StringArray, UInt64Array}; use kalamdb_commons::arrow_utils::{field_uint64, field_utf8, schema}; use kalamdb_commons::models::{NamespaceId, TableId}; @@ -78,7 +78,7 @@ impl TypedStatementHandler for ShowStatsHandler { // Log query operation let duration = start_time.elapsed().as_secs_f64() * 1000.0; - use crate::sql::executor::helpers::audit; + use crate::helpers::audit; let audit_entry = audit::log_query_operation( context, "SHOW", diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/topics/ack.rs b/backend/crates/kalamdb-handlers/src/topics/ack.rs similarity index 94% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/topics/ack.rs rename to backend/crates/kalamdb-handlers/src/topics/ack.rs index 7163b9404..3949f12f3 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/topics/ack.rs +++ b/backend/crates/kalamdb-handlers/src/topics/ack.rs @@ -1,9 +1,9 @@ //! 
ACK statement handler for committing consumer group offsets -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use datafusion::arrow::{ array::{ArrayRef, Int32Array, Int64Array, StringBuilder}, datatypes::{DataType, Field, Schema}, diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/topics/add_source.rs b/backend/crates/kalamdb-handlers/src/topics/add_source.rs similarity index 93% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/topics/add_source.rs rename to backend/crates/kalamdb-handlers/src/topics/add_source.rs index f37cb92c7..70243d3e5 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/topics/add_source.rs +++ b/backend/crates/kalamdb-handlers/src/topics/add_source.rs @@ -1,9 +1,9 @@ //! 
ALTER TOPIC ADD SOURCE handler -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_commons::models::TopicId; use kalamdb_sql::ddl::AddTopicSourceStatement; use kalamdb_system::providers::topics::models::TopicRoute; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/topics/clear.rs b/backend/crates/kalamdb-handlers/src/topics/clear.rs similarity index 90% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/topics/clear.rs rename to backend/crates/kalamdb-handlers/src/topics/clear.rs index 0dd2f86cb..3cf06a61f 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/topics/clear.rs +++ b/backend/crates/kalamdb-handlers/src/topics/clear.rs @@ -1,9 +1,10 @@ //! 
CLEAR TOPIC handler -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; +use kalamdb_jobs::AppContextJobsExt; use kalamdb_sql::ddl::ClearTopicStatement; use std::sync::Arc; @@ -52,7 +53,7 @@ impl TypedStatementHandler for ClearTopicHandler { let topic_name = topic.unwrap().name; // Schedule a TopicCleanup job to perform the actual cleanup - use crate::jobs::executors::topic_cleanup::TopicCleanupParams; + use kalamdb_jobs::executors::topic_cleanup::TopicCleanupParams; use kalamdb_system::JobType; let cleanup_params = TopicCleanupParams { diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/topics/consume.rs b/backend/crates/kalamdb-handlers/src/topics/consume.rs similarity index 96% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/topics/consume.rs rename to backend/crates/kalamdb-handlers/src/topics/consume.rs index 7b4e41f37..99ba1ce2f 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/topics/consume.rs +++ b/backend/crates/kalamdb-handlers/src/topics/consume.rs @@ -1,9 +1,9 @@ //! 
CONSUME FROM handler -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use datafusion::arrow::{ array::{ArrayRef, BinaryBuilder, Int32Array, Int64Array, StringBuilder}, record_batch::RecordBatch, diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/topics/create.rs b/backend/crates/kalamdb-handlers/src/topics/create.rs similarity index 95% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/topics/create.rs rename to backend/crates/kalamdb-handlers/src/topics/create.rs index 0b13c3484..2b7994650 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/topics/create.rs +++ b/backend/crates/kalamdb-handlers/src/topics/create.rs @@ -1,9 +1,9 @@ //! 
CREATE TOPIC handler -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_commons::models::{NamespaceId, TopicId}; use kalamdb_sql::ddl::CreateTopicStatement; use kalamdb_system::providers::topics::models::Topic; @@ -106,7 +106,7 @@ impl TypedStatementHandler for CreateTopicHandler { #[cfg(test)] mod tests { use super::*; - use crate::test_helpers::{create_test_session_simple, test_app_context_simple}; + use kalamdb_core::test_helpers::{create_test_session_simple, test_app_context_simple}; use kalamdb_commons::models::UserId; use kalamdb_commons::Role; use kalamdb_system::Namespace; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/topics/drop.rs b/backend/crates/kalamdb-handlers/src/topics/drop.rs similarity index 89% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/topics/drop.rs rename to backend/crates/kalamdb-handlers/src/topics/drop.rs index cf28841a6..9227f4b28 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/topics/drop.rs +++ b/backend/crates/kalamdb-handlers/src/topics/drop.rs @@ -1,10 +1,11 @@ //! 
DROP TOPIC handler -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_commons::models::TopicId; +use kalamdb_jobs::AppContextJobsExt; use kalamdb_sql::ddl::DropTopicStatement; use std::sync::Arc; @@ -51,7 +52,7 @@ impl TypedStatementHandler for DropTopicHandler { self.app_context.topic_publisher().remove_topic(&topic_id); // Schedule a TopicCleanup job to clean up all messages and offsets - use crate::jobs::executors::topic_cleanup::TopicCleanupParams; + use kalamdb_jobs::executors::topic_cleanup::TopicCleanupParams; use kalamdb_system::JobType; let cleanup_params = TopicCleanupParams { diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/topics/mod.rs b/backend/crates/kalamdb-handlers/src/topics/mod.rs similarity index 100% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/topics/mod.rs rename to backend/crates/kalamdb-handlers/src/topics/mod.rs diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/user/alter.rs b/backend/crates/kalamdb-handlers/src/user/alter.rs similarity index 94% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/user/alter.rs rename to backend/crates/kalamdb-handlers/src/user/alter.rs index 5169dd40d..5d4eb2ef9 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/user/alter.rs +++ b/backend/crates/kalamdb-handlers/src/user/alter.rs @@ -1,9 +1,9 @@ //! 
Typed handler for ALTER USER statement -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_auth::security::password::{ hash_password, validate_password_with_policy, PasswordPolicy, }; @@ -99,7 +99,7 @@ impl TypedStatementHandler for AlterUserHandler { .map_err(|e| KalamDbError::ExecutionError(format!("ALTER USER failed: {}", e)))?; // Log DDL operation (with password redaction) - use crate::sql::executor::helpers::audit; + use crate::helpers::audit; let audit_entry = audit::log_ddl_operation( context, "ALTER", diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/user/create.rs b/backend/crates/kalamdb-handlers/src/user/create.rs similarity index 95% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/user/create.rs rename to backend/crates/kalamdb-handlers/src/user/create.rs index bcf4e7e4a..da9981c41 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/user/create.rs +++ b/backend/crates/kalamdb-handlers/src/user/create.rs @@ -1,10 +1,10 @@ //! 
Typed handler for CREATE USER statement -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::error_extensions::KalamDbResultExt; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::error_extensions::KalamDbResultExt; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_auth::security::password::{ hash_password, validate_password_with_policy, PasswordPolicy, }; @@ -138,7 +138,7 @@ impl TypedStatementHandler for CreateUserHandler { .map_err(|e| KalamDbError::ExecutionError(format!("CREATE USER failed: {}", e)))?; // Log DDL operation - use crate::sql::executor::helpers::audit; + use crate::helpers::audit; let audit_entry = audit::log_ddl_operation( context, "CREATE", diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/user/drop.rs b/backend/crates/kalamdb-handlers/src/user/drop.rs similarity index 90% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/user/drop.rs rename to backend/crates/kalamdb-handlers/src/user/drop.rs index e3b4938ac..2302e05cc 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/user/drop.rs +++ b/backend/crates/kalamdb-handlers/src/user/drop.rs @@ -1,9 +1,9 @@ //! 
Typed handler for DROP USER statement -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_sql::ddl::DropUserStatement; use std::sync::Arc; // No direct UserId usage, removing unused import @@ -52,7 +52,7 @@ impl TypedStatementHandler for DropUserHandler { .map_err(|e| KalamDbError::ExecutionError(format!("DROP USER failed: {}", e)))?; // Log DDL operation - use crate::sql::executor::helpers::audit; + use crate::helpers::audit; let audit_entry = audit::log_ddl_operation(context, "DROP", "USER", &statement.username, None, None); audit::persist_audit_entry(&self.app_context, &audit_entry).await?; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/user/mod.rs b/backend/crates/kalamdb-handlers/src/user/mod.rs similarity index 100% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/user/mod.rs rename to backend/crates/kalamdb-handlers/src/user/mod.rs diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/view/create.rs b/backend/crates/kalamdb-handlers/src/view/create.rs similarity index 94% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/view/create.rs rename to backend/crates/kalamdb-handlers/src/view/create.rs index 3a25f995a..9d94ab6e5 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/handlers/view/create.rs +++ b/backend/crates/kalamdb-handlers/src/view/create.rs @@ -4,11 +4,11 @@ //! registration to the shared base SessionContext so subsequent per-user //! sessions inherit the view definition. 
-use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::error_extensions::KalamDbResultExt; -use crate::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; -use crate::sql::executor::handlers::typed::TypedStatementHandler; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::error_extensions::KalamDbResultExt; +use kalamdb_core::sql::context::{ExecutionContext, ExecutionResult, ScalarValue}; +use kalamdb_core::sql::executor::handlers::TypedStatementHandler; use kalamdb_commons::models::NamespaceId; use kalamdb_sql::ddl::CreateViewStatement; use std::sync::Arc; @@ -125,7 +125,7 @@ impl TypedStatementHandler for CreateViewHandler { #[cfg(test)] mod tests { use super::*; - use crate::test_helpers::test_app_context_simple; + use kalamdb_core::test_helpers::test_app_context_simple; use arrow::array::Int64Array; use kalamdb_commons::models::{NamespaceId, UserId}; diff --git a/backend/crates/kalamdb-core/src/sql/executor/handlers/view/mod.rs b/backend/crates/kalamdb-handlers/src/view/mod.rs similarity index 100% rename from backend/crates/kalamdb-core/src/sql/executor/handlers/view/mod.rs rename to backend/crates/kalamdb-handlers/src/view/mod.rs diff --git a/backend/crates/kalamdb-jobs/Cargo.toml b/backend/crates/kalamdb-jobs/Cargo.toml new file mode 100644 index 000000000..4e9f88c0d --- /dev/null +++ b/backend/crates/kalamdb-jobs/Cargo.toml @@ -0,0 +1,56 @@ +[package] +name = "kalamdb-jobs" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +authors.workspace = true +license.workspace = true +repository.workspace = true +description = "Job management system for KalamDB: executors, schedulers, and orchestration" + +[dependencies] +# Internal crates +kalamdb-commons = { path = "../kalamdb-commons", features = ["full"] } +kalamdb-core = { path = "../kalamdb-core" } +kalamdb-observability = { path = "../kalamdb-observability" } +kalamdb-raft = { path = 
"../kalamdb-raft" } +kalamdb-sharding = { path = "../kalamdb-sharding" } +kalamdb-store = { path = "../kalamdb-store" } +kalamdb-system = { path = "../kalamdb-system" } +kalamdb-tables = { path = "../kalamdb-tables" } + +# Async +async-trait = { workspace = true } +tokio = { workspace = true, features = ["sync", "time", "rt"] } + +# Serialization +serde = { workspace = true } +serde_json = { workspace = true } + +# Time handling +chrono = { workspace = true } + +# Logging +log = { workspace = true } + +# Tracing +tracing = { workspace = true } + +# UUID generation +uuid = { workspace = true } + +# ZIP archive creation +zip = { workspace = true } + +# Concurrent +parking_lot = { workspace = true } + +# DataFusion (for test TableProvider downcast) +datafusion = { workspace = true } + +[lib] +doctest = false + +[dev-dependencies] +kalamdb-core = { path = "../kalamdb-core", features = ["test-helpers"] } +tempfile = { workspace = true } diff --git a/backend/crates/kalamdb-core/src/jobs/executors/backup.rs b/backend/crates/kalamdb-jobs/src/executors/backup.rs similarity index 98% rename from backend/crates/kalamdb-core/src/jobs/executors/backup.rs rename to backend/crates/kalamdb-jobs/src/executors/backup.rs index a9ad2faa0..d22b90d33 100644 --- a/backend/crates/kalamdb-core/src/jobs/executors/backup.rs +++ b/backend/crates/kalamdb-jobs/src/executors/backup.rs @@ -21,8 +21,8 @@ //! } //! 
``` -use crate::error::KalamDbError; -use crate::jobs::executors::{JobContext, JobDecision, JobExecutor, JobParams}; +use kalamdb_core::error::KalamDbError; +use crate::executors::{JobContext, JobDecision, JobExecutor, JobParams}; use async_trait::async_trait; use kalamdb_system::JobType; use serde::{Deserialize, Serialize}; diff --git a/backend/crates/kalamdb-core/src/jobs/executors/cleanup.rs b/backend/crates/kalamdb-jobs/src/executors/cleanup.rs similarity index 86% rename from backend/crates/kalamdb-core/src/jobs/executors/cleanup.rs rename to backend/crates/kalamdb-jobs/src/executors/cleanup.rs index 69041b77f..de8bcd9d2 100644 --- a/backend/crates/kalamdb-core/src/jobs/executors/cleanup.rs +++ b/backend/crates/kalamdb-jobs/src/executors/cleanup.rs @@ -19,41 +19,20 @@ //! } //! ``` -use crate::error::KalamDbError; -use crate::jobs::executors::{JobContext, JobDecision, JobExecutor, JobParams}; -use crate::sql::executor::handlers::table::drop::{ +use kalamdb_core::error::KalamDbError; +use crate::executors::{JobContext, JobDecision, JobExecutor, JobParams}; +use kalamdb_core::operations::table_cleanup::{ cleanup_metadata_internal, cleanup_parquet_files_internal, cleanup_table_data_internal, }; use async_trait::async_trait; -use kalamdb_commons::models::StorageId; use kalamdb_commons::schemas::TableType; use kalamdb_commons::TableId; use kalamdb_system::JobType; use serde::{Deserialize, Serialize}; use std::sync::Arc; -/// Cleanup operation types -#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "snake_case")] -pub enum CleanupOperation { - /// Drop table (delete all data and metadata) - DropTable, - /// Truncate table (delete all data, keep schema) - Truncate, - /// Remove orphaned files - RemoveOrphaned, -} - -/// Storage details needed to delete Parquet trees after metadata removal. 
-#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct StorageCleanupDetails { - /// Storage identifier - pub storage_id: StorageId, - /// Base directory resolved for this storage - pub base_directory: String, - /// Relative path template with static placeholders substituted - pub relative_path_template: String, -} +// Re-export so consumers can keep importing from this module. +pub use kalamdb_core::operations::table_cleanup::{CleanupOperation, StorageCleanupDetails}; /// Typed parameters for cleanup operations (T191) #[derive(Debug, Clone, Serialize, Deserialize)] @@ -184,6 +163,7 @@ impl Default for CleanupExecutor { mod tests { use super::*; use kalamdb_commons::{NamespaceId, TableName}; + use kalamdb_commons::StorageId; #[test] fn test_executor_properties() { diff --git a/backend/crates/kalamdb-core/src/jobs/executors/compact.rs b/backend/crates/kalamdb-jobs/src/executors/compact.rs similarity index 95% rename from backend/crates/kalamdb-core/src/jobs/executors/compact.rs rename to backend/crates/kalamdb-jobs/src/executors/compact.rs index e12761737..0c3ba5d23 100644 --- a/backend/crates/kalamdb-core/src/jobs/executors/compact.rs +++ b/backend/crates/kalamdb-jobs/src/executors/compact.rs @@ -18,9 +18,9 @@ //! } //! 
``` -use crate::error::KalamDbError; -use crate::jobs::executors::shared_table_cleanup::cleanup_empty_shared_scope_if_needed; -use crate::jobs::executors::{JobContext, JobDecision, JobExecutor, JobParams}; +use kalamdb_core::error::KalamDbError; +use crate::executors::shared_table_cleanup::cleanup_empty_shared_scope_if_needed; +use crate::executors::{JobContext, JobDecision, JobExecutor, JobParams}; use async_trait::async_trait; use kalamdb_commons::constants::ColumnFamilyNames; use kalamdb_commons::schemas::TableType; diff --git a/backend/crates/kalamdb-core/src/jobs/executors/executor_trait.rs b/backend/crates/kalamdb-jobs/src/executors/executor_trait.rs similarity index 98% rename from backend/crates/kalamdb-core/src/jobs/executors/executor_trait.rs rename to backend/crates/kalamdb-jobs/src/executors/executor_trait.rs index 864b0ddbe..040615b48 100644 --- a/backend/crates/kalamdb-core/src/jobs/executors/executor_trait.rs +++ b/backend/crates/kalamdb-jobs/src/executors/executor_trait.rs @@ -37,8 +37,8 @@ //! } //! ``` -use crate::app_context::AppContext; -use crate::error::KalamDbError; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; use kalamdb_system::JobType; use log::{debug, error, info, trace, warn}; use serde::{Deserialize, Serialize}; @@ -347,7 +347,7 @@ pub trait JobExecutor: Send + Sync { #[cfg(test)] mod tests { use super::*; - use crate::test_helpers::test_app_context_simple; + use kalamdb_core::test_helpers::test_app_context_simple; #[test] fn test_job_decision_completed() { diff --git a/backend/crates/kalamdb-core/src/jobs/executors/flush.rs b/backend/crates/kalamdb-jobs/src/executors/flush.rs similarity index 95% rename from backend/crates/kalamdb-core/src/jobs/executors/flush.rs rename to backend/crates/kalamdb-jobs/src/executors/flush.rs index e08105636..0f61d8562 100644 --- a/backend/crates/kalamdb-core/src/jobs/executors/flush.rs +++ b/backend/crates/kalamdb-jobs/src/executors/flush.rs @@ -27,12 +27,12 @@ //! 
} //! ``` -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::error_extensions::KalamDbResultExt; -use crate::jobs::executors::shared_table_cleanup::cleanup_empty_shared_scope_if_needed; -use crate::jobs::executors::{JobContext, JobDecision, JobExecutor, JobParams}; -use crate::manifest::flush::{SharedTableFlushJob, TableFlush, UserTableFlushJob}; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::error_extensions::KalamDbResultExt; +use crate::executors::shared_table_cleanup::cleanup_empty_shared_scope_if_needed; +use crate::executors::{JobContext, JobDecision, JobExecutor, JobParams}; +use kalamdb_core::manifest::flush::{SharedTableFlushJob, TableFlush, UserTableFlushJob}; use async_trait::async_trait; use kalamdb_commons::schemas::TableType; use kalamdb_commons::TableId; @@ -151,7 +151,7 @@ impl FlushExecutor { // Downcast to UserTableProvider to access store let provider = provider_arc .as_any() - .downcast_ref::() + .downcast_ref::() .ok_or_else(|| { KalamDbError::InvalidOperation( "Cached provider type mismatch for user table".into(), @@ -191,7 +191,7 @@ impl FlushExecutor { // Downcast to SharedTableProvider to access store let provider = provider_arc .as_any() - .downcast_ref::() + .downcast_ref::() .ok_or_else(|| { KalamDbError::InvalidOperation( "Cached provider type mismatch for shared table".into(), @@ -349,7 +349,7 @@ impl JobExecutor for FlushExecutor { TableType::User => { if let Some(provider_arc) = schema_registry.get_provider(¶ms.table_id) { if let Some(provider) = - provider_arc.as_any().downcast_ref::() + provider_arc.as_any().downcast_ref::() { let store = provider.store(); let partition = store.partition(); @@ -367,7 +367,7 @@ impl JobExecutor for FlushExecutor { if let Some(provider_arc) = schema_registry.get_provider(¶ms.table_id) { if let Some(provider) = provider_arc .as_any() - .downcast_ref::() + .downcast_ref::() { let store = provider.store(); let 
partition = store.partition(); diff --git a/backend/crates/kalamdb-core/src/jobs/executors/job_cleanup.rs b/backend/crates/kalamdb-jobs/src/executors/job_cleanup.rs similarity index 96% rename from backend/crates/kalamdb-core/src/jobs/executors/job_cleanup.rs rename to backend/crates/kalamdb-jobs/src/executors/job_cleanup.rs index 706e7b736..a99259ced 100644 --- a/backend/crates/kalamdb-core/src/jobs/executors/job_cleanup.rs +++ b/backend/crates/kalamdb-jobs/src/executors/job_cleanup.rs @@ -15,8 +15,8 @@ //! } //! ``` -use crate::error::KalamDbError; -use crate::jobs::executors::{JobContext, JobDecision, JobExecutor, JobParams}; +use kalamdb_core::error::KalamDbError; +use crate::executors::{JobContext, JobDecision, JobExecutor, JobParams}; use async_trait::async_trait; use kalamdb_system::JobType; use serde::{Deserialize, Serialize}; diff --git a/backend/crates/kalamdb-core/src/jobs/executors/manifest_eviction.rs b/backend/crates/kalamdb-jobs/src/executors/manifest_eviction.rs similarity index 96% rename from backend/crates/kalamdb-core/src/jobs/executors/manifest_eviction.rs rename to backend/crates/kalamdb-jobs/src/executors/manifest_eviction.rs index dd6e842eb..ed69079e2 100644 --- a/backend/crates/kalamdb-core/src/jobs/executors/manifest_eviction.rs +++ b/backend/crates/kalamdb-jobs/src/executors/manifest_eviction.rs @@ -19,9 +19,9 @@ //! - eviction_interval_seconds: How often the job runs (default: 600s = 10 minutes) //! 
- eviction_ttl_days: How many days before an unaccessed manifest is evicted (default: 7 days) -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::jobs::executors::{JobContext, JobDecision, JobExecutor, JobParams}; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use crate::executors::{JobContext, JobDecision, JobExecutor, JobParams}; use async_trait::async_trait; use kalamdb_system::JobType; use serde::{Deserialize, Serialize}; diff --git a/backend/crates/kalamdb-core/src/jobs/executors/mod.rs b/backend/crates/kalamdb-jobs/src/executors/mod.rs similarity index 100% rename from backend/crates/kalamdb-core/src/jobs/executors/mod.rs rename to backend/crates/kalamdb-jobs/src/executors/mod.rs diff --git a/backend/crates/kalamdb-core/src/jobs/executors/registry.rs b/backend/crates/kalamdb-jobs/src/executors/registry.rs similarity index 98% rename from backend/crates/kalamdb-core/src/jobs/executors/registry.rs rename to backend/crates/kalamdb-jobs/src/executors/registry.rs index e7539e4fa..a096f063d 100644 --- a/backend/crates/kalamdb-core/src/jobs/executors/registry.rs +++ b/backend/crates/kalamdb-jobs/src/executors/registry.rs @@ -12,9 +12,9 @@ //! while preserving type safety at execution time through runtime deserialization. 
use super::executor_trait::{JobContext, JobDecision, JobExecutor, JobParams}; -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::error_extensions::SerdeJsonResultExt; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::error_extensions::SerdeJsonResultExt; use async_trait::async_trait; use kalamdb_system::Job; use kalamdb_system::JobType; @@ -467,8 +467,8 @@ impl Default for JobRegistry { #[cfg(test)] mod tests { use super::*; - use crate::jobs::executors::JobParams; - use crate::test_helpers::test_app_context_simple; + use crate::executors::JobParams; + use kalamdb_core::test_helpers::test_app_context_simple; use kalamdb_commons::models::{JobId, NodeId}; use kalamdb_system::JobStatus; use serde::{Deserialize, Serialize}; diff --git a/backend/crates/kalamdb-core/src/jobs/executors/restore.rs b/backend/crates/kalamdb-jobs/src/executors/restore.rs similarity index 98% rename from backend/crates/kalamdb-core/src/jobs/executors/restore.rs rename to backend/crates/kalamdb-jobs/src/executors/restore.rs index fbb226725..c4afa1ed7 100644 --- a/backend/crates/kalamdb-core/src/jobs/executors/restore.rs +++ b/backend/crates/kalamdb-jobs/src/executors/restore.rs @@ -24,8 +24,8 @@ //! ## IMPORTANT //! Restore requires a server restart after completion to reload the restored data. 
-use crate::error::KalamDbError; -use crate::jobs::executors::{JobContext, JobDecision, JobExecutor, JobParams}; +use kalamdb_core::error::KalamDbError; +use crate::executors::{JobContext, JobDecision, JobExecutor, JobParams}; use async_trait::async_trait; use kalamdb_system::JobType; use serde::{Deserialize, Serialize}; diff --git a/backend/crates/kalamdb-core/src/jobs/executors/retention.rs b/backend/crates/kalamdb-jobs/src/executors/retention.rs similarity index 98% rename from backend/crates/kalamdb-core/src/jobs/executors/retention.rs rename to backend/crates/kalamdb-jobs/src/executors/retention.rs index c7dfc1112..30cccf752 100644 --- a/backend/crates/kalamdb-core/src/jobs/executors/retention.rs +++ b/backend/crates/kalamdb-jobs/src/executors/retention.rs @@ -20,8 +20,8 @@ //! } //! ``` -use crate::error::KalamDbError; -use crate::jobs::executors::{JobContext, JobDecision, JobExecutor, JobParams}; +use kalamdb_core::error::KalamDbError; +use crate::executors::{JobContext, JobDecision, JobExecutor, JobParams}; use async_trait::async_trait; use kalamdb_commons::schemas::TableType; use kalamdb_commons::TableId; diff --git a/backend/crates/kalamdb-core/src/jobs/executors/shared_table_cleanup.rs b/backend/crates/kalamdb-jobs/src/executors/shared_table_cleanup.rs similarity index 88% rename from backend/crates/kalamdb-core/src/jobs/executors/shared_table_cleanup.rs rename to backend/crates/kalamdb-jobs/src/executors/shared_table_cleanup.rs index 5770087ca..7e8d8f07b 100644 --- a/backend/crates/kalamdb-core/src/jobs/executors/shared_table_cleanup.rs +++ b/backend/crates/kalamdb-jobs/src/executors/shared_table_cleanup.rs @@ -1,6 +1,6 @@ -use crate::error::KalamDbError; -use crate::jobs::executors::{JobContext, JobParams}; -use crate::providers::{row_utils::system_user_id, BaseTableProvider, SharedTableProvider}; +use kalamdb_core::error::KalamDbError; +use crate::executors::{JobContext, JobParams}; +use kalamdb_core::providers::{row_utils::system_user_id, 
BaseTableProvider, SharedTableProvider}; use kalamdb_commons::TableId; pub(crate) async fn cleanup_empty_shared_scope_if_needed( diff --git a/backend/crates/kalamdb-core/src/jobs/executors/stream_eviction.rs b/backend/crates/kalamdb-jobs/src/executors/stream_eviction.rs similarity index 96% rename from backend/crates/kalamdb-core/src/jobs/executors/stream_eviction.rs rename to backend/crates/kalamdb-jobs/src/executors/stream_eviction.rs index b2f94a06c..e8a539669 100644 --- a/backend/crates/kalamdb-core/src/jobs/executors/stream_eviction.rs +++ b/backend/crates/kalamdb-jobs/src/executors/stream_eviction.rs @@ -21,13 +21,13 @@ //! } //! ``` -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::error_extensions::KalamDbResultExt; -use crate::jobs::executors::{JobContext, JobDecision, JobExecutor, JobParams}; -use crate::providers::StreamTableProvider; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::error_extensions::KalamDbResultExt; +use crate::executors::{JobContext, JobDecision, JobExecutor, JobParams}; +use kalamdb_core::providers::StreamTableProvider; #[cfg(test)] -use crate::schema_registry::TablesSchemaRegistryAdapter; +use kalamdb_core::schema_registry::TablesSchemaRegistryAdapter; use async_trait::async_trait; use kalamdb_commons::ids::{SeqId, SnowflakeGenerator}; use kalamdb_commons::schemas::TableType; @@ -235,11 +235,11 @@ impl Default for StreamEvictionExecutor { #[cfg(test)] mod tests { use super::*; - use crate::app_context::AppContext; - use crate::providers::arrow_json_conversion::json_to_row; - use crate::providers::base::{BaseTableProvider, TableProviderCore}; - use crate::providers::StreamTableProvider; - use crate::test_helpers::test_app_context_simple; + use kalamdb_core::app_context::AppContext; + use kalamdb_core::providers::arrow_json_conversion::json_to_row; + use kalamdb_core::providers::base::{BaseTableProvider, TableProviderCore}; + use 
kalamdb_core::providers::StreamTableProvider; + use kalamdb_core::test_helpers::test_app_context_simple; use chrono::Utc; use datafusion::datasource::TableProvider; use kalamdb_commons::models::datatypes::KalamDataType; diff --git a/backend/crates/kalamdb-core/src/jobs/executors/topic_cleanup.rs b/backend/crates/kalamdb-jobs/src/executors/topic_cleanup.rs similarity index 98% rename from backend/crates/kalamdb-core/src/jobs/executors/topic_cleanup.rs rename to backend/crates/kalamdb-jobs/src/executors/topic_cleanup.rs index b618f35b7..9b0563af0 100644 --- a/backend/crates/kalamdb-core/src/jobs/executors/topic_cleanup.rs +++ b/backend/crates/kalamdb-jobs/src/executors/topic_cleanup.rs @@ -21,8 +21,8 @@ //! } //! ``` -use crate::error::KalamDbError; -use crate::jobs::executors::{JobContext, JobDecision, JobExecutor, JobParams}; +use kalamdb_core::error::KalamDbError; +use crate::executors::{JobContext, JobDecision, JobExecutor, JobParams}; use async_trait::async_trait; use kalamdb_commons::models::TopicId; use kalamdb_system::JobType; diff --git a/backend/crates/kalamdb-core/src/jobs/executors/topic_retention.rs b/backend/crates/kalamdb-jobs/src/executors/topic_retention.rs similarity index 98% rename from backend/crates/kalamdb-core/src/jobs/executors/topic_retention.rs rename to backend/crates/kalamdb-jobs/src/executors/topic_retention.rs index 14e2138c2..4d6d91505 100644 --- a/backend/crates/kalamdb-core/src/jobs/executors/topic_retention.rs +++ b/backend/crates/kalamdb-jobs/src/executors/topic_retention.rs @@ -20,8 +20,8 @@ //! } //! 
``` -use crate::error::KalamDbError; -use crate::jobs::executors::{JobContext, JobDecision, JobExecutor, JobParams}; +use kalamdb_core::error::KalamDbError; +use crate::executors::{JobContext, JobDecision, JobExecutor, JobParams}; use async_trait::async_trait; use kalamdb_commons::models::TopicId; use kalamdb_system::JobType; diff --git a/backend/crates/kalamdb-core/src/jobs/executors/user_cleanup.rs b/backend/crates/kalamdb-jobs/src/executors/user_cleanup.rs similarity index 96% rename from backend/crates/kalamdb-core/src/jobs/executors/user_cleanup.rs rename to backend/crates/kalamdb-jobs/src/executors/user_cleanup.rs index b671a5168..23cf2c3f0 100644 --- a/backend/crates/kalamdb-core/src/jobs/executors/user_cleanup.rs +++ b/backend/crates/kalamdb-jobs/src/executors/user_cleanup.rs @@ -19,8 +19,8 @@ //! } //! ``` -use crate::error::KalamDbError; -use crate::jobs::executors::{JobContext, JobDecision, JobExecutor, JobParams}; +use kalamdb_core::error::KalamDbError; +use crate::executors::{JobContext, JobDecision, JobExecutor, JobParams}; use async_trait::async_trait; use kalamdb_commons::models::UserId; use kalamdb_system::JobType; diff --git a/backend/crates/kalamdb-core/src/jobs/executors/user_export.rs b/backend/crates/kalamdb-jobs/src/executors/user_export.rs similarity index 98% rename from backend/crates/kalamdb-core/src/jobs/executors/user_export.rs rename to backend/crates/kalamdb-jobs/src/executors/user_export.rs index 4fd970767..c7ba021e7 100644 --- a/backend/crates/kalamdb-core/src/jobs/executors/user_export.rs +++ b/backend/crates/kalamdb-jobs/src/executors/user_export.rs @@ -15,10 +15,11 @@ //! } //! 
``` -use crate::error::KalamDbError; -use crate::jobs::executors::flush::FlushParams; -use crate::jobs::executors::{JobContext, JobDecision, JobExecutor, JobParams}; -use crate::providers::UserTableProvider; +use kalamdb_core::error::KalamDbError; +use crate::AppContextJobsExt; +use crate::executors::flush::FlushParams; +use crate::executors::{JobContext, JobDecision, JobExecutor, JobParams}; +use kalamdb_core::providers::UserTableProvider; use async_trait::async_trait; use kalamdb_commons::ids::UserTableRowId; use kalamdb_commons::models::UserId; diff --git a/backend/crates/kalamdb-core/src/jobs/executors/vector_index.rs b/backend/crates/kalamdb-jobs/src/executors/vector_index.rs similarity index 95% rename from backend/crates/kalamdb-core/src/jobs/executors/vector_index.rs rename to backend/crates/kalamdb-jobs/src/executors/vector_index.rs index d66bac2bb..961fbd0b0 100644 --- a/backend/crates/kalamdb-core/src/jobs/executors/vector_index.rs +++ b/backend/crates/kalamdb-jobs/src/executors/vector_index.rs @@ -3,9 +3,9 @@ //! Flushes per-column vector hot staging into cold snapshot artifacts and updates //! vector metadata embedded in manifest.json. 
-use crate::error::KalamDbError; -use crate::jobs::executors::{JobContext, JobDecision, JobExecutor, JobParams}; -use crate::vector::{flush_shared_scope_vectors, flush_user_scope_vectors}; +use kalamdb_core::error::KalamDbError; +use crate::executors::{JobContext, JobDecision, JobExecutor, JobParams}; +use kalamdb_core::vector::{flush_shared_scope_vectors, flush_user_scope_vectors}; use async_trait::async_trait; use kalamdb_commons::models::{TableId, UserId}; use kalamdb_commons::schemas::TableType; diff --git a/backend/crates/kalamdb-core/src/jobs/flush_scheduler.rs b/backend/crates/kalamdb-jobs/src/flush_scheduler.rs similarity index 97% rename from backend/crates/kalamdb-core/src/jobs/flush_scheduler.rs rename to backend/crates/kalamdb-jobs/src/flush_scheduler.rs index a7daf1f0d..e1ef55934 100644 --- a/backend/crates/kalamdb-core/src/jobs/flush_scheduler.rs +++ b/backend/crates/kalamdb-jobs/src/flush_scheduler.rs @@ -1,7 +1,7 @@ -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::jobs::executors::flush::FlushParams; -use crate::jobs::JobsManager; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use crate::executors::flush::FlushParams; +use crate::JobsManager; use kalamdb_commons::TableType; use kalamdb_system::JobType; use std::sync::Arc; diff --git a/backend/crates/kalamdb-core/src/jobs/health_monitor.rs b/backend/crates/kalamdb-jobs/src/health_monitor.rs similarity index 91% rename from backend/crates/kalamdb-core/src/jobs/health_monitor.rs rename to backend/crates/kalamdb-jobs/src/health_monitor.rs index 8c7782231..c070eea96 100644 --- a/backend/crates/kalamdb-core/src/jobs/health_monitor.rs +++ b/backend/crates/kalamdb-jobs/src/health_monitor.rs @@ -1,5 +1,5 @@ -use crate::app_context::AppContext; -use crate::error::KalamDbError; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; use std::sync::Arc; // Re-export the WebSocket session tracking functions from 
kalamdb-observability diff --git a/backend/crates/kalamdb-core/src/jobs/jobs_manager/actions.rs b/backend/crates/kalamdb-jobs/src/jobs_manager/actions.rs similarity index 98% rename from backend/crates/kalamdb-core/src/jobs/jobs_manager/actions.rs rename to backend/crates/kalamdb-jobs/src/jobs_manager/actions.rs index 2adf4e24e..1db45bc7a 100644 --- a/backend/crates/kalamdb-core/src/jobs/jobs_manager/actions.rs +++ b/backend/crates/kalamdb-jobs/src/jobs_manager/actions.rs @@ -1,7 +1,7 @@ use super::types::JobsManager; -use crate::error::KalamDbError; -use crate::error_extensions::KalamDbResultExt; -use crate::jobs::executors::JobParams; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::error_extensions::KalamDbResultExt; +use crate::executors::JobParams; use chrono::Utc; use kalamdb_commons::JobId; use kalamdb_raft::commands::MetaCommand; diff --git a/backend/crates/kalamdb-core/src/jobs/jobs_manager/mod.rs b/backend/crates/kalamdb-jobs/src/jobs_manager/mod.rs similarity index 100% rename from backend/crates/kalamdb-core/src/jobs/jobs_manager/mod.rs rename to backend/crates/kalamdb-jobs/src/jobs_manager/mod.rs diff --git a/backend/crates/kalamdb-core/src/jobs/jobs_manager/queries.rs b/backend/crates/kalamdb-jobs/src/jobs_manager/queries.rs similarity index 95% rename from backend/crates/kalamdb-core/src/jobs/jobs_manager/queries.rs rename to backend/crates/kalamdb-jobs/src/jobs_manager/queries.rs index b84189903..1a4048643 100644 --- a/backend/crates/kalamdb-core/src/jobs/jobs_manager/queries.rs +++ b/backend/crates/kalamdb-jobs/src/jobs_manager/queries.rs @@ -1,6 +1,6 @@ use super::types::JobsManager; -use crate::error::KalamDbError; -use crate::error_extensions::KalamDbResultExt; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::error_extensions::KalamDbResultExt; use kalamdb_commons::JobId; use kalamdb_system::providers::jobs::models::{Job, JobFilter}; use kalamdb_system::JobStatus; diff --git 
a/backend/crates/kalamdb-core/src/jobs/jobs_manager/runner.rs b/backend/crates/kalamdb-jobs/src/jobs_manager/runner.rs similarity index 99% rename from backend/crates/kalamdb-core/src/jobs/jobs_manager/runner.rs rename to backend/crates/kalamdb-jobs/src/jobs_manager/runner.rs index 70e8a6320..0e86fe9d1 100644 --- a/backend/crates/kalamdb-core/src/jobs/jobs_manager/runner.rs +++ b/backend/crates/kalamdb-jobs/src/jobs_manager/runner.rs @@ -1,9 +1,10 @@ use super::types::JobsManager; use super::utils::log_job; -use crate::error::KalamDbError; -use crate::error_extensions::KalamDbResultExt; -use crate::jobs::executors::JobDecision; -use crate::jobs::{FlushScheduler, HealthMonitor, StreamEvictionScheduler}; +use crate::AppContextJobsExt; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::error_extensions::KalamDbResultExt; +use crate::executors::JobDecision; +use crate::{FlushScheduler, HealthMonitor, StreamEvictionScheduler}; use kalamdb_commons::{JobId, NodeId}; use kalamdb_raft::commands::MetaCommand; use kalamdb_raft::GroupId; diff --git a/backend/crates/kalamdb-core/src/jobs/jobs_manager/types.rs b/backend/crates/kalamdb-jobs/src/jobs_manager/types.rs similarity index 97% rename from backend/crates/kalamdb-core/src/jobs/jobs_manager/types.rs rename to backend/crates/kalamdb-jobs/src/jobs_manager/types.rs index 1c634abb6..ad6b60b12 100644 --- a/backend/crates/kalamdb-core/src/jobs/jobs_manager/types.rs +++ b/backend/crates/kalamdb-jobs/src/jobs_manager/types.rs @@ -1,5 +1,5 @@ -use crate::app_context::AppContext; -use crate::jobs::executors::JobRegistry; +use kalamdb_core::app_context::AppContext; +use crate::executors::JobRegistry; use kalamdb_commons::{JobId, NodeId}; use kalamdb_system::{JobNodesTableProvider, JobsTableProvider}; use std::sync::atomic::{AtomicBool, Ordering}; diff --git a/backend/crates/kalamdb-core/src/jobs/jobs_manager/utils.rs b/backend/crates/kalamdb-jobs/src/jobs_manager/utils.rs similarity index 98% rename from 
backend/crates/kalamdb-core/src/jobs/jobs_manager/utils.rs rename to backend/crates/kalamdb-jobs/src/jobs_manager/utils.rs index aca4e1257..cd2651a86 100644 --- a/backend/crates/kalamdb-core/src/jobs/jobs_manager/utils.rs +++ b/backend/crates/kalamdb-jobs/src/jobs_manager/utils.rs @@ -1,6 +1,6 @@ use super::types::JobsManager; -use crate::error::KalamDbError; -use crate::error_extensions::KalamDbResultExt; +use kalamdb_core::error::KalamDbError; +use kalamdb_core::error_extensions::KalamDbResultExt; use chrono::Utc; use kalamdb_commons::{JobId, NodeId}; use kalamdb_raft::commands::MetaCommand; diff --git a/backend/crates/kalamdb-core/src/jobs/leader_failover.rs b/backend/crates/kalamdb-jobs/src/leader_failover.rs similarity index 99% rename from backend/crates/kalamdb-core/src/jobs/leader_failover.rs rename to backend/crates/kalamdb-jobs/src/leader_failover.rs index 1458efaaf..618f2708d 100644 --- a/backend/crates/kalamdb-core/src/jobs/leader_failover.rs +++ b/backend/crates/kalamdb-jobs/src/leader_failover.rs @@ -28,8 +28,8 @@ use kalamdb_system::{JobFilter, JobSortField, JobStatus, JobType, SortOrder}; use std::collections::HashSet; use std::sync::Arc; -use crate::error::KalamDbError; -use crate::jobs::leader_guard::LeaderOnlyJobGuard; +use kalamdb_core::error::KalamDbError; +use crate::leader_guard::LeaderOnlyJobGuard; use kalamdb_system::JobsTableProvider; /// How long to wait before considering a job orphaned (in seconds) diff --git a/backend/crates/kalamdb-core/src/jobs/leader_guard.rs b/backend/crates/kalamdb-jobs/src/leader_guard.rs similarity index 99% rename from backend/crates/kalamdb-core/src/jobs/leader_guard.rs rename to backend/crates/kalamdb-jobs/src/leader_guard.rs index 378df6b49..5d657415a 100644 --- a/backend/crates/kalamdb-core/src/jobs/leader_guard.rs +++ b/backend/crates/kalamdb-jobs/src/leader_guard.rs @@ -23,7 +23,7 @@ use chrono::Utc; use kalamdb_raft::{CommandExecutor, GroupId, MetaCommand, MetaResponse}; use std::sync::Arc; -use 
crate::error::KalamDbError; +use kalamdb_core::error::KalamDbError; use kalamdb_commons::models::{JobId, NodeId}; /// Result of checking leadership status diff --git a/backend/crates/kalamdb-jobs/src/lib.rs b/backend/crates/kalamdb-jobs/src/lib.rs new file mode 100644 index 000000000..c8418f83e --- /dev/null +++ b/backend/crates/kalamdb-jobs/src/lib.rs @@ -0,0 +1,94 @@ +//! # Job Management System +//! +//! Extracted from `kalamdb-core` to reduce compile times. +//! Provides `JobsManager` with concrete executors, flush/eviction schedulers, +//! health monitoring, and leader-only job execution for cluster mode. + +// ============================================================================ +// PHASE 9: UNIFIED JOB MANAGEMENT (PRODUCTION-READY) +// ============================================================================ +pub mod executors; +pub mod flush_scheduler; +pub mod health_monitor; +pub mod jobs_manager; +pub mod stream_eviction; + +// ============================================================================ +// PHASE 16: LEADER-ONLY JOB EXECUTION (CLUSTER MODE) +// ============================================================================ +pub mod leader_failover; +pub mod leader_guard; + +// Phase 9 exports (production API) +pub use executors::{JobContext, JobDecision, JobExecutor as JobExecutorTrait, JobRegistry}; +pub use flush_scheduler::FlushScheduler; +pub use health_monitor::HealthMonitor; +pub use jobs_manager::JobsManager; +pub use stream_eviction::StreamEvictionScheduler; + +// Phase 16 exports (cluster mode) +pub use leader_failover::{JobRecoveryAction, LeaderFailoverHandler, RecoveryReport}; +pub use leader_guard::{LeaderOnlyJobGuard, LeadershipStatus}; + +// ============================================================================ +// JobWaker implementation (bridges kalamdb-core trait → JobsManager) +// ============================================================================ +impl kalamdb_core::job_waker::JobWaker for 
JobsManager { + fn awake_job(&self, job_id: kalamdb_commons::JobId) { + // Delegate to the inherent method on JobsManager + JobsManager::awake_job(self, job_id); + } +} + +// ============================================================================ +// Extension trait: ergonomic `.job_manager()` on AppContext +// ============================================================================ +use kalamdb_core::app_context::AppContext; +use std::sync::Arc; + +/// Extension trait that provides typed access to the `JobsManager` stored +/// inside `AppContext` (which stores it as `Arc`). +pub trait AppContextJobsExt { + /// Downcast the type-erased job manager to `Arc`. + fn job_manager(&self) -> Arc; +} + +impl AppContextJobsExt for AppContext { + fn job_manager(&self) -> Arc { + self.job_manager_raw() + .clone() + .downcast::() + .expect("job_manager is not a JobsManager — was set_job_manager called?") + } +} + +/// Convenience: create a fully-wired `JobsManager`, register all executors, +/// and install it into the given `AppContext`. 
+pub fn init_job_manager(app_ctx: &Arc) { + use crate::executors::*; + + let job_registry = Arc::new(JobRegistry::new()); + job_registry.register(Arc::new(FlushExecutor::new())); + job_registry.register(Arc::new(CleanupExecutor::new())); + job_registry.register(Arc::new(RetentionExecutor::new())); + job_registry.register(Arc::new(StreamEvictionExecutor::new())); + job_registry.register(Arc::new(UserCleanupExecutor::new())); + job_registry.register(Arc::new(CompactExecutor::new())); + job_registry.register(Arc::new(BackupExecutor::new())); + job_registry.register(Arc::new(RestoreExecutor::new())); + job_registry.register(Arc::new(VectorIndexExecutor::new())); + job_registry.register(Arc::new(TopicCleanupExecutor::new())); + job_registry.register(Arc::new(TopicRetentionExecutor::new())); + job_registry.register(Arc::new(UserExportExecutor::new())); + + let jobs_provider = app_ctx.system_tables().jobs(); + let job_nodes_provider = app_ctx.system_tables().job_nodes(); + let job_manager = Arc::new(JobsManager::new( + jobs_provider, + job_nodes_provider, + job_registry, + Arc::clone(app_ctx), + )); + // Pass as both the type-erased store and the JobWaker impl + app_ctx.set_job_manager(job_manager.clone(), job_manager); +} diff --git a/backend/crates/kalamdb-core/src/jobs/stream_eviction.rs b/backend/crates/kalamdb-jobs/src/stream_eviction.rs similarity index 95% rename from backend/crates/kalamdb-core/src/jobs/stream_eviction.rs rename to backend/crates/kalamdb-jobs/src/stream_eviction.rs index 41e133db3..138cbaaa9 100644 --- a/backend/crates/kalamdb-core/src/jobs/stream_eviction.rs +++ b/backend/crates/kalamdb-jobs/src/stream_eviction.rs @@ -1,6 +1,6 @@ -use crate::app_context::AppContext; -use crate::error::KalamDbError; -use crate::jobs::JobsManager; +use kalamdb_core::app_context::AppContext; +use kalamdb_core::error::KalamDbError; +use crate::JobsManager; use kalamdb_commons::models::{schemas::TableOptions, TableId}; use kalamdb_commons::TableType; use 
kalamdb_system::JobType; @@ -47,7 +47,7 @@ impl StreamEvictionScheduler { let table_id = TableId::new(table.namespace_id.clone(), table.table_name.clone()); // Create eviction job with typed parameters - let params = crate::jobs::executors::stream_eviction::StreamEvictionParams { + let params = crate::executors::stream_eviction::StreamEvictionParams { table_id: table_id.clone(), table_type: TableType::Stream, ttl_seconds, diff --git a/backend/src/lifecycle.rs b/backend/src/lifecycle.rs index d70d2ce7e..ed48bd351 100644 --- a/backend/src/lifecycle.rs +++ b/backend/src/lifecycle.rs @@ -13,8 +13,10 @@ use kalamdb_configs::ServerConfig; use kalamdb_core::live::ConnectionsManager; use kalamdb_core::live_query::LiveQueryManager; use kalamdb_core::sql::datafusion_session::DataFusionSessionFactory; +use kalamdb_core::sql::executor::handler_registry::HandlerRegistry; use kalamdb_core::sql::executor::SqlExecutor; use kalamdb_dba::{initialize_dba_namespace, start_stats_recorder}; +use kalamdb_jobs::AppContextJobsExt; use kalamdb_store::open_storage_backend; use kalamdb_system::providers::storages::models::StorageMode; use log::debug; @@ -117,8 +119,15 @@ pub async fn prepare_components( let user_repo: Arc = Arc::new(kalamdb_api::repositories::CachedUsersRepo::new(users_provider)); + let handler_registry = Arc::new(HandlerRegistry::new(app_context.clone())); + kalamdb_handlers::register_all_handlers( + &handler_registry, + app_context.clone(), + config.auth.enforce_password_complexity, + ); + let sql_executor = - Arc::new(SqlExecutor::new(app_context.clone(), config.auth.enforce_password_complexity)); + Arc::new(SqlExecutor::new(app_context.clone(), handler_registry)); app_context.set_sql_executor(sql_executor.clone()); live_query_manager.set_sql_executor(sql_executor.clone()); @@ -128,6 +137,9 @@ pub async fn prepare_components( app_context.restore_raft_state_machines().await; + // Initialize job system (executors, manager, waker) — extracted to kalamdb-jobs crate + 
kalamdb_jobs::init_job_manager(&app_context); + let job_manager = app_context.job_manager(); let max_concurrent = config.jobs.max_concurrent; tokio::spawn(async move { diff --git a/backend/tests/common/testserver/flush.rs b/backend/tests/common/testserver/flush.rs index 6b4649497..3c179ae7e 100644 --- a/backend/tests/common/testserver/flush.rs +++ b/backend/tests/common/testserver/flush.rs @@ -1,8 +1,9 @@ use anyhow::Result; use kalam_link::models::{QueryResponse, ResponseStatus}; use kalamdb_commons::{NamespaceId, TableId, TableName}; -use kalamdb_core::jobs::executors::flush::{FlushExecutor, FlushParams}; -use kalamdb_core::jobs::executors::{JobContext, JobExecutor}; +use kalamdb_jobs::executors::flush::{FlushExecutor, FlushParams}; +use kalamdb_jobs::executors::{JobContext, JobExecutor}; +use kalamdb_jobs::AppContextJobsExt; use std::path::{Path, PathBuf}; use tokio::time::{sleep, Duration, Instant}; diff --git a/backend/tests/misc/auth/test_password_complexity.rs b/backend/tests/misc/auth/test_password_complexity.rs index 20f158186..29352fde2 100644 --- a/backend/tests/misc/auth/test_password_complexity.rs +++ b/backend/tests/misc/auth/test_password_complexity.rs @@ -5,6 +5,7 @@ use kalamdb_commons::{models::UserName, AuthType, Role, StorageId, UserId}; use kalamdb_core::app_context::AppContext; use kalamdb_core::error::KalamDbError; use kalamdb_core::sql::{ExecutionContext, ExecutionResult, SqlExecutor}; +use kalamdb_core::sql::executor::handler_registry::HandlerRegistry; use kalamdb_system::providers::storages::models::StorageMode; use std::sync::Arc; @@ -17,7 +18,9 @@ async fn setup_executor( let session_context = server.session_context.clone(); // Create SqlExecutor with desired password complexity enforcement - let executor = SqlExecutor::new(app_context.clone(), enforce_complexity); + let registry = Arc::new(HandlerRegistry::new(app_context.clone())); + kalamdb_handlers::register_all_handlers(®istry, app_context.clone(), enforce_complexity); + let 
executor = SqlExecutor::new(app_context.clone(), registry); (executor, app_context, session_context) } diff --git a/backend/tests/misc/system/test_topic_cleanup_job.rs b/backend/tests/misc/system/test_topic_cleanup_job.rs index 2319dd966..e68deeb77 100644 --- a/backend/tests/misc/system/test_topic_cleanup_job.rs +++ b/backend/tests/misc/system/test_topic_cleanup_job.rs @@ -7,6 +7,7 @@ use super::test_support::TestServer; use anyhow::Result; +use kalamdb_jobs::AppContextJobsExt; /// Test that TopicCleanup job is scheduled when dropping a topic #[actix_web::test] @@ -142,12 +143,12 @@ async fn test_topic_cleanup_job_idempotent() -> Result<()> { // Try to schedule another cleanup job manually (simulating retry) // This should be idempotent and not fail let topic_id = kalamdb_commons::models::TopicId::new(&topic_name); - let params = kalamdb_core::jobs::executors::topic_cleanup::TopicCleanupParams { + let params = kalamdb_jobs::executors::topic_cleanup::TopicCleanupParams { topic_id: topic_id.clone(), topic_name: topic_name.clone(), }; - let result = ctx.jobs_manager().create_job_typed( + let result = ctx.job_manager().create_job_typed( kalamdb_system::providers::jobs::models::JobType::TopicCleanup, params, ); diff --git a/backend/tests/testserver/flush/test_flush_policy_verification_http.rs b/backend/tests/testserver/flush/test_flush_policy_verification_http.rs index 6a19eb843..5d366caea 100644 --- a/backend/tests/testserver/flush/test_flush_policy_verification_http.rs +++ b/backend/tests/testserver/flush/test_flush_policy_verification_http.rs @@ -17,7 +17,8 @@ use super::test_support::jobs::{ }; use kalam_link::models::ResponseStatus; use kalamdb_commons::Role; -use kalamdb_core::jobs::FlushScheduler; +use kalamdb_jobs::FlushScheduler; +use kalamdb_jobs::AppContextJobsExt; use tokio::time::Duration; async fn count_matching_flush_jobs( diff --git a/cli/full_smoke_output.txt b/cli/full_smoke_output.txt new file mode 100644 index 000000000..3b0967bb1 --- /dev/null +++ 
b/cli/full_smoke_output.txt @@ -0,0 +1,201 @@ + Compiling kalam-link v0.4.2-dev (/Users/jamal/git/KalamDB/link) + Compiling kalam-cli v0.4.2-dev (/Users/jamal/git/KalamDB/cli) + Finished `test` profile [unoptimized] target(s) in 5.33s +──────────── + Nextest run ID 7bc6e5cd-fcd4-48ee-9ecd-60651c8904b2 with nextest profile: default + Starting 192 tests across 1 binary + PASS [ 0.029s] ( 1/192) kalam-cli::smoke leader_only_reads::smoke_test_not_leader_error_detection + PASS [ 6.158s] ( 2/192) kalam-cli::smoke leader_only_reads::smoke_test_leader_read_succeeds_on_leader + PASS [ 6.131s] ( 3/192) kalam-cli::smoke smoke_test_all_system_tables_schemas::smoke_test_system_table_column_counts + PASS [ 6.199s] ( 4/192) kalam-cli::smoke leader_only_reads::smoke_test_system_table_reads + PASS [ 6.207s] ( 5/192) kalam-cli::smoke leader_only_reads::smoke_test_leader_read_shared_table + PASS [ 6.487s] ( 6/192) kalam-cli::smoke leader_only_reads::smoke_test_leader_read_with_filters + PASS [ 6.604s] ( 7/192) kalam-cli::smoke leader_only_reads::smoke_test_read_after_write_consistency + PASS [ 6.608s] ( 8/192) kalam-cli::smoke smoke_test_00_parallel_query_burst::smoke_test_00_parallel_query_burst + PASS [ 6.785s] ( 9/192) kalam-cli::smoke smoke_test_all_datatypes::smoke_all_datatypes_user_shared_stream + PASS [ 8.350s] ( 10/192) kalam-cli::smoke chat_ai_example_smoke::smoke_chat_ai_example_from_readme + PASS [ 6.119s] ( 11/192) kalam-cli::smoke smoke_test_all_system_tables_schemas::smoke_test_system_tables_in_information_schema + PASS [ 6.249s] ( 12/192) kalam-cli::smoke smoke_test_all_system_tables_schemas::smoke_test_topic_offsets_schema_and_operations + PASS [ 6.266s] ( 13/192) kalam-cli::smoke smoke_test_alter_with_data::smoke_test_alter_table_with_data_verification + PASS [ 13.637s] ( 14/192) kalam-cli::smoke smoke_test_all_system_tables_schemas::smoke_test_all_system_tables_and_views_queryable + PASS [ 11.602s] ( 15/192) kalam-cli::smoke 
smoke_test_as_user_authorization::smoke_security_regular_user_cannot_impersonate_privileged_users_in_batch + PASS [ 9.751s] ( 16/192) kalam-cli::smoke smoke_test_as_user_impersonation::smoke_as_user_blocked_for_regular_user + PASS [ 13.603s] ( 17/192) kalam-cli::smoke smoke_test_as_user_chat_impersonation::smoke_as_user_chat_select_scope_for_different_user + PASS [ 15.016s] ( 18/192) kalam-cli::smoke smoke_test_as_user_chat_impersonation::smoke_as_user_chat_delete_flow + PASS [ 15.036s] ( 19/192) kalam-cli::smoke smoke_test_as_user_chat_impersonation::smoke_as_user_chat_insert_and_select_flow + PASS [ 15.410s] ( 20/192) kalam-cli::smoke smoke_test_as_user_chat_impersonation::smoke_as_user_chat_update_flow + PASS [ 11.082s] ( 21/192) kalam-cli::smoke smoke_test_as_user_impersonation::smoke_as_user_delete_with_dba_role + PASS [ 11.577s] ( 22/192) kalam-cli::smoke smoke_test_as_user_impersonation::smoke_as_user_insert_with_service_role + PASS [ 10.483s] ( 23/192) kalam-cli::smoke smoke_test_as_user_impersonation::smoke_as_user_rejected_on_shared_table + PASS [ 15.005s] ( 24/192) kalam-cli::smoke smoke_test_as_user_impersonation::smoke_as_user_full_workflow + PASS [ 6.951s] ( 25/192) kalam-cli::smoke smoke_test_backup_restore::smoke_backup_database_job_completes + PASS [ 6.885s] ( 26/192) kalam-cli::smoke smoke_test_backup_restore::smoke_backup_job_visible_in_system_jobs + PASS [ 6.092s] ( 27/192) kalam-cli::smoke smoke_test_backup_restore::smoke_restore_nonexistent_path_returns_error + PASS [ 7.069s] ( 28/192) kalam-cli::smoke smoke_test_backup_restore::smoke_restore_from_backup_job_completes + PASS [ 13.379s] ( 29/192) kalam-cli::smoke smoke_test_as_user_impersonation::smoke_as_user_select_scopes_reads_for_user_tables + PASS [ 9.536s] ( 30/192) kalam-cli::smoke smoke_test_backup_restore::smoke_backup_requires_dba_role + PASS [ 13.827s] ( 31/192) kalam-cli::smoke smoke_test_as_user_impersonation::smoke_as_user_stream_table_isolation + PASS [ 13.001s] ( 32/192) 
kalam-cli::smoke smoke_test_as_user_impersonation::smoke_as_user_update_with_dba_role + PASS [ 9.436s] ( 33/192) kalam-cli::smoke smoke_test_backup_restore::smoke_restore_requires_dba_role + PASS [ 6.218s] ( 34/192) kalam-cli::smoke smoke_test_batch_control::smoke_batch_control_data_ordering + PASS [ 6.481s] ( 35/192) kalam-cli::smoke smoke_test_batch_control::smoke_batch_control_empty_table + PASS [ 6.538s] ( 36/192) kalam-cli::smoke smoke_test_batch_control::smoke_batch_control_multi_batch + PASS [ 6.714s] ( 37/192) kalam-cli::smoke smoke_test_batch_control::smoke_batch_control_single_batch + PASS [ 6.199s] ( 38/192) kalam-cli::smoke smoke_test_cli_commands::smoke_cli_describe_table_command + PASS [ 6.135s] ( 39/192) kalam-cli::smoke smoke_test_cli_commands::smoke_cli_error_handling + PASS [ 7.226s] ( 40/192) kalam-cli::smoke smoke_test_cli_commands::smoke_cli_alter_table + PASS [ 8.441s] ( 41/192) kalam-cli::smoke smoke_test_cleanup_job::smoke_cleanup_job_completes + PASS [ 6.343s] ( 42/192) kalam-cli::smoke smoke_test_cli_commands::smoke_cli_flush_command + PASS [ 6.016s] ( 43/192) kalam-cli::smoke smoke_test_cli_commands::smoke_cli_format_json_command + PASS [ 6.257s] ( 44/192) kalam-cli::smoke smoke_test_cli_commands::smoke_cli_list_tables_command + PASS [ 6.311s] ( 45/192) kalam-cli::smoke smoke_test_cli_commands::smoke_cli_namespace_management + PASS [ 6.444s] ( 46/192) kalam-cli::smoke smoke_test_cli_commands::smoke_cli_sql_execution + PASS [ 4.742s] ( 47/192) kalam-cli::smoke smoke_test_cluster_operations::smoke_test_cluster_all + PASS [ 6.577s] ( 48/192) kalam-cli::smoke smoke_test_cli_commands::smoke_cli_stats_command + PASS [ 7.282s] ( 49/192) kalam-cli::smoke smoke_test_cli_commands::smoke_cli_system_tables + PASS [ 7.470s] ( 50/192) kalam-cli::smoke smoke_test_cluster_operations::smoke_test_cluster_batch_insert_consistency + PASS [ 6.116s] ( 51/192) kalam-cli::smoke smoke_test_cluster_operations::smoke_test_cluster_namespace_consistency + PASS [ 
7.890s] ( 52/192) kalam-cli::smoke smoke_test_cluster_operations::smoke_test_cluster_concurrent_operations + PASS [ 7.656s] ( 53/192) kalam-cli::smoke smoke_test_cluster_operations::smoke_test_cluster_live_query_tracking + PASS [ 7.989s] ( 54/192) kalam-cli::smoke smoke_test_cluster_operations::smoke_test_cluster_job_tracking + PASS [ 10.845s] ( 55/192) kalam-cli::smoke smoke_test_cli_commands::smoke_cli_user_management + PASS [ 6.785s] ( 56/192) kalam-cli::smoke smoke_test_cluster_operations::smoke_test_cluster_system_table_counts_consistent + PASS [ 8.422s] ( 57/192) kalam-cli::smoke smoke_test_cluster_operations::smoke_test_cluster_storage_operations + PASS [ 11.054s] ( 58/192) kalam-cli::smoke smoke_test_cluster_operations::smoke_test_cluster_shared_table_consistency + PASS [ 7.630s] ( 59/192) kalam-cli::smoke smoke_test_cluster_operations::smoke_test_cluster_table_type_consistency + PASS [ 6.685s] ( 60/192) kalam-cli::smoke smoke_test_custom_functions::smoke_test_current_user_default + PASS [ 7.102s] ( 61/192) kalam-cli::smoke smoke_test_custom_functions::smoke_test_all_custom_functions_combined + PASS [ 6.801s] ( 62/192) kalam-cli::smoke smoke_test_custom_functions::smoke_test_snowflake_id_default + PASS [ 8.480s] ( 63/192) kalam-cli::smoke smoke_test_core_operations::smoke_test_core_operations + PASS [ 6.656s] ( 64/192) kalam-cli::smoke smoke_test_custom_functions::smoke_test_ulid_default + PASS [ 6.821s] ( 65/192) kalam-cli::smoke smoke_test_custom_functions::smoke_test_uuid_v7_default + PASS [ 11.441s] ( 66/192) kalam-cli::smoke smoke_test_cluster_operations::smoke_test_cluster_user_operations + PASS [ 6.634s] ( 67/192) kalam-cli::smoke smoke_test_datatype_preservation::test_all_kalam_datatypes_are_preserved + PASS [ 6.600s] ( 68/192) kalam-cli::smoke smoke_test_datatype_preservation::test_system_tables_shows_correct_datatypes + PASS [ 13.624s] ( 69/192) kalam-cli::smoke smoke_test_cluster_operations::smoke_test_cluster_user_data_partitioning + PASS [ 
7.079s] ( 70/192) kalam-cli::smoke smoke_test_ddl_alter::smoke_test_alter_add_not_null_without_default_error + PASS [ 7.359s] ( 71/192) kalam-cli::smoke smoke_test_ddl_alter::smoke_test_alter_shared_table_access_level + PASS [ 7.327s] ( 72/192) kalam-cli::smoke smoke_test_ddl_alter::smoke_test_alter_system_columns_error + PASS [ 7.582s] ( 73/192) kalam-cli::smoke smoke_test_ddl_alter::smoke_test_alter_table_add_column + PASS [ 7.395s] ( 74/192) kalam-cli::smoke smoke_test_ddl_alter::smoke_test_alter_table_drop_column + PASS [ 7.362s] ( 75/192) kalam-cli::smoke smoke_test_ddl_alter::smoke_test_alter_table_modify_column + PASS [ 7.597s] ( 76/192) kalam-cli::smoke smoke_test_dml_extended::smoke_test_aggregation_queries + PASS [ 7.895s] ( 77/192) kalam-cli::smoke smoke_test_dml_extended::smoke_test_hard_delete_stream_table + PASS [ 8.518s] ( 78/192) kalam-cli::smoke smoke_test_dml_extended::smoke_test_multi_row_insert + PASS [ 8.676s] ( 79/192) kalam-cli::smoke smoke_test_dml_extended::smoke_test_multi_row_update + PASS [ 8.589s] ( 80/192) kalam-cli::smoke smoke_test_dml_extended::smoke_test_soft_delete_shared_table + PASS [ 8.853s] ( 81/192) kalam-cli::smoke smoke_test_dml_extended::smoke_test_soft_delete_user_table + PASS [ 8.619s] ( 82/192) kalam-cli::smoke smoke_test_dml_watermark_optimization::smoke_test_watermark_dml_delete + PASS [ 9.290s] ( 83/192) kalam-cli::smoke smoke_test_dml_watermark_optimization::smoke_test_watermark_ddl_then_dml + PASS [ 9.551s] ( 84/192) kalam-cli::smoke smoke_test_dml_watermark_optimization::smoke_test_watermark_dml_insert_performance + PASS [ 10.111s] ( 85/192) kalam-cli::smoke smoke_test_dml_watermark_optimization::smoke_test_watermark_dml_update + PASS [ 9.673s] ( 86/192) kalam-cli::smoke smoke_test_dml_wide_columns::smoke_shared_table_dml_wide_columns + PASS [ 10.081s] ( 87/192) kalam-cli::smoke smoke_test_dml_wide_columns::smoke_subscription_update_delete_notifications + PASS [ 10.034s] ( 88/192) kalam-cli::smoke 
smoke_test_dml_wide_columns::smoke_user_table_dml_wide_columns + PASS [ 8.047s] ( 89/192) kalam-cli::smoke smoke_test_file_datatype::test_file_datatype_upload_and_download + PASS [ 12.646s] ( 90/192) kalam-cli::smoke smoke_test_export_user_data::smoke_export_download_zip_is_valid + PASS [ 7.235s] ( 91/192) kalam-cli::smoke smoke_test_flush_manifest::smoke_test_flush_stream_table_error + PASS [ 11.302s] ( 92/192) kalam-cli::smoke smoke_test_export_user_data::smoke_show_export_empty_for_new_user + PASS [ 12.664s] ( 93/192) kalam-cli::smoke smoke_test_export_user_data::smoke_export_user_data_job_completes + PASS [ 8.082s] ( 94/192) kalam-cli::smoke smoke_test_flush_manifest::smoke_test_manifest_updated_on_second_flush + PASS [ 16.204s] ( 95/192) kalam-cli::smoke smoke_test_export_user_data::smoke_export_download_forbidden_for_other_user + PASS [ 7.626s] ( 96/192) kalam-cli::smoke smoke_test_flush_manifest::smoke_test_shared_table_flush_manifest + PASS [ 12.686s] ( 97/192) kalam-cli::smoke smoke_test_export_user_data::smoke_show_export_returns_completed_status_and_download_url + PASS [ 7.195s] ( 98/192) kalam-cli::smoke smoke_test_flush_manifest::smoke_test_user_table_flush_manifest + PASS [ 7.711s] ( 99/192) kalam-cli::smoke smoke_test_flush_operations::smoke_test_mixed_source_query + PASS [ 7.609s] (100/192) kalam-cli::smoke smoke_test_flush_operations::smoke_test_shared_table_flush + PASS [ 7.658s] (101/192) kalam-cli::smoke smoke_test_flush_operations::smoke_test_user_table_flush + PASS [ 7.720s] (102/192) kalam-cli::smoke smoke_test_flush_pk_integrity::smoke_test_flush_pk_integrity_shared_table + PASS [ 7.611s] (103/192) kalam-cli::smoke smoke_test_flush_pk_integrity::smoke_test_flush_pk_integrity_user_table + PASS [ 7.351s] (104/192) kalam-cli::smoke smoke_test_insert_returning::smoke_insert_returning_seq_multi_row + PASS [ 7.510s] (105/192) kalam-cli::smoke smoke_test_insert_returning::smoke_insert_returning_seq_single_row + PASS [ 7.538s] (106/192) 
kalam-cli::smoke smoke_test_insert_returning::smoke_insert_returning_seq_on_user_table + PASS [ 7.673s] (107/192) kalam-cli::smoke smoke_test_insert_returning::smoke_insert_returning_star + PASS [ 8.152s] (108/192) kalam-cli::smoke smoke_test_insert_throughput::smoke_test_insert_throughput_batched + PASS [ 6.608s] (109/192) kalam-cli::smoke smoke_test_rpc_auth::smoke_rpc_health_is_public + PASS [ 8.307s] (110/192) kalam-cli::smoke smoke_test_int64_precision::smoke_int64_negative_large_values + PASS [ 9.956s] (111/192) kalam-cli::smoke smoke_test_insert_throughput::smoke_test_insert_throughput_parallel + PASS [ 9.276s] (112/192) kalam-cli::smoke smoke_test_insert_throughput::smoke_test_insert_throughput_single + PASS [ 8.177s] (113/192) kalam-cli::smoke smoke_test_int64_precision::smoke_int64_precision_preserved_as_string + PASS [ 8.688s] (114/192) kalam-cli::smoke smoke_test_int64_precision::smoke_int64_edge_case_exactly_max_safe + PASS [ 8.300s] (115/192) kalam-cli::smoke smoke_test_int64_precision::smoke_int64_small_values_remain_numbers + PASS [ 9.403s] (116/192) kalam-cli::smoke smoke_test_queries_benchmark::smoke_queries_benchmark + PASS [ 9.001s] (117/192) kalam-cli::smoke smoke_test_rpc_auth::smoke_rpc_login_nonexistent_user_matches_wrong_password_response + PASS [ 12.689s] (118/192) kalam-cli::smoke smoke_test_insert_throughput::smoke_test_insert_throughput_summary + PASS [ 6.820s] (119/192) kalam-cli::smoke smoke_test_rpc_auth::smoke_rpc_me_no_auth_returns_401 + PASS [ 6.776s] (120/192) kalam-cli::smoke smoke_test_rpc_auth::smoke_rpc_sql_basic_auth_returns_401 + PASS [ 6.750s] (121/192) kalam-cli::smoke smoke_test_rpc_auth::smoke_rpc_sql_forged_jwt_alg_none_returns_401 + PASS [ 6.909s] (122/192) kalam-cli::smoke smoke_test_rpc_auth::smoke_rpc_sql_invalid_bearer_returns_401 + PASS [ 6.671s] (123/192) kalam-cli::smoke smoke_test_rpc_auth::smoke_rpc_sql_no_auth_returns_401 + PASS [ 8.701s] (124/192) kalam-cli::smoke 
smoke_test_rpc_auth::smoke_rpc_login_wrong_password_returns_401_generic_message + PASS [ 8.526s] (125/192) kalam-cli::smoke smoke_test_rpc_auth::smoke_rpc_sql_empty_body_returns_error + PASS [ 8.528s] (126/192) kalam-cli::smoke smoke_test_schema_history::smoke_test_drop_table_removes_schema_history + PASS [ 12.726s] (127/192) kalam-cli::smoke smoke_test_rpc_auth::smoke_rpc_user_cannot_escalate_own_role + PASS [ 12.348s] (128/192) kalam-cli::smoke smoke_test_rpc_auth::smoke_rpc_user_role_cannot_read_system_users + PASS [ 8.651s] (129/192) kalam-cli::smoke smoke_test_schema_history::smoke_test_schema_history_in_system_tables + PASS [ 8.113s] (130/192) kalam-cli::smoke smoke_test_shared_table_crud::smoke_shared_table_crud + PASS [ 12.827s] (131/192) kalam-cli::smoke smoke_test_security_access::smoke_security_private_shared_table_blocked_in_batch + PASS [ 12.960s] (132/192) kalam-cli::smoke smoke_test_security_access::smoke_security_query_length_limit + PASS [ 12.933s] (133/192) kalam-cli::smoke smoke_test_security_access::smoke_security_system_table_write_blocked + PASS [ 14.028s] (134/192) kalam-cli::smoke smoke_test_security_access::smoke_security_subscription_blocked_for_system_and_private_shared + PASS [ 12.786s] (135/192) kalam-cli::smoke smoke_test_security_access::smoke_security_system_tables_blocked_in_batch + PASS [ 9.316s] (136/192) kalam-cli::smoke smoke_test_show_storages::smoke_show_storages_basic + PASS [ 9.429s] (137/192) kalam-cli::smoke smoke_test_show_storages::smoke_show_storages_cli_timestamps_are_not_epoch_shifted + PASS [ 12.871s] (138/192) kalam-cli::smoke smoke_test_shared_table_subscription::smoke_shared_table_subscription_private_denied + PASS [ 15.605s] (139/192) kalam-cli::smoke smoke_test_shared_table_subscription::smoke_shared_table_subscription_lifecycle + PASS [ 8.814s] (140/192) kalam-cli::smoke smoke_test_storage_compact::smoke_storage_compact_table_job_and_rocksdb + PASS [ 8.226s] (141/192) kalam-cli::smoke 
smoke_test_storage_health::smoke_storage_check_local_basic + PASS [ 12.935s] (142/192) kalam-cli::smoke smoke_test_show_storages::smoke_show_storages_user_access + PASS [ 8.434s] (143/192) kalam-cli::smoke smoke_test_storage_health::smoke_storage_check_extended + PASS [ 12.342s] (144/192) kalam-cli::smoke smoke_test_storage_health::smoke_storage_check_authorization + PASS [ 12.137s] (145/192) kalam-cli::smoke smoke_test_storage_health::smoke_storage_check_dba_access + PASS [ 8.953s] (146/192) kalam-cli::smoke smoke_test_storage_health::smoke_storage_check_nonexistent + PASS [ 9.680s] (147/192) kalam-cli::smoke smoke_test_subscription_advanced::smoke_subscription_column_projection + PASS [ 9.357s] (148/192) kalam-cli::smoke smoke_test_subscription_advanced::smoke_subscription_delete_events + PASS [ 9.478s] (149/192) kalam-cli::smoke smoke_test_subscription_advanced::smoke_subscription_high_volume_changes + PASS [ 9.342s] (150/192) kalam-cli::smoke smoke_test_subscription_advanced::smoke_subscription_resume_from_seq_id + PASS [ 14.217s] (151/192) kalam-cli::smoke smoke_test_storage_templates::smoke_storage_custom_templates + PASS [ 10.469s] (152/192) kalam-cli::smoke smoke_test_subscription_advanced::smoke_subscription_multi_batch_initial_data + PASS [ 12.289s] (153/192) kalam-cli::smoke smoke_test_stream_subscription::smoke_stream_table_subscription + PASS [ 9.573s] (154/192) kalam-cli::smoke smoke_test_subscription_close::smoke_subscription_drop_removes_live_query + PASS [ 8.869s] (155/192) kalam-cli::smoke smoke_test_subscription_close::smoke_subscription_explicit_close_removes_live_query + PASS [ 8.984s] (156/192) kalam-cli::smoke smoke_test_subscription_delta_updates::smoke_subscription_update_sends_delta_only + PASS [ 7.829s] (157/192) kalam-cli::smoke smoke_test_subscription_reconnect_resume::smoke_subscription_reconnect_basic_resume + PASS [ 8.007s] (158/192) kalam-cli::smoke smoke_test_subscription_listing::smoke_subscription_listing_tracks_seq_id + PASS [ 
9.217s] (159/192) kalam-cli::smoke smoke_test_subscription_listing::smoke_subscription_listing_and_close_removes + PASS [ 14.368s] (160/192) kalam-cli::smoke smoke_test_subscription_delete::smoke_subscription_receives_delete_event + PASS [ 8.410s] (161/192) kalam-cli::smoke smoke_test_subscription_reconnect_resume::smoke_subscription_resume_from_seq_id + PASS [ 7.457s] (162/192) kalam-cli::smoke smoke_test_system_tables_extended::smoke_test_dt_meta_command + PASS [ 7.825s] (163/192) kalam-cli::smoke smoke_test_system_tables_extended::smoke_test_describe_table_meta_command + PASS [ 10.125s] (164/192) kalam-cli::smoke smoke_test_system_and_users::smoke_system_tables_and_user_lifecycle + PASS [ 14.567s] (165/192) kalam-cli::smoke smoke_test_subscription_multi_reconnect::smoke_subscription_multi_reconnect_parallel + PASS [ 8.593s] (166/192) kalam-cli::smoke smoke_test_system_tables_extended::smoke_test_system_tables_options_column + PASS [ 9.158s] (167/192) kalam-cli::smoke smoke_test_system_tables_extended::smoke_test_system_live_queries + PASS [ 9.068s] (168/192) kalam-cli::smoke smoke_test_system_tables_extended::smoke_test_system_stats_meta_command + PASS [ 9.407s] (169/192) kalam-cli::smoke smoke_test_timing_output::smoke_test_timing_ddl_operations + PASS [ 10.118s] (170/192) kalam-cli::smoke smoke_test_timing_output::smoke_test_timing_aggregation_query + PASS [ 10.790s] (171/192) kalam-cli::smoke smoke_test_timing_output::smoke_test_timing_flush_operation + PASS [ 9.749s] (172/192) kalam-cli::smoke smoke_test_timing_output::smoke_test_timing_output_format + PASS [ 10.729s] (173/192) kalam-cli::smoke smoke_test_timing_output::smoke_test_timing_join_query + PASS [ 10.772s] (174/192) kalam-cli::smoke smoke_test_timing_output::smoke_test_timing_scaling_medium_table + PASS [ 10.586s] (175/192) kalam-cli::smoke smoke_test_timing_output::smoke_test_timing_scaling_small_table + PASS [ 10.146s] (176/192) kalam-cli::smoke 
smoke_test_topic_consumption::test_topic_consume_delete_events + PASS [ 10.219s] (177/192) kalam-cli::smoke smoke_test_topic_consumption::test_topic_consume_from_earliest + PASS [ 9.855s] (178/192) kalam-cli::smoke smoke_test_topic_consumption::test_topic_consume_mixed_operations + PASS [ 10.121s] (179/192) kalam-cli::smoke smoke_test_topic_consumption::test_topic_consume_insert_events + PASS [ 11.458s] (180/192) kalam-cli::smoke smoke_test_topic_consumption::test_topic_consume_from_latest + PASS [ 9.630s] (181/192) kalam-cli::smoke smoke_test_topic_consumption::test_topic_consume_offset_persistence + PASS [ 10.445s] (182/192) kalam-cli::smoke smoke_test_topic_consumption::test_topic_consume_update_events + PASS [ 15.029s] (183/192) kalam-cli::smoke smoke_test_topic_high_load::test_topic_fan_out_different_groups_receive_all + PASS [ 8.873s] (184/192) kalam-cli::smoke smoke_test_websocket_capacity::smoke_test_websocket_capacity + PASS [ 12.502s] (185/192) kalam-cli::smoke smoke_test_user_table_rls::smoke_user_table_rls_isolation + PASS [ 15.185s] (186/192) kalam-cli::smoke smoke_test_topic_high_load::test_topic_high_load_concurrent_publishers + PASS [ 15.815s] (187/192) kalam-cli::smoke smoke_test_topic_high_load::test_topic_four_consumers_same_group_no_duplicates + PASS [ 14.827s] (188/192) kalam-cli::smoke smoke_test_topic_high_load::test_topic_high_load_two_consumers_same_group_single_delivery + PASS [ 23.100s] (189/192) kalam-cli::smoke smoke_test_topic_consumption::test_topic_consume_option_matrix_start_batch_auto_ack_modes + PASS [ 15.565s] (190/192) kalam-cli::smoke smoke_test_user_table_subscription::smoke_user_table_subscription_lifecycle + PASS [ 22.043s] (191/192) kalam-cli::smoke smoke_test_topic_throughput::smoke_test_topic_throughput_benchmark + SLOW [> 60.000s] (───────) kalam-cli::smoke smoke_test_topic_high_load::test_topic_ack_failure_recovery_no_message_loss_with_latency + PASS [ 78.064s] (192/192) kalam-cli::smoke 
smoke_test_topic_high_load::test_topic_ack_failure_recovery_no_message_loss_with_latency +──────────── + Summary [ 238.922s] 192 tests run: 192 passed (1 slow), 0 skipped diff --git a/cli/tests/smoke/usecases/smoke_test_websocket_capacity.rs b/cli/tests/smoke/usecases/smoke_test_websocket_capacity.rs index 217d88bfc..cc2576cfb 100644 --- a/cli/tests/smoke/usecases/smoke_test_websocket_capacity.rs +++ b/cli/tests/smoke/usecases/smoke_test_websocket_capacity.rs @@ -231,33 +231,35 @@ async fn open_authenticated_connection( .await .unwrap_or_else(|e| panic!("Failed to open websocket #{}: {}", idx, e)); - let auth_payload = json!({ - "type": "authenticate", - "method": "jwt", - "token": token, - }); - - stream - .send(Message::Text(auth_payload.to_string().into())) - .await - .unwrap_or_else(|e| panic!("Failed to send auth message on websocket #{}: {}", idx, e)); + let auth_payload = match tokio::time::timeout( + Duration::from_secs(2), + read_ws_text_message(idx, &mut stream, "auth response"), + ) + .await + { + Ok(Ok(payload)) => payload, + Ok(Err(err)) => panic!("Websocket #{} header-auth response failed: {}", idx, err), + Err(_) => { + let auth_payload = json!({ + "type": "authenticate", + "method": "jwt", + "token": token, + }); - let auth_payload = tokio::time::timeout(Duration::from_secs(5), async { - loop { - let next = stream - .next() + stream + .send(Message::Text(auth_payload.to_string().into())) .await - .unwrap_or_else(|| panic!("Websocket #{} closed before auth response", idx)) - .unwrap_or_else(|e| panic!("Websocket #{} auth response error: {}", idx, e)); - match next { - Message::Text(payload) => break payload, - Message::Ping(_) | Message::Pong(_) => continue, - other => panic!("Websocket #{} expected text auth response, got {:?}", idx, other), - } - } - }) - .await - .unwrap_or_else(|_| panic!("Websocket #{} auth response timed out", idx)); + .unwrap_or_else(|e| panic!("Failed to send auth message on websocket #{}: {}", idx, e)); + + 
tokio::time::timeout( + Duration::from_secs(5), + read_ws_text_message(idx, &mut stream, "auth response"), + ) + .await + .unwrap_or_else(|_| panic!("Websocket #{} auth response timed out", idx)) + .unwrap_or_else(|err| panic!("Websocket #{} auth response failed: {}", idx, err)) + }, + }; let value: serde_json::Value = serde_json::from_str(&auth_payload) .unwrap_or_else(|e| panic!("Invalid auth response JSON on websocket #{}: {}", idx, e)); @@ -305,6 +307,34 @@ async fn open_authenticated_connection( stream } +async fn read_ws_text_message( + idx: usize, + stream: &mut WsStream, + context: &str, +) -> Result { + loop { + let message = stream + .next() + .await + .ok_or_else(|| format!("Websocket #{} closed before {}", idx, context))? + .map_err(|e| format!("Websocket #{} {} error: {}", idx, context, e))?; + + match message { + Message::Text(payload) => return Ok(payload.to_string()), + Message::Ping(payload) => { + let _ = stream.send(Message::Pong(payload)).await; + }, + Message::Pong(_) => continue, + other => { + return Err(format!( + "Websocket #{} expected text {} , got {:?}", + idx, context, other + )); + }, + } + } +} + async fn run_simple_sql() -> Duration { let start = Instant::now(); let result = tokio::task::spawn_blocking(|| { diff --git a/docs/Notes.md b/docs/Notes.md index bc7ed5e09..6eee18172 100644 --- a/docs/Notes.md +++ b/docs/Notes.md @@ -1330,3 +1330,4 @@ Postgres Extension: WITH (type = 'user', migrate = true, compress = 5, ...); - Can we support migrating a current table from postgres to kalamdb using something like: SELECT set_kalam_table('schema.table1', migrate => true, compress => 5, ...); this will convert the current table to a kalamdb table and move the data as well without needing to create a new table and move the data there - for parquet stick with only one compression for now +- Remove the requirement to authenticate first; we can rely on headers instead, which will make the connection faster diff --git a/link/Cargo.toml b/link/Cargo.toml 
index f1d3d53be..5503945fb 100644 --- a/link/Cargo.toml +++ b/link/Cargo.toml @@ -25,6 +25,7 @@ tokio-tungstenite = { workspace = true, optional = true } # Serialization serde = { workspace = true } serde_json = { workspace = true } +rmp-serde = { workspace = true } # Byte buffers for upload streaming bytes = { workspace = true, optional = true } diff --git a/link/kalam-link-dart/src/api.rs b/link/kalam-link-dart/src/api.rs index 080d3f133..2c8d2ef51 100644 --- a/link/kalam-link-dart/src/api.rs +++ b/link/kalam-link-dart/src/api.rs @@ -131,19 +131,10 @@ fn create_client_inner( builder = builder.max_retries(r as u32); } - // Build connection options from the individual flags. - { - let mut conn_opts = kalam_link::ConnectionOptions::default(); - if disable_compression.unwrap_or(false) { - conn_opts.disable_compression = true; - } - // ws_lazy_connect defaults to true in ConnectionOptions::default(). - // Only override when the caller explicitly passes false. - if let Some(lazy) = ws_lazy_connect { - conn_opts.ws_lazy_connect = lazy; - } - builder = builder.connection_options(conn_opts); - } + builder = builder.connection_options(build_dart_connection_options( + disable_compression, + ws_lazy_connect, + )); if let Some(ms) = keepalive_interval_ms { let mut timeouts = kalam_link::KalamLinkTimeouts::default(); @@ -167,6 +158,28 @@ fn create_client_inner( }) } +fn build_dart_connection_options( + disable_compression: Option, + ws_lazy_connect: Option, +) -> kalam_link::ConnectionOptions { + let mut conn_opts = kalam_link::ConnectionOptions::default(); + + // Favor the smaller binary wire format for Dart subscriptions by default. + conn_opts.protocol.serialization = kalam_link::models::SerializationType::MessagePack; + + if disable_compression.unwrap_or(false) { + conn_opts.disable_compression = true; + } + + // ws_lazy_connect defaults to true in ConnectionOptions::default(). + // Only override when the caller explicitly passes false. 
+ if let Some(lazy) = ws_lazy_connect { + conn_opts.ws_lazy_connect = lazy; + } + + conn_opts +} + /// Update the authentication credentials on a live client. /// /// This is used to implement refresh-token flows from Dart: @@ -187,6 +200,30 @@ pub fn dart_update_auth(client: &DartKalamClient, auth: DartAuthProvider) -> any Ok(()) } +#[cfg(test)] +mod tests { + use super::build_dart_connection_options; + use kalam_link::models::SerializationType; + + #[test] + fn dart_connection_options_default_to_msgpack() { + let options = build_dart_connection_options(None, None); + + assert_eq!(options.protocol.serialization, SerializationType::MessagePack); + assert!(options.ws_lazy_connect); + assert!(!options.disable_compression); + } + + #[test] + fn dart_connection_options_preserve_explicit_flags() { + let options = build_dart_connection_options(Some(true), Some(false)); + + assert_eq!(options.protocol.serialization, SerializationType::MessagePack); + assert!(!options.ws_lazy_connect); + assert!(options.disable_compression); + } +} + /// Build [`EventHandlers`](kalam_link::EventHandlers) that push events into /// a shared queue and notify waiters. fn build_event_handlers( diff --git a/link/sdks/dart/README.md b/link/sdks/dart/README.md index 31b52426a..77c827a9e 100644 --- a/link/sdks/dart/README.md +++ b/link/sdks/dart/README.md @@ -14,9 +14,11 @@ KalamDB is a SQL-first realtime database. The current Dart SDK focuses on the ap - **SQL queries** over HTTP with `$1`, `$2`, ... 
parameter binding - **Typed rows** via `Map` accessors like `asString()`, `asInt()`, and `asFile()` - **Live subscriptions** to any `SELECT` query over WebSocket +- **Materialized live rows** with `liveQueryRowsWithSql()` and `liveTableRows()` - **Authentication flows** with `Auth.jwt`, `Auth.basic`, `Auth.none`, `login()`, `refreshToken()`, and `refreshAuth()` - **Connection diagnostics** with `ConnectionHandlers`, keepalive control, and SDK logging hooks - **Subscription inspection** with `getSubscriptions()` and `SeqId` resume support +- **Manual shared-socket control** with `isConnected`, `disconnectWebSocket()`, and `reconnectWebSocket()` ## Installation @@ -146,6 +148,8 @@ final client = await KalamClient.connect( | `authProviderInitialBackoff` | `Duration` | `250ms` | Initial auth-provider retry backoff | | `authProviderMaxBackoff` | `Duration` | `2s` | Maximum auth-provider retry backoff | +Native Dart subscriptions default to MessagePack over the shared Rust transport. That reduces payload size and decoding overhead on the connect-to-first-batch path without changing the global Rust client default used by other SDKs. + ## Authentication ### `authProvider` is the primary auth API @@ -321,6 +325,26 @@ for (final sub in subs) { } ``` +For UI-facing row lists, prefer the materialized helpers instead of reconciling +change events yourself: + +```dart +final rowsStream = client.liveQueryRowsWithSql>( + "SELECT id, body FROM chat.messages WHERE room = 'main'", + lastRows: 20, +); + +await for (final rows in rowsStream) { + print('materialized rows=${rows.length}'); +} +``` + +Or use table-name convenience: + +```dart +final tasks = client.liveTableRows>('app.tasks'); +``` + ## Connection Lifecycle and Logging ```dart @@ -469,10 +493,15 @@ Avoid this pattern. 
Build the UI first, then resolve auth and connect from a pro | `KalamClient.connect(...)` | Create a client handle and configure auth, logging, retries, and WebSocket behavior | | `query(sql, {params, namespace})` | Execute SQL over HTTP | | `subscribe(sql, {batchSize, lastRows, from, subscriptionId})` | Subscribe to live query changes | +| `liveQueryRowsWithSql(sql, {batchSize, lastRows, from, subscriptionId, limit, keyColumns, mapRow})` | Subscribe to a reconciled live row set | +| `liveTableRows(tableName, {batchSize, lastRows, from, subscriptionId, limit, keyColumns, mapRow})` | Subscribe to reconciled rows for `SELECT * FROM table` | | `login(username, password)` | Exchange Basic credentials for JWT tokens | | `refreshToken(refreshToken)` | Refresh an access token | | `refreshAuth(...)` | Re-run `authProvider` and update credentials in place | | `healthCheck()` | Read server health/version metadata | +| `isConnected` | Report whether the shared WebSocket is currently open | +| `disconnectWebSocket()` | Close the shared WebSocket explicitly | +| `reconnectWebSocket()` | Refresh auth as needed and reopen the shared WebSocket | | `getSubscriptions()` | Inspect active subscriptions and resume checkpoints | | `dispose()` | Release client resources | diff --git a/link/sdks/dart/lib/kalam_link.dart b/link/sdks/dart/lib/kalam_link.dart index 8649c5e24..7c0f8192f 100644 --- a/link/sdks/dart/lib/kalam_link.dart +++ b/link/sdks/dart/lib/kalam_link.dart @@ -23,7 +23,7 @@ /// /// **Important:** Only `init()` should be awaited before `runApp()`. /// `connect()` performs network I/O — awaiting it before `runApp()` -/// will freeze the UI until the WebSocket handshake completes. +/// can delay first render while auth or other startup work completes. 
library; export 'src/auth.dart'; diff --git a/link/sdks/typescript/README.md b/link/sdks/typescript/README.md index 6159cb5f1..59e202683 100644 --- a/link/sdks/typescript/README.md +++ b/link/sdks/typescript/README.md @@ -46,7 +46,7 @@ with `live()` so row reconciliation still stays inside the shared Rust core: ```ts const stop = await client.live( - 'SELECT room_id, message_id, body FROM support.messages', + "SELECT room_id, message_id, body FROM support.messages WHERE room_id = 'main'", (rows) => { console.log(rows.length); }, @@ -93,6 +93,7 @@ function renderInbox(rows) { const inboxSql = ` SELECT id, room, role, body, created_at FROM support.inbox + WHERE room = 'main' `; const stop = await client.live( @@ -140,6 +141,7 @@ function renderInbox(rows) { const inboxSql = ` SELECT id, room, role, body, created_at FROM support.inbox + WHERE room = 'main' `; // Start from a specific known sequence ID. @@ -194,7 +196,7 @@ If you need raw subscription frames, `subscribeWithSql()` still exists. import { ChangeType, MessageType } from 'kalam-link'; const stop = await client.subscribeWithSql( - 'SELECT * FROM support.inbox', + "SELECT * FROM support.inbox WHERE room = 'main'", (event) => { // Use this path when you need raw subscription protocol events. 
if (event.type !== MessageType.Change) { diff --git a/link/src/connection/mod.rs b/link/src/connection/mod.rs index 85c172b5c..657f7b5b3 100644 --- a/link/src/connection/mod.rs +++ b/link/src/connection/mod.rs @@ -18,9 +18,10 @@ pub mod websocket; pub(crate) use shared::SharedConnection; #[cfg(feature = "tokio-runtime")] pub(crate) use websocket::{ - apply_ws_auth_headers, connect_with_optional_local_bind, decode_ws_payload, - jitter_keepalive_interval, parse_message, resolve_ws_url, send_auth_and_wait, - send_next_batch_request, WebSocketStream, + apply_ws_auth_headers, authenticate_ws, connect_with_optional_local_bind, decode_ws_payload, + jitter_keepalive_interval, parse_message, parse_message_msgpack, resolve_ws_url, + send_client_message, + send_next_batch_request_with_format, WebSocketStream, }; #[cfg(feature = "tokio-runtime")] diff --git a/link/src/connection/models/client_message.rs b/link/src/connection/models/client_message.rs index 57da0816a..6a743d7ad 100644 --- a/link/src/connection/models/client_message.rs +++ b/link/src/connection/models/client_message.rs @@ -4,6 +4,8 @@ use crate::auth::models::WsAuthCredentials; use crate::seq_id::SeqId; use crate::subscription::models::SubscriptionRequest; +use super::ProtocolOptions; + /// Client-to-server request messages #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(tag = "type", rename_all = "snake_case")] @@ -15,10 +17,13 @@ pub enum ClientMessage { /// Server responds with AuthSuccess or AuthError. /// /// Supports multiple authentication methods via the credentials field. + /// Optionally negotiates wire format via `protocol`. Authenticate { /// Authentication credentials (basic, jwt, or future methods) #[serde(flatten)] credentials: WsAuthCredentials, + /// Protocol negotiation (serialization + compression). 
+ protocol: ProtocolOptions, }, /// Subscribe to live query updates diff --git a/link/src/connection/models/connection_options.rs b/link/src/connection/models/connection_options.rs index 8754eeced..2dc02423c 100644 --- a/link/src/connection/models/connection_options.rs +++ b/link/src/connection/models/connection_options.rs @@ -1,5 +1,6 @@ use serde::{Deserialize, Serialize}; +use super::ProtocolOptions; use crate::timestamp::{TimestampFormat, TimestampFormatter}; /// HTTP protocol version to use for connections. @@ -139,6 +140,19 @@ pub struct ConnectionOptions { /// Default: `true`. #[serde(default = "default_ws_lazy_connect")] pub ws_lazy_connect: bool, + + /// Protocol options for the WebSocket connection. + /// + /// Controls the serialization format (JSON or MessagePack) and compression + /// (None or Gzip) used for WebSocket messages after authentication. + /// + /// Authentication messages are always sent as JSON text frames regardless + /// of this setting. The negotiated protocol takes effect after the server + /// confirms it in the `AuthSuccess` response. + /// + /// Default: JSON serialization + Gzip compression. + #[serde(default)] + pub protocol: ProtocolOptions, } fn default_auto_reconnect() -> bool { @@ -174,6 +188,7 @@ impl Default for ConnectionOptions { ws_local_bind_addresses: Vec::new(), disable_compression: false, ws_lazy_connect: true, + protocol: ProtocolOptions::default(), } } } @@ -265,6 +280,12 @@ impl ConnectionOptions { self.ws_lazy_connect = lazy; self } + + /// Set the protocol options for the WebSocket connection. 
+ pub fn with_protocol(mut self, protocol: ProtocolOptions) -> Self { + self.protocol = protocol; + self + } } #[cfg(test)] diff --git a/link/src/connection/models/mod.rs b/link/src/connection/models/mod.rs index 4c9be6920..05093db31 100644 --- a/link/src/connection/models/mod.rs +++ b/link/src/connection/models/mod.rs @@ -3,9 +3,11 @@ pub mod client_message; pub mod connection_options; pub mod health_check_response; +pub mod protocol; pub mod server_message; pub use client_message::ClientMessage; pub use connection_options::{ConnectionOptions, HttpVersion}; pub use health_check_response::HealthCheckResponse; +pub use protocol::{CompressionType, ProtocolOptions, SerializationType}; pub use server_message::ServerMessage; diff --git a/link/src/connection/models/protocol.rs b/link/src/connection/models/protocol.rs new file mode 100644 index 000000000..f1ccaff25 --- /dev/null +++ b/link/src/connection/models/protocol.rs @@ -0,0 +1,95 @@ +use serde::{Deserialize, Serialize}; + +/// Wire-format serialization type negotiated during authentication. +/// +/// Mirrors `SerializationType` from `kalamdb-commons`. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize)] +#[cfg_attr(feature = "wasm", derive(tsify_next::Tsify))] +#[cfg_attr(feature = "wasm", tsify(into_wasm_abi, from_wasm_abi))] +#[serde(rename_all = "snake_case")] +pub enum SerializationType { + /// JSON text frames (default, backward-compatible). + #[default] + Json, + /// MessagePack binary frames. + #[serde(rename = "msgpack")] + MessagePack, +} + +/// Wire-format compression negotiated during authentication. +/// +/// Mirrors `CompressionType` from `kalamdb-commons`. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize)] +#[cfg_attr(feature = "wasm", derive(tsify_next::Tsify))] +#[cfg_attr(feature = "wasm", tsify(into_wasm_abi, from_wasm_abi))] +#[serde(rename_all = "snake_case")] +pub enum CompressionType { + /// No compression. 
+ None, + /// Gzip compression for payloads above threshold (default). + #[default] + Gzip, +} + +/// Protocol options negotiated once per connection during authentication. +/// +/// Mirrors `ProtocolOptions` from `kalamdb-commons`. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "wasm", derive(tsify_next::Tsify))] +#[cfg_attr(feature = "wasm", tsify(into_wasm_abi, from_wasm_abi))] +pub struct ProtocolOptions { + /// Serialization format for messages after auth. + pub serialization: SerializationType, + /// Compression policy. + pub compression: CompressionType, +} + +impl Default for ProtocolOptions { + fn default() -> Self { + Self { + serialization: SerializationType::Json, + compression: CompressionType::Gzip, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_serialization_type_json_roundtrip() { + let ser = SerializationType::Json; + let json = serde_json::to_string(&ser).unwrap(); + assert_eq!(json, "\"json\""); + let parsed: SerializationType = serde_json::from_str(&json).unwrap(); + assert_eq!(parsed, SerializationType::Json); + } + + #[test] + fn test_serialization_type_msgpack_roundtrip() { + let ser = SerializationType::MessagePack; + let json = serde_json::to_string(&ser).unwrap(); + assert_eq!(json, "\"msgpack\""); + let parsed: SerializationType = serde_json::from_str(&json).unwrap(); + assert_eq!(parsed, SerializationType::MessagePack); + } + + #[test] + fn test_protocol_options_default() { + let opts = ProtocolOptions::default(); + assert_eq!(opts.serialization, SerializationType::Json); + assert_eq!(opts.compression, CompressionType::Gzip); + } + + #[test] + fn test_protocol_options_roundtrip() { + let opts = ProtocolOptions { + serialization: SerializationType::MessagePack, + compression: CompressionType::None, + }; + let json = serde_json::to_string(&opts).unwrap(); + let parsed: ProtocolOptions = serde_json::from_str(&json).unwrap(); + assert_eq!(parsed, opts); + } +} diff 
--git a/link/src/connection/models/server_message.rs b/link/src/connection/models/server_message.rs index b4d694a51..67773e510 100644 --- a/link/src/connection/models/server_message.rs +++ b/link/src/connection/models/server_message.rs @@ -6,6 +6,8 @@ use crate::models::SchemaField; use crate::subscription::models::BatchControl; use crate::subscription::models::ChangeTypeRaw; +use super::ProtocolOptions; + /// WebSocket message types sent from server to client #[derive(Debug, Clone, Serialize, Deserialize)] #[cfg_attr(feature = "wasm", derive(tsify_next::Tsify))] @@ -18,6 +20,8 @@ pub enum ServerMessage { user_id: String, //TODO: Use UserId type instead /// User role role: String, //TODO: Use UserRole type instead + /// Negotiated protocol echoed back from the server. + protocol: ProtocolOptions, }, /// Authentication failed response (browser clients only) diff --git a/link/src/connection/shared.rs b/link/src/connection/shared.rs index 0faf0ef62..1b29fbe31 100644 --- a/link/src/connection/shared.rs +++ b/link/src/connection/shared.rs @@ -13,16 +13,17 @@ use crate::{ auth::{AuthProvider, ResolvedAuth}, connection::{ - apply_ws_auth_headers, connect_with_optional_local_bind, decode_ws_payload, - jitter_keepalive_interval, parse_message, resolve_ws_url, send_auth_and_wait, - send_next_batch_request, WebSocketStream, DEFAULT_EVENT_CHANNEL_CAPACITY, FAR_FUTURE, - MAX_WS_TEXT_MESSAGE_BYTES, + apply_ws_auth_headers, authenticate_ws, connect_with_optional_local_bind, + decode_ws_payload, jitter_keepalive_interval, parse_message, parse_message_msgpack, + resolve_ws_url, send_client_message, send_next_batch_request_with_format, + WebSocketStream, + DEFAULT_EVENT_CHANNEL_CAPACITY, FAR_FUTURE, MAX_WS_TEXT_MESSAGE_BYTES, }, error::{KalamLinkError, Result}, event_handlers::{ConnectionError, DisconnectReason, EventHandlers}, models::{ - ChangeEvent, ClientMessage, ConnectionOptions, SubscriptionInfo, SubscriptionOptions, - SubscriptionRequest, + ChangeEvent, ClientMessage, 
CompressionType, ConnectionOptions, SerializationType, + SubscriptionInfo, SubscriptionOptions, SubscriptionRequest, }, seq_id::SeqId, seq_tracking, @@ -462,12 +463,28 @@ async fn establish_ws( timeouts: &KalamLinkTimeouts, connection_options: &ConnectionOptions, event_handlers: &EventHandlers, -) -> Result<(WebSocketStream, AuthProvider)> { +) -> Result<(WebSocketStream, AuthProvider, SerializationType)> { log::debug!("[kalam-link] Establishing WebSocket connection to {}", base_url); let resolved = resolved_auth.read().unwrap().clone(); let auth = resolved.resolve().await?; - let request_url = resolve_ws_url(base_url, None, connection_options.disable_compression)?; + let uses_header_auth = matches!(&auth, AuthProvider::JwtToken(_)); + + let mut request_url = resolve_ws_url(base_url, None, connection_options.disable_compression)?; + + // For header-auth fast path, include protocol preferences in query params + // so the server can negotiate without a separate Authenticate message. + if uses_header_auth { + let protocol = connection_options.protocol; + if protocol.serialization != SerializationType::Json { + let sep = if request_url.contains('?') { "&" } else { "?" }; + request_url.push_str(&format!("{}serialization=msgpack", sep)); + } + if protocol.compression == CompressionType::None { + let sep = if request_url.contains('?') { "&" } else { "?" }; + request_url.push_str(&format!("{}compression=none", sep)); + } + } let mut request = request_url.into_client_request().map_err(|e| { KalamLinkError::WebSocketError(format!("Failed to build WebSocket request: {}", e)) @@ -535,10 +552,19 @@ async fn establish_ws( }, }; - send_auth_and_wait(&mut ws_stream, &auth, timeouts.auth_timeout).await?; - log::info!("[kalam-link] WebSocket authenticated successfully"); + let ser = if uses_header_auth { + // Header-auth fast path: JWT was sent in the upgrade request's + // Authorization header. 
The server validates it during the HTTP + // upgrade and sends AuthSuccess proactively — no explicit + // Authenticate message needed, saving a full round-trip. + authenticate_ws(&mut ws_stream, &auth, timeouts.auth_timeout, connection_options.protocol, false).await? + } else { + // Fallback: send an explicit Authenticate message and wait. + authenticate_ws(&mut ws_stream, &auth, timeouts.auth_timeout, connection_options.protocol, true).await? + }; + log::info!("[kalam-link] WebSocket authenticated successfully (header_auth={})", uses_header_auth); - Ok((ws_stream, auth)) + Ok((ws_stream, auth, ser)) } async fn send_subscribe( @@ -546,6 +572,7 @@ async fn send_subscribe( id: &str, sql: &str, options: SubscriptionOptions, + serialization: SerializationType, ) -> Result<()> { let msg = ClientMessage::Subscribe { subscription: SubscriptionRequest { @@ -554,24 +581,18 @@ async fn send_subscribe( options, }, }; - let payload = serde_json::to_string(&msg).map_err(|e| { - KalamLinkError::WebSocketError(format!("Failed to serialize subscribe: {}", e)) - })?; - ws.send(Message::Text(payload.into())) - .await - .map_err(|e| KalamLinkError::WebSocketError(format!("Failed to send subscribe: {}", e))) + send_client_message(ws, &msg, serialization).await } -async fn send_unsubscribe(ws: &mut WebSocketStream, id: &str) -> Result<()> { +async fn send_unsubscribe( + ws: &mut WebSocketStream, + id: &str, + serialization: SerializationType, +) -> Result<()> { let msg = ClientMessage::Unsubscribe { subscription_id: id.to_string(), }; - let payload = serde_json::to_string(&msg).map_err(|e| { - KalamLinkError::WebSocketError(format!("Failed to serialize unsubscribe: {}", e)) - })?; - ws.send(Message::Text(payload.into())) - .await - .map_err(|e| KalamLinkError::WebSocketError(format!("Failed to send unsubscribe: {}", e))) + send_client_message(ws, &msg, serialization).await } async fn route_event( @@ -580,6 +601,7 @@ async fn route_event( subs: &mut HashMap, seq_id_cache: &mut HashMap, 
timeouts: &KalamLinkTimeouts, + serialization: SerializationType, ) { let incoming_sub_id = match event.subscription_id() { Some(id) => id.to_string(), @@ -617,7 +639,7 @@ async fn route_event( .as_ref() .and_then(|key| subs.get(key)) .and_then(|entry| entry.batch_seq_id.or(entry.last_seq_id)); - if let Err(e) = send_next_batch_request(ws, &incoming_sub_id, last_seq).await { + if let Err(e) = send_next_batch_request_with_format(ws, &incoming_sub_id, last_seq, serialization).await { log::warn!("Failed to send NextBatch for {}: {}", incoming_sub_id, e); } } @@ -700,6 +722,7 @@ async fn resubscribe_all( subs: &mut HashMap, timeouts: &KalamLinkTimeouts, event_handlers: &EventHandlers, + serialization: SerializationType, ) { log::info!( "[kalam-link] Re-subscribing {} active subscription(s) after reconnect", @@ -728,7 +751,7 @@ async fn resubscribe_all( options.snapshot_end_seq.map(|s| s.to_string()) ); - if let Err(e) = send_subscribe(ws, id, &entry.sql, options).await { + if let Err(e) = send_subscribe(ws, id, &entry.sql, options, serialization).await { log::warn!("Failed to re-subscribe {}: {}", id, e); event_handlers.emit_error(ConnectionError::new( format!("Failed to re-subscribe {}: {}", id, e), @@ -822,6 +845,9 @@ async fn connection_task( let mut ws_stream: Option = None; let mut shutdown_requested = false; let mut next_generation: u64 = 1; + // Negotiated serialization format for the current connection. + // Reset to Json on disconnect; updated after each successful auth. 
+ let mut negotiated_ser = SerializationType::Json; let keepalive_dur = if timeouts.keepalive_interval.is_zero() { FAR_FUTURE @@ -842,8 +868,9 @@ async fn connection_task( match establish_ws(&base_url, &resolved_auth, &timeouts, &connection_options, &event_handlers) .await { - Ok((stream, _auth)) => { + Ok((stream, _auth, ser)) => { ws_stream = Some(stream); + negotiated_ser = ser; connected.store(true, Ordering::SeqCst); event_handlers.emit_connect(); ping_deadline = TokioInstant::now() + keepalive_dur; @@ -863,7 +890,7 @@ async fn connection_task( if shutdown_requested { if let Some(ref mut ws) = ws_stream { for id in subs.keys() { - let _ = send_unsubscribe(ws, id).await; + let _ = send_unsubscribe(ws, id, negotiated_ser).await; } let _ = ws.close(None).await; } @@ -923,7 +950,7 @@ async fn connection_task( "[kalam-link] Replacing existing subscription '{}'", id, ); - let _ = send_unsubscribe(ws, &id).await; + let _ = send_unsubscribe(ws, &id, negotiated_ser).await; if let Some(mut old_entry) = remove_subscription_entry(&mut subs, &mut seq_id_cache, &id, None) { @@ -935,7 +962,7 @@ async fn connection_task( let inherited_seq = seq_id_cache.get(&id).copied(); let mut send_options = options.clone(); let effective_from = merge_resume_from(&mut send_options, inherited_seq); - let result = send_subscribe(ws, &id, &sql, send_options).await; + let result = send_subscribe(ws, &id, &sql, send_options, negotiated_ser).await; if result.is_ok() { register_subscription_entry( &mut subs, @@ -959,7 +986,7 @@ async fn connection_task( if let Some(result_tx) = entry.pending_result_tx.take() { let _ = result_tx.send(Err(KalamLinkError::Cancelled)); } - let _ = send_unsubscribe(ws, &id).await; + let _ = send_unsubscribe(ws, &id, negotiated_ser).await; } else { log::debug!( "[kalam-link] Ignoring stale unsubscribe for '{}' (gen={:?})", @@ -993,12 +1020,10 @@ async fn connection_task( ws_stream = None; continue; } - // Also send an application-level JSON ping so the server's + // 
Also send an application-level ping so the server's // heartbeat checker is satisfied. The server tracks // {"type":"ping"} messages, not native WebSocket Ping frames. - if let Ok(json_ping) = serde_json::to_string(&ClientMessage::Ping) { - let _ = ws.send(Message::Text(json_ping.into())).await; - } + let _ = send_client_message(ws, &ClientMessage::Ping, negotiated_ser).await; event_handlers.emit_send("[ping]"); if has_pong_timeout { awaiting_pong = true; @@ -1022,26 +1047,55 @@ async fn connection_task( event_handlers.emit_receive(&text); match parse_message(&text) { Ok(Some(event)) => { - route_event(event, ws, &mut subs, &mut seq_id_cache, &timeouts).await; + route_event(event, ws, &mut subs, &mut seq_id_cache, &timeouts, negotiated_ser).await; }, Ok(None) => {}, Err(e) => log::warn!("Failed to parse WS message: {}", e), } }, Some(Ok(Message::Binary(data))) => { - match decode_ws_payload(&data) { - Ok(text) => { - event_handlers.emit_receive(&text); - match parse_message(&text) { + match negotiated_ser { + SerializationType::MessagePack => { + // Msgpack binary frame (possibly gzip-compressed) + let raw = if data.len() >= 2 && data[0] == 0x1f && data[1] == 0x8b { + match crate::compression::decompress_gzip_with_limit( + &data, + crate::connection::MAX_WS_DECOMPRESSED_MESSAGE_BYTES, + ) { + Ok(d) => d, + Err(e) => { + log::warn!("Failed to decompress msgpack: {}", e); + continue; + }, + } + } else { + data.to_vec() + }; + match parse_message_msgpack(&raw) { Ok(Some(event)) => { - route_event(event, ws, &mut subs, &mut seq_id_cache, &timeouts).await; + route_event(event, ws, &mut subs, &mut seq_id_cache, &timeouts, negotiated_ser).await; }, Ok(None) => {}, - Err(e) => log::warn!("Failed to parse decompressed WS message: {}", e), + Err(e) => log::warn!("Failed to parse msgpack message: {}", e), } }, - Err(e) => { - event_handlers.emit_error(ConnectionError::new(e.to_string(), false)); + SerializationType::Json => { + // Legacy: gzip-compressed JSON binary frame + 
match decode_ws_payload(&data) { + Ok(text) => { + event_handlers.emit_receive(&text); + match parse_message(&text) { + Ok(Some(event)) => { + route_event(event, ws, &mut subs, &mut seq_id_cache, &timeouts, negotiated_ser).await; + }, + Ok(None) => {}, + Err(e) => log::warn!("Failed to parse decompressed WS message: {}", e), + } + }, + Err(e) => { + event_handlers.emit_error(ConnectionError::new(e.to_string(), false)); + }, + } }, } }, @@ -1053,6 +1107,7 @@ async fn connection_task( }; event_handlers.emit_disconnect(reason); connected.store(false, Ordering::SeqCst); + negotiated_ser = SerializationType::Json; ws_stream = None; continue; }, @@ -1070,12 +1125,14 @@ async fn connection_task( "WebSocket error: {}", msg ))); connected.store(false, Ordering::SeqCst); + negotiated_ser = SerializationType::Json; ws_stream = None; continue; }, None => { event_handlers.emit_disconnect(DisconnectReason::new("WebSocket stream ended")); connected.store(false, Ordering::SeqCst); + negotiated_ser = SerializationType::Json; ws_stream = None; continue; }, @@ -1257,12 +1314,13 @@ async fn connection_task( ) .await { - Ok((mut stream, _auth)) => { + Ok((mut stream, _auth, ser)) => { log::info!("Reconnection successful"); + negotiated_ser = ser; reconnect_attempts.store(0, Ordering::SeqCst); connected.store(true, Ordering::SeqCst); event_handlers.emit_connect(); - resubscribe_all(&mut stream, &mut subs, &timeouts, &event_handlers).await; + resubscribe_all(&mut stream, &mut subs, &timeouts, &event_handlers, negotiated_ser).await; ws_stream = Some(stream); ping_deadline = TokioInstant::now() + keepalive_dur; awaiting_pong = false; diff --git a/link/src/connection/websocket.rs b/link/src/connection/websocket.rs index 68b2a9174..960459132 100644 --- a/link/src/connection/websocket.rs +++ b/link/src/connection/websocket.rs @@ -298,15 +298,36 @@ pub(crate) fn apply_ws_auth_headers( // ── Authentication Handshake ──────────────────────────────────────────────── -/// Send authentication 
message and wait for AuthSuccess response. +/// Authenticate the WebSocket connection. /// -/// The WebSocket protocol requires an explicit Authenticate message after -/// connection. This function sends credentials and waits for the server's -/// response within the configured timeout. -pub(crate) async fn send_auth_and_wait( +/// When `send_credentials` is `true` (message-auth fallback), sends an explicit +/// `Authenticate` message carrying the JWT and protocol options, then waits for +/// the server's `AuthSuccess`. +/// +/// When `send_credentials` is `false` (header-auth fast path), the JWT was +/// already in the HTTP upgrade request header. The server validates it during +/// the upgrade and proactively sends `AuthSuccess` as the first frame — no +/// `Authenticate` message is sent, saving a full round-trip. +/// +/// Returns the negotiated `SerializationType` from the server's `AuthSuccess`. +pub(crate) async fn authenticate_ws( ws_stream: &mut WebSocketStream, auth: &AuthProvider, auth_timeout: Duration, + protocol: crate::models::ProtocolOptions, + send_credentials: bool, +) -> Result { + if send_credentials { + send_authenticate_message(ws_stream, auth, protocol).await?; + } + await_auth_response(ws_stream, auth_timeout).await +} + +/// Send the explicit `Authenticate` client message on the WebSocket. 
+async fn send_authenticate_message( + ws_stream: &mut WebSocketStream, + auth: &AuthProvider, + protocol: crate::models::ProtocolOptions, ) -> Result<()> { let credentials = match auth { AuthProvider::BasicAuth(_, _) => { @@ -324,7 +345,10 @@ pub(crate) async fn send_auth_and_wait( }, }; - let auth_message = ClientMessage::Authenticate { credentials }; + let auth_message = ClientMessage::Authenticate { + credentials, + protocol, + }; let payload = serde_json::to_string(&auth_message).map_err(|e| { KalamLinkError::WebSocketError(format!("Failed to serialize auth message: {}", e)) })?; @@ -333,7 +357,17 @@ pub(crate) async fn send_auth_and_wait( KalamLinkError::WebSocketError(format!("Failed to send auth message: {}", e)) })?; - // Loop until AuthSuccess/AuthError, tolerating Ping/Pong during handshake. + Ok(()) +} + +/// Wait for the server's `AuthSuccess` or `AuthError` response. +/// +/// Shared by both the message-auth and header-auth paths. Tolerates +/// Ping/Pong frames during the handshake window. +async fn await_auth_response( + ws_stream: &mut WebSocketStream, + auth_timeout: Duration, +) -> Result { let deadline = TokioInstant::now() + auth_timeout; loop { let remaining = deadline.saturating_duration_since(TokioInstant::now()); @@ -347,7 +381,9 @@ pub(crate) async fn send_auth_and_wait( match tokio::time::timeout(remaining, ws_stream.next()).await { Ok(Some(Ok(Message::Text(text)))) => { match serde_json::from_str::(&text) { - Ok(ServerMessage::AuthSuccess { .. }) => return Ok(()), + Ok(ServerMessage::AuthSuccess { protocol: negotiated, .. }) => { + return Ok(negotiated.serialization); + }, Ok(ServerMessage::AuthError { message }) => { return Err(KalamLinkError::AuthenticationError(format!( "WebSocket authentication failed: {}", @@ -412,6 +448,18 @@ pub(crate) fn parse_message(text: &str) -> Result> { Ok(ChangeEvent::from_server_message(msg)) } +/// Parse a binary MessagePack payload into a `ChangeEvent`. 
+pub(crate) fn parse_message_msgpack(data: &[u8]) -> Result> { + let msg: ServerMessage = rmp_serde::from_slice(data).map_err(|e| { + KalamLinkError::SerializationError(format!( + "Failed to parse msgpack as ServerMessage: {}", + e + )) + })?; + + Ok(ChangeEvent::from_server_message(msg)) +} + // ── Keepalive Jitter ──────────────────────────────────────────────────────── /// Spread keepalive pings across connections to avoid synchronized bursts. @@ -463,28 +511,61 @@ pub(crate) fn decode_ws_payload(data: &[u8]) -> Result { }) } -/// Send a `NextBatch` request through the WebSocket stream. -pub(crate) async fn send_next_batch_request( +/// Send a `NextBatch` request using the negotiated serialization format. +pub(crate) async fn send_next_batch_request_with_format( ws_stream: &mut WebSocketStream, subscription_id: &str, last_seq_id: Option, + serialization: crate::models::SerializationType, ) -> Result<()> { let message = ClientMessage::NextBatch { subscription_id: subscription_id.to_string(), last_seq_id, }; - let payload = serde_json::to_string(&message).map_err(|e| { - KalamLinkError::WebSocketError(format!("Failed to serialize NextBatch: {}", e)) - })?; - ws_stream - .send(Message::Text(payload.into())) - .await - .map_err(|e| KalamLinkError::WebSocketError(format!("Failed to send NextBatch: {}", e))) + send_client_message(ws_stream, &message, serialization).await +} + +/// Encode and send a `ClientMessage` using the given serialization format. 
+pub(crate) async fn send_client_message( + ws_stream: &mut WebSocketStream, + msg: &ClientMessage, + serialization: crate::models::SerializationType, +) -> Result<()> { + match serialization { + crate::models::SerializationType::Json => { + let payload = serde_json::to_string(msg).map_err(|e| { + KalamLinkError::WebSocketError(format!("Failed to serialize message: {}", e)) + })?; + ws_stream + .send(Message::Text(payload.into())) + .await + .map_err(|e| { + KalamLinkError::WebSocketError(format!("Failed to send message: {}", e)) + }) + }, + crate::models::SerializationType::MessagePack => { + let payload = rmp_serde::to_vec_named(msg).map_err(|e| { + KalamLinkError::WebSocketError(format!("Failed to serialize msgpack: {}", e)) + })?; + ws_stream + .send(Message::Binary(payload.into())) + .await + .map_err(|e| { + KalamLinkError::WebSocketError(format!("Failed to send binary message: {}", e)) + }) + }, + } } #[cfg(test)] mod tests { use super::*; + use crate::auth::AuthProvider; + use crate::error::KalamLinkError; + use tokio_tungstenite::tungstenite::{ + client::IntoClientRequest, + http::header::AUTHORIZATION, + }; #[test] fn test_ws_url_conversion() { @@ -556,6 +637,39 @@ mod tests { .is_err()); } + #[test] + fn test_apply_ws_auth_headers_sets_bearer_header_for_jwt() { + let mut request = "ws://localhost:3000/v1/ws".into_client_request().unwrap(); + + apply_ws_auth_headers( + &mut request, + &AuthProvider::jwt_token("token-123".to_string()), + ) + .expect("jwt auth should be applied via Authorization header"); + + assert_eq!( + request.headers().get(AUTHORIZATION).unwrap(), + "Bearer token-123" + ); + } + + #[test] + fn test_apply_ws_auth_headers_rejects_basic_auth() { + let mut request = "ws://localhost:3000/v1/ws".into_client_request().unwrap(); + + let err = apply_ws_auth_headers( + &mut request, + &AuthProvider::basic_auth("admin".to_string(), "secret".to_string()), + ) + .expect_err("basic auth should not be used for websocket upgrades"); + + 
assert!(matches!( + err, + KalamLinkError::AuthenticationError(message) + if message.contains("requires a JWT token") + )); + } + #[test] fn test_keepalive_jitter_is_deterministic() { let base = Duration::from_secs(20); @@ -578,4 +692,45 @@ mod tests { max ); } + + #[test] + fn test_parse_message_msgpack_server_message() { + use crate::models::{ProtocolOptions, SerializationType, ServerMessage}; + + let msg = ServerMessage::AuthSuccess { + user_id: "user-1".to_string(), + role: "admin".to_string(), + protocol: ProtocolOptions { + serialization: SerializationType::MessagePack, + compression: crate::models::CompressionType::Gzip, + }, + }; + let bytes = rmp_serde::to_vec_named(&msg).unwrap(); + let result = parse_message_msgpack(&bytes).unwrap(); + // AuthSuccess is not a ChangeEvent, so parse_message_msgpack should return None + // (it only handles subscription events) + assert!(result.is_none()); + } + + #[test] + fn test_msgpack_client_message_roundtrip() { + use crate::models::{ClientMessage, SubscriptionOptions, SubscriptionRequest}; + + let msg = ClientMessage::Subscribe { + subscription: SubscriptionRequest { + id: "sub-1".to_string(), + sql: "SELECT * FROM test".to_string(), + options: SubscriptionOptions::default(), + }, + }; + let bytes = rmp_serde::to_vec_named(&msg).unwrap(); + let parsed: ClientMessage = rmp_serde::from_slice(&bytes).unwrap(); + match parsed { + ClientMessage::Subscribe { subscription } => { + assert_eq!(subscription.id, "sub-1"); + assert_eq!(subscription.sql, "SELECT * FROM test"); + }, + _ => panic!("Expected Subscribe"), + } + } } diff --git a/link/src/models/mod.rs b/link/src/models/mod.rs index 5f30147af..70cc0c13d 100644 --- a/link/src/models/mod.rs +++ b/link/src/models/mod.rs @@ -32,7 +32,8 @@ pub use crate::auth::models::{ // ── Connection models ──────────────────────────────────────────────────────── pub use crate::connection::models::{ - ClientMessage, ConnectionOptions, HealthCheckResponse, HttpVersion, ServerMessage, + 
ClientMessage, CompressionType, ConnectionOptions, HealthCheckResponse, HttpVersion, + ProtocolOptions, SerializationType, ServerMessage, }; // ── Consumer models ────────────────────────────────────────────────────────── diff --git a/link/src/models/tests.rs b/link/src/models/tests.rs index 6e7a85d6b..7f2d84a06 100644 --- a/link/src/models/tests.rs +++ b/link/src/models/tests.rs @@ -305,6 +305,7 @@ fn test_client_message_authenticate_jwt_serialization() { credentials: WsAuthCredentials::Jwt { token: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.test".to_string(), }, + protocol: ProtocolOptions::default(), }; let json = serde_json::to_string(&msg).unwrap(); diff --git a/link/src/wasm/auth.rs b/link/src/wasm/auth.rs index d673326b6..b15bb3cb4 100644 --- a/link/src/wasm/auth.rs +++ b/link/src/wasm/auth.rs @@ -1,6 +1,6 @@ use base64::{engine::general_purpose, Engine as _}; -use crate::models::{ClientMessage, WsAuthCredentials}; +use crate::models::{ClientMessage, ProtocolOptions, WsAuthCredentials}; /// Authentication provider for WASM clients /// @@ -33,13 +33,17 @@ impl WasmAuthProvider { } /// Get the WebSocket authentication message using unified WsAuthCredentials - pub(crate) fn to_ws_auth_message(&self) -> Option { + pub(crate) fn to_ws_auth_message( + &self, + protocol: ProtocolOptions, + ) -> Option { match self { WasmAuthProvider::Basic { .. 
} => None, WasmAuthProvider::Jwt { token } => Some(ClientMessage::Authenticate { credentials: WsAuthCredentials::Jwt { token: token.clone(), }, + protocol, }), WasmAuthProvider::None => None, } diff --git a/link/src/wasm/client.rs b/link/src/wasm/client.rs index 6e5d9ab31..1123ddd97 100644 --- a/link/src/wasm/client.rs +++ b/link/src/wasm/client.rs @@ -1,4 +1,4 @@ -use std::cell::RefCell; +use std::cell::{Cell, RefCell}; use std::collections::HashMap; use std::rc::Rc; @@ -9,14 +9,17 @@ use wasm_bindgen_futures::JsFuture; use web_sys::{CloseEvent, ErrorEvent, MessageEvent, WebSocket}; use crate::models::{ - ClientMessage, ConnectionOptions, QueryRequest, ServerMessage, SubscriptionOptions, - SubscriptionRequest, + ClientMessage, ConnectionOptions, QueryRequest, SerializationType, ServerMessage, + SubscriptionOptions, SubscriptionRequest, }; use base64::Engine; use super::auth::WasmAuthProvider; use super::console_log; -use super::helpers::{create_promise, decode_ws_message, subscription_hash}; +use super::helpers::{ + create_promise, decode_ws_binary_payload, decode_ws_message, send_ws_message, + subscription_hash, +}; use super::reconnect::{self, reconnect_internal_with_auth, resubscribe_all}; use super::state::{ callback_payload, filter_subscription_event, track_subscription_checkpoint, @@ -26,13 +29,6 @@ use super::validation::{ quote_table_name, validate_column_name, validate_row_id, validate_sql_identifier, }; -// Pre-serialized ping message to avoid re-serializing `ClientMessage::Ping` -// on every keepalive tick. WASM is single-threaded, so a thread-local is safe. -thread_local! 
{ - pub(crate) static PING_PAYLOAD: String = serde_json::to_string(&ClientMessage::Ping) - .expect("ClientMessage::Ping serialization is infallible"); -} - /// WASM-compatible KalamDB client with auto-reconnection support /// /// Supports multiple authentication methods: @@ -113,6 +109,8 @@ pub struct KalamClient { /// The callback must return a Promise that resolves to an object of the /// shape `{ jwt: { token: string } }` or `{ none: null }`. auth_provider_cb: Rc>>, + /// Negotiated serialization format for this WebSocket connection. + negotiated_ser: Rc>, } impl KalamClient { @@ -152,16 +150,11 @@ impl KalamClient { options: subscription_options, }, }; - let payload = serde_json::to_string(&subscribe_msg) - .map_err(|e| JsValue::from_str(&format!("Serialization error: {}", e)))?; console_log(&format!( "KalamClient: Sending subscribe request - id: {}, sql: {}", subscription_id, sql )); - if let Some(cb) = self.on_send_cb.borrow().as_ref() { - let _ = cb.call1(&JsValue::NULL, &JsValue::from_str(&payload)); - } - if let Err(error) = ws.send_with_str(&payload) { + if let Err(error) = send_ws_message(ws, &subscribe_msg, self.negotiated_ser.get()) { self.subscription_state.borrow_mut().remove(&subscription_id); return Err(error); } @@ -193,6 +186,7 @@ impl KalamClient { on_receive_cb: Rc::new(RefCell::new(None)), on_send_cb: Rc::new(RefCell::new(None)), auth_provider_cb: Rc::new(RefCell::new(None)), + negotiated_ser: Rc::new(Cell::new(SerializationType::Json)), } } } @@ -507,25 +501,30 @@ fn install_runtime_message_handler( ws: &WebSocket, subscriptions: Rc>>, on_receive_cb: Rc>>, + negotiated_ser: Rc>, ) { let onmessage_callback = Closure::wrap(Box::new(move |e: MessageEvent| { - let message = match decode_ws_message(&e) { - Some(m) => m, - None => return, - }; - - if message.len() > 200 { - console_log(&format!( - "KalamClient: Received WebSocket message ({} bytes)", - message.len() - )); - } - - if let Some(cb) = on_receive_cb.borrow().as_ref() { - let _ = 
cb.call1(&JsValue::NULL, &JsValue::from_str(&message)); - } + let event: Option = (|| { + let data = e.data(); + if data.is_instance_of::() { + let text: String = data.dyn_into::().ok()?.into(); + if let Some(cb) = on_receive_cb.borrow().as_ref() { + let _ = cb.call1(&JsValue::NULL, &JsValue::from_str(&text)); + } + serde_json::from_str::(&text).ok() + } else if negotiated_ser.get() == SerializationType::MessagePack { + let raw = decode_ws_binary_payload(&e)?; + rmp_serde::from_slice::(&raw).ok() + } else { + let message = decode_ws_message(&e)?; + if let Some(cb) = on_receive_cb.borrow().as_ref() { + let _ = cb.call1(&JsValue::NULL, &JsValue::from_str(&message)); + } + serde_json::from_str::(&message).ok() + } + })(); - if let Ok(event) = serde_json::from_str::(&message) { + if let Some(event) = event { if let Some(dispatch) = dispatch_subscription_server_message(&subscriptions, &event) { dispatch.invoke(); } @@ -910,7 +909,8 @@ impl KalamClient { let (auth_promise, auth_resolve, auth_reject) = create_promise(); // Clone auth message for the onopen handler - let auth_message = resolved_auth.to_ws_auth_message(); + let protocol_opts = self.connection_options.borrow().protocol.clone(); + let auth_message = resolved_auth.to_ws_auth_message(protocol_opts); let ws_clone_for_auth = ws.clone(); let auth_resolve_for_anon = auth_resolve.clone(); let on_send_for_open = Rc::clone(&self.on_send_cb); @@ -1040,82 +1040,91 @@ impl KalamClient { let on_receive_for_msg = Rc::clone(&self.on_receive_cb); let on_connect_for_msg = Rc::clone(&self.on_connect_cb); let on_error_for_msg = Rc::clone(&self.on_error_cb); + let negotiated_ser_for_msg = Rc::clone(&self.negotiated_ser); let onmessage_callback = Closure::wrap(Box::new(move |e: MessageEvent| { - let message = match decode_ws_message(&e) { - Some(m) => m, + // Try to parse the message as a ServerMessage. + // Text frames are always JSON. Binary frames depend on negotiated format. 
+ let event: Option = (|| { + let data = e.data(); + if data.is_instance_of::() { + // Text frame — always JSON (auth messages and JSON-mode data) + let text: String = data.dyn_into::().ok()?.into(); + if let Some(cb) = on_receive_for_msg.borrow().as_ref() { + let _ = cb.call1(&JsValue::NULL, &JsValue::from_str(&text)); + } + serde_json::from_str::(&text).ok() + } else if negotiated_ser_for_msg.get() == SerializationType::MessagePack { + // Binary frame with msgpack negotiated + let raw = decode_ws_binary_payload(&e)?; + rmp_serde::from_slice::(&raw).ok() + } else { + // Binary frame with JSON (gzip-compressed JSON) + let message = decode_ws_message(&e)?; + if let Some(cb) = on_receive_for_msg.borrow().as_ref() { + let _ = cb.call1(&JsValue::NULL, &JsValue::from_str(&message)); + } + serde_json::from_str::(&message).ok() + } + })(); + + let event = match event { + Some(e) => e, None => return, }; - // SECURITY: Do not log full message content — it may contain - // sensitive row data. Log only the message type for debugging. 
- if message.len() > 200 { - console_log(&format!( - "KalamClient: Received WebSocket message ({} bytes)", - message.len() - )); - } - - // Emit on_receive callback for debug tracing - if let Some(cb) = on_receive_for_msg.borrow().as_ref() { - let _ = cb.call1(&JsValue::NULL, &JsValue::from_str(&message)); - } - - // Parse message using ServerMessage enum - if let Ok(event) = serde_json::from_str::(&message) { - // Check for authentication response first - if !*auth_handled_clone.borrow() { - match &event { - ServerMessage::AuthSuccess { user_id, role } => { - console_log(&format!( - "KalamClient: Authentication successful - user_id: {}, role: {}", - user_id, role - )); - *auth_handled_clone.borrow_mut() = true; - // Emit on_connect — connection is fully established and authenticated - if let Some(cb) = on_connect_for_msg.borrow().as_ref() { - let _ = cb.call0(&JsValue::NULL); - } - let _ = auth_resolve_clone.call0(&JsValue::NULL); - return; - }, - ServerMessage::AuthError { message: error_msg } => { - console_log(&format!( - "KalamClient: Authentication failed - {}", - error_msg - )); - *auth_handled_clone.borrow_mut() = true; - // Emit on_error for auth failure - if let Some(cb) = on_error_for_msg.borrow().as_ref() { - let err_obj = js_sys::Object::new(); - let _ = js_sys::Reflect::set( - &err_obj, - &"message".into(), - &JsValue::from_str(&format!( - "Authentication failed: {}", - error_msg - )), - ); - let _ = js_sys::Reflect::set( - &err_obj, - &"recoverable".into(), - &JsValue::FALSE, - ); - let _ = cb.call1(&JsValue::NULL, &err_obj); - } - let error = - JsValue::from_str(&format!("Authentication failed: {}", error_msg)); - let _ = auth_reject_clone2.call1(&JsValue::NULL, &error); - return; - }, - _ => {}, // Not an auth message, continue to subscription handling - } + // Check for authentication response first + if !*auth_handled_clone.borrow() { + match &event { + ServerMessage::AuthSuccess { user_id, role, protocol } => { + console_log(&format!( + 
"KalamClient: Authentication successful - user_id: {}, role: {}", + user_id, role + )); + // Store the negotiated serialization type + negotiated_ser_for_msg.set(protocol.serialization); + *auth_handled_clone.borrow_mut() = true; + if let Some(cb) = on_connect_for_msg.borrow().as_ref() { + let _ = cb.call0(&JsValue::NULL); + } + let _ = auth_resolve_clone.call0(&JsValue::NULL); + return; + }, + ServerMessage::AuthError { message: error_msg } => { + console_log(&format!( + "KalamClient: Authentication failed - {}", + error_msg + )); + *auth_handled_clone.borrow_mut() = true; + if let Some(cb) = on_error_for_msg.borrow().as_ref() { + let err_obj = js_sys::Object::new(); + let _ = js_sys::Reflect::set( + &err_obj, + &"message".into(), + &JsValue::from_str(&format!( + "Authentication failed: {}", + error_msg + )), + ); + let _ = js_sys::Reflect::set( + &err_obj, + &"recoverable".into(), + &JsValue::FALSE, + ); + let _ = cb.call1(&JsValue::NULL, &err_obj); + } + let error = + JsValue::from_str(&format!("Authentication failed: {}", error_msg)); + let _ = auth_reject_clone2.call1(&JsValue::NULL, &error); + return; + }, + _ => {}, } + } - if let Some(dispatch) = dispatch_subscription_server_message(&subscriptions, &event) - { - dispatch.invoke(); - } + if let Some(dispatch) = dispatch_subscription_server_message(&subscriptions, &event) + { + dispatch.invoke(); } }) as Box); ws.set_onmessage(Some(onmessage_callback.as_ref().unchecked_ref())); @@ -1196,9 +1205,7 @@ impl KalamClient { pub fn send_ping(&self) -> Result<(), JsValue> { if let Some(ws) = self.ws.borrow().as_ref() { if ws.ready_state() == WebSocket::OPEN { - let payload = serde_json::to_string(&ClientMessage::Ping) - .map_err(|e| JsValue::from_str(&format!("Ping serialization error: {}", e)))?; - ws.send_with_str(&payload)?; + send_ws_message(ws, &ClientMessage::Ping, self.negotiated_ser.get())?; } } Ok(()) @@ -1216,12 +1223,11 @@ impl KalamClient { } let ws_ref = Rc::clone(&self.ws); + let negotiated_ser_for_ping 
= Rc::clone(&self.negotiated_ser); let ping_cb = Closure::wrap(Box::new(move || { if let Some(ws) = ws_ref.borrow().as_ref() { if ws.ready_state() == WebSocket::OPEN { - PING_PAYLOAD.with(|payload| { - let _ = ws.send_with_str(payload); - }); + let _ = send_ws_message(ws, &ClientMessage::Ping, negotiated_ser_for_ping.get()); } } }) as Box); @@ -1490,13 +1496,7 @@ impl KalamClient { let unsubscribe_msg = ClientMessage::Unsubscribe { subscription_id: subscription_id.clone(), }; - let payload = serde_json::to_string(&unsubscribe_msg) - .map_err(|e| JsValue::from_str(&format!("Serialization error: {}", e)))?; - // Emit on_send for debug tracing - if let Some(cb) = self.on_send_cb.borrow().as_ref() { - let _ = cb.call1(&JsValue::NULL, &JsValue::from_str(&payload)); - } - ws.send_with_str(&payload)?; + send_ws_message(ws, &unsubscribe_msg, self.negotiated_ser.get())?; } console_log(&format!("KalamClient: Unsubscribed from: {}", subscription_id)); @@ -1985,6 +1985,7 @@ impl KalamClient { Rc::clone(&self.on_disconnect_cb), Rc::clone(&self.on_error_cb), Rc::clone(&self.on_receive_cb), + Rc::clone(&self.negotiated_ser), ); } } @@ -2005,6 +2006,7 @@ fn install_auto_reconnect_listener( on_disconnect_cb: Rc>>, on_error_cb: Rc>>, on_receive_cb: Rc>>, + negotiated_ser: Rc>, ) { let source_ws = ws.clone(); let onclose_reconnect = Closure::wrap(Box::new(move |_e: CloseEvent| { @@ -2056,6 +2058,7 @@ fn install_auto_reconnect_listener( let on_error_clone = on_error_cb.clone(); let on_receive_clone = on_receive_cb.clone(); let auth_provider_rc = auth_provider_cb.clone(); + let negotiated_ser_clone = negotiated_ser.clone(); let reconnect_fn = Closure::wrap(Box::new(move || { { @@ -2093,6 +2096,8 @@ fn install_auto_reconnect_listener( let ws_ref_next = ws_ref_clone.clone(); let ping_id_next = ping_interval_id_clone.clone(); let auth_provider_next = auth_provider_rc.clone(); + let negotiated_ser_inner = negotiated_ser_clone.clone(); + let negotiated_ser_next = 
negotiated_ser_clone.clone(); wasm_bindgen_futures::spawn_local(async move { match reconnect_internal_with_auth(url, auth, cb, disable_compression).await { @@ -2110,6 +2115,7 @@ fn install_auto_reconnect_listener( &ws, Rc::clone(&subscription_state), Rc::clone(&on_receive), + Rc::clone(&negotiated_ser_inner), ); if let Some(cb) = on_connect.borrow().as_ref() { let _ = cb.call0(&JsValue::NULL); @@ -2131,9 +2137,10 @@ fn install_auto_reconnect_listener( Rc::clone(&on_disconnect), Rc::clone(&on_error), Rc::clone(&on_receive), + Rc::clone(&negotiated_ser_next), ); - resubscribe_all(ws_ref.clone(), subscription_state).await; - reconnect::restart_ping_timer(&ws_ref, &conn_opts, &ping_id); + resubscribe_all(ws_ref.clone(), subscription_state, negotiated_ser_inner.get()).await; + reconnect::restart_ping_timer(&ws_ref, &conn_opts, &ping_id, &negotiated_ser_inner); }, Err(e) => { console_log(&format!("KalamClient: Reconnection failed: {:?}", e)); diff --git a/link/src/wasm/helpers.rs b/link/src/wasm/helpers.rs index 173dc99fe..0571d316e 100644 --- a/link/src/wasm/helpers.rs +++ b/link/src/wasm/helpers.rs @@ -8,6 +8,7 @@ use web_sys::{Headers, MessageEvent, Request, RequestInit, RequestMode, Response use super::console_log; use crate::compression; +use crate::models::SerializationType; #[inline] pub(crate) fn ws_url_from_http_opts( @@ -193,3 +194,41 @@ pub(crate) fn decode_ws_message(e: &MessageEvent) -> Option { )); None } + +/// Extract raw bytes from a binary WebSocket frame, decompressing gzip if needed. +/// +/// Returns `None` if the frame is not a binary `ArrayBuffer`. 
+#[inline] +pub(crate) fn decode_ws_binary_payload(e: &MessageEvent) -> Option> { + let data = e.data(); + if let Ok(array_buffer) = data.dyn_into::() { + let uint8_array = js_sys::Uint8Array::new(&array_buffer); + let raw = uint8_array.to_vec(); + let decompressed = compression::decompress_if_gzip(&raw); + Some(decompressed.into_owned()) + } else { + None + } +} + +/// Send a `ClientMessage` using the negotiated serialization format. +/// +/// JSON messages are sent as text frames; MessagePack messages as binary frames. +pub(crate) fn send_ws_message( + ws: &web_sys::WebSocket, + msg: &crate::models::ClientMessage, + serialization: SerializationType, +) -> Result<(), JsValue> { + match serialization { + SerializationType::Json => { + let json = serde_json::to_string(msg) + .map_err(|e| JsValue::from_str(&format!("JSON serialization error: {}", e)))?; + ws.send_with_str(&json) + }, + SerializationType::MessagePack => { + let bytes = rmp_serde::to_vec_named(msg) + .map_err(|e| JsValue::from_str(&format!("MessagePack serialization error: {}", e)))?; + ws.send_with_u8_array(&bytes) + }, + } +} diff --git a/link/src/wasm/reconnect.rs b/link/src/wasm/reconnect.rs index bfd361950..7655b025a 100644 --- a/link/src/wasm/reconnect.rs +++ b/link/src/wasm/reconnect.rs @@ -1,4 +1,4 @@ -use std::cell::RefCell; +use std::cell::{Cell, RefCell}; use std::collections::HashMap; use std::rc::Rc; @@ -7,11 +7,11 @@ use wasm_bindgen::JsCast; use wasm_bindgen_futures::JsFuture; use web_sys::{ErrorEvent, MessageEvent, WebSocket}; -use crate::models::{ClientMessage, ConnectionOptions, ServerMessage, SubscriptionRequest}; +use crate::models::{ClientMessage, ConnectionOptions, ProtocolOptions, SerializationType, ServerMessage, SubscriptionRequest}; use super::auth::WasmAuthProvider; use super::console_log; -use super::helpers::{create_promise, ws_url_from_http_opts}; +use super::helpers::{create_promise, send_ws_message, ws_url_from_http_opts}; use super::state::SubscriptionState; /// Resolve 
a `WasmAuthProvider` from an optional JS async callback. @@ -83,7 +83,7 @@ pub(crate) async fn reconnect_internal_with_auth( // Check if auth is required let requires_auth = !matches!(resolved_auth, WasmAuthProvider::None); - let auth_message = resolved_auth.to_ws_auth_message(); + let auth_message = resolved_auth.to_ws_auth_message(ProtocolOptions::default()); let ws_clone = ws.clone(); let auth_resolve_for_anon = auth_resolve.clone(); @@ -155,6 +155,7 @@ pub(crate) fn restart_ping_timer( ws_ref: &Rc>>, connection_options: &Rc>, ping_interval_id: &Rc>, + negotiated_ser: &Rc>, ) { // Stop any previous timer let old_id = *ping_interval_id.borrow(); @@ -169,12 +170,11 @@ pub(crate) fn restart_ping_timer( } let ws_clone = Rc::clone(ws_ref); + let ser = Rc::clone(negotiated_ser); let ping_cb = Closure::wrap(Box::new(move || { if let Some(ws) = ws_clone.borrow().as_ref() { if ws.ready_state() == WebSocket::OPEN { - super::client::PING_PAYLOAD.with(|payload| { - let _ = ws.send_with_str(payload); - }); + let _ = send_ws_message(ws, &ClientMessage::Ping, ser.get()); } } }) as Box); @@ -189,6 +189,7 @@ pub(crate) fn restart_ping_timer( pub(crate) async fn resubscribe_all( ws_ref: Rc>>, subscription_state: Rc>>, + negotiated_ser: SerializationType, ) { let states: Vec<(String, SubscriptionState)> = { let mut subs = subscription_state.borrow_mut(); @@ -224,13 +225,11 @@ pub(crate) async fn resubscribe_all( }; if let Some(ws) = ws_ref.borrow().as_ref() { - if let Ok(payload) = serde_json::to_string(&subscribe_msg) { - if let Err(e) = ws.send_with_str(&payload) { - console_log(&format!( - "KalamClient: Failed to re-subscribe to {}: {:?}", - subscription_id, e - )); - } + if let Err(e) = send_ws_message(ws, &subscribe_msg, negotiated_ser) { + console_log(&format!( + "KalamClient: Failed to re-subscribe to {}: {:?}", + subscription_id, e + )); } } } diff --git a/link/tests/proxied/rapid_flap.rs b/link/tests/proxied/rapid_flap.rs new file mode 100644 index 000000000..d406e1227 
--- /dev/null +++ b/link/tests/proxied/rapid_flap.rs @@ -0,0 +1,180 @@ +use super::helpers::*; +use crate::common::tcp_proxy::TcpDisconnectProxy; +use kalam_link::SubscriptionConfig; +use std::sync::atomic::Ordering; +use std::time::Duration; +use tokio::time::{sleep, timeout}; + +/// Rapid connection flapping: bring the proxy up and down in quick succession +/// (sub-second cycles). The client must survive without panicking, must not +/// get stuck in a reconnect loop, and must eventually stabilise and resume +/// from the correct seq once the link stays up. +#[tokio::test] +async fn test_rapid_flapping_connection_stabilises_and_resumes() { + let result = timeout(Duration::from_secs(60), async { + let writer = match create_test_client() { + Ok(c) => c, + Err(e) => { + eprintln!("Skipping test (writer client unavailable): {}", e); + return; + }, + }; + + let proxy = TcpDisconnectProxy::start(upstream_server_url()).await; + let (client, connect_count, disconnect_count) = + match create_test_client_with_events_for_base_url(proxy.base_url()) { + Ok(v) => v, + Err(e) => { + eprintln!("Skipping test (proxy client unavailable): {}", e); + proxy.shutdown().await; + return; + }, + }; + + let suffix = unique_suffix(); + let table = format!("default.rapid_flap_{}", suffix); + ensure_table(&writer, &table).await; + + client.connect().await.expect("initial connect through proxy"); + + let mut sub = client + .subscribe_with_config(SubscriptionConfig::new( + format!("rapid-flap-{}", suffix), + format!("SELECT id, value FROM {}", table), + )) + .await + .expect("subscribe through proxy"); + + let _ = timeout(TEST_TIMEOUT, sub.next()).await; + + // Insert a baseline row and observe it. 
+ writer + .execute_query( + &format!("INSERT INTO {} (id, value) VALUES ('baseline', 'ready')", table), + None, + None, + None, + ) + .await + .expect("insert baseline row"); + + let mut baseline_ids = Vec::::new(); + let mut checkpoint = None; + for _ in 0..12 { + if baseline_ids.iter().any(|id| id == "baseline") { + break; + } + match timeout(Duration::from_millis(1200), sub.next()).await { + Ok(Some(Ok(ev))) => { + collect_ids_and_track_seq( + &ev, + &mut baseline_ids, + &mut checkpoint, + None, + "rapid-flap baseline", + ); + }, + _ => {}, + } + } + assert!(baseline_ids.iter().any(|id| id == "baseline")); + let resume_from = query_max_seq(&writer, &table).await; + + // ── Rapid flap: 5 cycles of down→up with < 500 ms between ────── + for cycle in 0..5 { + proxy.simulate_server_down().await; + sleep(Duration::from_millis(150)).await; + proxy.simulate_server_up(); + sleep(Duration::from_millis(250)).await; + + // Insert a row during each brief up window. + let _ = writer + .execute_query( + &format!( + "INSERT INTO {} (id, value) VALUES ('flap-{}', 'cycle')", + table, cycle + ), + None, + None, + None, + ) + .await; + } + + // ── Stabilise: leave the proxy up ─────────────────────────────── + proxy.simulate_server_up(); + + // Insert a final row that MUST arrive. + writer + .execute_query( + &format!( + "INSERT INTO {} (id, value) VALUES ('post-flap', 'stable')", + table + ), + None, + None, + None, + ) + .await + .expect("insert post-flap row"); + + // Wait for reconnect. + for _ in 0..100 { + if client.is_connected().await { + break; + } + sleep(Duration::from_millis(100)).await; + } + assert!( + client.is_connected().await, + "client should stabilise after rapid flapping stops" + ); + + // Client should have seen at least one disconnect during the flap. + assert!( + disconnect_count.load(Ordering::SeqCst) >= 1, + "at least one disconnect event should fire during rapid flapping" + ); + + // Collect all events after the stable resume. 
+ let mut post_ids = Vec::::new(); + let mut post_seq = None; + for _ in 0..60 { + if post_ids.iter().any(|id| id == "post-flap") { + break; + } + match timeout(Duration::from_millis(2000), sub.next()).await { + Ok(Some(Ok(ev))) => { + collect_ids_and_track_seq( + &ev, + &mut post_ids, + &mut post_seq, + Some(resume_from), + "rapid-flap recovery", + ); + }, + Ok(Some(Err(e))) => panic!("subscription error after flap: {}", e), + Ok(None) => panic!("subscription ended after flap"), + Err(_) => {}, + } + } + + assert!( + post_ids.iter().any(|id| id == "post-flap"), + "post-flap row must arrive after the connection stabilises" + ); + + // Verify no stale data (baseline must not replay). + assert!( + !post_ids.iter().any(|id| id == "baseline"), + "baseline row must not replay after resume" + ); + + sub.close().await.ok(); + client.disconnect().await; + proxy.shutdown().await; + }) + .await; + + assert!(result.is_ok(), "rapid flapping test timed out"); +} diff --git a/pg/crates/kalam-pg-client/Cargo.toml b/pg/crates/kalam-pg-client/Cargo.toml index 29de72484..cb3d70782 100644 --- a/pg/crates/kalam-pg-client/Cargo.toml +++ b/pg/crates/kalam-pg-client/Cargo.toml @@ -27,4 +27,5 @@ async-trait = { workspace = true } kalamdb-pg = { path = "../../../backend/crates/kalamdb-pg", features = ["server"] } ntest = { workspace = true } tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } +tokio-stream = { workspace = true } tonic = { workspace = true } diff --git a/pg/crates/kalam-pg-client/tests/connectivity.rs b/pg/crates/kalam-pg-client/tests/connectivity.rs index bc548d0f1..ce0d4b5df 100644 --- a/pg/crates/kalam-pg-client/tests/connectivity.rs +++ b/pg/crates/kalam-pg-client/tests/connectivity.rs @@ -1,25 +1,26 @@ use kalam_pg_client::RemoteKalamClient; use kalam_pg_common::RemoteServerConfig; use kalamdb_pg::{KalamPgService, PgServiceServer}; -use std::net::SocketAddr; use std::time::Duration; +use tokio::net::TcpListener; -/// Helper: start a gRPC PgService on 
`addr` and return a connected client. -async fn start_server_and_client(addr: &str) -> RemoteKalamClient { - let bind_addr: SocketAddr = addr.parse().expect("bind addr"); +/// Helper: start a gRPC PgService on an ephemeral port and return a connected client. +async fn start_server_and_client() -> RemoteKalamClient { + let listener = TcpListener::bind("127.0.0.1:0").await.expect("bind ephemeral port"); + let port = listener.local_addr().expect("local addr").port(); + let incoming = tokio_stream::wrappers::TcpListenerStream::new(listener); let service = KalamPgService::new(false, None); tokio::spawn(async move { tonic::transport::Server::builder() .add_service(PgServiceServer::new(service)) - .serve(bind_addr) + .serve_with_incoming(incoming) .await .expect("serve pg grpc"); }); - tokio::time::sleep(Duration::from_millis(100)).await; + tokio::time::sleep(Duration::from_millis(50)).await; - let port = bind_addr.port(); RemoteKalamClient::connect(RemoteServerConfig { host: "127.0.0.1".to_string(), port, @@ -32,7 +33,7 @@ async fn start_server_and_client(addr: &str) -> RemoteKalamClient { #[tokio::test] #[ntest::timeout(10000)] async fn remote_client_connects_and_opens_session() { - let client = start_server_and_client("127.0.0.1:59971").await; + let client = start_server_and_client().await; client.ping().await.expect("ping"); let session = client @@ -50,7 +51,7 @@ async fn remote_client_connects_and_opens_session() { #[tokio::test] #[ntest::timeout(10000)] async fn sequential_transactions_commit_cleanly() { - let client = start_server_and_client("127.0.0.1:59972").await; + let client = start_server_and_client().await; client.open_session("pg-seq-tx", None).await.expect("open session"); @@ -72,7 +73,7 @@ async fn sequential_transactions_commit_cleanly() { #[tokio::test] #[ntest::timeout(10000)] async fn stale_transaction_auto_rollback_on_new_begin() { - let client = start_server_and_client("127.0.0.1:59973").await; + let client = start_server_and_client().await; 
client.open_session("pg-stale-tx", None).await.expect("open session"); @@ -92,7 +93,7 @@ async fn stale_transaction_auto_rollback_on_new_begin() { #[tokio::test] #[ntest::timeout(10000)] async fn close_session_removes_server_state() { - let client = start_server_and_client("127.0.0.1:59974").await; + let client = start_server_and_client().await; client.open_session("pg-close-test", None).await.expect("open session"); @@ -110,9 +111,14 @@ async fn close_session_removes_server_state() { #[tokio::test] #[ntest::timeout(10000)] async fn connect_with_timeout_fails_on_unreachable_server() { + // Bind and immediately drop to guarantee the port is free and nothing listens. + let listener = TcpListener::bind("127.0.0.1:0").await.expect("bind"); + let free_port = listener.local_addr().expect("addr").port(); + drop(listener); + let config = RemoteServerConfig { host: "127.0.0.1".to_string(), - port: 59999, // Nothing listening here + port: free_port, timeout_ms: 1000, ..Default::default() }; diff --git a/pg/docker/docker-compose.test.yml b/pg/docker/docker-compose.test.yml index a1b344c1e..29d77b534 100644 --- a/pg/docker/docker-compose.test.yml +++ b/pg/docker/docker-compose.test.yml @@ -35,7 +35,7 @@ services: restart: "no" command: ["/usr/local/bin/kalamdb-server", "/config/server.toml"] healthcheck: - test: ["CMD", "/usr/local/bin/busybox", "wget", "-q", "-O", "/dev/null", "http://127.0.0.1:8080/v1/api/healthcheck"] + test: ["CMD", "/usr/local/bin/busybox", "wget", "-q", "-O", "/dev/null", "http://127.0.0.1:8080/health"] interval: 5s timeout: 3s start_period: 8s @@ -46,7 +46,7 @@ services: container_name: pg-kalam-e2e depends_on: kalamdb: - condition: service_healthy + condition: service_started ports: - "${KALAMDB_TEST_PG_PORT:-15433}:5432" environment: diff --git a/pg/docker/image-description.txt b/pg/docker/image-description.txt new file mode 100644 index 000000000..db7e76880 --- /dev/null +++ b/pg/docker/image-description.txt @@ -0,0 +1 @@ +PostgreSQL with the 
pg_kalam extension preinstalled so you can connect PostgreSQL foreign tables to a remote KalamDB server. \ No newline at end of file diff --git a/pg/docker/test.sh b/pg/docker/test.sh index 6001d30f6..0cfe76cd1 100755 --- a/pg/docker/test.sh +++ b/pg/docker/test.sh @@ -63,8 +63,14 @@ echo "" # Step 1: Check KalamDB server is reachable echo "Checking KalamDB server at $KALAMDB_API_URL ..." for i in $(seq 1 15); do - if curl -sf "$KALAMDB_API_URL/health" > /dev/null 2>&1 \ - || curl -sf "$KALAMDB_API_URL/v1/api/healthcheck" > /dev/null 2>&1; then + HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" \ + --connect-timeout 2 \ + --max-time 5 \ + -X POST "$KALAMDB_API_URL/v1/api/auth/login" \ + -H "Content-Type: application/json" \ + -d "{\"username\":\"admin\",\"password\":\"$KALAMDB_PASSWORD\"}" \ + || true) + if [ "$HTTP_CODE" != "000" ]; then echo "KalamDB server is reachable." break fi @@ -84,7 +90,7 @@ LOGIN_RESP=$(curl -sf "$KALAMDB_API_URL/v1/api/auth/login" \ -d "{\"username\":\"admin\",\"password\":\"$KALAMDB_PASSWORD\"}" \ 2>/dev/null || true) -BEARER_TOKEN=$(echo "$LOGIN_RESP" | grep -o '"access_token":"[^"]*"' | cut -d'"' -f4) +BEARER_TOKEN=$(printf '%s' "$LOGIN_RESP" | sed -n 's/.*"access_token":"\([^"]*\)".*/\1/p') if [ -z "$BEARER_TOKEN" ]; then echo "WARNING: Could not login to KalamDB. Trying setup first..." 
curl -sf "$KALAMDB_API_URL/v1/api/auth/setup" \ @@ -95,7 +101,7 @@ if [ -z "$BEARER_TOKEN" ]; then -H "Content-Type: application/json" \ -d "{\"username\":\"admin\",\"password\":\"$KALAMDB_PASSWORD\"}" \ 2>/dev/null || true) - BEARER_TOKEN=$(echo "$LOGIN_RESP" | grep -o '"access_token":"[^"]*"' | cut -d'"' -f4) + BEARER_TOKEN=$(printf '%s' "$LOGIN_RESP" | sed -n 's/.*"access_token":"\([^"]*\)".*/\1/p') fi if [ -z "$BEARER_TOKEN" ]; then @@ -183,7 +189,7 @@ for i in $(seq 1 5); do PAGER=cat "$PSQL_BIN" -h "$PGHOST" -p "$PGPORT" -U "$PGUSER" -d "$PGDATABASE" \ -v ON_ERROR_STOP=1 \ -P pager=off \ - -c "SET kalam.user_id = 'concurrent-user-$i'; INSERT INTO rmtest.profiles (id, name, age) VALUES ('cc$i', 'Concurrent$i', $((20 + i))); SELECT COUNT(*) FROM rmtest.profiles;" \ + -c "SET kalam.user_id = 'concurrent-user'; INSERT INTO rmtest.profiles (id, name, age) VALUES ('cc$i', 'Concurrent$i', $((20 + i))); SELECT COUNT(*) FROM rmtest.profiles;" \ > /dev/null 2>&1 ) & done @@ -203,7 +209,7 @@ fi # Verify all concurrent rows exist CC_COUNT=$( PAGER=cat "$PSQL_BIN" -h "$PGHOST" -p "$PGPORT" -U "$PGUSER" -d "$PGDATABASE" \ - -t -A -c "SET kalam.user_id = 'concurrent-user-1'; SELECT COUNT(*) FROM rmtest.profiles WHERE id IN ('cc1','cc2','cc3','cc4','cc5');" ) + -t -A -c "SET kalam.user_id = 'concurrent-user'; SELECT COUNT(*) FROM rmtest.profiles WHERE id IN ('cc1','cc2','cc3','cc4','cc5');" | tail -n 1 | tr -d '[:space:]' ) if [ "$CC_COUNT" -ge 5 ]; then echo " PASS: All 5 concurrent rows visible ($CC_COUNT)." 
@@ -216,7 +222,7 @@ fi PAGER=cat "$PSQL_BIN" -h "$PGHOST" -p "$PGPORT" -U "$PGUSER" -d "$PGDATABASE" \ -v ON_ERROR_STOP=1 \ -P pager=off \ - -c "SET kalam.user_id = 'concurrent-user-1'; DELETE FROM rmtest.profiles WHERE id IN ('cc1','cc2','cc3','cc4','cc5');" \ + -c "SET kalam.user_id = 'concurrent-user'; DELETE FROM rmtest.profiles WHERE id IN ('cc1','cc2','cc3','cc4','cc5');" \ > /dev/null 2>&1 echo "" From 56d863e896a941c83ee4db78a18d2ecdb2b0ca9f Mon Sep 17 00:00:00 2001 From: jamals86 Date: Tue, 31 Mar 2026 08:52:46 +0300 Subject: [PATCH 12/12] Add proxied network-impairment tests Add a suite of integration tests under link/tests/proxied that simulate various TCP/proxy impairments using TcpDisconnectProxy to validate client reconnect and resume behavior. New tests include: - blackhole_during_subscribe.rs: blackhole during subscribe handshake and recovery - event_counter_integrity.rs: verify connect/disconnect event counters across outages - gradual_degradation.rs: latency ramp causing reconnect and recovery - heavy_write_burst_recovery.rs: large write backlog delivered exactly once after outage - latency_during_snapshot.rs: latency during initial snapshot and subsequent live delivery - subscribe_during_reconnect.rs: add subscription while reconnecting and ensure delivery - unsubscribe_during_outage.rs: unsubscribe during outage and ensure it is not re-subscribed Also update link/tests/proxied.rs to register the new modules and adjust rapid_flap.rs to ignore an unused connect_count variable. These tests exercise resume/replay, duplicate avoidance, subscription lifecycle, and event-counting correctness under adverse network conditions. 
--- link/tests/proxied.rs | 16 ++ .../proxied/blackhole_during_subscribe.rs | 179 ++++++++++++++ link/tests/proxied/event_counter_integrity.rs | 210 ++++++++++++++++ link/tests/proxied/gradual_degradation.rs | 190 +++++++++++++++ .../proxied/heavy_write_burst_recovery.rs | 207 ++++++++++++++++ link/tests/proxied/latency_during_snapshot.rs | 147 +++++++++++ link/tests/proxied/rapid_flap.rs | 2 +- .../proxied/subscribe_during_reconnect.rs | 208 ++++++++++++++++ .../proxied/unsubscribe_during_outage.rs | 230 ++++++++++++++++++ 9 files changed, 1388 insertions(+), 1 deletion(-) create mode 100644 link/tests/proxied/blackhole_during_subscribe.rs create mode 100644 link/tests/proxied/event_counter_integrity.rs create mode 100644 link/tests/proxied/gradual_degradation.rs create mode 100644 link/tests/proxied/heavy_write_burst_recovery.rs create mode 100644 link/tests/proxied/latency_during_snapshot.rs create mode 100644 link/tests/proxied/subscribe_during_reconnect.rs create mode 100644 link/tests/proxied/unsubscribe_during_outage.rs diff --git a/link/tests/proxied.rs b/link/tests/proxied.rs index 49688346e..415c18047 100644 --- a/link/tests/proxied.rs +++ b/link/tests/proxied.rs @@ -7,12 +7,22 @@ mod common; #[path = "proxied/ack_before_first_batch.rs"] mod ack_before_first_batch; +#[path = "proxied/blackhole_during_subscribe.rs"] +mod blackhole_during_subscribe; #[path = "proxied/double_outage.rs"] mod double_outage; +#[path = "proxied/event_counter_integrity.rs"] +mod event_counter_integrity; +#[path = "proxied/gradual_degradation.rs"] +mod gradual_degradation; +#[path = "proxied/heavy_write_burst_recovery.rs"] +mod heavy_write_burst_recovery; #[path = "proxied/helpers.rs"] mod helpers; #[path = "proxied/large_snapshot_repeated_outages.rs"] mod large_snapshot_repeated_outages; +#[path = "proxied/latency_during_snapshot.rs"] +mod latency_during_snapshot; #[path = "proxied/live_updates_resume.rs"] mod live_updates_resume; #[path = 
"proxied/loading_resume_with_live_writes.rs"] @@ -21,6 +31,8 @@ mod loading_resume_with_live_writes; mod mixed_stage_recovery; #[path = "proxied/multi_sub_bounce.rs"] mod multi_sub_bounce; +#[path = "proxied/rapid_flap.rs"] +mod rapid_flap; #[path = "proxied/server_down_connecting.rs"] mod server_down_connecting; #[path = "proxied/server_down_initial_load.rs"] @@ -29,7 +41,11 @@ mod server_down_initial_load; mod socket_drop_resume; #[path = "proxied/staggered_outages.rs"] mod staggered_outages; +#[path = "proxied/subscribe_during_reconnect.rs"] +mod subscribe_during_reconnect; #[path = "proxied/transport_impairments.rs"] mod transport_impairments; +#[path = "proxied/unsubscribe_during_outage.rs"] +mod unsubscribe_during_outage; #[path = "proxied/update_delete_resume.rs"] mod update_delete_resume; diff --git a/link/tests/proxied/blackhole_during_subscribe.rs b/link/tests/proxied/blackhole_during_subscribe.rs new file mode 100644 index 000000000..5f3e155e5 --- /dev/null +++ b/link/tests/proxied/blackhole_during_subscribe.rs @@ -0,0 +1,179 @@ +use super::helpers::*; +use crate::common::tcp_proxy::TcpDisconnectProxy; +use kalam_link::SubscriptionConfig; +use std::time::Duration; +use tokio::time::{sleep, timeout}; + +/// Blackhole the proxy right as the client sends its subscribe request. +/// The TCP socket stays open but no data flows. The client should detect a +/// dead link (pong timeout), reconnect, re-subscribe, and ultimately deliver +/// the full snapshot plus any rows inserted during the outage. 
+#[tokio::test] +async fn test_blackhole_during_subscribe_handshake_recovers() { + let result = timeout(Duration::from_secs(60), async { + let writer = match create_test_client() { + Ok(c) => c, + Err(e) => { + eprintln!("Skipping test (writer client unavailable): {}", e); + return; + }, + }; + + let proxy = TcpDisconnectProxy::start(upstream_server_url()).await; + let (client, _connect_count, _disconnect_count) = + match create_test_client_with_events_for_base_url(proxy.base_url()) { + Ok(v) => v, + Err(e) => { + eprintln!("Skipping test (proxy client unavailable): {}", e); + proxy.shutdown().await; + return; + }, + }; + + let suffix = unique_suffix(); + let table = format!("default.blackhole_subscribe_{}", suffix); + ensure_table(&writer, &table).await; + + // Seed a few rows so the subscription has data to deliver. + for i in 0..5 { + writer + .execute_query( + &format!( + "INSERT INTO {} (id, value) VALUES ('seed-{}', 'val-{}')", + table, i, i + ), + None, + None, + None, + ) + .await + .expect("insert seed row"); + } + + client.connect().await.expect("connect through proxy"); + assert!( + proxy.wait_for_active_connections(1, Duration::from_secs(2)).await, + "proxy should see at least one active connection" + ); + + // Blackhole immediately BEFORE subscribing so the subscribe message + // (and the server's response) never make it across. + proxy.blackhole(); + + // Subscribe will eventually time out or hang because nothing comes back. + // We wrap it in a timeout so the test can continue. + let sub_result = timeout( + Duration::from_secs(3), + client.subscribe_with_config(SubscriptionConfig::new( + format!("blackhole-sub-{}", suffix), + format!("SELECT id, value FROM {}", table), + )), + ) + .await; + + // Whether subscribe itself succeeded (buffered locally) or timed out, + // we need to let the client recover. + let mut sub = match sub_result { + Ok(Ok(s)) => s, + Ok(Err(e)) => { + // subscribe failed — restore traffic and try again. 
+ proxy.restore_traffic(); + sleep(Duration::from_millis(500)).await; + + for _ in 0..80 { + if client.is_connected().await { + break; + } + sleep(Duration::from_millis(100)).await; + } + + client + .subscribe_with_config(SubscriptionConfig::new( + format!("blackhole-sub-{}", suffix), + format!("SELECT id, value FROM {}", table), + )) + .await + .unwrap_or_else(|_| panic!("subscribe after recovery should succeed: {}", e)) + }, + Err(_timeout) => { + // subscribe hung — restore traffic so the client can reconnect + // and the subscription can complete via re-subscribe. + proxy.restore_traffic(); + + for _ in 0..80 { + if client.is_connected().await { + break; + } + sleep(Duration::from_millis(100)).await; + } + + client + .subscribe_with_config(SubscriptionConfig::new( + format!("blackhole-sub-{}", suffix), + format!("SELECT id, value FROM {}", table), + )) + .await + .expect("subscribe after timeout recovery should succeed") + }, + }; + + // Insert a row after recovery. + writer + .execute_query( + &format!( + "INSERT INTO {} (id, value) VALUES ('post-blackhole', 'live')", + table + ), + None, + None, + None, + ) + .await + .expect("insert post-blackhole row"); + + // Collect events — we should see the seed rows AND the post-blackhole row. + let mut seen_ids = Vec::::new(); + let mut seq = None; + for _ in 0..60 { + if seen_ids.iter().any(|id| id == "post-blackhole") + && (0..5).all(|i| seen_ids.iter().any(|id| id == &format!("seed-{}", i))) + { + break; + } + match timeout(Duration::from_millis(2000), sub.next()).await { + Ok(Some(Ok(ev))) => { + collect_ids_and_track_seq( + &ev, + &mut seen_ids, + &mut seq, + None, + "blackhole-subscribe recovery", + ); + }, + Ok(Some(Err(e))) => { + // Subscription errors are acceptable during recovery. 
+ eprintln!("subscription error (may be transient): {}", e); + }, + Ok(None) => break, + Err(_) => {}, + } + } + + assert!( + (0..5).all(|i| seen_ids.iter().any(|id| id == &format!("seed-{}", i))), + "all seed rows should arrive after blackhole recovery; got: {:?}", + seen_ids + ); + assert!( + seen_ids.iter().any(|id| id == "post-blackhole"), + "post-blackhole row should arrive after recovery" + ); + + sub.close().await.ok(); + client.disconnect().await; + proxy.shutdown().await; + }) + .await; + + assert!(result.is_ok(), "blackhole during subscribe test timed out"); +} diff --git a/link/tests/proxied/event_counter_integrity.rs b/link/tests/proxied/event_counter_integrity.rs new file mode 100644 index 000000000..980845964 --- /dev/null +++ b/link/tests/proxied/event_counter_integrity.rs @@ -0,0 +1,210 @@ +use super::helpers::*; +use crate::common::tcp_proxy::TcpDisconnectProxy; +use kalam_link::SubscriptionConfig; +use std::sync::atomic::Ordering; +use std::time::Duration; +use tokio::time::{sleep, timeout}; + +/// After a complex outage+recovery sequence, verify that connect and disconnect +/// event counters are consistent: each reconnect fires exactly one on_connect, +/// each forcible drop fires exactly one on_disconnect, and the final state after +/// shutdown is deterministic. 
+#[tokio::test] +async fn test_event_counter_integrity_through_multiple_outages() { + let result = timeout(Duration::from_secs(60), async { + let writer = match create_test_client() { + Ok(c) => c, + Err(e) => { + eprintln!("Skipping test (writer client unavailable): {}", e); + return; + }, + }; + + let proxy = TcpDisconnectProxy::start(upstream_server_url()).await; + let (client, connect_count, disconnect_count) = + match create_test_client_with_events_for_base_url(proxy.base_url()) { + Ok(v) => v, + Err(e) => { + eprintln!("Skipping test (proxy client unavailable): {}", e); + proxy.shutdown().await; + return; + }, + }; + + let suffix = unique_suffix(); + let table = format!("default.event_counter_{}", suffix); + ensure_table(&writer, &table).await; + + // ── Phase 1: Initial connect ──────────────────────────────────── + client.connect().await.expect("initial connect"); + + let mut sub = client + .subscribe_with_config(SubscriptionConfig::new( + format!("event-counter-{}", suffix), + format!("SELECT id, value FROM {}", table), + )) + .await + .expect("subscribe"); + + let _ = timeout(TEST_TIMEOUT, sub.next()).await; + + for _ in 0..40 { + if connect_count.load(Ordering::SeqCst) >= 1 && client.is_connected().await { + break; + } + sleep(Duration::from_millis(100)).await; + } + + assert_eq!( + connect_count.load(Ordering::SeqCst), + 1, + "exactly one connect event should fire after initial connect" + ); + assert_eq!( + disconnect_count.load(Ordering::SeqCst), + 0, + "no disconnect events should fire before any outage" + ); + + // ── Phase 2: First outage + recovery ──────────────────────────── + proxy.simulate_server_down().await; + + for _ in 0..40 { + if disconnect_count.load(Ordering::SeqCst) >= 1 { + break; + } + sleep(Duration::from_millis(100)).await; + } + assert_eq!( + disconnect_count.load(Ordering::SeqCst), + 1, + "exactly one disconnect event after first outage" + ); + + proxy.simulate_server_up(); + + for _ in 0..80 { + if 
connect_count.load(Ordering::SeqCst) >= 2 && client.is_connected().await { + break; + } + sleep(Duration::from_millis(100)).await; + } + assert_eq!( + connect_count.load(Ordering::SeqCst), + 2, + "exactly two connect events after first recovery" + ); + + // ── Phase 3: Second outage + recovery ─────────────────────────── + proxy.simulate_server_down().await; + + for _ in 0..40 { + if disconnect_count.load(Ordering::SeqCst) >= 2 { + break; + } + sleep(Duration::from_millis(100)).await; + } + assert_eq!( + disconnect_count.load(Ordering::SeqCst), + 2, + "exactly two disconnect events after second outage" + ); + + proxy.simulate_server_up(); + + for _ in 0..80 { + if connect_count.load(Ordering::SeqCst) >= 3 && client.is_connected().await { + break; + } + sleep(Duration::from_millis(100)).await; + } + assert_eq!( + connect_count.load(Ordering::SeqCst), + 3, + "exactly three connect events after second recovery" + ); + + // ── Phase 4: Third outage + recovery (verify no drift) ────────── + proxy.simulate_server_down().await; + + for _ in 0..40 { + if disconnect_count.load(Ordering::SeqCst) >= 3 { + break; + } + sleep(Duration::from_millis(100)).await; + } + assert_eq!( + disconnect_count.load(Ordering::SeqCst), + 3, + "exactly three disconnect events after third outage" + ); + + proxy.simulate_server_up(); + + for _ in 0..80 { + if connect_count.load(Ordering::SeqCst) >= 4 && client.is_connected().await { + break; + } + sleep(Duration::from_millis(100)).await; + } + assert_eq!( + connect_count.load(Ordering::SeqCst), + 4, + "exactly four connect events after third recovery" + ); + + // Final state: all counters balanced (connects = disconnects + 1 for + // the still-active connection). 
+ let final_connects = connect_count.load(Ordering::SeqCst); + let final_disconnects = disconnect_count.load(Ordering::SeqCst); + assert_eq!( + final_connects, + final_disconnects + 1, + "connects ({}) should equal disconnects ({}) + 1 (the active connection)", + final_connects, + final_disconnects + ); + + // Verify that live data still flows. + writer + .execute_query( + &format!("INSERT INTO {} (id, value) VALUES ('counter-verify', 'v')", table), + None, + None, + None, + ) + .await + .expect("insert counter-verify row"); + + let mut ids = Vec::::new(); + let mut seq = None; + for _ in 0..12 { + if ids.iter().any(|id| id == "counter-verify") { + break; + } + match timeout(Duration::from_millis(2000), sub.next()).await { + Ok(Some(Ok(ev))) => { + collect_ids_and_track_seq( + &ev, + &mut ids, + &mut seq, + None, + "counter verify live", + ); + }, + _ => {}, + } + } + assert!( + ids.iter().any(|id| id == "counter-verify"), + "live data should still flow after all the outage cycles" + ); + + sub.close().await.ok(); + client.disconnect().await; + proxy.shutdown().await; + }) + .await; + + assert!(result.is_ok(), "event counter integrity test timed out"); +} diff --git a/link/tests/proxied/gradual_degradation.rs b/link/tests/proxied/gradual_degradation.rs new file mode 100644 index 000000000..7ffd22476 --- /dev/null +++ b/link/tests/proxied/gradual_degradation.rs @@ -0,0 +1,190 @@ +use super::helpers::*; +use crate::common::tcp_proxy::TcpDisconnectProxy; +use kalam_link::SubscriptionConfig; +use std::sync::atomic::Ordering; +use std::time::Duration; +use tokio::time::{sleep, timeout}; + +/// Latency increases in steps until it exceeds the pong timeout, forcing the +/// client to reconnect. Once latency drops back to zero the client should +/// resume and deliver any rows queued on the server during the degraded window. 
+#[tokio::test] +async fn test_gradual_latency_ramp_forces_reconnect_then_recovers() { + let result = timeout(Duration::from_secs(90), async { + let writer = match create_test_client() { + Ok(c) => c, + Err(e) => { + eprintln!("Skipping test (writer client unavailable): {}", e); + return; + }, + }; + + let proxy = TcpDisconnectProxy::start(upstream_server_url()).await; + let (client, connect_count, disconnect_count) = + match create_test_client_with_events_for_base_url(proxy.base_url()) { + Ok(v) => v, + Err(e) => { + eprintln!("Skipping test (proxy client unavailable): {}", e); + proxy.shutdown().await; + return; + }, + }; + + let suffix = unique_suffix(); + let table = format!("default.latency_ramp_{}", suffix); + ensure_table(&writer, &table).await; + + client.connect().await.expect("initial connect through proxy"); + + let mut sub = client + .subscribe_with_config(SubscriptionConfig::new( + format!("latency-ramp-{}", suffix), + format!("SELECT id, value FROM {}", table), + )) + .await + .expect("subscribe through proxy"); + + let _ = timeout(TEST_TIMEOUT, sub.next()).await; + + // Insert baseline. + writer + .execute_query( + &format!("INSERT INTO {} (id, value) VALUES ('baseline', 'ready')", table), + None, + None, + None, + ) + .await + .expect("insert baseline"); + + let mut baseline_ids = Vec::::new(); + let mut checkpoint = None; + for _ in 0..12 { + if baseline_ids.iter().any(|id| id == "baseline") { + break; + } + match timeout(Duration::from_millis(1200), sub.next()).await { + Ok(Some(Ok(ev))) => { + collect_ids_and_track_seq( + &ev, + &mut baseline_ids, + &mut checkpoint, + None, + "ramp baseline", + ); + }, + _ => {}, + } + } + assert!(baseline_ids.iter().any(|id| id == "baseline")); + let resume_from = query_max_seq(&writer, &table).await; + + // ── Ramp latency: 100 → 500 → 1500 → 3000ms ─────────────────── + // reconnect_test_timeouts() uses pong_timeout = 2s, receive_timeout = 5s. 
+ // At 3000ms per chunk the pong will time out and the client will disconnect. + let dc_before = disconnect_count.load(Ordering::SeqCst); + let latencies_ms = [100, 500, 1500, 3000]; + for (step, &latency_ms) in latencies_ms.iter().enumerate() { + proxy.set_latency(Duration::from_millis(latency_ms)); + sleep(Duration::from_millis(2000)).await; + + // Insert a row at each step so we can verify delivery later. + let _ = writer + .execute_query( + &format!( + "INSERT INTO {} (id, value) VALUES ('ramp-{}', 'at-{}ms')", + table, step, latency_ms + ), + None, + None, + None, + ) + .await; + } + + // Wait for the disconnect that the 3000ms latency should produce. + for _ in 0..80 { + if disconnect_count.load(Ordering::SeqCst) > dc_before { + break; + } + sleep(Duration::from_millis(100)).await; + } + + assert!( + disconnect_count.load(Ordering::SeqCst) > dc_before, + "client should disconnect once latency exceeds pong timeout" + ); + + // ── Clear latency and allow recovery ──────────────────────────── + proxy.clear_latency(); + let expected_connects = connect_count.load(Ordering::SeqCst) + 1; + + for _ in 0..100 { + if connect_count.load(Ordering::SeqCst) >= expected_connects + && client.is_connected().await + { + break; + } + sleep(Duration::from_millis(100)).await; + } + assert!( + client.is_connected().await, + "client should reconnect after latency spike clears" + ); + + // Insert a post-recovery marker. 
+ writer
+ .execute_query(
+ &format!(
+ "INSERT INTO {} (id, value) VALUES ('post-ramp', 'recovered')",
+ table
+ ),
+ None,
+ None,
+ None,
+ )
+ .await
+ .expect("insert post-ramp row");
+
+ // ── Collect resumed events ──────────────────────────────────────
+ let mut resumed_ids = Vec::<String>::new();
+ let mut resumed_seq = None;
+ for _ in 0..60 {
+ if resumed_ids.iter().any(|id| id == "post-ramp") {
+ break;
+ }
+ match timeout(Duration::from_millis(2000), sub.next()).await {
+ Ok(Some(Ok(ev))) => {
+ collect_ids_and_track_seq(
+ &ev,
+ &mut resumed_ids,
+ &mut resumed_seq,
+ Some(resume_from),
+ "ramp recovery",
+ );
+ },
+ Ok(Some(Err(e))) => panic!("subscription error after ramp: {}", e),
+ Ok(None) => panic!("subscription ended after ramp"),
+ Err(_) => {},
+ }
+ }
+
+ assert!(
+ resumed_ids.iter().any(|id| id == "post-ramp"),
+ "post-ramp row should arrive after recovery"
+ );
+
+ // Baseline must not replay.
+ assert!(
+ !resumed_ids.iter().any(|id| id == "baseline"),
+ "baseline must not replay after ramp recovery"
+ );
+
+ sub.close().await.ok();
+ client.disconnect().await;
+ proxy.shutdown().await;
+ })
+ .await;
+
+ assert!(result.is_ok(), "gradual latency ramp test timed out");
+}
diff --git a/link/tests/proxied/heavy_write_burst_recovery.rs b/link/tests/proxied/heavy_write_burst_recovery.rs
new file mode 100644
index 000000000..1fa894989
--- /dev/null
+++ b/link/tests/proxied/heavy_write_burst_recovery.rs
@@ -0,0 +1,207 @@
+use super::helpers::*;
+use crate::common::tcp_proxy::TcpDisconnectProxy;
+use kalam_link::SubscriptionConfig;
+use std::collections::HashSet;
+use std::sync::atomic::Ordering;
+use std::time::Duration;
+use tokio::time::{sleep, timeout};
+
+/// A large burst of writes (50 rows) is performed while the client is
+/// disconnected. After recovery, every single row must arrive exactly once
+/// with no duplicates and no gaps. This stress-tests the resume/replay
+/// path under a realistic write backlog. 
+#[tokio::test] +async fn test_heavy_write_burst_during_outage_all_delivered() { + let result = timeout(Duration::from_secs(90), async { + let writer = match create_test_client() { + Ok(c) => c, + Err(e) => { + eprintln!("Skipping test (writer client unavailable): {}", e); + return; + }, + }; + + let proxy = TcpDisconnectProxy::start(upstream_server_url()).await; + let (client, connect_count, disconnect_count) = + match create_test_client_with_events_for_base_url(proxy.base_url()) { + Ok(v) => v, + Err(e) => { + eprintln!("Skipping test (proxy client unavailable): {}", e); + proxy.shutdown().await; + return; + }, + }; + + let suffix = unique_suffix(); + let table = format!("default.burst_recovery_{}", suffix); + ensure_table(&writer, &table).await; + + client.connect().await.expect("connect through proxy"); + + let mut sub = client + .subscribe_with_config(SubscriptionConfig::new( + format!("burst-recovery-{}", suffix), + format!("SELECT id, value FROM {}", table), + )) + .await + .expect("subscribe through proxy"); + + let _ = timeout(TEST_TIMEOUT, sub.next()).await; + + // Insert a baseline row. 
+ writer
+ .execute_query(
+ &format!("INSERT INTO {} (id, value) VALUES ('baseline', 'ready')", table),
+ None,
+ None,
+ None,
+ )
+ .await
+ .expect("insert baseline");
+
+ let mut baseline_ids = Vec::<String>::new();
+ let mut checkpoint = None;
+ for _ in 0..12 {
+ if baseline_ids.iter().any(|id| id == "baseline") {
+ break;
+ }
+ match timeout(Duration::from_millis(1200), sub.next()).await {
+ Ok(Some(Ok(ev))) => {
+ collect_ids_and_track_seq(
+ &ev,
+ &mut baseline_ids,
+ &mut checkpoint,
+ None,
+ "burst baseline",
+ );
+ },
+ _ => {},
+ }
+ }
+ assert!(baseline_ids.iter().any(|id| id == "baseline"));
+ let resume_from = query_max_seq(&writer, &table).await;
+
+ // ── Take the proxy down ─────────────────────────────────────────
+ let dc_before = disconnect_count.load(Ordering::SeqCst);
+ proxy.simulate_server_down().await;
+
+ for _ in 0..40 {
+ if disconnect_count.load(Ordering::SeqCst) > dc_before {
+ break;
+ }
+ sleep(Duration::from_millis(100)).await;
+ }
+
+ // ── Burst-insert 50 rows while the client is disconnected ───────
+ let burst_count = 50_u32;
+ for i in 0..burst_count {
+ writer
+ .execute_query(
+ &format!(
+ "INSERT INTO {} (id, value) VALUES ('burst-{}', 'val-{}')",
+ table, i, i
+ ),
+ None,
+ None,
+ None,
+ )
+ .await
+ .unwrap_or_else(|e| panic!("burst insert #{} failed: {}", i, e));
+ }
+
+ // ── Bring the proxy back ────────────────────────────────────────
+ let expected_connects = connect_count.load(Ordering::SeqCst) + 1;
+ proxy.simulate_server_up();
+
+ for _ in 0..100 {
+ if connect_count.load(Ordering::SeqCst) >= expected_connects
+ && client.is_connected().await
+ {
+ break;
+ }
+ sleep(Duration::from_millis(100)).await;
+ }
+ assert!(client.is_connected().await, "client should reconnect after burst outage");
+
+ // Insert one final row after reconnect to mark the end. 
+ writer
+ .execute_query(
+ &format!(
+ "INSERT INTO {} (id, value) VALUES ('after-burst', 'marker')",
+ table
+ ),
+ None,
+ None,
+ None,
+ )
+ .await
+ .expect("insert after-burst marker");
+
+ // ── Collect all events and verify completeness ──────────────────
+ let mut seen_ids = Vec::<String>::new();
+ let mut seen_seq = None;
+ for _ in 0..120 {
+ if seen_ids.iter().any(|id| id == "after-burst")
+ && (0..burst_count)
+ .all(|i| seen_ids.iter().any(|id| id == &format!("burst-{}", i)))
+ {
+ break;
+ }
+ match timeout(Duration::from_millis(2000), sub.next()).await {
+ Ok(Some(Ok(ev))) => {
+ collect_ids_and_track_seq(
+ &ev,
+ &mut seen_ids,
+ &mut seen_seq,
+ Some(resume_from),
+ "burst recovery",
+ );
+ },
+ Ok(Some(Err(e))) => panic!("subscription error during burst recovery: {}", e),
+ Ok(None) => panic!("subscription ended during burst recovery"),
+ Err(_) => {},
+ }
+ }
+
+ // Verify every burst row arrived.
+ let missing: Vec<_> = (0..burst_count)
+ .filter(|i| !seen_ids.iter().any(|id| id == &format!("burst-{}", i)))
+ .collect();
+ assert!(
+ missing.is_empty(),
+ "all {} burst rows should arrive; missing indices: {:?}",
+ burst_count,
+ missing
+ );
+
+ // Verify no duplicates.
+ let burst_only: Vec<_> =
+ seen_ids.iter().filter(|id| id.starts_with("burst-")).collect();
+ let unique_count = burst_only.iter().collect::<HashSet<_>>().len();
+ assert_eq!(
+ burst_only.len(),
+ unique_count,
+ "burst rows must not contain duplicates; total={} unique={}",
+ burst_only.len(),
+ unique_count
+ );
+
+ assert!(
+ seen_ids.iter().any(|id| id == "after-burst"),
+ "after-burst marker should arrive"
+ );
+
+ // baseline must NOT replay. 
+ assert!( + !seen_ids.iter().any(|id| id == "baseline"), + "baseline row must not replay after resume" + ); + + sub.close().await.ok(); + client.disconnect().await; + proxy.shutdown().await; + }) + .await; + + assert!(result.is_ok(), "heavy write burst recovery test timed out"); +} diff --git a/link/tests/proxied/latency_during_snapshot.rs b/link/tests/proxied/latency_during_snapshot.rs new file mode 100644 index 000000000..ea5c24412 --- /dev/null +++ b/link/tests/proxied/latency_during_snapshot.rs @@ -0,0 +1,147 @@ +use super::helpers::*; +use crate::common::tcp_proxy::TcpDisconnectProxy; +use kalam_link::SubscriptionConfig; +use std::time::Duration; +use tokio::time::timeout; + +/// Inject high latency while the initial snapshot is being loaded. +/// If latency exceeds `initial_data_timeout` the subscription should fail +/// gracefully; if below the timeout, the snapshot should complete normally. +/// After the latency clears the client must resume and deliver live rows. +#[tokio::test] +async fn test_latency_spike_during_initial_snapshot_recovers() { + let result = timeout(Duration::from_secs(90), async { + let writer = match create_test_client() { + Ok(c) => c, + Err(e) => { + eprintln!("Skipping test (writer client unavailable): {}", e); + return; + }, + }; + + let proxy = TcpDisconnectProxy::start(upstream_server_url()).await; + let (client, _connect_count, _disconnect_count) = + match create_test_client_with_events_for_base_url(proxy.base_url()) { + Ok(v) => v, + Err(e) => { + eprintln!("Skipping test (proxy client unavailable): {}", e); + proxy.shutdown().await; + return; + }, + }; + + let suffix = unique_suffix(); + let table = format!("default.latency_snapshot_{}", suffix); + ensure_table(&writer, &table).await; + + // Seed enough rows to produce multiple batches. 
+ for i in 0..20 {
+ writer
+ .execute_query(
+ &format!(
+ "INSERT INTO {} (id, value) VALUES ('seed-{}', 'seed-val-{}')",
+ table, i, i
+ ),
+ None,
+ None,
+ None,
+ )
+ .await
+ .expect("insert seed row");
+ }
+
+ client.connect().await.expect("initial connect");
+
+ // Inject moderate latency BEFORE subscribing — the entire handshake and
+ // initial snapshot will run through the slow pipe.
+ // 200ms per chunk is enough to feel painful but well under the 20s
+ // initial_data_timeout.
+ proxy.set_latency(Duration::from_millis(200));
+
+ let mut sub = client
+ .subscribe_with_config(SubscriptionConfig::new(
+ format!("latency-snapshot-{}", suffix),
+ format!("SELECT id, value FROM {}", table),
+ ))
+ .await
+ .expect("subscribe should succeed even under latency");
+
+ // Collect the full initial snapshot despite the latency.
+ let mut seen_ids = Vec::<String>::new();
+ let mut checkpoint = None;
+ for _ in 0..60 {
+ if (0..20).all(|i| seen_ids.iter().any(|id| id == &format!("seed-{}", i))) {
+ break;
+ }
+ match timeout(Duration::from_millis(3000), sub.next()).await {
+ Ok(Some(Ok(ev))) => {
+ collect_ids_and_track_seq(
+ &ev,
+ &mut seen_ids,
+ &mut checkpoint,
+ None,
+ "latency-snapshot loading",
+ );
+ },
+ Ok(Some(Err(e))) => panic!("subscription error during latent snapshot: {}", e),
+ Ok(None) => panic!("subscription ended during latent snapshot"),
+ Err(_) => {},
+ }
+ }
+
+ assert!(
+ (0..20).all(|i| seen_ids.iter().any(|id| id == &format!("seed-{}", i))),
+ "all 20 seed rows should arrive despite latency; got {} ids",
+ seen_ids.len()
+ );
+ let _resume_from = query_max_seq(&writer, &table).await;
+
+ // Clear latency and confirm a live row arrives promptly. 
+ proxy.clear_latency();
+
+ writer
+ .execute_query(
+ &format!(
+ "INSERT INTO {} (id, value) VALUES ('live-after-latency', 'post')",
+ table
+ ),
+ None,
+ None,
+ None,
+ )
+ .await
+ .expect("insert live row after latency clears");
+
+ let mut live_ids = Vec::<String>::new();
+ let mut live_seq = None;
+ for _ in 0..12 {
+ if live_ids.iter().any(|id| id == "live-after-latency") {
+ break;
+ }
+ match timeout(Duration::from_millis(2000), sub.next()).await {
+ Ok(Some(Ok(ev))) => {
+ collect_ids_and_track_seq(
+ &ev,
+ &mut live_ids,
+ &mut live_seq,
+ None,
+ "post-latency live",
+ );
+ },
+ _ => {},
+ }
+ }
+
+ assert!(
+ live_ids.iter().any(|id| id == "live-after-latency"),
+ "live row should arrive promptly once latency clears"
+ );
+
+ sub.close().await.ok();
+ client.disconnect().await;
+ proxy.shutdown().await;
+ })
+ .await;
+
+ assert!(result.is_ok(), "latency during snapshot test timed out");
+}
diff --git a/link/tests/proxied/rapid_flap.rs b/link/tests/proxied/rapid_flap.rs
index d406e1227..1e157a425 100644
--- a/link/tests/proxied/rapid_flap.rs
+++ b/link/tests/proxied/rapid_flap.rs
@@ -21,7 +21,7 @@ async fn test_rapid_flapping_connection_stabilises_and_resumes() {
 };
 let proxy = TcpDisconnectProxy::start(upstream_server_url()).await;
- let (client, connect_count, disconnect_count) =
+ let (client, _connect_count, disconnect_count) =
 match create_test_client_with_events_for_base_url(proxy.base_url()) {
 Ok(v) => v,
 Err(e) => {
diff --git a/link/tests/proxied/subscribe_during_reconnect.rs b/link/tests/proxied/subscribe_during_reconnect.rs
new file mode 100644
index 000000000..0da90f6bd
--- /dev/null
+++ b/link/tests/proxied/subscribe_during_reconnect.rs
@@ -0,0 +1,208 @@
+use super::helpers::*;
+use crate::common::tcp_proxy::TcpDisconnectProxy;
+use kalam_link::SubscriptionConfig;
+use std::sync::atomic::Ordering;
+use std::time::Duration;
+use tokio::time::{sleep, timeout};
+
+/// Add a new subscription while the client is actively reconnecting after an
+/// 
outage. The new subscription must eventually be established and deliver its +/// data once the connection stabilises. +#[tokio::test] +async fn test_subscribe_during_reconnect_eventually_delivers() { + let result = timeout(Duration::from_secs(60), async { + let writer = match create_test_client() { + Ok(c) => c, + Err(e) => { + eprintln!("Skipping test (writer client unavailable): {}", e); + return; + }, + }; + + let proxy = TcpDisconnectProxy::start(upstream_server_url()).await; + let (client, _connect_count, disconnect_count) = + match create_test_client_with_events_for_base_url(proxy.base_url()) { + Ok(v) => v, + Err(e) => { + eprintln!("Skipping test (proxy client unavailable): {}", e); + proxy.shutdown().await; + return; + }, + }; + + let suffix = unique_suffix(); + let table_a = format!("default.sub_reconn_a_{}", suffix); + let table_b = format!("default.sub_reconn_b_{}", suffix); + ensure_table(&writer, &table_a).await; + ensure_table(&writer, &table_b).await; + + client.connect().await.expect("initial connect"); + + // Subscribe to table A and confirm it works. 
+ let mut sub_a = client
+ .subscribe_with_config(SubscriptionConfig::new(
+ format!("sub-reconn-a-{}", suffix),
+ format!("SELECT id, value FROM {}", table_a),
+ ))
+ .await
+ .expect("subscribe A");
+
+ let _ = timeout(TEST_TIMEOUT, sub_a.next()).await;
+
+ writer
+ .execute_query(
+ &format!("INSERT INTO {} (id, value) VALUES ('a-pre', 'val')", table_a),
+ None,
+ None,
+ None,
+ )
+ .await
+ .expect("insert a-pre");
+
+ let mut a_ids = Vec::<String>::new();
+ let mut a_seq = None;
+ for _ in 0..12 {
+ if a_ids.iter().any(|id| id == "a-pre") {
+ break;
+ }
+ match timeout(Duration::from_millis(1200), sub_a.next()).await {
+ Ok(Some(Ok(ev))) => {
+ collect_ids_and_track_seq(&ev, &mut a_ids, &mut a_seq, None, "sub-reconn A pre");
+ },
+ _ => {},
+ }
+ }
+ assert!(a_ids.iter().any(|id| id == "a-pre"), "a-pre should be observed");
+
+ // ── Drop the connection ─────────────────────────────────────────
+ let dc_before = disconnect_count.load(Ordering::SeqCst);
+ proxy.simulate_server_down().await;
+
+ for _ in 0..40 {
+ if disconnect_count.load(Ordering::SeqCst) > dc_before {
+ break;
+ }
+ sleep(Duration::from_millis(100)).await;
+ }
+
+ // ── Subscribe to table B WHILE disconnected ─────────────────────
+ // This should be queued internally and established once connected.
+ writer
+ .execute_query(
+ &format!("INSERT INTO {} (id, value) VALUES ('b-seed', 'val')", table_b),
+ None,
+ None,
+ None,
+ )
+ .await
+ .expect("insert b-seed");
+
+ // Bring the proxy back so the client can reconnect.
+ proxy.simulate_server_up();
+
+ // Subscribe while reconnect is (potentially) in progress.
+ let sub_b_future = client.subscribe_with_config(SubscriptionConfig::new(
+ format!("sub-reconn-b-{}", suffix),
+ format!("SELECT id, value FROM {}", table_b),
+ ));
+
+ // Give the reconnect a moment to progress. 
+ let mut sub_b = timeout(Duration::from_secs(15), sub_b_future)
+ .await
+ .expect("subscribe B should not hang forever")
+ .expect("subscribe B should succeed after reconnect");
+
+ // Wait for the connection to be fully established.
+ for _ in 0..80 {
+ if client.is_connected().await {
+ break;
+ }
+ sleep(Duration::from_millis(100)).await;
+ }
+ assert!(client.is_connected().await, "client should reconnect");
+
+ // Insert rows after reconnect.
+ writer
+ .execute_query(
+ &format!("INSERT INTO {} (id, value) VALUES ('a-post', 'val')", table_a),
+ None,
+ None,
+ None,
+ )
+ .await
+ .expect("insert a-post");
+ writer
+ .execute_query(
+ &format!("INSERT INTO {} (id, value) VALUES ('b-post', 'val')", table_b),
+ None,
+ None,
+ None,
+ )
+ .await
+ .expect("insert b-post");
+
+ // Verify table A subscription resumed.
+ let mut a_post_ids = Vec::<String>::new();
+ let mut a_post_seq = None;
+ for _ in 0..30 {
+ if a_post_ids.iter().any(|id| id == "a-post") {
+ break;
+ }
+ match timeout(Duration::from_millis(2000), sub_a.next()).await {
+ Ok(Some(Ok(ev))) => {
+ collect_ids_and_track_seq(
+ &ev,
+ &mut a_post_ids,
+ &mut a_post_seq,
+ None,
+ "sub-reconn A post",
+ );
+ },
+ _ => {},
+ }
+ }
+ assert!(
+ a_post_ids.iter().any(|id| id == "a-post"),
+ "table A subscription should resume and deliver a-post"
+ );
+
+ // Verify table B subscription delivers both the seed and post row. 
+ let mut b_ids = Vec::<String>::new();
+ let mut b_seq = None;
+ for _ in 0..30 {
+ if b_ids.iter().any(|id| id == "b-seed")
+ && b_ids.iter().any(|id| id == "b-post")
+ {
+ break;
+ }
+ match timeout(Duration::from_millis(2000), sub_b.next()).await {
+ Ok(Some(Ok(ev))) => {
+ collect_ids_and_track_seq(
+ &ev,
+ &mut b_ids,
+ &mut b_seq,
+ None,
+ "sub-reconn B",
+ );
+ },
+ _ => {},
+ }
+ }
+ assert!(
+ b_ids.iter().any(|id| id == "b-seed"),
+ "table B subscription should deliver b-seed"
+ );
+ assert!(
+ b_ids.iter().any(|id| id == "b-post"),
+ "table B subscription should deliver b-post"
+ );
+
+ sub_a.close().await.ok();
+ sub_b.close().await.ok();
+ client.disconnect().await;
+ proxy.shutdown().await;
+ })
+ .await;
+
+ assert!(result.is_ok(), "subscribe during reconnect test timed out");
+}
diff --git a/link/tests/proxied/unsubscribe_during_outage.rs b/link/tests/proxied/unsubscribe_during_outage.rs
new file mode 100644
index 000000000..5a5673ff3
--- /dev/null
+++ b/link/tests/proxied/unsubscribe_during_outage.rs
@@ -0,0 +1,230 @@
+use super::helpers::*;
+use crate::common::tcp_proxy::TcpDisconnectProxy;
+use kalam_link::SubscriptionConfig;
+use std::sync::atomic::Ordering;
+use std::time::Duration;
+use tokio::time::{sleep, timeout};
+
+/// Close (unsubscribe from) a subscription while the client is disconnected.
+/// On reconnect the shared connection must NOT re-subscribe the dropped query,
+/// but must still re-subscribe any remaining active ones. 
+#[tokio::test] +async fn test_unsubscribe_during_outage_prevents_resubscribe() { + let result = timeout(Duration::from_secs(60), async { + let writer = match create_test_client() { + Ok(c) => c, + Err(e) => { + eprintln!("Skipping test (writer client unavailable): {}", e); + return; + }, + }; + + let proxy = TcpDisconnectProxy::start(upstream_server_url()).await; + let (client, connect_count, disconnect_count) = + match create_test_client_with_events_for_base_url(proxy.base_url()) { + Ok(v) => v, + Err(e) => { + eprintln!("Skipping test (proxy client unavailable): {}", e); + proxy.shutdown().await; + return; + }, + }; + + let suffix = unique_suffix(); + let table_keep = format!("default.unsub_keep_{}", suffix); + let table_drop = format!("default.unsub_drop_{}", suffix); + ensure_table(&writer, &table_keep).await; + ensure_table(&writer, &table_drop).await; + + client.connect().await.expect("connect through proxy"); + + let mut sub_keep = client + .subscribe_with_config(SubscriptionConfig::new( + format!("unsub-keep-{}", suffix), + format!("SELECT id, value FROM {}", table_keep), + )) + .await + .expect("subscribe keep"); + + let mut sub_drop = client + .subscribe_with_config(SubscriptionConfig::new( + format!("unsub-drop-{}", suffix), + format!("SELECT id, value FROM {}", table_drop), + )) + .await + .expect("subscribe drop"); + + // Consume initial events from both. + let _ = timeout(TEST_TIMEOUT, sub_keep.next()).await; + let _ = timeout(TEST_TIMEOUT, sub_drop.next()).await; + + // Insert pre-outage rows. + writer + .execute_query( + &format!("INSERT INTO {} (id, value) VALUES ('keep-pre', 'v')", table_keep), + None, + None, + None, + ) + .await + .expect("insert keep-pre"); + writer + .execute_query( + &format!("INSERT INTO {} (id, value) VALUES ('drop-pre', 'v')", table_drop), + None, + None, + None, + ) + .await + .expect("insert drop-pre"); + + // Observe pre-outage rows. 
+ let mut keep_ids = Vec::<String>::new();
+ let mut keep_seq = None;
+ for _ in 0..12 {
+ if keep_ids.iter().any(|id| id == "keep-pre") {
+ break;
+ }
+ match timeout(Duration::from_millis(1200), sub_keep.next()).await {
+ Ok(Some(Ok(ev))) => {
+ collect_ids_and_track_seq(
+ &ev,
+ &mut keep_ids,
+ &mut keep_seq,
+ None,
+ "unsub keep pre",
+ );
+ },
+ _ => {},
+ }
+ }
+ assert!(keep_ids.iter().any(|id| id == "keep-pre"));
+
+ let mut drop_ids = Vec::<String>::new();
+ let mut drop_seq = None;
+ for _ in 0..12 {
+ if drop_ids.iter().any(|id| id == "drop-pre") {
+ break;
+ }
+ match timeout(Duration::from_millis(1200), sub_drop.next()).await {
+ Ok(Some(Ok(ev))) => {
+ collect_ids_and_track_seq(
+ &ev,
+ &mut drop_ids,
+ &mut drop_seq,
+ None,
+ "unsub drop pre",
+ );
+ },
+ _ => {},
+ }
+ }
+ assert!(drop_ids.iter().any(|id| id == "drop-pre"));
+
+ // ── Take the proxy down ─────────────────────────────────────────
+ let dc_before = disconnect_count.load(Ordering::SeqCst);
+ proxy.simulate_server_down().await;
+
+ for _ in 0..40 {
+ if disconnect_count.load(Ordering::SeqCst) > dc_before {
+ break;
+ }
+ sleep(Duration::from_millis(100)).await;
+ }
+
+ // ── Drop sub_drop WHILE disconnected ────────────────────────────
+ sub_drop.close().await.ok();
+
+ // Insert gap rows into both tables while disconnected. 
+ writer
+ .execute_query(
+ &format!("INSERT INTO {} (id, value) VALUES ('keep-gap', 'g')", table_keep),
+ None,
+ None,
+ None,
+ )
+ .await
+ .expect("insert keep-gap");
+ writer
+ .execute_query(
+ &format!("INSERT INTO {} (id, value) VALUES ('drop-gap', 'g')", table_drop),
+ None,
+ None,
+ None,
+ )
+ .await
+ .expect("insert drop-gap (should NOT be delivered)");
+
+ // ── Bring the proxy back ────────────────────────────────────────
+ let expected_connects = connect_count.load(Ordering::SeqCst) + 1;
+ proxy.simulate_server_up();
+
+ for _ in 0..100 {
+ if connect_count.load(Ordering::SeqCst) >= expected_connects
+ && client.is_connected().await
+ {
+ break;
+ }
+ sleep(Duration::from_millis(100)).await;
+ }
+ assert!(client.is_connected().await, "client should reconnect");
+
+ // Verify the kept subscription resumes and delivers the gap row.
+ writer
+ .execute_query(
+ &format!("INSERT INTO {} (id, value) VALUES ('keep-post', 'p')", table_keep),
+ None,
+ None,
+ None,
+ )
+ .await
+ .expect("insert keep-post");
+
+ let mut keep_resumed = Vec::<String>::new();
+ let mut keep_resumed_seq = None;
+ for _ in 0..30 {
+ if keep_resumed.iter().any(|id| id == "keep-gap")
+ && keep_resumed.iter().any(|id| id == "keep-post")
+ {
+ break;
+ }
+ match timeout(Duration::from_millis(2000), sub_keep.next()).await {
+ Ok(Some(Ok(ev))) => {
+ collect_ids_and_track_seq(
+ &ev,
+ &mut keep_resumed,
+ &mut keep_resumed_seq,
+ None,
+ "unsub keep resumed",
+ );
+ },
+ _ => {},
+ }
+ }
+ assert!(
+ keep_resumed.iter().any(|id| id == "keep-gap"),
+ "kept subscription should deliver gap row after reconnect"
+ );
+ assert!(
+ keep_resumed.iter().any(|id| id == "keep-post"),
+ "kept subscription should deliver post-reconnect row"
+ );
+
+ // Verify the dropped subscription is NOT listed as active. 
+ let subs = client.subscriptions().await; + let drop_sub_active = subs.iter().any(|entry| { + entry.query == format!("SELECT id, value FROM {}", table_drop) + }); + assert!( + !drop_sub_active, + "dropped subscription must NOT be re-subscribed on reconnect" + ); + + sub_keep.close().await.ok(); + client.disconnect().await; + proxy.shutdown().await; + }) + .await; + + assert!(result.is_ok(), "unsubscribe during outage test timed out"); +}