diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 567e33fd..0892f41a 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -30,6 +30,24 @@ steps: STACK_VERSION: "9.3.0" command: ".buildkite/run-es-tests.sh" + - group: "KB Functional Tests" + steps: + - label: ":kibana: KB functional tests — Node {{matrix.node}}" + key: "kb-functional" + matrix: + setup: + node: + - "22" + - "24" + agents: + provider: gcp + image: family/kibana-ubuntu-2404 + machineType: n2-standard-4 + env: + NODE_VERSION: "{{matrix.node}}" + STACK_VERSION: "9.3.0" + command: ".buildkite/run-kb-tests.sh" + - group: "Cloud Smoke Tests" steps: - label: ":cloud: Cloud smoke tests — Node {{matrix.node}}" diff --git a/.buildkite/run-kb-tests-runner.sh b/.buildkite/run-kb-tests-runner.sh new file mode 100755 index 00000000..fe25332a --- /dev/null +++ b/.buildkite/run-kb-tests-runner.sh @@ -0,0 +1,132 @@ +#!/usr/bin/env bash +# Copyright Elasticsearch B.V. and contributors +# SPDX-License-Identifier: Apache-2.0 +# +# Runs INSIDE the test-runner container on the same Docker network as ES/Kibana. +# Prefers Docker DNS aliases (elasticsearch / kibana) but falls back to the +# container IPs passed via ES_IP / KB_IP if the embedded DNS server is +# unavailable (known issue with some rootless/userns Docker configurations). + +set -euo pipefail + +ES_PASSWORD="${ES_PASSWORD:-changeme}" + +echo "--- Installing curl and jq" +apt-get update -qq && apt-get install -y -q --no-install-recommends curl jq + +# Prefer Docker DNS aliases; fall back to container IPs if DNS is unavailable. +ES_HOST="elasticsearch" +KB_HOST="kibana" + +if ! getent hosts elasticsearch > /dev/null 2>&1; then + echo "DNS for 'elasticsearch' unavailable; falling back to ES_IP=${ES_IP:-}" + ES_HOST="${ES_IP:-elasticsearch}" +fi +if ! getent hosts kibana > /dev/null 2>&1; then + echo "DNS for 'kibana' unavailable; falling back to KB_IP=${KB_IP:-}" + KB_HOST="${KB_IP:-kibana}" +fi + +echo "ES_HOST=${ES_HOST} KB_HOST=${KB_HOST}" + +# ── Wait for Elasticsearch ─────────────────────────────────────────────────── +echo "--- Waiting for Elasticsearch to be healthy" +RETRIES=0 +MAX_RETRIES=180 +until curl -sf -u "elastic:${ES_PASSWORD}" "http://${ES_HOST}:9200/_cluster/health" > /dev/null 2>&1; do + RETRIES=$((RETRIES + 1)) + if [ "$RETRIES" -ge "$MAX_RETRIES" ]; then + echo "Elasticsearch did not become healthy after $((MAX_RETRIES * 2))s" + curl -s -u "elastic:${ES_PASSWORD}" "http://${ES_HOST}:9200/_cluster/health" 2>&1 || true + exit 1 + fi + if [ $((RETRIES % 15)) -eq 0 ]; then + echo " still waiting for Elasticsearch... (${RETRIES}/${MAX_RETRIES})" + fi + sleep 2 +done +echo "Elasticsearch cluster is up" + +# The cluster can report healthy before the .security index is fully bootstrapped. +# Kibana's alerting/connectors plugins depend on ES API keys (encryptedSavedObjects), +# so we must confirm the security index is ready. +# Technique borrowed from Kibana's own kbn-es tooling (wait_for_security_index.ts). 
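+# (A successful API-key creation is the readiness signal; until the security
+# index is bootstrapped the probe simply fails and we keep polling.)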
+echo "--- Waiting for Elasticsearch security index to be ready"
+RETRIES=0
+MAX_RETRIES=60
+until curl -sf -u "elastic:${ES_PASSWORD}" \
+    -X POST "http://${ES_HOST}:9200/_security/api_key" \
+    -H "Content-Type: application/json" \
+    -d '{"name":"healthcheck","expiration":"1m"}' > /dev/null 2>&1; do
+  RETRIES=$((RETRIES + 1))
+  if [ "$RETRIES" -ge "$MAX_RETRIES" ]; then
+    echo "Elasticsearch security index did not become ready in time"
+    exit 1
+  fi
+  sleep 2
+done
+echo "Elasticsearch is ready"
+
+echo "--- Waiting for Kibana to be healthy"
+RETRIES=0
+MAX_RETRIES=90
+until curl -sf -u "elastic:${ES_PASSWORD}" "http://${KB_HOST}:5601/api/status" \
+    | jq -e '.status.overall.level == "available"' > /dev/null 2>&1; do
+  RETRIES=$((RETRIES + 1))
+  if [ "$RETRIES" -ge "$MAX_RETRIES" ]; then
+    echo "Kibana did not become healthy in time"
+    echo "Last Kibana status:"
+    # Plain -s (not -sf) so the body is still printed when the status is non-2xx.
+    curl -s -u "elastic:${ES_PASSWORD}" "http://${KB_HOST}:5601/api/status" 2>&1 || true
+    exit 1
+  fi
+  sleep 3
+done
+echo "Kibana core is ready"
+
+# Poll /api/status for plugin-level readiness rather than calling the actions
+# endpoint directly (the actions HTTP context returns 500 briefly after
+# Kibana's overall "available", and Fleet degradation is isolated to
+# plugins.fleet and does not affect plugins.actions or plugins.alerting).
+echo "--- Waiting for actions + alerting plugins to be available"
+RETRIES=0
+MAX_RETRIES=60
+until curl -sf -u "elastic:${ES_PASSWORD}" "http://${KB_HOST}:5601/api/status" \
+    | jq -e '
+      (.status.plugins.actions.level // "") == "available" and
+      (.status.plugins.alerting.level // "") == "available"
+    ' > /dev/null 2>&1; do
+  RETRIES=$((RETRIES + 1))
+  if [ "$RETRIES" -ge "$MAX_RETRIES" ]; then
+    echo "Actions/alerting plugins did not reach 'available' in time"
+    echo "Last plugin statuses:"
+    curl -s -u "elastic:${ES_PASSWORD}" "http://${KB_HOST}:5601/api/status" \
+      | jq '.status.plugins | with_entries(select(.value.level != "available"))' 2>&1 || true
+    exit 1
+  fi
+  if [ $((RETRIES % 10)) -eq 0 ]; then
+    echo "  still waiting... (${RETRIES}/${MAX_RETRIES})"
+  fi
+  sleep 2
+done
+echo "Kibana is ready"
+
+echo "--- Generating CLI config file"
+cat > /tmp/elastic-rc.yml <<EOF
+kibana:
+  url: http://${KB_HOST}:5601
+  auth:
+    username: elastic
+    password: ${ES_PASSWORD}
+EOF
+# The CLI's missing-config error says "Check your .elasticrc.yml config file",
+# so it is assumed to pick the file up from the home directory under that name.
+cp /tmp/elastic-rc.yml "$HOME/.elasticrc.yml"
+
+echo "--- Running Kibana functional tests"
+npm run test:functional:kb
diff --git a/.buildkite/run-kb-tests.sh b/.buildkite/run-kb-tests.sh
new file mode 100755
--- /dev/null
+++ b/.buildkite/run-kb-tests.sh
+#!/usr/bin/env bash
+# Copyright Elasticsearch B.V. and contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Orchestrates the Kibana functional test run on the Buildkite agent: creates
+# a Docker network, starts Elasticsearch and Kibana, builds the CLI, and runs
+# the tests inside a container attached to the same network.

+set -euo pipefail
+
+STACK_VERSION="${STACK_VERSION:?STACK_VERSION must be set}"
+NODE_VERSION="${NODE_VERSION:?NODE_VERSION must be set}"
+
+# Fixed names are fine: each job gets its own agent. The exact values below are
+# illustrative; anything unique on the agent works.
+NETWORK_NAME="kb-tests"
+ES_CONTAINER_NAME="kb-tests-es"
+KB_CONTAINER_NAME="kb-tests-kibana"
+TEST_RUNNER_NAME="kb-tests-runner"
+# Debian-based Node image (assumed); the runner script apt-get installs curl/jq.
+NODE_RUNNER_IMAGE="node:${NODE_VERSION}"
+
+cleanup() {
+  echo "--- Elasticsearch logs (last 50 lines)"
+  docker logs "$ES_CONTAINER_NAME" 2>&1 | tail -50 || true
+  echo "--- Kibana logs (last 50 lines)"
+  docker logs "$KB_CONTAINER_NAME" 2>&1 | tail -50 || true
+  echo "--- Cleaning up"
+  docker rm -f "$TEST_RUNNER_NAME" 2>/dev/null || true
+  docker rm -f "$KB_CONTAINER_NAME" 2>/dev/null || true
+  docker rm -f "$ES_CONTAINER_NAME" 2>/dev/null || true
+  docker network rm "$NETWORK_NAME" 2>/dev/null || true
+}
+trap cleanup EXIT
+
+# Use fixed dummy values so the CLI config can reference them without secrets management.
+ES_PASSWORD="changeme"
+KIBANA_ENCRYPTION_KEY="xP9mfMqnRrNHmSmzPoBtLQvLFzYdHxKj" # gitleaks:allow
+
+ES_IMAGE="docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}"
+KB_IMAGE="docker.elastic.co/kibana/kibana:${STACK_VERSION}"
+
+# ── Docker network ───────────────────────────────────────────────────────────
+echo "--- Creating Docker network"
+docker network create "$NETWORK_NAME" 2>/dev/null || true
+
+# ── Elasticsearch ────────────────────────────────────────────────────────────
+# Start ES as early as possible. It needs ~1-2 minutes to bootstrap the
+# security index. Kibana will not start until after the build so ES has
+# plenty of time to be fully ready before Kibana connects.
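+#
+# Rough timeline on a cold agent (ballpark figures, not measured):
+#   t+0m   ES starts booting; Kibana and test-runner image pulls run in parallel
+#   t+0m   nvm/Node setup, npm ci and npm run build run on the host
+#   t+~3m  kibana_system password is set, Kibana starts, the runner executes tests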
+
+echo "--- Loading Elasticsearch image"
+ES_CACHE_DIR="${ES_CACHE_DIR:-}"
+if [[ -n "$ES_CACHE_DIR" ]] && compgen -G "$ES_CACHE_DIR/elasticsearch-$STACK_VERSION*.tar.gz" > /dev/null 2>&1; then
+  echo "  Loading from agent cache: $ES_CACHE_DIR"
+  docker load < "$(ls "$ES_CACHE_DIR/elasticsearch-$STACK_VERSION"*.tar.gz | head -1)"
+else
+  docker pull "$ES_IMAGE"
+fi
+
+echo "--- Starting Elasticsearch ${STACK_VERSION} (background)"
+# No --rm here: cleanup dumps this container's logs (which must survive a
+# crash) and then removes it explicitly.
+docker run \
+  --name "$ES_CONTAINER_NAME" \
+  --network "$NETWORK_NAME" \
+  --network-alias elasticsearch \
+  --env "discovery.type=single-node" \
+  --env "xpack.license.self_generated.type=trial" \
+  --env "action.destructive_requires_name=false" \
+  --env "ELASTIC_PASSWORD=${ES_PASSWORD}" \
+  --env "xpack.security.http.ssl.enabled=false" \
+  --env "xpack.security.transport.ssl.enabled=false" \
+  --env "ES_JAVA_OPTS=-Xms512m -Xmx512m" \
+  --detach \
+  "$ES_IMAGE"
+
+# Pull Kibana and the test-runner images while ES boots and the CLI builds.
+echo "--- Pulling Kibana image (background)"
+docker pull "$KB_IMAGE" &
+KB_PULL_PID=$!
+
+echo "--- Pulling test-runner image (background)"
+docker pull "$NODE_RUNNER_IMAGE" &
+NODE_PULL_PID=$!
+
+# ── Build CLI (concurrent with ES startup + image pulls) ────────────────────
+
+echo "--- Setting up Node.js ${NODE_VERSION}"
+export NVM_DIR="${NVM_DIR:-$HOME/.nvm}"
+if [ ! -s "$NVM_DIR/nvm.sh" ]; then
+  echo "nvm not found, installing..."
+  mkdir -p "$NVM_DIR"
+  # jq may not be installed at this point, so fall back to a pinned version if
+  # the lookup pipeline fails for any reason.
+  NVM_VERSION=$(curl -s https://api.github.com/repos/nvm-sh/nvm/releases/latest | jq -r '.tag_name // "v0.39.7"' 2>/dev/null || echo "v0.39.7")
+  echo "Installing nvm ${NVM_VERSION}"
+  curl -o- "https://raw.githubusercontent.com/nvm-sh/nvm/${NVM_VERSION}/install.sh" | bash
+fi
+# shellcheck source=/dev/null
+. "$NVM_DIR/nvm.sh"
+nvm install "$NODE_VERSION"
+nvm use "$NODE_VERSION"
+
+echo "--- Installing jq 1.7.1"
+JQ_VERSION="1.7.1"
+if ! jq --version 2>/dev/null | grep -q "$JQ_VERSION"; then
+  mkdir -p "$HOME/.local/bin"
+  curl -sfL "https://github.com/jqlang/jq/releases/download/jq-${JQ_VERSION}/jq-linux-amd64" -o "$HOME/.local/bin/jq"
+  chmod +x "$HOME/.local/bin/jq"
+  export PATH="$HOME/.local/bin:$PATH"
+fi
+echo "Using jq $(jq --version)"
+
+echo "--- Installing dependencies"
+npm ci
+
+export NODE_OPTIONS="${NODE_OPTIONS:-} --max-old-space-size=4096"
+
+echo "--- Building CLI"
+npm run build
+
+# ── Configure kibana_system user (after build, ES has had ~3 min to boot) ───
+# Kibana 9.x forbids using the elastic superuser as ELASTICSEARCH_USERNAME.
+# We must use kibana_system instead, which requires setting its password via the
+# ES API. A one-shot Node.js container on the same network handles this without
+# needing the host to reach ES directly.
+
+echo "--- Waiting for node runner image pull to finish"
+wait "$NODE_PULL_PID"
+
+echo "--- Configuring kibana_system user"
+docker run \
+  --rm \
+  --network "$NETWORK_NAME" \
+  --volume "$(pwd):/workspace:ro" \
+  --env "ES_PASSWORD=${ES_PASSWORD}" \
+  "$NODE_RUNNER_IMAGE" \
+  node /workspace/.buildkite/setup-kibana.cjs
+
+# ── Start Kibana ─────────────────────────────────────────────────────────────
+
+echo "--- Waiting for Kibana image pull to finish"
+wait "$KB_PULL_PID"
+
+echo "--- Starting Kibana ${STACK_VERSION}"
+# Intentionally no --rm so crash logs are always available in cleanup.
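+# The Kibana image's entrypoint maps these well-known env vars onto kibana.yml
+# settings, e.g. XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY becomes
+# xpack.encryptedSavedObjects.encryptionKey and ELASTICSEARCH_HOSTS becomes
+# elasticsearch.hosts.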
+docker run \ + --name "$KB_CONTAINER_NAME" \ + --network "$NETWORK_NAME" \ + --network-alias kibana \ + --env "ELASTICSEARCH_HOSTS=http://elasticsearch:9200" \ + --env "ELASTICSEARCH_USERNAME=kibana_system" \ + --env "ELASTICSEARCH_PASSWORD=${ES_PASSWORD}" \ + --env "XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=${KIBANA_ENCRYPTION_KEY}" \ + --env "XPACK_REPORTING_ENCRYPTIONKEY=${KIBANA_ENCRYPTION_KEY}" \ + --env "XPACK_SECURITY_ENCRYPTIONKEY=${KIBANA_ENCRYPTION_KEY}" \ + --detach \ + "$KB_IMAGE" + +# Fetch container IPs immediately after starting — Docker assigns them before +# the main process runs. We pass these to the test runner as a fallback in case +# the embedded DNS server (127.0.0.11) is unavailable on this agent. +ES_IP=$(docker inspect "$ES_CONTAINER_NAME" \ + --format="{{(index .NetworkSettings.Networks \"$NETWORK_NAME\").IPAddress}}") +KB_IP=$(docker inspect "$KB_CONTAINER_NAME" \ + --format="{{(index .NetworkSettings.Networks \"$NETWORK_NAME\").IPAddress}}") +echo "Container IPs — ES: ${ES_IP}, Kibana: ${KB_IP}" + +# ── Run health checks and tests inside the Docker network ─────────────────── + +echo "--- Running tests inside Docker network" +docker run \ + --name "$TEST_RUNNER_NAME" \ + --network "$NETWORK_NAME" \ + --rm \ + --volume "$(pwd):/workspace" \ + --workdir /workspace \ + --env "ES_PASSWORD=${ES_PASSWORD}" \ + --env "ES_IP=${ES_IP}" \ + --env "KB_IP=${KB_IP}" \ + "$NODE_RUNNER_IMAGE" \ + bash /workspace/.buildkite/run-kb-tests-runner.sh diff --git a/.buildkite/setup-kibana.cjs b/.buildkite/setup-kibana.cjs new file mode 100644 index 00000000..7b1aadf6 --- /dev/null +++ b/.buildkite/setup-kibana.cjs @@ -0,0 +1,78 @@ +// Copyright Elasticsearch B.V. and contributors +// SPDX-License-Identifier: Apache-2.0 +// +// Runs inside a Node.js container on the same Docker network as Elasticsearch. +// Waits for ES to be fully ready (cluster health + security index), then sets +// the kibana_system password so Kibana can connect as that user. +const http = require('http'); + +const ES_PASSWORD = process.env.ES_PASSWORD || 'changeme'; +const KB_PASSWORD = process.env.KB_PASSWORD || ES_PASSWORD; +const auth = 'Basic ' + Buffer.from(`elastic:${ES_PASSWORD}`).toString('base64'); + +function request(method, path, body) { + return new Promise((resolve, reject) => { + const data = body ? JSON.stringify(body) : null; + const req = http.request( + { + hostname: 'elasticsearch', + port: 9200, + path, + method, + headers: { + Authorization: auth, + ...(data && { 'Content-Type': 'application/json', 'Content-Length': Buffer.byteLength(data) }), + }, + }, + res => { + let raw = ''; + res.on('data', chunk => (raw += chunk)); + res.on('end', () => { + try { resolve({ status: res.statusCode, body: JSON.parse(raw) }); } + catch { resolve({ status: res.statusCode, body: raw }); } + }); + } + ); + req.setTimeout(5000, () => { req.destroy(new Error('request timed out')); }); + req.on('error', reject); + if (data) req.write(data); + req.end(); + }); +} + +function delay(ms) { return new Promise(r => setTimeout(r, ms)); } + +async function retry(fn, label, maxRetries = 180, intervalMs = 2000) { + for (let i = 0; i < maxRetries; i++) { + try { + const ok = await fn(); + if (ok) return; + } catch { /* not ready yet */ } + if (i > 0 && i % 15 === 0) console.log(` still waiting for ${label}... 
(${i}/${maxRetries})`); + await delay(intervalMs); + } + throw new Error(`${label} did not become ready in time`); +} + +async function main() { + console.log('Waiting for Elasticsearch cluster health...'); + await retry(async () => { + const { body } = await request('GET', '/_cluster/health'); + return ['green', 'yellow'].includes(body.status); + }, 'ES cluster health'); + console.log('ES cluster is up'); + + console.log('Waiting for ES security index...'); + await retry(async () => { + const { status } = await request('POST', '/_security/api_key', { name: 'setup-check', expiration: '1m' }); + return status >= 200 && status < 300; + }, 'ES security index', 60); + console.log('ES security index is ready'); + + console.log('Setting kibana_system password...'); + const { status } = await request('POST', '/_security/user/kibana_system/_password', { password: KB_PASSWORD }); + if (status < 200 || status >= 300) throw new Error(`HTTP ${status}`); + console.log('kibana_system password configured'); +} + +main().catch(e => { console.error('Setup failed:', e.message); process.exit(1); }); diff --git a/.gitleaks.toml b/.gitleaks.toml new file mode 100644 index 00000000..d8877be2 --- /dev/null +++ b/.gitleaks.toml @@ -0,0 +1,11 @@ +[extend] +# Extend the default gitleaks ruleset. +useDefault = true + +[allowlist] +description = "Dummy values used only in CI test scripts — not real secrets" +regexTarget = "line" +regexes = [ + # Dummy Kibana encryption key in run-kb-tests.sh + '''xP9mfMqnRrNHmSmzPoBtLQvLFzYdHxKj''', +] diff --git a/package.json b/package.json index 55d37cb1..ae2de96b 100644 --- a/package.json +++ b/package.json @@ -17,6 +17,7 @@ "codegen:functional": "npx tsx codegen/functional/index.ts --tests-dir ../elasticsearch-clients-tests/tests", "test:functional:es": "bash test/functional/es/run.sh", "test:functional:cloud": "bash test/functional/cloud/smoke.sh", + "test:functional:kb": "bash test/functional/kb/run.sh", "test:license": "license-checker --production", "test:spdx": "./scripts/check-spdx", "generate:notice": "node scripts/generate-notice.mjs", diff --git a/src/kb/apis/alerting.ts b/src/kb/apis/alerting.ts index 20f05ec9..82aeff8e 100644 --- a/src/kb/apis/alerting.ts +++ b/src/kb/apis/alerting.ts @@ -54,7 +54,7 @@ export const alertingApis: KbApiDefinition[] = [ { name: "schedule", type: "object", description: "The check interval, which specifies how frequently the rule conditions are checked.", required: true }, { name: "tags", type: "array", description: "The tags for the rule." }, { name: "throttle", type: "string", description: "Use the `throttle` property in the action `frequency` object instead. The throttle interval, which defines how often an alert generates repeated actions. NOTE: You cannot specify the throttle interval at both the rule and action level. If you set it at the rule level then update the rule in Kibana, it is automatically changed to use action-specific values." }, - { name: "params", type: "string", description: "The parameters for the rule." }, + { name: "params", type: "object", description: "The parameters for the rule." 
},
     ],
   },
   {
diff --git a/src/kb/apis/connectors.ts b/src/kb/apis/connectors.ts
index 0d082fe9..7d5a7dbe 100644
--- a/src/kb/apis/connectors.ts
+++ b/src/kb/apis/connectors.ts
@@ -68,8 +68,8 @@ export const connectorsApis: KbApiDefinition[] = [
     bodyParams: [
       { name: "connector_type_id", type: "string", description: "The type of connector.", required: true },
       { name: "name", type: "string", description: "The display name for the connector.", required: true },
-      { name: "config", cliFlag: "kb-config", type: "string", description: "The connector configuration details." },
-      { name: "secrets", type: "string", description: "" },
+      { name: "config", cliFlag: "kb-config", type: "object", description: "The connector configuration details." },
+      { name: "secrets", type: "object", description: "The secrets configuration for the connector. Properties vary depending on the connector type." },
     ],
   },
   {
@@ -83,8 +83,8 @@
     ],
     bodyParams: [
       { name: "name", type: "string", description: "The display name for the connector.", required: true },
-      { name: "config", cliFlag: "kb-config", type: "string", description: "The connector configuration details." },
-      { name: "secrets", type: "string", description: "" },
+      { name: "config", cliFlag: "kb-config", type: "object", description: "The connector configuration details." },
+      { name: "secrets", type: "object", description: "The secrets configuration for the connector. Properties vary depending on the connector type." },
     ],
   },
   {
@@ -97,7 +97,7 @@
       { name: "id", description: "An identifier for the connector.", required: true },
     ],
     bodyParams: [
-      { name: "params", type: "string", description: "", required: true },
+      { name: "params", type: "object", description: "The parameters of the connector run. Properties vary depending on the connector type.", required: true },
     ],
   },
   {
diff --git a/src/kb/apis/saved-objects.ts b/src/kb/apis/saved-objects.ts
index b322ac91..09a92dc2 100644
--- a/src/kb/apis/saved-objects.ts
+++ b/src/kb/apis/saved-objects.ts
@@ -26,6 +26,7 @@
       { name: "search", type: "string", description: "Search for documents to export using the Elasticsearch Simple Query String syntax." },
       { name: "type", type: "string", description: "The saved object types to include in the export. Use `*` to export all the types. Valid options depend on enabled plugins, but may include `visualization`, `dashboard`, `search`, `index-pattern`, `tag`, `config`, `config-global`, `lens`, `map`, `event-annotation-group`, `query`, `url`, `action`, `alert`, `alerting_rule_template`, `apm-indices`, `cases-user-actions`, `cases`, `cases-comments`, `infrastructure-monitoring-log-view`, `ml-trained-model`, `osquery-saved-query`, `osquery-pack`, `osquery-pack-asset`." },
     ],
+    responseType: "ndjson",
   },
   {
     name: "post-saved-objects-import",
@@ -38,5 +39,25 @@
       { name: "createNewCopies", type: "boolean", description: "Creates copies of saved objects, regenerates each object ID, and resets the origin. When used, potential conflict errors are avoided. NOTE: This option cannot be used with the `overwrite` and `compatibilityMode` options." },
       { name: "compatibilityMode", type: "boolean", description: "Applies various adjustments to the saved objects that are being imported to maintain compatibility between different Kibana versions. Use this option only if you encounter issues with imported saved objects. NOTE: This option cannot be used with the `createNewCopies` option." },
     ],
+    bodyParams: [
+      { name: "file", type: "string", description: "A file exported using the export API. Changing the contents of the exported file in any way before importing it can cause errors, crashes or data loss. NOTE: The `savedObjects.maxImportExportSize` configuration setting limits the number of saved objects which may be included in this file. Similarly, the `savedObjects.maxImportPayloadBytes` setting limits the overall size of the file that can be imported.", required: true },
+    ],
+    requestType: "multipart",
+  },
+  {
+    name: "post-saved-objects-resolve-import-errors",
+    namespace: "saved-objects",
+    description: "Resolve import errors",
+    method: "POST",
+    path: "/api/saved_objects/_resolve_import_errors",
+    queryParams: [
+      { name: "createNewCopies", type: "boolean", description: "Creates copies of saved objects, regenerates each object ID, and resets the origin." },
+      { name: "compatibilityMode", type: "boolean", description: "Applies adjustments to maintain compatibility between different Kibana versions." },
+    ],
+    bodyParams: [
+      { name: "file", type: "string", description: "A file exported using the export API.", required: true },
+      { name: "retries", type: "string", description: "The retry operations, which can specify how to resolve different types of errors.", required: true },
+    ],
+    requestType: "multipart",
   },
 ]
diff --git a/src/kb/request-builder.ts b/src/kb/request-builder.ts
index 4871208d..8de8b18a 100644
--- a/src/kb/request-builder.ts
+++ b/src/kb/request-builder.ts
@@ -27,11 +27,17 @@ export function buildKibanaRequestParams (
   const path = interpolatePath(def, input)
   const querystring = buildQuerystring(def, input)
-  const body = collectBody(def, input)
 
   const params: KibanaRequestParams = { method: def.method, path }
   if (Object.keys(querystring).length > 0) params.querystring = querystring
-  if (body !== undefined) params.body = body
+
+  if (def.requestType === 'multipart') {
+    const fields = collectMultipartFields(def, input)
+    if (fields != null) params.multipartFields = fields
+  } else {
+    const body = collectBody(def, input)
+    if (body !== undefined) params.body = body
+  }
 
   return params
 }
@@ -72,6 +78,24 @@
   return qs
 }
 
+/**
+ * Collects multipart form fields from body params.
+ * Each body param value is treated as a string (file path or literal value).
+ * Returns `undefined` when no fields are present.
+ */
+function collectMultipartFields (
+  def: KbApiDefinition,
+  input: Record<string, unknown>
+): Record<string, string> | undefined {
+  const fields: Record<string, string> = {}
+  for (const param of def.bodyParams ?? []) {
+    const key = param.cliFlag ?? param.name
+    const value = input[key]
+    if (value !== undefined) fields[param.name] = String(value)
+  }
+  return Object.keys(fields).length === 0 ? undefined : fields
+}
+
 /**
  * Collects request body fields from body params.
  * Returns `undefined` when no body fields are present.
diff --git a/src/kb/types.ts b/src/kb/types.ts
index f5fbd6ea..731774e8 100644
--- a/src/kb/types.ts
+++ b/src/kb/types.ts
@@ -70,6 +70,10 @@ export interface KbApiDefinition {
   pathParams?: KbPathParam[]
   queryParams?: KbQueryParam[]
   bodyParams?: KbBodyParam[]
+  /** When 'multipart', the request body must be sent as multipart/form-data. */
+  requestType?: 'multipart'
+  /** When 'ndjson', the success response is newline-delimited JSON (parsed into an array). */
+  responseType?: 'ndjson'
 }
 
 const VALID_NAME = /^[a-z0-9][a-z0-9-]*$/
diff --git a/src/lib/kibana-client.ts b/src/lib/kibana-client.ts
index 015ad654..113e72d9 100644
--- a/src/lib/kibana-client.ts
+++ b/src/lib/kibana-client.ts
@@ -3,6 +3,8 @@
  * SPDX-License-Identifier: Apache-2.0
  */
 
+import fs from 'node:fs'
+import path from 'node:path'
 import { getResolvedConfig } from '../config/store.ts'
 import { isLoopbackUrl } from './is-loopback-host.ts'
 
@@ -16,6 +18,8 @@ export interface KibanaRequestParams {
   path: string
   querystring?: Record<string, string>
   body?: unknown
+  /** When set, the request is sent as multipart/form-data. Keys map to form field names; string values that resolve to an existing file path are sent as file uploads. */
+  multipartFields?: Record<string, string>
 }
 
 /**
@@ -31,12 +35,14 @@
  */
 export class KibanaClient {
   readonly baseUrl: string
-  private readonly authHeader: string
+  private readonly authHeader: string | undefined
   private _fetch: typeof fetch = globalThis.fetch
 
-  constructor (baseUrl: string, auth: { api_key: string } | { username: string; password: string }) {
+  constructor (baseUrl: string, auth?: { api_key: string } | { username: string; password: string }) {
     this.baseUrl = baseUrl.replace(/\/+$/, '')
-    if ('api_key' in auth) {
+    if (auth == null) {
+      this.authHeader = undefined
+    } else if ('api_key' in auth) {
       this.authHeader = `ApiKey ${auth.api_key}`
     } else {
       const encoded = Buffer.from(`${auth.username}:${auth.password}`).toString('base64')
@@ -64,9 +70,10 @@
     const method = params.method.toUpperCase()
 
     const headers: Record<string, string> = {
-      'Authorization': this.authHeader,
       'Accept': 'application/json',
-      'Content-Type': 'application/json',
+    }
+    if (this.authHeader != null) {
+      headers['Authorization'] = this.authHeader
     }
 
     // Kibana requires kbn-xsrf for all state-mutating requests to protect against CSRF
@@ -76,7 +83,21 @@
 
     const init: RequestInit = { method, headers, redirect: 'error' }
 
-    if (params.body !== undefined) {
+    if (params.multipartFields != null) {
+      // Send as multipart/form-data; do NOT set Content-Type manually (fetch sets it with the boundary)
+      const form = new FormData()
+      for (const [field, value] of Object.entries(params.multipartFields)) {
+        const resolved = path.resolve(value)
+        if (fs.existsSync(resolved)) {
+          const blob = new Blob([fs.readFileSync(resolved)], { type: 'application/octet-stream' })
+          form.append(field, blob, path.basename(resolved))
+        } else {
+          form.append(field, value)
+        }
+      }
+      init.body = form
+    } else if (params.body !== undefined) {
+      headers['Content-Type'] = 'application/json'
       init.body = JSON.stringify(params.body)
     }
 
@@ -88,7 +109,15 @@
     }
 
     const text = await response.text()
-    return text.length > 0 ? JSON.parse(text) : {}
+    if (text.length === 0) return {}
+
+    // application/x-ndjson: parse each non-empty line as a JSON object
+    const contentType = response.headers.get('content-type') ?? ''
+    if (contentType.includes('ndjson')) {
+      return text.split('\n').filter((l) => l.trim().length > 0).map((l) => JSON.parse(l))
+    }
+
+    return JSON.parse(text)
   }
 
   /**
@@ -123,19 +152,15 @@
   }
 
   const { url, auth } = kb
-  const authRecord = auth as Record<string, unknown>
-
-  let typedAuth: { api_key: string } | { username: string; password: string }
-  if (typeof authRecord['api_key'] === 'string') {
-    typedAuth = { api_key: authRecord['api_key'] }
-  } else if (typeof authRecord['username'] === 'string' && typeof authRecord['password'] === 'string') {
-    typedAuth = { username: authRecord['username'], password: authRecord['password'] }
-  } else {
-    throw new Error(
-      'missing_config: Kibana auth requires either api_key or username/password. ' +
-      'Check your .elasticrc.yml config file.'
-    )
+  const authRecord = auth as Record<string, unknown> | undefined
+
+  let typedAuth: { api_key: string } | { username: string; password: string } | undefined
+  if (typeof authRecord?.['api_key'] === 'string') {
+    typedAuth = { api_key: authRecord['api_key'] as string }
+  } else if (typeof authRecord?.['username'] === 'string' && typeof authRecord?.['password'] === 'string') {
+    typedAuth = { username: authRecord['username'] as string, password: authRecord['password'] as string }
   }
+  // auth is optional — when absent (e.g. security disabled), requests are sent without credentials
 
   _client = new KibanaClient(url, typedAuth)
   return _client
diff --git a/test/functional/kb/alerting.sh b/test/functional/kb/alerting.sh
new file mode 100755
index 00000000..5a114be2
--- /dev/null
+++ b/test/functional/kb/alerting.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+# Copyright Elasticsearch B.V. and contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Functional tests for the Kibana alerting API namespace.
+# Exercises create rule / get rule / find rules / delete rule.
+
+set -euo pipefail
+exec < /dev/null
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+REPO_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)"
+CLI="node $REPO_ROOT/dist/cli.js"
+
+RULE_ID="cli-ft-rule"
+
+# Use the built-in .es-query rule type (available in all Kibana deployments).
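+# The params encode the simplest condition that validates against the .es-query
+# schema: run `FROM * | LIMIT 1` over a 5-minute window and fire when the match
+# count is greater than 0.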
+RULE_PARAMS='{"searchType":"esqlQuery","esqlQuery":{"esql":"FROM * | LIMIT 1"},"timeWindowSize":5,"timeWindowUnit":"m","threshold":[0],"thresholdComparator":">","size":0,"timeField":"@timestamp"}' + +teardown() { + $CLI stack kb alerting delete-alerting-rule-id --id "$RULE_ID" --json >/dev/null 2>&1 || true +} +trap teardown EXIT + +# ── create ──────────────────────────────────────────────────────────── + +output=$($CLI stack kb alerting post-alerting-rule-id \ + --id "$RULE_ID" \ + --consumer "stackAlerts" \ + --name "CLI FT Rule" \ + --rule-type-id ".es-query" \ + --schedule '{"interval":"1m"}' \ + --params "$RULE_PARAMS" \ + --json 2>/tmp/cli-err.txt) \ + || { echo "FAIL: alerting create — command failed"; cat /tmp/cli-err.txt; exit 1; } +[ "$(echo "$output" | jq -r '.id')" = "$RULE_ID" ] \ + || { echo "FAIL: alerting create — id mismatch"; exit 1; } +[ "$(echo "$output" | jq -r '.name')" = "CLI FT Rule" ] \ + || { echo "FAIL: alerting create — name mismatch"; exit 1; } +[ "$(echo "$output" | jq -r '.rule_type_id')" = ".es-query" ] \ + || { echo "FAIL: alerting create — rule_type_id mismatch"; exit 1; } + +# ── get ─────────────────────────────────────────────────────────────── + +output=$($CLI stack kb alerting get-alerting-rule-id --id "$RULE_ID" --json 2>/dev/null) +[ "$(echo "$output" | jq -r '.id')" = "$RULE_ID" ] \ + || { echo "FAIL: alerting get — id mismatch"; exit 1; } +[ "$(echo "$output" | jq -r '.enabled')" = "true" ] \ + || { echo "FAIL: alerting get — rule should be enabled by default"; exit 1; } + +# ── find ────────────────────────────────────────────────────────────── + +output=$($CLI stack kb alerting get-alerting-rules-find --json 2>/dev/null) +count=$(echo "$output" | jq '[.data[] | select(.id == "'"$RULE_ID"'")] | length') +[ "$count" -eq 1 ] || { echo "FAIL: alerting find — created rule not found"; exit 1; } + +# ── delete ──────────────────────────────────────────────────────────── + +$CLI stack kb alerting delete-alerting-rule-id --id "$RULE_ID" --json >/dev/null 2>/dev/null + +output=$($CLI stack kb alerting get-alerting-rules-find --json 2>/dev/null) +count=$(echo "$output" | jq '[.data[] | select(.id == "'"$RULE_ID"'")] | length') +[ "$count" -eq 0 ] || { echo "FAIL: alerting delete — rule still present after delete"; exit 1; } + +echo "PASS: kb/alerting.sh" diff --git a/test/functional/kb/connectors.sh b/test/functional/kb/connectors.sh new file mode 100755 index 00000000..26c7af3d --- /dev/null +++ b/test/functional/kb/connectors.sh @@ -0,0 +1,79 @@ +#!/usr/bin/env bash +# Copyright Elasticsearch B.V. and contributors +# SPDX-License-Identifier: Apache-2.0 +# +# Functional tests for the Kibana connectors API namespace. +# Exercises list-types / create / get / list / delete. + +set -euo pipefail +exec < /dev/null + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../../.." 
&& pwd)" +CLI="node $REPO_ROOT/dist/cli.js" + +CONNECTOR_ID="" + +teardown() { + if [ -n "$CONNECTOR_ID" ]; then + $CLI stack kb connectors delete-actions-connector-id --id "$CONNECTOR_ID" --json >/dev/null 2>&1 || true + fi +} +trap teardown EXIT + +# ── list connector types ─────────────────────────────────────────────── + +output=$($CLI stack kb connectors get-actions-connector-types --json 2>/tmp/cli-err.txt) \ + || { echo "FAIL: connectors list-types — command failed"; cat /tmp/cli-err.txt; exit 1; } +count=$(echo "$output" | jq 'length') +[ "$count" -gt 0 ] || { echo "FAIL: connectors list-types — empty list"; exit 1; } + +# The .index connector type ships with all Kibana deployments. +index_type=$(echo "$output" | jq -r '[.[] | select(.id == ".index")] | .[0].id') +[ "$index_type" = ".index" ] \ + || { echo "FAIL: connectors list-types — .index type not found"; exit 1; } + +# ── create ──────────────────────────────────────────────────────────── + +# Use Node.js crypto to generate a UUID (Node is always available in CI). +CONNECTOR_UUID=$(node -e "process.stdout.write(require('crypto').randomUUID())") +CONNECTOR_ID="$CONNECTOR_UUID" + +output=$($CLI stack kb connectors post-actions-connector-id \ + --id "$CONNECTOR_UUID" \ + --connector-type-id ".index" \ + --name "CLI FT Index Connector" \ + --kb-config '{"index":"cli-ft-connector-*"}' \ + --json 2>/tmp/cli-err.txt) \ + || { echo "FAIL: connectors create — command failed"; cat /tmp/cli-err.txt; exit 1; } +[ "$(echo "$output" | jq -r '.id')" = "$CONNECTOR_UUID" ] \ + || { echo "FAIL: connectors create — id mismatch"; exit 1; } +[ "$(echo "$output" | jq -r '.name')" = "CLI FT Index Connector" ] \ + || { echo "FAIL: connectors create — name mismatch"; exit 1; } +[ "$(echo "$output" | jq -r '.connector_type_id')" = ".index" ] \ + || { echo "FAIL: connectors create — connector_type_id mismatch"; exit 1; } + +# ── get ─────────────────────────────────────────────────────────────── + +output=$($CLI stack kb connectors get-actions-connector-id --id "$CONNECTOR_ID" --json 2>/dev/null) +[ "$(echo "$output" | jq -r '.id')" = "$CONNECTOR_ID" ] \ + || { echo "FAIL: connectors get — id mismatch"; exit 1; } +[ "$(echo "$output" | jq -r '.name')" = "CLI FT Index Connector" ] \ + || { echo "FAIL: connectors get — name mismatch"; exit 1; } + +# ── list ────────────────────────────────────────────────────────────── + +output=$($CLI stack kb connectors get-actions-connectors --json 2>/dev/null) +count=$(echo "$output" | jq '[.[] | select(.id == "'"$CONNECTOR_ID"'")] | length') +[ "$count" -eq 1 ] || { echo "FAIL: connectors list — created connector not found"; exit 1; } + +# ── delete ──────────────────────────────────────────────────────────── + +$CLI stack kb connectors delete-actions-connector-id --id "$CONNECTOR_ID" --json >/dev/null 2>/dev/null +CONNECTOR_ID="" + +output=$($CLI stack kb connectors get-actions-connectors --json 2>/dev/null) +count=$(echo "$output" | jq '[.[] | select(.name == "CLI FT Index Connector")] | length') +[ "$count" -eq 0 ] || { echo "FAIL: connectors delete — connector still present after delete"; exit 1; } + +echo "PASS: kb/connectors.sh" diff --git a/test/functional/kb/data_views.sh b/test/functional/kb/data_views.sh new file mode 100755 index 00000000..7950c8b9 --- /dev/null +++ b/test/functional/kb/data_views.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash +# Copyright Elasticsearch B.V. and contributors +# SPDX-License-Identifier: Apache-2.0 +# +# Functional tests for the Kibana data-views API namespace. 
+# Exercises create / get / list / delete for a single data view.
+
+set -euo pipefail
+exec < /dev/null
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+REPO_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)"
+CLI="node $REPO_ROOT/dist/cli.js"
+
+VIEW_ID=""
+
+teardown() {
+  if [ -n "$VIEW_ID" ]; then
+    $CLI stack kb data-views delete-data-view-default --view-id "$VIEW_ID" --json >/dev/null 2>&1 || true
+  fi
+}
+trap teardown EXIT
+
+# ── create ──────────────────────────────────────────────────────────────
+
+output=$(echo '{"data_view":{"title":"cli-ft-dv-*","name":"cli-ft-dv"}}' \
+  | $CLI stack kb data-views create-data-view-default --json 2>/dev/null)
+# `// empty` so a failed create yields an empty string rather than "null".
+VIEW_ID=$(echo "$output" | jq -r '.data_view.id // empty')
+[ -n "$VIEW_ID" ] || { echo "FAIL: data_views create — empty id"; exit 1; }
+[ "$(echo "$output" | jq -r '.data_view.name')" = "cli-ft-dv" ] \
+  || { echo "FAIL: data_views create — name mismatch"; exit 1; }
+[ "$(echo "$output" | jq -r '.data_view.title')" = "cli-ft-dv-*" ] \
+  || { echo "FAIL: data_views create — title mismatch"; exit 1; }
+
+# ── get ─────────────────────────────────────────────────────────────────
+
+output=$($CLI stack kb data-views get-data-view-default --view-id "$VIEW_ID" --json 2>/dev/null)
+[ "$(echo "$output" | jq -r '.data_view.id')" = "$VIEW_ID" ] \
+  || { echo "FAIL: data_views get — id mismatch"; exit 1; }
+[ "$(echo "$output" | jq -r '.data_view.name')" = "cli-ft-dv" ] \
+  || { echo "FAIL: data_views get — name mismatch"; exit 1; }
+
+# ── list ────────────────────────────────────────────────────────────────
+
+output=$($CLI stack kb data-views get-all-data-views-default --json 2>/dev/null)
+count=$(echo "$output" | jq '[.data_view[] | select(.id == "'"$VIEW_ID"'")] | length')
+[ "$count" -eq 1 ] || { echo "FAIL: data_views list — created view not found in list"; exit 1; }
+
+# ── delete ──────────────────────────────────────────────────────────────
+
+$CLI stack kb data-views delete-data-view-default --view-id "$VIEW_ID" --json >/dev/null 2>/dev/null
+VIEW_ID=""
+
+output=$($CLI stack kb data-views get-all-data-views-default --json 2>/dev/null)
+count=$(echo "$output" | jq '[.data_view[] | select(.name == "cli-ft-dv")] | length')
+[ "$count" -eq 0 ] || { echo "FAIL: data_views delete — view still present after delete"; exit 1; }
+
+echo "PASS: kb/data_views.sh"
diff --git a/test/functional/kb/run.sh b/test/functional/kb/run.sh
new file mode 100755
index 00000000..c89097a0
--- /dev/null
+++ b/test/functional/kb/run.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+# Copyright Elasticsearch B.V. and contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Runner for Kibana functional tests.
+# Each test file is run in a subshell; failures are collected and reported.
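+# On a failure the summary looks roughly like:
+#   Results: 4 passed, 1 failed
+#   Failures:
+#     FAIL: connectors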
+
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PASSED=0
+FAILED=0
+ERRORS=""
+
+run_test() {
+  local name="$1"
+  local file="$SCRIPT_DIR/$2"
+  if OUTPUT=$(bash "$file" 2>&1); then
+    PASSED=$((PASSED + 1))
+    echo "PASS: $name"
+  else
+    FAILED=$((FAILED + 1))
+    ERRORS="$ERRORS\n  FAIL: $name"
+    echo "FAIL: $name"
+    echo "$OUTPUT" | tail -5
+  fi
+}
+
+run_test "data_views" "data_views.sh"
+run_test "spaces" "spaces.sh"
+run_test "alerting" "alerting.sh"
+run_test "connectors" "connectors.sh"
+run_test "saved_objects" "saved_objects.sh"
+
+echo ""
+echo "================================"
+echo "Results: $PASSED passed, $FAILED failed"
+echo "================================"
+if [ "$FAILED" -gt 0 ]; then
+  printf "Failures:%b\n" "$ERRORS"
+  exit 1
+fi
diff --git a/test/functional/kb/saved_objects.sh b/test/functional/kb/saved_objects.sh
new file mode 100755
index 00000000..493d6176
--- /dev/null
+++ b/test/functional/kb/saved_objects.sh
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+# Copyright Elasticsearch B.V. and contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Functional tests for the Kibana saved-objects API namespace.
+# Exercises export (ndjson response) and import (multipart/form-data request)
+# using the built-in 'config' saved-object type which is always present.
+
+set -euo pipefail
+exec < /dev/null
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+REPO_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)"
+CLI="node $REPO_ROOT/dist/cli.js"
+
+EXPORT_FILE="/tmp/cli-ft-so-export-$$.ndjson"
+
+teardown() {
+  rm -f "$EXPORT_FILE"
+}
+trap teardown EXIT
+
+# ── export ──────────────────────────────────────────────────────────────
+# Export returns application/x-ndjson; the CLI parses it into a JSON array.
+
+output=$($CLI stack kb saved-objects post-saved-objects-export \
+  --type "config" --exclude-export-details true --json 2>/dev/null)
+
+# Response must be a non-empty JSON array.
+arr_type=$(echo "$output" | jq -r 'type')
+[ "$arr_type" = "array" ] || { echo "FAIL: saved_objects export — response is not an array (got $arr_type)"; exit 1; }
+
+count=$(echo "$output" | jq 'length')
+[ "$count" -gt 0 ] || { echo "FAIL: saved_objects export — empty array"; exit 1; }
+
+# Each element must have a type field. (`// empty` so a missing field yields an
+# empty string rather than the literal "null".)
+first_type=$(echo "$output" | jq -r '.[0].type // empty')
+[ -n "$first_type" ] || { echo "FAIL: saved_objects export — first element missing type field"; exit 1; }
+
+# ── import ──────────────────────────────────────────────────────────────
+# Re-serialise the JSON array back to ndjson (one compact object per line)
+# then send as multipart/form-data via --file.
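+# Each ndjson line is one saved object, shaped roughly like:
+#   {"id":"...","type":"config","attributes":{...},"references":[...]}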
+ +echo "$output" | jq -c '.[]' > "$EXPORT_FILE" +ndjson_lines=$(wc -l < "$EXPORT_FILE" | tr -d ' ') +[ "$ndjson_lines" -gt 0 ] || { echo "FAIL: saved_objects import — ndjson file is empty"; exit 1; } + +import_result=$($CLI stack kb saved-objects post-saved-objects-import \ + --overwrite true --file "$EXPORT_FILE" --json 2>/dev/null) +success=$(echo "$import_result" | jq -r '.success') +[ "$success" = "true" ] || { echo "FAIL: saved_objects import — success != true (got $success)"; exit 1; } + +success_count=$(echo "$import_result" | jq -r '.successCount') +[ "$success_count" -gt 0 ] || { echo "FAIL: saved_objects import — successCount = 0"; exit 1; } + +echo "PASS: kb/saved_objects.sh" diff --git a/test/functional/kb/spaces.sh b/test/functional/kb/spaces.sh new file mode 100755 index 00000000..d676d24f --- /dev/null +++ b/test/functional/kb/spaces.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash +# Copyright Elasticsearch B.V. and contributors +# SPDX-License-Identifier: Apache-2.0 +# +# Functional tests for the Kibana spaces API namespace. +# Exercises create / get / list / delete for a non-default space. + +set -euo pipefail +exec < /dev/null + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)" +CLI="node $REPO_ROOT/dist/cli.js" + +SPACE_ID="cli-ft-space" + +teardown() { + $CLI stack kb spaces delete-spaces-space-id --id "$SPACE_ID" --json >/dev/null 2>&1 || true +} +trap teardown EXIT + +# ── create ──────────────────────────────────────────────────────────── + +output=$($CLI stack kb spaces post-spaces-space \ + --id "$SPACE_ID" --name "CLI FT Space" --description "Created by functional test" \ + --json 2>/dev/null) +[ "$(echo "$output" | jq -r '.id')" = "$SPACE_ID" ] \ + || { echo "FAIL: spaces create — id mismatch"; exit 1; } +[ "$(echo "$output" | jq -r '.name')" = "CLI FT Space" ] \ + || { echo "FAIL: spaces create — name mismatch"; exit 1; } + +# ── get ─────────────────────────────────────────────────────────────── + +output=$($CLI stack kb spaces get-spaces-space-id --id "$SPACE_ID" --json 2>/dev/null) +[ "$(echo "$output" | jq -r '.id')" = "$SPACE_ID" ] \ + || { echo "FAIL: spaces get — id mismatch"; exit 1; } +[ "$(echo "$output" | jq -r '.description')" = "Created by functional test" ] \ + || { echo "FAIL: spaces get — description mismatch"; exit 1; } + +# ── list ────────────────────────────────────────────────────────────── + +# include_authorized_purposes is required by the generated schema even though +# the Kibana docs treat it as optional; pass false to satisfy validation. +output=$($CLI stack kb spaces get-spaces-space --include-authorized-purposes false --json 2>/dev/null) +count=$(echo "$output" | jq '[.[] | select(.id == "'"$SPACE_ID"'")] | length') +[ "$count" -eq 1 ] || { echo "FAIL: spaces list — created space not found"; exit 1; } + +# Default space must always be present. 
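+# (Kibana refuses to delete the default space, so its absence here would point
+# at a listing or parsing bug rather than test pollution.)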
+default_count=$(echo "$output" | jq '[.[] | select(.id == "default")] | length')
+[ "$default_count" -eq 1 ] || { echo "FAIL: spaces list — default space missing"; exit 1; }
+
+# ── delete ──────────────────────────────────────────────────────────────
+
+$CLI stack kb spaces delete-spaces-space-id --id "$SPACE_ID" --json >/dev/null 2>/dev/null
+
+output=$($CLI stack kb spaces get-spaces-space --include-authorized-purposes false --json 2>/dev/null)
+count=$(echo "$output" | jq '[.[] | select(.id == "'"$SPACE_ID"'")] | length')
+[ "$count" -eq 0 ] || { echo "FAIL: spaces delete — space still present after delete"; exit 1; }
+
+echo "PASS: kb/spaces.sh"
diff --git a/test/kb/kibana-client.test.ts b/test/kb/kibana-client.test.ts
index 64e734bd..d43f154c 100644
--- a/test/kb/kibana-client.test.ts
+++ b/test/kb/kibana-client.test.ts
@@ -52,6 +52,16 @@ function makeCapturingFetch (status = 200, body = '{}'): {
 // ---------------------------------------------------------------------------
 // Constructor / auth header
 // ---------------------------------------------------------------------------
 
+describe('KibanaClient — no auth', () => {
+  it('omits Authorization header when no auth is provided', async () => {
+    const { fetch, calls } = makeCapturingFetch()
+    const client = new KibanaClient('https://kb.example.com')
+    client._testSetFetch(fetch)
+    await client.request({ method: 'GET', path: '/api/status' })
+    assert.equal((calls[0]!.init.headers as Record<string, string>)['Authorization'], undefined)
+  })
+})
+
 describe('KibanaClient — API key auth', () => {
   it('sets Authorization header to ApiKey <key>', async () => {
     const { fetch, calls } = makeCapturingFetch()
@@ -222,15 +232,11 @@
     )
   })
 
-  it('throws missing_config when auth is missing api_key and credentials', () => {
+  it('returns an unauthenticated client when auth block is empty or absent', () => {
     setResolvedConfig(makeConfig({ kibana: { url: 'https://kb.example.com', auth: {} as never } }))
-    assert.throws(
-      () => getKibanaClient(),
-      (err: Error) => {
-        assert.ok(err.message.includes('missing_config'))
-        return true
-      }
-    )
+    const client = getKibanaClient()
+    assert.ok(client instanceof KibanaClient)
+    assert.equal(client.baseUrl, 'https://kb.example.com')
   })
 
   it('returns a KibanaClient instance configured with api_key auth', () => {