diff --git a/AGENTS.md b/AGENTS.md index 4bb5a61f..7e38b495 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -893,84 +893,59 @@ mock.module("./some-module", () => ({ ### Architecture - -* **api-client.ts split into domain modules under src/lib/api/**: The original monolithic \`src/lib/api-client.ts\` (1,977 lines) was split into 12 focused domain modules under \`src/lib/api/\`: infrastructure.ts (shared helpers, types, raw requests), organizations.ts, projects.ts, teams.ts, repositories.ts, issues.ts, events.ts, traces.ts, logs.ts, seer.ts, trials.ts, users.ts. The original \`api-client.ts\` was converted to a ~100-line barrel re-export file preserving all existing import paths. The \`biome.jsonc\` override for \`noBarrelFile\` already includes \`api-client.ts\`. When adding new API functions, place them in the appropriate domain module under \`src/lib/api/\`, not in the barrel file. + +* **AsyncIterable streaming for SDK blocked by four structural concerns**: AsyncIterable streaming for SDK implemented via AsyncChannel push/pull pattern. \`src/lib/async-channel.ts\` provides a dual-queue channel: producer calls \`push()\`/\`close()\`/\`error()\`, consumer iterates via \`for await...of\`. \`break\` triggers \`onReturn\` callback for cleanup. \`executeWithStream()\` in \`sdk-invoke.ts\` runs the command in background, pipes \`captureObject\` calls to the channel, and returns the channel immediately. Streaming detection: \`hasStreamingFlag()\` checks for \`--refresh\`/\`--follow\`/\`-f\`. \`buildInvoker\` accepts \`meta.streaming\` flag; \`buildRunner\` auto-detects from args. Abort wiring: \`AbortController\` created per stream, signal placed on fake \`process.abortSignal\`, \`channel.onReturn\` calls \`controller.abort()\`. Both \`log/list.ts\` and \`dashboard/view.ts\` check \`this.process?.abortSignal\` alongside SIGINT. Codegen generates callable interface overloads for streaming commands. 
- -* **Bun compiled binary sourcemap options and size impact**: Binary build (\`script/build.ts\`) uses two steps: (1) \`Bun.build()\` produces \`dist-bin/bin.js\` + \`.map\` with \`sourcemap: "linked"\` and minification. (2) \`Bun.build()\` with \`compile: true\` produces native binary — no sourcemap embedded. Bun's compiled binaries use \`/$bunfs/root/bin.js\` as the virtual path in stack traces. Sourcemap upload must use \`--url-prefix '/$bunfs/root/'\` so Sentry can match frames. The upload runs \`sentry-cli sourcemaps inject dist-bin/\` first (adds debug IDs), then uploads both JS and map. Bun's compile step strips comments (including \`//# debugId=\`), but debug ID matching still works via the injected runtime snippet + URL prefix matching. Size: +0.04 MB gzipped vs +2.30 MB for inline sourcemaps. Without \`SENTRY\_AUTH\_TOKEN\`, upload is skipped gracefully. + +* **Bundle uses esbuild with bun:sqlite polyfill plugin for Node.js compatibility**: \`script/bundle.ts\` uses esbuild to produce \`dist/index.cjs\` from \`src/index.ts\`. A \`bunSqlitePlugin\` replaces \`bun:sqlite\` imports with a polyfill. Build defines \`SENTRY\_CLI\_VERSION\` and \`SENTRY\_CLIENT\_ID\_BUILD\`, externalizes \`node:\*\` builtins. \`sentrySourcemapPlugin\` handles debug ID injection and sourcemap upload. After the main build, writes: (1) \`dist/bin.cjs\` — CLI wrapper with shebang/Node version check/warning suppression, (2) \`dist/index.d.cts\` — type declarations read from pre-built \`src/sdk.generated.d.cts\`. Both \`sdk.generated.\*\` files are gitignored and regenerated via \`generate:sdk\` script chained before \`bundle\` in \`package.json\`. Debug IDs solve sourcemap deduplication between npm bundle and bun compile builds. - -* **Auth token env var override pattern: SENTRY\_AUTH\_TOKEN > SENTRY\_TOKEN > SQLite**: Auth in \`src/lib/db/auth.ts\` follows layered precedence: \`SENTRY\_AUTH\_TOKEN\` > \`SENTRY\_TOKEN\` > SQLite OAuth token. 
\`getEnvToken()\` trims env vars (empty/whitespace = unset). \`AuthSource\` tracks provenance. \`ENV\_SOURCE\_PREFIX = "env:"\` — use \`.length\` not hardcoded 4. Env tokens bypass refresh/expiry. \`isEnvTokenActive()\` guards auth commands. Logout must NOT clear stored auth when env token active. These functions stay in \`db/auth.ts\` despite not touching DB because they're tightly coupled with token retrieval. + +* **CLI logic extracted from bin.ts into cli.ts for shared entry points**: \`src/cli.ts\` contains the full CLI runner extracted from \`bin.ts\`: \`runCompletion()\` (shell completion fast path), \`runCli()\` (full CLI with middleware — auto-auth, seer trial, unknown command telemetry), and \`startCli()\` (top-level dispatch). All functions are exported, no top-level execution. \`src/bin.ts\` is a thin ~30-line wrapper for bun compile that registers EPIPE/EIO stream error handlers and calls \`startCli()\`. The npm bin wrapper (\`dist/bin.cjs\`) is a ~300-byte generated script that \`require('./index.cjs').\_cli()\`. Both entry points share the same CLI logic via \`cli.ts\`. - -* **CLI telemetry DSN is public write-only — safe to embed in install script**: The CLI's Sentry DSN (\`SENTRY\_CLI\_DSN\` in \`src/lib/constants.ts\`) is a public write-only ingest key already baked into every binary. Safe to hardcode in install scripts. Opt-out: \`SENTRY\_CLI\_NO\_TELEMETRY=1\`. + +* **getConfigDir and getAuthToken read global process.env directly**: \`src/lib/env.ts\` provides a module-level env registry: \`getEnv()\` defaults to \`process.env\`, \`setEnv(env)\` swaps it. Library entry (\`src/index.ts\`) calls \`setEnv({ ...process.env, ...overrides })\` before running commands, restores in \`finally\`. All ~14 files that previously read \`process.env.SENTRY\_\*\` directly now use \`getEnv().SENTRY\_\*\`. 
Key files ported: \`db/auth.ts\`, \`db/index.ts\`, \`db/schema.ts\`, \`constants.ts\`, \`resolve-target.ts\`, \`telemetry.ts\`, \`formatters/plain-detect.ts\`, \`sentry-url-parser.ts\` (which also WRITES to env), \`logger.ts\`, \`response-cache.ts\`, \`api/infrastructure.ts\`, \`dsn/env.ts\`, \`version-check.ts\`, \`oauth.ts\`. CLI mode never calls \`setEnv()\` so behavior is unchanged. Tests using \`useTestConfigDir()\` continue to work since \`getEnv()\` defaults to \`process.env\`. - -* **cli.sentry.dev is served from gh-pages branch via GitHub Pages**: \`cli.sentry.dev\` is served from gh-pages branch via GitHub Pages. Craft's gh-pages target runs \`git rm -r -f .\` before extracting docs — persist extra files via \`postReleaseCommand\` in \`.craft.yml\`. Install script supports \`--channel nightly\`, downloading from the \`nightly\` release tag directly. version.json is only used by upgrade/version-check flow. + +* **Library API: variadic sentry() function with last-arg options detection**: \`createSentrySDK(options?)\` in \`src/index.ts\` is the sole public API. Returns a typed SDK object with methods for every CLI command plus \`run()\` escape hatch. \`SentryOptions\` in \`src/lib/sdk-types.ts\`: \`token?\`, \`text?\` (run-only), \`cwd?\`, \`url?\` (self-hosted base URL → \`SENTRY\_HOST\`), \`org?\` (default org → \`SENTRY\_ORG\`), \`project?\` (default project → \`SENTRY\_PROJECT\`). Env isolation via \`buildIsolatedEnv(options)\` helper in \`sdk-invoke.ts\` — shared by both \`buildInvoker\` and \`buildRunner\`, maps each option to its env var. Zero-copy \`captureObject\` return, \`OutputError\` → data recovery. Default JSON output via \`SENTRY\_OUTPUT\_FORMAT=json\`. Non-zero exit throws \`SentryError\` with \`.exitCode\` and \`.stderr\`. - -* **Nightly delta upgrade buildNightlyPatchGraph fetches ALL patch tags — O(N) HTTP calls**: Delta upgrade in \`src/lib/delta-upgrade.ts\` supports stable (GitHub Releases) and nightly (GHCR) channels. 
\`filterAndSortChainTags\` filters \`patch-\*\` tags by version range using \`Bun.semver.order()\`. GHCR uses \`fetchWithRetry\` (10s timeout + 1 retry; blobs 30s) with optional \`signal?: AbortSignal\` combined via \`AbortSignal.any()\`. \`isExternalAbort(error, signal)\` skips retries for external aborts — critical for background prefetch. Patches cached to \`~/.sentry/patch-cache/\` (file-based, 7-day TTL). \`loadCachedChain\` stitches patches for multi-hop offline upgrades. + +* **Library mode telemetry strips all global-polluting Sentry integrations**: When \`initSentry(enabled, { libraryMode: true })\` is called, the Sentry SDK initializes without integrations that pollute the host process. \`LIBRARY\_EXCLUDED\_INTEGRATIONS\` extends the base set with: \`OnUncaughtException\`, \`OnUnhandledRejection\`, \`ProcessSession\` (process listeners), \`Http\`/\`NodeFetch\` (trace header injection), \`FunctionToString\` (wraps \`Function.prototype.toString\`), \`ChildProcess\`/\`NodeContext\`. Also disables \`enableLogs\` and \`sendClientReports\` (both use timers/\`beforeExit\`), and skips \`process.on('beforeExit')\` handler registration. Keeps pure integrations: \`eventFiltersIntegration\`, \`linkedErrorsIntegration\`. Library entry manually calls \`client.flush(3000)\` after command completion (both success and error paths via \`flushTelemetry()\` helper). Only unavoidable global: \`globalThis.\_\_SENTRY\_\_\[SDK\_VERSION]\`. - -* **npm bundle requires Node.js >= 22 due to node:sqlite polyfill**: The npm package (dist/bin.cjs) requires Node.js >= 22 because the bun:sqlite polyfill uses \`node:sqlite\`. A runtime version guard in the esbuild banner catches this early. When writing esbuild banner strings in TS template literals, double-escape: \`\\\\\\\n\` in TS → \`\\\n\` in output → newline at runtime. Single \`\\\n\` produces a literal newline inside a JS string, causing SyntaxError. 
- - -* **Numeric issue ID resolution returns org:undefined despite API success**: Numeric issue ID resolution in \`resolveNumericIssue()\`: (1) try DSN/env/config for org, (2) if found use \`getIssueInOrg(org, id)\` with region routing, (3) else fall back to unscoped \`getIssue(id)\`, (4) extract org from \`issue.permalink\` via \`parseSentryUrl\` as final fallback. \`parseSentryUrl\` handles path-based (\`/organizations/{org}/...\`) and subdomain-style URLs. \`matchSubdomainOrg()\` filters region subdomains by requiring slug length > 2. Self-hosted uses path-based only. - - -* **Seer trial prompt uses middleware layering in bin.ts error handling chain**: Error recovery middlewares in \`bin.ts\` are layered: \`main() → executeWithAutoAuth() → executeWithSeerTrialPrompt() → runCommand()\`. Seer trial prompts (for \`no\_budget\`/\`not\_enabled\`) caught by inner wrapper; auth errors bubble to outer. Auth retry goes through full chain. Trial API: \`GET /api/0/customers/{org}/\` → \`productTrials\[]\` (prefer \`seerUsers\`, fallback \`seerAutofix\`). Start: \`PUT /api/0/customers/{org}/product-trial/\`. SaaS-only; self-hosted 404s gracefully. \`ai\_disabled\` excluded. \`startSeerTrial\` accepts \`category\` from trial object — don't hardcode. - - -* **SQLite DB functions are synchronous — async signatures are historical artifacts**: All \`src/lib/db/\` functions do synchronous SQLite operations (both \`bun:sqlite\` and the \`node:sqlite\` polyfill's \`DatabaseSync\` are sync). Many functions still have \`async\` signatures — this is a historical artifact from PR #89 which migrated config storage from JSON files (using async \`Bun.file().text()\` / \`Bun.write()\`) to SQLite. The function signatures were preserved to minimize diff size and never cleaned up. These can safely be converted to synchronous. 
Exceptions that ARE legitimately async: \`clearAuth()\` (cache dir cleanup), \`getCachedDetection()\`/\`getCachedProjectRoot()\`/\`setCachedProjectRoot()\` (stat for mtime), \`refreshToken()\`/\`performTokenRefresh()\` (HTTP calls). + +* **Typed SDK uses direct Command.loader() invocation bypassing Stricli dispatch**: \`createSentrySDK(options?)\` in \`src/index.ts\` builds a typed namespace API (\`sdk.org.list()\`, \`sdk.issue.view()\`) generated by \`script/generate-sdk.ts\`. At runtime, \`src/lib/sdk-invoke.ts\` resolves commands via Stricli route tree, caches \`Command\` objects, and calls \`command.loader()\` directly — bypassing string dispatch and flag parsing. The standalone variadic \`sentry()\` function has been removed. Typed SDK methods are the primary path, with \`sdk.run()\` as an escape hatch for arbitrary CLI strings (interactive commands like \`auth login\`, raw \`api\` passthrough). The codegen auto-discovers ALL commands from the route tree with zero config, using CLI route names as-is (\`org.list\`, \`dashboard.widget.add\`). Return types are derived from \`\_\_jsonSchema\` when present, otherwise \`unknown\`. Positional patterns are derived from introspection placeholder strings. Hidden routes (plural aliases) are skipped. ### Decision - -* **Raw markdown output for non-interactive terminals, rendered for TTY**: Markdown-first output pipeline: custom renderer in \`src/lib/formatters/markdown.ts\` walks \`marked\` tokens to produce ANSI-styled output. Commands build CommonMark using helpers (\`mdKvTable()\`, \`mdRow()\`, \`colorTag()\`, \`escapeMarkdownCell()\`, \`safeCodeSpan()\`) and pass through \`renderMarkdown()\`. \`isPlainOutput()\` precedence: \`SENTRY\_PLAIN\_OUTPUT\` > \`NO\_COLOR\` > \`FORCE\_COLOR\` > \`!isTTY\`. \`--json\` always outputs JSON. Colors defined in \`COLORS\` object in \`colors.ts\`. Tests run non-TTY so assertions match raw CommonMark; use \`stripAnsi()\` helper for rendered-mode assertions. 
+ +* **OutputError propagates via throw instead of process.exit()**: The \`process.exit()\` call in \`command.ts\` (OutputError handler) is replaced with \`throw err\` to support library mode. \`OutputError\` is re-thrown through Stricli via \`exceptionWhileRunningCommand\` in \`app.ts\` (added before the \`AuthError\` check), so Stricli never writes an error message for it. In CLI mode (\`cli.ts\`), OutputError is caught and \`process.exitCode\` is set silently without writing to stderr (data was already rendered). In library mode (\`index.ts\`), the catch block checks if \`capturedResult\` has data (the OutputError's payload was rendered to stdout via \`captureObject\` before the throw) and returns it instead of throwing \`SentryError\`. This eliminates the only \`process.exit()\` outside of \`bin.ts\`. - -* **whoami should be separate from auth status command**: The \`sentry auth whoami\` command should be a dedicated command separate from \`sentry auth status\`. They serve different purposes: \`status\` shows everything about auth state (token, expiry, defaults, org verification), while \`whoami\` just shows user identity (name, email, username, ID) by fetching live from \`/auth/\` endpoint. \`sentry whoami\` should be a top-level alias (like \`sentry issues\` → \`sentry issue list\`). \`whoami\` should support \`--json\` for machine consumption and be lightweight — no credential verification, no defaults listing. + +* **SDK codegen moving to auto-generate all commands from route tree**: \`script/generate-sdk.ts\` walks the Stricli route tree via \`discoverCommands()\`, skipping hidden routes. For each command: extracts flags, derives positional params from placeholder strings, checks \`\_\_jsonSchema\` for typed return types. Naming uses CLI route path as-is: \`\["org", "list"]\` → \`sdk.org.list()\`. Generates TWO gitignored files: (1) \`src/sdk.generated.ts\` — runtime, (2) \`src/sdk.generated.d.cts\` — npm type declarations. 
\`generate:sdk\` is chained before \`typecheck\`, \`dev\`, \`build\`, \`build:all\`, \`bundle\`. \`INTERNAL\_FLAGS\` set excludes \`json\`, \`fields\`, \`refresh\`, \`follow\` from generated parameter types — streaming flags are library-incompatible. CI check \`bun run check:skill\` validates SKILL.md stays in sync. ### Gotcha - -* **@sentry/api SDK passes Request object to custom fetch — headers lost on Node.js**: @sentry/api SDK calls \`\_fetch(request)\` with no init object. In \`authenticatedFetch\`, \`init\` is undefined so \`prepareHeaders\` creates empty headers — on Node.js this strips Content-Type (HTTP 415). Fix: fall back to \`input.headers\` when \`init\` is undefined. Use \`unwrapPaginatedResult\` (not \`unwrapResult\`) to access the Response's Link header for pagination. \`per\_page\` is not in SDK types; cast query to pass it at runtime. - - -* **Bun binary build requires SENTRY\_CLIENT\_ID env var**: The build script (\`script/bundle.ts\`) requires \`SENTRY\_CLIENT\_ID\` environment variable and exits with code 1 if missing. When building locally, use \`bun run --env-file=.env.local build\` or set the env var explicitly. The binary build (\`bun run build\`) also needs it. Without it you get: \`Error: SENTRY\_CLIENT\_ID environment variable is required.\` - - -* **GitHub immutable releases prevent rolling nightly tag pattern**: getsentry/cli has immutable GitHub releases — assets can't be modified and tags can NEVER be reused. Nightly builds publish to GHCR with versioned tags like \`nightly-0.14.0-dev.1772661724\`, not GitHub Releases or npm. \`fetchManifest()\` throws \`UpgradeError("network\_error")\` for both network failures and non-200 — callers must check message for HTTP 404/403. Craft with no \`preReleaseCommand\` silently skips \`bump-version.sh\` if only target is \`github\`. - - -* **Install script: BSD sed and awk JSON parsing breaks OCI digest extraction**: The install script parses OCI manifests with awk (no jq). 
Key trap: BSD sed \`\n\` is literal, not newline. Fix: single awk pass tracking last-seen \`"digest"\`, printing when \`"org.opencontainers.image.title"\` matches target. The config digest (\`sha256:44136fa...\`) is a 2-byte \`{}\` blob — downloading it instead of the real binary causes \`gunzip: unexpected end of file\`. - - -* **Multi-region fan-out: distinguish all-403 from empty orgs with hasSuccessfulRegion flag**: In \`listOrganizationsUncached\` (\`src/lib/api/organizations.ts\`), \`Promise.allSettled\` collects multi-region results. Don't use \`flatResults.length === 0\` to detect all-regions-failed — a region returning 200 OK with zero orgs pushes nothing into \`flatResults\`. Track a \`hasSuccessfulRegion\` boolean on any \`"fulfilled"\` settlement. Only re-throw 403 \`ApiError\` when \`!hasSuccessfulRegion && lastScopeError\`. - - -* **Multiple mockFetch calls replace each other — use unified mocks for multi-endpoint tests**: Bun test mocking gotchas: (1) \`mockFetch()\` replaces \`globalThis.fetch\` — calling it twice replaces the first mock. Use a single unified fetch mock dispatching by URL pattern. (2) \`mock.module()\` pollutes the module registry for ALL subsequent test files. Tests using it must live in \`test/isolated/\` and run via \`test:isolated\`. This also causes \`delta-upgrade.test.ts\` to fail when run alongside \`test/isolated/delta-upgrade.test.ts\` — the isolated test's \`mock.module()\` replaces \`CLI\_VERSION\` for all subsequent files. (3) For \`Bun.spawn\`, use direct property assignment in \`beforeEach\`/\`afterEach\`. - - -* **useTestConfigDir without isolateProjectRoot causes DSN scanning of repo tree**: \`useTestConfigDir()\` creates temp dirs under \`.test-tmp/\` in the repo tree. Without \`{ isolateProjectRoot: true }\`, \`findProjectRoot\` walks up and finds the repo's \`.git\`, causing DSN detection to scan real source code and trigger network calls against test mocks (timeouts). 
Always pass \`isolateProjectRoot: true\` when tests exercise \`resolveOrg\`, \`detectDsn\`, or \`findProjectRoot\`. + +* **Test mocks lack process property — use optional chaining on this.process**: Command \`func()\` methods access \`this: SentryContext\` which has \`this.process\`. But test mocks created via \`createMockContext()\` only provide \`stdout\`/\`stderr\`/\`cwd\` — no \`process\` property. Accessing \`this.process.abortSignal\` crashes with \`undefined is not an object\`. Fix: always use optional chaining \`(this.process as T)?.abortSignal\` or check \`this.process\` exists first. This applies to any new property added to the process-like object in \`sdk-invoke.ts\` that commands read via \`this.process\`. ### Pattern - -* **findProjectsByPattern as fuzzy fallback for exact slug misses**: When \`findProjectsBySlug\` returns empty (no exact match), use \`findProjectsByPattern\` as a fallback to suggest similar projects. \`findProjectsByPattern\` does bidirectional word-boundary matching (\`matchesWordBoundary\`) against all projects in all orgs — the same logic used for directory name inference. In the \`project-search\` handler, call it after the exact miss, format matches as \`<org>/<project>\` suggestions in the \`ResolutionError\`. This avoids a dead-end error for typos like 'patagonai' when 'patagon-ai' exists. Note: \`findProjectsByPattern\` makes additional API calls (lists all projects per org), so only call it on the failure path. + +* **buildIsolatedEnv helper centralizes SDK env setup**: \`buildIsolatedEnv(options?)\` in \`src/lib/sdk-invoke.ts\` maps \`SentryOptions\` fields to env vars (\`token\` → \`SENTRY\_AUTH\_TOKEN\`, \`url\` → \`SENTRY\_HOST\`, etc.) plus \`SENTRY\_OUTPUT\_FORMAT=json\` (unless \`text: true\`). The core dedup is \`executeWithCapture()\` which centralizes the env isolation → capture context → telemetry → error wrapping → output parsing pipeline. 
Both \`buildInvoker\` (typed methods) and \`buildRunner\` (\`run()\` escape hatch) are thin ~15-line wrappers providing only the executor callback. \`STREAMING\_FLAGS\` set (\`--refresh\`, \`--follow\`, \`-f\`) is checked in \`buildRunner\` before execution — throws \`SentryError\` immediately since streaming output is unsuitable for library mode. Same flags are in \`INTERNAL\_FLAGS\` in codegen so typed SDK methods can't trigger streaming. + + +* **SDK codegen callable interface pattern for streaming overloads**: In \`script/generate-sdk.ts\`, streaming-capable commands (those with flags in \`STREAMING\_FLAGS\` set) use a callable interface pattern instead of a simple method signature. This produces TypeScript overloaded signatures: \`(params?: T): Promise<R>\` for non-streaming and \`(params: T & { follow: string }): AsyncIterable<R>\` for streaming. At runtime, \`generateStreamingMethodBody()\` emits code that checks if any streaming flag is present in params, then passes \`{ streaming: true }\` meta to the invoker which branches to \`executeWithStream\` vs \`executeWithCapture\`. The \`STREAMING\_FLAGS\` set (\`refresh\`, \`follow\`) is separate from \`INTERNAL\_FLAGS\` — streaming flags ARE included in generated params but excluded from \`INTERNAL\_FLAGS\`. - -* **Org-scoped SDK calls follow getOrgSdkConfig + unwrapResult pattern**: All org-scoped API calls in src/lib/api-client.ts: (1) call \`getOrgSdkConfig(orgSlug)\` for regional URL + SDK config, (2) spread into SDK function: \`{ ...config, path: { organization\_id\_or\_slug: orgSlug, ... } }\`, (3) pass to \`unwrapResult(result, errorContext)\`. Shared helpers \`resolveAllTargets\`/\`resolveOrgAndProject\` must NOT call \`fetchProjectId\` — commands that need it enrich targets themselves. 
+ +* **SENTRY\_OUTPUT\_FORMAT env var enables JSON mode from env instead of --json flag**: In \`src/lib/command.ts\`, the \`wrappedFunc\` checks \`this.env?.SENTRY\_OUTPUT\_FORMAT === "json"\` to force JSON output mode without passing \`--json\` on the command line. This is how the library entry point (\`src/index.ts\`) gets JSON by default — it sets this env var in the isolated env. The check runs after \`cleanRawFlags\` and only when the command has an \`output\` config (supports JSON). Commands without JSON support (help, version) are unaffected. ~5-line addition to \`command.ts\`. - -* **PR workflow: wait for Seer and Cursor BugBot before resolving**: CI includes Seer Code Review and Cursor Bugbot as advisory checks (~2-3 min, only on ready-for-review PRs). Workflow: push → wait for all CI (including npm build) → check inline review comments from Seer/BugBot → fix valid findings → repeat. Bugbot sometimes catches real logic bugs, not just style — always review before merging. Use \`gh pr checks <number> --watch\` to monitor. Fetch comments via \`gh api repos/OWNER/REPO/pulls/NUM/comments\`. + +* **Target argument 4-mode parsing convention (project-search-first)**: \`parseOrgProjectArg()\` in \`src/lib/arg-parsing.ts\` returns a 4-mode discriminated union: \`auto-detect\` (empty), \`explicit\` (\`org/project\`), \`org-all\` (\`org/\` trailing slash), \`project-search\` (bare slug). Bare slugs are ALWAYS \`project-search\` first. The "is this an org?" check is secondary: list commands with \`orgSlugMatchBehavior\` pre-check cached orgs (\`redirect\` or \`error\` mode), and \`handleProjectSearch()\` has a safety net checking orgs after project search fails. Non-list commands (init, view) treat bare slugs purely as project search with no org pre-check. For \`init\`, unmatched bare slugs become new project names. Key files: \`src/lib/arg-parsing.ts\` (parsing), \`src/lib/org-list.ts\` (dispatch + org pre-check), \`src/lib/resolve-target.ts\` (resolution cascade). 
- -* **Shared pagination infrastructure: buildPaginationContextKey and parseCursorFlag**: Schema v12 replaced \`pagination\_cursors.cursor TEXT\` with \`cursor\_stack TEXT\` (JSON array) + \`page\_index INTEGER\`. Stack-based API in \`src/lib/db/pagination.ts\`: \`resolveCursor(flag, key, contextKey)\` maps keywords (next/prev/previous/first/last) to \`{cursor, direction}\`. \`advancePaginationState(key, contextKey, direction, nextCursor)\` pushes/pops the stack — back-then-forward truncates stale entries. \`hasPreviousPage(key, contextKey)\` checks \`page\_index > 0\`. \`clearPaginationState(key)\` removes state. \`parseCursorFlag\` in \`list-command.ts\` accepts next/prev/previous/first/last keywords. \`paginationHint()\` in \`org-list.ts\` builds bidirectional hints (\`-c prev | -c next\`). JSON envelope includes \`hasPrev\` boolean. All 7 list commands (trace, span, issue, project, team, repo, dashboard) use this stack API. \`resolveCursor()\` must be called inside \`org-all\` override closures. + +* **Writer type is the minimal output interface for streams and mocks**: The \`Writer\` type in \`src/types/index.ts\` is \`{ write(data: string): void; captureObject?: (obj: unknown) => void }\`. The optional \`captureObject\` property replaces the previous duck-typing pattern (\`hasCaptureObject()\` with \`typeof\` check and \`Record<string, unknown>\` cast). In library mode, the writer sets \`captureObject\` to capture the fully-transformed JSON object directly without serialization. In CLI mode, \`process.stdout\` lacks this property so it's \`undefined\` → falsy, and \`emitJsonObject()\` falls through to \`JSON.stringify\`. The check is now a simple truthiness test: \`if (stdout.captureObject)\`. Since \`captureObject\` is part of the \`Writer\` type, \`sdk-invoke.ts\` no longer needs \`Writer & { captureObject?: ... }\` intersection types — plain \`Writer\` suffices. 
- -* **Telemetry instrumentation pattern: withTracingSpan + captureException for handled errors**: For graceful-fallback operations, use \`withTracingSpan\` from \`src/lib/telemetry.ts\` for child spans and \`captureException\` from \`@sentry/bun\` (named import — Biome forbids namespace imports) with \`level: 'warning'\` for non-fatal errors. \`withTracingSpan\` uses \`onlyIfParent: true\` — no-op without active transaction. User-visible fallbacks use \`log.warn()\` not \`log.debug()\`. Several commands bypass telemetry by importing \`buildCommand\` from \`@stricli/core\` directly instead of \`../../lib/command.js\` (trace/list, trace/view, log/view, api.ts, help.ts). +### Preference - -* **Testing Stricli command func() bodies via spyOn mocking**: To unit-test a Stricli command's \`func()\` body: (1) \`const func = await cmd.loader()\`, (2) \`func.call(mockContext, flags, ...args)\` with mock \`stdout\`, \`stderr\`, \`cwd\`, \`setContext\`. (3) \`spyOn\` namespace imports to mock dependencies (e.g., \`spyOn(apiClient, 'getLogs')\`). The \`loader()\` return type union causes \`.call()\` LSP errors — these are false positives that pass \`tsc --noEmit\`. When API functions are renamed (e.g., \`getLog\` → \`getLogs\`), update both spy target name AND mock return shape (single → array). Slug normalization (\`normalizeSlug\`) replaces underscores with dashes but does NOT lowercase — test assertions must match original casing (e.g., \`'CAM-82X'\` not \`'cam-82x'\`). + +* **Library features require README and docs site updates**: When adding new features like the library API, documentation must be updated in both places: the root \`README.md\` (library usage section between Configuration and Development, before the \`---\` divider) and the docs website at \`docs/src/content/docs/\`. The docs site uses Astro + Starlight with sidebar defined in \`docs/astro.config.mjs\`. New pages outside \`commands/\` must be manually added to the sidebar config. 
\`library-usage.md\` was added to the "Getting Started" sidebar section after "Configuration". Note: \`features.md\` and \`agent-guidance.md\` exist but are NOT in the sidebar. diff --git a/src/types/dashboard.ts b/src/types/dashboard.ts index 7c12fbf3..2cd805bb 100644 --- a/src/types/dashboard.ts +++ b/src/types/dashboard.ts @@ -478,8 +478,126 @@ function extractFunctionName(aggregate: string): string { return parenIdx > 0 ? aggregate.slice(0, parenIdx) : aggregate; } +/** + * Extract the argument from a parsed aggregate string. + * "count()" → "" + * "p95(span.duration)" → "span.duration" + * "avg(g:custom/foo@b)" → "g:custom/foo@b" + */ +function extractAggregateArg(aggregate: string): string { + const openIdx = aggregate.indexOf("("); + const closeIdx = aggregate.lastIndexOf(")"); + if (openIdx < 0 || closeIdx <= openIdx) { + return ""; + } + return aggregate.slice(openIdx + 1, closeIdx); +} + +// --------------------------------------------------------------------------- +// MRI (Metric Resource Identifier) detection +// +// Port of Sentry's canonical Python parser: +// sentry/src/sentry/snuba/metrics/naming_layer/mri.py +// +// MRI format: <entity>:<namespace>/<name>@<unit> +// entity: "c" (counter), "d" (distribution), "g" (gauge), "s" (set), +// "e" (extracted), or multi-char like "dist" +// namespace: "sessions", "transactions", "spans", "custom", etc. +// name: metric name, e.g. "duration", "measurements.fcp" +// unit: "none", "millisecond", "byte", "byte/second", etc. +// --------------------------------------------------------------------------- + +/** + * Regex matching the MRI schema format. + * + * Uses the same permissive \`[^:]+\` entity pattern as the canonical Python parser + * so it catches multi-char entity codes like "dist" in addition to the standard + * single-letter codes (c, d, g, s, e). + */ +const MRI_RE = + /^(?<entity>[^:]+):(?<namespace>[^/]+)\/(?<name>[^@]+)@(?<unit>.+)$/; + +/** Parsed components of an MRI string */ +export type ParsedMri = { + /** Entity type code (e.g. 
"c", "d", "g", "s", "e", or "dist") */ + entity: string; + /** Metric namespace (e.g. "custom", "sessions", "transactions") */ + namespace: string; + /** Metric name (e.g. "duration", "node.runtime.mem.rss") */ + name: string; + /** Metric unit (e.g. "byte", "millisecond", "none") */ + unit: string; +}; + +/** + * Parse an MRI (Metric Resource Identifier) string into its components. + * + * Port of Sentry's `parse_mri` from `sentry/snuba/metrics/naming_layer/mri.py`. + * + * @returns Parsed MRI components, or null if the input doesn't match MRI format + */ +export function parseMri(input: string): ParsedMri | null { + const match = MRI_RE.exec(input); + if (!match?.groups) { + return null; + } + const { entity, namespace, name, unit } = match.groups; + if (!(entity && namespace && name && unit)) { + return null; + } + return { entity, namespace, name, unit }; +} + +/** Maps known MRI entity codes to human-readable tracemetrics type names */ +const MRI_ENTITY_TYPE_NAMES: Record = { + c: "counter", + d: "distribution", + g: "gauge", + s: "set", + e: "extracted", +}; + +/** + * Check aggregates for MRI syntax and throw a ValidationError with guidance + * on using `--dataset tracemetrics` with the correct query format. + * + * MRI queries like `avg(g:custom/node.runtime.mem.rss@byte)` pass function-name + * validation (since "avg" is valid) but produce widgets that render as + * "Internal Error" in the Sentry dashboard UI. + * + * @param aggregates - Parsed aggregate strings to check + * @param dataset - Current dataset flag value (for context in error message) + */ +function rejectMriQueries(aggregates: string[], dataset?: string): void { + for (const agg of aggregates) { + const arg = extractAggregateArg(agg); + const mri = parseMri(arg); + if (!mri) { + continue; + } + + const fn = extractFunctionName(agg); + const typeName = MRI_ENTITY_TYPE_NAMES[mri.entity] ?? 
mri.entity; + const suggestion = `${fn}(value,${mri.name},${typeName},${mri.unit})`; + const datasetNote = + dataset === "tracemetrics" + ? "" + : `\nUse --dataset tracemetrics instead of --dataset ${dataset ?? "spans"}.`; + + throw new ValidationError( + `MRI query syntax is not supported for dashboard widgets: "${agg}".\n\n` + + "Use the tracemetrics query format instead:\n" + + ` --query '${suggestion}'${datasetNote}\n\n` + + "Tracemetrics format: fn(value,,,)", + "query" + ); + } +} + /** * Validate that all aggregate function names in a list are known. + * Also rejects MRI (Metric Resource Identifier) syntax with guidance + * on the correct tracemetrics query format. * Throws a ValidationError listing valid functions if any are invalid. * * @param aggregates - Parsed aggregate strings (e.g. ["count()", "p95(span.duration)"]) @@ -489,6 +607,10 @@ export function validateAggregateNames( aggregates: string[], dataset?: string ): void { + // Detect MRI syntax early — it passes function-name validation (e.g. "avg" is valid) + // but produces widgets that render as "Internal Error" in the dashboard UI. + rejectMriQueries(aggregates, dataset); + const validFunctions: readonly string[] = dataset === "discover" || dataset === "error-events" ? 
DISCOVER_AGGREGATE_FUNCTIONS diff --git a/test/commands/dashboard/widget/add.test.ts b/test/commands/dashboard/widget/add.test.ts index d4f3027c..0c77a0c7 100644 --- a/test/commands/dashboard/widget/add.test.ts +++ b/test/commands/dashboard/widget/add.test.ts @@ -212,6 +212,29 @@ describe("dashboard widget add", () => { expect(err.message).toContain("Unknown aggregate function"); }); + test("throws ValidationError for MRI query syntax", async () => { + const { context } = createMockContext(); + const func = await addCommand.loader(); + + const err = await func + .call( + context, + { + json: false, + display: "line", + dataset: "metrics", + query: ["avg(g:custom/node.runtime.mem.rss@byte)"], + }, + "123", + "Memory Usage" + ) + .catch((e: Error) => e); + expect(err).toBeInstanceOf(ValidationError); + expect(err.message).toContain("MRI query syntax is not supported"); + expect(err.message).toContain("tracemetrics"); + expect(err.message).toContain("avg(value,node.runtime.mem.rss,gauge,byte)"); + }); + test("throws ValidationError for big_number with issue dataset", async () => { const { context } = createMockContext(); const func = await addCommand.loader(); diff --git a/test/types/dashboard.test.ts b/test/types/dashboard.test.ts index 63e4b281..3f42ac24 100644 --- a/test/types/dashboard.test.ts +++ b/test/types/dashboard.test.ts @@ -24,6 +24,7 @@ import { IsFilterValueSchema, mapWidgetTypeToDataset, parseAggregate, + parseMri, parseSortExpression, parseWidgetInput, prepareWidgetQueries, @@ -32,6 +33,7 @@ import { stripWidgetServerFields, TABLE_DISPLAY_TYPES, TIMESERIES_DISPLAY_TYPES, + validateAggregateNames, validateWidgetLayout, WIDGET_TYPES, type WidgetType, @@ -879,3 +881,174 @@ describe("display type sets", () => { expect(TABLE_DISPLAY_TYPES.has("line")).toBe(false); }); }); + +// --------------------------------------------------------------------------- +// parseMri +// --------------------------------------------------------------------------- + 
// Unit tests for parseMri: one happy-path case per MRI entity type, plus the
// multi-char-entity and slash-in-unit edge cases, and rejection cases.
describe("parseMri", () => {
  test("parses standard gauge MRI", () => {
    expect(parseMri("g:custom/node.runtime.mem.rss@byte")).toEqual({
      entity: "g",
      namespace: "custom",
      name: "node.runtime.mem.rss",
      unit: "byte",
    });
  });

  test("parses counter MRI", () => {
    expect(parseMri("c:transactions/measurements.db_calls@none")).toEqual({
      entity: "c",
      namespace: "transactions",
      name: "measurements.db_calls",
      unit: "none",
    });
  });

  test("parses distribution MRI", () => {
    expect(
      parseMri("d:transactions/measurements.stall_longest_time@millisecond")
    ).toEqual({
      entity: "d",
      namespace: "transactions",
      name: "measurements.stall_longest_time",
      unit: "millisecond",
    });
  });

  test("parses set MRI", () => {
    expect(parseMri("s:sessions/error@none")).toEqual({
      entity: "s",
      namespace: "sessions",
      name: "error",
      unit: "none",
    });
  });

  test("parses extracted MRI", () => {
    expect(parseMri("e:spans/duration@millisecond")).toEqual({
      entity: "e",
      namespace: "spans",
      name: "duration",
      unit: "millisecond",
    });
  });

  test("parses multi-char entity (matches canonical Python behavior)", () => {
    expect(parseMri("dist:my_namespace/foo@none")).toEqual({
      entity: "dist",
      namespace: "my_namespace",
      name: "foo",
      unit: "none",
    });
  });

  // The unit is everything after the first "@", so "byte/second" survives
  // despite "/" also being the namespace/name separator.
  test("handles units with slashes", () => {
    expect(parseMri("d:transactions/measurements.disk_io@byte/second")).toEqual(
      {
        entity: "d",
        namespace: "transactions",
        name: "measurements.disk_io",
        unit: "byte/second",
      }
    );
  });

  test("returns null for non-MRI strings", () => {
    expect(parseMri("span.duration")).toBeNull();
    expect(parseMri("count()")).toBeNull();
    expect(parseMri("value,name,gauge,byte")).toBeNull();
  });

  test("returns null for partial MRI (missing unit)", () => {
    expect(parseMri("g:custom/foo")).toBeNull();
  });

  test("returns null for empty string", () => {
    expect(parseMri("")).toBeNull();
  });

  test("returns null for malformed separators", () => {
    expect(parseMri("d@transactions/foo")).toBeNull();
    expect(parseMri(":transactions/foo@none")).toBeNull();
    expect(parseMri("d/transactions@foo:none")).toBeNull();
    expect(parseMri(":/@")).toBeNull();
  });
});

// ---------------------------------------------------------------------------
// validateAggregateNames — MRI rejection
// ---------------------------------------------------------------------------

// These assertions pin exact substrings of the production ValidationError
// message, so they are intentionally coupled to the error text built in
// rejectMriQueries.
describe("validateAggregateNames MRI rejection", () => {
  test("throws ValidationError for MRI query with metrics dataset", () => {
    expect(() =>
      validateAggregateNames(
        ["avg(g:custom/node.runtime.mem.rss@byte)"],
        "metrics"
      )
    ).toThrow(ValidationError);
  });

  // No dataset argument: exercises the implied-default ("spans") path.
  test("throws ValidationError for MRI query with spans dataset", () => {
    expect(() =>
      validateAggregateNames(["avg(g:custom/node.runtime.mem.rss@byte)"])
    ).toThrow(ValidationError);
  });

  test("error message contains tracemetrics format suggestion", () => {
    try {
      validateAggregateNames(
        ["avg(g:custom/node.runtime.mem.rss@byte)"],
        "metrics"
      );
      expect.unreachable("Should have thrown");
    } catch (error) {
      expect(error).toBeInstanceOf(ValidationError);
      const msg = (error as ValidationError).message;
      // "g" entity maps to "gauge" in the suggested rewrite.
      expect(msg).toContain("avg(value,node.runtime.mem.rss,gauge,byte)");
      expect(msg).toContain("tracemetrics");
    }
  });

  test("error message suggests --dataset tracemetrics when dataset is metrics", () => {
    try {
      validateAggregateNames(
        ["avg(g:custom/node.runtime.mem.rss@byte)"],
        "metrics"
      );
      expect.unreachable("Should have thrown");
    } catch (error) {
      const msg = (error as ValidationError).message;
      expect(msg).toContain(
        "Use --dataset tracemetrics instead of --dataset metrics"
      );
    }
  });

  test("error message omits dataset switch when already tracemetrics", () => {
    try {
      validateAggregateNames(
        ["avg(g:custom/node.runtime.mem.rss@byte)"],
        "tracemetrics"
      );
      expect.unreachable("Should have thrown");
    } catch (error) {
      const msg = (error as ValidationError).message;
      expect(msg).not.toContain("Use --dataset tracemetrics instead");
    }
  });

  test("does not throw for valid non-MRI queries", () => {
    expect(() => validateAggregateNames(["avg(span.duration)"])).not.toThrow();
    expect(() => validateAggregateNames(["count()"])).not.toThrow();
    expect(() => validateAggregateNames(["p95(span.self_time)"])).not.toThrow();
  });

  test("does not throw for tracemetrics format query", () => {
    // tracemetrics format: fn(value,name,type,unit) — not MRI
    expect(() =>
      validateAggregateNames(["avg(value,node.runtime.mem.rss,gauge,byte)"])
    ).not.toThrow();
  });
});