diff --git a/plugins/sentry-cli/skills/sentry-cli/references/dashboard.md b/plugins/sentry-cli/skills/sentry-cli/references/dashboard.md index 56048574b..9b8d3294e 100644 --- a/plugins/sentry-cli/skills/sentry-cli/references/dashboard.md +++ b/plugins/sentry-cli/skills/sentry-cli/references/dashboard.md @@ -76,7 +76,7 @@ Add a widget to a dashboard **Flags:** - `-d, --display - Display type (big_number, line, area, bar, table, stacked_area, top_n, text, categorical_bar, details, wheel, rage_and_dead_clicks, server_tree, agents_traces_table)` -- `--dataset - Widget dataset (default: spans)` +- `--dataset - Widget dataset (default: spans). Accepts canonical names and API synonyms: spans, error-events/errors, transaction-like/transactions, tracemetrics/metrics, logs, issue, discover` - `-q, --query ... - Aggregate expression (e.g. count, p95:span.duration)` - `-w, --where - Search conditions filter (e.g. is:unresolved)` - `-g, --group-by ... - Group-by column (repeatable)` @@ -122,7 +122,7 @@ Edit a widget in a dashboard - `-t, --title - Widget title to match` - `--new-title - New widget title` - `-d, --display - Display type (big_number, line, area, bar, table, stacked_area, top_n, text, categorical_bar, details, wheel, rage_and_dead_clicks, server_tree, agents_traces_table)` -- `--dataset - Widget dataset (default: spans)` +- `--dataset - Widget dataset (default: spans). Accepts canonical names and API synonyms: spans, error-events/errors, transaction-like/transactions, tracemetrics/metrics, logs, issue, discover` - `-q, --query ... - Aggregate expression (e.g. count, p95:span.duration)` - `-w, --where - Search conditions filter (e.g. is:unresolved)` - `-g, --group-by ... 
- Group-by column (repeatable)` diff --git a/src/commands/dashboard/resolve.ts b/src/commands/dashboard/resolve.ts index 6e39bd9e3..6887c0d7b 100644 --- a/src/commands/dashboard/resolve.ts +++ b/src/commands/dashboard/resolve.ts @@ -609,11 +609,62 @@ export function enrichDashboardError( throw error; } +/** + * User-facing dataset synonyms resolved to the canonical Sentry widget type. + * + * The Sentry API/UI and docs surface names like `errors` and `transactions` + * but widget types use `error-events` and `transaction-like`. The CLI accepts + * both forms so users copying from docs or using API-dataset terminology + * don't have to translate. + * + * Keys are lowercase; matching is case-insensitive via {@link normalizeDataset}. + */ +const DATASET_ALIASES: Record<string, string> = { + error: "error-events", + errors: "error-events", + transaction: "transaction-like", + transactions: "transaction-like", + log: "logs", + // `metrics` and `metricsEnhanced` both alias to the canonical `tracemetrics`. + // `metricsEnhanced` is the value surfaced by the events API dataset param + // (see WIDGET_TYPE_TO_DATASET in types/dashboard.ts) and may appear in docs. + metrics: "tracemetrics", + metricsenhanced: "tracemetrics", +}; + +/** + * Normalize a user-provided `--dataset` value to the canonical widget type. + * + * - Case-insensitive (e.g. `Errors`, `ERRORS` → `error-events`). + * - Maps known synonyms via {@link DATASET_ALIASES}. + * - Returns the lower-cased input unchanged if no alias matches (the enum + * check in {@link validateWidgetEnums} will reject unknown values). + * - Returns `undefined` for `undefined` input. + * + * Must be called once, up-front, and the result threaded through every + * downstream consumer (aggregate validator, warnings, PUT body). Leaving + * an un-normalized value in `flags.dataset` causes dataset-specific + * aggregate validation (e.g. `failure_rate` for `error-events`) to see + * the alias instead of the canonical name and reject valid inputs. 
+ */ +export function normalizeDataset(dataset?: string): string | undefined { + if (dataset === undefined) { + return; + } + const lower = dataset.toLowerCase(); + return DATASET_ALIASES[lower] ?? lower; +} + /** * Validate --display and --dataset flag values against known enums. * + * Callers MUST pass a dataset value already normalized via + * {@link normalizeDataset} so aliases and casing don't leak into the + * enum check. This function does not mutate or resolve aliases itself — + * it is a pure validator. + * * @param display - Display type flag value - * @param dataset - Dataset flag value + * @param dataset - Dataset flag value (already normalized) */ export function validateWidgetEnums(display?: string, dataset?: string): void { if ( diff --git a/src/commands/dashboard/widget/add.ts b/src/commands/dashboard/widget/add.ts index 673f7bac9..6eb7c7e2f 100644 --- a/src/commands/dashboard/widget/add.ts +++ b/src/commands/dashboard/widget/add.ts @@ -25,6 +25,7 @@ import { import { buildWidgetFromFlags, enrichDashboardError, + normalizeDataset, parseDashboardPositionalArgs, resolveDashboardId, resolveOrgFromTarget, @@ -121,7 +122,8 @@ export const addCommand = buildCommand({ dataset: { kind: "parsed", parse: String, - brief: "Widget dataset (default: spans)", + brief: + "Widget dataset (default: spans). Accepts canonical names and API synonyms: spans, error-events/errors, transaction-like/transactions, tracemetrics/metrics, logs, issue, discover", optional: true, }, query: { @@ -204,8 +206,14 @@ export const addCommand = buildCommand({ const { dashboardArgs, title } = parseAddPositionalArgs(args); + // Resolve dataset aliases (e.g. "errors" → "error-events") once, up front. + // Every downstream consumer — enum validation, dataset-aware aggregate + // validation, the PUT body — must see the canonical name, so we thread + // the normalized value and never reference flags.dataset below. 
+ const dataset = normalizeDataset(flags.dataset); + // Validate enums before any network calls (fail fast) - validateWidgetEnums(flags.display, flags.dataset); + validateWidgetEnums(flags.display, dataset); const { dashboardRef, targetArg } = parseDashboardPositionalArgs(dashboardArgs); @@ -220,7 +228,7 @@ export const addCommand = buildCommand({ let newWidget = buildWidgetFromFlags({ title, display: flags.display, - dataset: flags.dataset, + dataset, query: flags.query, where: flags.where, groupBy: flags["group-by"], diff --git a/src/commands/dashboard/widget/edit.ts b/src/commands/dashboard/widget/edit.ts index 95868d428..9022dd866 100644 --- a/src/commands/dashboard/widget/edit.ts +++ b/src/commands/dashboard/widget/edit.ts @@ -28,6 +28,7 @@ import { } from "../../../types/dashboard.js"; import { enrichDashboardError, + normalizeDataset, parseDashboardPositionalArgs, resolveDashboardId, resolveOrgFromTarget, @@ -247,7 +248,8 @@ export const editCommand = buildCommand({ dataset: { kind: "parsed", parse: String, - brief: "Widget dataset (default: spans)", + brief: + "Widget dataset (default: spans). Accepts canonical names and API synonyms: spans, error-events/errors, transaction-like/transactions, tracemetrics/metrics, logs, issue, discover", optional: true, }, query: { @@ -332,7 +334,19 @@ export const editCommand = buildCommand({ ); } - validateWidgetEnums(flags.display, flags.dataset); + // Resolve dataset aliases (e.g. "errors" → "error-events") once, up front. + // Replace flags.dataset with the canonical value so every downstream + // consumer — validateEnumsAndAggregates, validateAggregateNames, and the + // PUT body — sees the normalized name. Without this, dataset-aware + // aggregate validation (e.g. failure_rate for error-events) would fail + // when the user passes --dataset errors. + const normalizedDataset = normalizeDataset(flags.dataset); + const normalizedFlags: EditFlags = + normalizedDataset === flags.dataset + ? 
flags + : { ...flags, dataset: normalizedDataset }; + + validateWidgetEnums(normalizedFlags.display, normalizedFlags.dataset); const { dashboardRef, targetArg } = parseDashboardPositionalArgs(args); const parsed = parseOrgProjectArg(targetArg); @@ -349,15 +363,19 @@ export const editCommand = buildCommand({ enrichDashboardError(error, { orgSlug, dashboardId, operation: "view" }) ); const widgets = current.widgets ?? []; - const widgetIndex = resolveWidgetIndex(widgets, flags.index, flags.title); + const widgetIndex = resolveWidgetIndex( + widgets, + normalizedFlags.index, + normalizedFlags.title + ); const updateBody = prepareDashboardForUpdate(current); const existing = updateBody.widgets[widgetIndex] as DashboardWidget; // Validate individual layout flag ranges early (catches --col -1, --width 7, etc.) - validateWidgetLayout(flags, existing.layout); + validateWidgetLayout(normalizedFlags, existing.layout); - const replacement = buildReplacement(flags, existing); + const replacement = buildReplacement(normalizedFlags, existing); // Re-validate the final merged layout when the existing widget had no layout // and FALLBACK_LAYOUT was used — the early check couldn't cross-validate diff --git a/src/commands/dashboard/widget/index.ts b/src/commands/dashboard/widget/index.ts index 131d76baa..e99165696 100644 --- a/src/commands/dashboard/widget/index.ts +++ b/src/commands/dashboard/widget/index.ts @@ -19,17 +19,25 @@ export const widgetRoute = buildRouteMap({ " specialized: stacked_area (3×2), top_n (3×2), categorical_bar (3×2), text (3×2)\n" + " internal: details (3×2), wheel (3×2), rage_and_dead_clicks (3×2),\n" + " server_tree (3×2), agents_traces_table (3×2)\n\n" + - "Datasets:\n" + - " spans (default) Span-based queries: span.duration, span.op, transaction,\n" + - " span attributes, cache.hit, etc. 
Covers most use cases.\n" + - " tracemetrics Custom metrics from Sentry.metrics.distribution/gauge/count.\n" + - " Query format: aggregation(value,metric_name,metric_type,unit)\n" + - " Example: p50(value,completion.duration_ms,distribution,none)\n" + - " Supported displays: line, area, bar, big_number, categorical_bar\n" + - " discover Legacy discover queries (adds failure_rate, apdex, etc.)\n" + - " issue Issue-based queries\n" + - " error-events Error event queries\n" + - " logs Log queries\n\n" + + "Datasets (canonical name — accepted aliases):\n" + + " spans (default) Span-based queries: span.duration, span.op,\n" + + " transaction, span attributes, cache.hit, etc.\n" + + " Covers most use cases.\n" + + " tracemetrics — metrics, Custom metrics from Sentry.metrics.distribution/\n" + + " metricsEnhanced gauge/count. Query format:\n" + + " aggregation(value,metric_name,metric_type,unit)\n" + + " Example: p50(value,completion.duration_ms,distribution,none)\n" + + " Supported displays: line, area, bar, big_number,\n" + + " categorical_bar\n" + + " discover Legacy discover queries (adds failure_rate,\n" + + " apdex, etc.)\n" + + " issue Issue-based queries\n" + + " error-events — errors, error Error event queries\n" + + " transaction-like — transactions, transaction\n" + + " Transaction-based queries\n" + + " logs — log Log queries\n\n" + + "Dataset values are case-insensitive; Sentry UI/API names like 'errors'\n" + + "and 'transactions' are accepted in addition to the canonical forms.\n\n" + "Aggregates (spans): count, count_unique, sum, avg, percentile, p50, p75,\n" + " p90, p95, p99, p100, eps, epm, any, min, max\n" + "Aggregates (discover adds): failure_count, failure_rate, apdex,\n" + diff --git a/test/commands/dashboard/resolve.test.ts b/test/commands/dashboard/resolve.test.ts index 1e187b921..bb85a3cee 100644 --- a/test/commands/dashboard/resolve.test.ts +++ b/test/commands/dashboard/resolve.test.ts @@ -8,10 +8,12 @@ import { afterEach, beforeEach, describe, 
expect, spyOn, test } from "bun:test"; import { enrichDashboardError, + normalizeDataset, parseDashboardListArgs, parseDashboardPositionalArgs, resolveDashboardId, resolveOrgFromTarget, + validateWidgetEnums, } from "../../../src/commands/dashboard/resolve.js"; // biome-ignore lint/performance/noNamespaceImport: needed for spyOn mocking import * as apiClient from "../../../src/lib/api-client.js"; @@ -542,3 +544,82 @@ describe("enrichDashboardError", () => { ).toThrow(apiErr); }); }); + +// --------------------------------------------------------------------------- +// normalizeDataset + validateWidgetEnums +// --------------------------------------------------------------------------- + +describe("normalizeDataset", () => { + test("returns undefined for undefined input", () => { + expect(normalizeDataset(undefined)).toBeUndefined(); + }); + + test("lowercases canonical values (pass-through)", () => { + expect(normalizeDataset("spans")).toBe("spans"); + expect(normalizeDataset("error-events")).toBe("error-events"); + expect(normalizeDataset("transaction-like")).toBe("transaction-like"); + expect(normalizeDataset("tracemetrics")).toBe("tracemetrics"); + expect(normalizeDataset("logs")).toBe("logs"); + expect(normalizeDataset("issue")).toBe("issue"); + expect(normalizeDataset("discover")).toBe("discover"); + }); + + test("resolves error/errors aliases", () => { + expect(normalizeDataset("errors")).toBe("error-events"); + expect(normalizeDataset("error")).toBe("error-events"); + }); + + test("resolves transaction/transactions aliases", () => { + expect(normalizeDataset("transactions")).toBe("transaction-like"); + expect(normalizeDataset("transaction")).toBe("transaction-like"); + }); + + test("resolves metrics and metricsEnhanced aliases", () => { + expect(normalizeDataset("metrics")).toBe("tracemetrics"); + expect(normalizeDataset("metricsEnhanced")).toBe("tracemetrics"); + }); + + test("resolves log alias", () => { + expect(normalizeDataset("log")).toBe("logs"); + 
}); + + test("case-insensitive matching", () => { + expect(normalizeDataset("Errors")).toBe("error-events"); + expect(normalizeDataset("ERRORS")).toBe("error-events"); + expect(normalizeDataset("SPANS")).toBe("spans"); + expect(normalizeDataset("MetricsEnhanced")).toBe("tracemetrics"); + }); + + test("returns lowercased unknown input unchanged for validator to reject", () => { + expect(normalizeDataset("unknown-dataset")).toBe("unknown-dataset"); + expect(normalizeDataset("DoesNotExist")).toBe("doesnotexist"); + }); +}); + +describe("validateWidgetEnums (with normalizeDataset)", () => { + test("accepts a normalized alias without error", () => { + // Pipeline: caller runs normalizeDataset first, then passes the canonical + // value to validateWidgetEnums. This simulates the wiring in add/edit. + expect(() => + validateWidgetEnums("bar", normalizeDataset("errors")) + ).not.toThrow(); + }); + + test("rejects an unresolved alias when passed un-normalized", () => { + // Guard: forgetting to normalize surfaces as a ValidationError listing + // canonical values, not silent success. 
+ expect(() => validateWidgetEnums("bar", "errors")).toThrow(ValidationError); + }); + + test("rejects unknown datasets (no such alias)", () => { + expect(() => validateWidgetEnums(undefined, "bogus-dataset")).toThrow( + ValidationError + ); + }); + + test("rejects unknown display types", () => { + expect(() => validateWidgetEnums("pie-chart", undefined)).toThrow( + ValidationError + ); + }); +}); diff --git a/test/commands/dashboard/widget/add.test.ts b/test/commands/dashboard/widget/add.test.ts index 4c8bde7f5..23ad433a1 100644 --- a/test/commands/dashboard/widget/add.test.ts +++ b/test/commands/dashboard/widget/add.test.ts @@ -244,6 +244,134 @@ describe("dashboard widget add", () => { expect(addedWidget.queries[0].orderby).toBe("-count()"); }); + test("resolves dataset alias 'errors' to 'error-events' in PUT body", async () => { + const { context } = createMockContext(); + const func = await addCommand.loader(); + await func.call( + context, + { + json: false, + display: "big_number", + dataset: "errors", + query: ["count"], + }, + "123", + "Error Count" + ); + + const body = updateDashboardSpy.mock.calls[0]?.[2]; + const addedWidget = body.widgets.at(-1); + expect(addedWidget.widgetType).toBe("error-events"); + }); + + test("resolves dataset alias 'transactions' to 'transaction-like'", async () => { + const { context } = createMockContext(); + const func = await addCommand.loader(); + await func.call( + context, + { + json: false, + display: "line", + dataset: "transactions", + query: ["count"], + }, + "123", + "Transactions Over Time" + ); + + const body = updateDashboardSpy.mock.calls[0]?.[2]; + const addedWidget = body.widgets.at(-1); + expect(addedWidget.widgetType).toBe("transaction-like"); + }); + + test("resolves dataset alias 'metricsEnhanced' to 'tracemetrics'", async () => { + const { context } = createMockContext(); + const func = await addCommand.loader(); + await func.call( + context, + { + json: false, + display: "line", + dataset: 
"metricsEnhanced", + query: ["p50(value,completion.duration_ms,distribution,none)"], + }, + "123", + "Latency" + ); + + const body = updateDashboardSpy.mock.calls[0]?.[2]; + const addedWidget = body.widgets.at(-1); + expect(addedWidget.widgetType).toBe("tracemetrics"); + }); + + test("dataset alias is resolved BEFORE dataset-aware aggregate validation", async () => { + // failure_rate is only valid for error-events/discover. With the alias + // "errors", dataset-aware validation must see "error-events" (canonical) + // before deciding whether to accept the aggregate. + const { context } = createMockContext(); + const func = await addCommand.loader(); + await func.call( + context, + { + json: false, + display: "big_number", + dataset: "errors", + query: ["failure_rate"], + }, + "123", + "Failure Rate" + ); + + const body = updateDashboardSpy.mock.calls[0]?.[2]; + const addedWidget = body.widgets.at(-1); + expect(addedWidget.widgetType).toBe("error-events"); + expect(addedWidget.queries[0].aggregates).toEqual(["failure_rate()"]); + }); + + test("case-insensitive dataset values are accepted", async () => { + const { context } = createMockContext(); + const func = await addCommand.loader(); + await func.call( + context, + { + json: false, + display: "big_number", + dataset: "ERRORS", + query: ["count"], + }, + "123", + "Errors" + ); + + const body = updateDashboardSpy.mock.calls[0]?.[2]; + const addedWidget = body.widgets.at(-1); + expect(addedWidget.widgetType).toBe("error-events"); + }); + + test("rejects unknown --dataset with canonical list", async () => { + const { context } = createMockContext(); + const func = await addCommand.loader(); + + const err = await func + .call( + context, + { + json: false, + display: "big_number", + dataset: "bogus-dataset", + query: ["count"], + }, + "123", + "Bad" + ) + .catch((e: Error) => e); + + expect(err).toBeInstanceOf(ValidationError); + // Error message surfaces the normalized (lowercased) value. 
+ expect(err.message).toContain("bogus-dataset"); + expect(err.message).toContain("Valid datasets:"); + }); + test("issue dataset respects explicit --group-by over default", async () => { const { context } = createMockContext(); const func = await addCommand.loader(); diff --git a/test/commands/dashboard/widget/edit.test.ts b/test/commands/dashboard/widget/edit.test.ts index c73a05d5a..89f4e3b0f 100644 --- a/test/commands/dashboard/widget/edit.test.ts +++ b/test/commands/dashboard/widget/edit.test.ts @@ -415,4 +415,54 @@ describe("dashboard widget edit", () => { expect(body.widgets[0].widgetType).toBe("discover"); expect(body.widgets[0].queries[0].aggregates).toEqual(["failure_rate()"]); }); + + test("resolves --dataset alias 'errors' to 'error-events' in PUT body", async () => { + const { context } = createMockContext(); + const func = await editCommand.loader(); + await func.call( + context, + { json: false, index: 0, dataset: "errors" }, + "123" + ); + + const body = updateDashboardSpy.mock.calls[0]?.[2]; + expect(body.widgets[0].widgetType).toBe("error-events"); + }); + + test("dataset alias is resolved BEFORE dataset-aware aggregate validation", async () => { + // Regression test for the "aliases resolve too late" bug: failure_rate + // is valid for error-events, so passing --dataset errors --query + // failure_rate must succeed. If the alias is not applied before + // validateAggregateNames runs, "errors" falls through the canonical + // branch and "Unknown aggregate function" is thrown. 
+ const { context } = createMockContext(); + const func = await editCommand.loader(); + await func.call( + context, + { + json: false, + index: 0, + dataset: "errors", + query: ["failure_rate"], + }, + "123" + ); + + const body = updateDashboardSpy.mock.calls[0]?.[2]; + expect(body.widgets[0].widgetType).toBe("error-events"); + expect(body.widgets[0].queries[0].aggregates).toEqual(["failure_rate()"]); + }); + + test("case-insensitive --dataset values are accepted", async () => { + const { context } = createMockContext(); + const func = await editCommand.loader(); + await func.call( + context, + { json: false, index: 0, dataset: "TRANSACTIONS" }, + "123" + ); + + const body = updateDashboardSpy.mock.calls[0]?.[2]; + expect(body.widgets[0].widgetType).toBe("transaction-like"); + }); });