diff --git a/docs/docs.json b/docs/docs.json
index 5c2bddede0c..7f2a6de83d8 100644
--- a/docs/docs.json
+++ b/docs/docs.json
@@ -179,6 +179,10 @@
         }
       ]
     },
+    {
+      "group": "Insights",
+      "pages": ["insights/query", "insights/metrics"]
+    },
     {
       "group": "Using the Dashboard",
       "pages": ["run-tests", "troubleshooting-alerts", "replaying", "bulk-actions"]
@@ -239,10 +243,7 @@
     },
     {
       "group": "Batches API",
-      "pages": [
-        "management/batches/create",
-        "management/batches/stream-items"
-      ]
+      "pages": ["management/batches/create", "management/batches/stream-items"]
     },
     {
       "group": "Runs API",
@@ -286,6 +287,10 @@
         "management/deployments/get-latest",
         "management/deployments/promote"
       ]
+    },
+    {
+      "group": "Query API",
+      "pages": ["management/query/execute"]
     }
   ]
 },
@@ -685,4 +690,4 @@
     "destination": "/migrating-from-v3"
   }
 ]
-}
\ No newline at end of file
+}
diff --git a/docs/images/metrics-built-in.png b/docs/images/metrics-built-in.png
new file mode 100644
index 00000000000..7104a16c4a3
Binary files /dev/null and b/docs/images/metrics-built-in.png differ
diff --git a/docs/images/query-chart-usage-percentiles.png b/docs/images/query-chart-usage-percentiles.png
new file mode 100644
index 00000000000..43142ea28ad
Binary files /dev/null and b/docs/images/query-chart-usage-percentiles.png differ
diff --git a/docs/insights/metrics.mdx b/docs/insights/metrics.mdx
new file mode 100644
index 00000000000..bad0d844e83
--- /dev/null
+++ b/docs/insights/metrics.mdx
@@ -0,0 +1,96 @@
+---
+title: "Metrics dashboards"
+description: "Create custom dashboards with real-time metrics powered by TRQL queries."
+---
+
+## Overview
+
+The Trigger.dev dashboard includes built-in Metrics dashboards, and you can also create your own.
+
+Metrics dashboards are powered by [TRQL queries](/insights/query) with widgets that can be displayed as charts, tables, or single values. They automatically refresh to show the latest data.
+
+![The built-in Metrics dashboard](/images/metrics-built-in.png)
+
+### Visualization types
+
+- **Line chart** - Show trends over time
+- **Bar chart** - Compare values across categories
+- **Area chart** - Display cumulative trends
+- **Table** - Show detailed data in rows
+- **Single value** - Display a single metric (count, sum, average, etc.)
+
+You can also add titles to your dashboard.
+
+## Filtering and time ranges
+
+All widgets on a dashboard use the time range filter applied to the dashboard.
+
+You can also filter the data by:
+
+- Scope: Environment, Project, Organization
+- Tasks
+- Queues
+
+## Creating custom metrics dashboards
+
+1. In the sidebar, click the + icon next to "Metrics".
+2. Name your custom dashboard.
+3. From the top-right you can "Add chart" or "Add title".
+4. For charts, write a [TRQL query](/insights/query) and choose a visualization type.
+5. You can resize and reposition widgets on your dashboards.
+
+## Performance considerations
+
+### Optimize queries for metrics
+
+1. **Use time bucketing** - `timeBucket()` automatically groups by appropriate intervals
+2. **Limit result size** - Add `LIMIT` clauses, especially for table widgets
+3. **Use approximate functions** - `uniq()` instead of `uniqExact()` for faster approximate counts (all three tips are combined in the sketch below)
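+
+A widget query might apply all three of these at once. A minimal sketch (the aliases are illustrative):
+
+```sql
+-- Bucketed, approximate distinct-run counts, capped in size
+SELECT
+  timeBucket() AS bucket,
+  uniq(run_id) AS distinct_runs
+FROM runs
+GROUP BY bucket
+ORDER BY bucket ASC
+LIMIT 500
+```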
+
+## Exporting metric data
+
+Export data from any metric widget:
+
+1. Click the widget menu (three dots)
+2. Select "Copy JSON" or "Copy CSV"
+
+## Best practices
+
+1. **Start simple** - Begin with basic metrics and iterate based on insights
+2. **Use meaningful names** - Give widgets clear, descriptive titles
+3. **Group related metrics** - Organize dashboards by theme (performance, costs, errors)
+4. **Test queries first** - Use the Query page to develop and test queries before adding them to dashboards
+
+## Troubleshooting
+
+### Widget shows "No data"
+
+- Check that your query returns results in the Query page
+- Verify time filters include the period with data
+- Ensure task/queue filters match existing runs
+
+### Widget is slow to load
+
+- Add time range filters to your query
+- Use `LIMIT` clauses
+- Simplify aggregations
+- Check the query execution time in the Query page
+
+### Chart displays incorrectly
+
+- Verify column names match the visualization config
+- Check data types (numbers for charts, dates for time series)
+- Ensure `timeBucket()` is used for time-series charts
+- Review that series columns exist in the query results (a well-formed example follows this list)
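+
+For reference, here is a minimal time-series query that charts cleanly: one `timeBucket()` column for the x-axis plus one numeric aggregate per series (the aliases are illustrative):
+
+```sql
+-- One time column for the x-axis, one numeric series column
+SELECT
+  timeBucket() AS time,
+  countIf(status = 'Failed') AS failed_runs
+FROM runs
+GROUP BY time
+ORDER BY time ASC
+```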
+
+## Limits
+
+Metrics dashboards are powered by Query, so they have [the same limits](/insights/query#limits) as Query.
+
+There is also a separate concurrency limit for metric widgets.
+
+| Limit                     | Details        |
+| :------------------------ | :------------- |
+| Concurrent widget queries | 30 per project |
+
+See [Limits](/limits) for details.
diff --git a/docs/insights/query.mdx b/docs/insights/query.mdx
new file mode 100644
index 00000000000..16af436d878
--- /dev/null
+++ b/docs/insights/query.mdx
@@ -0,0 +1,545 @@
+---
+title: "Query"
+description: "Query allows you to write custom queries against your data using TRQL (Trigger.dev Query Language), a SQL-style language based on ClickHouse SQL. You can query your data through the dashboard, SDK, or REST API."
+---
+
+## Available tables
+
+- `runs`: contains all task run data including status, timing, costs, and metadata
+- `metrics`: contains metrics data for your runs including CPU, memory, and your custom metrics
+
+## Using the Query dashboard
+
+Navigate to the Query page to write and execute queries. The dashboard provides:
+
+- **AI-powered query generation** - Describe what you want in natural language
+- **Syntax highlighting** - SQL syntax highlighting for better readability
+- **Query history** - Access your previous queries
+- **Interactive help** - Built-in documentation for TRQL syntax and functions
+- **Export options** - Download results as JSON or CSV
+
+![The Query dashboard](/images/query-chart-usage-percentiles.png)
+
+## Querying from the SDK
+
+Use `query.execute()` to run TRQL queries programmatically from your backend code:
+
+```typescript
+import { query } from "@trigger.dev/sdk";
+
+// Basic query with defaults (environment scope, json format)
+const result = await query.execute("SELECT run_id, status FROM runs LIMIT 10");
+console.log(result.results); // Array<Record<string, unknown>>
+```
+
+### Type-safe queries
+
+Use the `QueryTable` type to get fully inferred types in your query results:
+
+```typescript
+import { query, type QueryTable } from "@trigger.dev/sdk";
+
+// Type-safe query using QueryTable with specific columns
+const typedResult = await query.execute<QueryTable<"runs", "run_id" | "status" | "triggered_at">>(
+  "SELECT run_id, status, triggered_at FROM runs LIMIT 10"
+);
+
+typedResult.results.forEach((row) => {
+  console.log(row.run_id, row.status); // Fully typed!
+});
+```
+
+### Query options
+
+```typescript
+import { query } from "@trigger.dev/sdk";
+
+const result = await query.execute("SELECT COUNT(*) as count FROM runs", {
+  // Scope: "environment" (default), "project", or "organization"
+  scope: "project",
+
+  // Time period using shorthand (e.g., "7d", "30d", "1h")
+  period: "7d",
+  // Or use an explicit time range
+  // from: new Date("2024-01-01"),
+  // to: new Date("2024-01-31"),
+
+  // Response format: "json" (default) or "csv"
+  format: "json",
+});
+```
+
+### CSV export
+
+Export query results as CSV by setting `format: "csv"`:
+
+```typescript
+const csvResult = await query.execute("SELECT run_id, status, triggered_at FROM runs", {
+  format: "csv",
+  period: "7d",
+});
+
+const lines = csvResult.results.split("\n");
+console.log(lines[0]); // CSV header row
+```
+
+## Querying from the REST API
+
+Execute queries via HTTP POST to `/api/v1/query`:
+
+```sh
+curl -X POST https://api.trigger.dev/api/v1/query \
+  -H "Authorization: Bearer YOUR_SECRET_KEY" \
+  -H "Content-Type: application/json" \
+  -d '{
+    "query": "SELECT run_id, status FROM runs LIMIT 10",
+    "scope": "environment",
+    "period": "7d",
+    "format": "json"
+  }'
+```
+
+See the [API reference](/management/query/execute) for full details.
+
+## TRQL syntax guide
+
+### Basic queries
+
+Select columns from a table:
+
+```sql
+SELECT run_id, task_identifier, status
+FROM runs
+LIMIT 10
+```
+
+Alias columns with `AS`:
+
+```sql
+SELECT task_identifier AS task, count() AS total
+FROM runs
+GROUP BY task
+```
+
+### Using \*
+
+Note that `SELECT *` doesn't return all the columns, only the core columns. This is for performance reasons: the underlying ClickHouse database is columnar, and selecting lots of columns isn't efficient.
+
+Instead, specify the columns you want to return.
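+
+For example, rather than `SELECT * FROM runs`, name just the columns you need (all of these columns appear in examples throughout this guide):
+
+```sql
+-- Prefer explicit columns over SELECT *
+SELECT run_id, task_identifier, status, triggered_at
+FROM runs
+LIMIT 100
+```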
+
+### Filtering with WHERE
+
+Use comparison operators:
+
+```sql
+SELECT run_id, task_identifier FROM runs
+WHERE status = 'Failed'
+```
+
+Available operators:
+
+```sql
+-- Comparison operators
+WHERE status = 'Failed'       -- Equal
+WHERE status != 'Completed'   -- Not equal
+WHERE attempt_count > 3       -- Greater than
+WHERE attempt_count >= 3      -- Greater than or equal
+WHERE attempt_count < 5       -- Less than
+WHERE attempt_count <= 5      -- Less than or equal
+
+-- IN for multiple values
+WHERE status IN ('Failed', 'Crashed')
+
+-- LIKE for pattern matching (% = wildcard)
+WHERE task_identifier LIKE 'email%'
+
+-- ILIKE for case-insensitive matching
+WHERE task_identifier ILIKE '%send%'
+
+-- BETWEEN for ranges
+WHERE triggered_at BETWEEN '2024-01-01' AND '2024-01-31'
+
+-- NULL checks
+WHERE completed_at IS NOT NULL
+WHERE completed_at IS NULL
+
+-- Array column checks
+WHERE has(tags, 'user_12345')
+WHERE notEmpty(tags)
+WHERE hasAny(tags, array('user_12345', 'user_67890'))
+WHERE hasAll(tags, array('user_12345', 'user_67890'))
+WHERE indexOf(tags, 'user_12345') > 0
+WHERE arrayElement(tags, 1) = 'user_12345'
+```
+
+### Sorting and limiting
+
+Sort results with `ORDER BY`:
+
+```sql
+SELECT run_id, compute_cost, triggered_at
+FROM runs
+ORDER BY compute_cost DESC, triggered_at ASC
+LIMIT 50
+```
+
+### Grouping and aggregation
+
+Use `GROUP BY` with aggregate functions:
+
+```sql
+SELECT
+  task_identifier,
+  avg(value) AS avg_memory
+FROM metrics
+WHERE metric_name = 'system.memory.usage'
+GROUP BY task_identifier
+ORDER BY avg_memory DESC
+LIMIT 20
+```
+
+## Available functions
+
+TRQL provides a rich set of functions for data analysis.
+
+### Aggregate functions
+
+- `count()` - Count rows
+- `countIf(col, cond)` - Count rows matching a condition
+- `countDistinct(col)` - Count unique values
+- `sum(col)` - Sum of values
+- `sumIf(col, cond)` - Sum values matching a condition
+- `avg(col)` - Average of values
+- `min(col)` - Minimum value
+- `max(col)` - Maximum value
+- `median(col)` - Median value (50th percentile)
+- `quantile(p)(col)` - Value at percentile p (0-1)
+- `stddevPop(col)` - Population standard deviation
+- `stddevSamp(col)` - Sample standard deviation
+
+Example:
+
+```sql
+SELECT
+  task_identifier,
+  count() AS total_runs,
+  avg(usage_duration) AS avg_duration_ms,
+  median(usage_duration) AS median_duration_ms,
+  quantile(0.95)(usage_duration) AS p95_duration_ms
+FROM runs
+GROUP BY task_identifier
+```
+
+### Date/time functions
+
+**Time bucketing:**
+
+```sql
+-- Auto-bucket by a time period based on the query's time range
+SELECT timeBucket(), count() AS runs
+FROM runs
+GROUP BY timeBucket()
+```
+
+**Date extraction:**
+
+```sql
+SELECT
+  toYear(triggered_at) AS year,
+  toMonth(triggered_at) AS month,
+  toDayOfWeek(triggered_at) AS day_of_week,
+  toHour(triggered_at) AS hour
+FROM runs
+```
+
+**Date truncation:**
+
+```sql
+SELECT
+  toStartOfDay(triggered_at) AS day,
+  count() AS runs_per_day
+FROM runs
+GROUP BY day
+ORDER BY day DESC
+```
+
+**Date arithmetic:**
+
+```sql
+-- Add/subtract time
+SELECT dateAdd('day', 7, triggered_at) AS week_later
+FROM runs
+
+-- Calculate differences
+SELECT dateDiff('minute', executed_at, completed_at) AS duration_minutes
+FROM runs
+WHERE completed_at IS NOT NULL
+```
+
+Common date functions:
+
+- `now()` - Current date and time
+- `today()` - Current date
+- `toDate(dt)` - Convert to date
+- `toStartOfDay(dt)`, `toStartOfHour(dt)`, `toStartOfMonth(dt)` - Truncate to start of period
+- `formatDateTime(dt, format)` - Format datetime as string
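+
+As a quick sketch of the last of these, `formatDateTime` can label rows directly (the format string assumes ClickHouse conventions, since TRQL is based on ClickHouse SQL):
+
+```sql
+-- Label each row with a formatted day string
+SELECT
+  formatDateTime(triggered_at, '%Y-%m-%d') AS day,
+  count() AS runs
+FROM runs
+GROUP BY day
+ORDER BY day DESC
+```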
+
+### String functions
+
+```sql
+SELECT
+  lower(status) AS status_lower,
+  upper(status) AS status_upper,
+  concat(task_identifier, '-', status) AS combined,
+  substring(run_id, 1, 8) AS short_id,
+  length(task_identifier) AS name_length
+FROM runs
+```
+
+Common string functions:
+
+- `length(s)` - String length
+- `lower(s)`, `upper(s)` - Case conversion
+- `concat(s1, s2, ...)` - Concatenate strings
+- `substring(s, offset, len)` - Extract substring
+- `trim(s)` - Remove whitespace
+- `replace(s, from, to)` - Replace occurrences
+- `startsWith(s, prefix)`, `endsWith(s, suffix)` - Check prefixes/suffixes
+
+### Conditional functions
+
+```sql
+SELECT
+  run_id,
+  if(status = 'Failed', 1, 0) AS is_failed,
+  multiIf(
+    status = 'Completed', 'ok',
+    status = 'Failed', 'bad',
+    'other'
+  ) AS status_category,
+  coalesce(completed_at, triggered_at) AS end_time
+FROM runs
+```
+
+- `if(cond, then, else)` - Conditional expression
+- `multiIf(c1, t1, c2, t2, ..., else)` - Multiple conditions (like CASE)
+- `coalesce(a, b, ...)` - First non-null value
+
+### Math functions
+
+```sql
+SELECT
+  round(compute_cost, 4) AS cost_rounded,
+  ceil(usage_duration / 1000) AS duration_seconds_up,
+  floor(usage_duration / 1000) AS duration_seconds_down,
+  abs(compute_cost) AS cost_abs
+FROM runs
+```
+
+### Array functions
+
+Useful for working with tags and other array columns:
+
+```sql
+SELECT
+  run_id,
+  tags,
+  length(tags) AS tag_count,
+  has(tags, 'user_12345') AS has_user_tag,
+  arrayJoin(tags) AS individual_tag -- Expand array to rows
+FROM runs
+WHERE notEmpty(tags)
+```
+
+### JSON functions
+
+Extract data from JSON columns (such as `runs.output`, `runs.error`, and `metrics.attributes`) using dot notation:
+
+```sql
+SELECT
+  run_id,
+  output.message AS output_message,
+  output.count AS count,
+  output.error IS NOT NULL AS has_error
+FROM runs
+WHERE output IS NOT NULL
+```
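+
+JSON fields can be used in `WHERE` clauses in the same way. A sketch of filtering on a field inside `output` (`output.userId` is a hypothetical field name; substitute one your task actually returns):
+
+```sql
+-- output.userId is a hypothetical field name
+SELECT run_id, output.userId AS user_id
+FROM runs
+WHERE output.userId IS NOT NULL
+LIMIT 50
+```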
+
+## Query scopes
+
+Control what data your query can access:
+
+- **`environment`** (default) - Query runs in the current environment only
+- **`project`** - Query runs across all environments in the project
+- **`organization`** - Query runs across all projects in the organization
+
+```typescript
+// Query across all environments in a project
+const result = await query.execute("SELECT environment, count() FROM runs GROUP BY environment", {
+  scope: "project",
+});
+```
+
+## Time ranges
+
+We recommend not filtering on `triggered_at` inside the TRQL query itself. The dashboard, API, and SDK apply a time filter automatically, which is easier to work with and lets the same query be executed across different periods.
+
+### Using period shorthand
+
+```typescript
+await query.execute("SELECT count() FROM runs", {
+  period: "4d", // Last 4 days
+});
+
+// Supported periods: "1h", "6h", "12h", "1d", "7d", "30d", "90d", etc.
+```
+
+### Using explicit dates
+
+```typescript
+await query.execute("SELECT count() FROM runs", {
+  from: new Date("2024-01-01"),
+  to: new Date("2024-01-31"),
+});
+
+// Or use Unix timestamps
+await query.execute("SELECT count() FROM runs", {
+  from: Date.now() - 7 * 24 * 60 * 60 * 1000, // 7 days ago
+  to: Date.now(),
+});
+```
+
+## Example queries
+
+### Failed runs (in the last 24 hours)
+
+```sql
+SELECT
+  task_identifier,
+  run_id,
+  error,
+  triggered_at
+FROM runs
+WHERE status = 'Failed'
+ORDER BY triggered_at DESC
+```
+
+With the time filter set to 24h.
+
+### Task success rate by day
+
+```sql
+SELECT
+  toDate(triggered_at) AS day,
+  task_identifier,
+  countIf(status = 'Completed') AS completed,
+  countIf(status = 'Failed') AS failed,
+  round(completed / (completed + failed) * 100, 2) AS success_rate_pct
+FROM runs
+WHERE status IN ('Completed', 'Failed')
+GROUP BY day, task_identifier
+ORDER BY day DESC, task_identifier
+```
+
+### Top 10 most expensive runs
+
+```sql
+SELECT
+  run_id,
+  task_identifier,
+  compute_cost,
+  usage_duration,
+  triggered_at
+FROM runs
+WHERE compute_cost > 0
+ORDER BY compute_cost DESC
+LIMIT 10
+```
+
+### Average compute duration over time
+
+```sql
+SELECT
+  timeBucket() AS time,
+  task_identifier,
+  avg(usage_duration) AS avg_duration_ms,
+  count() AS run_count
+FROM runs
+WHERE usage_duration IS NOT NULL
+GROUP BY time, task_identifier
+ORDER BY time ASC
+```
+
+### Runs by queue and machine
+
+```sql
+SELECT
+  queue,
+  machine,
+  count() AS run_count,
+  countIf(status = 'Completed') AS completed,
+  countIf(status = 'Failed') AS failed
+FROM runs
+GROUP BY queue, machine
+ORDER BY queue, machine
+```
+
+### CPU utilization over time
+
+Track process CPU utilization bucketed over time.
+
+```sql
+SELECT
+  timeBucket() AS time,
+  avg(value) AS avg_cpu
+FROM metrics
+WHERE metric_name = 'process.cpu.utilization'
+GROUP BY time
+ORDER BY time
+LIMIT 1000
+```
+
+### Memory usage by task (past 7d)
+
+Average memory usage per task identifier over the last 7 days.
+
+```sql
+SELECT
+  task_identifier,
+  avg(value) AS avg_memory
+FROM metrics
+WHERE metric_name = 'system.memory.usage'
+GROUP BY task_identifier
+ORDER BY avg_memory DESC
+LIMIT 20
+```
+
+### Available metric names
+
+List all distinct metric names collected in your environment.
+
+```sql
+SELECT
+  metric_name,
+  count() AS sample_count
+FROM metrics
+GROUP BY metric_name
+ORDER BY sample_count DESC
+LIMIT 100
+```
+
+## Best practices
+
+1. **Use the built-in time filtering** - The dashboard, API, and SDK apply a time filter automatically, which is easier to work with and lets the same query run across different periods.
+2. **Use LIMIT** - Add a `LIMIT` clause to reduce the rows returned if you don't need everything.
+3. **Use appropriate aggregations** - For large datasets, use `uniq()` instead of `uniqExact()` for approximate but faster counts (see the sketch after this list).
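+
+For instance, an approximate distinct count (a sketch using `uniq()`, which trades exactness for speed):
+
+```sql
+-- Approximate distinct count; faster than uniqExact()
+SELECT uniq(task_identifier) AS distinct_tasks
+FROM runs
+```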
+
+## Limits
+
+We have several limits in place to prevent abuse and ensure performance:
+
+- **Concurrency limit**: We limit the number of concurrent queries per project.
+- **Row limit**: We limit the number of rows returned to 10,000.
+- **Time restrictions**: We limit the time period you can query.
+- **Time/memory limit**: We limit the memory a query can use and how long it can run for, as well as other limits such as AST complexity.
+
+See [Limits](/limits) for current quota details.
diff --git a/docs/limits.mdx b/docs/limits.mdx
index 45da4e89aba..d507d1e0c91 100644
--- a/docs/limits.mdx
+++ b/docs/limits.mdx
@@ -57,8 +57,8 @@ If you're creating schedules for your user you will definitely need to request m
 
 ## Projects
 
-| Pricing tier | Limit |
-| :----------- | :----------------- |
+| Pricing tier | Limit               |
+| :----------- | :------------------ |
 | All tiers    | 10 per organization |
 
 Each project receives its own concurrency allocation. If you need to support multiple tenants with the same codebase but different environment variables, see the [Multi-tenant applications](/deploy-environment-variables#multi-tenant-applications) section for a recommended workaround.
@@ -112,7 +112,9 @@ Batch triggering uses a token bucket algorithm to rate limit the number of runs
 
   **How it works**: You can burst up to your bucket size, then tokens refill at the specified rate. For example, a Free user can trigger 1,200 runs immediately, then must wait for tokens to refill (100 runs become available every 10 seconds).
 
-  When you hit batch rate limits, the SDK throws a `BatchTriggerError` with `isRateLimited: true`. See [Handling batch trigger errors](/triggering#handling-batch-trigger-errors) for how to detect and react to rate limits in your code.
+  When you hit batch rate limits, the SDK throws a `BatchTriggerError` with `isRateLimited: true`.
+  See [Handling batch trigger errors](/triggering#handling-batch-trigger-errors) for how to detect
+  and react to rate limits in your code.
 
 ## Batch processing concurrency
 
@@ -186,6 +188,36 @@ An alert destination is a single email address, Slack channel, or webhook URL th
 
 If you're on the Pro plan and need more than the plan limit, you can request more by contacting us via [email](https://trigger.dev/contact) or [Discord](https://trigger.dev/discord).
 
+## Query
+
+Query execution is subject to the following limits:
+
+| Limit              | Details               |
+| :----------------- | :-------------------- |
+| Max execution time | 10 seconds per query  |
+| Max result rows    | 10,000 rows per query |
+| Concurrent queries | 3 per project         |
+
+### Query lookback period
+
+The maximum time range a query can look back is based on your plan:
+
+| Pricing tier | Limit   |
+| :----------- | :------ |
+| Free         | 1 day   |
+| Hobby        | 7 days  |
+| Pro          | 30 days |
+
+If your query's time range exceeds your plan's lookback limit, it will be automatically clipped to the maximum allowed period.
+
+## Metric widget concurrency
+
+Metric widgets have a separate concurrency limit per project:
+
+| Limit                     | Details        |
+| :------------------------ | :------------- |
+| Concurrent widget queries | 30 per project |
+
 ## Machines
 
 The default machine is `small-1x` which has 0.5 vCPU and 0.5 GB of RAM. You can optionally configure a higher spec machine which will increase the cost of running the task but can also improve the performance of the task if it is CPU or memory bound.
diff --git a/docs/management/query/execute.mdx b/docs/management/query/execute.mdx
new file mode 100644
index 00000000000..ce0149e2d22
--- /dev/null
+++ b/docs/management/query/execute.mdx
@@ -0,0 +1,11 @@
+---
+title: "Execute a query"
+openapi: "v3-openapi POST /api/v1/query"
+---
+
+See the [Query documentation](/insights/query#example-queries) for comprehensive examples including:
+
+- Failed runs analysis
+- Task success rates over time
+- Cost tracking and optimization
+- Performance metrics and percentiles
diff --git a/docs/v3-openapi.yaml b/docs/v3-openapi.yaml
index 2fdcd0afd9d..5be8dbef3ce 100644
--- a/docs/v3-openapi.yaml
+++ b/docs/v3-openapi.yaml
@@ -530,7 +530,17 @@ paths:
               description: The deployment ID
             status:
               type: string
-              enum: ["PENDING", "INSTALLING", "BUILDING", "DEPLOYING", "DEPLOYED", "FAILED", "CANCELED", "TIMED_OUT"]
+              enum:
+                [
+                  "PENDING",
+                  "INSTALLING",
+                  "BUILDING",
+                  "DEPLOYING",
+                  "DEPLOYED",
+                  "FAILED",
+                  "CANCELED",
+                  "TIMED_OUT",
+                ]
               description: The current status of the deployment
             contentHash:
               type: string
@@ -622,7 +632,17 @@ paths:
              description: The deployment ID
             status:
               type: string
-              enum: ["PENDING", "INSTALLING", "BUILDING", "DEPLOYING", "DEPLOYED", "FAILED", "CANCELED", "TIMED_OUT"]
+              enum:
+                [
+                  "PENDING",
+                  "INSTALLING",
+                  "BUILDING",
+                  "DEPLOYING",
+                  "DEPLOYED",
+                  "FAILED",
+                  "CANCELED",
+                  "TIMED_OUT",
+                ]
               description: The current status of the deployment
             contentHash:
               type: string
@@ -733,6 +753,122 @@ paths:
               -H "Authorization: Bearer tr_dev_1234" \
               -H "Content-Type: application/json"
 
+  "/api/v1/query":
+    post:
+      operationId: execute_query_v1
+      summary: Execute a TRQL query
+      description: Execute a TRQL (Trigger.dev Query Language) query against your run data. TRQL is a SQL-style query language that allows you to analyze runs, calculate metrics, and export data.
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              "$ref": "#/components/schemas/ExecuteQueryRequestBody"
+      responses:
+        "200":
+          description: Query executed successfully
+          content:
+            application/json:
+              schema:
+                "$ref": "#/components/schemas/ExecuteQueryResponse"
+        "400":
+          description: Invalid query or request parameters
+          content:
+            application/json:
+              schema:
+                type: object
+                properties:
+                  error:
+                    type: string
+                    description: Error message describing the query error
+        "401":
+          description: Unauthorized - API key is missing or invalid
+        "500":
+          description: Internal server error during query execution
+      tags:
+        - query
+      security:
+        - secretKey: []
+      x-codeSamples:
+        - lang: typescript
+          label: SDK - Basic query
+          source: |-
+            import { query } from "@trigger.dev/sdk";
+
+            // Basic query with defaults (environment scope, json format)
+            const result = await query.execute(
+              "SELECT run_id, status FROM runs LIMIT 10"
+            );
+            console.log(result.results);
+        - lang: typescript
+          label: SDK - Type-safe query
+          source: |-
+            import { query, type QueryTable } from "@trigger.dev/sdk";
+
+            // Type-safe query using QueryTable
+            const result = await query.execute<
+              QueryTable<"runs", "run_id" | "status" | "triggered_at">
+            >(
+              "SELECT run_id, status, triggered_at FROM runs LIMIT 10"
+            );
+
+            result.results.forEach(row => {
+              console.log(row.run_id, row.status); // Fully typed!
+            });
+        - lang: typescript
+          label: SDK - With options
+          source: |-
+            import { query } from "@trigger.dev/sdk";
+
+            const result = await query.execute(
+              "SELECT COUNT(*) as count FROM runs WHERE status = 'Failed'",
+              {
+                scope: "project", // Query across all environments
+                period: "7d", // Last 7 days
+                format: "json"
+              }
+            );
+        - lang: typescript
+          label: SDK - CSV export
+          source: |-
+            import { query } from "@trigger.dev/sdk";
+
+            const csvResult = await query.execute(
+              "SELECT run_id, status, triggered_at FROM runs",
+              {
+                format: "csv",
+                period: "30d"
+              }
+            );
+
+            // csvResult.results is a CSV string
+            const lines = csvResult.results.split('\n');
+        - lang: curl
+          label: cURL - Basic query
+          source: |-
+            curl -X POST "https://api.trigger.dev/api/v1/query" \
+              -H "Authorization: Bearer tr_dev_1234" \
+              -H "Content-Type: application/json" \
+              -d '{
+                "query": "SELECT run_id, status FROM runs LIMIT 10",
+                "scope": "environment",
+                "period": "7d",
+                "format": "json"
+              }'
+        - lang: curl
+          label: cURL - Aggregation query
+          source: |-
+            curl -X POST "https://api.trigger.dev/api/v1/query" \
+              -H "Authorization: Bearer tr_dev_1234" \
+              -H "Content-Type: application/json" \
+              -d '{
+                "query": "SELECT task_identifier, count() as runs, countIf(status = '\''Failed'\'') as failures FROM runs GROUP BY task_identifier",
+                "scope": "environment",
+                "from": "2024-01-01T00:00:00Z",
+                "to": "2024-01-31T23:59:59Z",
+                "format": "json"
+              }'
+
   "/api/v1/runs/{runId}/reschedule":
     parameters:
       - $ref: "#/components/parameters/runId"
@@ -2945,3 +3081,61 @@ components:
     stackTrace:
       type: string
       example: "Error: Something went wrong"
+    ExecuteQueryRequestBody:
+      type: object
+      required:
+        - query
+      properties:
+        query:
+          type: string
+          description: The TRQL query to execute
+          example: "SELECT run_id, status, triggered_at FROM runs WHERE status = 'Failed' LIMIT 10"
+        scope:
+          type: string
+          enum: ["environment", "project", "organization"]
+          default: "environment"
+          description: The scope of data to query - environment (default), project, or organization
+        period:
+          type: string
+          nullable: true
+          description: Time period shorthand (e.g., "7d", "30d", "1h"). Cannot be used with from/to.
+          example: "7d"
+        from:
+          type: string
+          format: date-time
+          nullable: true
+          description: Start of time range as ISO 8601 timestamp. Must be used with 'to'.
+          example: "2024-01-01T00:00:00Z"
+        to:
+          type: string
+          format: date-time
+          nullable: true
+          description: End of time range as ISO 8601 timestamp. Must be used with 'from'.
+          example: "2024-01-31T23:59:59Z"
+        format:
+          type: string
+          enum: ["json", "csv"]
+          default: "json"
+          description: Response format - "json" returns structured data (default), "csv" returns a CSV string
+    ExecuteQueryResponse:
+      oneOf:
+        - type: object
+          description: JSON format response
+          properties:
+            format:
+              type: string
+              enum: ["json"]
+            results:
+              type: array
+              items:
+                type: object
+              description: Array of result rows
+        - type: object
+          description: CSV format response
+          properties:
+            format:
+              type: string
+              enum: ["csv"]
+            results:
+              type: string
+              description: CSV-formatted results