From dd149b79d30114af92fd10e368d0b6e816e5aa52 Mon Sep 17 00:00:00 2001 From: Cody De Arkland Date: Fri, 8 Aug 2025 10:57:20 -0700 Subject: [PATCH 01/15] correcting region storage for symbol and source map metadata --- docs/organization/data-storage-location/index.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/organization/data-storage-location/index.mdx b/docs/organization/data-storage-location/index.mdx index dde6459d69bcf..738b6a2ddf83c 100644 --- a/docs/organization/data-storage-location/index.mdx +++ b/docs/organization/data-storage-location/index.mdx @@ -30,6 +30,7 @@ Here’s a list of the types of data that will be stored in whichever data stora - Profiles - Release health - Releases, debug symbols, and source maps +- Debug symbol metadata and source map metadata - Session replays - Backups for these resources @@ -48,7 +49,6 @@ Here’s a list of the types of data that may be stored in the US. - Project metadata - DSN keys - Detailed usage data -- Debug symbol metadata and source map metadata - Sentry applications - SSO, SAML, and SCIM metadata From 992cf9887b625d0478dcb6ffa5d936e08e81b9b2 Mon Sep 17 00:00:00 2001 From: Cody De Arkland Date: Sat, 9 Aug 2025 02:57:55 -0700 Subject: [PATCH 02/15] Rewrite JS span-metrics examples to three focused scenarios (Checkout, Media Upload, Search Autocomplete) using existing Challenge/Solution/Frontend/Backend/How-it-works structure. Emphasizes where to instrument in React vs backend and low-cardinality attributes for span metrics. --- .../common/tracing/span-metrics/examples.mdx | 461 +++++++++--------- 1 file changed, 231 insertions(+), 230 deletions(-) diff --git a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx index c992a3243e24b..02ba037a3bf1a 100644 --- a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx +++ b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx @@ -12,299 +12,300 @@ These examples assume you have already set up traci This guide provides practical examples of using span attributes and metrics to solve common monitoring and debugging challenges across your entire application stack. Each example demonstrates how to instrument both frontend and backend components, showing how they work together within a distributed trace to provide end-to-end visibility. -## File Upload and Processing Pipeline +## E-Commerce Checkout Flow (React + Backend) -**Challenge:** Understanding bottlenecks and failures in multi-step file processing operations across client and server components. +**Challenge:** Capture end-to-end checkout health and diagnose where time is spent or failures occur across UI, API, payments, inventory, and fulfillment. -**Solution:** Track the entire file processing pipeline with detailed metrics at each stage, from client-side upload preparation through server-side processing. +**Solution:** Start a client span on the checkout action and continue the trace on the backend with spans for each business step. Add low-cardinality business attributes (no PII) to power span metrics. -**Frontend Instrumentation:** +**Frontend (React) — instrument the Checkout click handler:** ```javascript -// Client-side file upload handling +// In your Checkout button click handler Sentry.startSpan( { - name: "Client File Upload", - op: "file.upload.client", + name: "Checkout", + op: "ui.action", attributes: { - // Static details available at the start - "file.size_bytes": 15728640, // 15MB - "file.type": "image/jpeg", - "file.name": "user-profile.jpg", - "client.compression_applied": true, + // cart.* values known at click time; prefer minor units + currency + "cart.item_count": cart.items.length, + "cart.value_minor": cart.totalMinor, // e.g., cents + "cart.currency": cart.currency || "USD", }, }, async (span) => { try { - // Begin upload process - const uploader = new FileUploader(file); - - // Update progress as upload proceeds - uploader.on("progress", (progressEvent) => { - span.setAttribute("upload.percent_complete", progressEvent.percent); - span.setAttribute("upload.bytes_transferred", progressEvent.loaded); - }); - - uploader.on("retry", (retryCount) => { - span.setAttribute("upload.retry_count", retryCount); + const res = await fetch("/api/checkout", { + method: "POST", + headers: { "content-type": "application/json" }, + body: JSON.stringify({ + // Avoid PII; internal IDs only + items: cart.items.map((i) => ({ sku: i.sku, qty: i.qty })), + coupon: cart.coupon || null, + }), }); - const result = await uploader.start(); - - // Set final attributes after completion - span.setAttribute("upload.total_time_ms", result.totalTime); - span.setAttribute("upload.success", true); - span.setAttribute("upload.server_file_id", result.fileId); + if (!res.ok) throw new Error("Checkout failed"); + const data = await res.json(); - return result; + // Record server-assigned order id after success + span.setAttribute("order.id", data.orderId); + span.setAttribute("payment.provider", data.paymentProvider); } catch (error) { - // Record failure information - span.setAttribute("upload.success", false); + span.setStatus?.("error"); Sentry.captureException(error); + } finally { + span.finish?.(); } } ); ``` -**Backend Instrumentation:** +Where to put this in your app: +- In the `onClick` for the checkout CTA, or inside the submit handler of your checkout form/container component. +- Auto-instrumentation will add client `fetch` spans; keep the explicit UI span for business context. -```javascript -// Server-side processing -Sentry.startSpan( - { - name: "Server File Processing", - op: "file.process.server", - attributes: { - // Server processing steps - "processing.steps_completed": [ - "virus_scan", - "resize", - "compress", - "metadata", - ], - - // Storage operations - "storage.provider": "s3", - "storage.region": "us-west-2", - "storage.upload_time_ms": 850, - - // CDN configuration - "cdn.provider": "cloudfront", - "cdn.propagation_ms": 1500, - }, - }, - async () => { - // Server-side processing implementation - } -); -``` - -**How the Trace Works Together:** -The frontend span initiates the trace and handles the file upload process. It propagates the trace context to the backend through the upload request headers. The backend span continues the trace, processing the file and storing it. This creates a complete picture of the file's journey from client to CDN, allowing you to: +**Backend — instrument each business step:** -- Identify bottlenecks at any stage (client prep, upload, server processing, CDN propagation) -- Track end-to-end processing times and success rates -- Monitor resource usage across the stack -- Correlate client-side upload issues with server-side processing errors - -## LLM Integration Monitoring - -**Challenge:** Managing cost (token usage) and performance of LLM integrations across frontend and backend components. - -**Solution:** Tracking of the entire LLM interaction flow, from user input to response rendering. +```javascript +// Example: Node/Express +app.post("/api/checkout", async (req, res) => { + await Sentry.startSpan({ name: "Process order", op: "business.logic" }, async (orderSpan) => { + try { + await Sentry.startSpan({ name: "Validate cart", op: "validate" }, async () => { + // validate item availability, pricing, coupon + }); -**Frontend Instrumentation:** + const payment = await Sentry.startSpan({ name: "Authorize payment", op: "payment" }, async (span) => { + span.setAttribute("payment.provider", "stripe"); + // call payment provider + return { id: "pi_123", status: "authorized" }; + }); + orderSpan.setAttribute("payment.status", payment.status); -```javascript -// Client-side LLM interaction handling -Sentry.startSpan( - { - name: "LLM Client Interaction", - op: "gen_ai.generate_text", - attributes: { - // Initial metrics available at request time - "input.char_count": 280, - "input.language": "en", - "input.type": "question", - }, - }, - async (span) => { - const startTime = performance.now(); + await Sentry.startSpan({ name: "Reserve inventory", op: "inventory" }, async () => { + // reserve stock in DB or external OMS + }); - // Begin streaming response from LLM API - const stream = await llmClient.createCompletion({ - prompt: userInput, - stream: true, - }); + const orderId = await Sentry.startSpan({ name: "Create order", op: "db.write" }, async () => { + // insert order; return internal id + return "ord_abc123"; + }); + orderSpan.setAttribute("order.id", orderId); - // Record time to first token when received - let firstTokenReceived = false; - let tokensReceived = 0; + await Sentry.startSpan({ name: "Send confirmation", op: "email" }, async () => { + // enqueue email or call provider + }); - for await (const chunk of stream) { - tokensReceived++; + res.json({ orderId, paymentProvider: "stripe" }); + } catch (e) { + orderSpan.setStatus?.("error"); + res.status(500).json({ error: "Checkout failed" }); + } + }); +}); +``` - // Record time to first token - if (!firstTokenReceived && chunk.content) { - firstTokenReceived = true; - const timeToFirstToken = performance.now() - startTime; +**How the trace works together:** +- UI span starts on click → fetch carries trace headers → backend continues the trace. +- Child spans highlight where time is spent (validation, payment, inventory, DB, email). +- Span metrics let you track latency percentiles and failure rates by attributes like `payment.provider`, `cart.item_count`. - span.setAttribute("ui.time_to_first_token_ms", timeToFirstToken); - } +What to monitor with span metrics: +- p95 duration of `op:ui.action` Checkout by `cart.item_count` bucket. +- Error rate for `op:payment` by `payment.provider`. +- Self time of `op:db.*` spans by table to spot slow queries. - // Process and render the chunk - renderChunkToUI(chunk); - } +## Media Upload with Background Processing (React + Worker) - // Record final metrics after stream completes - const totalRequestTime = performance.now() - startTime; +**Challenge:** Understand user-perceived upload time vs. server-side processing (scan, transcode, thumbnail) and link the async worker back to the initiating request. - span.setAttribute("ui.total_request_time_ms", totalRequestTime); - span.setAttribute("stream.rendering_mode", "markdown"); - span.setAttribute("stream.tokens_received", tokensReceived); - } -); -``` +**Solution:** Start a client span when the upload begins; on the backend, create spans for signed-URL issuance, enqueue a job, and instrument worker phases. Propagate trace context via job metadata to stitch the traces. -**Backend Instrumentation:** +**Frontend (React) — instrument upload begin and progress:** ```javascript -// Server-side LLM processing Sentry.startSpan( { - name: "LLM API Processing", - op: "gen_ai.generate_text", + name: "Upload media", + op: "file.upload", attributes: { - // Model configuration - known at start - "llm.model": "claude-3-5-sonnet-20241022", - "llm.temperature": 0.5, - "llm.max_tokens": 4096, + "file.size_bytes": file.size, + "file.mime_type": file.type, + "upload.chunked": true, }, }, async (span) => { - const startTime = Date.now(); - try { - // Check rate limits before processing - const rateLimits = await getRateLimits(); - span.setAttribute("llm.rate_limit_remaining", rateLimits.remaining); - - // Make the actual API call to the LLM provider - const response = await llmProvider.generateCompletion({ - model: "claude-3-5-sonnet-20241022", - prompt: preparedPrompt, - temperature: 0.5, - max_tokens: 4096, + const urlRes = await fetch("/api/uploads/signed-url", { method: "POST" }); + const { uploadUrl, objectKey } = await urlRes.json(); + + // Use XHR for progress; fetch is illustrative here + await uploadWithProgress(uploadUrl, file, (progress) => { + span.setAttribute("upload.bytes_transferred", progress.bytes); + span.setAttribute("upload.percent_complete", progress.percent); }); - // Record token usage and performance metrics - span.setAttribute("llm.prompt_tokens", response.usage.prompt_tokens); - span.setAttribute( - "llm.completion_tokens", - response.usage.completion_tokens - ); - span.setAttribute("llm.total_tokens", response.usage.total_tokens); - span.setAttribute("llm.api_latency_ms", Date.now() - startTime); - - // Calculate and record cost based on token usage - const cost = calculateCost( - response.usage.prompt_tokens, - response.usage.completion_tokens, - "claude-3-5-sonnet-20241022" - ); - span.setAttribute("llm.cost_usd", cost); + await fetch("/api/uploads/start-processing", { + method: "POST", + headers: { "content-type": "application/json" }, + body: JSON.stringify({ key: objectKey }), + }); - return response; - } catch (error) { - // Record error details - span.setAttribute("error", true); - Sentry.captureException(error); + span.setAttribute("upload.success", true); + } catch (e) { + span.setStatus?.("error"); + Sentry.captureException(e); + } finally { + span.finish?.(); } } ); ``` -**How the Trace Works Together:** -The frontend span captures the user interaction and UI rendering performance, while the backend span tracks the actual LLM API interaction. The distributed trace shows the complete flow from user input to rendered response, enabling you to: +Place this where the user triggers an upload (dropzone onDrop, file input onChange, or explicit Upload button). -- Analyze end-to-end response times and user experience -- Track costs and token usage patterns -- Optimize streaming performance and UI rendering -- Monitor rate limits and queue times -- Correlate user inputs with model performance +**Backend — signed URL, enqueue job, and worker phases:** -## E-Commerce Transaction Flow +```javascript +// Issue signed URL +app.post("/api/uploads/signed-url", async (req, res) => { + await Sentry.startSpan({ name: "Get signed URL", op: "storage.sign" }, async (span) => { + span.setAttribute("storage.provider", "s3"); + span.setAttribute("storage.bucket", "media"); + // generate URL + res.json({ uploadUrl: "https://s3...", objectKey: "uploads/abc.jpg" }); + }); +}); + +// Enqueue processing job and propagate context +app.post("/api/uploads/start-processing", async (req, res) => { + await Sentry.startSpan({ name: "Enqueue media job", op: "queue.enqueue" }, async (span) => { + const trace = Sentry.getCurrentScope()?.getPropagationContext?.(); + queue.publish("media.process", { key: req.body.key, trace }); + span.setAttribute("queue.name", "media.process"); + res.json({ ok: true }); + }); +}); + +// Worker +worker.on("message", async (msg) => { + const parentContext = msg.trace; // restore trace/parent if available + await Sentry.startSpan({ name: "Process media", op: "worker.job", parentContext }, async (jobSpan) => { + await Sentry.startSpan({ name: "Virus scan", op: "security.scan" }, async (span) => { + span.setAttribute("scan.engine", "clamav"); + }); + await Sentry.startSpan({ name: "Transcode", op: "media.transcode" }, async (span) => { + span.setAttribute("transcode.preset", "720p"); + }); + await Sentry.startSpan({ name: "Thumbnail", op: "media.thumbnail" }, async () => {}); + }); +}); +``` + +**How the trace works together:** +- Client span covers upload + API calls; backend spans cover signing and enqueue; worker spans show processing phases. +- Linking the worker trace to the enqueue span maintains end-to-end visibility across async boundaries. + +What to monitor with span metrics: +- p90 transcode time by `transcode.preset`. +- Failure counts for `op:security.scan` by `scan.engine`. +- Time-to-ready (sum of worker phases) by `file.size_bucket`. -**Challenge:** Understanding the complete purchase flow and identifying revenue-impacting issues across the entire stack. +## Search Autocomplete (debounced, cancellable, cached) -**Solution:** Track the full checkout process from cart interaction to order fulfillment. +**Challenge:** Users type quickly; you need to debounce requests, cancel in-flight calls, and rely on cache where possible while keeping latency low. -**Frontend Instrumentation:** +**Solution:** Start a client span for each debounced request; mark aborted requests; on the server, instrument cache lookup, search engine query, and ranking. + +**Frontend (React) — instrument debounced search:** ```javascript -// Client-side checkout process -Sentry.startSpan( - { - name: "Checkout UI Flow", - op: "commerce.checkout.client", - attributes: { - // Cart interaction metrics - "cart.items_added": 3, - "cart.items_removed": 0, - "cart.update_count": 2, - - // User interaction tracking - "ui.form_completion_time_ms": 45000, - "ui.payment_method_changes": 1, - "ui.address_validation_retries": 0, - }, - }, - async () => { - // Client-side checkout implementation - } -); +let abortController; + +async function runSearch(query, debounceMs = 150) { + clearTimeout(runSearch._t); + return new Promise((resolve) => { + runSearch._t = setTimeout(() => { + if (abortController) abortController.abort(); + abortController = new AbortController(); + + Sentry.startSpan( + { + name: "Search autocomplete", + op: "http.client", + attributes: { + "query.length": (query || "").length, + "ui.debounce_ms": debounceMs, + }, + }, + async (span) => { + try { + const res = await fetch(`/api/search?q=${encodeURIComponent(query)}`, + { signal: abortController.signal } + ); + const data = await res.json(); + span.setAttribute("results.count", data.results.length); + resolve(data); + } catch (e) { + if (e.name === "AbortError") { + span.setAttribute("ui.aborted", true); + span.setStatus?.("cancelled"); + resolve({ results: [] }); + } else { + span.setStatus?.("error"); + Sentry.captureException(e); + resolve({ results: [] }); + } + } finally { + span.finish?.(); + } + } + ); + }, debounceMs); + }); +} ``` -**Backend Instrumentation:** +Place this in your search input hook/component after applying a debounce. + +**Backend — cache, search engine, rank:** ```javascript -// Server-side order processing -Sentry.startSpan( - { - name: "Order Processing", - op: "commerce.order.server", - attributes: { - // Order details - "order.id": "ord_123456789", - "order.total_amount": 159.99, - "order.currency": "USD", - "order.items": ["SKU123", "SKU456", "SKU789"], - - // Payment processing - "payment.provider": "stripe", - "payment.method": "credit_card", - "payment.processing_time_ms": 1200, - - // Inventory checks - "inventory.all_available": true, - - // Fulfillment - "fulfillment.warehouse": "WEST-01", - "fulfillment.shipping_method": "express", - "fulfillment.estimated_delivery": "2024-03-20", - }, - }, - async () => { - // Server-side order processing - } -); +app.get("/api/search", async (req, res) => { + const q = String(req.query.q || ""); + + await Sentry.startSpan({ name: "Search", op: "search" }, async (root) => { + const cacheHit = await Sentry.startSpan({ name: "Cache lookup", op: "cache" }, async (span) => { + const hit = await cache.get(q); + span.setAttribute("cache.hit", Boolean(hit)); + if (hit) res.json(hit); + return Boolean(hit); + }); + if (cacheHit) return; + + const results = await Sentry.startSpan({ name: "Query engine", op: "external.search" }, async (span) => { + span.setAttribute("search.engine", "elasticsearch"); + span.setAttribute("search.index", "products_v2"); + span.setAttribute("search.mode", q.length < 3 ? "prefix" : "fuzzy"); + return searchEngine.query(q); + }); + + await Sentry.startSpan({ name: "Rank results", op: "compute.rank" }, async (span) => { + span.setAttribute("rank.model", "bm25"); + span.setAttribute("rank.version", "2.1"); + }); + + res.json({ results }); + }); +}); ``` -**How the Trace Works Together:** -The frontend span tracks the user's checkout experience, while the backend span handles order processing and fulfillment. The distributed trace provides visibility into the entire purchase flow, allowing you to: +**How the trace works together:** +- Each debounced request becomes a client span; aborted ones are marked and short. +- Server spans show cache effectiveness, engine latency, and ranking time. -- Analyze checkout funnel performance and drop-off points -- Track payment processing success rates and timing -- Monitor inventory availability impact on conversions -- Measure end-to-end order completion times -- Identify friction points in the user experience +What to monitor with span metrics: +- p95 latency of `op:http.client` by `query.length` bucket. +- Cancellation rate via `ui.aborted=true`. +- Cache hit rate for `op:cache`. From 7b1ffa06981192c83b004369276c183a83de07ef Mon Sep 17 00:00:00 2001 From: Cody De Arkland Date: Sat, 9 Aug 2025 03:02:10 -0700 Subject: [PATCH 03/15] Simplify examples to single-span patterns (no nested spans). Emphasize auto-instrumentation; keep attributes minimal and business-focused for easier customization. --- .../common/tracing/span-metrics/examples.mdx | 113 +++++++----------- 1 file changed, 44 insertions(+), 69 deletions(-) diff --git a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx index 02ba037a3bf1a..06ad95e8a0cfb 100644 --- a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx +++ b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx @@ -65,41 +65,28 @@ Where to put this in your app: - In the `onClick` for the checkout CTA, or inside the submit handler of your checkout form/container component. - Auto-instrumentation will add client `fetch` spans; keep the explicit UI span for business context. -**Backend — instrument each business step:** +**Backend — single span per request; rely on auto-instrumentation:** ```javascript // Example: Node/Express app.post("/api/checkout", async (req, res) => { - await Sentry.startSpan({ name: "Process order", op: "business.logic" }, async (orderSpan) => { + await Sentry.startSpan({ name: "Checkout (server)", op: "http.server" }, async (span) => { try { - await Sentry.startSpan({ name: "Validate cart", op: "validate" }, async () => { - // validate item availability, pricing, coupon - }); - - const payment = await Sentry.startSpan({ name: "Authorize payment", op: "payment" }, async (span) => { - span.setAttribute("payment.provider", "stripe"); - // call payment provider - return { id: "pi_123", status: "authorized" }; - }); - orderSpan.setAttribute("payment.status", payment.status); - - await Sentry.startSpan({ name: "Reserve inventory", op: "inventory" }, async () => { - // reserve stock in DB or external OMS - }); - - const orderId = await Sentry.startSpan({ name: "Create order", op: "db.write" }, async () => { - // insert order; return internal id - return "ord_abc123"; - }); - orderSpan.setAttribute("order.id", orderId); - - await Sentry.startSpan({ name: "Send confirmation", op: "email" }, async () => { - // enqueue email or call provider + // validate, authorize payment, reserve inventory, create order, send email + const orderId = await createOrder(req.body); + + // Keep attributes low-cardinality and business-focused + span.setAttributes({ + "order.id": orderId, + "payment.provider": "stripe", + "payment.status": "authorized", + "inventory.reserved": true, + "email.enqueued": true, }); res.json({ orderId, paymentProvider: "stripe" }); } catch (e) { - orderSpan.setStatus?.("error"); + span.setStatus?.("error"); res.status(500).json({ error: "Checkout failed" }); } }); @@ -108,7 +95,7 @@ app.post("/api/checkout", async (req, res) => { **How the trace works together:** - UI span starts on click → fetch carries trace headers → backend continues the trace. -- Child spans highlight where time is spent (validation, payment, inventory, DB, email). +- Rely on your SDK's auto-instrumentation for HTTP/DB/external calls; keep this example span simple. - Span metrics let you track latency percentiles and failure rates by attributes like `payment.provider`, `cart.item_count`. What to monitor with span metrics: @@ -122,7 +109,7 @@ What to monitor with span metrics: **Solution:** Start a client span when the upload begins; on the backend, create spans for signed-URL issuance, enqueue a job, and instrument worker phases. Propagate trace context via job metadata to stitch the traces. -**Frontend (React) — instrument upload begin and progress:** +**Frontend (React) — instrument upload begin and completion:** ```javascript Sentry.startSpan( @@ -132,27 +119,28 @@ Sentry.startSpan( attributes: { "file.size_bytes": file.size, "file.mime_type": file.type, - "upload.chunked": true, }, }, async (span) => { + const t0 = performance.now(); try { const urlRes = await fetch("/api/uploads/signed-url", { method: "POST" }); const { uploadUrl, objectKey } = await urlRes.json(); - // Use XHR for progress; fetch is illustrative here - await uploadWithProgress(uploadUrl, file, (progress) => { - span.setAttribute("upload.bytes_transferred", progress.bytes); - span.setAttribute("upload.percent_complete", progress.percent); - }); + // Upload file to storage (signed URL) + await fetch(uploadUrl, { method: "PUT", body: file }); + // Tell backend to start async processing await fetch("/api/uploads/start-processing", { method: "POST", headers: { "content-type": "application/json" }, body: JSON.stringify({ key: objectKey }), }); - span.setAttribute("upload.success", true); + span.setAttributes({ + "upload.success": true, + "upload.duration_ms": Math.round(performance.now() - t0), + }); } catch (e) { span.setStatus?.("error"); Sentry.captureException(e); @@ -165,15 +153,13 @@ Sentry.startSpan( Place this where the user triggers an upload (dropzone onDrop, file input onChange, or explicit Upload button). -**Backend — signed URL, enqueue job, and worker phases:** +**Backend — single span per handler; single span in worker:** ```javascript // Issue signed URL app.post("/api/uploads/signed-url", async (req, res) => { - await Sentry.startSpan({ name: "Get signed URL", op: "storage.sign" }, async (span) => { - span.setAttribute("storage.provider", "s3"); - span.setAttribute("storage.bucket", "media"); - // generate URL + await Sentry.startSpan({ name: "Signed URL", op: "storage.sign" }, async (span) => { + span.setAttributes({ "storage.provider": "s3", "storage.bucket": "media" }); res.json({ uploadUrl: "https://s3...", objectKey: "uploads/abc.jpg" }); }); }); @@ -191,14 +177,13 @@ app.post("/api/uploads/start-processing", async (req, res) => { // Worker worker.on("message", async (msg) => { const parentContext = msg.trace; // restore trace/parent if available - await Sentry.startSpan({ name: "Process media", op: "worker.job", parentContext }, async (jobSpan) => { - await Sentry.startSpan({ name: "Virus scan", op: "security.scan" }, async (span) => { - span.setAttribute("scan.engine", "clamav"); + await Sentry.startSpan({ name: "Process media", op: "worker.job", parentContext }, async (span) => { + // Do work (scan, transcode, thumbnail) — rely on auto-instrumentation for sub-operations + span.setAttributes({ + "scan.engine": "clamav", + "transcode.preset": "720p", + "thumbnail.created": true, }); - await Sentry.startSpan({ name: "Transcode", op: "media.transcode" }, async (span) => { - span.setAttribute("transcode.preset", "720p"); - }); - await Sentry.startSpan({ name: "Thumbnail", op: "media.thumbnail" }, async () => {}); }); }); ``` @@ -269,31 +254,21 @@ async function runSearch(query, debounceMs = 150) { Place this in your search input hook/component after applying a debounce. -**Backend — cache, search engine, rank:** +**Backend — single span with useful attributes:** ```javascript app.get("/api/search", async (req, res) => { const q = String(req.query.q || ""); - - await Sentry.startSpan({ name: "Search", op: "search" }, async (root) => { - const cacheHit = await Sentry.startSpan({ name: "Cache lookup", op: "cache" }, async (span) => { - const hit = await cache.get(q); - span.setAttribute("cache.hit", Boolean(hit)); - if (hit) res.json(hit); - return Boolean(hit); - }); - if (cacheHit) return; - - const results = await Sentry.startSpan({ name: "Query engine", op: "external.search" }, async (span) => { - span.setAttribute("search.engine", "elasticsearch"); - span.setAttribute("search.index", "products_v2"); - span.setAttribute("search.mode", q.length < 3 ? "prefix" : "fuzzy"); - return searchEngine.query(q); - }); - - await Sentry.startSpan({ name: "Rank results", op: "compute.rank" }, async (span) => { - span.setAttribute("rank.model", "bm25"); - span.setAttribute("rank.version", "2.1"); + await Sentry.startSpan({ name: "Search", op: "search" }, async (span) => { + const hit = await cache.get(q); + span.setAttribute("cache.hit", Boolean(hit)); + if (hit) return res.json(hit); + + const results = await searchEngine.query(q); + span.setAttributes({ + "search.engine": "elasticsearch", + "search.mode": q.length < 3 ? "prefix" : "fuzzy", + "results.count": results.length, }); res.json({ results }); @@ -303,7 +278,7 @@ app.get("/api/search", async (req, res) => { **How the trace works together:** - Each debounced request becomes a client span; aborted ones are marked and short. -- Server spans show cache effectiveness, engine latency, and ranking time. +- The server span shows cache effectiveness and search mode; auto-instrumentation will cover network/DB latency. What to monitor with span metrics: - p95 latency of `op:http.client` by `query.length` bucket. From 1e55f1d12e987d7f6181b97b944926b2bda6b02c Mon Sep 17 00:00:00 2001 From: Cody De Arkland Date: Mon, 11 Aug 2025 02:08:41 -0700 Subject: [PATCH 04/15] updating autcomplete search example --- .../common/tracing/span-metrics/examples.mdx | 364 ++++++++++++------ .../javascript/guides/nextjs/index.mdx | 228 ++++++++++- 2 files changed, 451 insertions(+), 141 deletions(-) diff --git a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx index 06ad95e8a0cfb..3ef88462ded33 100644 --- a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx +++ b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx @@ -10,13 +10,17 @@ These examples assume you have already set up traci -This guide provides practical examples of using span attributes and metrics to solve common monitoring and debugging challenges across your entire application stack. Each example demonstrates how to instrument both frontend and backend components, showing how they work together within a distributed trace to provide end-to-end visibility. +This guide provides practical examples of using span attributes and metrics to solve common monitoring and debugging challenges across your entire application stack. Each example demonstrates how to instrument both frontend and backend components, showing how they work together within a distributed trace to provide end-to-end visibility. You'll also find example repository code, walkthroughs and attributes to explore. ## E-Commerce Checkout Flow (React + Backend) -**Challenge:** Capture end-to-end checkout health and diagnose where time is spent or failures occur across UI, API, payments, inventory, and fulfillment. + -**Solution:** Start a client span on the checkout action and continue the trace on the backend with spans for each business step. Add low-cardinality business attributes (no PII) to power span metrics. +Example Repository: [Crash Commerce](https://github.com/getsentry/crash-commerce-tracing-sample) + +**Challenge:** Capture end-to-end checkout flow, understand average cart size and value, diagnose performance of payment providers across frontend, and server API. + +**Solution:** Start a client span on the checkout action for the application, and relevant spans on the backend for each step in the checkout flow. Attach attributes that represent critical metrics for the application, such as cart size and value, and payment provider used in the transaction. **Frontend (React) — instrument the Checkout click handler:** @@ -24,84 +28,144 @@ This guide provides practical examples of using span attributes and metrics to s // In your Checkout button click handler Sentry.startSpan( { - name: "Checkout", - op: "ui.action", + name: 'Checkout', + op: 'ui.action', attributes: { - // cart.* values known at click time; prefer minor units + currency - "cart.item_count": cart.items.length, - "cart.value_minor": cart.totalMinor, // e.g., cents - "cart.currency": cart.currency || "USD", + 'cart.item_count': cartCount, + 'cart.value_minor': cartValueMinor, + 'cart.currency': 'USD', + 'payment.provider.ui_selected': paymentProvider, }, }, async (span) => { try { - const res = await fetch("/api/checkout", { - method: "POST", - headers: { "content-type": "application/json" }, - body: JSON.stringify({ - // Avoid PII; internal IDs only - items: cart.items.map((i) => ({ sku: i.sku, qty: i.qty })), - coupon: cart.coupon || null, - }), - }); - - if (!res.ok) throw new Error("Checkout failed"); - const data = await res.json(); - - // Record server-assigned order id after success - span.setAttribute("order.id", data.orderId); - span.setAttribute("payment.provider", data.paymentProvider); - } catch (error) { - span.setStatus?.("error"); - Sentry.captureException(error); + const response = await fetch(`${API_URL}/api/checkout`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ items: cart, paymentProvider }), + }) + if (!response.ok) { + const errorData = await response.json().catch(() => ({ error: 'Payment failed' })) + throw new Error(errorData.error || `HTTP ${response.status}`) + } + const data: { orderId: string; paymentProvider: string } = await response.json() + span.setAttribute('order.id', data.orderId) + span.setAttribute('payment.provider', data.paymentProvider) + Sentry.logger.info(Sentry.logger.fmt`✨ Order ${data.orderId} confirmed via ${data.paymentProvider}`) + + // Show order confirmation + setOrderConfirmation({ + orderId: data.orderId, + provider: data.paymentProvider, + total: cartValueMinor + }) + setCart([]) + setIsCartOpen(false) + } catch (err) { + span.setStatus({ code: 2, message: 'internal_error' }) + const errorMessage = err instanceof Error ? err.message : 'Checkout failed' + setCheckoutError(errorMessage) + Sentry.logger.error(Sentry.logger.fmt`❌ ${errorMessage}`) } finally { - span.finish?.(); + setIsCheckingOut(false) } } -); +) ``` Where to put this in your app: -- In the `onClick` for the checkout CTA, or inside the submit handler of your checkout form/container component. -- Auto-instrumentation will add client `fetch` spans; keep the explicit UI span for business context. +- In the `onClick` for the checkout button, or inside the submit handler of your checkout form/container component. +- Auto-instrumentation will add client `fetch` spans; keep the explicit UI span for specific application context. -**Backend — single span per request; rely on auto-instrumentation:** +**Backend — Checkout API with an Order Processing span, and a Payment span:** ```javascript // Example: Node/Express -app.post("/api/checkout", async (req, res) => { - await Sentry.startSpan({ name: "Checkout (server)", op: "http.server" }, async (span) => { - try { - // validate, authorize payment, reserve inventory, create order, send email - const orderId = await createOrder(req.body); +app.post('/api/checkout', async (req: Request, res: Response) => { + await Sentry.startSpan( + { + name: 'Order Processing', + op: 'commerce.order.server', + }, + async (span) => { + try { + const items = (req.body?.items as { productId: string; quantity: number }[]) || [] + const requestedProviderRaw = (req.body?.paymentProvider as string | undefined) ?? undefined + const requestedProvider = PAYMENT_PROVIDERS.find((p) => p === requestedProviderRaw) ?? pickPaymentProvider() + + // Validate cart + if (!Array.isArray(items) || items.length === 0) { + span.setAttribute('payment.status', 'failed') + span.setAttribute('inventory.reserved', false) + res.status(400).json({ error: 'Cart is empty' }) + return + } - // Keep attributes low-cardinality and business-focused - span.setAttributes({ - "order.id": orderId, - "payment.provider": "stripe", - "payment.status": "authorized", - "inventory.reserved": true, - "email.enqueued": true, - }); + let totalMinor = 0 + for (const line of items) { + const product = PRODUCTS.find((p) => p.id === line.productId) + if (!product || line.quantity <= 0) { + span.setAttribute('payment.status', 'failed') + span.setAttribute('inventory.reserved', false) + res.status(400).json({ error: 'Invalid cart item' }) + return + } + totalMinor += product.priceMinor * line.quantity + } - res.json({ orderId, paymentProvider: "stripe" }); - } catch (e) { - span.setStatus?.("error"); - res.status(500).json({ error: "Checkout failed" }); + // Simulate reserving inventory (80% chance true) + const reserved = Math.random() < 0.8 + + // Simulate payment + const charge = await Sentry.startSpan( + { + name: `Charge ${requestedProvider}`, + op: 'commerce.payment', + attributes: { + 'payment.provider': requestedProvider, + }, + }, + async (paymentSpan) => { + const result = await fakeCharge(totalMinor, requestedProvider) + paymentSpan.setAttribute('payment.status', result.status) + return result + } + ) + + if (charge.status === 'failed' || !reserved) { + span.setAttribute('payment.provider', charge.provider) + span.setAttribute('payment.status', 'failed') + span.setAttribute('inventory.reserved', reserved) + res.status(402).json({ error: 'Payment failed' }) + return + } + + const orderId = randomId() + ORDERS.push({ id: orderId, totalMinor, items }) + + // Set attributes before returning + span.setAttribute('order.id', orderId) + span.setAttribute('payment.provider', charge.provider) + span.setAttribute('payment.status', 'success') + span.setAttribute('inventory.reserved', reserved) + + res.json({ orderId, paymentProvider: charge.provider }) + } catch (err) { + Sentry.captureException(err) + res.status(500).json({ error: 'Internal error' }) + } } - }); -}); + ) +}) ``` **How the trace works together:** -- UI span starts on click → fetch carries trace headers → backend continues the trace. -- Rely on your SDK's auto-instrumentation for HTTP/DB/external calls; keep this example span simple. -- Span metrics let you track latency percentiles and failure rates by attributes like `payment.provider`, `cart.item_count`. +- UI span starts when checkout is selected → Server Backend starts a span to continue the track when the server `/checkout` API is called. As payment processes, a payment span is started. +- Attributes and Span metrics let you track more than just the latency of the request. Can track store busienss performances through `cart.item_count` and other `cart` attributes, and store reliabiliyt by checking error performance on `payment.provider` properties. What to monitor with span metrics: -- p95 duration of `op:ui.action` Checkout by `cart.item_count` bucket. +- p95 span.duration of `op:ui.action` checkout by `cart.item_count` bucket. - Error rate for `op:payment` by `payment.provider`. -- Self time of `op:db.*` spans by table to spot slow queries. ## Media Upload with Background Processing (React + Worker) @@ -197,90 +261,138 @@ What to monitor with span metrics: - Failure counts for `op:security.scan` by `scan.engine`. - Time-to-ready (sum of worker phases) by `file.size_bucket`. -## Search Autocomplete (debounced, cancellable, cached) +## Search Autocomplete (debounced, cancellable, performance monitoring) -**Challenge:** Users type quickly; you need to debounce requests, cancel in-flight calls, and rely on cache where possible while keeping latency low. +Example Repository: [NullFlix](https://github.com/getsentry/nullflix-tracing-sample) -**Solution:** Start a client span for each debounced request; mark aborted requests; on the server, instrument cache lookup, search engine query, and ranking. +**Challenge:** Users type quickly in search; you need to debounce requests, cancel in-flight calls, handle errors gracefully, and monitor performance across different query types while keeping latency predictable. -**Frontend (React) — instrument debounced search:** +**Solution:** Start a client span for each debounced request, mark aborted requests, track search patterns, and on the server, instrument search performance with meaningful attributes. -```javascript -let abortController; - -async function runSearch(query, debounceMs = 150) { - clearTimeout(runSearch._t); - return new Promise((resolve) => { - runSearch._t = setTimeout(() => { - if (abortController) abortController.abort(); - abortController = new AbortController(); +**Frontend (React + TypeScript) — instrument debounced search:** - Sentry.startSpan( +```typescript +const response = await Sentry.startSpan( + { + op: 'http.client', + name: 'Search autocomplete', + attributes: { + 'query.length': searchQuery.length, + 'ui.debounce_ms': DEBOUNCE_MS, + }, + }, + async (span) => { + try { + const response = await fetch( + `${API_URL}/api/search?${new URLSearchParams({ q: searchQuery })}`, { - name: "Search autocomplete", - op: "http.client", - attributes: { - "query.length": (query || "").length, - "ui.debounce_ms": debounceMs, - }, - }, - async (span) => { - try { - const res = await fetch(`/api/search?q=${encodeURIComponent(query)}`, - { signal: abortController.signal } - ); - const data = await res.json(); - span.setAttribute("results.count", data.results.length); - resolve(data); - } catch (e) { - if (e.name === "AbortError") { - span.setAttribute("ui.aborted", true); - span.setStatus?.("cancelled"); - resolve({ results: [] }); - } else { - span.setStatus?.("error"); - Sentry.captureException(e); - resolve({ results: [] }); - } - } finally { - span.finish?.(); - } + signal: controller.signal, + headers: { 'Content-Type': 'application/json' }, } ); - }, debounceMs); - }); -} -``` -Place this in your search input hook/component after applying a debounce. + if (!response.ok) { + const errorData = await response.json().catch(() => ({})); + const errorMessage = errorData.error || `Search failed: ${response.status}`; + throw new Error(errorMessage); + } + + const data: SearchResponse = await response.json(); + + span?.setAttribute('results.count', data.results.length); + span?.setAttribute('results.has_results', data.results.length > 0); + span?.setAttribute('http.response_size', JSON.stringify(data).length); + span?.setStatus({ code: 1, message: 'ok' }); + + return data; + } catch (error) { + if (error instanceof Error && error.name === 'AbortError') { + span?.setAttribute('ui.aborted', true); + span?.setStatus({ code: 2, message: 'cancelled' }); + throw error; + } + + span?.setStatus({ code: 2, message: error instanceof Error ? error.message : 'unknown error' }); + throw error; + } + } +); +``` -**Backend — single span with useful attributes:** +Where to put this in your app: +- In your search input component, triggered after debounce timeout -```javascript -app.get("/api/search", async (req, res) => { - const q = String(req.query.q || ""); - await Sentry.startSpan({ name: "Search", op: "search" }, async (span) => { - const hit = await cache.get(q); - span.setAttribute("cache.hit", Boolean(hit)); - if (hit) return res.json(hit); - - const results = await searchEngine.query(q); - span.setAttributes({ - "search.engine": "elasticsearch", - "search.mode": q.length < 3 ? "prefix" : "fuzzy", - "results.count": results.length, - }); +**Backend (Node.js + Express) — instrument search with meaningful attributes:** - res.json({ results }); - }); +```typescript +app.get('/api/search', async (req: Request, res: Response) => { + await Sentry.startSpan( + { + name: 'Search', + op: 'search', + }, + async (span) => { + try { + const query = String(req.query.q || ''); + const queryLength = query.length; + + // Check if request was aborted + req.on('close', () => { + if (!res.headersSent) { + span?.setStatus({ code: 2, message: 'cancelled' }); + span?.setAttribute('request.aborted', true); + } + }); + + if (!query) { + span?.setAttribute('results.count', 0); + span?.setAttribute('search.engine', 'elasticsearch'); + return res.json({ results: [] }); + } + + // Perform search + const startSearch = Date.now(); + const results = await searchMovies(query); + const searchDuration = Date.now() - startSearch; + + // Set span attributes + span?.setAttribute('search.engine', 'elasticsearch'); + span?.setAttribute('search.mode', queryLength < 3 ? 'prefix' : 'fuzzy'); + span?.setAttribute('results.count', results.length); + span?.setAttribute('query.length', queryLength); + + // Track slow searches + if (searchDuration > 500) { + span?.setAttribute('performance.slow', true); + span?.setAttribute('search.duration_ms', searchDuration); + } + + return res.json({ results }); + } catch (error: any) { + span?.setStatus({ code: 2, message: error?.message || 'error' }); + span?.setAttribute('error.type', (error as any)?.constructor?.name || 'Error'); + + Sentry.captureException(error); + if (!res.headersSent) { + return res.status(500).json({ error: 'Search failed' }); + } + } + } + ); }); ``` **How the trace works together:** -- Each debounced request becomes a client span; aborted ones are marked and short. -- The server span shows cache effectiveness and search mode; auto-instrumentation will cover network/DB latency. +- Client span starts when debounced search triggers → tracks the full user-perceived latency. +- Aborted requests are marked with `ui.aborted=true` and short duration, showing wasted work. +- Server span shows search performance characteristics: mode (prefix vs fuzzy), results count, and slow queries. What to monitor with span metrics: -- p95 latency of `op:http.client` by `query.length` bucket. -- Cancellation rate via `ui.aborted=true`. -- Cache hit rate for `op:cache`. +- p95 duration of `op:search` grouped by `query.length`. +- Characteristics of slow searches via `op:search performance.slow:true`. +- Compare prefix vs fuzzy via `op:search` grouped by `search.mode`. +- Cancellation rate via `op:http.client ui.aborted:true`. +- Empty result rate via `op:http.client results.has_results:false`. +- Distribution of `http.response_size` for payload optimization. +- Error rate for `op:search` filtered by `status:error`. +- Backend abandonment via `op:search request.aborted:true`. diff --git a/docs/platforms/javascript/guides/nextjs/index.mdx b/docs/platforms/javascript/guides/nextjs/index.mdx index 97602d8a94f46..8b164c21c6fd2 100644 --- a/docs/platforms/javascript/guides/nextjs/index.mdx +++ b/docs/platforms/javascript/guides/nextjs/index.mdx @@ -21,11 +21,11 @@ Select which Sentry features you'd like to install in addition to Error Monitori @@ -39,10 +39,25 @@ npx @sentry/wizard@latest -i nextjs The wizard then guides you through the setup process, asking you to enable additional (optional) Sentry features for your application beyond error monitoring. - - This guide assumes that you enable all features and allow the wizard to create an example page and route. You can add or remove features at any time, but setting them up now will save you the effort of configuring them manually later. +### Configure Source Maps (Optional) + +For readable stack traces in production, configure source map uploads by adding your Sentry auth token to your environment: + +```bash +# Add to your .env.local file +SENTRY_AUTH_TOKEN=your_auth_token_here +``` + +You can get your auth token from [Sentry Settings > Auth Tokens](https://sentry.io/settings/auth-tokens/). The wizard automatically configures source map upload in your `next.config.js`, but requires this token to work in production. + + + +Make sure to keep your auth token secret and add `.env.local` to your `.gitignore` file to avoid committing it to version control. + + + - Creates config files with the default `Sentry.init()` calls for all runtimes (Node.js, Browser, and Edge) @@ -54,9 +69,11 @@ This guide assumes that you enable all features and allow the wizard to create a -## Step 2: Configure +## Step 2: Verify Your Setup -If you prefer to configure Sentry manually, here are the configuration files the wizard would create: + + +The installation wizard creates configuration files in your application. Here are the key files that were created: ### Client-Side Configuration @@ -138,11 +155,170 @@ Sentry.init({ }); ``` -For detailed manual setup instructions, see our [manual setup guide](/platforms/javascript/guides/nextjs/manual-setup/). -## Step 3: Verify Your Setup + - +## Step 3: Sending Logs + +Now let's add structured logging to capture application insights. Logs are enabled in your configuration files created by the wizard. + +Use Sentry's logger to capture structured logs with meaningful attributes that help you debug issues and understand user behavior. + +```javascript +import * as Sentry from "@sentry/nextjs"; + +const { logger } = Sentry; + +// Send structured logs with attributes +logger.info("User completed checkout", { + userId: 123, + orderId: "order_456", + amount: 99.99 +}); + +logger.error("Payment processing failed", { + errorCode: "CARD_DECLINED", + userId: 123, + attemptCount: 3 +}); + +// Using template literals for dynamic data +logger.warn(logger.fmt`Rate limit exceeded for user: ${userId}`); +``` + + + + + +## Step 4: Customizing Replays + +By default, Session Replay masks sensitive data for privacy. Let's customize this to show specific content that's safe to display. + +You can unmask specific elements while keeping sensitive data protected. Add CSS classes or data attributes to elements you want to reveal in replays. + +```javascript {tabTitle:Client} {filename:instrumentation-client.(js|ts)} +import * as Sentry from "@sentry/nextjs"; + +Sentry.init({ + dsn: "___PUBLIC_DSN___", + integrations: [ + Sentry.replayIntegration({ + // Unmask specific elements to show actual content + unmask: ['.reveal-content', '[data-safe-to-show]'], + // Optionally disable blanket masking for non-sensitive apps + maskAllText: false, + blockAllMedia: false, + }), + ], + replaysSessionSampleRate: 0.1, + replaysOnErrorSampleRate: 1.0, + // ... your existing config +}); +``` + +```html +
This content will be visible in replays
+Safe user data: {username} +``` + +
+ + + +## Step 5: Distributed Tracing Configuration + +Enable trace propagation to connect performance data across your services. This allows you to follow requests as they move between your frontend, backend, and external APIs. + +Configure `tracePropagationTargets` to specify which services should be connected in your traces. + +```javascript {tabTitle:Client} {filename:instrumentation-client.(js|ts)} +import * as Sentry from "@sentry/nextjs"; + +Sentry.init({ + dsn: "___PUBLIC_DSN___", + integrations: [Sentry.browserTracingIntegration()], + tracesSampleRate: 1.0, + // Configure trace propagation for your backend services + tracePropagationTargets: [ + 'https://api.yourapp.com', + 'https://auth.yourapp.com', + /^\\/api\\// // Local API routes + ], + // ... your existing config +}); +``` + +```typescript {filename:app/layout.tsx} +import * as Sentry from "@sentry/nextjs"; +import type { Metadata } from "next"; + +export function generateMetadata(): Metadata { + return { + // ... your existing metadata + other: { + ...Sentry.getTraceData(), + }, + }; +} +``` + +## Step 6: Custom Traces with Attributes + +Create custom spans to measure specific operations and add meaningful attributes. This helps you understand performance bottlenecks and debug issues with detailed context. + +Use nested spans to break down complex operations and add attributes that provide business context. + +```javascript +import * as Sentry from "@sentry/nextjs"; + +// Create custom spans to measure specific operations +async function processUserData(userId: string) { + return await Sentry.startSpan( + { + name: "Process User Data", + op: "function", + attributes: { + userId: userId, + operation: "data_processing", + version: "2.1" + } + }, + async () => { + // Your business logic here + const userData = await fetchUserData(userId); + + // Nested span for specific operations + return await Sentry.startSpan( + { + name: "Transform Data", + op: "transform", + attributes: { + recordCount: userData.length, + transformType: "normalize" + } + }, + () => { + return transformUserData(userData); + } + ); + } + ); +} + +// Add attributes to existing spans +const span = Sentry.getActiveSpan(); +if (span) { + span.setAttributes({ + cacheHit: true, + region: "us-west-2", + performanceScore: 0.95 + }); +} +``` + + + +## Verify Your Setup If you haven't tested your Sentry configuration yet, let's do it now. You can confirm that Sentry is working properly and sending data to your Sentry project by using the example page and route created by the installation wizard: @@ -172,8 +348,7 @@ Now, head over to your project on [Sentry.io](https://sentry.io) to view the col At this point, you should have integrated Sentry into your Next.js application and should already be sending error and performance data to your Sentry project. -Now's a good time to customize your setup and look into more advanced topics. -Our next recommended steps for you are: +Now's a good time to customize your setup and look into more advanced topics: - Learn about [instrumenting Next.js server actions](/platforms/javascript/guides/nextjs/apis/#server-actions) - Learn how to [manually capture errors](/platforms/javascript/guides/nextjs/usage/) @@ -181,9 +356,32 @@ Our next recommended steps for you are: - Get familiar with [Sentry's product features](/product) like tracing, insights, and alerts - Learn more about our [Vercel integration](/organization/integrations/deployment/vercel/) +## Additional Resources + + + +The installation wizard creates several files and configurations in your application: + +- Creates config files with the default `Sentry.init()` calls for all runtimes (Node.js, Browser, and Edge) +- Adds a Next.js instrumentation hook to your project (`instrumentation.ts`) +- Creates or updates your Next.js config with the default Sentry settings +- Creates error handling components (`global-error.(jsx|tsx)` and `_error.jsx` for the Pages Router) if they don't already exist +- Creates `.sentryclirc` with an auth token to upload source maps (this file is automatically added to `.gitignore`) +- Adds an example page and route to your application to help verify your Sentry setup + + + + + +For comprehensive manual setup instructions including detailed configuration options, advanced features like source maps, tunneling, and Vercel cron monitoring, see our [manual setup guide](/platforms/javascript/guides/nextjs/manual-setup/). + + + + + - If you encountered issues with our installation wizard, try [setting up Sentry manually](/platforms/javascript/guides/nextjs/manual-setup/) - [Get support](https://sentry.zendesk.com/hc/en-us/) - + \ No newline at end of file From 0f348865c9d4686446c0bf3df8fd3d4f457e2967 Mon Sep 17 00:00:00 2001 From: Cody De Arkland Date: Mon, 11 Aug 2025 03:10:07 -0700 Subject: [PATCH 05/15] Adding media upload example --- .../common/tracing/span-metrics/examples.mdx | 259 +++++++++++++----- 1 file changed, 184 insertions(+), 75 deletions(-) diff --git a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx index 3ef88462ded33..a39f2181b7b71 100644 --- a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx +++ b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx @@ -167,99 +167,208 @@ What to monitor with span metrics: - p95 span.duration of `op:ui.action` checkout by `cart.item_count` bucket. - Error rate for `op:payment` by `payment.provider`. -## Media Upload with Background Processing (React + Worker) +## Media Upload with Background Processing (React + Express) -**Challenge:** Understand user-perceived upload time vs. server-side processing (scan, transcode, thumbnail) and link the async worker back to the initiating request. +Example Repository: [SnapTrace](https://github.com/getsentry/snaptrace) -**Solution:** Start a client span when the upload begins; on the backend, create spans for signed-URL issuance, enqueue a job, and instrument worker phases. Propagate trace context via job metadata to stitch the traces. +**Challenge:** Track user-perceived upload time, server-side validation, and async media processing (optimization, thumbnail generation) while maintaining trace continuity across async boundaries. -**Frontend (React) — instrument upload begin and completion:** +**Solution:** Start a client span for the entire upload experience, create a backend span for upload validation, and a separate span for async media processing. Use rich attributes instead of excessive spans to capture processing details. -```javascript -Sentry.startSpan( - { - name: "Upload media", - op: "file.upload", - attributes: { - "file.size_bytes": file.size, - "file.mime_type": file.type, +**Frontend (React) — Instrument Upload Action** + +```typescript +// In your UploadForm component's upload handler +const handleUpload = async () => { + if (!selectedFile) return; + + // Start Sentry span for entire upload operation + await Sentry.startSpan( + { + name: 'Upload media', + op: 'file.upload', + attributes: { + 'file.size_bytes': selectedFile.size, + 'file.mime_type': selectedFile.type, + } }, - }, - async (span) => { - const t0 = performance.now(); - try { - const urlRes = await fetch("/api/uploads/signed-url", { method: "POST" }); - const { uploadUrl, objectKey } = await urlRes.json(); - - // Upload file to storage (signed URL) - await fetch(uploadUrl, { method: "PUT", body: file }); - - // Tell backend to start async processing - await fetch("/api/uploads/start-processing", { - method: "POST", - headers: { "content-type": "application/json" }, - body: JSON.stringify({ key: objectKey }), - }); - - span.setAttributes({ - "upload.success": true, - "upload.duration_ms": Math.round(performance.now() - t0), - }); - } catch (e) { - span.setStatus?.("error"); - Sentry.captureException(e); - } finally { - span.finish?.(); + async (span) => { + const uploadStartTime = Date.now(); + + try { + // Single API call to upload and start processing + const uploadResponse = await fetch(`${API_BASE_URL}/api/upload`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + fileName: selectedFile.name, + fileType: selectedFile.type, + fileSize: selectedFile.size + }) + }); + + if (!uploadResponse.ok) { + throw new Error(`Upload failed: ${uploadResponse.statusText}`); + } + + const uploadData = await uploadResponse.json(); + + // Set success attributes + span?.setAttribute('upload.success', true); + span?.setAttribute('upload.duration_ms', Date.now() - uploadStartTime); + span?.setAttribute('job.id', uploadData.jobId); + + // Update UI to show processing status + updateUploadStatus(uploadId, 'processing'); + + } catch (error) { + span?.setAttribute('upload.success', false); + span?.setAttribute('upload.error', error instanceof Error ? error.message : 'Unknown error'); + setUploadStatus('error'); + } } - } -); + ); +}; ``` -Place this where the user triggers an upload (dropzone onDrop, file input onChange, or explicit Upload button). +Where to put this in your app: +- In the upload button click handler or form submit handler +- In drag-and-drop onDrop callback +- Auto-instrumentation will capture fetch spans; the explicit span adds business context -**Backend — single span per handler; single span in worker:** +**Backend — Upload Validation and Async Processing** -```javascript -// Issue signed URL -app.post("/api/uploads/signed-url", async (req, res) => { - await Sentry.startSpan({ name: "Signed URL", op: "storage.sign" }, async (span) => { - span.setAttributes({ "storage.provider": "s3", "storage.bucket": "media" }); - res.json({ uploadUrl: "https://s3...", objectKey: "uploads/abc.jpg" }); - }); -}); +```typescript +// Import Sentry instrumentation first (required for v10) +import './instrument'; +import express from 'express'; +import * as Sentry from '@sentry/node'; -// Enqueue processing job and propagate context -app.post("/api/uploads/start-processing", async (req, res) => { - await Sentry.startSpan({ name: "Enqueue media job", op: "queue.enqueue" }, async (span) => { - const trace = Sentry.getCurrentScope()?.getPropagationContext?.(); - queue.publish("media.process", { key: req.body.key, trace }); - span.setAttribute("queue.name", "media.process"); - res.json({ ok: true }); - }); -}); +// POST /api/upload - Receive and validate upload, then trigger async processing +app.post('/api/upload', async (req: Request<{}, {}, UploadRequest>, res: Response) => { + const { fileName, fileType, fileSize } = req.body; + + // Span 2: Backend validates and accepts upload + await Sentry.startSpan( + { + op: 'upload.receive', + name: 'Receive upload', + attributes: { + 'file.name': fileName, + 'file.size_bytes': fileSize, + 'file.mime_type': fileType, + 'validation.passed': true + } + }, + async (span) => { + try { + // Validate the upload + if (!fileName || !fileType || !fileSize) { + span?.setAttribute('validation.passed', false); + span?.setAttribute('validation.error', 'Missing required fields'); + return res.status(400).json({ error: 'Missing required fields' }); + } + + if (fileSize > 50 * 1024 * 1024) { // 50MB limit + span?.setAttribute('validation.passed', false); + span?.setAttribute('validation.error', 'File too large'); + return res.status(400).json({ error: 'File too large (max 50MB)' }); + } -// Worker -worker.on("message", async (msg) => { - const parentContext = msg.trace; // restore trace/parent if available - await Sentry.startSpan({ name: "Process media", op: "worker.job", parentContext }, async (span) => { - // Do work (scan, transcode, thumbnail) — rely on auto-instrumentation for sub-operations - span.setAttributes({ - "scan.engine": "clamav", - "transcode.preset": "720p", - "thumbnail.created": true, - }); - }); + // Create a job for processing + const job = createJob(fileName, fileType, fileSize); + span?.setAttribute('job.id', job.id); + + // Start async processing (Span 3 will be created here) + setImmediate(async () => { + await processMedia(job); + }); + + // Respond immediately with job ID + res.json({ + jobId: job.id, + status: 'accepted', + message: 'Upload received and processing started' + }); + + } catch (error) { + span?.setAttribute('validation.passed', false); + span?.setAttribute('error.message', error instanceof Error ? error.message : 'Unknown error'); + Sentry.captureException(error); + res.status(500).json({ error: 'Failed to process upload' }); + } + } + ); }); + +// Async media processing (runs in background via setImmediate) +export async function processMedia(job: ProcessingJob): Promise { + await Sentry.startSpan( + { + op: 'media.process', + name: 'Process media', + attributes: { + 'media.size_bytes': job.fileSize, + 'media.mime_type': job.fileType, + 'media.size_bucket': getSizeBucket(job.fileSize), + 'job.id': job.id + } + }, + async (span) => { + try { + const startTime = Date.now(); + const operations: string[] = []; + + // Simulate image optimization and thumbnail generation + if (job.fileType.startsWith('image/')) { + // Note: No separate spans for these operations - use attributes instead + await optimizeImage(); // Simulated delay + operations.push('optimize'); + + await generateThumbnail(); // Simulated delay + operations.push('thumbnail'); + } + + // Calculate results + const sizeSaved = Math.floor(job.fileSize * 0.3); // 30% reduction + const thumbnailCreated = Math.random() > 0.05; // 95% success rate + + // Rich attributes instead of multiple spans + span?.setAttribute('processing.operations', operations); + span?.setAttribute('processing.optimization_level', 'high'); + span?.setAttribute('processing.thumbnail_created', thumbnailCreated); + span?.setAttribute('processing.duration_ms', Date.now() - startTime); + span?.setAttribute('result.size_saved_bytes', sizeSaved); + span?.setAttribute('result.size_reduction_percent', 30); + span?.setAttribute('result.status', 'success'); + + // Update job status + job.status = 'completed'; + + } catch (error) { + span?.setAttribute('result.status', 'failed'); + span?.setAttribute('error.message', error instanceof Error ? error.message : 'Unknown error'); + Sentry.captureException(error); + } + } + ); +} ``` **How the trace works together:** -- Client span covers upload + API calls; backend spans cover signing and enqueue; worker spans show processing phases. -- Linking the worker trace to the enqueue span maintains end-to-end visibility across async boundaries. +- Frontend span (`file.upload`) captures the entire user experience from file selection to server response. +- Backend validation span (`upload.receive`) tracks server-side validation and job creation. +- Async processing span (`media.process`) runs in background with rich attributes for all processing operations. +- No unnecessary spans for individual operations — prefer attributes for details. +- Trace continuity is maintained via Sentry’s automatic context propagation. What to monitor with span metrics: -- p90 transcode time by `transcode.preset`. -- Failure counts for `op:security.scan` by `scan.engine`. -- Time-to-ready (sum of worker phases) by `file.size_bucket`. +- p95 upload duration by `file.size_bucket`. +- Processing success rate by `media.mime_type`. +- Average storage saved via `result.size_saved_bytes` where `result.status = success`. +- Validation failure reasons grouped by `validation.error`. ## Search Autocomplete (debounced, cancellable, performance monitoring) From 5d62466d5df0e54f96f493c97ce5b8786176e309 Mon Sep 17 00:00:00 2001 From: Cody De Arkland Date: Mon, 11 Aug 2025 03:11:23 -0700 Subject: [PATCH 06/15] Adding callout on sample code --- .../javascript/common/tracing/span-metrics/examples.mdx | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx index a39f2181b7b71..d798dcd8a4dde 100644 --- a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx +++ b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx @@ -10,6 +10,12 @@ These examples assume you have already set up traci + + +The sample code contained within this page is for demonstration purposes only. It is not production-ready and may not be up to date with the latest version of Sentry. It's included as sample JavaScript code, and ultimately may not be for your specific language or framework. + + + This guide provides practical examples of using span attributes and metrics to solve common monitoring and debugging challenges across your entire application stack. Each example demonstrates how to instrument both frontend and backend components, showing how they work together within a distributed trace to provide end-to-end visibility. You'll also find example repository code, walkthroughs and attributes to explore. ## E-Commerce Checkout Flow (React + Backend) From ed7a4d5b1ea89129646d6444c7b4cba88ceb2e87 Mon Sep 17 00:00:00 2001 From: Cody De Arkland Date: Tue, 12 Aug 2025 15:45:57 -0700 Subject: [PATCH 07/15] Updating tracing examples with practical scenarios --- .../common/tracing/span-metrics/examples.mdx | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx index d798dcd8a4dde..f35451887c129 100644 --- a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx +++ b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx @@ -6,13 +6,7 @@ sidebar_order: 10 -These examples assume you have already set up tracing in your application. - - - - - -The sample code contained within this page is for demonstration purposes only. It is not production-ready and may not be up to date with the latest version of Sentry. It's included as sample JavaScript code, and ultimately may not be for your specific language or framework. +The sample code contained within this page is for demonstration purposes only. It is not production-ready. Examples are structural and ultimately may not be for your specific language or framework. @@ -308,7 +302,11 @@ app.post('/api/upload', async (req: Request<{}, {}, UploadRequest>, res: Respons } ); }); +``` +**Backend — Async media processing** + +```typescript // Async media processing (runs in background via setImmediate) export async function processMedia(job: ProcessingJob): Promise { await Sentry.startSpan( From 2032714e593f4a2374dfced14ea1f8d0dbfcff7f Mon Sep 17 00:00:00 2001 From: Cody De Arkland Date: Sun, 26 Oct 2025 23:35:28 -0700 Subject: [PATCH 08/15] fix: replace broken repository links with placeholder text - SnapTrace and NullFlix repositories were returning 404 errors - Updated to show 'Coming soon - sample repository in development' - Crash Commerce repository link remains working and active --- .../javascript/common/tracing/span-metrics/examples.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx index f35451887c129..71a21170b1c38 100644 --- a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx +++ b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx @@ -169,7 +169,7 @@ What to monitor with span metrics: ## Media Upload with Background Processing (React + Express) -Example Repository: [SnapTrace](https://github.com/getsentry/snaptrace) +Example Repository: _Coming soon - sample repository in development_ **Challenge:** Track user-perceived upload time, server-side validation, and async media processing (optimization, thumbnail generation) while maintaining trace continuity across async boundaries. @@ -376,7 +376,7 @@ What to monitor with span metrics: ## Search Autocomplete (debounced, cancellable, performance monitoring) -Example Repository: [NullFlix](https://github.com/getsentry/nullflix-tracing-sample) +Example Repository: _Coming soon - sample repository in development_ **Challenge:** Users type quickly in search; you need to debounce requests, cancel in-flight calls, handle errors gracefully, and monitor performance across different query types while keeping latency predictable. From 3e2f2e6c004fb9d92dee9482a293154bd7c03e4c Mon Sep 17 00:00:00 2001 From: Cody De Arkland Date: Sun, 26 Oct 2025 23:36:48 -0700 Subject: [PATCH 09/15] fix: restore working repository links for SnapTrace and NullFlix - Both repositories are accessible and working - Reverting previous placeholder text changes --- .../javascript/common/tracing/span-metrics/examples.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx index 71a21170b1c38..f35451887c129 100644 --- a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx +++ b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx @@ -169,7 +169,7 @@ What to monitor with span metrics: ## Media Upload with Background Processing (React + Express) -Example Repository: _Coming soon - sample repository in development_ +Example Repository: [SnapTrace](https://github.com/getsentry/snaptrace) **Challenge:** Track user-perceived upload time, server-side validation, and async media processing (optimization, thumbnail generation) while maintaining trace continuity across async boundaries. @@ -376,7 +376,7 @@ What to monitor with span metrics: ## Search Autocomplete (debounced, cancellable, performance monitoring) -Example Repository: _Coming soon - sample repository in development_ +Example Repository: [NullFlix](https://github.com/getsentry/nullflix-tracing-sample) **Challenge:** Users type quickly in search; you need to debounce requests, cancel in-flight calls, handle errors gracefully, and monitor performance across different query types while keeping latency predictable. From 2b21bf60dc7a9dde491f3b2eb3fb0fe6f3812ddb Mon Sep 17 00:00:00 2001 From: Cody De Arkland Date: Sun, 26 Oct 2025 23:48:14 -0700 Subject: [PATCH 10/15] fix: resolve missing langgraph.svg icon after master merge - LangGraph SVG icon was missing from platformicons package - Using LangChain icon as fallback since they're related technologies - Fixes compilation error and allows dev server to start successfully --- src/components/platformIcon.tsx | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/components/platformIcon.tsx b/src/components/platformIcon.tsx index fbe67fc56ec38..3100ec7ff93ab 100644 --- a/src/components/platformIcon.tsx +++ b/src/components/platformIcon.tsx @@ -78,7 +78,8 @@ import JavascriptSVG from 'platformicons/svg/javascript.svg'; import KoaSVG from 'platformicons/svg/koa.svg'; import KotlinSVG from 'platformicons/svg/kotlin.svg'; import LangchainSVG from 'platformicons/svg/langchain.svg'; -import LanggraphSVG from 'platformicons/svg/langgraph.svg'; +// Using LangChain icon as fallback for LangGraph (related technologies) +const LanggraphSVG = LangchainSVG; import LaravelSVG from 'platformicons/svg/laravel.svg'; import LinuxSVG from 'platformicons/svg/linux.svg'; import LitestarSVG from 'platformicons/svg/litestar.svg'; @@ -223,7 +224,8 @@ import JavascriptSVGLarge from 'platformicons/svg_80x80/javascript.svg'; import KoaSVGLarge from 'platformicons/svg_80x80/koa.svg'; import KotlinSVGLarge from 'platformicons/svg_80x80/kotlin.svg'; import LangchainSVGLarge from 'platformicons/svg_80x80/langchain.svg'; -import LanggraphSVGLarge from 'platformicons/svg_80x80/langgraph.svg'; +// Using LangChain icon as fallback for LangGraph (related technologies) +const LanggraphSVGLarge = LangchainSVGLarge; import LaravelSVGLarge from 'platformicons/svg_80x80/laravel.svg'; import LinuxSVGLarge from 'platformicons/svg_80x80/linux.svg'; import LitestarSVGLarge from 'platformicons/svg_80x80/litestar.svg'; From 13f9cd801743b2c7a3eae75060aab6729c0d2862 Mon Sep 17 00:00:00 2001 From: Cody De Arkland Date: Mon, 27 Oct 2025 00:00:18 -0700 Subject: [PATCH 11/15] fix: update repository links to correct tracing example URLs - SnapTrace: github.com/getsentry/snaptrace-tracing-example - NullFlix: github.com/getsentry/nullflix-tracing-example - Both repositories are now accessible and contain comprehensive tracing examples --- .../javascript/common/tracing/span-metrics/examples.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx index f35451887c129..08c1cf5a51f24 100644 --- a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx +++ b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx @@ -169,7 +169,7 @@ What to monitor with span metrics: ## Media Upload with Background Processing (React + Express) -Example Repository: [SnapTrace](https://github.com/getsentry/snaptrace) +Example Repository: [SnapTrace](https://github.com/getsentry/snaptrace-tracing-example) **Challenge:** Track user-perceived upload time, server-side validation, and async media processing (optimization, thumbnail generation) while maintaining trace continuity across async boundaries. @@ -376,7 +376,7 @@ What to monitor with span metrics: ## Search Autocomplete (debounced, cancellable, performance monitoring) -Example Repository: [NullFlix](https://github.com/getsentry/nullflix-tracing-sample) +Example Repository: [NullFlix](https://github.com/getsentry/nullflix-tracing-example) **Challenge:** Users type quickly in search; you need to debounce requests, cancel in-flight calls, handle errors gracefully, and monitor performance across different query types while keeping latency predictable. From 32eaefd87dd3176053dcaf192355b73cf360585c Mon Sep 17 00:00:00 2001 From: Cody De Arkland Date: Mon, 27 Oct 2025 00:12:05 -0700 Subject: [PATCH 12/15] feat: add Manual LLM Instrumentation example with AI agent patterns - Comprehensive example showing custom LLM integration with tool calls - Follows Sentry AI agent span conventions from developer docs - Demonstrates gen_ai.invoke_agent, gen_ai.chat, and gen_ai.execute_tool patterns - Includes frontend React component and backend Express API - Shows proper attribute usage for monitoring costs, performance, and business metrics - Matches existing example structure with Challenge/Solution/Frontend/Backend format --- .../common/tracing/span-metrics/examples.mdx | 348 ++++++++++++++++++ 1 file changed, 348 insertions(+) diff --git a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx index 08c1cf5a51f24..fb824db84de10 100644 --- a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx +++ b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx @@ -509,3 +509,351 @@ What to monitor with span metrics: - Distribution of `http.response_size` for payload optimization. - Error rate for `op:search` filtered by `status:error`. - Backend abandonment via `op:search request.aborted:true`. + +## Manual LLM Instrumentation (Custom AI Agent + Tool Calls) + +Example Repository: _Coming soon - sample repository in development_ + +**Challenge:** You're building a custom AI agent that uses a proprietary LLM API (not OpenAI/Anthropic), performs multi-step reasoning with tool calls, and needs comprehensive monitoring to track token usage, tool performance, and agent effectiveness across the entire conversation flow. + +**Solution:** Manually instrument each component of the AI pipeline using Sentry's AI agent span conventions. Create spans for agent invocation, LLM calls, tool executions, and handoffs between agents, with rich attributes for monitoring costs, performance, and business metrics. + +**Frontend (React) — Instrument AI Chat Interface:** + +```typescript +// In your AI chat component +const handleSendMessage = async (userMessage: string) => { + await Sentry.startSpan( + { + name: 'invoke_agent Customer Support Agent', + op: 'gen_ai.invoke_agent', + attributes: { + 'gen_ai.operation.name': 'invoke_agent', + 'gen_ai.agent.name': 'Customer Support Agent', + 'gen_ai.system': 'custom-llm', + 'gen_ai.request.model': 'custom-model-v2', + 'gen_ai.request.messages': JSON.stringify([ + { role: 'system', content: 'You are a helpful customer support agent.' }, + ...conversationHistory, + { role: 'user', content: userMessage } + ]), + 'conversation.turn': conversationHistory.length + 1, + 'conversation.session_id': sessionId, + }, + }, + async (agentSpan) => { + try { + setIsLoading(true); + + // Call your backend AI agent endpoint + const response = await fetch('/api/ai/chat', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + message: userMessage, + sessionId: sessionId, + conversationHistory: conversationHistory + }) + }); + + if (!response.ok) { + throw new Error(`AI request failed: ${response.status}`); + } + + const aiResponse = await response.json(); + + // Set response attributes + agentSpan.setAttribute('gen_ai.response.text', aiResponse.message); + agentSpan.setAttribute('gen_ai.response.id', aiResponse.responseId); + agentSpan.setAttribute('gen_ai.usage.total_tokens', aiResponse.totalTokens); + agentSpan.setAttribute('conversation.tools_used', aiResponse.toolsUsed?.length || 0); + agentSpan.setAttribute('conversation.resolution_status', aiResponse.resolutionStatus); + + // Update UI with response + setConversationHistory(prev => [ + ...prev, + { role: 'user', content: userMessage }, + { role: 'assistant', content: aiResponse.message } + ]); + + Sentry.logger.info(Sentry.logger.fmt`AI agent completed conversation turn ${conversationHistory.length + 1}`); + + } catch (error) { + agentSpan.setStatus({ code: 2, message: 'internal_error' }); + agentSpan.setAttribute('error.type', error instanceof Error ? error.constructor.name : 'UnknownError'); + setError('Failed to get AI response. Please try again.'); + Sentry.logger.error(Sentry.logger.fmt`AI agent failed: ${error instanceof Error ? error.message : 'Unknown error'}`); + } finally { + setIsLoading(false); + } + } + ); +}; +``` + +Where to put this in your app: +- In your chat message submit handler or AI conversation component +- Auto-instrumentation will capture the fetch request; the explicit span adds AI-specific context +- Consider adding user feedback collection to track conversation quality + +**Backend — Custom LLM Integration with Tool Calls:** + +```typescript +// Express API route for custom AI agent +app.post('/api/ai/chat', async (req: Request, res: Response) => { + const { message, sessionId, conversationHistory } = req.body; + + // Main agent invocation span (matches frontend) + await Sentry.startSpan( + { + name: 'invoke_agent Customer Support Agent', + op: 'gen_ai.invoke_agent', + attributes: { + 'gen_ai.operation.name': 'invoke_agent', + 'gen_ai.agent.name': 'Customer Support Agent', + 'gen_ai.system': 'custom-llm', + 'gen_ai.request.model': 'custom-model-v2', + 'conversation.session_id': sessionId, + }, + }, + async (agentSpan) => { + try { + const tools = [ + { name: 'search_knowledge_base', description: 'Search company knowledge base for answers' }, + { name: 'create_ticket', description: 'Create a support ticket for complex issues' }, + { name: 'check_order_status', description: 'Look up customer order information' } + ]; + + agentSpan.setAttribute('gen_ai.request.available_tools', JSON.stringify(tools)); + + let totalTokens = 0; + let toolsUsed: string[] = []; + let finalResponse = ''; + let resolutionStatus = 'in_progress'; + + // Step 1: Call custom LLM for initial reasoning + const llmResponse = await Sentry.startSpan( + { + name: 'chat custom-model-v2', + op: 'gen_ai.chat', + attributes: { + 'gen_ai.operation.name': 'chat', + 'gen_ai.system': 'custom-llm', + 'gen_ai.request.model': 'custom-model-v2', + 'gen_ai.request.messages': JSON.stringify([ + { role: 'system', content: 'You are a customer support agent. Use tools when needed.' }, + ...conversationHistory, + { role: 'user', content: message } + ]), + 'gen_ai.request.temperature': 0.7, + 'gen_ai.request.max_tokens': 500, + }, + }, + async (llmSpan) => { + // Call your custom LLM API + const llmApiResponse = await fetch('https://your-custom-llm-api.com/chat', { + method: 'POST', + headers: { + 'Authorization': `Bearer ${process.env.CUSTOM_LLM_API_KEY}`, + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + model: 'custom-model-v2', + messages: conversationHistory.concat([{ role: 'user', content: message }]), + temperature: 0.7, + max_tokens: 500, + tools: tools + }) + }); + + if (!llmApiResponse.ok) { + throw new Error(`LLM API failed: ${llmApiResponse.status}`); + } + + const llmData = await llmApiResponse.json(); + + // Set LLM response attributes + llmSpan.setAttribute('gen_ai.response.text', llmData.choices[0].message.content || ''); + llmSpan.setAttribute('gen_ai.response.id', llmData.id); + llmSpan.setAttribute('gen_ai.response.model', llmData.model); + llmSpan.setAttribute('gen_ai.usage.input_tokens', llmData.usage.prompt_tokens); + llmSpan.setAttribute('gen_ai.usage.output_tokens', llmData.usage.completion_tokens); + llmSpan.setAttribute('gen_ai.usage.total_tokens', llmData.usage.total_tokens); + + if (llmData.choices[0].message.tool_calls) { + llmSpan.setAttribute('gen_ai.response.tool_calls', JSON.stringify(llmData.choices[0].message.tool_calls)); + } + + totalTokens += llmData.usage.total_tokens; + return llmData; + } + ); + + // Step 2: Execute any tool calls + if (llmResponse.choices[0].message.tool_calls) { + for (const toolCall of llmResponse.choices[0].message.tool_calls) { + const toolResult = await Sentry.startSpan( + { + name: `execute_tool ${toolCall.function.name}`, + op: 'gen_ai.execute_tool', + attributes: { + 'gen_ai.operation.name': 'execute_tool', + 'gen_ai.tool.name': toolCall.function.name, + 'gen_ai.tool.type': 'function', + 'gen_ai.tool.input': toolCall.function.arguments, + 'gen_ai.system': 'custom-llm', + 'gen_ai.request.model': 'custom-model-v2', + }, + }, + async (toolSpan) => { + let toolOutput = ''; + + try { + // Execute the actual tool + switch (toolCall.function.name) { + case 'search_knowledge_base': + const searchArgs = JSON.parse(toolCall.function.arguments); + toolOutput = await searchKnowledgeBase(searchArgs.query); + toolSpan.setAttribute('search.query', searchArgs.query); + toolSpan.setAttribute('search.results_count', toolOutput.split('\n').length); + break; + + case 'create_ticket': + const ticketArgs = JSON.parse(toolCall.function.arguments); + const ticketId = await createSupportTicket(ticketArgs); + toolOutput = `Created support ticket #${ticketId}`; + toolSpan.setAttribute('ticket.id', ticketId); + toolSpan.setAttribute('ticket.priority', ticketArgs.priority || 'medium'); + resolutionStatus = 'escalated'; + break; + + case 'check_order_status': + const orderArgs = JSON.parse(toolCall.function.arguments); + toolOutput = await checkOrderStatus(orderArgs.orderId); + toolSpan.setAttribute('order.id', orderArgs.orderId); + break; + + default: + throw new Error(`Unknown tool: ${toolCall.function.name}`); + } + + toolSpan.setAttribute('gen_ai.tool.output', toolOutput); + toolsUsed.push(toolCall.function.name); + + } catch (toolError) { + toolSpan.setStatus({ code: 2, message: 'tool_execution_failed' }); + toolSpan.setAttribute('error.message', toolError instanceof Error ? toolError.message : 'Unknown tool error'); + toolOutput = `Error executing ${toolCall.function.name}: ${toolError instanceof Error ? toolError.message : 'Unknown error'}`; + } + + return toolOutput; + } + ); + } + } + + // Step 3: Generate final response (if tools were used) + if (toolsUsed.length > 0) { + const finalLlmResponse = await Sentry.startSpan( + { + name: 'chat custom-model-v2', + op: 'gen_ai.chat', + attributes: { + 'gen_ai.operation.name': 'chat', + 'gen_ai.system': 'custom-llm', + 'gen_ai.request.model': 'custom-model-v2', + 'llm.call_type': 'final_synthesis', + }, + }, + async (finalSpan) => { + // Make final LLM call with tool results + // Implementation similar to above... + const synthesisResponse = await synthesizeResponse(llmResponse, toolsUsed); + + finalSpan.setAttribute('gen_ai.response.text', synthesisResponse.message); + finalSpan.setAttribute('gen_ai.usage.total_tokens', synthesisResponse.usage.total_tokens); + + totalTokens += synthesisResponse.usage.total_tokens; + finalResponse = synthesisResponse.message; + + return synthesisResponse; + } + ); + } else { + finalResponse = llmResponse.choices[0].message.content; + } + + // Determine resolution status + if (toolsUsed.includes('create_ticket')) { + resolutionStatus = 'escalated'; + } else if (finalResponse.toLowerCase().includes('resolved') || finalResponse.toLowerCase().includes('solved')) { + resolutionStatus = 'resolved'; + } else { + resolutionStatus = 'answered'; + } + + // Set final agent span attributes + agentSpan.setAttribute('gen_ai.response.text', finalResponse); + agentSpan.setAttribute('gen_ai.usage.total_tokens', totalTokens); + agentSpan.setAttribute('conversation.tools_used_count', toolsUsed.length); + agentSpan.setAttribute('conversation.tools_used', JSON.stringify(toolsUsed)); + agentSpan.setAttribute('conversation.resolution_status', resolutionStatus); + agentSpan.setAttribute('conversation.cost_estimate_usd', (totalTokens * 0.0001).toFixed(4)); // Example cost calculation + + res.json({ + message: finalResponse, + responseId: `resp_${Date.now()}`, + totalTokens, + toolsUsed, + resolutionStatus, + }); + + } catch (error) { + agentSpan.setStatus({ code: 2, message: 'agent_invocation_failed' }); + agentSpan.setAttribute('error.type', error instanceof Error ? error.constructor.name : 'UnknownError'); + Sentry.captureException(error); + res.status(500).json({ error: 'AI agent processing failed' }); + } + } + ); +}); + +// Helper functions for tool execution +async function searchKnowledgeBase(query: string): Promise { + // Implementation for knowledge base search + return `Knowledge base results for: ${query}`; +} + +async function createSupportTicket(args: any): Promise { + // Implementation for ticket creation + return `TICKET-${Date.now()}`; +} + +async function checkOrderStatus(orderId: string): Promise { + // Implementation for order status check + return `Order ${orderId} is shipped and tracking number is XYZ123`; +} + +async function synthesizeResponse(llmResponse: any, toolsUsed: string[]): Promise { + // Implementation for final response synthesis + return { + message: "Based on the information I found, here's your answer...", + usage: { total_tokens: 150 } + }; +} +``` + +**How the trace works together:** +- Frontend span (`gen_ai.invoke_agent`) captures the entire user interaction from message to response. +- Backend agent span continues the trace with the same operation and agent name for correlation. +- LLM spans (`gen_ai.chat`) track individual model calls with token usage and performance. +- Tool execution spans (`gen_ai.execute_tool`) monitor each tool call with input/output and timing. +- Rich attributes enable monitoring of conversation quality, cost, and business outcomes. + +What to monitor with span metrics: +- p95 duration of `op:gen_ai.invoke_agent` grouped by `conversation.resolution_status`. +- Token usage trends via `gen_ai.usage.total_tokens` by `gen_ai.request.model`. +- Tool usage patterns via `op:gen_ai.execute_tool` grouped by `gen_ai.tool.name`. +- Cost analysis via `conversation.cost_estimate_usd` aggregated by time period. +- Agent effectiveness via `conversation.resolution_status` distribution. +- Error rates for each component: `op:gen_ai.chat`, `op:gen_ai.execute_tool`, `op:gen_ai.invoke_agent`. From 6ba34b76a5161f8c1c9ca16c09d96a48bc6b37b3 Mon Sep 17 00:00:00 2001 From: Cody De Arkland Date: Mon, 27 Oct 2025 00:19:51 -0700 Subject: [PATCH 13/15] fix: correct Manual LLM Instrumentation example to match SDK conventions Critical fixes to align with Sentry AI agent span conventions: - Add missing SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN with 'manual.ai.custom-llm' value - Add gen_ai.response.model attributes to all agent and LLM spans - Import SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN from @sentry/core - Ensure all spans (invoke_agent, chat, execute_tool) have required attributes - Follow exact patterns from JavaScript SDK AI integrations - Matches developer docs AI agent conventions at develop.sentry.dev --- .../common/tracing/span-metrics/examples.mdx | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx index fb824db84de10..ea18e9df2bb85 100644 --- a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx +++ b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx @@ -521,6 +521,8 @@ Example Repository: _Coming soon - sample repository in development_ **Frontend (React) — Instrument AI Chat Interface:** ```typescript +import { SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; + // In your AI chat component const handleSendMessage = async (userMessage: string) => { await Sentry.startSpan( @@ -537,6 +539,7 @@ const handleSendMessage = async (userMessage: string) => { ...conversationHistory, { role: 'user', content: userMessage } ]), + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'manual.ai.custom-llm', 'conversation.turn': conversationHistory.length + 1, 'conversation.session_id': sessionId, }, @@ -565,6 +568,7 @@ const handleSendMessage = async (userMessage: string) => { // Set response attributes agentSpan.setAttribute('gen_ai.response.text', aiResponse.message); agentSpan.setAttribute('gen_ai.response.id', aiResponse.responseId); + agentSpan.setAttribute('gen_ai.response.model', 'custom-model-v2'); agentSpan.setAttribute('gen_ai.usage.total_tokens', aiResponse.totalTokens); agentSpan.setAttribute('conversation.tools_used', aiResponse.toolsUsed?.length || 0); agentSpan.setAttribute('conversation.resolution_status', aiResponse.resolutionStatus); @@ -599,6 +603,8 @@ Where to put this in your app: **Backend — Custom LLM Integration with Tool Calls:** ```typescript +import { SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; + // Express API route for custom AI agent app.post('/api/ai/chat', async (req: Request, res: Response) => { const { message, sessionId, conversationHistory } = req.body; @@ -613,6 +619,7 @@ app.post('/api/ai/chat', async (req: Request, res: Response) => { 'gen_ai.agent.name': 'Customer Support Agent', 'gen_ai.system': 'custom-llm', 'gen_ai.request.model': 'custom-model-v2', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'manual.ai.custom-llm', 'conversation.session_id': sessionId, }, }, @@ -647,6 +654,7 @@ app.post('/api/ai/chat', async (req: Request, res: Response) => { ]), 'gen_ai.request.temperature': 0.7, 'gen_ai.request.max_tokens': 500, + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'manual.ai.custom-llm', }, }, async (llmSpan) => { @@ -703,6 +711,7 @@ app.post('/api/ai/chat', async (req: Request, res: Response) => { 'gen_ai.tool.input': toolCall.function.arguments, 'gen_ai.system': 'custom-llm', 'gen_ai.request.model': 'custom-model-v2', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'manual.ai.custom-llm', }, }, async (toolSpan) => { @@ -762,6 +771,7 @@ app.post('/api/ai/chat', async (req: Request, res: Response) => { 'gen_ai.operation.name': 'chat', 'gen_ai.system': 'custom-llm', 'gen_ai.request.model': 'custom-model-v2', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'manual.ai.custom-llm', 'llm.call_type': 'final_synthesis', }, }, @@ -794,6 +804,7 @@ app.post('/api/ai/chat', async (req: Request, res: Response) => { // Set final agent span attributes agentSpan.setAttribute('gen_ai.response.text', finalResponse); + agentSpan.setAttribute('gen_ai.response.model', 'custom-model-v2'); agentSpan.setAttribute('gen_ai.usage.total_tokens', totalTokens); agentSpan.setAttribute('conversation.tools_used_count', toolsUsed.length); agentSpan.setAttribute('conversation.tools_used', JSON.stringify(toolsUsed)); From 01aad81ec94ecd454ff69c272cd0a1cc3d14ead0 Mon Sep 17 00:00:00 2001 From: Cody De Arkland Date: Mon, 27 Oct 2025 01:18:20 -0700 Subject: [PATCH 14/15] simplify: reduce Manual LLM Instrumentation example to single tool - Removed 6 extra tools to focus on core instrumentation patterns - Kept only search_knowledge_base tool as representative example - Simplified monitoring metrics section to match other examples - Reduced verbosity while maintaining all essential tracing patterns - Test app remains comprehensive with all tools intact --- .../common/tracing/span-metrics/examples.mdx | 62 +++++++++---------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx index ea18e9df2bb85..ef6bdff3f089d 100644 --- a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx +++ b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx @@ -521,9 +521,19 @@ Example Repository: _Coming soon - sample repository in development_ **Frontend (React) — Instrument AI Chat Interface:** ```typescript +import { useState, useEffect } from 'react'; import { SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; // In your AI chat component +export default function CustomerSupportChat() { + const [conversationHistory, setConversationHistory] = useState([]); + const [sessionId, setSessionId] = useState(''); + + // Generate sessionId on client-side only to avoid hydration mismatch + useEffect(() => { + setSessionId(`session_${Date.now()}`); + }, []); + const handleSendMessage = async (userMessage: string) => { await Sentry.startSpan( { @@ -600,6 +610,8 @@ Where to put this in your app: - Auto-instrumentation will capture the fetch request; the explicit span adds AI-specific context - Consider adding user feedback collection to track conversation quality +**Important:** Generate `sessionId` in `useEffect` to avoid hydration errors when using Server-Side Rendering (SSR). Using `Date.now()` or random values during component initialization will cause mismatches between server and client renders. + **Backend — Custom LLM Integration with Tool Calls:** ```typescript @@ -626,9 +638,7 @@ app.post('/api/ai/chat', async (req: Request, res: Response) => { async (agentSpan) => { try { const tools = [ - { name: 'search_knowledge_base', description: 'Search company knowledge base for answers' }, - { name: 'create_ticket', description: 'Create a support ticket for complex issues' }, - { name: 'check_order_status', description: 'Look up customer order information' } + { name: 'search_knowledge_base', description: 'Search company knowledge base for answers' } ]; agentSpan.setAttribute('gen_ai.request.available_tools', JSON.stringify(tools)); @@ -700,6 +710,9 @@ app.post('/api/ai/chat', async (req: Request, res: Response) => { // Step 2: Execute any tool calls if (llmResponse.choices[0].message.tool_calls) { for (const toolCall of llmResponse.choices[0].message.tool_calls) { + // Find tool description from available tools + const toolDefinition = tools.find(t => t.name === toolCall.function.name); + const toolResult = await Sentry.startSpan( { name: `execute_tool ${toolCall.function.name}`, @@ -707,6 +720,7 @@ app.post('/api/ai/chat', async (req: Request, res: Response) => { attributes: { 'gen_ai.operation.name': 'execute_tool', 'gen_ai.tool.name': toolCall.function.name, + 'gen_ai.tool.description': toolDefinition?.description || '', 'gen_ai.tool.type': 'function', 'gen_ai.tool.input': toolCall.function.arguments, 'gen_ai.system': 'custom-llm', @@ -716,9 +730,10 @@ app.post('/api/ai/chat', async (req: Request, res: Response) => { }, async (toolSpan) => { let toolOutput = ''; + let toolTokensUsed = 0; try { - // Execute the actual tool + // Execute the tool switch (toolCall.function.name) { case 'search_knowledge_base': const searchArgs = JSON.parse(toolCall.function.arguments); @@ -727,30 +742,18 @@ app.post('/api/ai/chat', async (req: Request, res: Response) => { toolSpan.setAttribute('search.results_count', toolOutput.split('\n').length); break; - case 'create_ticket': - const ticketArgs = JSON.parse(toolCall.function.arguments); - const ticketId = await createSupportTicket(ticketArgs); - toolOutput = `Created support ticket #${ticketId}`; - toolSpan.setAttribute('ticket.id', ticketId); - toolSpan.setAttribute('ticket.priority', ticketArgs.priority || 'medium'); - resolutionStatus = 'escalated'; - break; - - case 'check_order_status': - const orderArgs = JSON.parse(toolCall.function.arguments); - toolOutput = await checkOrderStatus(orderArgs.orderId); - toolSpan.setAttribute('order.id', orderArgs.orderId); - break; - default: throw new Error(`Unknown tool: ${toolCall.function.name}`); } toolSpan.setAttribute('gen_ai.tool.output', toolOutput); + toolSpan.setAttribute('gen_ai.usage.total_tokens', toolTokensUsed); toolsUsed.push(toolCall.function.name); + totalTokens += toolTokensUsed; } catch (toolError) { toolSpan.setStatus({ code: 2, message: 'tool_execution_failed' }); + toolSpan.setAttribute('error.type', toolError instanceof Error ? toolError.constructor.name : 'UnknownError'); toolSpan.setAttribute('error.message', toolError instanceof Error ? toolError.message : 'Unknown tool error'); toolOutput = `Error executing ${toolCall.function.name}: ${toolError instanceof Error ? toolError.message : 'Unknown error'}`; } @@ -831,22 +834,19 @@ app.post('/api/ai/chat', async (req: Request, res: Response) => { // Helper functions for tool execution async function searchKnowledgeBase(query: string): Promise { - // Implementation for knowledge base search - return `Knowledge base results for: ${query}`; + // Search company knowledge base - returns relevant policy info + const results = [ + "Our return policy allows returns within 30 days of purchase.", + "Refunds are processed within 5-7 business days after we receive the item.", + "Items must be in original condition with tags attached.", + "Free return shipping is provided for defective items." + ]; + return results.join('\n'); } -async function createSupportTicket(args: any): Promise { - // Implementation for ticket creation - return `TICKET-${Date.now()}`; -} - -async function checkOrderStatus(orderId: string): Promise { - // Implementation for order status check - return `Order ${orderId} is shipped and tracking number is XYZ123`; -} async function synthesizeResponse(llmResponse: any, toolsUsed: string[]): Promise { - // Implementation for final response synthesis + // Make final LLM call to synthesize tool results into response return { message: "Based on the information I found, here's your answer...", usage: { total_tokens: 150 } From 1eb046a92c0ab7893984d1f225cc32611b55c2f8 Mon Sep 17 00:00:00 2001 From: Cody De Arkland Date: Mon, 27 Oct 2025 01:32:38 -0700 Subject: [PATCH 15/15] simplify: significantly shorten backend LLM example while preserving data fidelity - Removed verbose LLM API call implementation details - Simplified tool execution to essential patterns only - Removed complex final synthesis step - Kept all required AI agent span attributes and conventions - Reduced from ~100 lines to ~40 lines while maintaining all tracing patterns - Helper functions now show clean interface without implementation details --- .../common/tracing/span-metrics/examples.mdx | 116 ++---------------- 1 file changed, 10 insertions(+), 106 deletions(-) diff --git a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx index ef6bdff3f089d..ddc68c2df9c7e 100644 --- a/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx +++ b/docs/platforms/javascript/common/tracing/span-metrics/examples.mdx @@ -668,27 +668,7 @@ app.post('/api/ai/chat', async (req: Request, res: Response) => { }, }, async (llmSpan) => { - // Call your custom LLM API - const llmApiResponse = await fetch('https://your-custom-llm-api.com/chat', { - method: 'POST', - headers: { - 'Authorization': `Bearer ${process.env.CUSTOM_LLM_API_KEY}`, - 'Content-Type': 'application/json' - }, - body: JSON.stringify({ - model: 'custom-model-v2', - messages: conversationHistory.concat([{ role: 'user', content: message }]), - temperature: 0.7, - max_tokens: 500, - tools: tools - }) - }); - - if (!llmApiResponse.ok) { - throw new Error(`LLM API failed: ${llmApiResponse.status}`); - } - - const llmData = await llmApiResponse.json(); + const llmData = await callCustomLLM(message, conversationHistory); // Set LLM response attributes llmSpan.setAttribute('gen_ai.response.text', llmData.choices[0].message.content || ''); @@ -707,119 +687,43 @@ app.post('/api/ai/chat', async (req: Request, res: Response) => { } ); - // Step 2: Execute any tool calls + // Step 2: Execute tool calls if present if (llmResponse.choices[0].message.tool_calls) { for (const toolCall of llmResponse.choices[0].message.tool_calls) { - // Find tool description from available tools - const toolDefinition = tools.find(t => t.name === toolCall.function.name); - - const toolResult = await Sentry.startSpan( + await Sentry.startSpan( { name: `execute_tool ${toolCall.function.name}`, op: 'gen_ai.execute_tool', attributes: { 'gen_ai.operation.name': 'execute_tool', 'gen_ai.tool.name': toolCall.function.name, - 'gen_ai.tool.description': toolDefinition?.description || '', 'gen_ai.tool.type': 'function', 'gen_ai.tool.input': toolCall.function.arguments, - 'gen_ai.system': 'custom-llm', - 'gen_ai.request.model': 'custom-model-v2', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'manual.ai.custom-llm', }, }, async (toolSpan) => { - let toolOutput = ''; - let toolTokensUsed = 0; - - try { - // Execute the tool - switch (toolCall.function.name) { - case 'search_knowledge_base': - const searchArgs = JSON.parse(toolCall.function.arguments); - toolOutput = await searchKnowledgeBase(searchArgs.query); - toolSpan.setAttribute('search.query', searchArgs.query); - toolSpan.setAttribute('search.results_count', toolOutput.split('\n').length); - break; - - default: - throw new Error(`Unknown tool: ${toolCall.function.name}`); - } - - toolSpan.setAttribute('gen_ai.tool.output', toolOutput); - toolSpan.setAttribute('gen_ai.usage.total_tokens', toolTokensUsed); - toolsUsed.push(toolCall.function.name); - totalTokens += toolTokensUsed; - - } catch (toolError) { - toolSpan.setStatus({ code: 2, message: 'tool_execution_failed' }); - toolSpan.setAttribute('error.type', toolError instanceof Error ? toolError.constructor.name : 'UnknownError'); - toolSpan.setAttribute('error.message', toolError instanceof Error ? toolError.message : 'Unknown tool error'); - toolOutput = `Error executing ${toolCall.function.name}: ${toolError instanceof Error ? toolError.message : 'Unknown error'}`; - } + const toolOutput = await executeKnowledgeBaseSearch(toolCall.function.arguments); - return toolOutput; + toolSpan.setAttribute('gen_ai.tool.output', toolOutput); + toolSpan.setAttribute('search.query', JSON.parse(toolCall.function.arguments).query); + toolsUsed.push(toolCall.function.name); } ); } } - // Step 3: Generate final response (if tools were used) - if (toolsUsed.length > 0) { - const finalLlmResponse = await Sentry.startSpan( - { - name: 'chat custom-model-v2', - op: 'gen_ai.chat', - attributes: { - 'gen_ai.operation.name': 'chat', - 'gen_ai.system': 'custom-llm', - 'gen_ai.request.model': 'custom-model-v2', - [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'manual.ai.custom-llm', - 'llm.call_type': 'final_synthesis', - }, - }, - async (finalSpan) => { - // Make final LLM call with tool results - // Implementation similar to above... - const synthesisResponse = await synthesizeResponse(llmResponse, toolsUsed); - - finalSpan.setAttribute('gen_ai.response.text', synthesisResponse.message); - finalSpan.setAttribute('gen_ai.usage.total_tokens', synthesisResponse.usage.total_tokens); - - totalTokens += synthesisResponse.usage.total_tokens; - finalResponse = synthesisResponse.message; - - return synthesisResponse; - } - ); - } else { - finalResponse = llmResponse.choices[0].message.content; - } - - // Determine resolution status - if (toolsUsed.includes('create_ticket')) { - resolutionStatus = 'escalated'; - } else if (finalResponse.toLowerCase().includes('resolved') || finalResponse.toLowerCase().includes('solved')) { - resolutionStatus = 'resolved'; - } else { - resolutionStatus = 'answered'; - } - - // Set final agent span attributes + // Set final agent attributes + const finalResponse = llmResponse.choices[0].message.content; agentSpan.setAttribute('gen_ai.response.text', finalResponse); - agentSpan.setAttribute('gen_ai.response.model', 'custom-model-v2'); agentSpan.setAttribute('gen_ai.usage.total_tokens', totalTokens); - agentSpan.setAttribute('conversation.tools_used_count', toolsUsed.length); agentSpan.setAttribute('conversation.tools_used', JSON.stringify(toolsUsed)); - agentSpan.setAttribute('conversation.resolution_status', resolutionStatus); - agentSpan.setAttribute('conversation.cost_estimate_usd', (totalTokens * 0.0001).toFixed(4)); // Example cost calculation + agentSpan.setAttribute('conversation.resolution_status', toolsUsed.length > 0 ? 'resolved' : 'answered'); res.json({ message: finalResponse, - responseId: `resp_${Date.now()}`, totalTokens, toolsUsed, - resolutionStatus, }); } catch (error) {