Skip to content

Commit 68fcc82

Browse files
authored
feat(core): Improve error handling for Anthropic AI instrumentation (#17535)
This PR enhances the Anthropic AI instrumentation to properly handle and record errors that occur as part of response metadata. Core error-handling improvements: (1) enhanced error detection — added proper handling for Anthropic API error responses that use the `type: 'error'` structure; (2) improved streaming error handling — better error-type detection and reporting for streaming operations; (3) error capturing — errors are now manually captured for both standard and streaming requests.
1 parent 65e549c commit 68fcc82

File tree

6 files changed

+484
-61
lines changed

6 files changed

+484
-61
lines changed
Lines changed: 115 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,115 @@
1+
import { instrumentAnthropicAiClient } from '@sentry/core';
2+
import * as Sentry from '@sentry/node';
3+
4+
/**
 * Minimal stand-in for the Anthropic SDK client, used to exercise the
 * Sentry instrumentation without any network access.
 *
 * Mirrors the subset of the real client surface this scenario touches:
 * `messages.create` and `models.retrieve`.
 */
class MockAnthropic {
  constructor(config) {
    this.apiKey = config.apiKey;

    // Expose bound methods under the same property paths as the real SDK.
    this.messages = {
      create: this._messagesCreate.bind(this),
    };
    this.models = {
      retrieve: this._modelsRetrieve.bind(this),
    };
  }

  /**
   * Simulates `messages.create`.
   *
   * Rejects with a 400-style API error for the sentinel model
   * `invalid-format`; otherwise resolves with a canned `tool_use` response.
   */
  async _messagesCreate(params) {
    // Small artificial latency so spans get a non-zero duration.
    await new Promise(resolve => setTimeout(resolve, 5));

    // Case 1: invalid tool format -> reject the way the API would.
    if (params.model === 'invalid-format') {
      throw Object.assign(new Error('Invalid format'), {
        status: 400,
        headers: { 'x-request-id': 'mock-invalid-tool-format-error' },
      });
    }

    // Default case: a successful response containing one tool call.
    return {
      id: 'msg_ok',
      type: 'message',
      model: params.model,
      role: 'assistant',
      content: [
        {
          type: 'tool_use',
          id: 'tool_ok_1',
          name: 'calculator',
          input: { expression: '2+2' },
        },
      ],
      stop_reason: 'tool_use',
      usage: { input_tokens: 7, output_tokens: 9 },
    };
  }

  /**
   * Simulates `models.retrieve`.
   *
   * Rejects with a 404-style API error for the sentinel id
   * `nonexistent-model`; otherwise resolves with a minimal model descriptor.
   */
  async _modelsRetrieve(modelId) {
    await new Promise(resolve => setTimeout(resolve, 5));

    if (modelId === 'nonexistent-model') {
      throw Object.assign(new Error('Model not found'), {
        status: 404,
        headers: { 'x-request-id': 'mock-model-retrieval-error' },
      });
    }

    return {
      id: modelId,
      name: modelId,
      created_at: 1715145600,
      model: modelId,
    };
  }
}
64+
65+
/**
 * Drives three instrumented calls inside one `main` span:
 *  1. a messages.create call that rejects (invalid tool format),
 *  2. a models.retrieve call that rejects (unknown model),
 *  3. a messages.create call that succeeds with a tool_use response.
 */
async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const client = instrumentAnthropicAiClient(new MockAnthropic({ apiKey: 'mock-api-key' }));

    // 1. Invalid tool format error.
    // https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/implement-tool-use#handling-tool-use-and-tool-result-content-blocks
    try {
      await client.messages.create({
        model: 'invalid-format',
        messages: [
          {
            role: 'user',
            content: [
              { type: 'text', text: 'Here are the results:' }, // ❌ Text before tool_result
              { type: 'tool_result', tool_use_id: 'toolu_01' },
            ],
          },
        ],
      });
    } catch {
      // Rejection is the expected outcome here.
    }

    // 2. Model retrieval error.
    try {
      await client.models.retrieve('nonexistent-model');
    } catch {
      // Rejection is the expected outcome here.
    }

    // 3. Successful tool usage, for comparison against the error spans.
    await client.messages.create({
      model: 'claude-3-haiku-20240307',
      messages: [{ role: 'user', content: 'Calculate 2+2' }],
      tools: [
        {
          name: 'calculator',
          description: 'Perform calculations',
          input_schema: {
            type: 'object',
            properties: { expression: { type: 'string' } },
            required: ['expression'],
          },
        },
      ],
    });
  });
}

run();
Lines changed: 166 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,166 @@
1+
import { instrumentAnthropicAiClient } from '@sentry/core';
2+
import * as Sentry from '@sentry/node';
3+
4+
// Generator for default fallback
5+
/**
 * Happy-path mock stream: a content block that starts, emits one text
 * delta, and stops cleanly. Returns a fresh async iterator per call.
 */
function createMockDefaultFallbackStream() {
  return (async function* () {
    yield { type: 'content_block_start', index: 0 };
    yield { type: 'content_block_delta', index: 0, delta: { text: 'This stream will work fine.' } };
    yield { type: 'content_block_stop', index: 0 };
  })();
}
23+
24+
// Generator that errors midway through streaming
25+
/**
 * Mock stream that fails midway: it yields a start event plus one text
 * delta, then rejects with 'Stream interrupted'. Used to verify that
 * instrumentation records whatever content arrived before the failure.
 */
function createMockMidwayErrorStream() {
  return (async function* () {
    // Initial event so the consumer observes a successfully started stream.
    yield {
      type: 'content_block_start',
      message: {
        id: 'msg_error_stream_1',
        type: 'message',
        role: 'assistant',
        model: 'claude-3-haiku-20240307',
        content: [],
        usage: { input_tokens: 5 },
      },
    };

    // One chunk of content before the failure.
    yield { type: 'content_block_delta', delta: { text: 'This stream will ' } };

    // Simulate the connection dropping mid-stream.
    await new Promise(resolve => setTimeout(resolve, 5));
    throw new Error('Stream interrupted');
  })();
}
50+
51+
/**
 * Minimal stand-in for the Anthropic SDK client, covering both streaming
 * entry points the instrumentation wraps: `messages.create` (with
 * `stream: true`) and `messages.stream`.
 *
 * Sentinel model names select the failure mode:
 *  - 'error-stream-init'   -> reject before any stream is produced
 *  - 'error-stream-midway' -> return a stream that fails after some data
 */
class MockAnthropic {
  constructor(config) {
    this.apiKey = config.apiKey;

    this.messages = {
      create: this._messagesCreate.bind(this),
      stream: this._messagesStream.bind(this),
    };
  }

  /**
   * Simulates `client.messages.create`, including `stream: true` mode.
   * Non-stream calls always resolve with the default response object.
   */
  async _messagesCreate(params) {
    await new Promise(resolve => setTimeout(resolve, 5));

    const wantsStream = params?.stream === true;

    // Reject before producing a stream for 'error-stream-init'.
    if (wantsStream && params.model === 'error-stream-init') {
      throw new Error('Failed to initialize stream');
    }

    // Hand back a stream that dies partway through for 'error-stream-midway'.
    if (wantsStream && params.model === 'error-stream-midway') {
      return createMockMidwayErrorStream();
    }

    // Default fallback: plain non-streaming response.
    return {
      id: 'msg_mock123',
      type: 'message',
      model: params.model,
      role: 'assistant',
      content: [{ type: 'text', text: 'Non-stream response' }],
      usage: { input_tokens: 5, output_tokens: 7 },
    };
  }

  /**
   * Simulates `client.messages.stream`; always stream-shaped, with the
   * same sentinel-model failure modes as `_messagesCreate`.
   */
  async _messagesStream(params) {
    await new Promise(resolve => setTimeout(resolve, 5));

    switch (params.model) {
      case 'error-stream-init':
        throw new Error('Failed to initialize stream');
      case 'error-stream-midway':
        return createMockMidwayErrorStream();
      default:
        return createMockDefaultFallbackStream();
    }
  }
}
108+
109+
/**
 * Exercises four streaming failure modes inside one `main` span:
 *  1. messages.create with `stream: true` rejecting before the stream starts,
 *  2. messages.stream rejecting before the stream starts,
 *  3. messages.create with `stream: true` failing midway through iteration,
 *  4. messages.stream failing midway through iteration.
 */
async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const client = instrumentAnthropicAiClient(new MockAnthropic({ apiKey: 'mock-api-key' }));

    // Consume a stream to completion, discarding every chunk.
    const drain = async stream => {
      for await (const chunk of stream) {
        void chunk;
      }
    };

    // 1) Error on stream initialization with messages.create
    try {
      await client.messages.create({
        model: 'error-stream-init',
        messages: [{ role: 'user', content: 'This will fail immediately' }],
        stream: true,
      });
    } catch {
      // Rejection is expected.
    }

    // 2) Error on stream initialization with messages.stream
    try {
      await client.messages.stream({
        model: 'error-stream-init',
        messages: [{ role: 'user', content: 'This will also fail immediately' }],
      });
    } catch {
      // Rejection is expected.
    }

    // 3) Error midway through streaming with messages.create
    try {
      await drain(
        await client.messages.create({
          model: 'error-stream-midway',
          messages: [{ role: 'user', content: 'This will fail midway' }],
          stream: true,
        }),
      );
    } catch {
      // Rejection is expected.
    }

    // 4) Error midway through streaming with messages.stream
    try {
      await drain(
        await client.messages.stream({
          model: 'error-stream-midway',
          messages: [{ role: 'user', content: 'This will also fail midway' }],
        }),
      );
    } catch {
      // Rejection is expected.
    }
  });
}

run();

dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts

Lines changed: 97 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -348,4 +348,101 @@ describe('Anthropic integration', () => {
348348
.completed();
349349
});
350350
});
351+
352+
// Additional error scenarios - Streaming errors
353+
const EXPECTED_STREAM_ERROR_SPANS = {
354+
transaction: 'main',
355+
spans: expect.arrayContaining([
356+
// Error with messages.create on stream initialization
357+
expect.objectContaining({
358+
description: 'messages error-stream-init stream-response',
359+
op: 'gen_ai.messages',
360+
status: 'internal_error', // Actual status coming from the instrumentation
361+
data: expect.objectContaining({
362+
'gen_ai.request.model': 'error-stream-init',
363+
'gen_ai.request.stream': true,
364+
}),
365+
}),
366+
// Error with messages.stream on stream initialization
367+
expect.objectContaining({
368+
description: 'messages error-stream-init stream-response',
369+
op: 'gen_ai.messages',
370+
status: 'internal_error', // Actual status coming from the instrumentation
371+
data: expect.objectContaining({
372+
'gen_ai.request.model': 'error-stream-init',
373+
}),
374+
}),
375+
// Error midway with messages.create on streaming - note: The stream is started successfully
376+
// so we get a successful span with the content that was streamed before the error
377+
expect.objectContaining({
378+
description: 'messages error-stream-midway stream-response',
379+
op: 'gen_ai.messages',
380+
status: 'ok',
381+
data: expect.objectContaining({
382+
'gen_ai.request.model': 'error-stream-midway',
383+
'gen_ai.request.stream': true,
384+
'gen_ai.response.streaming': true,
385+
'gen_ai.response.text': 'This stream will ', // We received some data before error
386+
}),
387+
}),
388+
// Error midway with messages.stream - same behavior, we get a span with the streamed data
389+
expect.objectContaining({
390+
description: 'messages error-stream-midway stream-response',
391+
op: 'gen_ai.messages',
392+
status: 'ok',
393+
data: expect.objectContaining({
394+
'gen_ai.request.model': 'error-stream-midway',
395+
'gen_ai.response.streaming': true,
396+
'gen_ai.response.text': 'This stream will ', // We received some data before error
397+
}),
398+
}),
399+
]),
400+
};
401+
402+
createEsmAndCjsTests(__dirname, 'scenario-stream-errors.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
403+
test('handles streaming errors correctly', async () => {
404+
await createRunner().ignore('event').expect({ transaction: EXPECTED_STREAM_ERROR_SPANS }).start().completed();
405+
});
406+
});
407+
408+
// Additional error scenarios - Tool errors and model retrieval errors
409+
const EXPECTED_ERROR_SPANS = {
410+
transaction: 'main',
411+
spans: expect.arrayContaining([
412+
// Invalid tool format error
413+
expect.objectContaining({
414+
description: 'messages invalid-format',
415+
op: 'gen_ai.messages',
416+
status: 'unknown_error',
417+
data: expect.objectContaining({
418+
'gen_ai.request.model': 'invalid-format',
419+
}),
420+
}),
421+
// Model retrieval error
422+
expect.objectContaining({
423+
description: 'models nonexistent-model',
424+
op: 'gen_ai.models',
425+
status: 'unknown_error',
426+
data: expect.objectContaining({
427+
'gen_ai.request.model': 'nonexistent-model',
428+
}),
429+
}),
430+
// Successful tool usage (for comparison)
431+
expect.objectContaining({
432+
description: 'messages claude-3-haiku-20240307',
433+
op: 'gen_ai.messages',
434+
status: 'ok',
435+
data: expect.objectContaining({
436+
'gen_ai.request.model': 'claude-3-haiku-20240307',
437+
'gen_ai.response.tool_calls': expect.stringContaining('tool_ok_1'),
438+
}),
439+
}),
440+
]),
441+
};
442+
443+
createEsmAndCjsTests(__dirname, 'scenario-errors.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
444+
test('handles tool errors and model retrieval errors correctly', async () => {
445+
await createRunner().ignore('event').expect({ transaction: EXPECTED_ERROR_SPANS }).start().completed();
446+
});
447+
});
351448
});

0 commit comments

Comments
 (0)