feat: Ensure API errors are tracked (#1880)
jsumners-nr committed Nov 21, 2023
1 parent 86d83f2 commit 289c2a2
Showing 10 changed files with 269 additions and 40 deletions.
59 changes: 46 additions & 13 deletions lib/instrumentation/openai.js
@@ -8,7 +8,8 @@ const { openAiHeaders, openAiApiKey } = require('../../lib/symbols')
const {
LlmChatCompletionMessage,
LlmChatCompletionSummary,
LlmEmbedding
LlmEmbedding,
LlmErrorMessage
} = require('../../lib/llm-events/openai')
const LlmTrackedIds = require('../../lib/llm-events/tracked-ids')

@@ -35,6 +36,7 @@ function shouldSkipInstrumentation(config, shim) {
return semver.lt(pkgVersion, MIN_VERSION)
}

// eslint-disable-next-line sonarjs/cognitive-complexity
module.exports = function initialize(agent, openai, moduleName, shim) {
if (shouldSkipInstrumentation(agent.config, shim)) {
shim.logger.debug(
@@ -55,8 +57,14 @@ module.exports = function initialize(agent, openai, moduleName, shim) {

if (segment) {
segment[openAiApiKey] = apiKey
segment[openAiHeaders] =
result?.response?.headers && Object.fromEntries(result.response.headers)

// If the result is an error, i.e. an OpenAI client error, then the
// headers are provided via a proxy attached to `result.headers`.
// Otherwise, the result is a typical response-like object.
const headers = result?.response?.headers
? Object.fromEntries(result.response.headers)
: { ...result?.headers }
segment[openAiHeaders] = headers
}
}
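
The normalization above can be exercised in isolation. A minimal sketch, assuming the success path exposes headers as an iterable of `[name, value]` pairs (a fetch-style `Headers` object) and the error path exposes a plain-object-like proxy; both input shapes are assumptions for illustration:

```js
// Standalone sketch of the header extraction above (input shapes assumed).
function extractHeaders(result) {
  return result?.response?.headers
    ? Object.fromEntries(result.response.headers) // success: iterable pairs
    : { ...result?.headers } // error: plain-object-like proxy, so spread it
}

// A Map stands in for a fetch-style Headers object here.
const success = { response: { headers: new Map([['x-request-id', 'abc']]) } }
const failure = { headers: { 'x-request-id': 'abc' } }

console.log(extractHeaders(success)) // { 'x-request-id': 'abc' }
console.log(extractHeaders(failure)) // { 'x-request-id': 'abc' }
```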

@@ -127,14 +135,18 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
* @param {TraceSegment} params.segment active segment from chat completion
* @param {object} params.request chat completion params
* @param {object} params.response chat completion response
* @param {boolean} [params.hasError=false] indicates that the completion
* resulted in an error
* @returns {LlmChatCompletionSummary} A summary object.
*/
function recordChatCompletionMessages({ segment, request, response }) {
function recordChatCompletionMessages({ segment, request, response, hasError = false }) {
const tx = segment.transaction
const completionSummary = new LlmChatCompletionSummary({
agent,
segment,
request,
response
response,
withError: hasError
})

// Only take the first response message and append to input messages
@@ -155,6 +167,8 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
})

recordEvent('LlmChatCompletionSummary', completionSummary)

return completionSummary
}

/**
@@ -173,19 +187,26 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
promise: true,
// eslint-disable-next-line max-params
after(_shim, _fn, _name, err, response, segment) {
if (!response) {
// If we get an error, it is possible that `response = null`.
// In that case, we define it to be an empty object.
response = {}
}
response.headers = segment[openAiHeaders]
response.api_key = segment[openAiApiKey]

// TODO: add LlmErrorMessage on failure
// and exit
// See: https://github.com/newrelic/node-newrelic/issues/1845
// if (err) {}

recordChatCompletionMessages({
const summary = recordChatCompletionMessages({
segment,
request,
response
response,
hasError: err != null
})

if (err) {
const llmError = new LlmErrorMessage({ cause: err, summary, response })
shim.agent.errors.add(segment.transaction, err, llmError)
}

// cleanup keys on response before returning to user code
delete response.api_key
delete response.headers
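
Taken together, the hook records the summary unconditionally and, on failure, notices the error with the `LlmErrorMessage` as its custom attributes. A condensed sketch of that flow, relying on names from the surrounding module:

```js
// Condensed sketch of the `after` hook's error path (not a verbatim copy).
function afterChatCompletion(err, response, segment, request) {
  response = response || {} // a rejected call may yield no response at all
  const summary = recordChatCompletionMessages({
    segment,
    request,
    response,
    hasError: err != null // loose `!= null` covers both null and undefined
  })
  if (err) {
    // The LlmErrorMessage rides along as custom attributes on the error trace.
    const llmError = new LlmErrorMessage({ cause: err, summary, response })
    shim.agent.errors.add(segment.transaction, err, llmError)
  }
}
```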
@@ -208,16 +229,28 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
promise: true,
// eslint-disable-next-line max-params
after(_shim, _fn, _name, err, response, segment) {
if (!response) {
// If we get an error, it is possible that `response = null`.
// In that case, we define it to be an empty object.
response = {}
}
response.headers = segment[openAiHeaders]
response.api_key = segment[openAiApiKey]
const embedding = new LlmEmbedding({
agent,
segment,
request,
response
response,
withError: err != null
})

recordEvent('LlmEmbedding', embedding)

if (err) {
const llmError = new LlmErrorMessage({ cause: err, embedding, response })
shim.agent.errors.add(segment.transaction, err, llmError)
}

// cleanup keys on response before returning to user code
delete response.api_key
delete response.headers
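
The embeddings hook mirrors the chat-completion hook almost line for line: the same null-response guard, the same header and API-key stitching, and the same cleanup. A hypothetical refactor sketch of the duplicated step, not part of this commit:

```js
// Hypothetical shared helper (NOT in this commit); both hooks repeat this
// normalization before building their events.
function normalizeResponse(response, segment) {
  response = response || {} // errors can leave the response null
  response.headers = segment[openAiHeaders]
  response.api_key = segment[openAiApiKey]
  return response
}
```

The one behavioral difference is which event rides on the `LlmErrorMessage`: the chat path passes `summary` (populating `completion_id`), while the embedding path passes `embedding` (populating `embedding_id`).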
3 changes: 2 additions & 1 deletion lib/llm-events/openai/chat-completion-summary.js
@@ -7,8 +7,9 @@
const LlmEvent = require('./event')

module.exports = class LlmChatCompletionSummary extends LlmEvent {
constructor({ agent, segment, request = {}, response = {} }) {
constructor({ agent, segment, request = {}, response = {}, withError = false }) {
super({ agent, segment, request, response, responseAttrs: true })
this.error = withError
this.conversation_id = this.conversationId(agent)
this['request.max_tokens'] = request.max_tokens
this['request.temperature'] = request.temperature
3 changes: 2 additions & 1 deletion lib/llm-events/openai/embedding.js
@@ -7,8 +7,9 @@
const LlmEvent = require('./event')

module.exports = class LlmEmbedding extends LlmEvent {
constructor({ agent, segment, request = {}, response = {} }) {
constructor({ agent, segment, request = {}, response = {}, withError = false }) {
super({ agent, segment, request, response, responseAttrs: true })
this.error = withError
this.input = request.input?.toString()
}
}
33 changes: 21 additions & 12 deletions lib/llm-events/openai/error-message.js
@@ -4,18 +4,27 @@
*/

'use strict'

/**
* Represents an error object, to be tracked via `agent.errors`, resulting
* from an error returned by the OpenAI API.
*/
module.exports = class LlmErrorMessage {
constructor(request, response) {
this.api_key_last_four_digits = response?.api_key && `sk-${response.api_key.slice(-4)}`
this['request.model'] = request.model || request.engine
this['request.temperature'] = request.temperature
this['request.max_tokens'] = request.max_tokens
this.vendor = 'openAI'
this.ingest_source = 'Node'
this['response.number_of_messages'] = request?.messages?.length
this['http.statusCode'] = response.status
this['response.organization'] = response.headers['openai-organization']
this['error.code'] = response.code
this['error.param'] = response.param
/**
* @param {object} params Constructor parameters
* @param {object} [params.response] Instance of an incoming message.
* @param {object} [params.cause] An instance of the OpenAI error object.
* @param {LlmChatCompletionSummary} [params.summary] Details about the
* conversation if it was a chat completion conversation.
* @param {LlmEmbedding} [params.embedding] Details about the conversation
* if it was an embedding conversation.
*/
constructor({ response, cause, summary, embedding } = {}) {
this['http.statusCode'] = response?.status ?? cause?.status
this['error.message'] = cause?.message
this['error.code'] = response?.code ?? cause?.error?.code
this['error.param'] = response?.param ?? cause?.error?.param
this.completion_id = summary?.id
this.embedding_id = embedding?.id
}
}
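
A hedged instantiation sketch of the new constructor. The `cause` shape below is modeled on the OpenAI client's `APIError` (top-level `status` and `message` plus a nested `error` object) and is an assumption for illustration:

```js
const LlmErrorMessage = require('./lib/llm-events/openai/error-message')

// Assumed APIError-like shape; not taken verbatim from the OpenAI SDK.
const cause = {
  status: 400,
  message: "'bad-role' is not one of ['system', 'assistant', 'user', 'function']",
  error: { type: 'invalid_request_error', param: null, code: null }
}

const msg = new LlmErrorMessage({ cause, summary: { id: 'chat-summary-id' } })
console.log(msg['http.statusCode']) // 400, falls back to cause.status
console.log(msg['error.message'])   // the message above
console.log(msg.completion_id)      // 'chat-summary-id'
console.log(msg.embedding_id)       // undefined, since no embedding was passed
```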
36 changes: 36 additions & 0 deletions test/lib/openai-responses.js
@@ -158,6 +158,42 @@ responses.set('You are a mathematician.', {
}
})

responses.set('Invalid role.', {
headers: {
'content-type': 'application/json',
'x-request-id': '5db943f509e9031e73de8f4a5e46de32',
'openai-organization': 'new-relic-nkmd8b',
'openai-processing-ms': '4',
'openai-version': '2020-10-01'
},
code: 400,
body: {
error: {
message:
"'bad-role' is not one of ['system', 'assistant', 'user', 'function'] - 'messages.0.role'",
type: 'invalid_request_error',
param: null,
code: null
}
}
})

responses.set('Embedding not allowed.', {
headers: {
'content-type': 'application/json; charset=utf-8',
'x-request-id': '25e22d5a7af9e40b7d5b1828593b52aa'
},
code: 403,
body: {
error: {
message: 'You are not allowed to generate embeddings from this model',
type: 'invalid_request_error',
param: null,
code: null
}
}
})
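
Assuming the module exports the `responses` Map (the export itself is outside this diff), the new fixtures can be read back with the standard Map API:

```js
const responses = require('./test/lib/openai-responses')

const fixture = responses.get('Invalid role.')
console.log(fixture.code)            // 400
console.log(fixture.body.error.type) // 'invalid_request_error'
```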

responses.set('Streamed response', {
headers: {
'Content-Type': 'text/event-stream',
17 changes: 17 additions & 0 deletions test/unit/llm-events/openai/chat-completion-summary.test.js
@@ -55,4 +55,21 @@ tap.test('LlmChatCompletionSummary', (t) => {
t.end()
})
})

t.test('should set error to true', (t) => {
const api = helper.getAgentApi()
const conversationId = 'convo-id'
helper.runInTransaction(agent, () => {
api.addCustomAttribute('conversation_id', conversationId)
const chatSummaryEvent = new LlmChatCompletionSummary({
agent,
segment: null,
request: {},
response: {},
withError: true
})
t.equal(true, chatSummaryEvent.error)
t.end()
})
})
})
4 changes: 3 additions & 1 deletion test/unit/llm-events/openai/common.js
@@ -76,6 +76,7 @@ function getExpectedResult(tx, event, type, completionId) {
case 'embedding':
expected = { ...expected, ...resKeys }
expected.input = 'This is my test input'
expected.error = false
break
case 'summary':
expected = {
@@ -86,7 +87,8 @@
['request.temperature']: 'medium-rare',
['response.number_of_messages']: 3,
['response.usage.completion_tokens']: 10,
['response.choices.finish_reason']: 'stop'
['response.choices.finish_reason']: 'stop',
error: false
}
break
case 'message':
23 changes: 23 additions & 0 deletions test/unit/llm-events/openai/embedding.test.js
@@ -68,4 +68,27 @@ tap.test('LlmEmbedding', (t) => {
t.end()
})
})

t.test('should set error to true', (t) => {
const req = {
input: 'This is my test input',
model: 'gpt-3.5-turbo-0613'
}

const api = helper.getAgentApi()
helper.runInTransaction(agent, () => {
api.startSegment('fakeSegment', false, () => {
const segment = api.shim.getActiveSegment()
const embeddingEvent = new LlmEmbedding({
agent,
segment,
request: req,
response: res,
withError: true
})
t.equal(true, embeddingEvent.error)
t.end()
})
})
})
})
15 changes: 5 additions & 10 deletions test/unit/llm-events/openai/error.test.js
@@ -11,19 +11,14 @@ const { req, chatRes } = require('./common')

tap.test('LlmErrorMessage', (t) => {
const res = { ...chatRes, code: 'insufficient_quota', param: 'test-param', status: 429 }
const errorMsg = new LlmErrorMessage(req, res)
const errorMsg = new LlmErrorMessage({ request: req, response: res })
const expected = {
'api_key_last_four_digits': 'sk-7890',
'request.model': 'gpt-3.5-turbo-0613',
'request.temperature': 'medium-rare',
'request.max_tokens': '1000000',
'vendor': 'openAI',
'ingest_source': 'Node',
'response.number_of_messages': 2,
'http.statusCode': 429,
'response.organization': 'new-relic',
'error.message': undefined,
'error.code': 'insufficient_quota',
'error.param': 'test-param'
'error.param': 'test-param',
'completion_id': undefined,
'embedding_id': undefined
}
t.same(errorMsg, expected)
t.end()
