address feedback
jsumners-nr committed Nov 20, 2023
1 parent 406f831 commit 9c155c7
Showing 9 changed files with 89 additions and 34 deletions.
21 changes: 13 additions & 8 deletions lib/instrumentation/openai.js
@@ -135,15 +135,18 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
    * @param {TraceSegment} params.segment active segment from chat completion
    * @param {object} params.request chat completion params
    * @param {object} params.response chat completion response
+   * @param {boolean} [params.hasError=false] indicates that the completion
+   * resulted in some sort of error case
    * @returns {LlmChatCompletionSummary} A summary object.
    */
-  function recordChatCompletionMessages({ segment, request, response }) {
+  function recordChatCompletionMessages({ segment, request, response, hasError = false }) {
     const tx = segment.transaction
     const completionSummary = new LlmChatCompletionSummary({
       agent,
       segment,
       request,
-      response
+      response,
+      withError: hasError
     })
 
     // Only take the first response message and append to input messages
@@ -195,12 +198,13 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
       const summary = recordChatCompletionMessages({
         segment,
         request,
-        response
+        response,
+        hasError: err != null
       })
 
       if (err) {
-        const llmError = new LlmErrorMessage({ cause: err, summary, request, response })
-        shim.agent.errors.add(segment.transaction, llmError)
+        const llmError = new LlmErrorMessage({ cause: err, summary, response })
+        shim.agent.errors.add(segment.transaction, err, llmError)
       }
 
       // cleanup keys on response before returning to user code
@@ -236,14 +240,15 @@ module.exports = function initialize(agent, openai, moduleName, shim) {
         agent,
         segment,
         request,
-        response
+        response,
+        withError: err != null
       })
 
       recordEvent('LlmEmbedding', embedding)
 
       if (err) {
-        const llmError = new LlmErrorMessage({ cause: err, embedding, request, response })
-        shim.agent.errors.add(segment.transaction, llmError)
+        const llmError = new LlmErrorMessage({ cause: err, embedding, response })
+        shim.agent.errors.add(segment.transaction, err, llmError)
      }
 
       // cleanup keys on response before returning to user code
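The net effect in this file: both handlers now flag the event with the error state and record the original `err` alongside the `LlmErrorMessage` attributes. A condensed sketch of the pattern, assuming the instrumentation context (`shim`, `segment`, `request`, `response`, `err`) of the real handlers; `handleCompletion` is a hypothetical name used only for illustration:

// Sketch of the error path this diff establishes (illustrative only).
function handleCompletion({ shim, segment, request, response, err }) {
  // The summary event now carries a boolean `error` attribute.
  const summary = recordChatCompletionMessages({
    segment,
    request,
    response,
    hasError: err != null
  })

  if (err) {
    // The original err is recorded as the exception itself; LlmErrorMessage
    // now only contributes custom attributes (request fields were dropped).
    const llmError = new LlmErrorMessage({ cause: err, summary, response })
    shim.agent.errors.add(segment.transaction, err, llmError)
  }
}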
3 changes: 2 additions & 1 deletion lib/llm-events/openai/chat-completion-summary.js
@@ -7,8 +7,9 @@
 const LlmEvent = require('./event')
 
 module.exports = class LlmChatCompletionSummary extends LlmEvent {
-  constructor({ agent, segment, request = {}, response = {} }) {
+  constructor({ agent, segment, request = {}, response = {}, withError = false }) {
     super({ agent, segment, request, response, responseAttrs: true })
+    this.error = withError
     this.conversation_id = this.conversationId(agent)
     this['request.max_tokens'] = request.max_tokens
     this['request.temperature'] = request.temperature
3 changes: 2 additions & 1 deletion lib/llm-events/openai/embedding.js
@@ -7,8 +7,9 @@
 const LlmEvent = require('./event')
 
 module.exports = class LlmEmbedding extends LlmEvent {
-  constructor({ agent, segment, request = {}, response = {} }) {
+  constructor({ agent, segment, request = {}, response = {}, withError = false }) {
     super({ agent, segment, request, response, responseAttrs: true })
+    this.error = withError
     this.input = request.input?.toString()
   }
 }
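Both constructors surface the flag identically. A quick sketch of the observable behavior, assuming an `agent` and `segment` like the fixtures the unit tests below set up:

// Illustrative usage; `agent` and `segment` are test fixtures here.
const summary = new LlmChatCompletionSummary({
  agent,
  segment,
  request: {},
  response: {},
  withError: true
})
summary.error // => true

const embedding = new LlmEmbedding({ agent, segment, request: {}, response: {} })
embedding.error // => false (withError defaults to false)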
11 changes: 1 addition & 10 deletions lib/llm-events/openai/error-message.js
@@ -12,24 +12,15 @@
 module.exports = class LlmErrorMessage {
   /**
    * @param {object} params Constructor parameters
-   * @param {object} params.request Instance of an outgoing message.
    * @param {object} [params.response] Instance of an incoming message.
    * @param {object} [params.cause] An instance of the OpenAI error object.
    * @param {LlmChatCompletionSummary} [params.summary] Details about the
    * conversation if it was a chat completion conversation.
    * @param {LlmEmbedding} [params.embedding] Details about the conversation
    * if it was an embedding conversation.
    */
-  constructor({ request, response, cause, summary, embedding } = {}) {
-    this.api_key_last_four_digits = response?.api_key && `sk-${response.api_key.slice(-4)}`
-    this['request.model'] = request.model || request.engine
-    this['request.temperature'] = request.temperature
-    this['request.max_tokens'] = request.max_tokens
-    this.vendor = 'openAI'
-    this.ingest_source = 'Node'
-    this['response.number_of_messages'] = request?.messages?.length
+  constructor({ response, cause, summary, embedding } = {}) {
     this['http.statusCode'] = response?.status ?? cause?.status
-    this['response.organization'] = response.headers['openai-organization']
     this['error.message'] = cause?.message
     this['error.code'] = response?.code ?? cause?.error?.code
     this['error.param'] = response?.param ?? cause?.error?.param
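With the request-derived fields removed, every attribute is now computed from the response or the thrown error. A sketch of what the slimmed constructor yields for a hypothetical OpenAI error object (values invented for illustration):

// Hypothetical cause object shaped like an OpenAI API error.
const cause = {
  status: 429,
  message: 'Rate limit reached for requests',
  error: { code: 'rate_limit_exceeded', param: null }
}
const msg = new LlmErrorMessage({ cause })
msg['http.statusCode'] // => 429, via the cause?.status fallback (no response given)
msg['error.message']   // => 'Rate limit reached for requests'
msg['error.code']      // => 'rate_limit_exceeded'
msg['error.param']     // => null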
17 changes: 17 additions & 0 deletions test/unit/llm-events/openai/chat-completion-summary.test.js
@@ -55,4 +55,21 @@ tap.test('LlmChatCompletionSummary', (t) => {
       t.end()
     })
   })
+
+  t.test('should set error to true', (t) => {
+    const api = helper.getAgentApi()
+    const conversationId = 'convo-id'
+    helper.runInTransaction(agent, () => {
+      api.addCustomAttribute('conversation_id', conversationId)
+      const chatSummaryEvent = new LlmChatCompletionSummary({
+        agent,
+        segment: null,
+        request: {},
+        response: {},
+        withError: true
+      })
+      t.equal(true, chatSummaryEvent.error)
+      t.end()
+    })
+  })
 })
4 changes: 3 additions & 1 deletion test/unit/llm-events/openai/common.js
@@ -76,6 +76,7 @@ function getExpectedResult(tx, event, type, completionId) {
     case 'embedding':
       expected = { ...expected, ...resKeys }
       expected.input = 'This is my test input'
+      expected.error = false
       break
     case 'summary':
       expected = {
@@ -86,7 +87,8 @@
         ['request.temperature']: 'medium-rare',
         ['response.number_of_messages']: 3,
         ['response.usage.completion_tokens']: 10,
-        ['response.choices.finish_reason']: 'stop'
+        ['response.choices.finish_reason']: 'stop',
+        error: false
       }
       break
     case 'message':
23 changes: 23 additions & 0 deletions test/unit/llm-events/openai/embedding.test.js
@@ -68,4 +68,27 @@ tap.test('LlmEmbedding', (t) => {
       t.end()
     })
   })
+
+  t.test('should set error to true', (t) => {
+    const req = {
+      input: 'This is my test input',
+      model: 'gpt-3.5-turbo-0613'
+    }
+
+    const api = helper.getAgentApi()
+    helper.runInTransaction(agent, () => {
+      api.startSegment('fakeSegment', false, () => {
+        const segment = api.shim.getActiveSegment()
+        const embeddingEvent = new LlmEmbedding({
+          agent,
+          segment,
+          request: req,
+          response: res,
+          withError: true
+        })
+        t.equal(true, embeddingEvent.error)
+        t.end()
+      })
+    })
+  })
 })
8 changes: 0 additions & 8 deletions test/unit/llm-events/openai/error.test.js
@@ -13,15 +13,7 @@ tap.test('LlmErrorMessage', (t) => {
   const res = { ...chatRes, code: 'insufficient_quota', param: 'test-param', status: 429 }
   const errorMsg = new LlmErrorMessage({ request: req, response: res })
   const expected = {
-    'api_key_last_four_digits': 'sk-7890',
-    'request.model': 'gpt-3.5-turbo-0613',
-    'request.temperature': 'medium-rare',
-    'request.max_tokens': '1000000',
-    'vendor': 'openAI',
-    'ingest_source': 'Node',
-    'response.number_of_messages': 2,
     'http.statusCode': 429,
-    'response.organization': 'new-relic',
     'error.message': undefined,
     'error.code': 'insufficient_quota',
     'error.param': 'test-param',
33 changes: 28 additions & 5 deletions test/versioned/openai/openai.tap.js
@@ -142,7 +142,8 @@ tap.test('OpenAI instrumentation', (t) => {
         'response.headers.ratelimitRemainingRequests': '199',
         'response.number_of_messages': 3,
         'response.usage.completion_tokens': 11,
-        'response.choices.finish_reason': 'stop'
+        'response.choices.finish_reason': 'stop',
+        'error': false
       }
       test.match(chatSummary[1], expectedChatSummary, 'should match chat summary message')
       tx.end()
@@ -317,7 +318,8 @@ tap.test('OpenAI instrumentation', (t) => {
         'response.headers.ratelimitResetTokens': '2ms',
         'response.headers.ratelimitRemainingTokens': '149994',
         'response.headers.ratelimitRemainingRequests': '197',
-        'input': 'This is an embedding test.'
+        'input': 'This is an embedding test.',
+        'error': false
       }
 
       test.equal(embedding[0].type, 'LlmEmbedding')
@@ -369,18 +371,28 @@ tap.test('OpenAI instrumentation', (t) => {
       t.equal(tx.exceptions.length, 1)
       t.match(tx.exceptions[0], {
         error: {
+          status: 401,
+          code: 'invalid_api_key',
+          param: 'null'
+        },
+        customAttributes: {
           'http.statusCode': 401,
           'error.message': /Incorrect API key provided:/,
           'error.code': 'invalid_api_key',
           'error.param': 'null',
           'completion_id': /[\w\d]{32}/
         },
-        customAttributes: {},
         agentAttributes: {
           spanId: /[\w\d]+/
         }
       })
 
+      const summary = agent.customEventAggregator.events.toArray().find((e) => {
+        return e[0].type === 'LlmChatCompletionSummary'
+      })
+      t.ok(summary)
+      t.equal(summary[1].error, true)
+
       tx.end()
       test.end()
     })
@@ -398,13 +410,17 @@
       t.equal(tx.exceptions.length, 1)
       t.match(tx.exceptions[0], {
         error: {
+          status: 400,
+          code: null,
+          param: null
+        },
+        customAttributes: {
           'http.statusCode': 400,
           'error.message': /'bad-role' is not one of/,
           'error.code': null,
           'error.param': null,
           'completion_id': /\w{32}/
         },
-        customAttributes: {},
         agentAttributes: {
           spanId: /\w+/
         }
@@ -428,19 +444,26 @@
       t.equal(tx.exceptions.length, 1)
      t.match(tx.exceptions[0], {
         error: {
+          status: 403,
+          code: null,
+          param: null
+        },
+        customAttributes: {
           'http.statusCode': 403,
           'error.message': 'You are not allowed to generate embeddings from this model',
           'error.code': null,
           'error.param': null,
           'completion_id': undefined,
           'embedding_id': /\w{32}/
         },
-        customAttributes: {},
         agentAttributes: {
           spanId: /\w+/
         }
       })
 
+      const embedding = agent.customEventAggregator.events.toArray().slice(0, 1)[0][1]
+      t.equal(embedding.error, true)
+
       tx.end()
       test.end()
     })
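These reshaped assertions follow from the new `errors.add(transaction, err, llmError)` call: the recorded exception's `error` property is now the raw OpenAI error, and the `LlmErrorMessage` fields land under `customAttributes`. Roughly, the recorded shape (field names taken from the assertions above; values illustrative):

// tx.exceptions[0].error            -> the original OpenAI error ({ status, code, param, ... })
// tx.exceptions[0].customAttributes -> LlmErrorMessage attributes
//                                      ({ 'http.statusCode', 'error.message', 'error.code', 'error.param', ... })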
