chore: Changed token_count to only use tokenCountCallback (#2070)
jsumners-nr committed Mar 11, 2024
1 parent 64b4ca2 commit 66f94b0
Showing 6 changed files with 14 additions and 15 deletions.
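With this change, token_count on LLM events is populated only by a user-registered token counting callback; the response usage fields (completion_tokens / prompt_tokens) are no longer consulted. A minimal sketch of registering such a callback through the Node.js agent's setLlmTokenCountCallback API follows; the whitespace-based count is an illustrative placeholder, not a real tokenizer:

const newrelic = require('newrelic')

// The agent stores this callback as agent.llm.tokenCountCallback and
// invokes it with a model name and the message content.
newrelic.setLlmTokenCountCallback(function countTokens(model, content) {
  // Placeholder heuristic; a production callback would use a proper
  // tokenizer (e.g. tiktoken) keyed on the model.
  return content ? content.split(/\s+/).length : 0
})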
13 changes: 7 additions & 6 deletions lib/llm-events/openai/chat-completion-message.js
@@ -19,14 +19,15 @@ module.exports = class LlmChatCompletionMessage extends LlmEvent {
       this.content = message?.content
     }
 
+    const tokenCB = agent.llm?.tokenCountCallback
+    if (typeof tokenCB !== 'function') {
+      return
+    }
+
     if (this.is_response) {
-      this.token_count =
-        response?.usage?.completion_tokens ||
-        agent.llm?.tokenCountCallback?.(this['response.model'], message?.content)
+      this.token_count = tokenCB(this['response.model'], message?.content)
     } else {
-      this.token_count =
-        response?.usage?.prompt_tokens ||
-        agent.llm?.tokenCountCallback?.(request.model || request.engine, message?.content)
+      this.token_count = tokenCB(request.model || request.engine, message?.content)
     }
   }
 }
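With the guard above, the constructor returns early when no callback is registered, leaving token_count undefined instead of falling back to response.usage; the test expectations below change accordingly.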
7 changes: 4 additions & 3 deletions lib/llm-events/openai/embedding.js
@@ -14,8 +14,9 @@ module.exports = class LlmEmbedding extends LlmEvent {
     if (agent.config.ai_monitoring.record_content.enabled === true) {
       this.input = request.input?.toString()
     }
-    this.token_count =
-      response?.usage?.prompt_tokens ||
-      agent.llm?.tokenCountCallback?.(this['request.model'], request.input?.toString())
+    this.token_count = agent.llm?.tokenCountCallback?.(
+      this['request.model'],
+      request.input?.toString()
+    )
   }
 }
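In the embedding event the callback is invoked through optional chaining, so token_count likewise resolves to undefined when no callback is registered. A tiny sketch of that short-circuit behavior, with hypothetical values:

// Optional chaining short-circuits when agent.llm or
// tokenCountCallback is missing, yielding undefined.
const agent = { llm: {} }
const tokenCount = agent.llm?.tokenCountCallback?.('text-embedding-ada-002', 'some input')
console.log(tokenCount) // undefined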
1 change: 0 additions & 1 deletion test/unit/llm-events/openai/chat-completion-message.test.js
@@ -62,7 +62,6 @@ tap.test('LlmChatCompletionMessage', (t) => {
       expected.content = chatRes.choices[0].message.content
       expected.role = chatRes.choices[0].message.role
       expected.is_response = true
-      expected.token_count = 20
       t.same(chatMessageEvent, expected)
       t.end()
     })
3 changes: 1 addition & 2 deletions test/unit/llm-events/openai/common.js
@@ -75,7 +75,7 @@ function getExpectedResult(tx, event, type, completionId) {
       expected = { ...expected, ...resKeys }
       expected.input = 'This is my test input'
       expected.error = false
-      expected.token_count = 10
+      expected.token_count = undefined
       break
     case 'summary':
       expected = {
@@ -96,7 +96,6 @@
         role: 'inquisitive-kid',
         sequence: 0,
         completion_id: completionId,
-        token_count: 10,
         is_response: false
       }
   }
3 changes: 1 addition & 2 deletions test/versioned/openai/chat-completions.tap.js
@@ -96,8 +96,7 @@ tap.test('OpenAI instrumentation - chat completions', (t) => {
       model,
       id: 'chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTeat',
       resContent: '1 plus 2 is 3.',
-      reqContent: content,
-      tokenUsage: true
+      reqContent: content
     })
 
     const chatSummary = events.filter(([{ type }]) => type === 'LlmChatCompletionSummary')[0]
2 changes: 1 addition & 1 deletion test/versioned/openai/embeddings.tap.js
@@ -100,7 +100,7 @@ tap.test('OpenAI instrumentation - embedding', (t) => {
       'response.organization': 'new-relic-nkmd8b',
       'response.usage.total_tokens': 6,
       'response.usage.prompt_tokens': 6,
-      'token_count': 6,
+      'token_count': undefined,
       'response.headers.llmVersion': '2020-10-01',
       'response.headers.ratelimitLimitRequests': '200',
       'response.headers.ratelimitLimitTokens': '150000',