Skip to content

Commit 2263be2

Browse files
authored
ci(llmobs): use plugin test handler for receiving both llm observability and apm spans (#6477)
* most tests updated
* skip problematic test
* working solution for distinct traces
* revert agent changes and do something different in useLlmObs
* review comments
1 parent 3de3a35 commit 2263be2

File tree

5 files changed

+1173
-1520
lines changed

5 files changed

+1173
-1520
lines changed

packages/dd-trace/test/llmobs/plugins/google-cloud-vertexai/index.spec.js

Lines changed: 92 additions & 131 deletions
Original file line numberDiff line numberDiff line change
@@ -4,12 +4,11 @@ const { expect } = require('chai')
44
const { describe, it, beforeEach, afterEach, before, after } = require('mocha')
55
const sinon = require('sinon')
66

7-
const LLMObsSpanWriter = require('../../../../src/llmobs/writers/spans')
8-
const agent = require('../../../plugins/agent')
97
const { withVersions } = require('../../../setup/mocha')
108
const {
119
expectedLLMObsLLMSpanEvent,
12-
deepEqualWithMockValues
10+
deepEqualWithMockValues,
11+
useLlmObs
1312
} = require('../../util')
1413
const chai = require('chai')
1514

@@ -82,28 +81,7 @@ describe('integrations', () => {
8281
}
8382

8483
describe('vertexai', () => {
85-
before(async () => {
86-
sinon.stub(LLMObsSpanWriter.prototype, 'append')
87-
88-
// reduce errors related to too many listeners
89-
process.removeAllListeners('beforeExit')
90-
91-
return agent.load('google-cloud-vertexai', {}, {
92-
llmobs: {
93-
mlApp: 'test',
94-
agentlessEnabled: false
95-
}
96-
})
97-
})
98-
99-
afterEach(() => {
100-
LLMObsSpanWriter.prototype.append.reset()
101-
})
102-
103-
after(() => {
104-
sinon.restore()
105-
return agent.close({ ritmReset: false, wipe: true })
106-
})
84+
const getEvents = useLlmObs({ plugin: 'google-cloud-vertexai' })
10785

10886
withVersions('google-cloud-vertexai', '@google-cloud/vertexai', '>=1', version => {
10987
before(() => {
@@ -137,88 +115,76 @@ describe('integrations', () => {
137115
useScenario({ scenario: 'generate-content-single-response' })
138116

139117
it('makes a successful call', async () => {
140-
const checkTraces = agent.assertSomeTraces(traces => {
141-
const span = traces[0][0]
142-
const spanEvent = LLMObsSpanWriter.prototype.append.getCall(0).args[0]
143-
144-
const expected = expectedLLMObsLLMSpanEvent({
145-
span,
146-
spanKind: 'llm',
147-
modelName: 'gemini-1.5-flash-002',
148-
modelProvider: 'google',
149-
name: 'GenerativeModel.generateContent',
150-
inputMessages: getInputMessages('Hello, how are you?'),
151-
outputMessages: [
152-
{
153-
role: 'model',
154-
content: 'Hello! How can I assist you today?'
155-
}
156-
],
157-
metadata: {
158-
temperature: 1,
159-
max_output_tokens: 50
160-
},
161-
tokenMetrics: { input_tokens: 35, output_tokens: 2, total_tokens: 37 },
162-
tags: { ml_app: 'test', language: 'javascript', integration: 'vertexai' }
163-
})
164-
165-
expect(spanEvent).to.deepEqualWithMockValues(expected)
166-
})
167-
168118
await model.generateContent({
169119
contents: [{ role: 'user', parts: [{ text: 'Hello, how are you?' }] }]
170120
})
171121

172-
await checkTraces
122+
const { apmSpans, llmobsSpans } = await getEvents()
123+
const expected = expectedLLMObsLLMSpanEvent({
124+
span: apmSpans[0],
125+
spanKind: 'llm',
126+
modelName: 'gemini-1.5-flash-002',
127+
modelProvider: 'google',
128+
name: 'GenerativeModel.generateContent',
129+
inputMessages: getInputMessages('Hello, how are you?'),
130+
outputMessages: [
131+
{
132+
role: 'model',
133+
content: 'Hello! How can I assist you today?'
134+
}
135+
],
136+
metadata: {
137+
temperature: 1,
138+
max_output_tokens: 50
139+
},
140+
tokenMetrics: { input_tokens: 35, output_tokens: 2, total_tokens: 37 },
141+
tags: { ml_app: 'test', language: 'javascript', integration: 'vertexai' }
142+
})
143+
144+
expect(llmobsSpans[0]).to.deepEqualWithMockValues(expected)
173145
})
174146
})
175147

176148
describe('tool calls', () => {
177149
useScenario({ scenario: 'generate-content-single-response-with-tools' })
178150

179151
it('makes a successful call', async () => {
180-
const checkTraces = agent.assertSomeTraces(traces => {
181-
const span = traces[0][0]
182-
const spanEvent = LLMObsSpanWriter.prototype.append.getCall(0).args[0]
183-
184-
const expected = expectedLLMObsLLMSpanEvent({
185-
span,
186-
spanKind: 'llm',
187-
modelName: 'gemini-1.5-flash-002',
188-
modelProvider: 'google',
189-
name: 'GenerativeModel.generateContent',
190-
inputMessages: getInputMessages('what is 2 + 2?'),
191-
outputMessages: [
192-
{
193-
role: 'model',
194-
content: '',
195-
tool_calls: [
196-
{
197-
name: 'add',
198-
arguments: {
199-
a: 2,
200-
b: 2
201-
}
202-
}
203-
]
204-
}
205-
],
206-
metadata: {
207-
temperature: 1,
208-
max_output_tokens: 50
209-
},
210-
tokenMetrics: { input_tokens: 20, output_tokens: 3, total_tokens: 23 },
211-
tags: { ml_app: 'test', language: 'javascript', integration: 'vertexai' }
212-
})
213-
214-
expect(spanEvent).to.deepEqualWithMockValues(expected)
215-
})
216-
217152
await model.generateContent({
218153
contents: [{ role: 'user', parts: [{ text: 'what is 2 + 2?' }] }]
219154
})
220155

221-
await checkTraces
156+
const { apmSpans, llmobsSpans } = await getEvents()
157+
const expected = expectedLLMObsLLMSpanEvent({
158+
span: apmSpans[0],
159+
spanKind: 'llm',
160+
modelName: 'gemini-1.5-flash-002',
161+
modelProvider: 'google',
162+
name: 'GenerativeModel.generateContent',
163+
inputMessages: getInputMessages('what is 2 + 2?'),
164+
outputMessages: [
165+
{
166+
role: 'model',
167+
content: '',
168+
tool_calls: [
169+
{
170+
name: 'add',
171+
arguments: {
172+
a: 2,
173+
b: 2
174+
}
175+
}
176+
]
177+
}
178+
],
179+
metadata: {
180+
temperature: 1,
181+
max_output_tokens: 50
182+
},
183+
tokenMetrics: { input_tokens: 20, output_tokens: 3, total_tokens: 23 },
184+
tags: { ml_app: 'test', language: 'javascript', integration: 'vertexai' }
185+
})
186+
187+
expect(llmobsSpans[0]).to.deepEqualWithMockValues(expected)
222188
})
223189
})
224190

@@ -227,44 +193,6 @@ describe('integrations', () => {
227193
useScenario({ scenario: 'generate-content-single-response' })
228194

229195
it('makes a successful call', async () => {
230-
const checkTraces = agent.assertSomeTraces(traces => {
231-
const span = traces[0][0]
232-
const spanEvent = LLMObsSpanWriter.prototype.append.getCall(0).args[0]
233-
234-
const inputMessages = []
235-
236-
if (model.systemInstruction) {
237-
inputMessages.push({ role: 'system', content: 'Please provide an answer' })
238-
}
239-
240-
inputMessages.push({ role: 'user', content: 'Foobar?' })
241-
inputMessages.push({ role: 'model', content: 'Foobar!' })
242-
inputMessages.push({ content: 'Hello, how are you?' })
243-
244-
const expected = expectedLLMObsLLMSpanEvent({
245-
span,
246-
spanKind: 'llm',
247-
modelName: 'gemini-1.5-flash-002',
248-
modelProvider: 'google',
249-
name: 'ChatSession.sendMessage',
250-
inputMessages,
251-
outputMessages: [
252-
{
253-
role: 'model',
254-
content: 'Hello! How can I assist you today?'
255-
}
256-
],
257-
metadata: {
258-
temperature: 1,
259-
max_output_tokens: 50
260-
},
261-
tokenMetrics: { input_tokens: 35, output_tokens: 2, total_tokens: 37 },
262-
tags: { ml_app: 'test', language: 'javascript', integration: 'vertexai' }
263-
})
264-
265-
expect(spanEvent).to.deepEqualWithMockValues(expected)
266-
})
267-
268196
const chat = model.startChat({
269197
history: [
270198
{ role: 'user', parts: [{ text: 'Foobar?' }] },
@@ -274,7 +202,40 @@ describe('integrations', () => {
274202

275203
await chat.sendMessage([{ text: 'Hello, how are you?' }])
276204

277-
await checkTraces
205+
const { apmSpans, llmobsSpans } = await getEvents()
206+
207+
const inputMessages = []
208+
209+
if (model.systemInstruction) {
210+
inputMessages.push({ role: 'system', content: 'Please provide an answer' })
211+
}
212+
213+
inputMessages.push({ role: 'user', content: 'Foobar?' })
214+
inputMessages.push({ role: 'model', content: 'Foobar!' })
215+
inputMessages.push({ content: 'Hello, how are you?' })
216+
217+
const expected = expectedLLMObsLLMSpanEvent({
218+
span: apmSpans[0],
219+
spanKind: 'llm',
220+
modelName: 'gemini-1.5-flash-002',
221+
modelProvider: 'google',
222+
name: 'ChatSession.sendMessage',
223+
inputMessages,
224+
outputMessages: [
225+
{
226+
role: 'model',
227+
content: 'Hello! How can I assist you today?'
228+
}
229+
],
230+
metadata: {
231+
temperature: 1,
232+
max_output_tokens: 50
233+
},
234+
tokenMetrics: { input_tokens: 35, output_tokens: 2, total_tokens: 37 },
235+
tags: { ml_app: 'test', language: 'javascript', integration: 'vertexai' }
236+
})
237+
238+
expect(llmobsSpans[0]).to.deepEqualWithMockValues(expected)
278239
})
279240
})
280241
})

0 commit comments

Comments (0)