πŸ› fix: fix azure openai stream #2465

Merged 1 commit on May 13, 2024
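This PR targets streamed Azure OpenAI completions: the first chunk of a stream carries only prompt_filter_results and an empty choices array, and (judging from the change to transformOpenAIStream further down) the stream transformer previously assumed choices[0] was always present. A representative first chunk, copied from the new test fixture in this PR, looks like this (shown here only for context, not part of the diff):

const firstAzureStreamChunk = {
  choices: [], // empty – no delta yet, only prompt filter metadata
  created: 0,
  id: '',
  model: '',
  object: '',
  prompt_filter_results: [
    {
      prompt_index: 0,
      content_filter_results: {
        hate: { filtered: false, severity: 'safe' },
        self_harm: { filtered: false, severity: 'safe' },
        sexual: { filtered: false, severity: 'safe' },
        violence: { filtered: false, severity: 'safe' },
      },
    },
  ],
};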
188 changes: 161 additions & 27 deletions src/libs/agent-runtime/azureOpenai/index.test.ts
@@ -1,5 +1,6 @@
// @vitest-environment node
import { AzureKeyCredential, OpenAIClient } from '@azure/openai';
import OpenAI from 'openai';
import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

import * as debugStreamModule from '../utils/debugStream';
@@ -50,7 +51,7 @@ describe('LobeAzureOpenAI', () => {
});

describe('chat', () => {
it('should return a StreamingTextResponse on successful API call', async () => {
it('should return a Response on successful API call', async () => {
// Arrange
const mockStream = new ReadableStream();
const mockResponse = Promise.resolve(mockStream);
@@ -68,6 +69,140 @@ describe('LobeAzureOpenAI', () => {
expect(result).toBeInstanceOf(Response);
});

describe('streaming response', () => {
it('should handle multiple data chunks correctly', async () => {
const data = [
{
choices: [],
created: 0,
id: '',
model: '',
object: '',
prompt_filter_results: [
{
prompt_index: 0,
content_filter_results: {
hate: { filtered: false, severity: 'safe' },
self_harm: { filtered: false, severity: 'safe' },
sexual: { filtered: false, severity: 'safe' },
violence: { filtered: false, severity: 'safe' },
},
},
],
},
{
choices: [
{
content_filter_results: {
hate: { filtered: false, severity: 'safe' },
self_harm: { filtered: false, severity: 'safe' },
sexual: { filtered: false, severity: 'safe' },
violence: { filtered: false, severity: 'safe' },
},
delta: { content: 'δ½ ' },
finish_reason: null,
index: 0,
logprobs: null,
},
],
created: 1715516381,
id: 'chatcmpl-9O2SzeGv5xy6yz0TcQNA1DHHLJ8N1',
model: 'gpt-35-turbo-16k',
object: 'chat.completion.chunk',
system_fingerprint: null,
},
{
choices: [
{
content_filter_results: {
hate: { filtered: false, severity: 'safe' },
self_harm: { filtered: false, severity: 'safe' },
sexual: { filtered: false, severity: 'safe' },
violence: { filtered: false, severity: 'safe' },
},
delta: { content: 'ε₯½' },
finish_reason: null,
index: 0,
logprobs: null,
},
],
created: 1715516381,
id: 'chatcmpl-9O2SzeGv5xy6yz0TcQNA1DHHLJ8N1',
model: 'gpt-35-turbo-16k',
object: 'chat.completion.chunk',
system_fingerprint: null,
},
{
choices: [
{
content_filter_results: {
hate: { filtered: false, severity: 'safe' },
self_harm: { filtered: false, severity: 'safe' },
sexual: { filtered: false, severity: 'safe' },
violence: { filtered: false, severity: 'safe' },
},
delta: { content: '!' },
finish_reason: null,
index: 0,
logprobs: null,
},
],
created: 1715516381,
id: 'chatcmpl-9O2SzeGv5xy6yz0TcQNA1DHHLJ8N1',
model: 'gpt-35-turbo-16k',
object: 'chat.completion.chunk',
system_fingerprint: null,
},
];

const mockStream = new ReadableStream({
start(controller) {
data.forEach((chunk) => controller.enqueue(chunk));
controller.close();
},
});
vi.spyOn(instance['client'], 'streamChatCompletions').mockResolvedValue(mockStream as any);

const result = await instance.chat({
stream: true,
max_tokens: 2048,
temperature: 0.6,
top_p: 1,
model: 'gpt-35-turbo-16k',
presence_penalty: 0,
frequency_penalty: 0,
messages: [{ role: 'user', content: 'δ½ ε₯½' }],
});

const decoder = new TextDecoder();
const reader = result.body!.getReader();
const stream: string[] = [];

while (true) {
const { value, done } = await reader.read();
if (done) break;
stream.push(decoder.decode(value));
}

expect(stream).toEqual(
[
'id: ',
'event: data',
'data: {"choices":[],"created":0,"id":"","model":"","object":"","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}]}\n',
'id: chatcmpl-9O2SzeGv5xy6yz0TcQNA1DHHLJ8N1',
'event: text',
'data: "δ½ "\n',
'id: chatcmpl-9O2SzeGv5xy6yz0TcQNA1DHHLJ8N1',
'event: text',
'data: "ε₯½"\n',
'id: chatcmpl-9O2SzeGv5xy6yz0TcQNA1DHHLJ8N1',
'event: text',
'data: "!"\n',
].map((item) => `${item}\n`),
);
});
});

describe('Error', () => {
it('should return AzureBizError with DeploymentNotFound error', async () => {
// Arrange
@@ -165,7 +300,6 @@ describe('LobeAzureOpenAI', () => {
});

describe('private method', () => {

describe('tocamelCase', () => {
it('should convert string to camel case', () => {
const key = 'image_url';
@@ -179,41 +313,41 @@ describe('LobeAzureOpenAI', () => {
describe('camelCaseKeys', () => {
it('should convert object keys to camel case', () => {
const obj = {
"frequency_penalty": 0,
"messages": [
frequency_penalty: 0,
messages: [
{
"role": "user",
"content": [
role: 'user',
content: [
{
"type": "image_url",
"image_url": {
"url": "<image URL>"
}
}
]
}
]
type: 'image_url',
image_url: {
url: '<image URL>',
},
},
],
},
],
};

const newObj = instance['camelCaseKeys'](obj);

expect(newObj).toEqual({
"frequencyPenalty": 0,
"messages": [
frequencyPenalty: 0,
messages: [
{
"role": "user",
"content": [
role: 'user',
content: [
{
"type": "image_url",
"imageUrl": {
"url": "<image URL>"
}
}
]
}
]
type: 'image_url',
imageUrl: {
url: '<image URL>',
},
},
],
},
],
});
});
});
})
});
});
4 changes: 4 additions & 0 deletions src/libs/agent-runtime/utils/streams/openai.ts
@@ -14,7 +14,11 @@ import {

export const transformOpenAIStream = (chunk: OpenAI.ChatCompletionChunk): StreamProtocolChunk => {
// maybe need another structure to add support for multiple choices

const item = chunk.choices[0];
if (!item) {
return { data: chunk, id: chunk.id, type: 'data' };
}

if (typeof item.delta?.content === 'string') {
return { data: item.delta.content, id: chunk.id, type: 'text' };
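For context, here is a minimal, self-contained sketch of the behaviour the guard above introduces (the StreamProtocolChunk shape and chunk type below are assumptions for illustration, not the repository's actual definitions): a chunk with an empty choices array — such as Azure's initial prompt_filter_results chunk — is forwarded as a generic 'data' event instead of dereferencing an undefined choices[0], while ordinary delta chunks still become 'text' events.

// Sketch only: assumed minimal types, not the repo's implementation.
interface StreamProtocolChunk {
  data: unknown;
  id: string;
  type: 'text' | 'data';
}

interface MinimalChatChunk {
  id: string;
  choices: { delta?: { content?: string | null } }[];
}

const transformChunk = (chunk: MinimalChatChunk): StreamProtocolChunk => {
  const item = chunk.choices[0];

  // Azure's first streamed chunk has choices: [] (only filter metadata),
  // so item is undefined; pass the whole chunk through as a 'data' event.
  if (!item) {
    return { data: chunk, id: chunk.id, type: 'data' };
  }

  // Normal content deltas become 'text' events.
  if (typeof item.delta?.content === 'string') {
    return { data: item.delta.content, id: chunk.id, type: 'text' };
  }

  return { data: item, id: chunk.id, type: 'data' };
};

// Usage: the filter chunk maps to 'data', a real delta maps to 'text'.
console.log(transformChunk({ id: '', choices: [] }).type); // 'data'
console.log(transformChunk({ id: 'chatcmpl-1', choices: [{ delta: { content: 'δ½ ' } }] }).type); // 'text'

Keeping the empty-choices chunk as a 'data' event preserves the content-filter metadata for downstream consumers, which matches the 'event: data' line asserted in the new streaming test above.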