-
Notifications
You must be signed in to change notification settings - Fork 544
[Tiny Agents] Expose a OpenAI-compatible Web server #1473
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
acb4631
7460a2b
e8e5fa6
ae60159
6ce1162
a0a865f
c5fbd54
1dfaae2
27e763b
cd50de4
b974932
e5d98ad
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -34,7 +34,8 @@ | |
| "prepare": "pnpm run build", | ||
| "test": "vitest run", | ||
| "check": "tsc", | ||
| "cli": "tsx src/cli.ts" | ||
| "cli": "tsx src/cli.ts", | ||
| "cli:watch": "tsx watch src/cli.ts" | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I'm running with when I change code in cc: @coyotte508
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I guess you could include either
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. yes can you try adding
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. (potentially in a later PR as i'm going to merge this one soon) |
||
| }, | ||
| "files": [ | ||
| "src", | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,18 @@ | ||
| import { chatCompletionStream } from "@huggingface/inference"; | ||
|
|
||
| async function main() { | ||
| const endpointUrl = `http://localhost:9999/v1/chat/completions`; | ||
| // launch "tiny-agents serve" before running this | ||
|
|
||
| for await (const chunk of chatCompletionStream({ | ||
| endpointUrl, | ||
| model: "", | ||
| messages: [{ role: "user", content: "What are the top 5 trending models on Hugging Face?" }], | ||
mishig25 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| })) { | ||
| console.log(chunk.choices[0]?.delta.content); | ||
| } | ||
| } | ||
|
|
||
| if (require.main === module) { | ||
| main(); | ||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,134 @@ | ||
| import type { IncomingMessage } from "node:http"; | ||
| import { createServer, ServerResponse } from "node:http"; | ||
| import type { AddressInfo } from "node:net"; | ||
| import { z } from "zod"; | ||
| import type { Agent } from "../index"; | ||
| import { ANSI } from "./utils"; | ||
| import { stdout } from "node:process"; | ||
| import type { ChatCompletionStreamOutput } from "@huggingface/tasks"; | ||
|
|
||
| const REQUEST_ID_HEADER = "X-Request-Id"; | ||
|
|
||
| const ChatCompletionInputSchema = z.object({ | ||
| messages: z.array( | ||
| z.object({ | ||
| role: z.enum(["user", "assistant"]), | ||
| content: z.string().or( | ||
| z.array( | ||
| z | ||
| .object({ | ||
| type: z.literal("text"), | ||
| text: z.string(), | ||
| }) | ||
| .or( | ||
| z.object({ | ||
| type: z.literal("image_url"), | ||
| image_url: z.object({ | ||
| url: z.string(), | ||
| }), | ||
| }) | ||
| ) | ||
| ) | ||
| ), | ||
| }) | ||
| ), | ||
| /// Only allow stream: true | ||
| stream: z.literal(true), | ||
| }); | ||
| function getJsonBody(req: IncomingMessage) { | ||
| return new Promise((resolve, reject) => { | ||
| let data = ""; | ||
| req.on("data", (chunk) => (data += chunk)); | ||
| req.on("end", () => { | ||
| try { | ||
| resolve(JSON.parse(data)); | ||
| } catch (e) { | ||
| reject(e); | ||
| } | ||
| }); | ||
| req.on("error", reject); | ||
| }); | ||
| } | ||
| class ServerResp extends ServerResponse { | ||
| error(statusCode: number, reason: string) { | ||
| this.writeHead(statusCode).end(JSON.stringify({ error: reason })); | ||
| } | ||
| } | ||
|
|
||
| export function startServer(agent: Agent): void { | ||
| const server = createServer({ ServerResponse: ServerResp }, async (req, res) => { | ||
| res.setHeader(REQUEST_ID_HEADER, crypto.randomUUID()); | ||
| res.setHeader("Content-Type", "application/json"); | ||
| if (req.method === "POST" && req.url === "/v1/chat/completions") { | ||
| let body: unknown; | ||
| let requestBody: z.infer<typeof ChatCompletionInputSchema>; | ||
| try { | ||
| body = await getJsonBody(req); | ||
| } catch { | ||
| return res.error(400, "Invalid JSON"); | ||
| } | ||
| try { | ||
| requestBody = ChatCompletionInputSchema.parse(body); | ||
| } catch (err) { | ||
| if (err instanceof z.ZodError) { | ||
| return res.error(400, "Invalid ChatCompletionInput body \n" + JSON.stringify(err)); | ||
| } | ||
| return res.error(400, "Invalid ChatCompletionInput body"); | ||
| } | ||
| /// Ok, from now on we will send a SSE (Server-Sent Events) response. | ||
| res.setHeaders( | ||
| new Headers({ | ||
| "Content-Type": "text/event-stream", | ||
| "Cache-Control": "no-cache", | ||
| Connection: "keep-alive", | ||
| }) | ||
| ); | ||
|
|
||
| /// Prepend the agent's prompt | ||
| const messages = [ | ||
| { | ||
| role: "system", | ||
| content: agent.prompt, | ||
| }, | ||
| ...requestBody.messages, | ||
| ]; | ||
|
|
||
| for await (const chunk of agent.run(messages)) { | ||
| if ("choices" in chunk) { | ||
| res.write(`data: ${JSON.stringify(chunk)}\n\n`); | ||
| } else { | ||
| /// Tool call info | ||
| /// /!\ We format it as a regular chunk of role = "tool" | ||
| const chunkToolcallInfo = { | ||
| choices: [ | ||
| { | ||
| index: 0, | ||
| delta: { | ||
| role: "tool", | ||
| content: `Tool[${chunk.name}] ${chunk.tool_call_id}\n` + chunk.content, | ||
| }, | ||
| }, | ||
| ], | ||
| created: Math.floor(Date.now() / 1000), | ||
| id: chunk.tool_call_id, | ||
| model: "", | ||
| system_fingerprint: "", | ||
| } satisfies ChatCompletionStreamOutput; | ||
|
|
||
| res.write(`data: ${JSON.stringify(chunkToolcallInfo)}\n\n`); | ||
| } | ||
| } | ||
| res.end(); | ||
| } else { | ||
| res.error(404, "Route or method not found, try POST /v1/chat/completions"); | ||
| } | ||
| }); | ||
| server.listen(process.env.PORT ? parseInt(process.env.PORT) : 9_999, () => { | ||
| stdout.write(ANSI.BLUE); | ||
| stdout.write(`Agent loaded with ${agent.availableTools.length} tools:\n`); | ||
| stdout.write(agent.availableTools.map((t) => `- ${t.function.name}`).join("\n")); | ||
| stdout.write(ANSI.RESET); | ||
| stdout.write("\n"); | ||
| console.log(ANSI.GRAY + `listening on http://localhost:${(server.address() as AddressInfo).port}` + ANSI.RESET); | ||
| }); | ||
| } |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
this part of the diff you are maybe not going to be a fan of, @Wauplin @hanouticelina...
Basically, an OpenAI-compatible chat completion endpoint is
stateless, so we need to feed the full array of messages from the downstream application here. Let me know what you think.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I'm not shocked by the logic. Maybe a bit clunky to mix the local behavior (stateful, with only a
string passed) and the server behavior (stateless messages), but not too problematic IMO