
Commit 6cd9abf

Authored Sep 2, 2024
feat: options.tools for prompt() (#48)
1 parent 730db8c commit 6cd9abf

File tree: 4 files changed, +125 -3 lines changed

 

index.d.ts (+20)
@@ -158,6 +158,14 @@ export interface Message {
   content: string
   copilot_references: MessageCopilotReference[]
   copilot_confirmations?: MessageCopilotConfirmation[]
+  tool_calls?: {
+    "function": {
+      "arguments": string,
+      "name": string
+    },
+    "id": string,
+    "type": "function"
+  }[]
   name?: string
 }

@@ -251,9 +259,21 @@ export type ModelName =
   | "gpt-4"
   | "gpt-3.5-turbo"

+export interface PromptFunction {
+  type: "function"
+  function: {
+    name: string;
+    description?: string;
+    /** @see https://platform.openai.com/docs/guides/structured-outputs/supported-schemas */
+    parameters?: Record<string, unknown>;
+    strict?: boolean | null;
+  }
+}
+
 export type PromptOptions = {
   model: ModelName
   token: string
+  tools?: PromptFunction[]
   request?: {
     fetch?: Function
   }
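Usage sketch (not part of the diff): how a caller might declare a tool with the new PromptFunction type and pass it through options.tools. The package import path, the get_weather tool, and its JSON-schema parameters are illustrative assumptions, not something this commit defines.

import { prompt, type PromptFunction } from "@copilot-extensions/preview-sdk";

// Illustrative tool definition; the name, description, and schema are assumptions.
const getWeather: PromptFunction = {
  type: "function",
  function: {
    name: "get_weather",
    description: "Get the current weather for a city",
    parameters: {
      type: "object",
      properties: { city: { type: "string" } },
      required: ["city"],
    },
    strict: true,
  },
};

await prompt("What is the weather in Paris?", {
  model: "gpt-4",
  token: process.env.GITHUB_TOKEN ?? "", // assumed to be a suitable API token
  tools: [getWeather],
});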

index.test-d.ts (+20)
@@ -295,4 +295,24 @@ export async function promptTest() {

   // @ts-expect-error - token argument is required
   prompt("What is the capital of France?", { model: "" })
+}
+
+export async function promptWithToolsTest() {
+  await prompt("What is the capital of France?", {
+    model: "gpt-4",
+    token: "secret",
+    tools: [
+      {
+        type: "function",
+        function: {
+          name: "",
+          description: "",
+          parameters: {
+
+          },
+          strict: true,
+        }
+      }
+    ]
+  })
 }

lib/prompt.js (+8 -1)
@@ -3,6 +3,11 @@
 /** @type {import('..').PromptInterface} */
 export async function prompt(userPrompt, promptOptions) {
   const promptFetch = promptOptions.request?.fetch || fetch;
+
+  const systemMessage = promptOptions.tools
+    ? "You are a helpful assistant. Use the supplied tools to assist the user."
+    : "You are a helpful assistant.";
+
   const response = await promptFetch(
     "https://api.githubcopilot.com/chat/completions",
     {
@@ -17,14 +22,16 @@ export async function prompt(userPrompt, promptOptions) {
         messages: [
           {
             role: "system",
-            content: "You are a helpful assistant.",
+            content: systemMessage,
           },
           {
             role: "user",
             content: userPrompt,
           },
         ],
         model: promptOptions.model,
+        toolChoice: promptOptions.tools ? "auto" : undefined,
+        tools: promptOptions.tools,
       }),
     }
   );
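Once a request that carries tools resolves, the model may answer with tool invocations rather than plain text. Below is a minimal sketch of consuming the tool_calls field added to Message above; it assumes the inspected message follows that shape and that function.arguments is a JSON-encoded string, and the logToolCalls helper is illustrative, not part of the SDK.

import type { Message } from "@copilot-extensions/preview-sdk";

// Illustrative helper: log any tool invocations the model requested.
function logToolCalls(message: Message) {
  for (const call of message.tool_calls ?? []) {
    // Each entry carries an id, the function name, and an arguments string
    // (assumed here to be JSON, following the chat-completions convention).
    const args = JSON.parse(call.function.arguments) as Record<string, unknown>;
    console.log(call.id, call.function.name, args);
  }
}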

test/prompt.test.js (+77 -2)
@@ -33,7 +33,7 @@ test("minimal usage", async (t) => {
3333
content: "What is the capital of France?",
3434
},
3535
],
36-
model: "gpt-4o-mini",
36+
model: "gpt-4",
3737
}),
3838
})
3939
.reply(
@@ -57,7 +57,82 @@ test("minimal usage", async (t) => {
5757

5858
const result = await prompt("What is the capital of France?", {
5959
token: "secret",
60-
model: "gpt-4o-mini",
60+
model: "gpt-4",
61+
request: { fetch: fetchMock },
62+
});
63+
64+
t.assert.deepEqual(result, {
65+
requestId: "<request-id>",
66+
message: {
67+
content: "<response text>",
68+
},
69+
});
70+
});
71+
72+
test("function calling", async (t) => {
73+
const mockAgent = new MockAgent();
74+
function fetchMock(url, opts) {
75+
opts ||= {};
76+
opts.dispatcher = mockAgent;
77+
return fetch(url, opts);
78+
}
79+
80+
mockAgent.disableNetConnect();
81+
const mockPool = mockAgent.get("https://api.githubcopilot.com");
82+
mockPool
83+
.intercept({
84+
method: "post",
85+
path: `/chat/completions`,
86+
body: JSON.stringify({
87+
messages: [
88+
{
89+
role: "system",
90+
content:
91+
"You are a helpful assistant. Use the supplied tools to assist the user.",
92+
},
93+
{ role: "user", content: "Call the function" },
94+
],
95+
model: "gpt-4",
96+
toolChoice: "auto",
97+
tools: [
98+
{
99+
type: "function",
100+
function: { name: "the_function", description: "The function" },
101+
},
102+
],
103+
}),
104+
})
105+
.reply(
106+
200,
107+
{
108+
choices: [
109+
{
110+
message: {
111+
content: "<response text>",
112+
},
113+
},
114+
],
115+
},
116+
{
117+
headers: {
118+
"content-type": "application/json",
119+
"x-request-id": "<request-id>",
120+
},
121+
}
122+
);
123+
124+
const result = await prompt("Call the function", {
125+
token: "secret",
126+
model: "gpt-4",
127+
tools: [
128+
{
129+
type: "function",
130+
function: {
131+
name: "the_function",
132+
description: "The function",
133+
},
134+
},
135+
],
61136
request: { fetch: fetchMock },
62137
});
63138
