Model updates (#237)
Fixing some issues I ran into when trying to use new models.
NickHeiner committed Aug 11, 2023
1 parent a1c1af7 commit 94624be
Showing 22 changed files with 220 additions and 1,229 deletions.
4 changes: 2 additions & 2 deletions packages/ai-jsx/package.json
@@ -4,7 +4,7 @@
"repository": "fixie-ai/ai-jsx",
"bugs": "https://github.com/fixie-ai/ai-jsx/issues",
"homepage": "https://ai-jsx.com",
"version": "0.8.5",
"version": "0.9.0",
"volta": {
"extends": "../../package.json"
},
@@ -348,7 +348,7 @@
"react": "^16.8.0 || ^17.0.0 || ^18.0.0"
},
"dependencies": {
"@anthropic-ai/sdk": "^0.5.0",
"@anthropic-ai/sdk": "^0.5.10",
"@nick.heiner/wandb-fork": "^0.5.2-5",
"@opentelemetry/api": "^1.4.1",
"@opentelemetry/api-logs": "^0.41.1",
133 changes: 5 additions & 128 deletions packages/ai-jsx/src/batteries/use-tools.tsx
@@ -4,97 +4,9 @@
* @packageDocumentation
*/

import {
ChatCompletion,
FunctionParameters,
FunctionResponse,
SystemMessage,
UserMessage,
} from '../core/completion.js';
import { Node, RenderContext, ComponentContext, isElement } from '../index.js';
import z from 'zod';
import { zodToJsonSchema } from 'zod-to-json-schema';
import { AIJSXError, ErrorCode } from '../core/errors.js';
import { Converse, isConversationalComponent, renderToConversation } from '../core/conversation.js';

const toolChoiceSchema = z.object({
nameOfTool: z.string(),
parameters: z.record(z.string(), z.any()),
responseToUser: z.string(),
});
export type ToolChoice = z.infer<typeof toolChoiceSchema> | null;

function ChooseTools(props: Pick<UseToolsProps, 'tools' | 'userData' | 'children'>): Node {
return (
<ChatCompletion>
<SystemMessage>
You are an expert agent who knows how to use tools. You can use the following tools:
{Object.entries(props.tools).map(([toolName, tool]) => (
<>
{toolName}: Description: {tool.description}. Schema: {JSON.stringify(tool.parameters)}.
</>
))}
The user will ask you a question. Pick the tool that best addresses what they're looking for. Which tool do you
want to use? Name the tool, identify the parameters, and generate a response to the user explaining what you're
doing. Do not answer the user's question itself. Your answer should be a JSON object matching this schema:{' '}
{JSON.stringify(zodToJsonSchema(toolChoiceSchema))}. Make sure to follow the schema strictly and do not include
any explanatory prose prefix or suffix.{' '}
{props.userData && <>When picking parameters, choose values according to this user data: {props.userData}</>}
If none of the tools seem appropriate, or the user data doesn't have the necessary context to use the tool the
user needs, respond with `null`.
</SystemMessage>
<UserMessage>Generate a JSON response for this query: {props.children}</UserMessage>
</ChatCompletion>
);
}

async function InvokeTool(
props: { tools: Record<string, Tool>; toolChoice: Node; fallback: Node },
{ render }: RenderContext
) {
// TODO: better validation around when this produces unexpected output.
const toolChoiceLLMOutput = await render(props.toolChoice);
let toolChoiceResult: ToolChoice;
try {
const parsedJson = JSON.parse(toolChoiceLLMOutput);
if (parsedJson === null) {
return props.fallback;
}
toolChoiceResult = toolChoiceSchema.parse(parsedJson);
} catch (e: any) {
const error = new AIJSXError(
`Failed to parse LLM output into a tool choice: ${e.message}. Output: ${toolChoiceLLMOutput}`,
ErrorCode.ModelOutputCouldNotBeParsedForTool,
'runtime',
{ toolChoiceLLMOutput }
);
throw error;
}
if (!(toolChoiceResult.nameOfTool in props.tools)) {
throw new AIJSXError(
`LLM hallucinated a tool that does not exist: ${toolChoiceResult.nameOfTool}.`,
ErrorCode.ModelHallucinatedTool,
'runtime',
{ toolChoiceResult }
);
}
const tool = props.tools[toolChoiceResult.nameOfTool];
const toolResult = await tool.func(toolChoiceResult.parameters);

// TODO: Restore this once we have the logger attached to the render context.
// log.info({ toolChoice: toolChoiceResult }, 'Invoking tool');

return (
<ChatCompletion>
<SystemMessage>
You are a tool-using agent. You previously chose to use a tool, and generated this response to the user:
{toolChoiceResult.responseToUser}
When you ran the tool, you got this result: {JSON.stringify(toolResult)}
Using the above, provide a final response to the user.
</SystemMessage>
</ChatCompletion>
);
}
import { ChatCompletion, FunctionParameters, FunctionResponse } from '../core/completion.js';
import { Node, RenderContext } from '../index.js';
import { Converse, renderToConversation } from '../core/conversation.js';

/**
* Represents a tool that can be provided for the Large Language Model.
@@ -138,12 +50,6 @@ export interface UseToolsProps {
*/
showSteps?: boolean;

/**
* A fallback response to use if the AI doesn't think any of the tools are relevant. This is only used for models that do not support functions natively. Models that support functions natively don't need this, because they generate
* their own messages in the case of failure.
*/
fallback: Node;

/**
* User data the AI can use to determine what parameters to invoke the tool with.
*
@@ -189,7 +95,7 @@ export interface UseToolsProps {
* },
* };
*
* <UseTools tools={tools} fallback="Politely explain you aren't able to help with that request.">
* <UseTools tools={tools}>
* <SystemMessage>
* You control a home automation system. The user will request an action in their home. You should take an action and
* then generate a response telling the user what you've done.
@@ -199,31 +105,7 @@
* ```
*
*/
export async function UseTools(props: UseToolsProps, { render, memo }: RenderContext) {
try {
// TODO: ErrorBoundaries should be able to preserve the conversational structure, but they can't currently.
// So instead we start rendering the function call until it yields any conversational message. If we see one,
// we know that the <ChatCompletion> didn't immediately fail.
const memoizedFunctionCall = memo(<UseToolsFunctionCall {...props} />);
for await (const containsElement of render(memoizedFunctionCall, {
stop: isConversationalComponent,
map: (frame) => frame.find(isElement) !== undefined,
})) {
if (containsElement) {
break;
}
}
return memoizedFunctionCall;
} catch (e: any) {
if (e.code === ErrorCode.ChatModelDoesNotSupportFunctions) {
return <UseToolsPromptEngineered {...props} />;
}
throw e;
}
}

/** @hidden */
export async function UseToolsFunctionCall(props: UseToolsProps, { render }: ComponentContext) {
export async function UseTools(props: UseToolsProps, { render }: RenderContext) {
const converse = (
<Converse
reply={async (messages, fullConversation) => {
@@ -264,8 +146,3 @@ export async function UseToolsFunctionCall(props: UseToolsProps, { render }: Com
const messages = await renderToConversation(converse, render);
return messages.length && messages[messages.length - 1].element;
}

/** @hidden */
export function UseToolsPromptEngineered(props: UseToolsProps) {
return <InvokeTool tools={props.tools} toolChoice={<ChooseTools {...props} />} fallback={props.fallback} />;
}
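
With the prompt-engineered path gone, `UseTools` no longer accepts a `fallback` prop; callers pass `tools` (plus the optional `showSteps` and `userData` props) and a conversation as children. A minimal call-site sketch under those assumptions — the `tools` record is assumed to be built elsewhere per the library's `Tool` interface, and the messages are illustrative:

```tsx
/** @jsxImportSource ai-jsx */
import { UseTools, Tool } from 'ai-jsx/batteries/use-tools';
import { SystemMessage, UserMessage } from 'ai-jsx/core/completion';

// Assumed to be constructed elsewhere per the Tool interface (description, parameters, func).
declare const tools: Record<string, Tool>;

// Note: the `fallback` prop was removed in 0.9.0.
export const app = (
  <UseTools tools={tools}>
    <SystemMessage>You are an agent that can answer questions about stocks.</SystemMessage>
    <UserMessage>What is the current price for AAPL?</UserMessage>
  </UseTools>
);
```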
15 changes: 6 additions & 9 deletions packages/ai-jsx/src/lib/anthropic.tsx
@@ -20,7 +20,7 @@ type ValidCompletionModel = never;
*
* @see https://docs.anthropic.com/claude/reference/complete_post.
*/
type ValidChatModel =
export type ValidChatModel =
| 'claude-1'
| 'claude-1-100k'
| 'claude-instant-1'
@@ -31,7 +31,9 @@ type ValidChatModel =
| 'claude-1.0'
| 'claude-instant-1.1'
| 'claude-instant-1.1-100k'
| 'claude-instant-1.0';
| 'claude-instant-1.0'
| 'claude-2'
| 'claude-2.0';

type AnthropicModelChoices = ChatOrCompletionModelOrBoth<ValidChatModel, ValidCompletionModel>;

@@ -121,18 +123,13 @@ export async function* AnthropicChatModel(
.map(async (message) => {
switch (message.type) {
case 'user':
return `${AnthropicSDK.HUMAN_PROMPT}:${
return `${AnthropicSDK.HUMAN_PROMPT}${
message.element.props.name ? ` (${message.element.props.name})` : ''
} ${await render(message.element)}`;
case 'assistant':
return `${AnthropicSDK.AI_PROMPT}: ${await render(message.element)}`;
case 'functionCall':
case 'functionResponse':
throw new AIJSXError(
'Anthropic models do not support functions.',
ErrorCode.AnthropicDoesNotSupportFunctions,
'user'
);
return `${AnthropicSDK.AI_PROMPT} ${await render(message.element)}`;
}
})
);
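
To make the prompt-formatting fix concrete: the code previously appended an extra `:` after `HUMAN_PROMPT`, which already ends in a colon, yielding prompts like `\n\nHuman:: …`. A standalone sketch of the corrected formatting — the constants below are local stand-ins assumed to match what `@anthropic-ai/sdk` exports as `HUMAN_PROMPT` and `AI_PROMPT`, and the message text is made up:

```ts
// Local stand-ins for the SDK constants (assumed values).
const HUMAN_PROMPT = '\n\nHuman:';
const AI_PROMPT = '\n\nAssistant:';

// Corrected user turn: no extra ':' after HUMAN_PROMPT.
function formatUserTurn(rendered: string, name?: string): string {
  return `${HUMAN_PROMPT}${name ? ` (${name})` : ''} ${rendered}`;
}

// Corrected assistant turn.
function formatAssistantTurn(rendered: string): string {
  return `${AI_PROMPT} ${rendered}`;
}

console.log(JSON.stringify(formatUserTurn('What is 2 + 2?', 'alice')));
// "\n\nHuman: (alice) What is 2 + 2?"  (previously "\n\nHuman:: (alice) ...")
console.log(JSON.stringify(formatAssistantTurn('4')));
// "\n\nAssistant: 4"                    (previously "\n\nAssistant:: 4")
```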
12 changes: 3 additions & 9 deletions packages/ai-jsx/src/lib/openai.tsx
@@ -176,15 +176,7 @@ function logitBiasOfTokens(tokens: Record<string, number>) {
* @returns True if the model supports function calling, false otherwise.
*/
function chatModelSupportsFunctions(model: ValidChatModel) {
return [
'gpt-4',
'gpt-3.5-turbo',
'gpt-4-0613',
'gpt-4-32k-0613',
'gpt-3.5-turbo-0613',
'gpt-3.5-turbo-16k',
'gpt-3.5-turbo-16k-0613',
].includes(model);
return model.startsWith('gpt-4') || model.startsWith('gpt-3.5-turbo');
}

type OpenAIMethod = 'createCompletion' | 'createChatCompletion' | 'createImage';
@@ -220,6 +212,8 @@ export class OpenAIError<M extends OpenAIMethod> extends HttpError {
}
}

// Anthropic has a similar polyfill here:
// https://github.com/anthropics/anthropic-sdk-typescript/blob/9af152707a9bcf3027afc64f027566be25da2eb9/src/streaming.ts#L266C1-L291C2
async function* asyncIteratorOfFetchStream(reader: ReturnType<NonNullable<Response['body']>['getReader']>) {
while (true) {
const { done, value } =
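
A quick illustration of the loosened function-support check: any chat model id starting with `gpt-4` or `gpt-3.5-turbo` — including `gpt-4-32k`, which the old allow-list missed — now counts as supporting function calling. Standalone sketch; the helper mirrors the diffed function and the model ids are examples:

```ts
// Mirrors the prefix-based check introduced above.
function chatModelSupportsFunctions(model: string): boolean {
  return model.startsWith('gpt-4') || model.startsWith('gpt-3.5-turbo');
}

console.log(chatModelSupportsFunctions('gpt-4-32k'));          // true (missing from the old allow-list)
console.log(chatModelSupportsFunctions('gpt-3.5-turbo-0613')); // true
console.log(chatModelSupportsFunctions('text-davinci-003'));   // false — not a function-calling chat model
```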
8 changes: 8 additions & 0 deletions packages/docs/docs/changelog.md
@@ -1,5 +1,13 @@
# Changelog

## 0.9.0

- **Breaking:** Remove prompt-engineered `UseTools`. Previously, if you called `UseTools` with a model that doesn't support native function calling (e.g. Anthropic), `UseTools` would use a polyfilled version that uses prompt engineering to simulate function calling. However, this wasn't reliable enough in practice, so we've dropped it.
- Fix issue where `gpt-4-32k` didn't accept functions.
- Fix issue where Anthropic didn't permit function call/responses in its conversation history.
- Add Anthropic's claude-2 models as valid chat model types.
- Fix issue where Anthropic prompt formatting had extra `:`s.

## 0.8.5

- Fix issue where OpenTelemetry failures were not being properly attributed.
2 changes: 1 addition & 1 deletion packages/docs/docs/guides/brand-new.md
@@ -129,7 +129,7 @@ const tools: Record<string, Tool> = {
};

// Provide the tools to the agent
<UseTools tools={tools} fallback="Politely explain you aren't able to help with that request.">
<UseTools tools={tools}>
<SystemMessage>
You control a home automation system. The user will request an action in their home. You should take an action and
then generate a response telling the user what you've done.
2 changes: 1 addition & 1 deletion packages/docs/docs/tutorial/part7-tools.md
@@ -43,7 +43,7 @@ const tools = {
};

return (
<UseTools tools={tools} fallback="Politely explain that you cannot help.">
<UseTools tools={tools}>
<SystemMessage>You are an agent that can answer questions about stocks.</SystemMessage>
<UserMessage>What is the current price for AAPL?</UserMessage>
</UseTools>
2 changes: 2 additions & 0 deletions packages/examples/package.json
@@ -19,6 +19,7 @@
"eslint-plugin-jest": "^27.2.2",
"jest": "^29.5.0",
"jest-fetch-mock": "^3.0.3",
"nock": "^13.3.2",
"openai": "^3.3.0",
"ts-jest": "^29.1.0",
"ts-node": "^10.9.1",
@@ -69,6 +70,7 @@
"test": "yarn run typecheck && yarn run unit"
},
"dependencies": {
"@anthropic-ai/sdk": "^0.5.10",
"@opentelemetry/api": "^1.4.1",
"@opentelemetry/api-logs": "^0.41.1",
"@opentelemetry/exporter-logs-otlp-grpc": "^0.41.1",

3 comments on commit 94624be

@vercel vercel bot commented on 94624be Aug 11, 2023


Successfully deployed to the following URLs:

ai-jsx-docs – ./packages/docs

ai-jsx-docs-fixie-ai.vercel.app
ai-jsx-docs-git-main-fixie-ai.vercel.app
ai-jsx-docs.vercel.app
docs.ai-jsx.com

@vercel vercel bot commented on 94624be Aug 11, 2023


Successfully deployed to the following URLs:

ai-jsx-nextjs-demo – ./packages/nextjs-demo

ai-jsx-nextjs-demo-git-main-fixie-ai.vercel.app
ai-jsx-nextjs-demo.vercel.app
ai-jsx-nextjs-demo-fixie-ai.vercel.app

@vercel vercel bot commented on 94624be Aug 11, 2023


Successfully deployed to the following URLs:

ai-jsx-tutorial-nextjs – ./packages/tutorial-nextjs

ai-jsx-tutorial-nextjs.vercel.app
ai-jsx-tutorial-nextjs-git-main-fixie-ai.vercel.app
ai-jsx-tutorial-nextjs-fixie-ai.vercel.app
