Skip to content

Commit

Permalink
Observability + make streaming demo look better (#168)
Browse files Browse the repository at this point in the history
Last night, I noticed the streaming demo wouldn't display any results
until the whole thing had finished. I wanted to make the demo have an
immediately-streaming text result, followed by the UI, but that didn't
work because everything appeared at the end.

This PR includes a number of fixes I needed to make that work, as well
as observability I added along the way.

Gif of the UI changes:

https://github.com/fixie-ai/ai-jsx/assets/829827/055baf67-b63c-4a31-be5d-e2ad7bc48401

To push the UI further, I think we'll want to do more engine-level
stuff, like add the ability for AI to pass props. What I did here is
pretty hacky.
  • Loading branch information
NickHeiner committed Jun 30, 2023
1 parent 183f4bb commit e2735fd
Show file tree
Hide file tree
Showing 22 changed files with 538 additions and 65 deletions.
2 changes: 1 addition & 1 deletion packages/ai-jsx/package.json
Expand Up @@ -4,7 +4,7 @@
"repository": "fixie-ai/ai-jsx",
"bugs": "https://github.com/fixie-ai/ai-jsx/issues",
"homepage": "https://ai-jsx.com",
"version": "0.5.9",
"version": "0.5.10",
"volta": {
"extends": "../../package.json"
},
Expand Down
25 changes: 20 additions & 5 deletions packages/ai-jsx/src/core/core.ts
Expand Up @@ -367,10 +367,20 @@ async function* renderStream(
const logImpl = renderingContext.getContext(LoggerContext);
const renderId = uuidv4();
try {
return yield* renderingContext.render(
/**
* This approach is pretty noisy because there are many internal components about which the users don't care.
* For instance, if the user writes <ChatCompletion>, that'll generate a bunch of internal helpers to
* locate + call the model provider, etc.
*
* To get around this, maybe we want components to be able to choose the loglevel used for their rendering.
*/
logImpl.log('debug', renderable, renderId, 'Start rendering element');
const finalResult = yield* renderingContext.render(
renderable.render(renderingContext, new BoundLogger(logImpl, renderId, renderable)),
recursiveRenderOpts
);
logImpl.log('debug', renderable, renderId, { finalResult }, 'Finished rendering element');
return finalResult;
} catch (ex) {
logImpl.logException(renderable, renderId, ex);
throw ex;
Expand All @@ -385,6 +395,8 @@ async function* renderStream(
while (true) {
const next = await iterator.next();
if (next.value === AppendOnlyStream) {
// TODO: I'd like to emit a log here indicating that an element has chosen to AppendOnlyStream,
// but I'm not sure what the best way is to know which element/renderId produced `renderable`.
isAppendOnlyStream = true;
} else if (isAppendOnlyStream) {
const renderResult = context.render(next.value, recursiveRenderOpts);
Expand Down Expand Up @@ -428,7 +440,7 @@ export function createRenderContext(opts?: { logger?: LogImplementation }) {
});
}

function createRenderContextInternal(render: StreamRenderer, userContext: Record<symbol, any>): RenderContext {
function createRenderContextInternal(renderStream: StreamRenderer, userContext: Record<symbol, any>): RenderContext {
const context: RenderContext = {
render: <TFinal extends string | PartiallyRendered[], TIntermediate>(
renderable: Renderable,
Expand All @@ -441,7 +453,7 @@ function createRenderContextInternal(render: StreamRenderer, userContext: Record
const generator = (async function* () {
// eslint-disable-next-line @typescript-eslint/prefer-nullish-coalescing
const shouldStop = (opts?.stop || (() => false)) as ElementPredicate;
const generatorToWrap = render(context, renderable, shouldStop, Boolean(opts?.appendOnly));
const generatorToWrap = renderStream(context, renderable, shouldStop, Boolean(opts?.appendOnly));
while (true) {
const next = await generatorToWrap.next();
const value = opts?.stop ? (next.value as TFinal) : (next.value.join('') as TFinal);
Expand Down Expand Up @@ -520,10 +532,13 @@ function createRenderContextInternal(render: StreamRenderer, userContext: Record
return defaultValue;
},

wrapRender: (getRender) => createRenderContextInternal(getRender(render), userContext),
wrapRender: (getRenderStream) => createRenderContextInternal(getRenderStream(renderStream), userContext),

[pushContextSymbol]: (contextReference, value) =>
createRenderContextInternal(render, { ...userContext, [contextReference[contextKey].userContextSymbol]: value }),
createRenderContextInternal(renderStream, {
...userContext,
[contextReference[contextKey].userContextSymbol]: value,
}),
};

return context;
Expand Down
8 changes: 6 additions & 2 deletions packages/ai-jsx/src/experimental/next/index.ts
Expand Up @@ -160,8 +160,12 @@ export const jsx = asJsxBoundary(function jsx(
});
export const JSX = jsx;

export function toReactStream(componentMap: ComponentMap<any>, renderable: AI.Renderable): Response {
const renderResult = AI.createRenderContext().render(renderable, {
export function toReactStream(
componentMap: ComponentMap<any>,
renderable: AI.Renderable,
renderContextOpts?: Parameters<typeof AI.createRenderContext>[0]
): Response {
const renderResult = AI.createRenderContext(renderContextOpts).render(renderable, {
stop: (e) => boundaryElements.some((special) => special.tag === e.tag),
map: (x) => x,
});
Expand Down
19 changes: 14 additions & 5 deletions packages/ai-jsx/src/lib/openai.tsx
Expand Up @@ -448,14 +448,10 @@ export async function* OpenAIChatModel(
* @param size The size of the image to generate. Defaults to `512x512`.
* @returns URL(s) to the generated image, wrapped in {@link Image} component(s).
*/
export async function DalleImageGen(
export async function* DalleImageGen(
{ numSamples = 1, size = '512x512', children }: ImageGenPropsWithChildren,
{ render, getContext, logger }: AI.ComponentContext
) {
const prompt = await render(children);

const openai = getContext(openAiClientContext);

let sizeEnum;
switch (size) {
case '256x256':
Expand All @@ -475,6 +471,19 @@ export async function DalleImageGen(
);
}

// Consider emitting http://via.placeholder.com/256x256 instead.
yield (
<Image
url={`http://via.placeholder.com/${size}`}
prompt="placeholder while real results renderes"
modelName="placeholder.com"
/>
);

const prompt = await render(children);

const openai = getContext(openAiClientContext);

const imageRequest = {
prompt,
n: numSamples,
Expand Down
3 changes: 2 additions & 1 deletion packages/ai-jsx/src/react/completion.tsx
Expand Up @@ -9,10 +9,11 @@ function reactComponentName(component: React.JSXElementConstructor<any> | string
return typeof component === 'string' ? component : component.name;
}

export async function UICompletion(
export async function* UICompletion(
{ example, children }: { example: React.ReactNode; children: AI.Node },
{ render, logger }: AI.ComponentContext
) {
yield '';
const reactComponents = new Set<React.JSXElementConstructor<any> | string>();
function collectComponents(node: React.ReactNode | AI.Node, inReact: boolean) {
if (Array.isArray(node)) {
Expand Down
2 changes: 1 addition & 1 deletion packages/create-react-app-demo/package.json
Expand Up @@ -13,7 +13,7 @@
"@testing-library/react": "^13.4.0",
"@testing-library/user-event": "^13.5.0",
"@types/http-proxy": "^1.17.11",
"ai-jsx": "0.5.9",
"ai-jsx": "0.5.10",
"babel-jest": "^27.4.2",
"babel-loader": "^8.2.3",
"babel-plugin-named-asset-import": "^0.3.8",
Expand Down
5 changes: 5 additions & 0 deletions packages/docs/docs/changelog.md
@@ -1,5 +1,10 @@
# Changelog

## 0.5.10

- Update logging to log the output of every component.
- Update [`UseTools`](./api/modules/batteries_use_tools.md) to use [OpenAI function calls](https://openai.com/blog/function-calling-and-other-api-updates) if you're using a model that supports them.

## [0.5.9](https://github.com/fixie-ai/ai-jsx/commit/92b6e0f28580fbd9b8fb62072d8c13e28b14d9fe)

- [Add Anthropic support.](./guides/models.md).
Expand Down
43 changes: 38 additions & 5 deletions packages/docs/docs/guides/rules-of-jsx.md
Expand Up @@ -39,7 +39,6 @@ function App({ query }) {
<ChatCompletion>
<SystemMessage>
Answer customer questions based on their data: <CustomerData />
Here's data about our company: <OrgData />
</SystemMessage>
<UserMessage>{query}</UserMessage>
</ChatCompletion>
Expand All @@ -50,14 +49,48 @@ async function CustomerData() {
const accountId = await getCustomerAccount();
return isLegacyAccount(accountId) ? fetchLegacy() : fetchModern();
}
```

:::caution Edge case

Imagine you have a slow async component that is used as a sibling of faster components:

```tsx
async function Slow() {
await new Promise((resolve) => setTimeout(resolve, 4000));
return 'slow result';
}

async function Fast() {
await Promise.resolve();
return 'fast result';
}

const app = (
<>
<Fast />
<Slow />
</>
);
```

Surprisingly, you won't get any results streamed out of `Fast` until `Slow` completes.

function* OrgData() {
yield firstData;
yield secondData;
yield thirdData;
To solve this, return an intermediate value from `Slow`:

```tsx
async function* Slow() {
// highlight-next-line
yield '';

await new Promise((resolve) => setTimeout(resolve, 4000));
return 'slow result';
}
```

This is not ideal and we plan to improve it in the future.
:::

### Append-only generators

If your component is a generator, the default behavior is that each `yield`ed value replaces the previous value. For instance, imagine you have an image generation API like Midjourney that returns a series of image URLs showing the image render in progress:
Expand Down
3 changes: 2 additions & 1 deletion packages/examples/package.json
Expand Up @@ -29,6 +29,7 @@
"demo:wandb": "yarn build && node dist/wandb.js",
"demo:helloworld": "yarn build && node dist/helloworld.js",
"demo:inline-chat": "yarn build && node dist/inline-chat.js",
"demo:stream-ui": "yarn build && node dist/stream-ui.js",
"demo:inline-completion": "yarn build && node dist/inline-completion.js",
"demo:simple-chat": "yarn build && node dist/simple-chat.js",
"demo:multi-model-chat": "yarn build && node dist/multi-model-chat.js",
Expand Down Expand Up @@ -59,7 +60,7 @@
"@opentelemetry/sdk-metrics": "^1.13.0",
"@opentelemetry/sdk-node": "^0.39.1",
"@wandb/sdk": "^0.5.1",
"ai-jsx": "0.5.9",
"ai-jsx": "0.5.10",
"axios": "^1.4.0",
"csv-stringify": "^6.4.0",
"globby": "^13.1.4",
Expand Down
27 changes: 19 additions & 8 deletions packages/examples/src/logger.tsx
Expand Up @@ -20,6 +20,13 @@ class ConsoleLogger extends LogImplementation {
}
}

async function* Slow({ delay }: { delay: number }): AI.RenderableStream {
  // Opt into append-only streaming so later yields append to (rather than
  // replace) what has already been emitted.
  yield AI.AppendOnlyStream;
  yield `first ${delay}`;
  // Simulate slow work before producing the final chunk.
  const sleep = new Promise((resolve) => setTimeout(resolve, delay));
  await sleep;
  return ` second ${delay}`;
}

function CharacterGenerator() {
const inlineCompletion = (prompt: AI.Node) => (
<Completion stop={['"']} temperature={1.0}>
Expand All @@ -28,14 +35,18 @@ function CharacterGenerator() {
);

return (
<Inline>
The following is a character profile for an RPG game in JSON format:{'\n'}
{'{'}
{'\n '}"class": "{inlineCompletion}",
{'\n '}"name": "{inlineCompletion}",
{'\n '}"mantra": "{inlineCompletion}"{'\n'}
{'}'}
</Inline>
<>
<Slow delay={1000} />
<Slow delay={2000} />
<Inline>
The following is a character profile for an RPG game in JSON format:{'\n'}
{'{'}
{'\n '}"class": "{inlineCompletion}",
{'\n '}"name": "{inlineCompletion}",
{'\n '}"mantra": "{inlineCompletion}"{'\n'}
{'}'}
</Inline>
</>
);
}

Expand Down
62 changes: 62 additions & 0 deletions packages/examples/src/stream-ui.tsx
@@ -0,0 +1,62 @@
/**
* To make this demo work, comment out the `import 'server-only'` line in `ai-jsx/experimental/next`.
*/

/** @jsxImportSource ai-jsx/react */
import * as AI from 'ai-jsx/experimental/next';
import { memo } from 'ai-jsx/core/memoize';
import { ChatCompletion, UserMessage } from 'ai-jsx/core/completion';
import { makeComponentMap } from 'ai-jsx/react/map';
import { pino } from 'pino';
import { PinoLogger } from 'ai-jsx/core/log';

function App() {
  // Memoize the completion so the same streamed result is reused wherever it
  // appears in the tree.
  const completion = memo(
    <ChatCompletion temperature={1}>
      <UserMessage>List five dog names</UserMessage>
    </ChatCompletion>
  );

  // Render the fast-streaming completion alongside a deliberately slow sibling
  // to exercise the streaming behavior.
  return (
    <>
      {completion}
      <Slow delay={2000} />
    </>
  );
}

async function Slow({ delay }: { delay: number }) {
  // Demonstrates the streaming pitfall: by default the tree stream waits for
  // this component to finish before rendering anything. Converting this to a
  // generator and adding an initial `yield ''` solves the problem.

  // yield ''
  const sleep = new Promise((resolve) => setTimeout(resolve, delay));
  await sleep;
  return ` returned after ${delay}`;
}

// Pretty-printed pino logger writing to stdout; level is overridable via the
// `loglevel` env var and defaults to the most verbose setting.
const pinoStdoutLogger = pino({
  name: 'ai-jsx',
  level: process.env.loglevel ?? 'trace',
  transport: {
    target: 'pino-pretty',
    options: {
      colorize: true,
    },
  },
});

const decoder = new TextDecoder();

// Render <App /> to a React-style stream, routing framework logs through pino.
const response = await AI.toReactStream(makeComponentMap({}), <App />, {
  logger: new PinoLogger(pinoStdoutLogger),
});
const body = await response.body;
const reader = body.getReader();
// Drain the stream chunk-by-chunk, echoing decoded text to stdout as it
// arrives (this is what makes the demo visibly "stream").
// eslint-disable-next-line no-constant-condition
while (true) {
  const { done, value } = await reader.read();
  if (done) {
    break;
  }
  process.stdout.write(decoder.decode(value));
}
5 changes: 4 additions & 1 deletion packages/nextjs-demo/package.json
Expand Up @@ -11,12 +11,15 @@
"lint:fix": "yarn lint --fix"
},
"dependencies": {
"@headlessui/react": "^1.7.15",
"@heroicons/react": "^2.0.18",
"@octokit/graphql": "^5.0.6",
"@tailwindcss/forms": "^0.5.3",
"@types/node": "20.2.5",
"@types/react": "18.2.8",
"@types/react-dom": "18.2.4",
"ai": "^2.1.8",
"ai-jsx": "0.5.9",
"ai-jsx": "0.5.10",
"autoprefixer": "10.4.14",
"classnames": "^2.3.2",
"eslint": "8.42.0",
Expand Down
12 changes: 10 additions & 2 deletions packages/nextjs-demo/src/app/layout.tsx
Expand Up @@ -8,12 +8,20 @@ export const metadata = {
description: 'A framework for AI-native UIs',
};

const isScreenshare = false;

export default function RootLayout({ children }: { children: React.ReactNode }) {
return (
<html lang="en">
<body>
<NavBar />
<About />
{/* This is intentionally a constant value. */}
{/* eslint-disable-next-line @typescript-eslint/no-unnecessary-condition */}
{!isScreenshare && (
<>
<NavBar />
<About />
</>
)}
<main className="flex min-h-screen flex-col items-start px-24 py-6">{children}</main>
<div className="hidden">
<h1 className="text-2xl"></h1>
Expand Down

3 comments on commit e2735fd

@vercel
Copy link

@vercel vercel bot commented on e2735fd Jun 30, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Successfully deployed to the following URLs:

ai-jsx-tutorial-nextjs – ./packages/tutorial-nextjs

ai-jsx-tutorial-nextjs-fixie-ai.vercel.app
ai-jsx-tutorial-nextjs.vercel.app
ai-jsx-tutorial-nextjs-git-main-fixie-ai.vercel.app

@vercel
Copy link

@vercel vercel bot commented on e2735fd Jun 30, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Successfully deployed to the following URLs:

ai-jsx-nextjs-demo – ./packages/nextjs-demo

ai-jsx-nextjs-demo.vercel.app
ai-jsx-nextjs-demo-fixie-ai.vercel.app
ai-jsx-nextjs-demo-git-main-fixie-ai.vercel.app

@vercel
Copy link

@vercel vercel bot commented on e2735fd Jun 30, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Successfully deployed to the following URLs:

ai-jsx-docs – ./packages/docs

ai-jsx-docs-git-main-fixie-ai.vercel.app
ai-jsx-docs-fixie-ai.vercel.app
docs.ai-jsx.com
ai-jsx-docs.vercel.app

Please sign in to comment.