From c4fadb4513fc2e35cf0e4b9512ec39a6bd0e42dd Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Thu, 14 Dec 2023 22:04:05 -0500
Subject: [PATCH] feat(api): add optional `name` argument + improve docs (#569)

---
 src/lib/AbstractChatCompletionRunner.ts       |  8 +-
 src/resources/audio/speech.ts                 |  4 +-
 src/resources/chat/completions.ts             | 91 ++++++++++++-------
 src/resources/completions.ts                  |  4 +-
 src/resources/embeddings.ts                   |  3 +-
 src/resources/files.ts                        |  6 +-
 src/resources/shared.ts                       | 28 +++---
 .../beta/assistants/files.test.ts             |  8 +-
 .../beta/threads/messages/files.test.ts       | 15 ++-
 tests/api-resources/chat/completions.test.ts  |  2 +-
 10 files changed, 91 insertions(+), 78 deletions(-)

diff --git a/src/lib/AbstractChatCompletionRunner.ts b/src/lib/AbstractChatCompletionRunner.ts
index 817a853b..8a8f4670 100644
--- a/src/lib/AbstractChatCompletionRunner.ts
+++ b/src/lib/AbstractChatCompletionRunner.ts
@@ -6,7 +6,6 @@ import {
   type ChatCompletionMessage,
   type ChatCompletionMessageParam,
   type ChatCompletionCreateParams,
-  type ChatCompletionAssistantMessageParam,
   type ChatCompletionTool,
 } from 'openai/resources/chat/completions';
 import { APIUserAbortError, OpenAIError } from 'openai/error';
@@ -90,7 +89,6 @@ export abstract class AbstractChatCompletionRunner<
   }

   protected _addMessage(message: ChatCompletionMessageParam, emit = true) {
-    // @ts-expect-error this works around a bug in the Azure OpenAI API in which `content` is missing instead of null.
     if (!('content' in message)) message.content = null;
     this.messages.push(message);
@@ -217,7 +215,7 @@
   }

   #getFinalContent(): string | null {
-    return this.#getFinalMessage().content;
+    return this.#getFinalMessage().content ?? null;
   }

   /**
@@ -229,12 +227,12 @@
     return this.#getFinalContent();
   }

-  #getFinalMessage(): ChatCompletionAssistantMessageParam {
+  #getFinalMessage(): ChatCompletionMessage {
     let i = this.messages.length;
     while (i-- > 0) {
       const message = this.messages[i];
       if (isAssistantMessage(message)) {
-        return message;
+        return { ...message, content: message.content ?? null };
       }
     }
     throw new OpenAIError('stream ended without producing a ChatCompletionMessage with role=assistant');
diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts
index 89e42684..faa28168 100644
--- a/src/resources/audio/speech.ts
+++ b/src/resources/audio/speech.ts
@@ -28,7 +28,9 @@ export interface SpeechCreateParams {
   /**
    * The voice to use when generating the audio. Supported voices are `alloy`,
-   * `echo`, `fable`, `onyx`, `nova`, and `shimmer`.
+   * `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are
+   * available in the
+   * [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options).
    */
   voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer';
diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts
index 3f83a7dd..759c6e7c 100644
--- a/src/resources/chat/completions.ts
+++ b/src/resources/chat/completions.ts
@@ -105,14 +105,15 @@ export namespace ChatCompletion {

 export interface ChatCompletionAssistantMessageParam {
   /**
-   * The contents of the assistant message.
+   * The role of the messages author, in this case `assistant`.
    */
-  content: string | null;
+  role: 'assistant';

   /**
-   * The role of the messages author, in this case `assistant`.
+   * The contents of the assistant message. Required unless `tool_calls` or
+   * `function_call` is specified.
    */
-  role: 'assistant';
+  content?: string | null;

   /**
    * Deprecated and replaced by `tool_calls`. The name and arguments of a function
@@ -120,6 +121,12 @@ export interface ChatCompletionAssistantMessageParam {
    */
   function_call?: ChatCompletionAssistantMessageParam.FunctionCall;

+  /**
+   * An optional name for the participant. Provides the model information to
+   * differentiate between participants of the same role.
+   */
+  name?: string;
+
   /**
    * The tool calls generated by the model, such as function calls.
    */
@@ -309,7 +316,8 @@ export namespace ChatCompletionContentPartImage {
     url: string;

     /**
-     * Specifies the detail level of the image.
+     * Specifies the detail level of the image. Learn more in the
+     * [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding).
      */
     detail?: 'auto' | 'low' | 'high';
   }
@@ -340,9 +348,9 @@ export interface ChatCompletionFunctionCallOption {

 export interface ChatCompletionFunctionMessageParam {
   /**
-   * The return value from the function call, to return to the model.
+   * The contents of the function message.
    */
-  content: string | null;
+  content: string;

   /**
    * The name of the function to call.
@@ -451,12 +459,12 @@ export namespace ChatCompletionMessageToolCall {
  * function.
  */
 export interface ChatCompletionNamedToolChoice {
-  function?: ChatCompletionNamedToolChoice.Function;
+  function: ChatCompletionNamedToolChoice.Function;

   /**
    * The type of the tool. Currently, only `function` is supported.
    */
-  type?: 'function';
+  type: 'function';
 }

 export namespace ChatCompletionNamedToolChoice {
@@ -477,12 +485,18 @@ export interface ChatCompletionSystemMessageParam {
   /**
    * The contents of the system message.
    */
-  content: string | null;
+  content: string;

   /**
    * The role of the messages author, in this case `system`.
    */
   role: 'system';
+
+  /**
+   * An optional name for the participant. Provides the model information to
+   * differentiate between participants of the same role.
+   */
+  name?: string;
 }

 export interface ChatCompletionTool {
@@ -511,7 +525,7 @@ export interface ChatCompletionToolMessageParam {
   /**
    * The contents of the tool message.
    */
-  content: string | null;
+  content: string;

   /**
    * The role of the messages author, in this case `tool`.
@@ -528,12 +542,18 @@ export interface ChatCompletionUserMessageParam {
   /**
    * The contents of the user message.
    */
-  content: string | Array<ChatCompletionContentPart> | null;
+  content: string | Array<ChatCompletionContentPart>;

   /**
    * The role of the messages author, in this case `user`.
    */
   role: 'user';
+
+  /**
+   * An optional name for the participant. Provides the model information to
+   * differentiate between participants of the same role.
+   */
+  name?: string;
 }

 /**
@@ -567,11 +587,11 @@ export interface ChatCompletionCreateParamsBase {
     | 'gpt-4-32k'
     | 'gpt-4-32k-0314'
     | 'gpt-4-32k-0613'
-    | 'gpt-3.5-turbo-1106'
     | 'gpt-3.5-turbo'
     | 'gpt-3.5-turbo-16k'
     | 'gpt-3.5-turbo-0301'
     | 'gpt-3.5-turbo-0613'
+    | 'gpt-3.5-turbo-1106'
     | 'gpt-3.5-turbo-16k-0613';

   /**
    * Number between -2.0 and 2.0. Positive values penalize new tokens based on their
    * existing frequency in the text so far, decreasing the model's likelihood to
    * repeat the same line verbatim.
    *
-   * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
+   * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
    */
   frequency_penalty?: number | null;
@@ -627,7 +647,9 @@
   max_tokens?: number | null;

   /**
-   * How many chat completion choices to generate for each input message.
+   * How many chat completion choices to generate for each input message. Note that
+   * you will be charged based on the number of generated tokens across all of the
+   * choices. Keep `n` as `1` to minimize costs.
    */
   n?: number | null;

   /**
    * Number between -2.0 and 2.0. Positive values penalize new tokens based on
    * whether they appear in the text so far, increasing the model's likelihood to
    * talk about new topics.
    *
-   * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
+   * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
    */
   presence_penalty?: number | null;
@@ -649,10 +671,10 @@
    * **Important:** when using JSON mode, you **must** also instruct the model to
    * produce JSON yourself via a system or user message. Without this, the model may
    * generate an unending stream of whitespace until the generation reaches the token
-   * limit, resulting in increased latency and appearance of a "stuck" request. Also
-   * note that the message content may be partially cut off if
-   * `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
-   * or the conversation exceeded the max context length.
+   * limit, resulting in a long-running and seemingly "stuck" request. Also note that
+   * the message content may be partially cut off if `finish_reason="length"`, which
+   * indicates the generation exceeded `max_tokens` or the conversation exceeded the
+   * max context length.
    */
   response_format?: ChatCompletionCreateParams.ResponseFormat;
@@ -734,23 +756,22 @@ export namespace ChatCompletionCreateParams {
      */
     name: string;

+    /**
+     * A description of what the function does, used by the model to choose when and
+     * how to call the function.
+     */
+    description?: string;
+
     /**
      * The parameters the functions accepts, described as a JSON Schema object. See the
-     * [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for
-     * examples, and the
+     * [guide](https://platform.openai.com/docs/guides/text-generation/function-calling)
+     * for examples, and the
      * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
      * documentation about the format.
      *
-     * To describe a function that accepts no parameters, provide the value
-     * `{"type": "object", "properties": {}}`.
-     */
-    parameters: Shared.FunctionParameters;
-
-    /**
-     * A description of what the function does, used by the model to choose when and
-     * how to call the function.
+     * Omitting `parameters` defines a function with an empty parameter list.
      */
-    description?: string;
+    parameters?: Shared.FunctionParameters;
   }

   /**
    * **Important:** when using JSON mode, you **must** also instruct the model to
    * produce JSON yourself via a system or user message. Without this, the model may
    * generate an unending stream of whitespace until the generation reaches the token
-   * limit, resulting in increased latency and appearance of a "stuck" request. Also
-   * note that the message content may be partially cut off if
-   * `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
-   * or the conversation exceeded the max context length.
+   * limit, resulting in a long-running and seemingly "stuck" request. Also note that
+   * the message content may be partially cut off if `finish_reason="length"`, which
+   * indicates the generation exceeded `max_tokens` or the conversation exceeded the
+   * max context length.
    */
   export interface ResponseFormat {
     /**
diff --git a/src/resources/completions.ts b/src/resources/completions.ts
index 0bee9313..f33624e7 100644
--- a/src/resources/completions.ts
+++ b/src/resources/completions.ts
@@ -177,7 +177,7 @@ export interface CompletionCreateParamsBase {
    * existing frequency in the text so far, decreasing the model's likelihood to
    * repeat the same line verbatim.
    *
-   * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
+   * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
    */
   frequency_penalty?: number | null;
@@ -232,7 +232,7 @@ export interface CompletionCreateParamsBase {
    * whether they appear in the text so far, increasing the model's likelihood to
    * talk about new topics.
    *
-   * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
+   * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
    */
   presence_penalty?: number | null;
diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts
index 7ace4589..318a4527 100644
--- a/src/resources/embeddings.ts
+++ b/src/resources/embeddings.ts
@@ -82,7 +82,8 @@ export interface EmbeddingCreateParams {
   /**
    * Input text to embed, encoded as a string or array of tokens. To embed multiple
    * inputs in a single request, pass an array of strings or array of token arrays.
    * The input must not exceed the max input tokens for the model (8192 tokens for
-   * `text-embedding-ada-002`) and cannot be an empty string.
+   * `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048
+   * dimensions or less.
    * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
    * for counting tokens.
    */
diff --git a/src/resources/files.ts b/src/resources/files.ts
index 8e3a759d..ea3f3b9c 100644
--- a/src/resources/files.ts
+++ b/src/resources/files.ts
@@ -12,10 +12,10 @@ import { Page } from 'openai/pagination';

 export class Files extends APIResource {
   /**
-   * Upload a file that can be used across various endpoints/features. The size of
-   * all the files uploaded by one organization can be up to 100 GB.
+   * Upload a file that can be used across various endpoints. The size of all the
+   * files uploaded by one organization can be up to 100 GB.
    *
-   * The size of individual files for can be a maximum of 512MB. See the
+   * The size of individual files can be a maximum of 512 MB. See the
    * [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) to
    * learn more about the types of files supported. The Fine-tuning API only supports
    * `.jsonl` files.
diff --git a/src/resources/shared.ts b/src/resources/shared.ts
index d8d9bd0c..05ab6638 100644
--- a/src/resources/shared.ts
+++ b/src/resources/shared.ts
@@ -7,33 +7,31 @@ export interface FunctionDefinition {
    */
   name: string;

+  /**
+   * A description of what the function does, used by the model to choose when and
+   * how to call the function.
+   */
+  description?: string;
+
   /**
    * The parameters the functions accepts, described as a JSON Schema object. See the
-   * [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for
-   * examples, and the
+   * [guide](https://platform.openai.com/docs/guides/text-generation/function-calling)
+   * for examples, and the
    * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
    * documentation about the format.
    *
-   * To describe a function that accepts no parameters, provide the value
-   * `{"type": "object", "properties": {}}`.
+   * Omitting `parameters` defines a function with an empty parameter list.
    */
-  parameters: FunctionParameters;
-
-  /**
-   * A description of what the function does, used by the model to choose when and
-   * how to call the function.
-   */
-  description?: string;
+  parameters?: FunctionParameters;
 }

 /**
  * The parameters the functions accepts, described as a JSON Schema object. See the
- * [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for
- * examples, and the
+ * [guide](https://platform.openai.com/docs/guides/text-generation/function-calling)
+ * for examples, and the
  * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
  * documentation about the format.
  *
- * To describe a function that accepts no parameters, provide the value
- * `{"type": "object", "properties": {}}`.
+ * Omitting `parameters` defines a function with an empty parameter list.
  */
 export type FunctionParameters = Record<string, unknown>;
diff --git a/tests/api-resources/beta/assistants/files.test.ts b/tests/api-resources/beta/assistants/files.test.ts
index b06cac85..8db32844 100644
--- a/tests/api-resources/beta/assistants/files.test.ts
+++ b/tests/api-resources/beta/assistants/files.test.ts
@@ -10,9 +10,7 @@ const openai = new OpenAI({

 describe('resource files', () => {
   test('create: only required params', async () => {
-    const responsePromise = openai.beta.assistants.files.create('file-AF1WoRqd3aJAHsqc9NY7iL8F', {
-      file_id: 'string',
-    });
+    const responsePromise = openai.beta.assistants.files.create('file-abc123', { file_id: 'string' });
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -23,9 +21,7 @@ describe('resource files', () => {
   });

   test('create: required and optional params', async () => {
-    const response = await openai.beta.assistants.files.create('file-AF1WoRqd3aJAHsqc9NY7iL8F', {
-      file_id: 'string',
-    });
+    const response = await openai.beta.assistants.files.create('file-abc123', { file_id: 'string' });
   });

   test('retrieve', async () => {
diff --git a/tests/api-resources/beta/threads/messages/files.test.ts b/tests/api-resources/beta/threads/messages/files.test.ts
index 501ed831..b4a00a86 100644
--- a/tests/api-resources/beta/threads/messages/files.test.ts
+++ b/tests/api-resources/beta/threads/messages/files.test.ts
@@ -11,9 +11,9 @@ const openai = new OpenAI({
 describe('resource files', () => {
   test('retrieve', async () => {
     const responsePromise = openai.beta.threads.messages.files.retrieve(
-      'thread_AF1WoRqd3aJAHsqc9NY7iL8F',
-      'msg_AF1WoRqd3aJAHsqc9NY7iL8F',
-      'file-AF1WoRqd3aJAHsqc9NY7iL8F',
+      'thread_abc123',
+      'msg_abc123',
+      'file-abc123',
     );
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
@@ -27,12 +27,9 @@ describe('resource files', () => {
   test('retrieve: request options instead of params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
     await expect(
-      openai.beta.threads.messages.files.retrieve(
-        'thread_AF1WoRqd3aJAHsqc9NY7iL8F',
-        'msg_AF1WoRqd3aJAHsqc9NY7iL8F',
-        'file-AF1WoRqd3aJAHsqc9NY7iL8F',
-        { path: '/_stainless_unknown_path' },
-      ),
+      openai.beta.threads.messages.files.retrieve('thread_abc123', 'msg_abc123', 'file-abc123', {
+        path: '/_stainless_unknown_path',
+      }),
     ).rejects.toThrow(OpenAI.NotFoundError);
   });

diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts
index b4eb00df..15b815a5 100644
--- a/tests/api-resources/chat/completions.test.ts
+++ b/tests/api-resources/chat/completions.test.ts
@@ -25,7 +25,7 @@ describe('resource completions', () => {

   test('create: required and optional params', async () => {
     const response = await openai.chat.completions.create({
-      messages: [{ content: 'string', role: 'system' }],
+      messages: [{ content: 'string', role: 'system', name: 'string' }],
       model: 'gpt-3.5-turbo',
       frequency_penalty: -2,
       function_call: 'none',
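
Usage sketch for the new optional `name` argument (values below are
illustrative): the field labels participants so the model can tell apart
speakers that share a role.

    import OpenAI from 'openai';

    const openai = new OpenAI(); // reads OPENAI_API_KEY from the environment

    async function main() {
      // Two `user` participants distinguished by the new `name` field
      const completion = await openai.chat.completions.create({
        model: 'gpt-3.5-turbo',
        messages: [
          { role: 'system', content: 'You are moderating a debate.' },
          { role: 'user', name: 'alice', content: 'Tabs are better.' },
          { role: 'user', name: 'bob', content: 'Spaces are better.' },
        ],
      });
      console.log(completion.choices[0].message.content);
    }

    main();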
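Sketch of a tool-calling transcript under the revised message param types:
an assistant message may now omit `content` when it carries `tool_calls`,
and tool/system/user `content` is a plain string rather than `string | null`.
The tool-call id and function below are illustrative.

    import { type ChatCompletionMessageParam } from 'openai/resources/chat/completions';

    const history: ChatCompletionMessageParam[] = [
      { role: 'user', content: 'What is the weather in Berlin?' },
      {
        // `content` omitted: allowed once `tool_calls` is present
        role: 'assistant',
        tool_calls: [
          {
            id: 'call_abc123',
            type: 'function',
            function: { name: 'get_weather', arguments: '{"city":"Berlin"}' },
          },
        ],
      },
      // tool output is a plain string
      { role: 'tool', tool_call_id: 'call_abc123', content: '{"temp_c":7}' },
    ];

Relatedly, `ChatCompletionNamedToolChoice` now requires both of its fields, so
pinning a tool is spelled `tool_choice: { type: 'function', function: { name: 'get_weather' } }`.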
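Since `parameters` is now optional on function definitions, a zero-argument
tool no longer needs the old empty-schema idiom; omitting the field declares
an empty parameter list (function name and description are illustrative).

    import { type ChatCompletionTool } from 'openai/resources/chat/completions';

    const tools: ChatCompletionTool[] = [
      {
        type: 'function',
        function: {
          name: 'get_current_time',
          description: 'Returns the current server time.',
          // `parameters` omitted: equivalent to a function taking no arguments,
          // replacing `{"type": "object", "properties": {}}`
        },
      },
    ];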
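The embeddings doc change also records a batching limit; on a hedged reading
(the 2048 cap applies to the number of entries in an array input), a batched
request looks like this, with the input strings illustrative:

    import OpenAI from 'openai';

    const openai = new OpenAI();

    async function embedBatch() {
      const res = await openai.embeddings.create({
        model: 'text-embedding-ada-002',
        // each entry must stay within the model's 8192-token limit,
        // and the array itself within the documented 2048-entry cap
        input: ['first passage to embed', 'second passage to embed'],
      });
      console.log(res.data.length); // one embedding per input entry
    }

    embedBatch();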