diff --git a/.stats.yml b/.stats.yml
index 54ba5613..40f1ba49 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
configured_endpoints: 21
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/writerai%2Fwriter-6b363dd34169cab18f5ec3bcf6586aecd4799f79a80c90bf54e5a12f91d9e7c2.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/writerai%2Fwriter-82683f2fd5f8778a27960ebabda40d6dc4640bdfb77ac4ec7f173b8bf8076d3c.yml
diff --git a/README.md b/README.md
index 3383764a..16602d5a 100644
--- a/README.md
+++ b/README.md
@@ -27,9 +27,9 @@ const client = new Writer({
});
async function main() {
- const chatCompletion = await client.chat.chat({ messages: [{ role: 'user' }], model: 'palmyra-x-004' });
+ const chat = await client.chat.chat({ messages: [{ role: 'user' }], model: 'palmyra-x-004' });
- console.log(chatCompletion.id);
+ console.log(chat.id);
}
main();
@@ -49,8 +49,8 @@ const stream = await client.completions.create({
prompt: 'Hi, my name is',
stream: true,
});
-for await (const completionChunk of stream) {
- console.log(completionChunk.choices);
+for await (const streamingData of stream) {
+ console.log(streamingData.choices);
}
```
@@ -71,7 +71,7 @@ const client = new Writer({
async function main() {
const params: Writer.ChatChatParams = { messages: [{ role: 'user' }], model: 'palmyra-x-004' };
- const chatCompletion: Writer.ChatCompletion = await client.chat.chat(params);
+ const chat: Writer.Chat = await client.chat.chat(params);
}
main();
@@ -88,7 +88,7 @@ a subclass of `APIError` will be thrown:
```ts
async function main() {
- const chatCompletion = await client.chat
+ const chat = await client.chat
.chat({ messages: [{ role: 'user' }], model: 'palmyra-x-004' })
.catch(async (err) => {
if (err instanceof Writer.APIError) {
@@ -208,11 +208,11 @@ const response = await client.chat
console.log(response.headers.get('X-My-Header'));
console.log(response.statusText); // access the underlying Response object
-const { data: chatCompletion, response: raw } = await client.chat
+const { data: chat, response: raw } = await client.chat
.chat({ messages: [{ role: 'user' }], model: 'palmyra-x-004' })
.withResponse();
console.log(raw.headers.get('X-My-Header'));
-console.log(chatCompletion.id);
+console.log(chat.id);
```
### Making custom/undocumented requests
diff --git a/api.md b/api.md
index 41696c39..65561ed0 100644
--- a/api.md
+++ b/api.md
@@ -1,21 +1,3 @@
-# Shared
-
-Types:
-
-- ErrorMessage
-- ErrorObject
-- FunctionDefinition
-- FunctionParams
-- GraphData
-- Logprobs
-- LogprobsToken
-- Source
-- ToolCall
-- ToolCallStreaming
-- ToolChoiceJsonObject
-- ToolChoiceString
-- ToolParam
-
# Applications
Types:
@@ -30,24 +12,19 @@ Methods:
Types:
-- ChatCompletion
-- ChatCompletionChoice
+- Chat
- ChatCompletionChunk
-- ChatCompletionMessage
-- ChatCompletionParams
-- ChatCompletionUsage
Methods:
-- client.chat.chat({ ...params }) -> ChatCompletion
+- client.chat.chat({ ...params }) -> Chat
# Completions
Types:
- Completion
-- CompletionChunk
-- CompletionParams
+- StreamingData
Methods:
diff --git a/src/index.ts b/src/index.ts
index c7718f78..5b10f5d6 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -17,21 +17,16 @@ import {
ChatChatParams,
ChatChatParamsNonStreaming,
ChatChatParamsStreaming,
- ChatCompletion,
- ChatCompletionChoice,
ChatCompletionChunk,
- ChatCompletionMessage,
- ChatCompletionParams,
- ChatCompletionUsage,
+ ChatResource,
} from './resources/chat';
import {
Completion,
- CompletionChunk,
CompletionCreateParams,
CompletionCreateParamsNonStreaming,
CompletionCreateParamsStreaming,
- CompletionParams,
Completions,
+ StreamingData,
} from './resources/completions';
import {
File,
@@ -184,7 +179,7 @@ export class Writer extends Core.APIClient {
}
applications: API.Applications = new API.Applications(this);
- chat: API.Chat = new API.Chat(this);
+ chat: API.ChatResource = new API.ChatResource(this);
completions: API.Completions = new API.Completions(this);
models: API.Models = new API.Models(this);
graphs: API.Graphs = new API.Graphs(this);
@@ -228,7 +223,7 @@ export class Writer extends Core.APIClient {
}
Writer.Applications = Applications;
-Writer.Chat = Chat;
+Writer.ChatResource = ChatResource;
Writer.Completions = Completions;
Writer.Models = Models;
Writer.Graphs = Graphs;
@@ -249,13 +244,9 @@ export declare namespace Writer {
};
export {
- Chat as Chat,
- type ChatCompletion as ChatCompletion,
- type ChatCompletionChoice as ChatCompletionChoice,
+ ChatResource as ChatResource,
+ type Chat as Chat,
type ChatCompletionChunk as ChatCompletionChunk,
- type ChatCompletionMessage as ChatCompletionMessage,
- type ChatCompletionParams as ChatCompletionParams,
- type ChatCompletionUsage as ChatCompletionUsage,
type ChatChatParams as ChatChatParams,
type ChatChatParamsNonStreaming as ChatChatParamsNonStreaming,
type ChatChatParamsStreaming as ChatChatParamsStreaming,
@@ -264,8 +255,7 @@ export declare namespace Writer {
export {
Completions as Completions,
type Completion as Completion,
- type CompletionChunk as CompletionChunk,
- type CompletionParams as CompletionParams,
+ type StreamingData as StreamingData,
type CompletionCreateParams as CompletionCreateParams,
type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming,
type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming,
@@ -310,20 +300,6 @@ export declare namespace Writer {
type ToolContextAwareSplittingParams as ToolContextAwareSplittingParams,
type ToolParsePdfParams as ToolParsePdfParams,
};
-
- export type ErrorMessage = API.ErrorMessage;
- export type ErrorObject = API.ErrorObject;
- export type FunctionDefinition = API.FunctionDefinition;
- export type FunctionParams = API.FunctionParams;
- export type GraphData = API.GraphData;
- export type Logprobs = API.Logprobs;
- export type LogprobsToken = API.LogprobsToken;
- export type Source = API.Source;
- export type ToolCall = API.ToolCall;
- export type ToolCallStreaming = API.ToolCallStreaming;
- export type ToolChoiceJsonObject = API.ToolChoiceJsonObject;
- export type ToolChoiceString = API.ToolChoiceString;
- export type ToolParam = API.ToolParam;
}
export { toFile, fileFromPath } from './uploads';
diff --git a/src/resources/chat.ts b/src/resources/chat.ts
index b53b0e52..b1d65b13 100644
--- a/src/resources/chat.ts
+++ b/src/resources/chat.ts
@@ -4,32 +4,31 @@ import { APIResource } from '../resource';
import { APIPromise } from '../core';
import * as Core from '../core';
import * as ChatAPI from './chat';
-import * as Shared from './shared';
import { Stream } from '../streaming';
-export class Chat extends APIResource {
+export class ChatResource extends APIResource {
/**
* Generate a chat completion based on the provided messages. The response shown
* below is for non-streaming. To learn about streaming responses, see the
* [chat completion guide](/api-guides/chat-completion).
*/
- chat(body: ChatChatParamsNonStreaming, options?: Core.RequestOptions): APIPromise<ChatCompletion>;
+ chat(body: ChatChatParamsNonStreaming, options?: Core.RequestOptions): APIPromise<Chat>;
chat(body: ChatChatParamsStreaming, options?: Core.RequestOptions): APIPromise<Stream<ChatCompletionChunk>>;
chat(
body: ChatChatParamsBase,
options?: Core.RequestOptions,
- ): APIPromise<Stream<ChatCompletionChunk> | ChatCompletion>;
+ ): APIPromise<Stream<ChatCompletionChunk> | Chat>;
chat(
body: ChatChatParams,
options?: Core.RequestOptions,
- ): APIPromise<ChatCompletion> | APIPromise<Stream<ChatCompletionChunk>> {
+ ): APIPromise<Chat> | APIPromise<Stream<ChatCompletionChunk>> {
return this._client.post('/v1/chat', { body, ...options, stream: body.stream ?? false }) as
- | APIPromise<ChatCompletion>
+ | APIPromise<Chat>
| APIPromise<Stream<ChatCompletionChunk>>;
}
}
-export interface ChatCompletion {
+export interface Chat {
/**
* A globally unique identifier (UUID) for the response generated by the API. This
* ID can be used to reference the specific operation or transaction within the
@@ -41,7 +40,7 @@ export interface ChatCompletion {
* An array of objects representing the different outcomes or results produced by
* the model based on the input provided.
*/
- choices: Array<ChatCompletionChoice>;
+ choices: Array<Chat.Choice>;
/**
* The Unix timestamp (in seconds) when the response was created. This timestamp
@@ -75,33 +74,215 @@ export interface ChatCompletion {
* Usage information for the chat completion response. Please note that at this
* time Knowledge Graph tool usage is not included in this object.
*/
- usage?: ChatCompletionUsage;
+ usage?: Chat.Usage;
}
-export interface ChatCompletionChoice {
- /**
- * Describes the condition under which the model ceased generating content. Common
- * reasons include 'length' (reached the maximum output size), 'stop' (encountered
- * a stop sequence), 'content_filter' (harmful content filtered out), or
- * 'tool_calls' (encountered tool calls).
- */
- finish_reason: 'stop' | 'length' | 'content_filter' | 'tool_calls';
+export namespace Chat {
+ export interface Choice {
+ /**
+ * Describes the condition under which the model ceased generating content. Common
+ * reasons include 'length' (reached the maximum output size), 'stop' (encountered
+ * a stop sequence), 'content_filter' (harmful content filtered out), or
+ * 'tool_calls' (encountered tool calls).
+ */
+ finish_reason: 'stop' | 'length' | 'content_filter' | 'tool_calls';
- /**
- * The index of the choice in the list of completions generated by the model.
- */
- index: number;
+ /**
+ * The index of the choice in the list of completions generated by the model.
+ */
+ index: number;
- /**
- * The chat completion message from the model. Note: this field is deprecated for
- * streaming. Use `delta` instead.
- */
- message: ChatCompletionMessage;
+ /**
+ * The chat completion message from the model. Note: this field is deprecated for
+ * streaming. Use `delta` instead.
+ */
+ message: Choice.Message;
+
+ /**
+ * Log probability information for the choice.
+ */
+ logprobs?: Choice.Logprobs | null;
+ }
+
+ export namespace Choice {
+ /**
+ * The chat completion message from the model. Note: this field is deprecated for
+ * streaming. Use `delta` instead.
+ */
+ export interface Message {
+ /**
+ * The text content produced by the model. This field contains the actual output
+ * generated, reflecting the model's response to the input query or command.
+ */
+ content: string;
+
+ refusal: string | null;
+
+ /**
+ * Specifies the role associated with the content.
+ */
+ role: 'assistant';
+
+ graph_data?: Message.GraphData;
+
+ tool_calls?: Array<Message.ToolCall>;
+ }
+
+ export namespace Message {
+ export interface GraphData {
+ sources?: Array<GraphData.Source>;
+
+ status?: 'processing' | 'finished';
+
+ subqueries?: Array<GraphData.Subquery>;
+ }
+
+ export namespace GraphData {
+ export interface Source {
+ /**
+ * The unique identifier of the file.
+ */
+ file_id: string;
+
+ /**
+ * A snippet of text from the source file.
+ */
+ snippet: string;
+ }
+
+ export interface Subquery {
+ /**
+ * The answer to the subquery.
+ */
+ answer: string;
+
+ /**
+ * The subquery that was asked.
+ */
+ query: string;
+
+ sources: Array<Subquery.Source>;
+ }
+
+ export namespace Subquery {
+ export interface Source {
+ /**
+ * The unique identifier of the file.
+ */
+ file_id: string;
+
+ /**
+ * A snippet of text from the source file.
+ */
+ snippet: string;
+ }
+ }
+ }
+
+ export interface ToolCall {
+ id: string;
+
+ function: ToolCall.Function;
+
+ type: string;
+
+ index?: number;
+ }
+
+ export namespace ToolCall {
+ export interface Function {
+ arguments: string;
+
+ name: string;
+ }
+ }
+ }
+
+ /**
+ * Log probability information for the choice.
+ */
+ export interface Logprobs {
+ content: Array<Logprobs.Content> | null;
+
+ refusal: Array<Logprobs.Refusal> | null;
+ }
+
+ export namespace Logprobs {
+ export interface Content {
+ token: string;
+
+ logprob: number;
+
+ top_logprobs: Array<Content.TopLogprob>;
+
+ bytes?: Array<number>;
+ }
+
+ export namespace Content {
+ /**
+ * An array of mappings for each token to its top log probabilities, showing
+ * detailed prediction probabilities.
+ */
+ export interface TopLogprob {
+ token: string;
+
+ logprob: number;
+
+ bytes?: Array<number>;
+ }
+ }
+
+ export interface Refusal {
+ token: string;
+
+ logprob: number;
+
+ top_logprobs: Array<Refusal.TopLogprob>;
+
+ bytes?: Array<number>;
+ }
+
+ export namespace Refusal {
+ /**
+ * An array of mappings for each token to its top log probabilities, showing
+ * detailed prediction probabilities.
+ */
+ export interface TopLogprob {
+ token: string;
+
+ logprob: number;
+
+ bytes?: Array<number>;
+ }
+ }
+ }
+ }
/**
- * Log probability information for the choice.
+ * Usage information for the chat completion response. Please note that at this
+ * time Knowledge Graph tool usage is not included in this object.
*/
- logprobs?: Shared.Logprobs | null;
+ export interface Usage {
+ completion_tokens: number;
+
+ prompt_tokens: number;
+
+ total_tokens: number;
+
+ completion_tokens_details?: Usage.CompletionTokensDetails;
+
+ prompt_token_details?: Usage.PromptTokenDetails;
+ }
+
+ export namespace Usage {
+ export interface CompletionTokensDetails {
+ reasoning_tokens: number;
+ }
+
+ export interface PromptTokenDetails {
+ cached_tokens: number;
+ }
+ }
}
export interface ChatCompletionChunk {
@@ -144,7 +325,7 @@ export interface ChatCompletionChunk {
* Usage information for the chat completion response. Please note that at this
* time Knowledge Graph tool usage is not included in this object.
*/
- usage?: ChatCompletionUsage;
+ usage?: ChatCompletionChunk.Usage;
}
export namespace ChatCompletionChunk {
@@ -170,13 +351,13 @@ export namespace ChatCompletionChunk {
/**
* Log probability information for the choice.
*/
- logprobs?: Shared.Logprobs | null;
+ logprobs?: Choice.Logprobs | null;
/**
* The chat completion message from the model. Note: this field is deprecated for
* streaming. Use `delta` instead.
*/
- message?: ChatAPI.ChatCompletionMessage;
+ message?: Choice.Message;
}
export namespace Choice {
@@ -190,7 +371,7 @@ export namespace ChatCompletionChunk {
*/
content?: string;
- graph_data?: Shared.GraphData;
+ graph_data?: Delta.GraphData;
refusal?: string | null;
@@ -201,166 +382,256 @@ export namespace ChatCompletionChunk {
*/
role?: 'user' | 'assistant' | 'system';
- tool_calls?: Array<Shared.ToolCallStreaming> | null;
+ tool_calls?: Array<Delta.ToolCall>;
}
- }
-}
-/**
- * The chat completion message from the model. Note: this field is deprecated for
- * streaming. Use `delta` instead.
- */
-export interface ChatCompletionMessage {
- /**
- * The text content produced by the model. This field contains the actual output
- * generated, reflecting the model's response to the input query or command.
- */
- content: string;
-
- refusal: string | null;
+ export namespace Delta {
+ export interface GraphData {
+ sources?: Array<GraphData.Source>;
+
+ status?: 'processing' | 'finished';
+
+ subqueries?: Array<GraphData.Subquery>;
+ }
+
+ export namespace GraphData {
+ export interface Source {
+ /**
+ * The unique identifier of the file.
+ */
+ file_id: string;
+
+ /**
+ * A snippet of text from the source file.
+ */
+ snippet: string;
+ }
+
+ export interface Subquery {
+ /**
+ * The answer to the subquery.
+ */
+ answer: string;
+
+ /**
+ * The subquery that was asked.
+ */
+ query: string;
+
+ sources: Array<Subquery.Source>;
+ }
+
+ export namespace Subquery {
+ export interface Source {
+ /**
+ * The unique identifier of the file.
+ */
+ file_id: string;
+
+ /**
+ * A snippet of text from the source file.
+ */
+ snippet: string;
+ }
+ }
+ }
+
+ export interface ToolCall {
+ index: number;
+
+ id?: string;
+
+ function?: ToolCall.Function;
+
+ type?: string;
+ }
+
+ export namespace ToolCall {
+ export interface Function {
+ arguments: string;
+
+ name: string;
+ }
+ }
+ }
- /**
- * Specifies the role associated with the content.
- */
- role: 'assistant';
+ /**
+ * Log probability information for the choice.
+ */
+ export interface Logprobs {
+ content: Array<Logprobs.Content> | null;
- graph_data?: Shared.GraphData;
+ refusal: Array<Logprobs.Refusal> | null;
+ }
- tool_calls?: Array<Shared.ToolCall> | null;
-}
+ export namespace Logprobs {
+ export interface Content {
+ token: string;
-export interface ChatCompletionParams {
- /**
- * An array of message objects that form the conversation history or context for
- * the model to respond to. The array must contain at least one message.
- */
- messages: Array<ChatCompletionParams.Message>;
+ logprob: number;
- /**
- * Specifies the model to be used for generating responses. The chat model is
- * always `palmyra-x-004` for conversational use.
- */
- model: string;
+ top_logprobs: Array<Content.TopLogprob>;
- /**
- * Specifies whether to return log probabilities of the output tokens.
- */
- logprobs?: boolean;
+ bytes?: Array<number>;
+ }
- /**
- * Defines the maximum number of tokens (words and characters) that the model can
- * generate in the response. The default value is set to 16, but it can be adjusted
- * to allow for longer or shorter responses as needed.
- */
- max_tokens?: number;
+ export namespace Content {
+ /**
+ * An array of mappings for each token to its top log probabilities, showing
+ * detailed prediction probabilities.
+ */
+ export interface TopLogprob {
+ token: string;
- /**
- * Specifies the number of completions (responses) to generate from the model in a
- * single request. This parameter allows multiple responses to be generated,
- * offering a variety of potential replies from which to choose.
- */
- n?: number;
+ logprob: number;
- /**
- * A token or sequence of tokens that, when generated, will cause the model to stop
- * producing further content. This can be a single token or an array of tokens,
- * acting as a signal to end the output.
- */
- stop?: Array<string> | string;
+ bytes?: Array<number>;
+ }
+ }
- /**
- * Indicates whether the response should be streamed incrementally as it is
- * generated or only returned once fully complete. Streaming can be useful for
- * providing real-time feedback in interactive applications.
- */
- stream?: boolean;
+ export interface Refusal {
+ token: string;
- /**
- * Additional options for streaming.
- */
- stream_options?: ChatCompletionParams.StreamOptions;
+ logprob: number;
- /**
- * Controls the randomness or creativity of the model's responses. A higher
- * temperature results in more varied and less predictable text, while a lower
- * temperature produces more deterministic and conservative outputs.
- */
- temperature?: number;
+ top_logprobs: Array<Refusal.TopLogprob>;
- /**
- * Configure how the model will call functions: `auto` will allow the model to
- * automatically choose the best tool, `none` disables tool calling. You can also
- * pass a specific previously defined function.
- */
- tool_choice?: Shared.ToolChoiceString | Shared.ToolChoiceJsonObject;
+ bytes?: Array<number>;
+ }
- /**
- * An array of tools described to the model using JSON schema that the model can
- * use to generate responses. Passing graph IDs will automatically use the
- * Knowledge Graph tool.
- */
- tools?: Array<Shared.ToolParam>;
+ export namespace Refusal {
+ /**
+ * An array of mappings for each token to its top log probabilities, showing
+ * detailed prediction probabilities.
+ */
+ export interface TopLogprob {
+ token: string;
- /**
- * Sets the threshold for "nucleus sampling," a technique to focus the model's
- * token generation on the most likely subset of tokens. Only tokens with
- * cumulative probability above this threshold are considered, controlling the
- * trade-off between creativity and coherence.
- */
- top_p?: number;
-}
+ logprob: number;
-export namespace ChatCompletionParams {
- export interface Message {
- role: 'user' | 'assistant' | 'system' | 'tool';
+ bytes?: Array<number>;
+ }
+ }
+ }
- content?: string | null;
+ /**
+ * The chat completion message from the model. Note: this field is deprecated for
+ * streaming. Use `delta` instead.
+ */
+ export interface Message {
+ /**
+ * The text content produced by the model. This field contains the actual output
+ * generated, reflecting the model's response to the input query or command.
+ */
+ content: string;
- graph_data?: Shared.GraphData | null;
+ refusal: string | null;
- name?: string | null;
+ /**
+ * Specifies the role associated with the content.
+ */
+ role: 'assistant';
- refusal?: string | null;
+ graph_data?: Message.GraphData;
- tool_call_id?: string | null;
+ tool_calls?: Array<Message.ToolCall>;
+ }
- tool_calls?: Array<Shared.ToolCall> | null;
+ export namespace Message {
+ export interface GraphData {
+ sources?: Array<GraphData.Source>;
+
+ status?: 'processing' | 'finished';
+
+ subqueries?: Array<GraphData.Subquery>;
+ }
+
+ export namespace GraphData {
+ export interface Source {
+ /**
+ * The unique identifier of the file.
+ */
+ file_id: string;
+
+ /**
+ * A snippet of text from the source file.
+ */
+ snippet: string;
+ }
+
+ export interface Subquery {
+ /**
+ * The answer to the subquery.
+ */
+ answer: string;
+
+ /**
+ * The subquery that was asked.
+ */
+ query: string;
+
+ sources: Array<Subquery.Source>;
+ }
+
+ export namespace Subquery {
+ export interface Source {
+ /**
+ * The unique identifier of the file.
+ */
+ file_id: string;
+
+ /**
+ * A snippet of text from the source file.
+ */
+ snippet: string;
+ }
+ }
+ }
+
+ export interface ToolCall {
+ id: string;
+
+ function: ToolCall.Function;
+
+ type: string;
+
+ index?: number;
+ }
+
+ export namespace ToolCall {
+ export interface Function {
+ arguments: string;
+
+ name: string;
+ }
+ }
+ }
}
/**
- * Additional options for streaming.
+ * Usage information for the chat completion response. Please note that at this
+ * time Knowledge Graph tool usage is not included in this object.
*/
- export interface StreamOptions {
- /**
- * Indicate whether to include usage information.
- */
- include_usage: boolean;
- }
-}
-
-/**
- * Usage information for the chat completion response. Please note that at this
- * time Knowledge Graph tool usage is not included in this object.
- */
-export interface ChatCompletionUsage {
- completion_tokens: number;
+ export interface Usage {
+ completion_tokens: number;
- prompt_tokens: number;
+ prompt_tokens: number;
- total_tokens: number;
+ total_tokens: number;
- completion_tokens_details?: ChatCompletionUsage.CompletionTokensDetails;
-
- prompt_token_details?: ChatCompletionUsage.PromptTokenDetails;
-}
+ completion_tokens_details?: Usage.CompletionTokensDetails;
-export namespace ChatCompletionUsage {
- export interface CompletionTokensDetails {
- reasoning_tokens: number;
+ prompt_token_details?: Usage.PromptTokenDetails;
}
- export interface PromptTokenDetails {
- cached_tokens: number;
+ export namespace Usage {
+ export interface CompletionTokensDetails {
+ reasoning_tokens: number;
+ }
+
+ export interface PromptTokenDetails {
+ cached_tokens: number;
+ }
}
}
@@ -429,14 +700,14 @@ export interface ChatChatParamsBase {
* automatically choose the best tool, `none` disables tool calling. You can also
* pass a specific previously defined function.
*/
- tool_choice?: Shared.ToolChoiceString | Shared.ToolChoiceJsonObject;
+ tool_choice?: ChatChatParams.StringToolChoice | ChatChatParams.JsonObjectToolChoice;
/**
* An array of tools described to the model using JSON schema that the model can
* use to generate responses. Passing graph IDs will automatically use the
* Knowledge Graph tool.
*/
- tools?: Array<Shared.ToolParam>;
+ tools?: Array<ChatChatParams.FunctionTool | ChatChatParams.GraphTool>;
/**
* Sets the threshold for "nucleus sampling," a technique to focus the model's
@@ -453,7 +724,7 @@ export namespace ChatChatParams {
content?: string | null;
- graph_data?: Shared.GraphData | null;
+ graph_data?: Message.GraphData | null;
name?: string | null;
@@ -461,7 +732,77 @@ export namespace ChatChatParams {
tool_call_id?: string | null;
- tool_calls?: Array<Shared.ToolCall> | null;
+ tool_calls?: Array<Message.ToolCall> | null;
+ }
+
+ export namespace Message {
+ export interface GraphData {
+ sources?: Array<GraphData.Source>;
+
+ status?: 'processing' | 'finished';
+
+ subqueries?: Array<GraphData.Subquery>;
+ }
+
+ export namespace GraphData {
+ export interface Source {
+ /**
+ * The unique identifier of the file.
+ */
+ file_id: string;
+
+ /**
+ * A snippet of text from the source file.
+ */
+ snippet: string;
+ }
+
+ export interface Subquery {
+ /**
+ * The answer to the subquery.
+ */
+ answer: string;
+
+ /**
+ * The subquery that was asked.
+ */
+ query: string;
+
+ sources: Array<Subquery.Source>;
+ }
+
+ export namespace Subquery {
+ export interface Source {
+ /**
+ * The unique identifier of the file.
+ */
+ file_id: string;
+
+ /**
+ * A snippet of text from the source file.
+ */
+ snippet: string;
+ }
+ }
+ }
+
+ export interface ToolCall {
+ id: string;
+
+ function: ToolCall.Function;
+
+ type: string;
+
+ index?: number;
+ }
+
+ export namespace ToolCall {
+ export interface Function {
+ arguments: string;
+
+ name: string;
+ }
+ }
}
/**
@@ -474,6 +815,67 @@ export namespace ChatChatParams {
include_usage: boolean;
}
+ export interface StringToolChoice {
+ value: 'none' | 'auto' | 'required';
+ }
+
+ export interface JsonObjectToolChoice {
+ value: Record<string, unknown>;
+ }
+
+ export interface FunctionTool {
+ function: FunctionTool.Function;
+
+ /**
+ * The type of tool.
+ */
+ type: 'function';
+ }
+
+ export namespace FunctionTool {
+ export interface Function {
+ /**
+ * Name of the function
+ */
+ name: string;
+
+ /**
+ * Description of the function
+ */
+ description?: string;
+
+ parameters?: Record<string, unknown>;
+ }
+ }
+
+ export interface GraphTool {
+ function: GraphTool.Function;
+
+ /**
+ * The type of tool.
+ */
+ type: 'graph';
+ }
+
+ export namespace GraphTool {
+ export interface Function {
+ /**
+ * An array of graph IDs to be used in the tool.
+ */
+ graph_ids: Array<string>;
+
+ /**
+ * Boolean to indicate whether to include subqueries in the response.
+ */
+ subqueries: boolean;
+
+ /**
+ * A description of the graph content.
+ */
+ description?: string;
+ }
+ }
+
export type ChatChatParamsNonStreaming = ChatAPI.ChatChatParamsNonStreaming;
export type ChatChatParamsStreaming = ChatAPI.ChatChatParamsStreaming;
}
@@ -496,14 +898,10 @@ export interface ChatChatParamsStreaming extends ChatChatParamsBase {
stream: true;
}
-export declare namespace Chat {
+export declare namespace ChatResource {
export {
- type ChatCompletion as ChatCompletion,
- type ChatCompletionChoice as ChatCompletionChoice,
+ type Chat as Chat,
type ChatCompletionChunk as ChatCompletionChunk,
- type ChatCompletionMessage as ChatCompletionMessage,
- type ChatCompletionParams as ChatCompletionParams,
- type ChatCompletionUsage as ChatCompletionUsage,
type ChatChatParams as ChatChatParams,
type ChatChatParamsNonStreaming as ChatChatParamsNonStreaming,
type ChatChatParamsStreaming as ChatChatParamsStreaming,
diff --git a/src/resources/completions.ts b/src/resources/completions.ts
index 89f72083..3c415531 100644
--- a/src/resources/completions.ts
+++ b/src/resources/completions.ts
@@ -4,7 +4,6 @@ import { APIResource } from '../resource';
import { APIPromise } from '../core';
import * as Core from '../core';
import * as CompletionsAPI from './completions';
-import * as Shared from './shared';
import { Stream } from '../streaming';
export class Completions extends APIResource {
@@ -15,18 +14,18 @@ export class Completions extends APIResource {
create(
body: CompletionCreateParamsStreaming,
options?: Core.RequestOptions,
- ): APIPromise<Stream<CompletionChunk>>;
+ ): APIPromise<Stream<StreamingData>>;
create(
body: CompletionCreateParamsBase,
options?: Core.RequestOptions,
- ): APIPromise<Stream<CompletionChunk> | Completion>;
+ ): APIPromise<Stream<StreamingData> | Completion>;
create(
body: CompletionCreateParams,
options?: Core.RequestOptions,
- ): APIPromise<Completion> | APIPromise<Stream<CompletionChunk>> {
+ ): APIPromise<Completion> | APIPromise<Stream<StreamingData>> {
return this._client.post('/v1/completions', { body, ...options, stream: body.stream ?? false }) as
| APIPromise<Completion>
- | APIPromise<Stream<CompletionChunk>>;
+ | APIPromise<Stream<StreamingData>>;
}
}
@@ -52,68 +51,70 @@ export namespace Completion {
*/
text: string;
- log_probs?: Shared.Logprobs;
+ log_probs?: Choice.LogProbs;
}
-}
-export interface CompletionChunk {
- value: string;
-}
+ export namespace Choice {
+ export interface LogProbs {
+ content: Array<LogProbs.Content> | null;
-export interface CompletionParams {
- /**
- * The identifier of the model to be used for processing the request.
- */
- model: string;
+ refusal: Array<LogProbs.Refusal> | null;
+ }
- /**
- * The input text that the model will process to generate a response.
- */
- prompt: string;
+ export namespace LogProbs {
+ export interface Content {
+ token: string;
- /**
- * Specifies the number of completions to generate and return the best one. Useful
- * for generating multiple outputs and choosing the best based on some criteria.
- */
- best_of?: number;
+ logprob: number;
- /**
- * The maximum number of tokens that the model can generate in the response.
- */
- max_tokens?: number;
+ top_logprobs: Array<Content.TopLogprob>;
- /**
- * A seed used to initialize the random number generator for the model, ensuring
- * reproducibility of the output when the same inputs are provided.
- */
- random_seed?: number;
+ bytes?: Array<number>;
+ }
- /**
- * Specifies stopping conditions for the model's output generation. This can be an
- * array of strings or a single string that the model will look for as a signal to
- * stop generating further tokens.
- */
- stop?: Array<string> | string;
+ export namespace Content {
+ /**
+ * An array of mappings for each token to its top log probabilities, showing
+ * detailed prediction probabilities.
+ */
+ export interface TopLogprob {
+ token: string;
- /**
- * Determines whether the model's output should be streamed. If true, the output is
- * generated and sent incrementally, which can be useful for real-time
- * applications.
- */
- stream?: boolean;
+ logprob: number;
- /**
- * Controls the randomness of the model's outputs. Higher values lead to more
- * random outputs, while lower values make the model more deterministic.
- */
- temperature?: number;
+ bytes?: Array;
+ }
+ }
- /**
- * Used to control the nucleus sampling, where only the most probable tokens with a
- * cumulative probability of top_p are considered for sampling, providing a way to
- * fine-tune the randomness of predictions.
- */
- top_p?: number;
+ export interface Refusal {
+ token: string;
+
+ logprob: number;
+
+ top_logprobs: Array<Refusal.TopLogprob>;
+
+ bytes?: Array<number>;
+ }
+
+ export namespace Refusal {
+ /**
+ * An array of mappings for each token to its top log probabilities, showing
+ * detailed prediction probabilities.
+ */
+ export interface TopLogprob {
+ token: string;
+
+ logprob: number;
+
+ bytes?: Array<number>;
+ }
+ }
+ }
+ }
+}
+
+export interface StreamingData {
+ value: string;
}
export type CompletionCreateParams = CompletionCreateParamsNonStreaming | CompletionCreateParamsStreaming;
@@ -200,8 +201,7 @@ export interface CompletionCreateParamsStreaming extends CompletionCreateParamsB
export declare namespace Completions {
export {
type Completion as Completion,
- type CompletionChunk as CompletionChunk,
- type CompletionParams as CompletionParams,
+ type StreamingData as StreamingData,
type CompletionCreateParams as CompletionCreateParams,
type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming,
type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming,
diff --git a/src/resources/graphs.ts b/src/resources/graphs.ts
index 1e64d858..30507aca 100644
--- a/src/resources/graphs.ts
+++ b/src/resources/graphs.ts
@@ -6,7 +6,6 @@ import { APIPromise } from '../core';
import * as Core from '../core';
import * as GraphsAPI from './graphs';
import * as FilesAPI from './files';
-import * as Shared from './shared';
import { CursorPage, type CursorPageParams } from '../pagination';
import { Stream } from '../streaming';
@@ -163,12 +162,24 @@ export interface Question {
*/
question: string;
- sources: Array<Shared.Source>;
+ sources: Array<Question.Source>;
subqueries?: Array;
}
export namespace Question {
+ export interface Source {
+ /**
+ * The unique identifier of the file.
+ */
+ file_id: string;
+
+ /**
+ * A snippet of text from the source file.
+ */
+ snippet: string;
+ }
+
export interface Subquery {
/**
* The answer to the subquery.
@@ -180,7 +191,21 @@ export namespace Question {
*/
query: string;
- sources: Array<Shared.Source>;
+ sources: Array<Subquery.Source>;
+ }
+
+ export namespace Subquery {
+ export interface Source {
+ /**
+ * The unique identifier of the file.
+ */
+ file_id: string;
+
+ /**
+ * A snippet of text from the source file.
+ */
+ snippet: string;
+ }
}
}
diff --git a/src/resources/index.ts b/src/resources/index.ts
index 9bae7805..4efc7069 100644
--- a/src/resources/index.ts
+++ b/src/resources/index.ts
@@ -1,19 +1,14 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-export * from './shared';
export {
Applications,
type ApplicationGenerateContentResponse,
type ApplicationGenerateContentParams,
} from './applications';
export {
- Chat,
- type ChatCompletion,
- type ChatCompletionChoice,
+ ChatResource,
+ type Chat,
type ChatCompletionChunk,
- type ChatCompletionMessage,
- type ChatCompletionParams,
- type ChatCompletionUsage,
type ChatChatParams,
type ChatChatParamsNonStreaming,
type ChatChatParamsStreaming,
@@ -21,8 +16,7 @@ export {
export {
Completions,
type Completion,
- type CompletionChunk,
- type CompletionParams,
+ type StreamingData,
type CompletionCreateParams,
type CompletionCreateParamsNonStreaming,
type CompletionCreateParamsStreaming,
diff --git a/src/resources/shared.ts b/src/resources/shared.ts
deleted file mode 100644
index 21777c89..00000000
--- a/src/resources/shared.ts
+++ /dev/null
@@ -1,186 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import * as Shared from './shared';
-
-export interface ErrorMessage {
- description: string;
-
- extras: Record;
-
- key: string;
-}
-
-export interface ErrorObject {
- errors: Array;
-
- extras: Record;
-
- tpe: string;
-}
-
-export interface FunctionDefinition {
- /**
- * Name of the function
- */
- name: string;
-
- /**
- * Description of the function
- */
- description?: string;
-
- parameters?: FunctionParams;
-}
-
-export type FunctionParams = Record;
-
-export interface GraphData {
- sources?: Array;
-
- status?: 'processing' | 'finished';
-
- subqueries?: Array;
-}
-
-export namespace GraphData {
- export interface Subquery {
- /**
- * The answer to the subquery.
- */
- answer: string;
-
- /**
- * The subquery that was asked.
- */
- query: string;
-
- sources: Array;
- }
-}
-
-export interface Logprobs {
- content: Array | null;
-
- refusal: Array | null;
-}
-
-export interface LogprobsToken {
- token: string;
-
- logprob: number;
-
- top_logprobs: Array;
-
- bytes?: Array;
-}
-
-export namespace LogprobsToken {
- /**
- * An array of mappings for each token to its top log probabilities, showing
- * detailed prediction probabilities.
- */
- export interface TopLogprob {
- token: string;
-
- logprob: number;
-
- bytes?: Array;
- }
-}
-
-export interface Source {
- /**
- * The unique identifier of the file.
- */
- file_id: string;
-
- /**
- * A snippet of text from the source file.
- */
- snippet: string;
-}
-
-export interface ToolCall {
- id: string;
-
- function: ToolCall.Function;
-
- type: string;
-
- index?: number;
-}
-
-export namespace ToolCall {
- export interface Function {
- arguments: string;
-
- name?: string;
- }
-}
-
-export interface ToolCallStreaming {
- index: number;
-
- id?: string;
-
- function?: ToolCallStreaming.Function;
-
- type?: string;
-}
-
-export namespace ToolCallStreaming {
- export interface Function {
- arguments: string;
-
- name?: string;
- }
-}
-
-export interface ToolChoiceJsonObject {
- value: Record;
-}
-
-export interface ToolChoiceString {
- value: 'none' | 'auto' | 'required';
-}
-
-export type ToolParam = ToolParam.FunctionTool | ToolParam.GraphTool;
-
-export namespace ToolParam {
- export interface FunctionTool {
- function: Shared.FunctionDefinition;
-
- /**
- * The type of tool.
- */
- type: 'function';
- }
-
- export interface GraphTool {
- function: GraphTool.Function;
-
- /**
- * The type of tool.
- */
- type: 'graph';
- }
-
- export namespace GraphTool {
- export interface Function {
- /**
- * An array of graph IDs to be used in the tool.
- */
- graph_ids: Array;
-
- /**
- * Boolean to indicate whether to include subqueries in the response.
- */
- subqueries: boolean;
-
- /**
- * A description of the graph content.
- */
- description?: string;
- }
- }
-}
diff --git a/src/streaming.ts b/src/streaming.ts
index c71a75c1..84450322 100644
--- a/src/streaming.ts
+++ b/src/streaming.ts
@@ -34,13 +34,6 @@ export class Stream- implements AsyncIterable
- {
let done = false;
try {
for await (const sse of _iterSSEMessages(response, controller)) {
- if (done) continue;
-
- if (sse.data.startsWith('[DONE]')) {
- done = true;
- continue;
- }
-
if (sse.event === null) {
try {
yield JSON.parse(sse.data);