Skip to content

Commit

Permalink
feat(types): improve streaming params types (#102)
Browse files Browse the repository at this point in the history
  • Loading branch information
stainless-bot authored and rattrayalex committed Aug 9, 2023
1 parent f4cae3f commit cdf808c
Show file tree
Hide file tree
Showing 3 changed files with 127 additions and 197 deletions.
2 changes: 2 additions & 0 deletions src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -209,6 +209,8 @@ export namespace Anthropic {
export import Completions = API.Completions;
export import Completion = API.Completion;
export import CompletionCreateParams = API.CompletionCreateParams;
export import CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming;
export import CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming;
}

export default Anthropic;
314 changes: 118 additions & 196 deletions src/resources/completions.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,13 +10,17 @@ export class Completions extends APIResource {
* Create a completion
*/
create(
body: CompletionCreateParams.CompletionRequestNonStreaming,
body: CompletionCreateParamsNonStreaming,
options?: Core.RequestOptions,
): Promise<Core.APIResponse<Completion>>;
create(
body: CompletionCreateParams.CompletionRequestStreaming,
body: CompletionCreateParamsStreaming,
options?: Core.RequestOptions,
): Promise<Core.APIResponse<Stream<Completion>>>;
create(
body: CompletionCreateParams,
options?: Core.RequestOptions,
): Promise<Core.APIResponse<Completion | Stream<Completion>>>;
create(
body: CompletionCreateParams,
options?: Core.RequestOptions,
Expand Down Expand Up @@ -48,219 +52,137 @@ export interface Completion {
stop_reason: string;
}

export type CompletionCreateParams =
| CompletionCreateParams.CompletionRequestNonStreaming
| CompletionCreateParams.CompletionRequestStreaming;
export interface CompletionCreateParams {
/**
* The maximum number of tokens to generate before stopping.
*
* Note that our models may stop _before_ reaching this maximum. This parameter
* only specifies the absolute maximum number of tokens to generate.
*/
max_tokens_to_sample: number;

export namespace CompletionCreateParams {
export interface CompletionRequestNonStreaming {
/**
* The maximum number of tokens to generate before stopping.
*
* Note that our models may stop _before_ reaching this maximum. This parameter
* only specifies the absolute maximum number of tokens to generate.
*/
max_tokens_to_sample: number;
/**
* The model that will complete your prompt.
*
* As we improve Claude, we develop new versions of it that you can query. This
* parameter controls which version of Claude answers your request. Right now we
* are offering two model families: Claude, and Claude Instant. You can use them by
* setting `model` to `"claude-2"` or `"claude-instant-1"`, respectively. See
* [models](https://docs.anthropic.com/claude/reference/selecting-a-model) for
* additional details.
*/
model: (string & {}) | 'claude-2' | 'claude-instant-1';

/**
* The model that will complete your prompt.
*
* As we improve Claude, we develop new versions of it that you can query. This
* parameter controls which version of Claude answers your request. Right now we
* are offering two model families: Claude, and Claude Instant. You can use them by
* setting `model` to `"claude-2"` or `"claude-instant-1"`, respectively. See
* [models](https://docs.anthropic.com/claude/reference/selecting-a-model) for
* additional details.
*/
model: (string & {}) | 'claude-2' | 'claude-instant-1';
/**
* The prompt that you want Claude to complete.
*
* For proper response generation you will need to format your prompt as follows:
*
* ```javascript
 * const userQuestion = "Why is the sky blue?";
* const prompt = `\n\nHuman: ${userQuestion}\n\nAssistant:`;
* ```
*
* See our
* [comments on prompts](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
* for more context.
*/
prompt: string;

/**
* The prompt that you want Claude to complete.
*
* For proper response generation you will need to format your prompt as follows:
*
* ```javascript
 * const userQuestion = "Why is the sky blue?";
* const prompt = `\n\nHuman: ${userQuestion}\n\nAssistant:`;
* ```
*
* See our
* [comments on prompts](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
* for more context.
*/
prompt: string;
/**
* An object describing metadata about the request.
*/
metadata?: CompletionCreateParams.Metadata;

/**
* An object describing metadata about the request.
*/
metadata?: CompletionCreateParams.CompletionRequestNonStreaming.Metadata;
/**
* Sequences that will cause the model to stop generating completion text.
*
* Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
* sequences in the future. By providing the stop_sequences parameter, you may
* include additional strings that will cause the model to stop generating.
*/
stop_sequences?: Array<string>;

/**
* Sequences that will cause the model to stop generating completion text.
*
* Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
* sequences in the future. By providing the stop_sequences parameter, you may
* include additional strings that will cause the model to stop generating.
*/
stop_sequences?: Array<string>;
/**
* Whether to incrementally stream the response using server-sent events.
*
* See
* [this guide to SSE events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events)
* for details.
*/
stream?: boolean;

/**
* Whether to incrementally stream the response using server-sent events.
*
* See
* [this guide to SSE events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events)
* for details.
*/
stream?: false;
/**
* Amount of randomness injected into the response.
*
* Defaults to 1. Ranges from 0 to 1. Use temp closer to 0 for analytical /
* multiple choice, and closer to 1 for creative and generative tasks.
*/
temperature?: number;

/**
* Amount of randomness injected into the response.
*
* Defaults to 1. Ranges from 0 to 1. Use temp closer to 0 for analytical /
* multiple choice, and closer to 1 for creative and generative tasks.
*/
temperature?: number;
/**
* Only sample from the top K options for each subsequent token.
*
* Used to remove "long tail" low probability responses.
* [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
*/
top_k?: number;

/**
* Only sample from the top K options for each subsequent token.
*
* Used to remove "long tail" low probability responses.
* [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
*/
top_k?: number;
/**
* Use nucleus sampling.
*
* In nucleus sampling, we compute the cumulative distribution over all the options
* for each subsequent token in decreasing probability order and cut it off once it
* reaches a particular probability specified by `top_p`. You should either alter
* `temperature` or `top_p`, but not both.
*/
top_p?: number;
}

export namespace CompletionCreateParams {
/**
* An object describing metadata about the request.
*/
export interface Metadata {
/**
* Use nucleus sampling.
* An external identifier for the user who is associated with the request.
*
* In nucleus sampling, we compute the cumulative distribution over all the options
* for each subsequent token in decreasing probability order and cut it off once it
* reaches a particular probability specified by `top_p`. You should either alter
* `temperature` or `top_p`, but not both.
*/
top_p?: number;
}

export namespace CompletionRequestNonStreaming {
/**
* An object describing metadata about the request.
* This should be a uuid, hash value, or other opaque identifier. Anthropic may use
* this id to help detect abuse. Do not include any identifying information such as
* name, email address, or phone number.
*/
export interface Metadata {
/**
* An external identifier for the user who is associated with the request.
*
* This should be a uuid, hash value, or other opaque identifier. Anthropic may use
* this id to help detect abuse. Do not include any identifying information such as
* name, email address, or phone number.
*/
user_id?: string;
}
user_id?: string;
}

export interface CompletionRequestStreaming {
/**
* The maximum number of tokens to generate before stopping.
*
* Note that our models may stop _before_ reaching this maximum. This parameter
* only specifies the absolute maximum number of tokens to generate.
*/
max_tokens_to_sample: number;

/**
* The model that will complete your prompt.
*
* As we improve Claude, we develop new versions of it that you can query. This
* parameter controls which version of Claude answers your request. Right now we
* are offering two model families: Claude, and Claude Instant. You can use them by
* setting `model` to `"claude-2"` or `"claude-instant-1"`, respectively. See
* [models](https://docs.anthropic.com/claude/reference/selecting-a-model) for
* additional details.
*/
model: (string & {}) | 'claude-2' | 'claude-instant-1';

/**
* The prompt that you want Claude to complete.
*
* For proper response generation you will need to format your prompt as follows:
*
* ```javascript
 * const userQuestion = "Why is the sky blue?";
* const prompt = `\n\nHuman: ${userQuestion}\n\nAssistant:`;
* ```
*
* See our
* [comments on prompts](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
* for more context.
*/
prompt: string;

/**
* Whether to incrementally stream the response using server-sent events.
*
* See
* [this guide to SSE events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events)
* for details.
*/
stream: true;

/**
* An object describing metadata about the request.
*/
metadata?: CompletionCreateParams.CompletionRequestStreaming.Metadata;

/**
* Sequences that will cause the model to stop generating completion text.
*
* Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
* sequences in the future. By providing the stop_sequences parameter, you may
* include additional strings that will cause the model to stop generating.
*/
stop_sequences?: Array<string>;

/**
* Amount of randomness injected into the response.
*
* Defaults to 1. Ranges from 0 to 1. Use temp closer to 0 for analytical /
* multiple choice, and closer to 1 for creative and generative tasks.
*/
temperature?: number;

/**
* Only sample from the top K options for each subsequent token.
*
* Used to remove "long tail" low probability responses.
* [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
*/
top_k?: number;
export type CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming;
export type CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming;
}

/**
* Use nucleus sampling.
*
* In nucleus sampling, we compute the cumulative distribution over all the options
* for each subsequent token in decreasing probability order and cut it off once it
* reaches a particular probability specified by `top_p`. You should either alter
* `temperature` or `top_p`, but not both.
*/
top_p?: number;
}
export interface CompletionCreateParamsNonStreaming extends CompletionCreateParams {
  /**
   * Whether to incrementally stream the response using server-sent events.
   *
   * See
   * [this guide to SSE events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events)
   * for details.
   *
   * Narrowed here to `false` (or omitted): passing these params selects the
   * `create` overload that resolves to a single `Completion`.
   */
  stream?: false;
}

export namespace CompletionRequestStreaming {
/**
* An object describing metadata about the request.
*/
export interface Metadata {
/**
* An external identifier for the user who is associated with the request.
*
* This should be a uuid, hash value, or other opaque identifier. Anthropic may use
* this id to help detect abuse. Do not include any identifying information such as
* name, email address, or phone number.
*/
user_id?: string;
}
}
export interface CompletionCreateParamsStreaming extends CompletionCreateParams {
  /**
   * Whether to incrementally stream the response using server-sent events.
   *
   * See
   * [this guide to SSE events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events)
   * for details.
   *
   * Narrowed here to the literal `true` (required): passing these params
   * selects the `create` overload that resolves to a `Stream<Completion>`.
   */
  stream: true;
}

// Re-export the completion types on the `Completions` resource namespace so
// callers can reference them as e.g. `Completions.CompletionCreateParams`
// without a separate type import.
export namespace Completions {
  export import Completion = API.Completion;
  export import CompletionCreateParams = API.CompletionCreateParams;
  export import CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming;
  export import CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming;
}
8 changes: 7 additions & 1 deletion src/resources/index.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,10 @@
// File generated from our OpenAPI spec by Stainless.

export {} from './top-level';
export { Completion, CompletionCreateParams, Completions } from './completions';
// Public surface of the completions resource, including the streaming /
// non-streaming request-param variants that back the `create` overloads.
export {
  Completion,
  CompletionCreateParams,
  CompletionCreateParamsNonStreaming,
  CompletionCreateParamsStreaming,
  Completions,
} from './completions';

0 comments on commit cdf808c

Please sign in to comment.