Skip to content

Commit

Permalink
fix!: remove anthropic-beta and x-api-key headers from param types (#243)
Browse files Browse the repository at this point in the history

This fixes a bug where the API key header would be removed unintentionally.
# Migration
These headers should not be passed as params; the SDK adds them for you.
- If you were previously passing `'anthropic-beta': 'messages-2023-12-15'`,
  remove it.
- If you were previously passing `'x-api-key': myKey`,
  pass it to the client as `apiKey` or as an environment variable instead.
If you really need to pass these headers in a specific call,
pass them as a header option.
  • Loading branch information
stainless-bot committed Dec 21, 2023
1 parent eb12705 commit 60f67ae
Show file tree
Hide file tree
Showing 5 changed files with 36 additions and 74 deletions.
4 changes: 2 additions & 2 deletions src/lib/MessageStream.ts
Expand Up @@ -95,7 +95,7 @@ export class MessageStream implements AsyncIterable<MessageStreamEvent> {
runner._run(() =>
runner._createMessage(
messages,
{ ...params, stream: true, 'anthropic-beta': 'messages-2023-12-15' },
{ ...params, stream: true },
{ ...options, headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' } },
),
);
Expand Down Expand Up @@ -132,7 +132,7 @@ export class MessageStream implements AsyncIterable<MessageStreamEvent> {
}
this.#beginRequest();
const stream = await messages.create(
{ ...params, stream: true, 'anthropic-beta': 'messages-2023-12-15' },
{ ...params, stream: true },
{ ...options, signal: this.controller.signal },
);
this._connected();
Expand Down
60 changes: 18 additions & 42 deletions src/resources/beta/messages.ts
Expand Up @@ -14,31 +14,25 @@ export class Messages extends APIResource {
*
* The Messages API is currently in beta.
*/
create(params: MessageCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise<Message>;
create(body: MessageCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise<Message>;
create(
params: MessageCreateParamsStreaming,
body: MessageCreateParamsStreaming,
options?: Core.RequestOptions,
): APIPromise<Stream<MessageStreamEvent>>;
create(
params: MessageCreateParamsBase,
body: MessageCreateParamsBase,
options?: Core.RequestOptions,
): APIPromise<Stream<MessageStreamEvent> | Message>;
create(
params: MessageCreateParams,
body: MessageCreateParams,
options?: Core.RequestOptions,
): APIPromise<Message> | APIPromise<Stream<MessageStreamEvent>> {
const { 'anthropic-beta': anthropicBeta, 'x-api-key': xAPIKey, ...body } = params;
return this._client.post('/v1/messages', {
body,
timeout: 600000,
...options,
headers: {
'Anthropic-Beta': 'messages-2023-12-15',
'anthropic-beta': anthropicBeta,
'x-api-key': xAPIKey || '',
...options?.headers,
},
stream: params.stream ?? false,
headers: { 'Anthropic-Beta': 'messages-2023-12-15', ...options?.headers },
stream: body.stream ?? false,
}) as APIPromise<Message> | APIPromise<Stream<MessageStreamEvent>>;
}

Expand Down Expand Up @@ -218,7 +212,7 @@ export type MessageCreateParams = MessageCreateParamsNonStreaming | MessageCreat

export interface MessageCreateParamsBase {
/**
* Body param: The maximum number of tokens to generate before stopping.
* The maximum number of tokens to generate before stopping.
*
* Note that our models may stop _before_ reaching this maximum. This parameter
* only specifies the absolute maximum number of tokens to generate.
Expand All @@ -230,7 +224,7 @@ export interface MessageCreateParamsBase {
max_tokens: number;

/**
* Body param: Input messages.
* Input messages.
*
* Our models are trained to operate on alternating `user` and `assistant`
* conversational turns. When creating a new `Message`, you specify the prior
Expand Down Expand Up @@ -293,7 +287,7 @@ export interface MessageCreateParamsBase {
messages: Array<MessageParam>;

/**
* Body param: The model that will complete your prompt.
* The model that will complete your prompt.
*
* As we improve Claude, we develop new versions of it that you can query. The
* `model` parameter controls which version of Claude responds to your request.
Expand All @@ -307,22 +301,12 @@ export interface MessageCreateParamsBase {
model: string;

/**
* Header param: Beta version header.
*
* During the beta, the Messages API requires sending the
* `anthropic-beta: messages-2023-12-15` header. This is a beta-specific header
* that is required in addition to the normal `anthropic-version` header. If you
* are using our SDKs, both of these headers will be sent automatically.
*/
'anthropic-beta': string;

/**
* Body param: An object describing metadata about the request.
* An object describing metadata about the request.
*/
metadata?: MessageCreateParams.Metadata;

/**
* Body param: Custom text sequences that will cause the model to stop generating.
* Custom text sequences that will cause the model to stop generating.
*
* Our models will normally stop when they have naturally completed their turn,
* which will result in a response `stop_reason` of `"end_turn"`.
Expand All @@ -335,16 +319,15 @@ export interface MessageCreateParamsBase {
stop_sequences?: Array<string>;

/**
* Body param: Whether to incrementally stream the response using server-sent
* events.
* Whether to incrementally stream the response using server-sent events.
*
* See [streaming](https://docs.anthropic.com/claude/reference/streaming) for
* details.
*/
stream?: boolean;

/**
* Body param: System prompt.
* System prompt.
*
* A system prompt is a way of providing context and instructions to Claude, such
* as specifying a particular goal or role. See our
Expand All @@ -353,35 +336,30 @@ export interface MessageCreateParamsBase {
system?: string;

/**
* Body param: Amount of randomness injected into the response.
* Amount of randomness injected into the response.
*
* Defaults to 1. Ranges from 0 to 1. Use temp closer to 0 for analytical /
* multiple choice, and closer to 1 for creative and generative tasks.
*/
temperature?: number;

/**
* Body param: Only sample from the top K options for each subsequent token.
* Only sample from the top K options for each subsequent token.
*
* Used to remove "long tail" low probability responses.
* [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
*/
top_k?: number;

/**
* Body param: Use nucleus sampling.
* Use nucleus sampling.
*
* In nucleus sampling, we compute the cumulative distribution over all the options
* for each subsequent token in decreasing probability order and cut it off once it
* reaches a particular probability specified by `top_p`. You should either alter
* `temperature` or `top_p`, but not both.
*/
top_p?: number;

/**
* Header param:
*/
'x-api-key'?: string;
}

export namespace MessageCreateParams {
Expand All @@ -405,8 +383,7 @@ export namespace MessageCreateParams {

export interface MessageCreateParamsNonStreaming extends MessageCreateParamsBase {
/**
* Body param: Whether to incrementally stream the response using server-sent
* events.
* Whether to incrementally stream the response using server-sent events.
*
* See [streaming](https://docs.anthropic.com/claude/reference/streaming) for
* details.
Expand All @@ -416,8 +393,7 @@ export interface MessageCreateParamsNonStreaming extends MessageCreateParamsBase

export interface MessageCreateParamsStreaming extends MessageCreateParamsBase {
/**
* Body param: Whether to incrementally stream the response using server-sent
* events.
* Whether to incrementally stream the response using server-sent events.
*
* See [streaming](https://docs.anthropic.com/claude/reference/streaming) for
* details.
Expand Down
42 changes: 16 additions & 26 deletions src/resources/completions.ts
Expand Up @@ -10,26 +10,24 @@ export class Completions extends APIResource {
/**
* Create a Completion
*/
create(params: CompletionCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise<Completion>;
create(body: CompletionCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise<Completion>;
create(
params: CompletionCreateParamsStreaming,
body: CompletionCreateParamsStreaming,
options?: Core.RequestOptions,
): APIPromise<Stream<Completion>>;
create(
params: CompletionCreateParamsBase,
body: CompletionCreateParamsBase,
options?: Core.RequestOptions,
): APIPromise<Stream<Completion> | Completion>;
create(
params: CompletionCreateParams,
body: CompletionCreateParams,
options?: Core.RequestOptions,
): APIPromise<Completion> | APIPromise<Stream<Completion>> {
const { 'x-api-key': xAPIKey, ...body } = params;
return this._client.post('/v1/complete', {
body,
timeout: 600000,
...options,
headers: { 'x-api-key': xAPIKey || '', ...options?.headers },
stream: params.stream ?? false,
stream: body.stream ?? false,
}) as APIPromise<Completion> | APIPromise<Stream<Completion>>;
}
}
Expand Down Expand Up @@ -70,15 +68,15 @@ export type CompletionCreateParams = CompletionCreateParamsNonStreaming | Comple

export interface CompletionCreateParamsBase {
/**
* Body param: The maximum number of tokens to generate before stopping.
* The maximum number of tokens to generate before stopping.
*
* Note that our models may stop _before_ reaching this maximum. This parameter
* only specifies the absolute maximum number of tokens to generate.
*/
max_tokens_to_sample: number;

/**
* Body param: The model that will complete your prompt.
* The model that will complete your prompt.
*
* As we improve Claude, we develop new versions of it that you can query. The
* `model` parameter controls which version of Claude responds to your request.
Expand All @@ -92,7 +90,7 @@ export interface CompletionCreateParamsBase {
model: (string & {}) | 'claude-2.1' | 'claude-instant-1';

/**
* Body param: The prompt that you want Claude to complete.
* The prompt that you want Claude to complete.
*
* For proper response generation you will need to format your prompt using
* alternating `\n\nHuman:` and `\n\nAssistant:` conversational turns. For example:
Expand All @@ -110,12 +108,12 @@ export interface CompletionCreateParamsBase {
prompt: string;

/**
* Body param: An object describing metadata about the request.
* An object describing metadata about the request.
*/
metadata?: CompletionCreateParams.Metadata;

/**
* Body param: Sequences that will cause the model to stop generating.
* Sequences that will cause the model to stop generating.
*
* Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
* sequences in the future. By providing the stop_sequences parameter, you may
Expand All @@ -124,44 +122,38 @@ export interface CompletionCreateParamsBase {
stop_sequences?: Array<string>;

/**
* Body param: Whether to incrementally stream the response using server-sent
* events.
* Whether to incrementally stream the response using server-sent events.
*
* See [streaming](https://docs.anthropic.com/claude/reference/streaming) for
* details.
*/
stream?: boolean;

/**
* Body param: Amount of randomness injected into the response.
* Amount of randomness injected into the response.
*
* Defaults to 1. Ranges from 0 to 1. Use temp closer to 0 for analytical /
* multiple choice, and closer to 1 for creative and generative tasks.
*/
temperature?: number;

/**
* Body param: Only sample from the top K options for each subsequent token.
* Only sample from the top K options for each subsequent token.
*
* Used to remove "long tail" low probability responses.
* [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
*/
top_k?: number;

/**
* Body param: Use nucleus sampling.
* Use nucleus sampling.
*
* In nucleus sampling, we compute the cumulative distribution over all the options
* for each subsequent token in decreasing probability order and cut it off once it
* reaches a particular probability specified by `top_p`. You should either alter
* `temperature` or `top_p`, but not both.
*/
top_p?: number;

/**
* Header param:
*/
'x-api-key'?: string;
}

export namespace CompletionCreateParams {
Expand All @@ -185,8 +177,7 @@ export namespace CompletionCreateParams {

export interface CompletionCreateParamsNonStreaming extends CompletionCreateParamsBase {
/**
* Body param: Whether to incrementally stream the response using server-sent
* events.
* Whether to incrementally stream the response using server-sent events.
*
* See [streaming](https://docs.anthropic.com/claude/reference/streaming) for
* details.
Expand All @@ -196,8 +187,7 @@ export interface CompletionCreateParamsNonStreaming extends CompletionCreatePara

export interface CompletionCreateParamsStreaming extends CompletionCreateParamsBase {
/**
* Body param: Whether to incrementally stream the response using server-sent
* events.
* Whether to incrementally stream the response using server-sent events.
*
* See [streaming](https://docs.anthropic.com/claude/reference/streaming) for
* details.
Expand Down
3 changes: 0 additions & 3 deletions tests/api-resources/beta/messages.test.ts
Expand Up @@ -14,7 +14,6 @@ describe('resource messages', () => {
max_tokens: 1024,
messages: [{ role: 'user', content: 'In one sentence, what is good about the color blue?' }],
model: 'claude-2.1',
'anthropic-beta': 'messages-2023-12-15',
});
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
Expand All @@ -30,15 +29,13 @@ describe('resource messages', () => {
max_tokens: 1024,
messages: [{ role: 'user', content: 'In one sentence, what is good about the color blue?' }],
model: 'claude-2.1',
'anthropic-beta': 'messages-2023-12-15',
metadata: { user_id: '13803d75-b4b5-4c3e-b2a2-6f21399b021b' },
stop_sequences: ['string', 'string', 'string'],
stream: false,
system: "Today's date is 2024-01-01.",
temperature: 1,
top_k: 5,
top_p: 0.7,
'x-api-key': 'string',
});
});
});
1 change: 0 additions & 1 deletion tests/api-resources/completions.test.ts
Expand Up @@ -35,7 +35,6 @@ describe('resource completions', () => {
temperature: 1,
top_k: 5,
top_p: 0.7,
'x-api-key': 'string',
});
});
});

0 comments on commit 60f67ae

Please sign in to comment.