Commit f654e3b

SDK regeneration (#205)
Co-authored-by: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
1 parent 1c8de5f commit f654e3b

19 files changed: +845 -811 lines

package.json

Lines changed: 4 additions & 2 deletions
@@ -23,7 +23,8 @@
     "@aws-sdk/client-sagemaker": "^3.583.0",
     "@aws-sdk/credential-providers": "^3.583.0",
     "@aws-sdk/protocol-http": "^3.374.0",
-    "@aws-sdk/signature-v4": "^3.374.0"
+    "@aws-sdk/signature-v4": "^3.374.0",
+    "convict": "^6.2.4"
   },
   "devDependencies": {
     "@types/url-join": "4.0.1",
@@ -39,7 +40,8 @@
     "typescript": "4.6.4",
     "@types/readable-stream": "^4.0.14",
     "ts-loader": "^9.5.1",
-    "webpack": "^5.91.0"
+    "webpack": "^5.91.0",
+    "@types/convict": "^6.1.6"
   },
   "browser": {
     "fs": false,

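The only dependency changes are the new convict runtime dependency and its @types/convict counterpart in devDependencies. Purely as orientation for readers unfamiliar with the library, here is a minimal, hypothetical convict sketch; none of the keys, defaults, or environment variables come from this commit, and it does not show how the SDK itself wires the library in.

```typescript
// Hypothetical illustration of the convict dependency added above.
import convict from "convict";

const config = convict({
    env: {
        doc: "The application environment.",
        format: ["production", "development", "test"],
        default: "development",
        env: "NODE_ENV",
    },
    timeoutMs: {
        doc: "Request timeout in milliseconds.",
        format: "nat",
        default: 30000,
        env: "TIMEOUT_MS",
    },
});

// Throws if an environment override does not satisfy the schema.
config.validate({ allowed: "strict" });
console.log(config.get("timeoutMs"));
```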
reference.md

Lines changed: 8 additions & 2 deletions
@@ -71,7 +71,9 @@ await client.checkApiKey();
 <dl>
 <dd>
 
-Generates a message from the model in response to a provided conversation. To learn how to use the Chat API with Streaming and RAG follow our Text Generation guides.
+Generates a message from the model in response to a provided conversation. To learn more about the features of the Chat API follow our [Text Generation guides](https://docs.cohere.com/v2/docs/chat-api).
+
+Follow the [Migration Guide](https://docs.cohere.com/v2/docs/migrating-v1-to-v2) for instructions on moving from API v1 to API v2.
 
 </dd>
 </dl>
@@ -173,7 +175,9 @@ await client.v2.chatStream({
 <dl>
 <dd>
 
-Generates a message from the model in response to a provided conversation. To learn how to use the Chat API with Streaming and RAG follow our Text Generation guides.
+Generates a message from the model in response to a provided conversation. To learn more about the features of the Chat API follow our [Text Generation guides](https://docs.cohere.com/v2/docs/chat-api).
+
+Follow the [Migration Guide](https://docs.cohere.com/v2/docs/migrating-v1-to-v2) for instructions on moving from API v1 to API v2.
 
 </dd>
 </dl>
@@ -266,6 +270,8 @@ If you want to learn more how to use the embedding model, have a look at the [Se
 ```typescript
 await client.v2.embed({
     model: "model",
+    inputType: Cohere.EmbedInputType.SearchDocument,
+    embeddingTypes: [Cohere.EmbeddingType.Float],
 });
 ```

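The regenerated example now passes `inputType` and `embeddingTypes`, which this commit makes required (see `V2EmbedRequest.ts` below). A slightly fuller sketch of the same call; the `embed-english-v3.0` model name and the `embeddings.float` response path are illustrative assumptions rather than part of this diff:

```typescript
import { Cohere, CohereClient } from "cohere-ai";

const client = new CohereClient({ token: process.env.CO_API_KEY! });

// Embeds documents for indexing; both inputType and embeddingTypes are now mandatory.
async function embedDocuments(texts: string[]): Promise<number[][]> {
    const response = await client.v2.embed({
        model: "embed-english-v3.0", // assumed model name
        texts,
        inputType: Cohere.EmbedInputType.SearchDocument,
        embeddingTypes: [Cohere.EmbeddingType.Float],
    });
    // With embeddingTypes: ["float"], vectors are expected under embeddings.float.
    return response.embeddings?.float ?? [];
}
```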
src/api/resources/v2/client/Client.ts

Lines changed: 9 additions & 3 deletions
@@ -35,7 +35,9 @@ export class V2 {
     constructor(protected readonly _options: V2.Options = {}) {}
 
     /**
-     * Generates a message from the model in response to a provided conversation. To learn how to use the Chat API with Streaming and RAG follow our Text Generation guides.
+     * Generates a message from the model in response to a provided conversation. To learn more about the features of the Chat API follow our [Text Generation guides](https://docs.cohere.com/v2/docs/chat-api).
+     *
+     * Follow the [Migration Guide](https://docs.cohere.com/v2/docs/migrating-v1-to-v2) for instructions on moving from API v1 to API v2.
      */
     public async chatStream(
         request: Cohere.V2ChatStreamRequest,
@@ -183,7 +185,9 @@ export class V2 {
     }
 
     /**
-     * Generates a message from the model in response to a provided conversation. To learn how to use the Chat API with Streaming and RAG follow our Text Generation guides.
+     * Generates a message from the model in response to a provided conversation. To learn more about the features of the Chat API follow our [Text Generation guides](https://docs.cohere.com/v2/docs/chat-api).
+     *
+     * Follow the [Migration Guide](https://docs.cohere.com/v2/docs/migrating-v1-to-v2) for instructions on moving from API v1 to API v2.
      *
      * @param {Cohere.V2ChatRequest} request
      * @param {V2.RequestOptions} requestOptions - Request-specific configuration.
@@ -364,7 +368,9 @@ export class V2 {
      *
      * @example
      *     await client.v2.embed({
-     *         model: "model"
+     *         model: "model",
+     *         inputType: Cohere.EmbedInputType.SearchDocument,
+     *         embeddingTypes: [Cohere.EmbeddingType.Float]
      *     })
      */
     public async embed(

src/api/resources/v2/client/requests/V2ChatRequest.ts

Lines changed: 6 additions & 6 deletions
@@ -15,7 +15,7 @@ import * as Cohere from "../../../../index";
  * }
  */
 export interface V2ChatRequest {
-    /** The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) (such as command-r or command-r-plus) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model. */
+    /** The name of a compatible [Cohere model](https://docs.cohere.com/v2/docs/models) (such as command-r or command-r-plus) or the ID of a [fine-tuned](https://docs.cohere.com/v2/docs/chat-fine-tuning) model. */
     model: string;
     messages: Cohere.ChatMessages;
     /**
@@ -33,19 +33,19 @@ export interface V2ChatRequest {
     citationOptions?: Cohere.CitationOptions;
     responseFormat?: Cohere.ResponseFormatV2;
     /**
-     * Used to select the [safety instruction](/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
+     * Used to select the [safety instruction](https://docs.cohere.com/v2/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
      * When `OFF` is specified, the safety instruction will be omitted.
      *
      * Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.
      *
-     * **Note**: This parameter is only compatible with models [Command R 08-2024](/docs/command-r#august-2024-release), [Command R+ 08-2024](/docs/command-r-plus#august-2024-release) and newer.
-     *
-     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+     * **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/v2/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/v2/docs/command-r-plus#august-2024-release) and newer.
      *
      */
     safetyMode?: Cohere.V2ChatRequestSafetyMode;
     /**
-     * The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations.
+     * The maximum number of tokens the model will generate as part of the response.
+     *
+     * **Note**: Setting a low value may result in incomplete generations.
      *
      */
     maxTokens?: number;

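The interface above now carries fully qualified links for `safetyMode` and splits the `maxTokens` caveat into its own note. A minimal sketch of a request exercising both fields; the model name is an assumption for illustration:

```typescript
import { Cohere } from "cohere-ai";

// Typed against the regenerated V2ChatRequest interface.
const request: Cohere.V2ChatRequest = {
    model: "command-r-plus-08-2024", // assumed model name
    messages: [{ role: "user", content: "Summarize safety modes in one sentence." }],
    safetyMode: "STRICT", // "CONTEXTUAL" | "STRICT" | "OFF"
    maxTokens: 100, // low values may cut the generation short
};
```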
src/api/resources/v2/client/requests/V2ChatStreamRequest.ts

Lines changed: 6 additions & 6 deletions
@@ -44,7 +44,7 @@ import * as Cohere from "../../../../index";
  * }
  */
 export interface V2ChatStreamRequest {
-    /** The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) (such as command-r or command-r-plus) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model. */
+    /** The name of a compatible [Cohere model](https://docs.cohere.com/v2/docs/models) (such as command-r or command-r-plus) or the ID of a [fine-tuned](https://docs.cohere.com/v2/docs/chat-fine-tuning) model. */
     model: string;
     messages: Cohere.ChatMessages;
     /**
@@ -62,19 +62,19 @@ export interface V2ChatStreamRequest {
     citationOptions?: Cohere.CitationOptions;
     responseFormat?: Cohere.ResponseFormatV2;
     /**
-     * Used to select the [safety instruction](/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
+     * Used to select the [safety instruction](https://docs.cohere.com/v2/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
      * When `OFF` is specified, the safety instruction will be omitted.
      *
     * Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.
      *
-     * **Note**: This parameter is only compatible with models [Command R 08-2024](/docs/command-r#august-2024-release), [Command R+ 08-2024](/docs/command-r-plus#august-2024-release) and newer.
-     *
-     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+     * **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/v2/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/v2/docs/command-r-plus#august-2024-release) and newer.
      *
      */
     safetyMode?: Cohere.V2ChatStreamRequestSafetyMode;
     /**
-     * The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations.
+     * The maximum number of tokens the model will generate as part of the response.
+     *
+     * **Note**: Setting a low value may result in incomplete generations.
      *
      */
     maxTokens?: number;

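The streaming variant takes the same request shape. A sketch of consuming the stream, assuming the SDK's usual async-iteration pattern and the `content-delta` event shape (taken from the SDK README rather than this diff):

```typescript
import { CohereClient } from "cohere-ai";

const client = new CohereClient({ token: process.env.CO_API_KEY! });

async function streamAnswer(prompt: string): Promise<void> {
    const stream = await client.v2.chatStream({
        model: "command-r-plus-08-2024", // assumed model name
        messages: [{ role: "user", content: prompt }],
    });
    for await (const event of stream) {
        // Incremental text arrives on content-delta events.
        if (event.type === "content-delta") {
            process.stdout.write(event.delta?.message?.content?.text ?? "");
        }
    }
}
```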
src/api/resources/v2/client/requests/V2EmbedRequest.ts

Lines changed: 5 additions & 3 deletions
@@ -7,7 +7,9 @@ import * as Cohere from "../../../../index";
 /**
  * @example
  *     {
- *         model: "model"
+ *         model: "model",
+ *         inputType: Cohere.EmbedInputType.SearchDocument,
+ *         embeddingTypes: [Cohere.EmbeddingType.Float]
  *     }
  */
 export interface V2EmbedRequest {
@@ -36,7 +38,7 @@ export interface V2EmbedRequest {
      * * `embed-multilingual-v2.0` 768
      */
     model: string;
-    inputType?: Cohere.EmbedInputType;
+    inputType: Cohere.EmbedInputType;
     /**
      * Specifies the types of embeddings you want to get back. Not required and default is None, which returns the Embed Floats response type. Can be one or more of the following types.
      *
@@ -46,7 +48,7 @@ export interface V2EmbedRequest {
      * * `"binary"`: Use this when you want to get back signed binary embeddings. Valid for only v3 models.
      * * `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models.
      */
-    embeddingTypes?: Cohere.EmbeddingType[];
+    embeddingTypes: Cohere.EmbeddingType[];
     /**
      * One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.
      *

src/api/resources/v2/types/V2ChatRequestSafetyMode.ts

Lines changed: 2 additions & 4 deletions
@@ -3,14 +3,12 @@
  */
 
 /**
- * Used to select the [safety instruction](/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
+ * Used to select the [safety instruction](https://docs.cohere.com/v2/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
  * When `OFF` is specified, the safety instruction will be omitted.
  *
 * Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.
  *
- * **Note**: This parameter is only compatible with models [Command R 08-2024](/docs/command-r#august-2024-release), [Command R+ 08-2024](/docs/command-r-plus#august-2024-release) and newer.
- *
- * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+ * **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/v2/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/v2/docs/command-r-plus#august-2024-release) and newer.
  */
 export type V2ChatRequestSafetyMode = "CONTEXTUAL" | "STRICT" | "OFF";

src/api/resources/v2/types/V2ChatStreamRequestSafetyMode.ts

Lines changed: 2 additions & 4 deletions
@@ -3,14 +3,12 @@
  */
 
 /**
- * Used to select the [safety instruction](/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
+ * Used to select the [safety instruction](https://docs.cohere.com/v2/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
  * When `OFF` is specified, the safety instruction will be omitted.
  *
 * Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.
  *
- * **Note**: This parameter is only compatible with models [Command R 08-2024](/docs/command-r#august-2024-release), [Command R+ 08-2024](/docs/command-r-plus#august-2024-release) and newer.
- *
- * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+ * **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/v2/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/v2/docs/command-r-plus#august-2024-release) and newer.
  */
 export type V2ChatStreamRequestSafetyMode = "CONTEXTUAL" | "STRICT" | "OFF";

src/api/types/AssistantMessage.ts

Lines changed: 1 addition & 0 deletions
@@ -9,6 +9,7 @@ import * as Cohere from "../index";
  */
 export interface AssistantMessage {
     toolCalls?: Cohere.ToolCallV2[];
+    /** A chain-of-thought style reflection and plan that the model generates when working with Tools. */
     toolPlan?: string;
     content?: Cohere.AssistantMessageContent;
     citations?: Cohere.Citation[];

src/api/types/AssistantMessageResponse.ts

Lines changed: 1 addition & 0 deletions
@@ -10,6 +10,7 @@ import * as Cohere from "../index";
 export interface AssistantMessageResponse {
     role: "assistant";
     toolCalls?: Cohere.ToolCallV2[];
+    /** A chain-of-thought style reflection and plan that the model generates when working with Tools. */
     toolPlan?: string;
     content?: Cohere.AssistantMessageResponseContentItem[];
     citations?: Cohere.Citation[];

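Both message shapes gain documentation for `toolPlan`, the model's chain-of-thought style plan emitted alongside tool calls. A small sketch of surfacing it, using only the fields shown in these two files:

```typescript
import { Cohere } from "cohere-ai";

// Logs the model's plan and any requested tool calls from an assistant response message.
function describeToolUse(message: Cohere.AssistantMessageResponse): void {
    if (message.toolPlan) {
        console.log(`Plan: ${message.toolPlan}`);
    }
    if (message.toolCalls?.length) {
        console.log(`Requested ${message.toolCalls.length} tool call(s):`, message.toolCalls);
    }
}
```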
src/api/types/ChatFinishReason.ts

Lines changed: 12 additions & 15 deletions
@@ -4,22 +4,19 @@
 
 /**
  * The reason a chat request has finished.
+ *
+ * - **complete**: The model finished sending a complete message.
+ * - **max_tokens**: The number of generated tokens exceeded the model's context length or the value specified via the `max_tokens` parameter.
+ * - **stop_sequence**: One of the provided `stop_sequence` entries was reached in the model's generation.
+ * - **tool_call**: The model generated a Tool Call and is expecting a Tool Message in return
+ * - **error**: The generation failed due to an internal error
  */
-export type ChatFinishReason =
-    | "complete"
-    | "stop_sequence"
-    | "max_tokens"
-    | "tool_call"
-    | "error"
-    | "content_blocked"
-    | "error_limit";
+export type ChatFinishReason = "COMPLETE" | "STOP_SEQUENCE" | "MAX_TOKENS" | "TOOL_CALL" | "ERROR";
 
 export const ChatFinishReason = {
-    Complete: "complete",
-    StopSequence: "stop_sequence",
-    MaxTokens: "max_tokens",
-    ToolCall: "tool_call",
-    Error: "error",
-    ContentBlocked: "content_blocked",
-    ErrorLimit: "error_limit",
+    Complete: "COMPLETE",
+    StopSequence: "STOP_SEQUENCE",
+    MaxTokens: "MAX_TOKENS",
+    ToolCall: "TOOL_CALL",
+    Error: "ERROR",
 } as const;

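The finish reasons move to uppercase values and drop `content_blocked` and `error_limit`, so both the literal strings and the exported constants change for callers. A sketch of handling the new union exhaustively; that the chat response exposes this value as `finishReason` is assumed rather than shown in this diff:

```typescript
import { Cohere } from "cohere-ai";

// Exhaustive over the regenerated union; a missing case becomes a compile error.
function describeFinish(reason: Cohere.ChatFinishReason): string {
    switch (reason) {
        case Cohere.ChatFinishReason.Complete:
            return "The model finished sending a complete message.";
        case Cohere.ChatFinishReason.MaxTokens:
            return "Generation hit the context length or the max_tokens limit.";
        case Cohere.ChatFinishReason.StopSequence:
            return "A provided stop_sequence was reached.";
        case Cohere.ChatFinishReason.ToolCall:
            return "The model generated a tool call and expects a tool message.";
        case Cohere.ChatFinishReason.Error:
            return "The generation failed due to an internal error.";
    }
}
```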
src/api/types/ChatMessages.ts

Lines changed: 1 addition & 1 deletion
@@ -7,6 +7,6 @@ import * as Cohere from "../index";
 /**
  * A list of chat messages in chronological order, representing a conversation between the user and the model.
  *
- * Messages can be from `User`, `Assistant`, `Tool` and `System` roles. Learn more about messages and roles in [the Chat API guide](https://docs.cohere.com/docs/chat-api).
+ * Messages can be from `User`, `Assistant`, `Tool` and `System` roles. Learn more about messages and roles in [the Chat API guide](https://docs.cohere.com/v2/docs/chat-api).
  */
 export type ChatMessages = Cohere.ChatMessageV2[];

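Only the guide link changes here, but this is the type behind the `messages` field on both chat requests. A minimal conversation covering the documented roles, as a sketch (a tool message would follow the same pattern):

```typescript
import { Cohere } from "cohere-ai";

// A short conversation in chronological order, typed against the regenerated alias.
const messages: Cohere.ChatMessages = [
    { role: "system", content: "You are a concise assistant." },
    { role: "user", content: "What does the embed endpoint return?" },
    { role: "assistant", content: "Vector embeddings for each of the provided inputs." },
];
```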
src/api/types/Citation.ts

Lines changed: 3 additions & 0 deletions
@@ -8,8 +8,11 @@ import * as Cohere from "../index";
  * Citation information containing sources and the text cited.
  */
 export interface Citation {
+    /** Start index of the cited snippet in the original source text. */
     start?: number;
+    /** End index of the cited snippet in the original source text. */
     end?: number;
+    /** Text snippet that is being cited. */
     text?: string;
     sources?: Cohere.Source[];
 }

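With `start`, `end` and `text` now documented, the citation span can be reported directly. A sketch that prints each cited snippet from an assistant message, using the `citations` field shown in `AssistantMessageResponse.ts` above:

```typescript
import { Cohere } from "cohere-ai";

// Prints each cited snippet with its character span and the number of backing sources.
function printCitations(message: Cohere.AssistantMessageResponse): void {
    for (const citation of message.citations ?? []) {
        const span = `[${citation.start ?? "?"}..${citation.end ?? "?"}]`;
        const sourceCount = citation.sources?.length ?? 0;
        console.log(`${span} "${citation.text ?? ""}" (${sourceCount} source(s))`);
    }
}
```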
src/api/types/JsonResponseFormatV2.ts

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@
 
 export interface JsonResponseFormatV2 {
     /**
-     * [BETA] A JSON schema object that the output will adhere to. There are some restrictions we have on the schema, refer to [our guide](/docs/structured-outputs-json#schema-constraints) for more information.
+     * A [JSON schema](https://json-schema.org/overview/what-is-jsonschema) object that the output will adhere to. There are some restrictions we have on the schema, refer to [our guide](/docs/structured-outputs-json#schema-constraints) for more information.
      * Example (required name and age object):
      *
      * ```json

src/api/types/ResponseFormatV2.ts

Lines changed: 6 additions & 3 deletions
@@ -5,14 +5,17 @@
 import * as Cohere from "../index";
 
 /**
- * Configuration for forcing the model output to adhere to the specified format. Supported on [Command R](https://docs.cohere.com/docs/command-r), [Command R+](https://docs.cohere.com/docs/command-r-plus) and newer models.
+ * Configuration for forcing the model output to adhere to the specified format. Supported on [Command R](https://docs.cohere.com/v2/docs/command-r), [Command R+](https://docs.cohere.com/v2/docs/command-r-plus) and newer models.
  *
- * The model can be forced into outputting JSON objects (with up to 5 levels of nesting) by setting `{ "type": "json_object" }`.
+ * The model can be forced into outputting JSON objects by setting `{ "type": "json_object" }`.
  *
  * A [JSON Schema](https://json-schema.org/) can optionally be provided, to ensure a specific structure.
  *
  * **Note**: When using `{ "type": "json_object" }` your `message` should always explicitly instruct the model to generate a JSON (eg: _"Generate a JSON ..."_) . Otherwise the model may end up getting stuck generating an infinite stream of characters and eventually run out of context length.
- * **Limitation**: The parameter is not supported in RAG mode (when any of `connectors`, `documents`, `tools`, `tool_results` are provided).
+ *
+ * **Note**: When `json_schema` is not specified, the generated object can have up to 5 layers of nesting.
+ *
+ * **Limitation**: The parameter is not supported when used in combinations with the `documents` or `tools` parameters.
  */
 export type ResponseFormatV2 = Cohere.ResponseFormatV2.Text | Cohere.ResponseFormatV2.JsonObject;

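To make the documented `json_object` mode concrete, here is a sketch of a request that supplies a schema. The camelCase `jsonSchema` property name is an assumption based on the SDK's naming convention (the docs above use the wire name `json_schema`), and the model name is illustrative:

```typescript
import { Cohere } from "cohere-ai";

// As the note above says, the message should explicitly ask for JSON output.
const responseFormat: Cohere.ResponseFormatV2 = {
    type: "json_object",
    jsonSchema: {
        type: "object",
        required: ["name", "age"],
        properties: {
            name: { type: "string" },
            age: { type: "integer" },
        },
    },
};

const request: Cohere.V2ChatRequest = {
    model: "command-r-plus-08-2024", // assumed model name
    messages: [{ role: "user", content: "Generate a JSON object describing a person." }],
    responseFormat,
};
```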
src/api/types/ToolCallV2.ts

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
 import * as Cohere from "../index";
 
 /**
- * A array of tool calls to be made.
+ * An array of tool calls to be made.
  */
 export interface ToolCallV2 {
     id?: string;

src/serialization/resources/v2/client/requests/V2EmbedRequest.ts

Lines changed: 4 additions & 7 deletions
@@ -14,11 +14,8 @@ export const V2EmbedRequest: core.serialization.Schema<serializers.V2EmbedReques
     texts: core.serialization.list(core.serialization.string()).optional(),
     images: core.serialization.list(core.serialization.string()).optional(),
     model: core.serialization.string(),
-    inputType: core.serialization.property("input_type", EmbedInputType.optional()),
-    embeddingTypes: core.serialization.property(
-        "embedding_types",
-        core.serialization.list(EmbeddingType).optional()
-    ),
+    inputType: core.serialization.property("input_type", EmbedInputType),
+    embeddingTypes: core.serialization.property("embedding_types", core.serialization.list(EmbeddingType)),
     truncate: V2EmbedRequestTruncate.optional(),
 });
 
@@ -27,8 +24,8 @@ export declare namespace V2EmbedRequest {
         texts?: string[] | null;
         images?: string[] | null;
         model: string;
-        input_type?: EmbedInputType.Raw | null;
-        embedding_types?: EmbeddingType.Raw[] | null;
+        input_type: EmbedInputType.Raw;
+        embedding_types: EmbeddingType.Raw[];
         truncate?: V2EmbedRequestTruncate.Raw | null;
     }
 }

src/serialization/types/ChatFinishReason.ts

Lines changed: 2 additions & 10 deletions
@@ -7,16 +7,8 @@ import * as Cohere from "../../api/index";
 import * as core from "../../core";
 
 export const ChatFinishReason: core.serialization.Schema<serializers.ChatFinishReason.Raw, Cohere.ChatFinishReason> =
-    core.serialization.enum_([
-        "complete",
-        "stop_sequence",
-        "max_tokens",
-        "tool_call",
-        "error",
-        "content_blocked",
-        "error_limit",
-    ]);
+    core.serialization.enum_(["COMPLETE", "STOP_SEQUENCE", "MAX_TOKENS", "TOOL_CALL", "ERROR"]);
 
 export declare namespace ChatFinishReason {
-    type Raw = "complete" | "stop_sequence" | "max_tokens" | "tool_call" | "error" | "content_blocked" | "error_limit";
+    type Raw = "COMPLETE" | "STOP_SEQUENCE" | "MAX_TOKENS" | "TOOL_CALL" | "ERROR";
 }
