diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index d5ae1e64..3d37ffed 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 9c8bd4d6bf675b159a80173b97c1265c + docChecksum: e0186c33d0269977e1790dfcc7d11aac docVersion: 1.0.0 speakeasyVersion: 1.568.2 generationVersion: 2.634.2 - releaseVersion: 1.9.0 - configChecksum: a67788bf50c3de92f0ef16f385b615b3 + releaseVersion: 1.9.1 + configChecksum: 5f97671226b9fdcc9adc3c7662003247 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -193,6 +193,7 @@ generatedFiles: - docs/models/entries.md - docs/models/eventout.md - docs/models/file.md + - docs/models/filechunk.md - docs/models/filepurpose.md - docs/models/filesapiroutesdeletefilerequest.md - docs/models/filesapiroutesdownloadfilerequest.md @@ -212,7 +213,6 @@ generatedFiles: - docs/models/ftclassifierlossfunction.md - docs/models/ftmodelcapabilitiesout.md - docs/models/ftmodelcard.md - - docs/models/ftmodelcardtype.md - docs/models/function.md - docs/models/functioncall.md - docs/models/functioncallentry.md @@ -280,7 +280,6 @@ generatedFiles: - docs/models/messageinputentry.md - docs/models/messageinputentrycontent.md - docs/models/messageinputentryrole.md - - docs/models/messageinputentrytype.md - docs/models/messageoutputcontentchunks.md - docs/models/messageoutputentry.md - docs/models/messageoutputentrycontent.md @@ -506,6 +505,7 @@ generatedFiles: - src/mistralai/models/embeddingresponse.py - src/mistralai/models/embeddingresponsedata.py - src/mistralai/models/eventout.py + - src/mistralai/models/filechunk.py - src/mistralai/models/filepurpose.py - src/mistralai/models/files_api_routes_delete_fileop.py - src/mistralai/models/files_api_routes_download_fileop.py @@ -652,7 +652,7 @@ examples: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": true, "fine_tuning": false, "vision": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": true, "fine_tuning": false, "vision": false, "classification": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} "422": application/json: {} delete_model_v1_models__model_id__delete: diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 2c46f3c0..6eb63598 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -21,7 +21,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.9.0 + version: 1.9.1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 12dfd152..2e95e59c 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,10 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:21244d618cafcc163c3aa4acbc443ca16c63b8614632b65b87fbb2c4066987f3 - sourceBlobDigest: sha256:74aeb6a2e0d466c206f983ce79581cc72d205cc7866826282c181207ebe841a2 + sourceRevisionDigest: 
sha256:e9fd379cd22f75a10ccc5b866f4de98f973c3c0f77cb15a7bebcb94bf10c82f2 + sourceBlobDigest: sha256:4bb656d10d1cfbe09f9b1b7734c79f1855eb27184590362d3747116f6abf69d1 tags: - latest + - speakeasy-sdk-regen-1751306196 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -36,23 +37,23 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:21244d618cafcc163c3aa4acbc443ca16c63b8614632b65b87fbb2c4066987f3 - sourceBlobDigest: sha256:74aeb6a2e0d466c206f983ce79581cc72d205cc7866826282c181207ebe841a2 + sourceRevisionDigest: sha256:e9fd379cd22f75a10ccc5b866f4de98f973c3c0f77cb15a7bebcb94bf10c82f2 + sourceBlobDigest: sha256:4bb656d10d1cfbe09f9b1b7734c79f1855eb27184590362d3747116f6abf69d1 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:b631243aae349ddebec1b984874a8e1d5b40e67d6229a199a3d5e63ba69d1538 + codeSamplesRevisionDigest: sha256:85311d42c06d86b38d49ed14b2485f45ad219ef76da40dfbec2592bb75dcaf00 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.568.2 sources: mistral-azure-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:sha256:c5931a7e0cc2db844149d71db57dfc2178665f0400bc26c90ee113795ea2872f + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main mistral-google-cloud-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:sha256:4a5343e63c6a78152e472b00ccc46d7bcb15594496bc94c8040039d3a9d4c5f8 + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main mistral-openapi: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:sha256:21244d618cafcc163c3aa4acbc443ca16c63b8614632b65b87fbb2c4066987f3 + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main targets: mistralai-azure-sdk: target: python diff --git a/RELEASES.md b/RELEASES.md index 265eda73..3d7513fc 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -238,4 +238,14 @@ Based on: ### Generated - [python v1.8.2] . ### Releases -- [PyPI v1.8.2] https://pypi.org/project/mistralai/1.8.2 - . \ No newline at end of file +- [PyPI v1.8.2] https://pypi.org/project/mistralai/1.8.2 - . + +## 2025-06-30 17:56:20 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.1] . +### Releases +- [PyPI v1.9.1] https://pypi.org/project/mistralai/1.9.1 - . \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsgetrequest.md b/docs/models/agentsapiv1conversationsgetrequest.md index 0d2d7827..67d450c8 100644 --- a/docs/models/agentsapiv1conversationsgetrequest.md +++ b/docs/models/agentsapiv1conversationsgetrequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching metadata. 
| \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationshistoryrequest.md b/docs/models/agentsapiv1conversationshistoryrequest.md index f0d4f049..7e5d39e9 100644 --- a/docs/models/agentsapiv1conversationshistoryrequest.md +++ b/docs/models/agentsapiv1conversationshistoryrequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching entries. | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsmessagesrequest.md b/docs/models/agentsapiv1conversationsmessagesrequest.md index b3189925..a91ab046 100644 --- a/docs/models/agentsapiv1conversationsmessagesrequest.md +++ b/docs/models/agentsapiv1conversationsmessagesrequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching messages. | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsrestartrequest.md b/docs/models/agentsapiv1conversationsrestartrequest.md index 11a2fe2e..a18a41f5 100644 --- a/docs/models/agentsapiv1conversationsrestartrequest.md +++ b/docs/models/agentsapiv1conversationsrestartrequest.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. 
| | `conversation_restart_request` | [models.ConversationRestartRequest](../models/conversationrestartrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsrestartstreamrequest.md b/docs/models/agentsapiv1conversationsrestartstreamrequest.md index 4cbb9d6c..7548286a 100644 --- a/docs/models/agentsapiv1conversationsrestartstreamrequest.md +++ b/docs/models/agentsapiv1conversationsrestartstreamrequest.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. | | `conversation_restart_stream_request` | [models.ConversationRestartStreamRequest](../models/conversationrestartstreamrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md index e4a3f849..73615ed9 100644 --- a/docs/models/agentscompletionrequest.md +++ b/docs/models/agentscompletionrequest.md @@ -18,5 +18,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md index bd55190b..b0aac6c1 100644 --- a/docs/models/agentscompletionstreamrequest.md +++ b/docs/models/agentscompletionstreamrequest.md @@ -18,5 +18,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. 
| | \ No newline at end of file diff --git a/docs/models/basemodelcard.md b/docs/models/basemodelcard.md index a2a19fcb..18cb9a27 100644 --- a/docs/models/basemodelcard.md +++ b/docs/models/basemodelcard.md @@ -15,5 +15,6 @@ | `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | | `aliases` | List[*str*] | :heavy_minus_sign: | N/A | | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | *Optional[Literal["base"]]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index ecbcad39..a9806a4d 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -21,5 +21,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index 7f73a269..6faeb411 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -21,5 +21,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/docs/models/contentchunk.md b/docs/models/contentchunk.md index 8cf7fad1..a65cd054 100644 --- a/docs/models/contentchunk.md +++ b/docs/models/contentchunk.md @@ -27,3 +27,9 @@ value: models.TextChunk = /* values here */ value: models.ReferenceChunk = /* values here */ ``` +### `models.FileChunk` + +```python +value: models.FileChunk = /* values here */ +``` + diff --git a/docs/models/conversationrestartrequest.md b/docs/models/conversationrestartrequest.md index 16786f6a..61679df6 100644 --- a/docs/models/conversationrestartrequest.md +++ b/docs/models/conversationrestartrequest.md @@ -11,5 +11,5 @@ Request to restart a new conversation from a given entry in the conversation. | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | -| `from_entry_id` | *str* | :heavy_check_mark: | N/A | -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | \ No newline at end of file +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationrestartstreamrequest.md b/docs/models/conversationrestartstreamrequest.md index 23bf9851..9548b336 100644 --- a/docs/models/conversationrestartstreamrequest.md +++ b/docs/models/conversationrestartstreamrequest.md @@ -11,5 +11,5 @@ Request to restart a new conversation from a given entry in the conversation. | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. 
| | `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | -| `from_entry_id` | *str* | :heavy_check_mark: | N/A | -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | \ No newline at end of file +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/document.md b/docs/models/document.md index e2940355..509d43b7 100644 --- a/docs/models/document.md +++ b/docs/models/document.md @@ -5,6 +5,12 @@ Document to run OCR on ## Supported Types +### `models.FileChunk` + +```python +value: models.FileChunk = /* values here */ +``` + ### `models.DocumentURLChunk` ```python diff --git a/docs/models/filechunk.md b/docs/models/filechunk.md new file mode 100644 index 00000000..18217114 --- /dev/null +++ b/docs/models/filechunk.md @@ -0,0 +1,9 @@ +# FileChunk + + +## Fields + +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `type` | *Optional[Literal["file"]]* | :heavy_minus_sign: | N/A | +| `file_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/ftmodelcard.md b/docs/models/ftmodelcard.md index aaa5b401..a286f04e 100644 --- a/docs/models/ftmodelcard.md +++ b/docs/models/ftmodelcard.md @@ -17,8 +17,9 @@ Extra fields for fine-tuned models. | `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | | `aliases` | List[*str*] | :heavy_minus_sign: | N/A | | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.FTModelCardType]](../models/ftmodelcardtype.md) | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["fine-tuned"]]* | :heavy_minus_sign: | N/A | | `job` | *str* | :heavy_check_mark: | N/A | | `root` | *str* | :heavy_check_mark: | N/A | | `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/ftmodelcardtype.md b/docs/models/ftmodelcardtype.md deleted file mode 100644 index 0b38470b..00000000 --- a/docs/models/ftmodelcardtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# FTModelCardType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `FINE_TUNED` | fine-tuned | \ No newline at end of file diff --git a/docs/models/inputentries.md b/docs/models/inputentries.md index e1e48279..b44a467d 100644 --- a/docs/models/inputentries.md +++ b/docs/models/inputentries.md @@ -9,9 +9,33 @@ value: models.MessageInputEntry = /* values here */ ``` +### `models.MessageOutputEntry` + +```python +value: models.MessageOutputEntry = /* values here */ +``` + ### `models.FunctionResultEntry` ```python value: models.FunctionResultEntry = /* values here */ ``` +### `models.FunctionCallEntry` + +```python +value: models.FunctionCallEntry = /* values here */ +``` + +### `models.ToolExecutionEntry` + +```python +value: models.ToolExecutionEntry = /* values here */ +``` + +### 
`models.AgentHandoffEntry` + +```python +value: models.AgentHandoffEntry = /* values here */ +``` + diff --git a/docs/models/messageinputentry.md b/docs/models/messageinputentry.md index f5bb6c25..d0168f6e 100644 --- a/docs/models/messageinputentry.md +++ b/docs/models/messageinputentry.md @@ -5,12 +5,12 @@ Representation of an input message inside the conversation. ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.MessageInputEntryType]](../models/messageinputentrytype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `role` | [models.MessageInputEntryRole](../models/messageinputentryrole.md) | :heavy_check_mark: | N/A | -| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `role` | [models.MessageInputEntryRole](../models/messageinputentryrole.md) | :heavy_check_mark: | N/A | +| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/messageinputentrytype.md b/docs/models/messageinputentrytype.md deleted file mode 100644 index d3378124..00000000 --- a/docs/models/messageinputentrytype.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageInputEntryType - - -## Values - -| Name | Value | -| --------------- | --------------- | -| `MESSAGE_INPUT` | message.input | \ No newline at end of file diff --git a/docs/models/modelcapabilities.md b/docs/models/modelcapabilities.md index 2e399ab6..36b27938 100644 --- a/docs/models/modelcapabilities.md +++ b/docs/models/modelcapabilities.md @@ -9,4 +9,5 @@ | `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `vision` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `vision` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `classification` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ 
No newline at end of file diff --git a/docs/models/type.md b/docs/models/type.md index 239a00f5..357acf0b 100644 --- a/docs/models/type.md +++ b/docs/models/type.md @@ -3,6 +3,6 @@ ## Values -| Name | Value | -| ------ | ------ | -| `BASE` | base | \ No newline at end of file +| Name | Value | +| --------------- | --------------- | +| `MESSAGE_INPUT` | message.input | \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index d5014a36..6bab08dd 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -55,7 +55,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -116,7 +116,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index f8aca31f..7b467b58 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -57,7 +57,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | @@ -121,7 +121,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index 25b1ab9c..38d5915b 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -135,7 +135,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching metadata. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -220,7 +220,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching entries. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -260,7 +260,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching messages. 
| | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -300,7 +300,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. | | `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | | `from_entry_id` | *str* | :heavy_check_mark: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | @@ -461,7 +461,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. | | `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | | `from_entry_id` | *str* | :heavy_check_mark: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index d7a5ed85..7dd5d1de 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -55,7 +55,7 @@ with Mistral( ## retrieve -Retrieve a model information. +Retrieve information about a model. ### Example Usage diff --git a/examples/ocr_process_from_file.py b/examples/ocr_process_from_file.py index 70c9d4a8..84a7b4d8 100644 --- a/examples/ocr_process_from_file.py +++ b/examples/ocr_process_from_file.py @@ -26,12 +26,9 @@ def main(): purpose="ocr", ) - signed_url = client.files.get_signed_url(file_id=uploaded_file.id, expiry=1) - pdf_response = client.ocr.process(document={ - "document_url": signed_url.url, - "type": "document_url", - "document_name": "mistral-7b-pdf", + "type": "file", + "file_id": uploaded_file.id, }, model="mistral-ocr-latest", include_image_base64=True) # Print the parsed PDF diff --git a/pyproject.toml b/pyproject.toml index f8cf20a9..5cf64972 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.9.0" +version = "1.9.1" description = "Python Client SDK for the Mistral AI API." 
authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index db4f08fe..d22b4e90 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.9.0" +__version__: str = "1.9.1" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.634.2" -__user_agent__: str = "speakeasy-sdk/python 1.9.0 2.634.2 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.9.1 2.634.2 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index febc3383..48c06372 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -69,7 +69,7 @@ def complete( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -239,7 +239,7 @@ async def complete_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -411,7 +411,7 @@ def stream( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -589,7 +589,7 @@ async def stream_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 8556c5a0..1ed067e8 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -148,7 +148,7 @@ def complete( :param n: Number of completions to return for each request, input tokens are only billed once. 
:param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -317,7 +317,7 @@ async def complete_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -496,7 +496,7 @@ def stream( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -683,7 +683,7 @@ async def stream_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py index f8b6ec2c..009df94d 100644 --- a/src/mistralai/conversations.py +++ b/src/mistralai/conversations.py @@ -712,7 +712,7 @@ def get( Given a conversation_id retrieve a conversation entity with its attributes. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching metadata. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -815,7 +815,7 @@ async def get_async( Given a conversation_id retrieve a conversation entity with its attributes. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching metadata. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1178,7 +1178,7 @@ def get_history( Given a conversation_id retrieve all the entries belonging to that conversation. 
The entries are sorted in the order they were appended, those can be messages, connectors or function_call. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching entries. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1278,7 +1278,7 @@ async def get_history_async( Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching entries. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1378,7 +1378,7 @@ def get_messages( Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching messages. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1478,7 +1478,7 @@ async def get_messages_async( Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching messages. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1588,7 +1588,7 @@ def restart( Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - :param conversation_id: + :param conversation_id: ID of the original conversation which is being restarted. :param inputs: :param from_entry_id: :param stream: @@ -1617,10 +1617,10 @@ def restart( stream=stream, store=store, handoff_execution=handoff_execution, - from_entry_id=from_entry_id, completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + from_entry_id=from_entry_id, ), ) @@ -1721,7 +1721,7 @@ async def restart_async( Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - :param conversation_id: + :param conversation_id: ID of the original conversation which is being restarted. 
:param inputs: :param from_entry_id: :param stream: @@ -1750,10 +1750,10 @@ async def restart_async( stream=stream, store=store, handoff_execution=handoff_execution, - from_entry_id=from_entry_id, completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + from_entry_id=from_entry_id, ), ) @@ -2426,7 +2426,7 @@ def restart_stream( Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - :param conversation_id: + :param conversation_id: ID of the original conversation which is being restarted. :param inputs: :param from_entry_id: :param stream: @@ -2455,10 +2455,10 @@ def restart_stream( stream=stream, store=store, handoff_execution=handoff_execution, - from_entry_id=from_entry_id, completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + from_entry_id=from_entry_id, ), ) @@ -2564,7 +2564,7 @@ async def restart_stream_async( Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - :param conversation_id: + :param conversation_id: ID of the original conversation which is being restarted. :param inputs: :param from_entry_id: :param stream: @@ -2593,10 +2593,10 @@ async def restart_stream_async( stream=stream, store=store, handoff_execution=handoff_execution, - from_entry_id=from_entry_id, completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + from_entry_id=from_entry_id, ), ) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 1b7b9c6c..a44de97c 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -129,7 +129,7 @@ AssistantMessageRole, AssistantMessageTypedDict, ) - from .basemodelcard import BaseModelCard, BaseModelCardTypedDict, Type + from .basemodelcard import BaseModelCard, BaseModelCardTypedDict from .batcherror import BatchError, BatchErrorTypedDict from .batchjobin import BatchJobIn, BatchJobInTypedDict from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict @@ -378,6 +378,7 @@ EmbeddingResponseDataTypedDict, ) from .eventout import EventOut, EventOutTypedDict + from .filechunk import FileChunk, FileChunkTypedDict from .filepurpose import FilePurpose from .files_api_routes_delete_fileop import ( FilesAPIRoutesDeleteFileRequest, @@ -429,7 +430,7 @@ FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict, ) - from .ftmodelcard import FTModelCard, FTModelCardType, FTModelCardTypedDict + from .ftmodelcard import FTModelCard, FTModelCardTypedDict from .function import Function, FunctionTypedDict from .functioncall import ( Arguments, @@ -589,9 +590,9 @@ MessageInputEntryContent, MessageInputEntryContentTypedDict, MessageInputEntryRole, - MessageInputEntryType, MessageInputEntryTypedDict, Object, + Type, ) from .messageoutputcontentchunks import ( MessageOutputContentChunks, @@ -1020,9 +1021,10 @@ "FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelCard", - "FTModelCardType", "FTModelCardTypedDict", "File", + "FileChunk", + "FileChunkTypedDict", "FilePurpose", "FileSchema", "FileSchemaTypedDict", @@ -1161,7 +1163,6 @@ "MessageInputEntryContent", "MessageInputEntryContentTypedDict", "MessageInputEntryRole", - "MessageInputEntryType", "MessageInputEntryTypedDict", "MessageOutputContentChunks", "MessageOutputContentChunksTypedDict", @@ -1411,7 +1412,6 @@ 
"AssistantMessageTypedDict": ".assistantmessage", "BaseModelCard": ".basemodelcard", "BaseModelCardTypedDict": ".basemodelcard", - "Type": ".basemodelcard", "BatchError": ".batcherror", "BatchErrorTypedDict": ".batcherror", "BatchJobIn": ".batchjobin", @@ -1603,6 +1603,8 @@ "EmbeddingResponseDataTypedDict": ".embeddingresponsedata", "EventOut": ".eventout", "EventOutTypedDict": ".eventout", + "FileChunk": ".filechunk", + "FileChunkTypedDict": ".filechunk", "FilePurpose": ".filepurpose", "FilesAPIRoutesDeleteFileRequest": ".files_api_routes_delete_fileop", "FilesAPIRoutesDeleteFileRequestTypedDict": ".files_api_routes_delete_fileop", @@ -1637,7 +1639,6 @@ "FTModelCapabilitiesOut": ".ftmodelcapabilitiesout", "FTModelCapabilitiesOutTypedDict": ".ftmodelcapabilitiesout", "FTModelCard": ".ftmodelcard", - "FTModelCardType": ".ftmodelcard", "FTModelCardTypedDict": ".ftmodelcard", "Function": ".function", "FunctionTypedDict": ".function", @@ -1756,9 +1757,9 @@ "MessageInputEntryContent": ".messageinputentry", "MessageInputEntryContentTypedDict": ".messageinputentry", "MessageInputEntryRole": ".messageinputentry", - "MessageInputEntryType": ".messageinputentry", "MessageInputEntryTypedDict": ".messageinputentry", "Object": ".messageinputentry", + "Type": ".messageinputentry", "MessageOutputContentChunks": ".messageoutputcontentchunks", "MessageOutputContentChunksTypedDict": ".messageoutputcontentchunks", "MessageOutputEntry": ".messageoutputentry", diff --git a/src/mistralai/models/agents_api_v1_conversations_getop.py b/src/mistralai/models/agents_api_v1_conversations_getop.py index 4a800ad6..a37a61ba 100644 --- a/src/mistralai/models/agents_api_v1_conversations_getop.py +++ b/src/mistralai/models/agents_api_v1_conversations_getop.py @@ -11,12 +11,14 @@ class AgentsAPIV1ConversationsGetRequestTypedDict(TypedDict): conversation_id: str + r"""ID of the conversation from which we are fetching metadata.""" class AgentsAPIV1ConversationsGetRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] + r"""ID of the conversation from which we are fetching metadata.""" AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict = TypeAliasType( diff --git a/src/mistralai/models/agents_api_v1_conversations_historyop.py b/src/mistralai/models/agents_api_v1_conversations_historyop.py index 09fb6081..b8c33d1b 100644 --- a/src/mistralai/models/agents_api_v1_conversations_historyop.py +++ b/src/mistralai/models/agents_api_v1_conversations_historyop.py @@ -8,9 +8,11 @@ class AgentsAPIV1ConversationsHistoryRequestTypedDict(TypedDict): conversation_id: str + r"""ID of the conversation from which we are fetching entries.""" class AgentsAPIV1ConversationsHistoryRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] + r"""ID of the conversation from which we are fetching entries.""" diff --git a/src/mistralai/models/agents_api_v1_conversations_messagesop.py b/src/mistralai/models/agents_api_v1_conversations_messagesop.py index ade66e5e..f0dac8bf 100644 --- a/src/mistralai/models/agents_api_v1_conversations_messagesop.py +++ b/src/mistralai/models/agents_api_v1_conversations_messagesop.py @@ -8,9 +8,11 @@ class AgentsAPIV1ConversationsMessagesRequestTypedDict(TypedDict): conversation_id: str + r"""ID of the conversation from which we are fetching messages.""" class AgentsAPIV1ConversationsMessagesRequest(BaseModel): conversation_id: Annotated[ str, 
FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] + r"""ID of the conversation from which we are fetching messages.""" diff --git a/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py b/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py index c8fd8475..f39b74eb 100644 --- a/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py +++ b/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py @@ -12,6 +12,7 @@ class AgentsAPIV1ConversationsRestartStreamRequestTypedDict(TypedDict): conversation_id: str + r"""ID of the original conversation which is being restarted.""" conversation_restart_stream_request: ConversationRestartStreamRequestTypedDict @@ -19,6 +20,7 @@ class AgentsAPIV1ConversationsRestartStreamRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] + r"""ID of the original conversation which is being restarted.""" conversation_restart_stream_request: Annotated[ ConversationRestartStreamRequest, diff --git a/src/mistralai/models/agents_api_v1_conversations_restartop.py b/src/mistralai/models/agents_api_v1_conversations_restartop.py index aa867aff..f706c066 100644 --- a/src/mistralai/models/agents_api_v1_conversations_restartop.py +++ b/src/mistralai/models/agents_api_v1_conversations_restartop.py @@ -12,6 +12,7 @@ class AgentsAPIV1ConversationsRestartRequestTypedDict(TypedDict): conversation_id: str + r"""ID of the original conversation which is being restarted.""" conversation_restart_request: ConversationRestartRequestTypedDict @@ -19,6 +20,7 @@ class AgentsAPIV1ConversationsRestartRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] + r"""ID of the original conversation which is being restarted.""" conversation_restart_request: Annotated[ ConversationRestartRequest, diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py index 2e3c35f8..c832edfd 100644 --- a/src/mistralai/models/agentscompletionrequest.py +++ b/src/mistralai/models/agentscompletionrequest.py @@ -89,6 +89,7 @@ class AgentsCompletionRequestTypedDict(TypedDict): prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" class AgentsCompletionRequest(BaseModel): @@ -132,6 +133,7 @@ class AgentsCompletionRequest(BaseModel): prompt_mode: Annotated[ OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py index a74842f6..6e619b77 100644 --- a/src/mistralai/models/agentscompletionstreamrequest.py +++ b/src/mistralai/models/agentscompletionstreamrequest.py @@ -88,6 +88,7 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict): prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" class AgentsCompletionStreamRequest(BaseModel): @@ -130,6 +131,7 @@ class AgentsCompletionStreamRequest(BaseModel): prompt_mode: Annotated[ OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/models/basemodelcard.py b/src/mistralai/models/basemodelcard.py index 8a4e3710..fc852f4b 100644 --- a/src/mistralai/models/basemodelcard.py +++ b/src/mistralai/models/basemodelcard.py @@ -12,9 +12,6 @@ from typing_extensions import Annotated, NotRequired, TypedDict -Type = Literal["base"] - - class BaseModelCardTypedDict(TypedDict): id: str capabilities: ModelCapabilitiesTypedDict @@ -26,8 +23,9 @@ class BaseModelCardTypedDict(TypedDict): max_context_length: NotRequired[int] aliases: NotRequired[List[str]] deprecation: NotRequired[Nullable[datetime]] + deprecation_replacement_model: NotRequired[Nullable[str]] default_model_temperature: NotRequired[Nullable[float]] - type: Type + type: Literal["base"] class BaseModelCard(BaseModel): @@ -51,10 +49,12 @@ class BaseModelCard(BaseModel): deprecation: OptionalNullable[datetime] = UNSET + deprecation_replacement_model: OptionalNullable[str] = UNSET + default_model_temperature: OptionalNullable[float] = UNSET TYPE: Annotated[ - Annotated[Optional[Type], AfterValidator(validate_const("base"))], + Annotated[Optional[Literal["base"]], AfterValidator(validate_const("base"))], pydantic.Field(alias="type"), ] = "base" @@ -69,6 +69,7 @@ def serialize_model(self, handler): "max_context_length", "aliases", "deprecation", + "deprecation_replacement_model", "default_model_temperature", "type", ] @@ -76,6 +77,7 @@ def serialize_model(self, handler): "name", "description", "deprecation", + "deprecation_replacement_model", "default_model_temperature", ] null_default_fields = [] diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index ac90de32..6f195f13 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -89,6 +89,7 @@ class ChatCompletionRequestTypedDict(TypedDict): prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -140,6 +141,7 @@ class ChatCompletionRequest(BaseModel): prompt_mode: Annotated[ OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py
index ac90de32..6f195f13 100644
--- a/src/mistralai/models/chatcompletionrequest.py
+++ b/src/mistralai/models/chatcompletionrequest.py
@@ -89,6 +89,7 @@ class ChatCompletionRequestTypedDict(TypedDict):
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
     prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""
@@ -140,6 +141,7 @@ class ChatCompletionRequest(BaseModel):
     prompt_mode: Annotated[
         OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
     ] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

     safe_prompt: Optional[bool] = None
     r"""Whether to inject a safety prompt before all conversations."""
diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py
index 00f57144..0fa102e5 100644
--- a/src/mistralai/models/chatcompletionstreamrequest.py
+++ b/src/mistralai/models/chatcompletionstreamrequest.py
@@ -92,6 +92,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     prediction: NotRequired[PredictionTypedDict]
     parallel_tool_calls: NotRequired[bool]
     prompt_mode: NotRequired[Nullable[MistralPromptMode]]
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""
@@ -142,6 +143,7 @@ class ChatCompletionStreamRequest(BaseModel):
     prompt_mode: Annotated[
         OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False))
     ] = UNSET
+    r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used."""

     safe_prompt: Optional[bool] = None
     r"""Whether to inject a safety prompt before all conversations."""
diff --git a/src/mistralai/models/contentchunk.py b/src/mistralai/models/contentchunk.py
index ff7d9fcf..4cb8ab6d 100644
--- a/src/mistralai/models/contentchunk.py
+++ b/src/mistralai/models/contentchunk.py
@@ -2,6 +2,7 @@

 from __future__ import annotations
 from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict
+from .filechunk import FileChunk, FileChunkTypedDict
 from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict
 from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict
 from .textchunk import TextChunk, TextChunkTypedDict
@@ -17,6 +18,7 @@
         TextChunkTypedDict,
         ImageURLChunkTypedDict,
         ReferenceChunkTypedDict,
+        FileChunkTypedDict,
         DocumentURLChunkTypedDict,
     ],
 )
@@ -28,6 +30,7 @@
         Annotated[DocumentURLChunk, Tag("document_url")],
         Annotated[TextChunk, Tag("text")],
         Annotated[ReferenceChunk, Tag("reference")],
+        Annotated[FileChunk, Tag("file")],
     ],
     Discriminator(lambda m: get_discriminator(m, "type", "type")),
 ]
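`file` joins the content chunk discriminator, so a chunk dict with `type: "file"` now validates into the new `FileChunk` model (added below). A sketch of referencing an uploaded file from message content — the file ID is a placeholder, server-side support depends on the model, and `client` is the one constructed in the first sketch:

```python
# The "type" discriminator routes this dict to FileChunk during
# request validation; text chunks are unchanged.
res = client.chat.complete(
    model="mistral-small-latest",  # placeholder model name
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Summarize this document."},
                {"type": "file", "file_id": "<uploaded-file-id>"},
            ],
        }
    ],
)
```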
diff --git a/src/mistralai/models/filechunk.py b/src/mistralai/models/filechunk.py
new file mode 100644
index 00000000..83e60cef
--- /dev/null
+++ b/src/mistralai/models/filechunk.py
@@ -0,0 +1,23 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.types import BaseModel
+from mistralai.utils import validate_const
+import pydantic
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+class FileChunkTypedDict(TypedDict):
+    file_id: str
+    type: Literal["file"]
+
+
+class FileChunk(BaseModel):
+    file_id: str
+
+    TYPE: Annotated[
+        Annotated[Optional[Literal["file"]], AfterValidator(validate_const("file"))],
+        pydantic.Field(alias="type"),
+    ] = "file"
diff --git a/src/mistralai/models/ftmodelcard.py b/src/mistralai/models/ftmodelcard.py
index 48937f48..286357e7 100644
--- a/src/mistralai/models/ftmodelcard.py
+++ b/src/mistralai/models/ftmodelcard.py
@@ -12,9 +12,6 @@
 from typing_extensions import Annotated, NotRequired, TypedDict


-FTModelCardType = Literal["fine-tuned"]
-
-
 class FTModelCardTypedDict(TypedDict):
     r"""Extra fields for fine-tuned models."""

@@ -30,8 +27,9 @@ class FTModelCardTypedDict(TypedDict):
     max_context_length: NotRequired[int]
     aliases: NotRequired[List[str]]
     deprecation: NotRequired[Nullable[datetime]]
+    deprecation_replacement_model: NotRequired[Nullable[str]]
     default_model_temperature: NotRequired[Nullable[float]]
-    type: FTModelCardType
+    type: Literal["fine-tuned"]
     archived: NotRequired[bool]


@@ -62,11 +60,14 @@ class FTModelCard(BaseModel):
     deprecation: OptionalNullable[datetime] = UNSET

+    deprecation_replacement_model: OptionalNullable[str] = UNSET
+
     default_model_temperature: OptionalNullable[float] = UNSET

     TYPE: Annotated[
         Annotated[
-            Optional[FTModelCardType], AfterValidator(validate_const("fine-tuned"))
+            Optional[Literal["fine-tuned"]],
+            AfterValidator(validate_const("fine-tuned")),
         ],
         pydantic.Field(alias="type"),
     ] = "fine-tuned"
@@ -84,6 +85,7 @@ def serialize_model(self, handler):
             "max_context_length",
             "aliases",
             "deprecation",
+            "deprecation_replacement_model",
             "default_model_temperature",
             "type",
             "archived",
         ]
@@ -92,6 +94,7 @@
             "name",
             "description",
             "deprecation",
+            "deprecation_replacement_model",
             "default_model_temperature",
         ]
         null_default_fields = []
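With the module-level `Type`/`FTModelCardType` aliases dropped, each card's `type` literal now lives inline on the class, so consumers narrowing the retrieve response should key off the classes themselves rather than the removed aliases. A sketch, reusing `client` from above (placeholder model ID):

```python
from mistralai import models

card = client.models.retrieve(model_id="<your-model-id>")
if isinstance(card, models.FTModelCard):
    # Fields specific to fine-tuned model cards.
    print(card.job, card.root, card.archived)
```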
diff --git a/src/mistralai/models/inputentries.py b/src/mistralai/models/inputentries.py
index 9c0fea6e..0221f968 100644
--- a/src/mistralai/models/inputentries.py
+++ b/src/mistralai/models/inputentries.py
@@ -1,18 +1,37 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict
+from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict
 from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict
 from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict
+from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict
+from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict
 from typing import Union
 from typing_extensions import TypeAliasType


 InputEntriesTypedDict = TypeAliasType(
     "InputEntriesTypedDict",
-    Union[MessageInputEntryTypedDict, FunctionResultEntryTypedDict],
+    Union[
+        MessageInputEntryTypedDict,
+        FunctionResultEntryTypedDict,
+        ToolExecutionEntryTypedDict,
+        FunctionCallEntryTypedDict,
+        MessageOutputEntryTypedDict,
+        AgentHandoffEntryTypedDict,
+    ],
 )


 InputEntries = TypeAliasType(
-    "InputEntries", Union[MessageInputEntry, FunctionResultEntry]
+    "InputEntries",
+    Union[
+        MessageInputEntry,
+        FunctionResultEntry,
+        ToolExecutionEntry,
+        FunctionCallEntry,
+        MessageOutputEntry,
+        AgentHandoffEntry,
+    ],
 )
diff --git a/src/mistralai/models/messageinputentry.py b/src/mistralai/models/messageinputentry.py
index 0ea6f24c..6f1190c7 100644
--- a/src/mistralai/models/messageinputentry.py
+++ b/src/mistralai/models/messageinputentry.py
@@ -14,7 +14,7 @@

 Object = Literal["entry"]

-MessageInputEntryType = Literal["message.input"]
+Type = Literal["message.input"]

 MessageInputEntryRole = Literal["assistant", "user"]

@@ -35,7 +35,7 @@ class MessageInputEntryTypedDict(TypedDict):
     role: MessageInputEntryRole
     content: MessageInputEntryContentTypedDict
     object: NotRequired[Object]
-    type: NotRequired[MessageInputEntryType]
+    type: NotRequired[Type]
     created_at: NotRequired[datetime]
     completed_at: NotRequired[Nullable[datetime]]
     id: NotRequired[str]
@@ -50,7 +50,7 @@ class MessageInputEntry(BaseModel):

     object: Optional[Object] = "entry"

-    type: Optional[MessageInputEntryType] = "message.input"
+    type: Optional[Type] = "message.input"

     created_at: Optional[datetime] = None
diff --git a/src/mistralai/models/modelcapabilities.py b/src/mistralai/models/modelcapabilities.py
index 961f8664..54c5f2a2 100644
--- a/src/mistralai/models/modelcapabilities.py
+++ b/src/mistralai/models/modelcapabilities.py
@@ -12,6 +12,7 @@ class ModelCapabilitiesTypedDict(TypedDict):
     function_calling: NotRequired[bool]
     fine_tuning: NotRequired[bool]
     vision: NotRequired[bool]
+    classification: NotRequired[bool]


 class ModelCapabilities(BaseModel):
@@ -24,3 +25,5 @@
     fine_tuning: Optional[bool] = False

     vision: Optional[bool] = False
+
+    classification: Optional[bool] = False
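`classification` joins the capability flags and, like its siblings, defaults to `False`. A sketch of filtering the model list on the new flag, again reusing `client`:

```python
page = client.models.list()
classifier_ids = [
    m.id
    for m in (page.data or [])  # data is optional on the list response
    if m.capabilities.classification
]
print(classifier_ids)
```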
diff --git a/src/mistralai/models/ocrrequest.py b/src/mistralai/models/ocrrequest.py
index 53ad6111..df932c2a 100644
--- a/src/mistralai/models/ocrrequest.py
+++ b/src/mistralai/models/ocrrequest.py
@@ -2,6 +2,7 @@

 from __future__ import annotations
 from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict
+from .filechunk import FileChunk, FileChunkTypedDict
 from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict
 from .responseformat import ResponseFormat, ResponseFormatTypedDict
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
@@ -11,12 +12,13 @@

 DocumentTypedDict = TypeAliasType(
-    "DocumentTypedDict", Union[ImageURLChunkTypedDict, DocumentURLChunkTypedDict]
+    "DocumentTypedDict",
+    Union[FileChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict],
 )
 r"""Document to run OCR on"""


-Document = TypeAliasType("Document", Union[ImageURLChunk, DocumentURLChunk])
+Document = TypeAliasType("Document", Union[FileChunk, ImageURLChunk, DocumentURLChunk])
 r"""Document to run OCR on"""
diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py
index 86259e17..b712c545 100644
--- a/src/mistralai/models_.py
+++ b/src/mistralai/models_.py
@@ -208,7 +208,7 @@ def retrieve(
     ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet:
         r"""Retrieve Model

-        Retrieve a model information.
+        Retrieve information about a model.

         :param model_id: The ID of the model to retrieve.
         :param retries: Override the default retry configuration for this method
@@ -311,7 +311,7 @@ async def retrieve_async(
     ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet:
         r"""Retrieve Model

-        Retrieve a model information.
+        Retrieve information about a model.

         :param model_id: The ID of the model to retrieve.
         :param retries: Override the default retry configuration for this method
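Tying the FileChunk and OCR changes together: the OCR `Document` union now accepts a file reference ahead of the existing `image_url` and `document_url` variants, so a previously uploaded file can be OCR'd by ID. A sketch — the file name and OCR model name are placeholders, and `purpose="ocr"` is an assumption about the accepted purpose value:

```python
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

uploaded = client.files.upload(
    file={"file_name": "report.pdf", "content": open("report.pdf", "rb")},
    purpose="ocr",  # assumed FilePurpose value for OCR inputs
)

# document={"type": "file", ...} validates into the new FileChunk variant.
ocr = client.ocr.process(
    model="mistral-ocr-latest",  # placeholder OCR model name
    document={"type": "file", "file_id": uploaded.id},
)
print(ocr.pages[0].markdown)
```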