diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index ad83b298..c568d4f3 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 12226a12b72a82af24e4b098c631ff42 + docChecksum: 9c8bd4d6bf675b159a80173b97c1265c docVersion: 1.0.0 speakeasyVersion: 1.517.3 generationVersion: 2.548.6 - releaseVersion: 1.8.1 - configChecksum: a47e6a59e54c30528cf829e1a6fcc310 + releaseVersion: 1.8.2 + configChecksum: 5024c28578f991eabb85310ad8df96b7 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -293,6 +293,7 @@ generatedFiles: - docs/models/messageoutputeventtype.md - docs/models/messages.md - docs/models/metricout.md + - docs/models/mistralpromptmode.md - docs/models/modelcapabilities.md - docs/models/modelconversation.md - docs/models/modelconversationobject.md @@ -562,6 +563,7 @@ generatedFiles: - src/mistralai/models/messageoutputentry.py - src/mistralai/models/messageoutputevent.py - src/mistralai/models/metricout.py + - src/mistralai/models/mistralpromptmode.py - src/mistralai/models/modelcapabilities.py - src/mistralai/models/modelconversation.py - src/mistralai/models/modellist.py diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index a8255953..77710816 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -15,7 +15,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.8.1 + version: 1.8.2 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index d38d57a2..c618ac1d 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:ab414b4936bea95f6713273bbcee90d66af9da18c6672d62ce885769c47edc34 - sourceBlobDigest: sha256:109235b09f0f6d400d146591dff959fac189a873fc95f4b32867d47d65779d28 + sourceRevisionDigest: sha256:21244d618cafcc163c3aa4acbc443ca16c63b8614632b65b87fbb2c4066987f3 + sourceBlobDigest: sha256:74aeb6a2e0d466c206f983ce79581cc72d205cc7866826282c181207ebe841a2 tags: - latest - - speakeasy-sdk-regen-1748446717 + - speakeasy-sdk-regen-1749573609 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:ab414b4936bea95f6713273bbcee90d66af9da18c6672d62ce885769c47edc34 - sourceBlobDigest: sha256:109235b09f0f6d400d146591dff959fac189a873fc95f4b32867d47d65779d28 + sourceRevisionDigest: sha256:21244d618cafcc163c3aa4acbc443ca16c63b8614632b65b87fbb2c4066987f3 + sourceBlobDigest: sha256:74aeb6a2e0d466c206f983ce79581cc72d205cc7866826282c181207ebe841a2 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:03b507fe6fdcabb21ec711d436300a3888b22fbfc970722bb3433db31c06047a + codeSamplesRevisionDigest: sha256:dc4396ba994048a9f31c008dced1a46a9e54d89973e9608039a7bc37b1052957 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.517.3 diff --git a/RELEASES.md b/RELEASES.md index 14663d6c..265eda73 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -228,4 +228,14 @@ Based on: ### Generated - [python v1.8.1] . ### Releases -- [PyPI v1.8.1] https://pypi.org/project/mistralai/1.8.1 - . \ No newline at end of file +- [PyPI v1.8.1] https://pypi.org/project/mistralai/1.8.1 - . 
+ +## 2025-06-10 16:42:28 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.8.2] . +### Releases +- [PyPI v1.8.2] https://pypi.org/project/mistralai/1.8.2 - . \ No newline at end of file diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md index 73a0f77a..8ace69d9 100644 --- a/docs/models/agentscompletionrequest.md +++ b/docs/models/agentscompletionrequest.md @@ -18,4 +18,5 @@ | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md index 4a3093dd..0bab012c 100644 --- a/docs/models/agentscompletionstreamrequest.md +++ b/docs/models/agentscompletionstreamrequest.md @@ -18,4 +18,5 @@ | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index 904ad6c5..a850b5b8 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -21,4 +21,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index f2cce68b..cf286cda 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -21,4 +21,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/mistralpromptmode.md b/docs/models/mistralpromptmode.md new file mode 100644 index 00000000..7416e203 --- /dev/null +++ b/docs/models/mistralpromptmode.md @@ -0,0 +1,8 @@ +# MistralPromptMode + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `REASONING` | reasoning | \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 28e10497..c7fdb687 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -55,6 +55,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -115,6 +116,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 12d9feca..3a8d57fa 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -57,6 +57,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | @@ -120,6 +121,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index 8b462c16..6d6aaa2c 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -3,7 +3,7 @@ ## Overview -(beta) Converstations API +(beta) Conversations API ### Available Operations diff --git a/pyproject.toml b/pyproject.toml index 961af49d..c7cb9095 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.8.1" +version = "1.8.2" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index ddccfbfa..fc416fd3 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.8.1" +__version__: str = "1.8.2" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.8.1 2.548.6 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.8.2 2.548.6 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index e81f01aa..4fbb25dd 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -47,6 +47,7 @@ def complete( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -68,6 +69,7 @@ def complete( :param n: Number of completions to return for each request, input tokens are only billed once. 
:param prediction: :param parallel_tool_calls: + :param prompt_mode: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -105,6 +107,7 @@ def complete( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, agent_id=agent_id, ) @@ -213,6 +216,7 @@ async def complete_async( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -234,6 +238,7 @@ async def complete_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -271,6 +276,7 @@ async def complete_async( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, agent_id=agent_id, ) @@ -379,6 +385,7 @@ def stream( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -402,6 +409,7 @@ def stream( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -439,6 +447,7 @@ def stream( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, agent_id=agent_id, ) @@ -553,6 +562,7 @@ async def stream_async( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -576,6 +586,7 @@ async def stream_async( :param n: Number of completions to return for each request, input tokens are only billed once. 
:param prediction: :param parallel_tool_calls: + :param prompt_mode: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -613,6 +624,7 @@ async def stream_async( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, agent_id=agent_id, ) diff --git a/src/mistralai/beta.py b/src/mistralai/beta.py index a0d45f67..04209d74 100644 --- a/src/mistralai/beta.py +++ b/src/mistralai/beta.py @@ -8,7 +8,7 @@ class Beta(BaseSDK): conversations: Conversations - r"""(beta) Converstations API""" + r"""(beta) Conversations API""" agents: MistralAgents r"""(beta) Agents API""" diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 0e7294f9..96fcf65d 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -123,6 +123,7 @@ def complete( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -147,6 +148,7 @@ def complete( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -186,6 +188,7 @@ def complete( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, safe_prompt=safe_prompt, ) @@ -288,6 +291,7 @@ async def complete_async( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -312,6 +316,7 @@ async def complete_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -351,6 +356,7 @@ async def complete_async( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, safe_prompt=safe_prompt, ) @@ -461,6 +467,7 @@ def stream( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -487,6 +494,7 @@ def stream( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -528,6 +536,7 @@ def stream( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, safe_prompt=safe_prompt, ) @@ -644,6 +653,7 @@ async def stream_async( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -670,6 +680,7 @@ async def stream_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -711,6 +722,7 @@ async def stream_async( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, safe_prompt=safe_prompt, ) diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py index 320e3d09..6ef02edd 100644 --- a/src/mistralai/conversations.py +++ b/src/mistralai/conversations.py @@ -35,7 +35,7 @@ class Conversations(BaseSDK): - r"""(beta) Converstations API""" + r"""(beta) Conversations API""" # region sdk-class-body # Custom run code allowing client side execution of code diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 9ed85c07..e6493e90 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -591,6 +591,7 @@ MessageOutputEventTypedDict, ) from .metricout import MetricOut, MetricOutTypedDict +from .mistralpromptmode import MistralPromptMode from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict from .modelconversation import ( ModelConversation, @@ -1154,6 +1155,7 @@ "MessagesTypedDict", "MetricOut", "MetricOutTypedDict", + "MistralPromptMode", "ModelCapabilities", "ModelCapabilitiesTypedDict", "ModelConversation", diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py index cd81393a..e99dcfc2 100644 --- a/src/mistralai/models/agentscompletionrequest.py +++ b/src/mistralai/models/agentscompletionrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict @@ -11,8 +12,9 @@ from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator +from mistralai.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer +from pydantic.functional_validators import PlainValidator from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -86,6 +88,7 @@ class AgentsCompletionRequestTypedDict(TypedDict): 
r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] class AgentsCompletionRequest(BaseModel): @@ -126,6 +129,10 @@ class AgentsCompletionRequest(BaseModel): parallel_tool_calls: Optional[bool] = None + prompt_mode: Annotated[ + OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) + ] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -141,8 +148,9 @@ def serialize_model(self, handler): "n", "prediction", "parallel_tool_calls", + "prompt_mode", ] - nullable_fields = ["max_tokens", "random_seed", "tools", "n"] + nullable_fields = ["max_tokens", "random_seed", "tools", "n", "prompt_mode"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py index ab6a307a..b4b423f5 100644 --- a/src/mistralai/models/agentscompletionstreamrequest.py +++ b/src/mistralai/models/agentscompletionstreamrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict @@ -11,8 +12,9 @@ from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator +from mistralai.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer +from pydantic.functional_validators import PlainValidator from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -85,6 +87,7 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict): r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] class AgentsCompletionStreamRequest(BaseModel): @@ -124,6 +127,10 @@ class AgentsCompletionStreamRequest(BaseModel): parallel_tool_calls: Optional[bool] = None + prompt_mode: Annotated[ + OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) + ] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -139,8 +146,9 @@ def serialize_model(self, handler): "n", "prediction", "parallel_tool_calls", + "prompt_mode", ] - nullable_fields = ["max_tokens", "random_seed", "tools", "n"] + nullable_fields = ["max_tokens", "random_seed", "tools", "n", "prompt_mode"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index a277db8f..004cc011 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode from .prediction import 
Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict @@ -11,8 +12,9 @@ from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator +from mistralai.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer +from pydantic.functional_validators import PlainValidator from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -86,6 +88,7 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -134,6 +137,10 @@ class ChatCompletionRequest(BaseModel): parallel_tool_calls: Optional[bool] = None + prompt_mode: Annotated[ + OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) + ] = UNSET + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -154,9 +161,17 @@ def serialize_model(self, handler): "n", "prediction", "parallel_tool_calls", + "prompt_mode", "safe_prompt", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + "prompt_mode", + ] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index 9ed7b3f6..78a85bef 100644 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict @@ -11,8 +12,9 @@ from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator +from mistralai.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer +from pydantic.functional_validators import PlainValidator from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -89,6 +91,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -136,6 +139,10 @@ class ChatCompletionStreamRequest(BaseModel): parallel_tool_calls: Optional[bool] = 
None + prompt_mode: Annotated[ + OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) + ] = UNSET + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -156,9 +163,17 @@ def serialize_model(self, handler): "n", "prediction", "parallel_tool_calls", + "prompt_mode", "safe_prompt", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + "prompt_mode", + ] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/mistralpromptmode.py b/src/mistralai/models/mistralpromptmode.py new file mode 100644 index 00000000..0ffd6787 --- /dev/null +++ b/src/mistralai/models/mistralpromptmode.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import UnrecognizedStr +from typing import Literal, Union + + +MistralPromptMode = Union[Literal["reasoning"], UnrecognizedStr]
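
---

This diff threads a new optional `prompt_mode` request field (open enum `MistralPromptMode`, currently only `"reasoning"`) through the chat and agents `complete`/`stream` methods. A minimal usage sketch of the new parameter, assuming an API key in the environment; the model name below is illustrative and not taken from this diff:

```python
import os

from mistralai import Mistral

# Sketch of the prompt_mode parameter introduced in 1.8.2.
with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as client:
    res = client.chat.complete(
        model="magistral-medium-latest",  # assumed reasoning-capable model id, substitute your own
        messages=[{"role": "user", "content": "How many prime numbers are there below 30?"}],
        prompt_mode="reasoning",  # new MistralPromptMode value; omit (or pass None) to use the default prompting
    )
    print(res)
```

Because the generated type is `Union[Literal["reasoning"], UnrecognizedStr]` validated with `validate_open_enum(False)`, unrecognized string values are accepted client-side rather than rejected, so future prompt modes should not require an SDK regeneration to pass through. The same `prompt_mode` parameter is also exposed on `chat.stream`, `agents.complete`, `agents.stream`, and their async variants.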