diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 6eb1248e..59249dd4 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 81cc8be96362e2f1cb145b08a2e6c4fa + docChecksum: 406e00c323dba0db26d6994620926af4 docVersion: 0.0.2 - speakeasyVersion: 1.477.0 - generationVersion: 2.497.0 - releaseVersion: 1.5.1 - configChecksum: ef3439d915c5d16e7cfb88fe2bf94907 + speakeasyVersion: 1.517.3 + generationVersion: 2.548.6 + releaseVersion: 1.5.2-rc.1 + configChecksum: 321ba0a46c45c1b64b391afe2abe901c repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -14,7 +14,7 @@ features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.10.5 + core: 5.12.3 customCodeRegions: 0.1.1 defaultEnabledRetries: 0.2.0 downloadStreams: 1.0.1 @@ -22,19 +22,19 @@ features: envVarSecurityUsage: 0.3.2 examples: 3.0.1 flatRequests: 1.0.1 - flattening: 3.1.0 - globalSecurity: 3.0.2 + flattening: 3.1.1 + globalSecurity: 3.0.3 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.1.0 methodArguments: 1.0.2 multipartFileContentType: 1.0.0 nameOverrides: 3.0.1 - nullables: 1.0.0 + nullables: 1.0.1 openEnums: 1.0.0 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.0 + sdkHooks: 1.0.1 serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 @@ -101,6 +101,7 @@ generatedFiles: - docs/models/detailedjoboutstatus.md - docs/models/document.md - docs/models/documenturlchunk.md + - docs/models/documenturlchunktype.md - docs/models/embeddingrequest.md - docs/models/embeddingresponse.md - docs/models/embeddingresponsedata.md @@ -238,7 +239,7 @@ generatedFiles: - poetry.toml - py.typed - pylintrc - - scripts/prepare-readme.py + - scripts/prepare_readme.py - scripts/publish.sh - src/mistralai/__init__.py - src/mistralai/_hooks/__init__.py @@ -448,7 +449,7 @@ examples: files_api_routes_upload_file: speakeasy-default-files-api-routes-upload-file: requestBody: - multipart/form-data: {"file": {"": "x-file: example.file"}} + multipart/form-data: {"file": {}} responses: "200": application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "batch_request", "source": "repository"} @@ -504,14 +505,14 @@ examples: created_by_me: false responses: "200": - application/json: {"total": 768578} + application/json: {"object": "list", "total": 768578} jobs_api_routes_fine_tuning_create_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-create-fine-tuning-job: requestBody: application/json: {"model": "Fiesta", "hyperparameters": {"learning_rate": 0.0001}} responses: "200": - application/json: {"expected_duration_seconds": 220, "cost": 10, "cost_currency": "EUR", "train_tokens_per_step": 131072, "train_tokens": 1310720, "data_tokens": 305375, "deprecated": true, "details": "", "epochs": 4.2922, "training_steps": 10} + application/json: {"expected_duration_seconds": 220, "cost": 10, "cost_currency": "EUR", "train_tokens_per_step": 131072, "train_tokens": 1310720, "data_tokens": 305375, "deprecated": true, "details": "", "epochs": 4.2922, "training_steps": 10, "object": "job.metadata"} jobs_api_routes_fine_tuning_get_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-job: parameters: @@ -519,7 +520,7 @@ examples: job_id: 
"b888f774-3e7c-4135-a18c-6b985523c4bc" responses: "200": - application/json: {"id": "e50f7622-81da-484b-9c66-1c8a99c6b71b", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "Model Y", "status": "CANCELLED", "job_type": "", "created_at": 415305, "modified_at": 149108, "training_files": ["8f7112aa-f0ab-44e4-83b4-cca3716f6208", "7aa1f8cf-05d8-49d5-88ee-381f6b4b885c"], "checkpoints": [{"metrics": {}, "step_number": 856562, "created_at": 1716963433}, {"metrics": {}, "step_number": 328633, "created_at": 1716963433}]} + application/json: {"id": "e50f7622-81da-484b-9c66-1c8a99c6b71b", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "Model Y", "status": "CANCELLED", "job_type": "", "created_at": 415305, "modified_at": 149108, "training_files": ["8f7112aa-f0ab-44e4-83b4-cca3716f6208", "7aa1f8cf-05d8-49d5-88ee-381f6b4b885c"], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 856562, "created_at": 1716963433}, {"metrics": {}, "step_number": 328633, "created_at": 1716963433}]} jobs_api_routes_fine_tuning_cancel_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-cancel-fine-tuning-job: parameters: @@ -527,7 +528,7 @@ examples: job_id: "0f713502-9233-41c6-9ebd-c570b7edb496" responses: "200": - application/json: {"id": "d50fbe4e-3e32-4613-8574-4d82f3fd6b3c", "auto_start": true, "hyperparameters": {"learning_rate": 0.0001}, "model": "Taurus", "status": "SUCCESS", "job_type": "", "created_at": 251316, "modified_at": 342605, "training_files": ["247ac10c-dc31-412f-a7cc-924123024afa", "0f84bd49-4511-4689-9d07-a64aa724280b", "200aa489-3801-4d6e-a454-eb14cac448cb"], "checkpoints": [{"metrics": {}, "step_number": 949854, "created_at": 1716963433}, {"metrics": {}, "step_number": 516599, "created_at": 1716963433}, {"metrics": {}, "step_number": 439590, "created_at": 1716963433}]} + application/json: {"id": "d50fbe4e-3e32-4613-8574-4d82f3fd6b3c", "auto_start": true, "hyperparameters": {"learning_rate": 0.0001}, "model": "Taurus", "status": "SUCCESS", "job_type": "", "created_at": 251316, "modified_at": 342605, "training_files": ["247ac10c-dc31-412f-a7cc-924123024afa", "0f84bd49-4511-4689-9d07-a64aa724280b", "200aa489-3801-4d6e-a454-eb14cac448cb"], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 949854, "created_at": 1716963433}, {"metrics": {}, "step_number": 516599, "created_at": 1716963433}, {"metrics": {}, "step_number": 439590, "created_at": 1716963433}]} jobs_api_routes_fine_tuning_start_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-start-fine-tuning-job: parameters: @@ -535,7 +536,7 @@ examples: job_id: "0bf0f9e6-c3e5-4d61-aac8-0e36dcac0dfc" responses: "200": - application/json: {"id": "b676fe58-2c47-483e-831e-c71dbed4c90a", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "A4", "status": "CANCELLED", "job_type": "", "created_at": 874397, "modified_at": 483387, "training_files": [], "checkpoints": [{"metrics": {}, "step_number": 331375, "created_at": 1716963433}, {"metrics": {}, "step_number": 590686, "created_at": 1716963433}, {"metrics": {}, "step_number": 543177, "created_at": 1716963433}]} + application/json: {"id": "b676fe58-2c47-483e-831e-c71dbed4c90a", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "A4", "status": "CANCELLED", "job_type": "", "created_at": 874397, "modified_at": 483387, "training_files": [], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 331375, "created_at": 1716963433}, {"metrics": {}, "step_number": 
590686, "created_at": 1716963433}, {"metrics": {}, "step_number": 543177, "created_at": 1716963433}]} jobs_api_routes_batch_get_batch_jobs: speakeasy-default-jobs-api-routes-batch-get-batch-jobs: parameters: @@ -545,14 +546,14 @@ examples: created_by_me: false responses: "200": - application/json: {"total": 768578} + application/json: {"object": "list", "total": 768578} jobs_api_routes_batch_create_batch_job: speakeasy-default-jobs-api-routes-batch-create-batch-job: requestBody: application/json: {"input_files": ["a621cf02-1cd9-4cf5-8403-315211a509a3"], "endpoint": "/v1/fim/completions", "model": "2", "timeout_hours": 24} responses: "200": - application/json: {"id": "", "input_files": ["8e774c2b-ecc3-4769-b177-5e024985613d", "0ee803d5-6a1d-4f94-836b-fd39494798bc"], "endpoint": "", "model": "Impala", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}, {"message": "", "count": 1}], "status": "RUNNING", "created_at": 770370, "total_requests": 350586, "completed_requests": 95214, "succeeded_requests": 930830, "failed_requests": 617761} + application/json: {"id": "", "object": "batch", "input_files": ["8e774c2b-ecc3-4769-b177-5e024985613d", "0ee803d5-6a1d-4f94-836b-fd39494798bc"], "endpoint": "", "model": "Impala", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}, {"message": "", "count": 1}], "status": "RUNNING", "created_at": 770370, "total_requests": 350586, "completed_requests": 95214, "succeeded_requests": 930830, "failed_requests": 617761} jobs_api_routes_batch_get_batch_job: speakeasy-default-jobs-api-routes-batch-get-batch-job: parameters: @@ -560,7 +561,7 @@ examples: job_id: "b888f774-3e7c-4135-a18c-6b985523c4bc" responses: "200": - application/json: {"id": "", "input_files": ["50f76228-1da8-44bc-b661-c8a99c6b71b6", "cd62b8f7-112a-4af0-bab4-e43b4cca3716", "620807aa-1f8c-4f05-ad89-d58ee381f6b4"], "endpoint": "", "model": "Golf", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "SUCCESS", "created_at": 790898, "total_requests": 55097, "completed_requests": 578320, "succeeded_requests": 856562, "failed_requests": 328633} + application/json: {"id": "", "object": "batch", "input_files": ["50f76228-1da8-44bc-b661-c8a99c6b71b6", "cd62b8f7-112a-4af0-bab4-e43b4cca3716", "620807aa-1f8c-4f05-ad89-d58ee381f6b4"], "endpoint": "", "model": "Golf", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "SUCCESS", "created_at": 790898, "total_requests": 55097, "completed_requests": 578320, "succeeded_requests": 856562, "failed_requests": 328633} jobs_api_routes_batch_cancel_batch_job: speakeasy-default-jobs-api-routes-batch-cancel-batch-job: parameters: @@ -568,7 +569,7 @@ examples: job_id: "0f713502-9233-41c6-9ebd-c570b7edb496" responses: "200": - application/json: {"id": "", "input_files": ["50fbe4e3-e326-4135-8744-d82f3fd6b3c1", "eb45e247-ac10-4cdc-8311-2f7cc9241230", "4afaa0f8-4bd4-4945-9116-89d07a64aa72"], "endpoint": "", "model": "Alpine", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "QUEUED", "created_at": 709109, "total_requests": 275794, "completed_requests": 158938, "succeeded_requests": 12381, "failed_requests": 11864} + application/json: {"id": "", "object": "batch", "input_files": ["50fbe4e3-e326-4135-8744-d82f3fd6b3c1", "eb45e247-ac10-4cdc-8311-2f7cc9241230", "4afaa0f8-4bd4-4945-9116-89d07a64aa72"], "endpoint": "", "model": "Alpine", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "QUEUED", "created_at": 709109, "total_requests": 
275794, "completed_requests": 158938, "succeeded_requests": 12381, "failed_requests": 11864} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: @@ -650,7 +651,7 @@ examples: ocr_v1_ocr_post: speakeasy-default-ocr-v1-ocr-post: requestBody: - application/json: {"model": "Focus", "document": {"document_url": "https://dutiful-horst.org"}} + application/json: {"model": "Focus", "document": {"document_url": "https://dutiful-horst.org", "type": "document_url"}} responses: "200": application/json: {"pages": [], "model": "A4", "usage_info": {"pages_processed": 442675}} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index f020895b..666982eb 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -7,13 +7,15 @@ generation: useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true + nameResolutionFeb2025: false parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true + securityFeb2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.5.1 + version: 1.5.2-rc.1 additionalDependencies: dev: pytest: ^8.2.2 @@ -44,5 +46,6 @@ python: methodArguments: infer-optional-args outputModelSuffix: output packageName: mistralai + pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 21228dc5..ae45ec0e 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,49 +1,49 @@ -speakeasyVersion: 1.477.0 +speakeasyVersion: 1.517.3 sources: mistral-azure-source: - sourceNamespace: mistral-azure-source - sourceRevisionDigest: sha256:ff181b1e0e3894a4925f7ae87415323058538a13bae9d9d508a8fe3b6ec0e333 - sourceBlobDigest: sha256:a2b4fd69298ebb9adb0d3c8dfb452db52defac512a7532203eebffb6a252df76 + sourceNamespace: mistral-openapi-azure + sourceRevisionDigest: sha256:b9be39effd24c50514ea00965c7b7089b6ae09d7aaacfb5f9eeafe465f131a62 + sourceBlobDigest: sha256:38505cbdf426ed228e4cce7667721237ddb32f72fb7df8f26c289082a568d7cb tags: - latest mistral-google-cloud-source: - sourceNamespace: mistral-google-cloud-source - sourceRevisionDigest: sha256:36c7de11e35023dc8fa5f3c0fb0e486d2a102275a2df808c08cfe9d43089be04 - sourceBlobDigest: sha256:dd65bfa5d0448ad1851ebb18b57aa675533cd3e166beb86a390b0ab51d16a1c1 + sourceNamespace: mistral-openapi-google-cloud + sourceRevisionDigest: sha256:f4b8b9311a39f5c62deaed92c473eff44f139d784f122fa3c9e41d5236c96cd7 + sourceBlobDigest: sha256:cd5c242a598ba671e83fc7572ce7def4486cba035d2729d61cf7c64189a6fd89 tags: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:bdfe3bd4e867529e1821e0f195c2d5832083f7699315f4a42d6b5551bd7847a3 - sourceBlobDigest: sha256:7e8a475b75404d724fc7936bd6f585b8e5226d3dca00ab4b69807b53fb63151b + sourceRevisionDigest: sha256:8655bba5635f9f9bc3aa94690c26d6124c778e03478786074288cd63414a7a84 + sourceBlobDigest: sha256:cd9280b2b089ef5e0b06ba94ed2736b928f7e4e542d04f408df84e6912049ba6 tags: - latest - - speakeasy-sdk-regen-1741279153 + - speakeasy-sdk-regen-1741964260 targets: mistralai-azure-sdk: source: mistral-azure-source - sourceNamespace: mistral-azure-source - sourceRevisionDigest: sha256:ff181b1e0e3894a4925f7ae87415323058538a13bae9d9d508a8fe3b6ec0e333 - sourceBlobDigest: sha256:a2b4fd69298ebb9adb0d3c8dfb452db52defac512a7532203eebffb6a252df76 + sourceNamespace: mistral-openapi-azure + sourceRevisionDigest: sha256:b9be39effd24c50514ea00965c7b7089b6ae09d7aaacfb5f9eeafe465f131a62 + sourceBlobDigest: 
sha256:38505cbdf426ed228e4cce7667721237ddb32f72fb7df8f26c289082a568d7cb codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:28356dba7ea28436035e20182b8ce4d1951e19503b5accef6a128d860361e5c0 + codeSamplesRevisionDigest: sha256:6a21f785e0bc1861ae9bf237939c6252d4589f4b5ece596938bad54b3f5c1ac9 mistralai-gcp-sdk: source: mistral-google-cloud-source - sourceNamespace: mistral-google-cloud-source - sourceRevisionDigest: sha256:36c7de11e35023dc8fa5f3c0fb0e486d2a102275a2df808c08cfe9d43089be04 - sourceBlobDigest: sha256:dd65bfa5d0448ad1851ebb18b57aa675533cd3e166beb86a390b0ab51d16a1c1 + sourceNamespace: mistral-openapi-google-cloud + sourceRevisionDigest: sha256:f4b8b9311a39f5c62deaed92c473eff44f139d784f122fa3c9e41d5236c96cd7 + sourceBlobDigest: sha256:cd5c242a598ba671e83fc7572ce7def4486cba035d2729d61cf7c64189a6fd89 codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:7de23f90d6543356f310f46375bef4db7f43eb22b2871ad4dfe1b7d0cc875bb4 + codeSamplesRevisionDigest: sha256:a0d0890a8e87eac8ade9832883c7a129a749142696e01b1e611cf2d97fbeed9d mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:bdfe3bd4e867529e1821e0f195c2d5832083f7699315f4a42d6b5551bd7847a3 - sourceBlobDigest: sha256:7e8a475b75404d724fc7936bd6f585b8e5226d3dca00ab4b69807b53fb63151b + sourceRevisionDigest: sha256:8655bba5635f9f9bc3aa94690c26d6124c778e03478786074288cd63414a7a84 + sourceBlobDigest: sha256:cd9280b2b089ef5e0b06ba94ed2736b928f7e4e542d04f408df84e6912049ba6 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:ba10be893f3e6dae275eb8fb09a688f3652de81eebd314427f28c274800edc48 + codeSamplesRevisionDigest: sha256:2a2f61e5c5e1eaad48e6a74164bd6249855c3ad7976ef83068199d57ebcdd055 workflow: workflowVersion: 1.0.0 - speakeasyVersion: 1.477.0 + speakeasyVersion: 1.517.3 sources: mistral-azure-source: inputs: diff --git a/README.md b/README.md index fd31bcd8..fd17da18 100644 --- a/README.md +++ b/README.md @@ -75,6 +75,37 @@ pip install mistralai ```bash poetry add mistralai ``` + +### Shell and script usage with `uv` + +You can use this SDK in a Python shell with [uv](https://docs.astral.sh/uv/) and the `uvx` command that comes with it like so: + +```shell +uvx --from mistralai python +``` + +It's also possible to write a standalone Python script without needing to set up a whole project like so: + +```python +#!/usr/bin/env -S uv run --script +# /// script +# requires-python = ">=3.9" +# dependencies = [ +# "mistralai", +# ] +# /// + +from mistralai import Mistral + +sdk = Mistral( + # SDK arguments +) + +# Rest of script here... +``` + +Once that is saved to a file, you can run it with `uv run script.py` where +`script.py` can be replaced with the actual file name. @@ -89,6 +120,7 @@ This example shows how to create chat completions. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -98,7 +130,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ]) # Handle response print(res) @@ -114,6 +146,7 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -123,7 +156,7 @@ async def main(): "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], stream=False) + ]) # Handle response print(res) @@ -140,6 +173,7 @@ This example shows how to upload a file. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -163,6 +197,7 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -187,6 +222,7 @@ This example shows how to create agents completions. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -196,7 +232,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="") # Handle response print(res) @@ -212,6 +248,7 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -221,7 +258,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="") # Handle response print(res) @@ -238,14 +275,15 @@ This example shows how to create embedding request. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.embeddings.create(inputs=[ + res = mistral.embeddings.create(model="mistral-embed", inputs=[ "Embed this sentence.", "As well as this one.", - ], model="mistral-embed") + ]) # Handle response print(res) @@ -261,14 +299,15 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = await mistral.embeddings.create_async(inputs=[ + res = await mistral.embeddings.create_async(model="mistral-embed", inputs=[ "Embed this sentence.", "As well as this one.", - ], model="mistral-embed") + ]) # Handle response print(res) @@ -465,6 +504,7 @@ underlying connection when the context is exited. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -474,7 +514,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=True) + ]) with res as event_stream: for event in event_stream: @@ -502,6 +542,7 @@ Certain SDK methods accept file objects as part of a request body or multi-part from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -528,6 +569,7 @@ from mistralai import Mistral from mistralai.utils import BackoffStrategy, RetryConfig import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -546,6 +588,7 @@ from mistralai import Mistral from mistralai.utils import BackoffStrategy, RetryConfig import os + with Mistral( retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), api_key=os.getenv("MISTRAL_API_KEY", ""), @@ -586,6 +629,7 @@ When custom error responses are specified for an operation, the SDK may also rai from mistralai import Mistral, models import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -613,9 +657,9 @@ with Mistral( You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. 
This table lists the names associated with the available servers: -| Name | Server | -| ---- | ------------------------ | -| `eu` | `https://api.mistral.ai` | +| Name | Server | Description | +| ---- | ------------------------ | -------------------- | +| `eu` | `https://api.mistral.ai` | EU Production server | #### Example @@ -623,6 +667,7 @@ You can override the default server globally by passing a server name to the `se from mistralai import Mistral import os + with Mistral( server="eu", api_key=os.getenv("MISTRAL_API_KEY", ""), @@ -642,6 +687,7 @@ The default server can also be overridden globally by passing a URL to the `serv from mistralai import Mistral import os + with Mistral( server_url="https://api.mistral.ai", api_key=os.getenv("MISTRAL_API_KEY", ""), @@ -752,6 +798,7 @@ To authenticate with the API the `api_key` parameter must be set when initializi from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -775,6 +822,7 @@ The `Mistral` class implements the context manager protocol and registers a fina from mistralai import Mistral import os def main(): + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -783,6 +831,7 @@ def main(): # Or when using async: async def amain(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/RELEASES.md b/RELEASES.md index d7b657bb..0ad3429c 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -168,4 +168,14 @@ Based on: ### Generated - [python v1.5.1] . ### Releases -- [PyPI v1.5.1] https://pypi.org/project/mistralai/1.5.1 - . \ No newline at end of file +- [PyPI v1.5.1] https://pypi.org/project/mistralai/1.5.1 - . + +## 2025-03-14 15:08:57 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.5.2-rc.1] . +### Releases +- [PyPI v1.5.2-rc.1] https://pypi.org/project/mistralai/1.5.2-rc.1 - . \ No newline at end of file diff --git a/USAGE.md b/USAGE.md index 3e1cae03..fa3a77de 100644 --- a/USAGE.md +++ b/USAGE.md @@ -8,6 +8,7 @@ This example shows how to create chat completions. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -17,7 +18,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ]) # Handle response print(res) @@ -33,6 +34,7 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -42,7 +44,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ]) # Handle response print(res) @@ -59,6 +61,7 @@ This example shows how to upload a file. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -82,6 +85,7 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -106,6 +110,7 @@ This example shows how to create agents completions. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -115,7 +120,7 @@ with Mistral( "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="") # Handle response print(res) @@ -131,6 +136,7 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -140,7 +146,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="") # Handle response print(res) @@ -157,14 +163,15 @@ This example shows how to create embedding request. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.embeddings.create(inputs=[ + res = mistral.embeddings.create(model="mistral-embed", inputs=[ "Embed this sentence.", "As well as this one.", - ], model="mistral-embed") + ]) # Handle response print(res) @@ -180,14 +187,15 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = await mistral.embeddings.create_async(inputs=[ + res = await mistral.embeddings.create_async(model="mistral-embed", inputs=[ "Embed this sentence.", "As well as this one.", - ], model="mistral-embed") + ]) # Handle response print(res) diff --git a/docs/models/documenturlchunk.md b/docs/models/documenturlchunk.md index 33785c34..6c9a5b4d 100644 --- a/docs/models/documenturlchunk.md +++ b/docs/models/documenturlchunk.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | -| `document_url` | *str* | :heavy_check_mark: | N/A | -| `type` | *Optional[Literal["document_url"]]* | :heavy_minus_sign: | N/A | -| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `document_url` | *str* | :heavy_check_mark: | N/A | +| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | +| `type` | [Optional[models.DocumentURLChunkType]](../models/documenturlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/documenturlchunktype.md b/docs/models/documenturlchunktype.md new file mode 100644 index 00000000..32e1fa9e --- /dev/null +++ b/docs/models/documenturlchunktype.md @@ -0,0 +1,8 @@ +# DocumentURLChunkType + + +## Values + +| Name | Value | +| -------------- | -------------- | +| `DOCUMENT_URL` | document_url | \ No newline at end of file diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md index 07ab903a..3bdd79e8 100644 --- a/docs/models/embeddingrequest.md +++ b/docs/models/embeddingrequest.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | Example | | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -| `inputs` | 
[models.Inputs](../models/inputs.md) | :heavy_check_mark: | Text to embed. | [<br/>"Embed this sentence.",<br/>"As well as this one."<br/>] | -| `model` | *Optional[str]* | :heavy_minus_sign: | ID of the model to use. | | \ No newline at end of file +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | +| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Text to embed. | [<br/>"Embed this sentence.",<br/>"As well as this one."<br/>
] | \ No newline at end of file diff --git a/docs/models/filepurpose.md b/docs/models/filepurpose.md index 5152aeeb..14cab13e 100644 --- a/docs/models/filepurpose.md +++ b/docs/models/filepurpose.md @@ -6,4 +6,5 @@ | Name | Value | | ----------- | ----------- | | `FINE_TUNE` | fine-tune | -| `BATCH` | batch | \ No newline at end of file +| `BATCH` | batch | +| `OCR` | ocr | \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 00ca33ac..1e6f9069 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -20,6 +20,7 @@ Agents Completion from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -29,7 +30,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="") # Handle response print(res) @@ -76,6 +77,7 @@ Mistral AI provides the ability to stream responses back to a client in order to from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -85,7 +87,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=True) + ], agent_id="") with res as event_stream: for event in event_stream: diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 38e16adc..8cac6db4 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -20,6 +20,7 @@ Chat Completion from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -29,7 +30,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ]) # Handle response print(res) @@ -79,6 +80,7 @@ Mistral AI provides the ability to stream responses back to a client in order to from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -88,7 +90,7 @@ with Mistral( "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], stream=True) + ]) with res as event_stream: for event in event_stream: diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 6bcc68a9..6c7127af 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -20,6 +20,7 @@ Moderations from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -62,6 +63,7 @@ Moderations Chat from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -95,7 +97,7 @@ with Mistral( "role": "assistant", }, ], - ], truncate_for_context_length=False) + ]) # Handle response print(res) diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 44fae4ac..8c386439 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -19,14 +19,15 @@ Embeddings from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.embeddings.create(inputs=[ + res = mistral.embeddings.create(model="mistral-embed", inputs=[ "Embed this sentence.", "As well as this one.", - ], model="mistral-embed") + ]) # Handle response print(res) @@ -37,8 +38,8 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | | `inputs` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | -| `model` | *Optional[str]* | :heavy_minus_sign: | ID of the model to use. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index 8f01a668..befa4d67 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -28,6 +28,7 @@ Please contact us if you need to increase these storage limits. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -70,11 +71,12 @@ Returns a list of files that belong to the user's organization. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.list(page=0, page_size=100) + res = mistral.files.list() # Handle response print(res) @@ -113,6 +115,7 @@ Returns information about a specific file. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -151,6 +154,7 @@ Delete a file. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -189,6 +193,7 @@ Download a file from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -227,11 +232,12 @@ Get Signed Url from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.get_signed_url(file_id="", expiry=24) + res = mistral.files.get_signed_url(file_id="") # Handle response print(res) diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index 28de6c02..c70b3da4 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -20,11 +20,12 @@ FIM completion. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fim.complete(model="codestral-2405", prompt="def", top_p=1, stream=False, suffix="return a+b") + res = mistral.fim.complete(model="codestral-2405", prompt="def", suffix="return a+b") # Handle response print(res) @@ -68,11 +69,12 @@ Mistral AI provides the ability to stream responses back to a client in order to from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fim.stream(model="codestral-2405", prompt="def", top_p=1, stream=True, suffix="return a+b") + res = mistral.fim.stream(model="codestral-2405", prompt="def", suffix="return a+b") with res as event_stream: for event in event_stream: diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index 06605877..ecb11def 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -21,11 +21,12 @@ Get a list of fine-tuning jobs for your organization and user. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.list(page=0, page_size=100, created_by_me=False) + res = mistral.fine_tuning.jobs.list() # Handle response print(res) @@ -67,13 +68,12 @@ Create a new fine-tuning job, it will be queued for processing. 
from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.create(model="Fiesta", hyperparameters={ - "learning_rate": 0.0001, - }) + res = mistral.fine_tuning.jobs.create(model="Fiesta", hyperparameters={}) # Handle response print(res) @@ -114,6 +114,7 @@ Get a fine-tuned job details by its UUID. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -152,6 +153,7 @@ Request the cancellation of a fine tuning job. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -190,6 +192,7 @@ Request the start of a validated fine tuning job. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index 56a7f60b..5b80a45b 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -20,11 +20,12 @@ Get a list of batch jobs for your organization and user. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.list(page=0, page_size=100, created_by_me=False) + res = mistral.batch.jobs.list() # Handle response print(res) @@ -64,13 +65,14 @@ Create a new batch job, it will be queued for processing. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: res = mistral.batch.jobs.create(input_files=[ "a621cf02-1cd9-4cf5-8403-315211a509a3", - ], endpoint="/v1/fim/completions", model="2", timeout_hours=24) + ], endpoint="/v1/fim/completions", model="2") # Handle response print(res) @@ -108,6 +110,7 @@ Get a batch job details by its UUID. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -146,6 +149,7 @@ Request the cancellation of a batch job. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index e048d20e..dd7baf50 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -24,6 +24,7 @@ List all models available to the user. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -62,6 +63,7 @@ Retrieve a model information. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -101,6 +103,7 @@ Delete a fine-tuned model. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -140,6 +143,7 @@ Update a model name or description. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -180,6 +184,7 @@ Archive a fine-tuned model. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -218,6 +223,7 @@ Un-archive a fine-tuned model. 
from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md index 54f8af96..61988ea6 100644 --- a/docs/sdks/ocr/README.md +++ b/docs/sdks/ocr/README.md @@ -19,6 +19,7 @@ OCR from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index 16a5196b..8b8ef6ae 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,34 +1,34 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: 4da7c33f650ddf206c58fa6c941d347f + docChecksum: 3b99cf44527c23ce3298616720b00a62 docVersion: 0.0.2 - speakeasyVersion: 1.462.2 - generationVersion: 2.486.1 + speakeasyVersion: 1.517.3 + generationVersion: 2.548.6 releaseVersion: 1.2.6 - configChecksum: cc2ac1769a87215774fce0075ff2e77d + configChecksum: 1a623455d46169b8a271df9cd9d58d86 published: true features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.7.4 + core: 5.12.3 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 - examples: 3.0.0 + examples: 3.0.1 flatRequests: 1.0.1 - globalSecurity: 3.0.2 + globalSecurity: 3.0.3 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.1.0 methodArguments: 1.0.2 nameOverrides: 3.0.1 - nullables: 1.0.0 + nullables: 1.0.1 openEnums: 1.0.0 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.0 + sdkHooks: 1.0.1 serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 @@ -61,8 +61,10 @@ generatedFiles: - docs/models/functioncall.md - docs/models/functionname.md - docs/models/httpvalidationerror.md + - docs/models/jsonschema.md - docs/models/loc.md - docs/models/messages.md + - docs/models/prediction.md - docs/models/referencechunk.md - docs/models/referencechunktype.md - docs/models/responseformat.md @@ -92,7 +94,7 @@ generatedFiles: - py.typed - pylintrc - pyproject.toml - - scripts/prepare-readme.py + - scripts/prepare_readme.py - scripts/publish.sh - src/mistralai_azure/__init__.py - src/mistralai_azure/_hooks/__init__.py @@ -117,6 +119,8 @@ generatedFiles: - src/mistralai_azure/models/functioncall.py - src/mistralai_azure/models/functionname.py - src/mistralai_azure/models/httpvalidationerror.py + - src/mistralai_azure/models/jsonschema.py + - src/mistralai_azure/models/prediction.py - src/mistralai_azure/models/referencechunk.py - src/mistralai_azure/models/responseformat.py - src/mistralai_azure/models/responseformats.py @@ -134,6 +138,7 @@ generatedFiles: - src/mistralai_azure/models/usermessage.py - src/mistralai_azure/models/validationerror.py - src/mistralai_azure/py.typed + - src/mistralai_azure/sdk.py - src/mistralai_azure/sdkconfiguration.py - src/mistralai_azure/types/__init__.py - src/mistralai_azure/types/basemodel.py @@ -156,17 +161,19 @@ examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "azureai", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "azureai", "stream": true, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}]} responses: "422": application/json: {} + "200": {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "azureai", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "azureai", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} "422": application/json: {} +examplesVersion: 1.0.0 generatedTests: {} diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml index 17344d9b..04ed562b 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -7,8 +7,10 @@ generation: useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true + nameResolutionFeb2025: false parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true + securityFeb2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false @@ -23,6 +25,7 @@ python: clientServerStatusCodesAsErrors: true defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in Azure. 
+ enableCustomCodeRegions: false enumFormat: union fixFlags: responseRequiredSep2024: false @@ -42,5 +45,6 @@ python: methodArguments: infer-optional-args outputModelSuffix: output packageName: mistralai_azure + pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/packages/mistralai_azure/docs/models/assistantmessage.md b/packages/mistralai_azure/docs/models/assistantmessage.md index 53f1cc76..3d0bd90b 100644 --- a/packages/mistralai_azure/docs/models/assistantmessage.md +++ b/packages/mistralai_azure/docs/models/assistantmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequest.md b/packages/mistralai_azure/docs/models/chatcompletionrequest.md index 68cef4a1..eb43a4da 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionrequest.md @@ -6,7 +6,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{<br/>"role": "user",<br/>"content": "Who is the best French painter? Answer in one short sentence."<br/>}<br/>
] | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | @@ -19,4 +19,5 @@ | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md index c9c5c87b..78442736 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md @@ -6,7 +6,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{<br/>"role": "user",<br/>"content": "Who is the best French painter? Answer in one short sentence."<br/>}<br/>
] | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | @@ -19,4 +19,5 @@ | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/function.md b/packages/mistralai_azure/docs/models/function.md index 8af398f5..a166b7bb 100644 --- a/packages/mistralai_azure/docs/models/function.md +++ b/packages/mistralai_azure/docs/models/function.md @@ -7,4 +7,5 @@ | ------------------ | ------------------ | ------------------ | ------------------ | | `name` | *str* | :heavy_check_mark: | N/A | | `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | -| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/jsonschema.md b/packages/mistralai_azure/docs/models/jsonschema.md new file mode 100644 index 00000000..ae387867 --- /dev/null +++ b/packages/mistralai_azure/docs/models/jsonschema.md @@ -0,0 +1,11 @@ +# JSONSchema + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/prediction.md b/packages/mistralai_azure/docs/models/prediction.md new file mode 100644 index 00000000..86e9c396 --- /dev/null +++ b/packages/mistralai_azure/docs/models/prediction.md @@ -0,0 +1,9 @@ +# Prediction + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | +| `type` | *Optional[Literal["content"]]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/responseformat.md b/packages/mistralai_azure/docs/models/responseformat.md index 9c627f55..23a1641b 100644 --- a/packages/mistralai_azure/docs/models/responseformat.md +++ b/packages/mistralai_azure/docs/models/responseformat.md @@ -5,4 +5,5 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | \ No newline at end of file +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/responseformats.md b/packages/mistralai_azure/docs/models/responseformats.md index ce35fbb3..06886afe 100644 --- a/packages/mistralai_azure/docs/models/responseformats.md +++ b/packages/mistralai_azure/docs/models/responseformats.md @@ -8,4 +8,5 @@ An object specifying the format that the model must output. Setting to `{ "type" | Name | Value | | ------------- | ------------- | | `TEXT` | text | -| `JSON_OBJECT` | json_object | \ No newline at end of file +| `JSON_OBJECT` | json_object | +| `JSON_SCHEMA` | json_schema | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolcall.md b/packages/mistralai_azure/docs/models/toolcall.md index 7aca5fc9..574be1ea 100644 --- a/packages/mistralai_azure/docs/models/toolcall.md +++ b/packages/mistralai_azure/docs/models/toolcall.md @@ -7,4 +7,5 @@ | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | | `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/pylintrc b/packages/mistralai_azure/pylintrc index 393d0f70..266bc815 100644 --- a/packages/mistralai_azure/pylintrc +++ b/packages/mistralai_azure/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.8 +py-version=3.9 # Discover python modules and packages in the file system subtree. recursive=no @@ -455,7 +455,10 @@ disable=raw-checker-failed, bare-except, broad-exception-caught, fixme, - relative-beyond-top-level + relative-beyond-top-level, + consider-using-with, + wildcard-import, + unused-wildcard-import # Enable the message, report, category or checker with the given id(s). 
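Tying the pieces above together: the `json_schema` response format and the new `JSONSchema` model enable structured output. A hedged sketch, assuming a placeholder endpoint/key, an illustrative schema, and that `response_format` is exposed as a flattened keyword argument on `chat.complete` (note that `schema_definition` serializes under the alias `schema`, per the model shown later in this diff):

```python
# Hedged sketch of the json_schema response format; the schema contents,
# endpoint, and key are illustrative assumptions.
from mistralai_azure import MistralAzure

sdk = MistralAzure(
    azure_api_key="<your-azure-api-key>",                    # placeholder
    azure_endpoint="https://<your-deployment>.example.com",  # placeholder
)

res = sdk.chat.complete(
    messages=[{"role": "user", "content": "Name one French painter, as JSON."}],
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "painter",
            "schema_definition": {  # serialized as "schema" on the wire
                "type": "object",
                "properties": {"name": {"type": "string"}},
                "required": ["name"],
            },
            "strict": True,
        },
    },
)
print(res)
```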
You can # either give multiple identifier separated by comma (,) or put this option diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index 5c227f66..bf120e67 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -1,9 +1,19 @@ -[tool.poetry] +[project] name = "mistralai_azure" version = "1.2.6" description = "Python Client SDK for the Mistral AI API in Azure." -authors = ["Mistral",] +authors = [{ name = "Mistral" },] readme = "README-PYPI.md" +requires-python = ">=3.9" +dependencies = [ + "eval-type-backport >=0.2.0", + "httpx >=0.28.1", + "pydantic >=2.10.3", + "python-dateutil >=2.8.2", + "typing-inspection >=0.4.0", +] + +[tool.poetry] packages = [ { include = "mistralai_azure", from = "src" } ] @@ -15,17 +25,8 @@ include = ["py.typed", "src/mistralai_azure/py.typed"] [virtualenvs] in-project = true -[tool.poetry.dependencies] -python = "^3.8" -eval-type-backport = "^0.2.0" -httpx = "^0.28.1" -jsonpath-python = "^1.0.6" -pydantic = "~2.10.3" -python-dateutil = "^2.8.2" -typing-inspect = "^0.9.0" - [tool.poetry.group.dev.dependencies] -mypy = "==1.13.0" +mypy = "==1.14.1" pylint = "==3.2.3" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" @@ -36,6 +37,7 @@ requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" [tool.pytest.ini_options] +asyncio_default_fixture_loop_scope = "function" pythonpath = ["src"] [tool.mypy] diff --git a/packages/mistralai_azure/scripts/prepare-readme.py b/packages/mistralai_azure/scripts/prepare_readme.py similarity index 100% rename from packages/mistralai_azure/scripts/prepare-readme.py rename to packages/mistralai_azure/scripts/prepare_readme.py diff --git a/packages/mistralai_azure/scripts/publish.sh b/packages/mistralai_azure/scripts/publish.sh index ab45b1f9..f2f2cf2c 100755 --- a/packages/mistralai_azure/scripts/publish.sh +++ b/packages/mistralai_azure/scripts/publish.sh @@ -2,6 +2,6 @@ export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} -poetry run python scripts/prepare-readme.py +poetry run python scripts/prepare_readme.py poetry publish --build --skip-existing diff --git a/packages/mistralai_azure/src/mistralai_azure/__init__.py b/packages/mistralai_azure/src/mistralai_azure/__init__.py index a1b7f626..dd02e42e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/__init__.py @@ -1,9 +1,18 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from ._version import __title__, __version__ +from ._version import ( + __title__, + __version__, + __openapi_doc_version__, + __gen_version__, + __user_agent__, +) from .sdk import * from .sdkconfiguration import * from .models import * VERSION: str = __version__ +OPENAPI_DOC_VERSION = __openapi_doc_version__ +SPEAKEASY_GENERATOR_VERSION = __gen_version__ +USER_AGENT = __user_agent__ diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py index 5e34da26..297dfa2f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py @@ -7,16 +7,19 @@ class HookContext: + base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None security_source: Optional[Union[Any, Callable[[], Any]]] = None def __init__( self, + base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): + self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes self.security_source = security_source @@ -25,21 +28,30 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) diff --git a/packages/mistralai_azure/src/mistralai_azure/_version.py b/packages/mistralai_azure/src/mistralai_azure/_version.py index c7215b89..23e2d1c2 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_version.py +++ b/packages/mistralai_azure/src/mistralai_azure/_version.py @@ -4,6 +4,9 @@ __title__: str = "mistralai_azure" __version__: str = "1.2.6" +__openapi_doc_version__: str = "0.0.2" +__gen_version__: str = "2.548.6" +__user_agent__: str = "speakeasy-sdk/python 1.2.6 2.548.6 0.0.2 mistralai_azure" try: if __package__ is not None: diff --git a/packages/mistralai_azure/src/mistralai_azure/basesdk.py b/packages/mistralai_azure/src/mistralai_azure/basesdk.py index 05c100d4..24e4935e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/basesdk.py +++ b/packages/mistralai_azure/src/mistralai_azure/basesdk.py @@ -231,6 +231,10 @@ def do(): req.headers, get_body_content(req), ) + + if client is None: + raise ValueError("client is required") + http_res = client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( @@ -303,6 +307,10 @@ async def do(): req.headers, get_body_content(req), ) + + if client is None: + raise ValueError("client is required") + http_res = await client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( diff --git a/packages/mistralai_azure/src/mistralai_azure/chat.py b/packages/mistralai_azure/src/mistralai_azure/chat.py index 0ed464ba..6f126a4b 100644 --- 
a/packages/mistralai_azure/src/mistralai_azure/chat.py +++ b/packages/mistralai_azure/src/mistralai_azure/chat.py @@ -15,7 +15,7 @@ def stream( self, *, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - model: OptionalNullable[str] = "azureai", + model: Optional[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, @@ -37,6 +37,9 @@ def stream( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -61,6 +64,7 @@ def stream( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -74,6 +78,8 @@ def stream( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -94,6 +100,9 @@ def stream( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), safe_prompt=safe_prompt, ) @@ -126,6 +135,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -136,7 +146,7 @@ def stream( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -145,9 +155,16 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -166,7 +183,7 @@ async def stream_async( self, *, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - model: OptionalNullable[str] 
= "azureai", + model: Optional[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, @@ -188,6 +205,9 @@ async def stream_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -212,6 +232,7 @@ async def stream_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -225,6 +246,8 @@ async def stream_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -245,6 +268,9 @@ async def stream_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), safe_prompt=safe_prompt, ) @@ -277,6 +303,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -287,7 +314,7 @@ async def stream_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -296,9 +323,16 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -320,7 +354,7 @@ def complete( List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], ], - model: OptionalNullable[str] = "azureai", + model: Optional[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: 
Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, @@ -347,6 +381,9 @@ def complete( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -369,6 +406,7 @@ def complete( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -382,6 +420,8 @@ def complete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -404,6 +444,9 @@ def complete( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), safe_prompt=safe_prompt, ) @@ -436,6 +479,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -445,15 +489,22 @@ def complete( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.ChatCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -475,7 +526,7 @@ async def complete_async( List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], ], - model: OptionalNullable[str] = "azureai", + model: Optional[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, @@ -502,6 +553,9 @@ async def complete_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, 
+ prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -524,6 +578,7 @@ async def complete_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -537,6 +592,8 @@ async def complete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -559,6 +616,9 @@ async def complete_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), safe_prompt=safe_prompt, ) @@ -591,6 +651,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -600,15 +661,22 @@ async def complete_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.ChatCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git a/packages/mistralai_azure/src/mistralai_azure/httpclient.py b/packages/mistralai_azure/src/mistralai_azure/httpclient.py index 167cea4e..1e426352 100644 --- a/packages/mistralai_azure/src/mistralai_azure/httpclient.py +++ b/packages/mistralai_azure/src/mistralai_azure/httpclient.py @@ -1,6 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" # pyright: reportReturnType = false +import asyncio +from concurrent.futures import ThreadPoolExecutor from typing_extensions import Protocol, runtime_checkable import httpx from typing import Any, Optional, Union @@ -82,3 +84,53 @@ def build_request( async def aclose(self) -> None: pass + + +class ClientOwner(Protocol): + client: Union[HttpClient, None] + async_client: Union[AsyncHttpClient, None] + + +def close_clients( + owner: ClientOwner, + sync_client: Union[HttpClient, None], + sync_client_supplied: bool, + async_client: Union[AsyncHttpClient, None], + async_client_supplied: bool, +) -> None: + """ + A finalizer function that is meant to be used with weakref.finalize to close + httpx clients used by an SDK so that underlying resources can be garbage + collected. + """ + + # Unset the client/async_client properties so there are no more references + # to them from the owning SDK instance and they can be reaped. + owner.client = None + owner.async_client = None + + if sync_client is not None and not sync_client_supplied: + try: + sync_client.close() + except Exception: + pass + + if async_client is not None and not async_client_supplied: + is_async = False + try: + asyncio.get_running_loop() + is_async = True + except RuntimeError: + pass + + try: + # If this function is called in an async loop then start another + # loop in a separate thread to close the async http client. + if is_async: + with ThreadPoolExecutor(max_workers=1) as executor: + future = executor.submit(asyncio.run, async_client.aclose()) + future.result() + else: + asyncio.run(async_client.aclose()) + except Exception: + pass diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py index 379a0dfe..ed9d9362 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py @@ -54,6 +54,8 @@ ) from .functionname import FunctionName, FunctionNameTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData +from .jsonschema import JSONSchema, JSONSchemaTypedDict +from .prediction import Prediction, PredictionTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats @@ -94,6 +96,7 @@ ValidationErrorTypedDict, ) + __all__ = [ "Arguments", "ArgumentsTypedDict", @@ -140,10 +143,14 @@ "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", + "JSONSchema", + "JSONSchemaTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", + "Prediction", + "PredictionTypedDict", "ReferenceChunk", "ReferenceChunkType", "ReferenceChunkTypedDict", diff --git a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py index 031677cf..530b33df 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py @@ -32,6 +32,7 @@ class AssistantMessageTypedDict(TypedDict): content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" role: NotRequired[AssistantMessageRole] @@ -41,6 +42,7 @@ class AssistantMessage(BaseModel): tool_calls: OptionalNullable[List[ToolCall]] = UNSET prefix: Optional[bool] = False + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: Optional[AssistantMessageRole] = "assistant" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py index 67c91bba..08c66467 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -70,7 +71,7 @@ class ChatCompletionRequestTypedDict(TypedDict): messages: List[ChatCompletionRequestMessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: NotRequired[Nullable[str]] + model: NotRequired[str] r"""The ID of the model to use for this request.""" temperature: NotRequired[Nullable[float]] r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" @@ -93,6 +94,7 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -101,7 +103,7 @@ class ChatCompletionRequest(BaseModel): messages: List[ChatCompletionRequestMessages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: OptionalNullable[str] = "azureai" + model: Optional[str] = "azureai" r"""The ID of the model to use for this request.""" temperature: OptionalNullable[float] = UNSET @@ -137,6 +139,8 @@ class ChatCompletionRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: Optional[Prediction] = None + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -156,16 +160,10 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", + "prediction", "safe_prompt", ] - nullable_fields = [ - "model", - "temperature", - "max_tokens", - "random_seed", - "tools", - "n", - ] + nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py index 465647eb..a2eec92b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -66,7 +67,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): messages: List[MessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: NotRequired[Nullable[str]] + model: NotRequired[str] r"""The ID of the model to use for this request.""" temperature: NotRequired[Nullable[float]] r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" @@ -88,6 +89,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -96,7 +98,7 @@ class ChatCompletionStreamRequest(BaseModel): messages: List[Messages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: OptionalNullable[str] = "azureai" + model: Optional[str] = "azureai" r"""The ID of the model to use for this request.""" temperature: OptionalNullable[float] = UNSET @@ -131,6 +133,8 @@ class ChatCompletionStreamRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: Optional[Prediction] = None + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -150,16 +154,10 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", + "prediction", "safe_prompt", ] - nullable_fields = [ - "model", - "temperature", - "max_tokens", - "random_seed", - "tools", - "n", - ] + nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/function.py b/packages/mistralai_azure/src/mistralai_azure/models/function.py index 488cdcea..e6ea8495 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/function.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/function.py @@ -10,6 +10,7 @@ class FunctionTypedDict(TypedDict): name: str parameters: Dict[str, Any] description: NotRequired[str] + strict: NotRequired[bool] class Function(BaseModel): @@ -18,3 +19,5 @@ class Function(BaseModel): parameters: Dict[str, Any] description: Optional[str] = "" + + strict: Optional[bool] = False diff --git a/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py b/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py new file mode 100644 index 00000000..210417c7 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JSONSchemaTypedDict(TypedDict): + name: str + schema_definition: Dict[str, Any] + description: NotRequired[Nullable[str]] + strict: NotRequired[bool] + + +class JSONSchema(BaseModel): + name: str + + schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] + + description: OptionalNullable[str] = UNSET + + strict: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["description", "strict"] + nullable_fields = ["description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/prediction.py b/packages/mistralai_azure/src/mistralai_azure/models/prediction.py new file mode 100644 index 00000000..888337d3 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/prediction.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel +from mistralai_azure.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class PredictionTypedDict(TypedDict): + type: Literal["content"] + content: NotRequired[str] + + +class Prediction(BaseModel): + TYPE: Annotated[ + Annotated[ + Optional[Literal["content"]], AfterValidator(validate_const("content")) + ], + pydantic.Field(alias="type"), + ] = "content" + + content: Optional[str] = "" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py index e4a9d7dd..cfd58dcf 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py @@ -1,8 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .jsonschema import JSONSchema, JSONSchemaTypedDict from .responseformats import ResponseFormats -from mistralai_azure.types import BaseModel +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -10,8 +18,41 @@ class ResponseFormatTypedDict(TypedDict): type: NotRequired[ResponseFormats] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] class ResponseFormat(BaseModel): type: Optional[ResponseFormats] = None r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + + json_schema: OptionalNullable[JSONSchema] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "json_schema"] + nullable_fields = ["json_schema"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py index 2c06b812..08c39951 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py @@ -4,5 +4,5 @@ from typing import Literal -ResponseFormats = Literal["text", "json_object"] +ResponseFormats = Literal["text", "json_object", "json_schema"] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py index 69b47310..6ccdcaa2 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py @@ -14,6 +14,7 @@ class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] + index: NotRequired[int] class ToolCall(BaseModel): @@ -24,3 +25,5 @@ class ToolCall(BaseModel): type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( None ) + + index: Optional[int] = 0 diff --git a/packages/mistralai_azure/src/mistralai_azure/sdk.py b/packages/mistralai_azure/src/mistralai_azure/sdk.py index a83faa7b..8379e55f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdk.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdk.py @@ -1,17 +1,19 @@ -"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from typing import Any, Callable, Dict, Optional, Union +import weakref +from typing import Any, Callable, Dict, Optional, Union, cast import httpx + from mistralai_azure import models, utils from mistralai_azure._hooks import SDKHooks from mistralai_azure.chat import Chat -from mistralai_azure.types import Nullable +from mistralai_azure.types import UNSET, OptionalNullable from .basesdk import BaseSDK -from .httpclient import AsyncHttpClient, HttpClient +from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients from .sdkconfiguration import SDKConfiguration -from .utils.logger import Logger, NoOpLogger +from .utils.logger import Logger, get_default_logger from .utils.retries import RetryConfig @@ -19,7 +21,7 @@ class MistralAzure(BaseSDK): r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" chat: Chat - r"""Chat Completion API""" + r"""Chat Completion API.""" def __init__( self, @@ -28,7 +30,8 @@ def __init__( url_params: Optional[Dict[str, str]] = None, client: Optional[HttpClient] = None, async_client: Optional[AsyncHttpClient] = None, - retry_config: Optional[Nullable[RetryConfig]] = None, + retry_config: OptionalNullable[RetryConfig] = UNSET, + timeout_ms: Optional[int] = None, debug_logger: Optional[Logger] = None, ) -> None: r"""Instantiates the SDK configuring it with the provided parameters. @@ -39,7 +42,9 @@ def __init__( :param client: The HTTP client to use for all synchronous methods :param async_client: The Async HTTP client to use for all asynchronous methods :param retry_config: The retry configuration to use for all supported methods + :param timeout_ms: Optional request timeout applied to each operation in milliseconds """ + # if azure_endpoint doesn't end with `/v1` add it if not azure_endpoint.endswith("/"): azure_endpoint += "/" @@ -47,28 +52,30 @@ def __init__( azure_endpoint += "v1/" server_url = azure_endpoint + client_supplied = True if client is None: client = httpx.Client() + client_supplied = False assert issubclass( type(client), HttpClient ), "The provided client must implement the HttpClient protocol." + async_client_supplied = True if async_client is None: async_client = httpx.AsyncClient() + async_client_supplied = False + + if debug_logger is None: + debug_logger = get_default_logger() assert issubclass( type(async_client), AsyncHttpClient ), "The provided async_client must implement the AsyncHttpClient protocol." 
- if debug_logger is None: - debug_logger = NoOpLogger() - security: Any = None if callable(azure_api_key): - security = lambda: models.Security( # pylint: disable=unnecessary-lambda-assignment - api_key=azure_api_key() - ) + security = lambda: models.Security(api_key=azure_api_key()) # pylint: disable=unnecessary-lambda-assignment else: security = models.Security(api_key=azure_api_key) @@ -80,11 +87,14 @@ def __init__( self, SDKConfiguration( client=client, + client_supplied=client_supplied, async_client=async_client, + async_client_supplied=async_client_supplied, security=security, server_url=server_url, server=None, retry_config=retry_config, + timeout_ms=timeout_ms, debug_logger=debug_logger, ), ) @@ -93,7 +103,7 @@ def __init__( current_server_url, *_ = self.sdk_configuration.get_server_details() server_url, self.sdk_configuration.client = hooks.sdk_init( - current_server_url, self.sdk_configuration.client + current_server_url, client ) if current_server_url != server_url: self.sdk_configuration.server_url = server_url @@ -101,7 +111,39 @@ def __init__( # pylint: disable=protected-access self.sdk_configuration.__dict__["_hooks"] = hooks + weakref.finalize( + self, + close_clients, + cast(ClientOwner, self.sdk_configuration), + self.sdk_configuration.client, + self.sdk_configuration.client_supplied, + self.sdk_configuration.async_client, + self.sdk_configuration.async_client_supplied, + ) + self._init_sdks() def _init_sdks(self): self.chat = Chat(self.sdk_configuration) + + def __enter__(self): + return self + + async def __aenter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if ( + self.sdk_configuration.client is not None + and not self.sdk_configuration.client_supplied + ): + self.sdk_configuration.client.close() + self.sdk_configuration.client = None + + async def __aexit__(self, exc_type, exc_val, exc_tb): + if ( + self.sdk_configuration.async_client is not None + and not self.sdk_configuration.async_client_supplied + ): + await self.sdk_configuration.async_client.aclose() + self.sdk_configuration.async_client = None diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py index 73b8d517..605e5d74 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py @@ -1,6 +1,12 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from ._hooks import SDKHooks +from ._version import ( + __gen_version__, + __openapi_doc_version__, + __user_agent__, + __version__, +) from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix from dataclasses import dataclass @@ -20,17 +26,19 @@ @dataclass class SDKConfiguration: - client: HttpClient - async_client: AsyncHttpClient + client: Union[HttpClient, None] + client_supplied: bool + async_client: Union[AsyncHttpClient, None] + async_client_supplied: bool debug_logger: Logger security: Optional[Union[models.Security, Callable[[], models.Security]]] = None server_url: Optional[str] = "" server: Optional[str] = "" language: str = "python" - openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.6" - gen_version: str = "2.486.1" - user_agent: str = "speakeasy-sdk/python 1.2.6 2.486.1 0.0.2 mistralai_azure" + openapi_doc_version: str = __openapi_doc_version__ + sdk_version: str = __version__ + gen_version: str = __gen_version__ + user_agent: str = __user_agent__ retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py index 26d51ae8..3cded8fe 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py @@ -42,6 +42,7 @@ match_content_type, match_status_codes, match_response, + cast_partial, ) from .logger import Logger, get_body_content, get_default_logger @@ -94,4 +95,5 @@ "validate_float", "validate_int", "validate_open_enum", + "cast_partial", ] diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py index c5eb3659..baa41fbd 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py @@ -7,14 +7,15 @@ from typing_extensions import get_origin from pydantic import ConfigDict, create_model from pydantic_core import from_json -from typing_inspect import is_optional_type +from typing_inspection.typing_objects import is_union from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset def serialize_decimal(as_str: bool): def serialize(d): - if is_optional_type(type(d)) and d is None: + # Optional[T] is a Union[T, None] + if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: return None if isinstance(d, Unset): return d @@ -42,7 +43,8 @@ def validate_decimal(d): def serialize_float(as_str: bool): def serialize(f): - if is_optional_type(type(f)) and f is None: + # Optional[T] is a Union[T, None] + if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: return None if isinstance(f, Unset): return f @@ -70,7 +72,8 @@ def validate_float(f): def serialize_int(as_str: bool): def serialize(i): - if is_optional_type(type(i)) and i is None: + # Optional[T] is a Union[T, None] + if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: return None if isinstance(i, Unset): return i @@ -118,7 +121,8 @@ def validate(e): def validate_const(v): def validate(c): - if is_optional_type(type(c)) and c is None: + # Optional[T] is a Union[T, None] + if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: return None if v != c: @@ -163,7 +167,7 @@ def marshal_json(val, typ): if len(d) == 0: return "" - 
return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True) + return json.dumps(d[next(iter(d))], separators=(",", ":")) def is_nullable(field): diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/values.py b/packages/mistralai_azure/src/mistralai_azure/utils/values.py index 2b4b6832..dae01a44 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/values.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/values.py @@ -3,8 +3,9 @@ from datetime import datetime from enum import Enum from email.message import Message +from functools import partial import os -from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast from httpx import Response from pydantic import BaseModel @@ -51,6 +52,8 @@ def match_status_codes(status_codes: List[str], status_code: int) -> bool: T = TypeVar("T") +def cast_partial(typ): + return partial(cast, typ) def get_global_from_env( value: Optional[T], env_key: str, type_cast: Callable[[str], T] diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index f74b9759..eab663b0 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,34 +1,34 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: 849dde0ef239604ca71711ffc1220b54 + docChecksum: 0e9c7ff94b487395628de6c99a5954ce docVersion: 0.0.2 - speakeasyVersion: 1.462.2 - generationVersion: 2.486.1 + speakeasyVersion: 1.517.3 + generationVersion: 2.548.6 releaseVersion: 1.2.6 - configChecksum: ba11718a5b49fb4a979ae9693a68b191 + configChecksum: 31db65297a20fe8af5b30effb1421b52 published: true features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.7.4 + core: 5.12.3 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 - examples: 3.0.0 + examples: 3.0.1 flatRequests: 1.0.1 - globalSecurity: 3.0.2 + globalSecurity: 3.0.3 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.1.0 methodArguments: 1.0.2 nameOverrides: 3.0.1 - nullables: 1.0.0 + nullables: 1.0.1 openEnums: 1.0.0 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.0 + sdkHooks: 1.0.1 serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 @@ -66,8 +66,10 @@ generatedFiles: - docs/models/functioncall.md - docs/models/functionname.md - docs/models/httpvalidationerror.md + - docs/models/jsonschema.md - docs/models/loc.md - docs/models/messages.md + - docs/models/prediction.md - docs/models/referencechunk.md - docs/models/referencechunktype.md - docs/models/responseformat.md @@ -97,7 +99,7 @@ generatedFiles: - py.typed - pylintrc - pyproject.toml - - scripts/prepare-readme.py + - scripts/prepare_readme.py - scripts/publish.sh - src/mistralai_gcp/__init__.py - src/mistralai_gcp/_hooks/__init__.py @@ -126,6 +128,8 @@ generatedFiles: - src/mistralai_gcp/models/functioncall.py - src/mistralai_gcp/models/functionname.py - src/mistralai_gcp/models/httpvalidationerror.py + - src/mistralai_gcp/models/jsonschema.py + - src/mistralai_gcp/models/prediction.py - src/mistralai_gcp/models/referencechunk.py - src/mistralai_gcp/models/responseformat.py - src/mistralai_gcp/models/responseformats.py @@ -143,6 +147,7 @@ generatedFiles: - src/mistralai_gcp/models/usermessage.py - src/mistralai_gcp/models/validationerror.py - src/mistralai_gcp/py.typed + - src/mistralai_gcp/sdk.py - src/mistralai_gcp/sdkconfiguration.py - 
src/mistralai_gcp/types/__init__.py - src/mistralai_gcp/types/basemodel.py @@ -165,33 +170,36 @@ examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "mistral-small-latest", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "422": application/json: {} + "200": {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "mistral-small-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} "422": application/json: {} stream_fim: speakeasy-default-stream-fim: requestBody: - application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"} + application/json: {"model": "codestral-2405", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} responses: "422": application/json: {} + "200": {} fim_completion_v1_fim_completions_post: speakeasy-default-fim-completion-v1-fim-completions-post: requestBody: - application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"} + application/json: {"model": "codestral-2405", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} "422": application/json: {} +examplesVersion: 1.0.0 generatedTests: {} diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index afa4d1d0..572d3ed1 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -7,8 +7,10 @@ generation: 
useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true + nameResolutionFeb2025: false parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true + securityFeb2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false @@ -26,6 +28,7 @@ python: clientServerStatusCodesAsErrors: true defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in GCP. + enableCustomCodeRegions: false enumFormat: union fixFlags: responseRequiredSep2024: false @@ -45,5 +48,6 @@ python: methodArguments: infer-optional-args outputModelSuffix: output packageName: mistralai-gcp + pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/packages/mistralai_gcp/docs/models/assistantmessage.md b/packages/mistralai_gcp/docs/models/assistantmessage.md index 53f1cc76..3d0bd90b 100644 --- a/packages/mistralai_gcp/docs/models/assistantmessage.md +++ b/packages/mistralai_gcp/docs/models/assistantmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md index abc83281..3e9de262 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | | `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -18,4 +18,5 @@ | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | \ No newline at end of file +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md index 863c0229..8200f8a3 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | | `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -18,4 +18,5 @@ | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | \ No newline at end of file +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md index 236d2d21..7507b90c 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md index fa635932..6cc439c7 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/packages/mistralai_gcp/docs/models/function.md b/packages/mistralai_gcp/docs/models/function.md index 8af398f5..a166b7bb 100644 --- a/packages/mistralai_gcp/docs/models/function.md +++ b/packages/mistralai_gcp/docs/models/function.md @@ -7,4 +7,5 @@ | ------------------ | ------------------ | ------------------ | ------------------ | | `name` | *str* | :heavy_check_mark: | N/A | | `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | -| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/jsonschema.md b/packages/mistralai_gcp/docs/models/jsonschema.md new file mode 100644 index 00000000..ae387867 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/jsonschema.md @@ -0,0 +1,11 @@ +# JSONSchema + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/prediction.md b/packages/mistralai_gcp/docs/models/prediction.md new file mode 100644 index 00000000..86e9c396 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/prediction.md @@ -0,0 +1,9 @@ +# Prediction + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | +| `type` | *Optional[Literal["content"]]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/responseformat.md b/packages/mistralai_gcp/docs/models/responseformat.md index 9c627f55..23a1641b 100644 --- a/packages/mistralai_gcp/docs/models/responseformat.md +++ b/packages/mistralai_gcp/docs/models/responseformat.md @@ -5,4 +5,5 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | \ No newline at end of file +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/responseformats.md b/packages/mistralai_gcp/docs/models/responseformats.md index ce35fbb3..06886afe 100644 --- a/packages/mistralai_gcp/docs/models/responseformats.md +++ b/packages/mistralai_gcp/docs/models/responseformats.md @@ -8,4 +8,5 @@ An object specifying the format that the model must output. 
Setting to `{ "type" | Name | Value | | ------------- | ------------- | | `TEXT` | text | -| `JSON_OBJECT` | json_object | \ No newline at end of file +| `JSON_OBJECT` | json_object | +| `JSON_SCHEMA` | json_schema | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolcall.md b/packages/mistralai_gcp/docs/models/toolcall.md index 7aca5fc9..574be1ea 100644 --- a/packages/mistralai_gcp/docs/models/toolcall.md +++ b/packages/mistralai_gcp/docs/models/toolcall.md @@ -7,4 +7,5 @@ | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | | `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/pylintrc b/packages/mistralai_gcp/pylintrc index 393d0f70..266bc815 100644 --- a/packages/mistralai_gcp/pylintrc +++ b/packages/mistralai_gcp/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.8 +py-version=3.9 # Discover python modules and packages in the file system subtree. recursive=no @@ -455,7 +455,10 @@ disable=raw-checker-failed, bare-except, broad-exception-caught, fixme, - relative-beyond-top-level + relative-beyond-top-level, + consider-using-with, + wildcard-import, + unused-wildcard-import # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index 6692f1d5..21cf7eb9 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -1,9 +1,21 @@ -[tool.poetry] +[project] name = "mistralai-gcp" version = "1.2.6" description = "Python Client SDK for the Mistral AI API in GCP." 
-authors = ["Mistral",] +authors = [{ name = "Mistral" },] readme = "README-PYPI.md" +requires-python = ">=3.9" +dependencies = [ + "eval-type-backport >=0.2.0", + "google-auth (>=2.31.0,<3.0.0)", + "httpx >=0.28.1", + "pydantic >=2.10.3", + "python-dateutil >=2.8.2", + "requests (>=2.32.3,<3.0.0)", + "typing-inspection >=0.4.0", +] + +[tool.poetry] packages = [ { include = "mistralai_gcp", from = "src" } ] @@ -15,19 +27,8 @@ include = ["py.typed", "src/mistralai_gcp/py.typed"] [virtualenvs] in-project = true -[tool.poetry.dependencies] -python = "^3.8" -eval-type-backport = "^0.2.0" -google-auth = "2.27.0" -httpx = "^0.28.1" -jsonpath-python = "^1.0.6" -pydantic = "~2.10.3" -python-dateutil = "^2.8.2" -requests = "^2.32.3" -typing-inspect = "^0.9.0" - [tool.poetry.group.dev.dependencies] -mypy = "==1.13.0" +mypy = "==1.14.1" pylint = "==3.2.3" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" @@ -38,6 +39,7 @@ requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" [tool.pytest.ini_options] +asyncio_default_fixture_loop_scope = "function" pythonpath = ["src"] [tool.mypy] diff --git a/packages/mistralai_gcp/scripts/prepare-readme.py b/packages/mistralai_gcp/scripts/prepare_readme.py similarity index 100% rename from packages/mistralai_gcp/scripts/prepare-readme.py rename to packages/mistralai_gcp/scripts/prepare_readme.py diff --git a/packages/mistralai_gcp/scripts/publish.sh b/packages/mistralai_gcp/scripts/publish.sh index ab45b1f9..f2f2cf2c 100755 --- a/packages/mistralai_gcp/scripts/publish.sh +++ b/packages/mistralai_gcp/scripts/publish.sh @@ -2,6 +2,6 @@ export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} -poetry run python scripts/prepare-readme.py +poetry run python scripts/prepare_readme.py poetry publish --build --skip-existing diff --git a/packages/mistralai_gcp/src/mistralai_gcp/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/__init__.py index a1b7f626..dd02e42e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/__init__.py @@ -1,9 +1,18 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from ._version import __title__, __version__ +from ._version import ( + __title__, + __version__, + __openapi_doc_version__, + __gen_version__, + __user_agent__, +) from .sdk import * from .sdkconfiguration import * from .models import * VERSION: str = __version__ +OPENAPI_DOC_VERSION = __openapi_doc_version__ +SPEAKEASY_GENERATOR_VERSION = __gen_version__ +USER_AGENT = __user_agent__ diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py index 417126fd..bb867b5b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py @@ -7,16 +7,19 @@ class HookContext: + base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None security_source: Optional[Union[Any, Callable[[], Any]]] = None def __init__( self, + base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): + self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes self.security_source = security_source @@ -25,21 +28,30 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_version.py b/packages/mistralai_gcp/src/mistralai_gcp/_version.py index 30081f34..32be746c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_version.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_version.py @@ -4,6 +4,9 @@ __title__: str = "mistralai-gcp" __version__: str = "1.2.6" +__openapi_doc_version__: str = "0.0.2" +__gen_version__: str = "2.548.6" +__user_agent__: str = "speakeasy-sdk/python 1.2.6 2.548.6 0.0.2 mistralai-gcp" try: if __package__ is not None: diff --git a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py index 40620018..bb0aab96 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py @@ -231,6 +231,10 @@ def do(): req.headers, get_body_content(req), ) + + if client is None: + raise ValueError("client is required") + http_res = client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( @@ -303,6 +307,10 @@ async def do(): req.headers, get_body_content(req), ) + + if client is None: + raise ValueError("client is required") + http_res = await client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py index 47e5b63a..f162d2f7 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ 
b/packages/mistralai_gcp/src/mistralai_gcp/chat.py @@ -3,7 +3,7 @@ from .basesdk import BaseSDK from mistralai_gcp import models, utils from mistralai_gcp._hooks import HookContext -from mistralai_gcp.types import Nullable, OptionalNullable, UNSET +from mistralai_gcp.types import OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming from typing import Any, List, Mapping, Optional, Union @@ -14,7 +14,7 @@ class Chat(BaseSDK): def stream( self, *, - model: Nullable[str], + model: str, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -37,6 +37,9 @@ def stream( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -60,6 +63,7 @@ def stream( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -72,6 +76,8 @@ def stream( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -92,6 +98,9 @@ def stream( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), ) req = self._build_request( @@ -123,6 +132,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -133,7 +143,7 @@ def stream( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -142,9 +152,16 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = 
utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -162,7 +179,7 @@ def stream( async def stream_async( self, *, - model: Nullable[str], + model: str, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -185,6 +202,9 @@ async def stream_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -208,6 +228,7 @@ async def stream_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -220,6 +241,8 @@ async def stream_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -240,6 +263,9 @@ async def stream_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), ) req = self._build_request_async( @@ -271,6 +297,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -281,7 +308,7 @@ async def stream_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -290,9 +317,16 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -310,7 +344,7 @@ async def 
stream_async( def complete( self, *, - model: Nullable[str], + model: str, messages: Union[ List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], @@ -341,6 +375,9 @@ def complete( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -362,6 +399,7 @@ def complete( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -374,6 +412,8 @@ def complete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -396,6 +436,9 @@ def complete( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), ) req = self._build_request( @@ -427,6 +470,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -436,15 +480,22 @@ def complete( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.ChatCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -462,7 +513,7 @@ def complete( async def complete_async( self, *, - model: Nullable[str], + model: str, messages: Union[ List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], @@ -493,6 +544,9 @@ async def complete_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: 
OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -514,6 +568,7 @@ async def complete_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -526,6 +581,8 @@ async def complete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -548,6 +605,9 @@ async def complete_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), ) req = self._build_request_async( @@ -579,6 +639,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -588,15 +649,22 @@ async def complete_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.ChatCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py b/packages/mistralai_gcp/src/mistralai_gcp/fim.py index 89146a4a..84821c6a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/fim.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/fim.py @@ -3,7 +3,7 @@ from .basesdk import BaseSDK from mistralai_gcp import models, utils from mistralai_gcp._hooks import HookContext -from mistralai_gcp.types import Nullable, OptionalNullable, UNSET +from mistralai_gcp.types import OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming from typing import Any, 
Mapping, Optional, Union @@ -14,7 +14,7 @@ class Fim(BaseSDK): def stream( self, *, - model: Nullable[str], + model: str, prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -60,6 +60,8 @@ def stream( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionStreamRequest( model=model, @@ -103,6 +105,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -113,7 +116,7 @@ def stream( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -122,9 +125,16 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -142,7 +152,7 @@ def stream( async def stream_async( self, *, - model: Nullable[str], + model: str, prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -188,6 +198,8 @@ async def stream_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionStreamRequest( model=model, @@ -231,6 +243,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -241,7 +254,7 @@ async def stream_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -250,9 +263,16 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -270,7 +290,7 @@ async def stream_async( def complete( self, *, - model: Nullable[str], + model: str, 
prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -316,6 +336,8 @@ def complete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionRequest( model=model, @@ -359,6 +381,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -368,15 +391,22 @@ def complete( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.FIMCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -394,7 +424,7 @@ def complete( async def complete_async( self, *, - model: Nullable[str], + model: str, prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -440,6 +470,8 @@ async def complete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionRequest( model=model, @@ -483,6 +515,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -492,15 +525,22 @@ async def complete_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.FIMCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py index 167cea4e..1e426352 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py 
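Before the httpclient.py changes below: the fim.py signature updates above make `model` a required plain `str` and default `top_p` to 1. A minimal usage sketch under those signatures (the `sdk` client instance is assumed, not shown in this diff):

# Fill-in-the-middle: the model writes the code between `prompt` and
# `suffix`; only the codestral models listed in the docs above are accepted.
res = sdk.fim.complete(
    model="codestral-2405",
    prompt="def add(a, b):",
    suffix="return a + b",
)
if res is not None and res.choices:
    print(res.choices[0].message.content)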
@@ -1,6 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" # pyright: reportReturnType = false +import asyncio +from concurrent.futures import ThreadPoolExecutor from typing_extensions import Protocol, runtime_checkable import httpx from typing import Any, Optional, Union @@ -82,3 +84,53 @@ def build_request( async def aclose(self) -> None: pass + + +class ClientOwner(Protocol): + client: Union[HttpClient, None] + async_client: Union[AsyncHttpClient, None] + + +def close_clients( + owner: ClientOwner, + sync_client: Union[HttpClient, None], + sync_client_supplied: bool, + async_client: Union[AsyncHttpClient, None], + async_client_supplied: bool, +) -> None: + """ + A finalizer function that is meant to be used with weakref.finalize to close + httpx clients used by an SDK so that underlying resources can be garbage + collected. + """ + + # Unset the client/async_client properties so there are no more references + # to them from the owning SDK instance and they can be reaped. + owner.client = None + owner.async_client = None + + if sync_client is not None and not sync_client_supplied: + try: + sync_client.close() + except Exception: + pass + + if async_client is not None and not async_client_supplied: + is_async = False + try: + asyncio.get_running_loop() + is_async = True + except RuntimeError: + pass + + try: + # If this function is called in an async loop then start another + # loop in a separate thread to close the async http client. + if is_async: + with ThreadPoolExecutor(max_workers=1) as executor: + future = executor.submit(asyncio.run, async_client.aclose()) + future.result() + else: + asyncio.run(async_client.aclose()) + except Exception: + pass diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py index f3c6ce7e..154777da 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py @@ -67,6 +67,8 @@ ) from .functionname import FunctionName, FunctionNameTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData +from .jsonschema import JSONSchema, JSONSchemaTypedDict +from .prediction import Prediction, PredictionTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats @@ -107,6 +109,7 @@ ValidationErrorTypedDict, ) + __all__ = [ "Arguments", "ArgumentsTypedDict", @@ -163,10 +166,14 @@ "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", + "JSONSchema", + "JSONSchemaTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", + "Prediction", + "PredictionTypedDict", "ReferenceChunk", "ReferenceChunkType", "ReferenceChunkTypedDict", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py index 6a9b58f2..9147f566 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py @@ -32,6 +32,7 @@ class AssistantMessageTypedDict(TypedDict): content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer with the content of the message.""" role: NotRequired[AssistantMessageRole] @@ -41,6 +42,7 @@ class AssistantMessage(BaseModel): tool_calls: OptionalNullable[List[ToolCall]] = UNSET prefix: Optional[bool] = False + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer with the content of the message.""" role: Optional[AssistantMessageRole] = "assistant" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py index ab97e52a..60a37f2f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -68,7 +69,7 @@ class ChatCompletionRequestTypedDict(TypedDict): - model: Nullable[str] + model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionRequestMessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" @@ -93,10 +94,11 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] class ChatCompletionRequest(BaseModel): - model: Nullable[str] + model: str r"""ID of the model to use.
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionRequestMessages] @@ -135,6 +137,8 @@ class ChatCompletionRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: Optional[Prediction] = None + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -150,15 +154,9 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", + "prediction", ] - nullable_fields = [ - "model", - "temperature", - "max_tokens", - "random_seed", - "tools", - "n", - ] + nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py index e6c5429b..f2041426 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -64,7 +65,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): - model: Nullable[str] + model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[MessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" @@ -88,10 +89,11 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] class ChatCompletionStreamRequest(BaseModel): - model: Nullable[str] + model: str r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[Messages] @@ -129,6 +131,8 @@ class ChatCompletionStreamRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: Optional[Prediction] = None + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -144,15 +148,9 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", + "prediction", ] - nullable_fields = [ - "model", - "temperature", - "max_tokens", - "random_seed", - "tools", - "n", - ] + nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py index 81c87b7e..6dfb7373 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py @@ -26,7 +26,7 @@ class FIMCompletionRequestTypedDict(TypedDict): - model: Nullable[str] + model: str r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -52,7 +52,7 @@ class FIMCompletionRequestTypedDict(TypedDict): class FIMCompletionRequest(BaseModel): - model: Nullable[str] + model: str r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -98,7 +98,6 @@ def serialize_model(self, handler): "min_tokens", ] nullable_fields = [ - "model", "temperature", "max_tokens", "random_seed", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py index 356758d3..406749bb 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py @@ -26,7 +26,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): - model: Nullable[str] + model: str r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -51,7 +51,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): class FIMCompletionStreamRequest(BaseModel): - model: Nullable[str] + model: str r"""ID of the model to use. 
Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -96,7 +96,6 @@ def serialize_model(self, handler): "min_tokens", ] nullable_fields = [ - "model", "temperature", "max_tokens", "random_seed", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py index c3168eec..3d61e624 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py @@ -10,6 +10,7 @@ class FunctionTypedDict(TypedDict): name: str parameters: Dict[str, Any] description: NotRequired[str] + strict: NotRequired[bool] class Function(BaseModel): @@ -18,3 +19,5 @@ class Function(BaseModel): parameters: Dict[str, Any] description: Optional[str] = "" + + strict: Optional[bool] = False diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py b/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py new file mode 100644 index 00000000..2c6bd478 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JSONSchemaTypedDict(TypedDict): + name: str + schema_definition: Dict[str, Any] + description: NotRequired[Nullable[str]] + strict: NotRequired[bool] + + +class JSONSchema(BaseModel): + name: str + + schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] + + description: OptionalNullable[str] = UNSET + + strict: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["description", "strict"] + nullable_fields = ["description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py b/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py new file mode 100644 index 00000000..742aac0b --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +from mistralai_gcp.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class PredictionTypedDict(TypedDict): + type: Literal["content"] + content: NotRequired[str] + + +class Prediction(BaseModel): + TYPE: Annotated[ + Annotated[ + Optional[Literal["content"]], AfterValidator(validate_const("content")) + ], + pydantic.Field(alias="type"), + ] = "content" + + content: Optional[str] = "" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py index fde89862..5a24f644 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py @@ -1,8 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .jsonschema import JSONSchema, JSONSchemaTypedDict from .responseformats import ResponseFormats -from mistralai_gcp.types import BaseModel +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -10,8 +18,41 @@ class ResponseFormatTypedDict(TypedDict): type: NotRequired[ResponseFormats] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] class ResponseFormat(BaseModel): type: Optional[ResponseFormats] = None r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + + json_schema: OptionalNullable[JSONSchema] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "json_schema"] + nullable_fields = ["json_schema"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py index 2c06b812..08c39951 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py @@ -4,5 +4,5 @@ from typing import Literal -ResponseFormats = Literal["text", "json_object"] +ResponseFormats = Literal["text", "json_object", "json_schema"] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py index 5b4b217a..ecbac8d6 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py @@ -14,6 +14,7 @@ class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] + index: NotRequired[int] class ToolCall(BaseModel): @@ -24,3 +25,5 @@ class ToolCall(BaseModel): type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( None ) + + index: Optional[int] = 0 diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py index abfea8db..dd93cc7f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py @@ -1,23 +1,25 @@ -"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" import json -from typing import Optional, Tuple, Union +import weakref +from typing import Any, Optional, cast import google.auth import google.auth.credentials import google.auth.transport import google.auth.transport.requests import httpx + from mistralai_gcp import models from mistralai_gcp._hooks import BeforeRequestHook, SDKHooks from mistralai_gcp.chat import Chat from mistralai_gcp.fim import Fim -from mistralai_gcp.types import Nullable +from mistralai_gcp.types import UNSET, OptionalNullable from .basesdk import BaseSDK -from .httpclient import AsyncHttpClient, HttpClient +from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients from .sdkconfiguration import SDKConfiguration -from .utils.logger import Logger, NoOpLogger +from .utils.logger import Logger, get_default_logger from .utils.retries import RetryConfig LEGACY_MODEL_ID_FORMAT = { @@ -26,20 +28,21 @@ "mistral-nemo-2407": "mistral-nemo@2407", } -def get_model_info(model: str) -> Tuple[str, str]: + +def get_model_info(model: str) -> tuple[str, str]: # if the model requires the legacy format, use it; otherwise do nothing. if model in LEGACY_MODEL_ID_FORMAT: return "-".join(model.split("-")[:-1]), LEGACY_MODEL_ID_FORMAT[model] return model, model - class MistralGoogleCloud(BaseSDK): r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" chat: Chat + r"""Chat Completion API.""" fim: Fim - r"""Chat Completion API""" + r"""Fill-in-the-middle API.""" def __init__( self, @@ -48,16 +51,20 @@ def __init__( access_token: Optional[str] = None, client: Optional[HttpClient] = None, async_client: Optional[AsyncHttpClient] = None, - retry_config: Optional[Nullable[RetryConfig]] = None, + retry_config: OptionalNullable[RetryConfig] = UNSET, + timeout_ms: Optional[int] = None, debug_logger: Optional[Logger] = None, ) -> None: r"""Instantiates the SDK, configuring it with the provided parameters. :param region: The Google Cloud region to use for all methods :param project_id: The project ID to use for all methods + :param access_token: Optional access token to use in place of application default credentials :param client: The HTTP client to use for all synchronous methods :param async_client: The Async HTTP client to use for all asynchronous methods :param retry_config: The retry configuration to use for all supported methods + :param timeout_ms: Optional request timeout applied to each operation in milliseconds + :param debug_logger: The logger to use for debugging """ if not access_token: @@ -72,36 +79,42 @@ ) project_id = project_id or loaded_project_id + if project_id is None: raise models.SDKError("project_id must be provided") def auth_token() -> str: if access_token: return access_token + credentials.refresh(google.auth.transport.requests.Request()) token = credentials.token if not token: raise models.SDKError("Failed to get token from credentials") return token + client_supplied = True if client is None: client = httpx.Client() + client_supplied = False assert issubclass( type(client), HttpClient ), "The provided client must implement the HttpClient protocol."
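[Reviewer note] `client_supplied` is set just above, and `async_client_supplied` follows below: the SDK now records whether each httpx client was passed in by the caller, and the `close_clients` finalizer registered later in this constructor only closes clients the SDK created itself. A short sketch of the intended ownership rule, with placeholder region/project values:

    # Sketch: caller-owned clients survive SDK teardown; SDK-created ones do not.
    import httpx
    from mistralai_gcp import MistralGoogleCloud

    shared = httpx.Client(timeout=30.0)  # owned by the caller
    sdk = MistralGoogleCloud(
        region="europe-west4", project_id="my-project", client=shared
    )
    del sdk         # weakref.finalize closes only SDK-created clients
    shared.close()  # the caller stays responsible for its own client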
+ async_client_supplied = True if async_client is None: async_client = httpx.AsyncClient() + async_client_supplied = False if debug_logger is None: - debug_logger = NoOpLogger() + debug_logger = get_default_logger() assert issubclass( type(async_client), AsyncHttpClient ), "The provided async_client must implement the AsyncHttpClient protocol." - security = None + security: Any = None if callable(auth_token): security = lambda: models.Security( # pylint: disable=unnecessary-lambda-assignment api_key=auth_token() @@ -113,23 +126,24 @@ def auth_token() -> str: self, SDKConfiguration( client=client, + client_supplied=client_supplied, async_client=async_client, + async_client_supplied=async_client_supplied, security=security, server_url=f"https://{region}-aiplatform.googleapis.com", server=None, retry_config=retry_config, + timeout_ms=timeout_ms, debug_logger=debug_logger, ), ) hooks = SDKHooks() - hook = GoogleCloudBeforeRequestHook(region, project_id) hooks.register_before_request_hook(hook) - current_server_url, *_ = self.sdk_configuration.get_server_details() server_url, self.sdk_configuration.client = hooks.sdk_init( - current_server_url, self.sdk_configuration.client + current_server_url, client ) if current_server_url != server_url: self.sdk_configuration.server_url = server_url @@ -137,22 +151,53 @@ def auth_token() -> str: # pylint: disable=protected-access self.sdk_configuration.__dict__["_hooks"] = hooks + weakref.finalize( + self, + close_clients, + cast(ClientOwner, self.sdk_configuration), + self.sdk_configuration.client, + self.sdk_configuration.client_supplied, + self.sdk_configuration.async_client, + self.sdk_configuration.async_client_supplied, + ) + self._init_sdks() def _init_sdks(self): self.chat = Chat(self.sdk_configuration) self.fim = Fim(self.sdk_configuration) + def __enter__(self): + return self -class GoogleCloudBeforeRequestHook(BeforeRequestHook): + async def __aenter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if ( + self.sdk_configuration.client is not None + and not self.sdk_configuration.client_supplied + ): + self.sdk_configuration.client.close() + self.sdk_configuration.client = None + async def __aexit__(self, exc_type, exc_val, exc_tb): + if ( + self.sdk_configuration.async_client is not None + and not self.sdk_configuration.async_client_supplied + ): + await self.sdk_configuration.async_client.aclose() + self.sdk_configuration.async_client = None + + +class GoogleCloudBeforeRequestHook(BeforeRequestHook): def __init__(self, region: str, project_id: str): self.region = region self.project_id = project_id def before_request( self, hook_ctx, request: httpx.Request - ) -> Union[httpx.Request, Exception]: + ) -> httpx.Request | Exception: # The goal of this function is to template in the region, project and model into the URL path # We do this here so that the API remains more user-friendly model_id = None @@ -167,7 +212,6 @@ def before_request( if model_id == "": raise models.SDKError("model must be provided") - stream = "streamRawPredict" in request.url.path specifier = "streamRawPredict" if stream else "rawPredict" url = f"/v1/projects/{self.project_id}/locations/{self.region}/publishers/mistralai/models/{model_id}:{specifier}" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py index 3c149cc6..c373d27d 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py 
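[Reviewer note] The `__enter__`/`__exit__` and `__aenter__`/`__aexit__` methods added to sdk.py above give deterministic cleanup instead of relying on the `weakref.finalize` fallback. A usage sketch; the model name and GCP values are placeholders, not from this diff:

    # Sketch: context-manager usage closes SDK-owned clients on exit.
    from mistralai_gcp import MistralGoogleCloud

    with MistralGoogleCloud(region="europe-west4", project_id="my-project") as sdk:
        res = sdk.chat.complete(
            model="mistral-large-2411",
            messages=[{"role": "user", "content": "Hello!"}],
        )
    # Async code would use: `async with MistralGoogleCloud(...) as sdk: ...`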
@@ -1,6 +1,12 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from ._hooks import SDKHooks +from ._version import ( + __gen_version__, + __openapi_doc_version__, + __user_agent__, + __version__, +) from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix from dataclasses import dataclass @@ -20,17 +26,19 @@ @dataclass class SDKConfiguration: - client: HttpClient - async_client: AsyncHttpClient + client: Union[HttpClient, None] + client_supplied: bool + async_client: Union[AsyncHttpClient, None] + async_client_supplied: bool debug_logger: Logger security: Optional[Union[models.Security, Callable[[], models.Security]]] = None server_url: Optional[str] = "" server: Optional[str] = "" language: str = "python" - openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.6" - gen_version: str = "2.486.1" - user_agent: str = "speakeasy-sdk/python 1.2.6 2.486.1 0.0.2 mistralai-gcp" + openapi_doc_version: str = __openapi_doc_version__ + sdk_version: str = __version__ + gen_version: str = __gen_version__ + user_agent: str = __user_agent__ retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py index 26d51ae8..3cded8fe 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py @@ -42,6 +42,7 @@ match_content_type, match_status_codes, match_response, + cast_partial, ) from .logger import Logger, get_body_content, get_default_logger @@ -94,4 +95,5 @@ "validate_float", "validate_int", "validate_open_enum", + "cast_partial", ] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py index c5eb3659..baa41fbd 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py @@ -7,14 +7,15 @@ from typing_extensions import get_origin from pydantic import ConfigDict, create_model from pydantic_core import from_json -from typing_inspect import is_optional_type +from typing_inspection.typing_objects import is_union from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset def serialize_decimal(as_str: bool): def serialize(d): - if is_optional_type(type(d)) and d is None: + # Optional[T] is a Union[T, None] + if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: return None if isinstance(d, Unset): return d @@ -42,7 +43,8 @@ def validate_decimal(d): def serialize_float(as_str: bool): def serialize(f): - if is_optional_type(type(f)) and f is None: + # Optional[T] is a Union[T, None] + if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: return None if isinstance(f, Unset): return f @@ -70,7 +72,8 @@ def validate_float(f): def serialize_int(as_str: bool): def serialize(i): - if is_optional_type(type(i)) and i is None: + # Optional[T] is a Union[T, None] + if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: return None if isinstance(i, Unset): return i @@ -118,7 +121,8 @@ def validate(e): def validate_const(v): def validate(c): - if is_optional_type(type(c)) and c is None: + # Optional[T] is a Union[T, None] + if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: return None if v != c: @@ -163,7 +167,7 @@ def marshal_json(val, 
typ): if len(d) == 0: return "" - return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True) + return json.dumps(d[next(iter(d))], separators=(",", ":")) def is_nullable(field): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py index 2b4b6832..dae01a44 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py @@ -3,8 +3,9 @@ from datetime import datetime from enum import Enum from email.message import Message +from functools import partial import os -from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast from httpx import Response from pydantic import BaseModel @@ -51,6 +52,8 @@ def match_status_codes(status_codes: List[str], status_code: int) -> bool: T = TypeVar("T") +def cast_partial(typ): + return partial(cast, typ) def get_global_from_env( value: Optional[T], env_key: str, type_cast: Callable[[str], T] diff --git a/poetry.lock b/poetry.lock index 78003ff1..3d36b94f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -12,9 +12,6 @@ files = [ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] -[package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} - [[package]] name = "anyio" version = "4.5.2" @@ -315,14 +312,14 @@ trio = ["trio (>=0.22.0,<1.0)"] [[package]] name = "httpx" -version = "0.27.2" +version = "0.28.1" description = "The next generation HTTP client." optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, - {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, + {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, + {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, ] [package.dependencies] @@ -330,7 +327,6 @@ anyio = "*" certifi = "*" httpcore = "==1.*" idna = "*" -sniffio = "*" [package.extras] brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] @@ -381,18 +377,6 @@ files = [ [package.extras] colors = ["colorama (>=0.4.6)"] -[[package]] -name = "jsonpath-python" -version = "1.0.6" -description = "A more powerful JSONPath implementation in modern python" -optional = false -python-versions = ">=3.6" -groups = ["main"] -files = [ - {file = "jsonpath-python-1.0.6.tar.gz", hash = "sha256:dd5be4a72d8a2995c3f583cf82bf3cd1a9544cfdabf2d22595b67aff07349666"}, - {file = "jsonpath_python-1.0.6-py3-none-any.whl", hash = "sha256:1e3b78df579f5efc23565293612decee04214609208a2335884b3ee3f786b575"}, -] - [[package]] name = "mccabe" version = "0.7.0" @@ -407,50 +391,56 @@ files = [ [[package]] name = "mypy" -version = "1.13.0" +version = "1.14.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" groups = ["dev"] files = [ - {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"}, - {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"}, - {file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"}, - {file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"}, - {file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"}, - {file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"}, - {file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"}, - {file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"}, - {file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"}, - {file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"}, - {file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"}, - {file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"}, - {file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"}, - {file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"}, - {file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"}, - {file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"}, - {file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"}, - {file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"}, - {file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"}, - {file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"}, - {file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"}, - {file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"}, - {file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"}, - {file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"}, - {file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"}, - 
{file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"}, - {file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"}, - {file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"}, - {file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"}, - {file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"}, - {file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"}, - {file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"}, + {file = "mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb"}, + {file = "mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0"}, + {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d"}, + {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b"}, + {file = "mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427"}, + {file = "mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f"}, + {file = "mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c"}, + {file = "mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1"}, + {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8"}, + {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f"}, + {file = "mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1"}, + {file = "mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae"}, + {file = "mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14"}, + {file = "mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9"}, + {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11"}, + {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e"}, + {file = 
"mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89"}, + {file = "mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b"}, + {file = "mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255"}, + {file = "mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34"}, + {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a"}, + {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9"}, + {file = "mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd"}, + {file = "mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107"}, + {file = "mypy-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7084fb8f1128c76cd9cf68fe5971b37072598e7c31b2f9f95586b65c741a9d31"}, + {file = "mypy-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f845a00b4f420f693f870eaee5f3e2692fa84cc8514496114649cfa8fd5e2c6"}, + {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44bf464499f0e3a2d14d58b54674dee25c031703b2ffc35064bd0df2e0fac319"}, + {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c99f27732c0b7dc847adb21c9d47ce57eb48fa33a17bc6d7d5c5e9f9e7ae5bac"}, + {file = "mypy-1.14.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:bce23c7377b43602baa0bd22ea3265c49b9ff0b76eb315d6c34721af4cdf1d9b"}, + {file = "mypy-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:8edc07eeade7ebc771ff9cf6b211b9a7d93687ff892150cb5692e4f4272b0837"}, + {file = "mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35"}, + {file = "mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc"}, + {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9"}, + {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb"}, + {file = "mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60"}, + {file = "mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c"}, + {file = "mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1"}, + {file = "mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6"}, ] [package.dependencies] -mypy-extensions = ">=1.0.0" +mypy_extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.6.0" +typing_extensions = ">=4.6.0" [package.extras] 
dmypy = ["psutil (>=4.0)"] @@ -465,7 +455,7 @@ version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." optional = false python-versions = ">=3.5" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, @@ -547,19 +537,19 @@ pyasn1 = ">=0.4.6,<0.7.0" [[package]] name = "pydantic" -version = "2.10.2" +version = "2.10.6" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "pydantic-2.10.2-py3-none-any.whl", hash = "sha256:cfb96e45951117c3024e6b67b25cdc33a3cb7b2fa62e239f7af1378358a1d99e"}, - {file = "pydantic-2.10.2.tar.gz", hash = "sha256:2bc2d7f17232e0841cbba4641e65ba1eb6fafb3a08de3a091ff3ce14a197c4fa"}, + {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, + {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.27.1" +pydantic-core = "2.27.2" typing-extensions = ">=4.12.2" [package.extras] @@ -568,112 +558,112 @@ timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows [[package]] name = "pydantic-core" -version = "2.27.1" +version = "2.27.2" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a"}, - {file = "pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:121ceb0e822f79163dd4699e4c54f5ad38b157084d97b34de8b232bcaad70278"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4603137322c18eaf2e06a4495f426aa8d8388940f3c457e7548145011bb68e05"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a33cd6ad9017bbeaa9ed78a2e0752c5e250eafb9534f308e7a5f7849b0b1bfb4"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15cc53a3179ba0fcefe1e3ae50beb2784dede4003ad2dfd24f81bba4b23a454f"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45d9c5eb9273aa50999ad6adc6be5e0ecea7e09dbd0d31bd0c65a55a2592ca08"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8bf7b66ce12a2ac52d16f776b31d16d91033150266eb796967a7e4621707e4f6"}, - {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:655d7dd86f26cb15ce8a431036f66ce0318648f8853d709b4167786ec2fa4807"}, - {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:5556470f1a2157031e676f776c2bc20acd34c1990ca5f7e56f1ebf938b9ab57c"}, - {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f69ed81ab24d5a3bd93861c8c4436f54afdf8e8cc421562b0c7504cf3be58206"}, - {file = 
"pydantic_core-2.27.1-cp310-none-win32.whl", hash = "sha256:f5a823165e6d04ccea61a9f0576f345f8ce40ed533013580e087bd4d7442b52c"}, - {file = "pydantic_core-2.27.1-cp310-none-win_amd64.whl", hash = "sha256:57866a76e0b3823e0b56692d1a0bf722bffb324839bb5b7226a7dbd6c9a40b17"}, - {file = "pydantic_core-2.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac3b20653bdbe160febbea8aa6c079d3df19310d50ac314911ed8cc4eb7f8cb8"}, - {file = "pydantic_core-2.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a5a8e19d7c707c4cadb8c18f5f60c843052ae83c20fa7d44f41594c644a1d330"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f7059ca8d64fea7f238994c97d91f75965216bcbe5f695bb44f354893f11d52"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bed0f8a0eeea9fb72937ba118f9db0cb7e90773462af7962d382445f3005e5a4"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3cb37038123447cf0f3ea4c74751f6a9d7afef0eb71aa07bf5f652b5e6a132c"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84286494f6c5d05243456e04223d5a9417d7f443c3b76065e75001beb26f88de"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acc07b2cfc5b835444b44a9956846b578d27beeacd4b52e45489e93276241025"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4fefee876e07a6e9aad7a8c8c9f85b0cdbe7df52b8a9552307b09050f7512c7e"}, - {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:258c57abf1188926c774a4c94dd29237e77eda19462e5bb901d88adcab6af919"}, - {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:35c14ac45fcfdf7167ca76cc80b2001205a8d5d16d80524e13508371fb8cdd9c"}, - {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1b26e1dff225c31897696cab7d4f0a315d4c0d9e8666dbffdb28216f3b17fdc"}, - {file = "pydantic_core-2.27.1-cp311-none-win32.whl", hash = "sha256:2cdf7d86886bc6982354862204ae3b2f7f96f21a3eb0ba5ca0ac42c7b38598b9"}, - {file = "pydantic_core-2.27.1-cp311-none-win_amd64.whl", hash = "sha256:3af385b0cee8df3746c3f406f38bcbfdc9041b5c2d5ce3e5fc6637256e60bbc5"}, - {file = "pydantic_core-2.27.1-cp311-none-win_arm64.whl", hash = "sha256:81f2ec23ddc1b476ff96563f2e8d723830b06dceae348ce02914a37cb4e74b89"}, - {file = "pydantic_core-2.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9cbd94fc661d2bab2bc702cddd2d3370bbdcc4cd0f8f57488a81bcce90c7a54f"}, - {file = "pydantic_core-2.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5f8c4718cd44ec1580e180cb739713ecda2bdee1341084c1467802a417fe0f02"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15aae984e46de8d376df515f00450d1522077254ef6b7ce189b38ecee7c9677c"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ba5e3963344ff25fc8c40da90f44b0afca8cfd89d12964feb79ac1411a260ac"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:992cea5f4f3b29d6b4f7f1726ed8ee46c8331c6b4eed6db5b40134c6fe1768bb"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0325336f348dbee6550d129b1627cb8f5351a9dc91aad141ffb96d4937bd9529"}, - {file = 
"pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7597c07fbd11515f654d6ece3d0e4e5093edc30a436c63142d9a4b8e22f19c35"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3bbd5d8cc692616d5ef6fbbbd50dbec142c7e6ad9beb66b78a96e9c16729b089"}, - {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:dc61505e73298a84a2f317255fcc72b710b72980f3a1f670447a21efc88f8381"}, - {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:e1f735dc43da318cad19b4173dd1ffce1d84aafd6c9b782b3abc04a0d5a6f5bb"}, - {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f4e5658dbffe8843a0f12366a4c2d1c316dbe09bb4dfbdc9d2d9cd6031de8aae"}, - {file = "pydantic_core-2.27.1-cp312-none-win32.whl", hash = "sha256:672ebbe820bb37988c4d136eca2652ee114992d5d41c7e4858cdd90ea94ffe5c"}, - {file = "pydantic_core-2.27.1-cp312-none-win_amd64.whl", hash = "sha256:66ff044fd0bb1768688aecbe28b6190f6e799349221fb0de0e6f4048eca14c16"}, - {file = "pydantic_core-2.27.1-cp312-none-win_arm64.whl", hash = "sha256:9a3b0793b1bbfd4146304e23d90045f2a9b5fd5823aa682665fbdaf2a6c28f3e"}, - {file = "pydantic_core-2.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f216dbce0e60e4d03e0c4353c7023b202d95cbaeff12e5fd2e82ea0a66905073"}, - {file = "pydantic_core-2.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a2e02889071850bbfd36b56fd6bc98945e23670773bc7a76657e90e6b6603c08"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42b0e23f119b2b456d07ca91b307ae167cc3f6c846a7b169fca5326e32fdc6cf"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:764be71193f87d460a03f1f7385a82e226639732214b402f9aa61f0d025f0737"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c00666a3bd2f84920a4e94434f5974d7bbc57e461318d6bb34ce9cdbbc1f6b2"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ccaa88b24eebc0f849ce0a4d09e8a408ec5a94afff395eb69baf868f5183107"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c65af9088ac534313e1963443d0ec360bb2b9cba6c2909478d22c2e363d98a51"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206b5cf6f0c513baffaeae7bd817717140770c74528f3e4c3e1cec7871ddd61a"}, - {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:062f60e512fc7fff8b8a9d680ff0ddaaef0193dba9fa83e679c0c5f5fbd018bc"}, - {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:a0697803ed7d4af5e4c1adf1670af078f8fcab7a86350e969f454daf598c4960"}, - {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:58ca98a950171f3151c603aeea9303ef6c235f692fe555e883591103da709b23"}, - {file = "pydantic_core-2.27.1-cp313-none-win32.whl", hash = "sha256:8065914ff79f7eab1599bd80406681f0ad08f8e47c880f17b416c9f8f7a26d05"}, - {file = "pydantic_core-2.27.1-cp313-none-win_amd64.whl", hash = "sha256:ba630d5e3db74c79300d9a5bdaaf6200172b107f263c98a0539eeecb857b2337"}, - {file = "pydantic_core-2.27.1-cp313-none-win_arm64.whl", hash = "sha256:45cf8588c066860b623cd11c4ba687f8d7175d5f7ef65f7129df8a394c502de5"}, - {file = "pydantic_core-2.27.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = 
"sha256:5897bec80a09b4084aee23f9b73a9477a46c3304ad1d2d07acca19723fb1de62"}, - {file = "pydantic_core-2.27.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0165ab2914379bd56908c02294ed8405c252250668ebcb438a55494c69f44ab"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b9af86e1d8e4cfc82c2022bfaa6f459381a50b94a29e95dcdda8442d6d83864"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f6c8a66741c5f5447e047ab0ba7a1c61d1e95580d64bce852e3df1f895c4067"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a42d6a8156ff78981f8aa56eb6394114e0dedb217cf8b729f438f643608cbcd"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64c65f40b4cd8b0e049a8edde07e38b476da7e3aaebe63287c899d2cff253fa5"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdcf339322a3fae5cbd504edcefddd5a50d9ee00d968696846f089b4432cf78"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bf99c8404f008750c846cb4ac4667b798a9f7de673ff719d705d9b2d6de49c5f"}, - {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8f1edcea27918d748c7e5e4d917297b2a0ab80cad10f86631e488b7cddf76a36"}, - {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:159cac0a3d096f79ab6a44d77a961917219707e2a130739c64d4dd46281f5c2a"}, - {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:029d9757eb621cc6e1848fa0b0310310de7301057f623985698ed7ebb014391b"}, - {file = "pydantic_core-2.27.1-cp38-none-win32.whl", hash = "sha256:a28af0695a45f7060e6f9b7092558a928a28553366519f64083c63a44f70e618"}, - {file = "pydantic_core-2.27.1-cp38-none-win_amd64.whl", hash = "sha256:2d4567c850905d5eaaed2f7a404e61012a51caf288292e016360aa2b96ff38d4"}, - {file = "pydantic_core-2.27.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e9386266798d64eeb19dd3677051f5705bf873e98e15897ddb7d76f477131967"}, - {file = "pydantic_core-2.27.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4228b5b646caa73f119b1ae756216b59cc6e2267201c27d3912b592c5e323b60"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3dfe500de26c52abe0477dde16192ac39c98f05bf2d80e76102d394bd13854"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aee66be87825cdf72ac64cb03ad4c15ffef4143dbf5c113f64a5ff4f81477bf9"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b748c44bb9f53031c8cbc99a8a061bc181c1000c60a30f55393b6e9c45cc5bd"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ca038c7f6a0afd0b2448941b6ef9d5e1949e999f9e5517692eb6da58e9d44be"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e0bd57539da59a3e4671b90a502da9a28c72322a4f17866ba3ac63a82c4498e"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ac6c2c45c847bbf8f91930d88716a0fb924b51e0c6dad329b793d670ec5db792"}, - {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b94d4ba43739bbe8b0ce4262bcc3b7b9f31459ad120fb595627eaeb7f9b9ca01"}, - {file = 
"pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:00e6424f4b26fe82d44577b4c842d7df97c20be6439e8e685d0d715feceb9fb9"}, - {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:38de0a70160dd97540335b7ad3a74571b24f1dc3ed33f815f0880682e6880131"}, - {file = "pydantic_core-2.27.1-cp39-none-win32.whl", hash = "sha256:7ccebf51efc61634f6c2344da73e366c75e735960b5654b63d7e6f69a5885fa3"}, - {file = "pydantic_core-2.27.1-cp39-none-win_amd64.whl", hash = "sha256:a57847b090d7892f123726202b7daa20df6694cbd583b67a592e856bff603d6c"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3fa80ac2bd5856580e242dbc202db873c60a01b20309c8319b5c5986fbe53ce6"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d950caa237bb1954f1b8c9227b5065ba6875ac9771bb8ec790d956a699b78676"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e4216e64d203e39c62df627aa882f02a2438d18a5f21d7f721621f7a5d3611d"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a3d637bd387c41d46b002f0e49c52642281edacd2740e5a42f7017feea3f2c"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:161c27ccce13b6b0c8689418da3885d3220ed2eae2ea5e9b2f7f3d48f1d52c27"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19910754e4cc9c63bc1c7f6d73aa1cfee82f42007e407c0f413695c2f7ed777f"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:e173486019cc283dc9778315fa29a363579372fe67045e971e89b6365cc035ed"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:af52d26579b308921b73b956153066481f064875140ccd1dfd4e77db89dbb12f"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:981fb88516bd1ae8b0cbbd2034678a39dedc98752f264ac9bc5839d3923fa04c"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5fde892e6c697ce3e30c61b239330fc5d569a71fefd4eb6512fc6caec9dd9e2f"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:816f5aa087094099fff7edabb5e01cc370eb21aa1a1d44fe2d2aefdfb5599b31"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c10c309e18e443ddb108f0ef64e8729363adbfd92d6d57beec680f6261556f3"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98476c98b02c8e9b2eec76ac4156fd006628b1b2d0ef27e548ffa978393fd154"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c3027001c28434e7ca5a6e1e527487051136aa81803ac812be51802150d880dd"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7699b1df36a48169cdebda7ab5a2bac265204003f153b4bd17276153d997670a"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1c39b07d90be6b48968ddc8c19e7585052088fd7ec8d568bb31ff64c70ae3c97"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:46ccfe3032b3915586e469d4972973f893c0a2bb65669194a5bdea9bacc088c2"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:62ba45e21cf6571d7f716d903b5b7b6d2617e2d5d67c0923dc47b9d41369f840"}, - {file = 
"pydantic_core-2.27.1.tar.gz", hash = "sha256:62a763352879b84aa31058fc931884055fd75089cccbd9d58bb6afd01141b235"}, + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, + {file = 
"pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = 
"sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, + {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, ] [package.dependencies] @@ -909,20 +899,19 @@ files = [ ] [[package]] -name = "typing-inspect" -version = "0.9.0" -description = "Runtime inspection utilities for typing module." 
+name = "typing-inspection" +version = "0.4.0" +description = "Runtime typing introspection tools" optional = false -python-versions = "*" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, - {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, + {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, + {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, ] [package.dependencies] -mypy-extensions = ">=0.3.0" -typing-extensions = ">=3.7.4" +typing-extensions = ">=4.12.0" [[package]] name = "urllib3" @@ -948,5 +937,5 @@ gcp = ["google-auth", "requests"] [metadata] lock-version = "2.1" -python-versions = ">=3.8" -content-hash = "f0f19d81d36ebe966895f21a0a9dd33118783904418f4103189c475e5903b958" +python-versions = ">=3.9" +content-hash = "c3917a9114ca2a0c01aedf207fa1b59cc259bb07c4d2914fe2ed9a4cb3e1785e" diff --git a/pylintrc b/pylintrc index 9d193c42..266bc815 100644 --- a/pylintrc +++ b/pylintrc @@ -455,7 +455,10 @@ disable=raw-checker-failed, bare-except, broad-exception-caught, fixme, - relative-beyond-top-level + relative-beyond-top-level, + consider-using-with, + wildcard-import, + unused-wildcard-import # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option diff --git a/pyproject.toml b/pyproject.toml index 5e7ddb8c..8edc7537 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.5.1" +version = "1.5.2-rc.1" description = "Python Client SDK for the Mistral AI API." 
authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/scripts/prepare-readme.py b/scripts/prepare_readme.py similarity index 84% rename from scripts/prepare-readme.py rename to scripts/prepare_readme.py index 9111d6cb..16f6fc7e 100644 --- a/scripts/prepare-readme.py +++ b/scripts/prepare_readme.py @@ -4,7 +4,7 @@ import shutil try: - with open("README.md", "r") as rh: + with open("README.md", "r", encoding="utf-8") as rh: readme_contents = rh.read() GITHUB_URL = "https://github.com/mistralai/client-python.git" GITHUB_URL = ( @@ -21,13 +21,13 @@ readme_contents, ) - with open("README-PYPI.md", "w") as wh: + with open("README-PYPI.md", "w", encoding="utf-8") as wh: wh.write(readme_contents) except Exception as e: try: print("Failed to rewrite README.md to README-PYPI.md, copying original instead") print(e) shutil.copyfile("README.md", "README-PYPI.md") - except Exception as e: + except Exception as ie: print("Failed to copy README.md to README-PYPI.md") - print(e) + print(ie) diff --git a/scripts/publish.sh b/scripts/publish.sh index ab45b1f9..f2f2cf2c 100755 --- a/scripts/publish.sh +++ b/scripts/publish.sh @@ -2,6 +2,6 @@ export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} -poetry run python scripts/prepare-readme.py +poetry run python scripts/prepare_readme.py poetry publish --build --skip-existing diff --git a/src/mistralai/_hooks/types.py b/src/mistralai/_hooks/types.py index fe448e94..ebc789ff 100644 --- a/src/mistralai/_hooks/types.py +++ b/src/mistralai/_hooks/types.py @@ -7,16 +7,19 @@ class HookContext: + base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None security_source: Optional[Union[Any, Callable[[], Any]]] = None def __init__( self, + base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): + self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes self.security_source = security_source @@ -25,21 +28,30 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 700c880e..6b24498d 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.5.1" +__version__: str = "1.5.2-rc.1" __openapi_doc_version__: str = "0.0.2" -__gen_version__: str = "2.497.0" -__user_agent__: str = "speakeasy-sdk/python 1.5.1 2.497.0 0.0.2 mistralai" +__gen_version__: str = "2.548.6" +__user_agent__: str = "speakeasy-sdk/python 1.5.2-rc.1 2.548.6 0.0.2 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index 05fd165c..6d43b480 100644 --- 
a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -78,6 +78,8 @@ def complete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.AgentsCompletionRequest( max_tokens=max_tokens, @@ -132,6 +134,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="agents_completion_v1_agents_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -143,12 +146,14 @@ def complete( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -236,6 +241,8 @@ async def complete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.AgentsCompletionRequest( max_tokens=max_tokens, @@ -290,6 +297,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="agents_completion_v1_agents_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -301,12 +309,14 @@ async def complete_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -396,6 +406,8 @@ def stream( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.AgentsCompletionStreamRequest( max_tokens=max_tokens, @@ -450,6 +462,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_agents", oauth2_scopes=[], security_source=get_security_from_env( @@ -462,7 +475,7 @@ def stream( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -471,8 +484,10 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -562,6 +577,8 
@@ async def stream_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.AgentsCompletionStreamRequest( max_tokens=max_tokens, @@ -616,6 +633,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_agents", oauth2_scopes=[], security_source=get_security_from_env( @@ -628,7 +646,7 @@ async def stream_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -637,8 +655,10 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/basesdk.py b/src/mistralai/basesdk.py index cda8adda..512e3072 100644 --- a/src/mistralai/basesdk.py +++ b/src/mistralai/basesdk.py @@ -231,6 +231,10 @@ def do(): req.headers, get_body_content(req), ) + + if client is None: + raise ValueError("client is required") + http_res = client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( @@ -303,6 +307,10 @@ async def do(): req.headers, get_body_content(req), ) + + if client is None: + raise ValueError("client is required") + http_res = await client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 67646ffe..558796d8 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -158,6 +158,8 @@ def complete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -213,6 +215,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -224,12 +227,14 @@ def complete( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -315,6 +320,8 @@ async def complete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -370,6 +377,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", 
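
The hunks in agents.py, basesdk.py, and chat.py follow one pattern: the resolved base URL is now threaded into every `HookContext` (as `base_url=base_url or ""`), alongside the mechanical `data` -> `response_data` rename and a guard that raises if no HTTP client is available. With the new field, custom hooks can see which server a request targets. A sketch of a hook consuming it, assuming the usual Speakeasy-style `BeforeRequestHook` protocol generated in src/mistralai/_hooks/types.py; the hook class and logger name below are illustrative:

```python
# Sketch of a request hook reading the new HookContext.base_url field.
# Assumes the Speakeasy-style BeforeRequestHook protocol from
# src/mistralai/_hooks/types.py; this class is illustrative.
import logging

import httpx

from mistralai._hooks.types import BeforeRequestContext

logger = logging.getLogger("mistralai.hooks")

class LogTargetServerHook:
    def before_request(
        self, hook_ctx: BeforeRequestContext, request: httpx.Request
    ) -> httpx.Request:
        # base_url is now populated by every operation ("" as a fallback).
        logger.debug(
            "operation %s -> %s", hook_ctx.operation_id, hook_ctx.base_url
        )
        return request
```
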
operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -381,12 +389,14 @@ async def complete_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -482,6 +492,8 @@ def stream( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -539,6 +551,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=get_security_from_env( @@ -551,7 +564,7 @@ def stream( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -560,8 +573,10 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -657,6 +672,8 @@ async def stream_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -714,6 +731,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=get_security_from_env( @@ -726,7 +744,7 @@ async def stream_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -735,8 +753,10 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/classifiers.py b/src/mistralai/classifiers.py index 6ff1d6a8..7191df0c 100644 --- a/src/mistralai/classifiers.py +++ b/src/mistralai/classifiers.py @@ -40,6 +40,8 @@ def moderate( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = 
models.ClassificationRequest( model=model, @@ -75,6 +77,7 @@ def moderate( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="moderations_v1_moderations_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -86,12 +89,14 @@ def moderate( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -141,6 +146,8 @@ async def moderate_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ClassificationRequest( model=model, @@ -176,6 +183,7 @@ async def moderate_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="moderations_v1_moderations_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -187,12 +195,14 @@ async def moderate_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -244,6 +254,8 @@ def moderate_chat( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatModerationRequest( model=model, @@ -280,6 +292,7 @@ def moderate_chat( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="moderations_chat_v1_chat_moderations_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -291,12 +304,14 @@ def moderate_chat( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -348,6 +363,8 @@ async def moderate_chat_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatModerationRequest( model=model, @@ -384,6 +401,7 @@ async def moderate_chat_async( http_res = await 
self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="moderations_chat_v1_chat_moderations_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -395,12 +413,14 @@ async def moderate_chat_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index f6f558b8..b99ff0cf 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -14,8 +14,8 @@ class Embeddings(BaseSDK): def create( self, *, + model: str, inputs: Union[models.Inputs, models.InputsTypedDict], - model: Optional[str] = "mistral-embed", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -25,8 +25,8 @@ def create( Embeddings - :param inputs: Text to embed. :param model: ID of the model to use. + :param inputs: Text to embed. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -39,6 +39,8 @@ def create( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.EmbeddingRequest( model=model, @@ -74,6 +76,7 @@ def create( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="embeddings_v1_embeddings_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -85,12 +88,14 @@ def create( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.EmbeddingResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -114,8 +119,8 @@ def create( async def create_async( self, *, + model: str, inputs: Union[models.Inputs, models.InputsTypedDict], - model: Optional[str] = "mistral-embed", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -125,8 +130,8 @@ async def create_async( Embeddings - :param inputs: Text to embed. :param model: ID of the model to use. + :param inputs: Text to embed. 
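
Note the signature change visible in both `create` and `create_async` above: `model` is now a required keyword-only argument rather than defaulting to `"mistral-embed"`, and it moves ahead of `inputs` in the signature and docstring. Because the parameters sit behind `*`, the reordering cannot break keyword callers, but callers that relied on the old default must now pass `model` explicitly; roughly:

```python
# Before this change, model defaulted to "mistral-embed":
#   client.embeddings.create(inputs=["Hello"])   # worked
# After it, model is required:
#   client.embeddings.create(inputs=["Hello"])   # TypeError
from mistralai import Mistral

client = Mistral(api_key="...")  # placeholder key
res = client.embeddings.create(
    model="mistral-embed",       # now mandatory
    inputs=["Hello", "world"],
)
```
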
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -139,6 +144,8 @@ async def create_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.EmbeddingRequest( model=model, @@ -174,6 +181,7 @@ async def create_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="embeddings_v1_embeddings_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -185,12 +193,14 @@ async def create_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.EmbeddingResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/files.py b/src/mistralai/files.py index 042e4aea..0ffc4857 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -44,6 +44,8 @@ def upload( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( file=utils.get_pydantic_model(file, models.File), @@ -83,6 +85,7 @@ def upload( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_upload_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -148,6 +151,8 @@ async def upload_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( file=utils.get_pydantic_model(file, models.File), @@ -187,6 +192,7 @@ async def upload_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_upload_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -256,6 +262,8 @@ def list( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesListFilesRequest( page=page, @@ -292,6 +300,7 @@ def list( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_list_files", oauth2_scopes=[], security_source=get_security_from_env( @@ -361,6 +370,8 @@ async def list_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesListFilesRequest( page=page, @@ -397,6 +408,7 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_list_files", oauth2_scopes=[], security_source=get_security_from_env( @@ -456,6 +468,8 @@ def retrieve( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesRetrieveFileRequest( 
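
Every operation in files.py (and in the other modules touched by this diff) gains the same else branch: when no per-call `server_url` override is given, the base URL is resolved eagerly through `self._get_url(base_url, url_variables)`, so the value handed to `HookContext` is the final templated URL rather than an unresolved default. A rough sketch of that resolution order, under the assumption that `_get_url` substitutes server-URL template variables; `resolve_base_url` and the template helper below are hypothetical stand-ins, not the SDK's actual code:

```python
# Illustrative sketch of the base-URL resolution order added in this
# diff; resolve_base_url and template_url are hypothetical stand-ins
# for BaseSDK._get_url, not the SDK's actual implementation.
from typing import Dict, Optional

def template_url(url: str, variables: Dict[str, str]) -> str:
    # Substitute {name}-style server URL variables.
    for name, value in variables.items():
        url = url.replace("{" + name + "}", value)
    return url

def resolve_base_url(
    server_url: Optional[str],
    base_url: str,
    url_variables: Dict[str, str],
) -> str:
    if server_url is not None:
        return server_url          # per-call override wins, untouched
    return template_url(base_url, url_variables)  # else: templated default
```
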
file_id=file_id, @@ -487,6 +501,7 @@ def retrieve( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_retrieve_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -546,6 +561,8 @@ async def retrieve_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesRetrieveFileRequest( file_id=file_id, @@ -577,6 +594,7 @@ async def retrieve_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_retrieve_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -636,6 +654,8 @@ def delete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesDeleteFileRequest( file_id=file_id, @@ -667,6 +687,7 @@ def delete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_delete_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -726,6 +747,8 @@ async def delete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesDeleteFileRequest( file_id=file_id, @@ -757,6 +780,7 @@ async def delete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_delete_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -816,6 +840,8 @@ def download( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesDownloadFileRequest( file_id=file_id, @@ -847,6 +873,7 @@ def download( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_download_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -907,6 +934,8 @@ async def download_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesDownloadFileRequest( file_id=file_id, @@ -938,6 +967,7 @@ async def download_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_download_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -998,6 +1028,8 @@ def get_signed_url( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesGetSignedURLRequest( file_id=file_id, @@ -1030,6 +1062,7 @@ def get_signed_url( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_get_signed_url", oauth2_scopes=[], security_source=get_security_from_env( @@ -1089,6 +1122,8 @@ async def get_signed_url_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesGetSignedURLRequest( file_id=file_id, @@ -1121,6 +1156,7 @@ async def get_signed_url_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_get_signed_url", oauth2_scopes=[], security_source=get_security_from_env( diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py index c11f6c99..032c722f 100644 --- a/src/mistralai/fim.py +++ b/src/mistralai/fim.py @@ -60,6 +60,8 @@ def 
complete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionRequest( model=model, @@ -103,6 +105,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -114,12 +117,14 @@ def complete( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.FIMCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -189,6 +194,8 @@ async def complete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionRequest( model=model, @@ -232,6 +239,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -243,12 +251,14 @@ async def complete_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.FIMCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -318,6 +328,8 @@ def stream( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionStreamRequest( model=model, @@ -361,6 +373,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], security_source=get_security_from_env( @@ -373,7 +386,7 @@ def stream( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -382,8 +395,10 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -453,6 +468,8 @@ async def stream_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, 
url_variables) request = models.FIMCompletionStreamRequest( model=model, @@ -496,6 +513,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], security_source=get_security_from_env( @@ -508,7 +526,7 @@ async def stream_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -517,8 +535,10 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/httpclient.py b/src/mistralai/httpclient.py index 9dc43cb0..1e426352 100644 --- a/src/mistralai/httpclient.py +++ b/src/mistralai/httpclient.py @@ -94,7 +94,9 @@ class ClientOwner(Protocol): def close_clients( owner: ClientOwner, sync_client: Union[HttpClient, None], + sync_client_supplied: bool, async_client: Union[AsyncHttpClient, None], + async_client_supplied: bool, ) -> None: """ A finalizer function that is meant to be used with weakref.finalize to close @@ -107,13 +109,13 @@ def close_clients( owner.client = None owner.async_client = None - if sync_client is not None: + if sync_client is not None and not sync_client_supplied: try: sync_client.close() except Exception: pass - if async_client is not None: + if async_client is not None and not async_client_supplied: is_async = False try: asyncio.get_running_loop() diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py index ea66bfc6..675ece0b 100644 --- a/src/mistralai/jobs.py +++ b/src/mistralai/jobs.py @@ -52,6 +52,8 @@ def list( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( page=page, @@ -91,6 +93,7 @@ def list( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=[], security_source=get_security_from_env( @@ -166,6 +169,8 @@ async def list_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( page=page, @@ -205,6 +210,7 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=[], security_source=get_security_from_env( @@ -290,6 +296,8 @@ def create( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobIn( model=model, @@ -339,6 +347,7 @@ def create( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -426,6 +435,8 @@ async def create_async( if server_url is not None: base_url = server_url + else: + base_url = 
diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py
index ea66bfc6..675ece0b 100644
--- a/src/mistralai/jobs.py
+++ b/src/mistralai/jobs.py
@@ -52,6 +52,8 @@ def list(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest(
             page=page,
@@ -91,6 +93,7 @@ def list(
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -166,6 +169,8 @@ async def list_async(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest(
             page=page,
@@ -205,6 +210,7 @@ async def list_async(
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -290,6 +296,8 @@ def create(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobIn(
             model=model,
@@ -339,6 +347,7 @@ def create(
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -426,6 +435,8 @@ async def create_async(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobIn(
             model=model,
@@ -475,6 +486,7 @@ async def create_async(
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -536,6 +548,8 @@ def get(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest(
             job_id=job_id,
@@ -567,6 +581,7 @@ def get(
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -626,6 +641,8 @@ async def get_async(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest(
             job_id=job_id,
@@ -657,6 +674,7 @@ async def get_async(
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -716,6 +734,8 @@ def cancel(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest(
             job_id=job_id,
@@ -747,6 +767,7 @@ def cancel(
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -806,6 +827,8 @@ async def cancel_async(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest(
             job_id=job_id,
@@ -837,6 +860,7 @@ async def cancel_async(
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -896,6 +920,8 @@ def start(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest(
             job_id=job_id,
@@ -927,6 +953,7 @@ def start(
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -986,6 +1013,8 @@ async def start_async(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest(
             job_id=job_id,
@@ -1017,6 +1046,7 @@ async def start_async(
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
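Every fine-tuning operation now hands the resolved base URL to `HookContext`, so custom hooks can see which host a call targets. A hypothetical logging hook; the `mistralai._hooks.types` module path and the hook interface are assumed from the generated hooks package, which this diff does not show, and the hook would still need to be registered in the generated `_hooks` registration module:

    import httpx
    from mistralai._hooks.types import BeforeRequestContext, BeforeRequestHook

    class LogBaseURL(BeforeRequestHook):
        def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request):
            # base_url is "" when no URL could be resolved (see `base_url or ""` above)
            print(f"{hook_ctx.operation_id} -> {hook_ctx.base_url}")
            return request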
diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py
index fe6b266a..e0d3c616 100644
--- a/src/mistralai/mistral_jobs.py
+++ b/src/mistralai/mistral_jobs.py
@@ -48,6 +48,8 @@ def list(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesBatchGetBatchJobsRequest(
             page=page,
@@ -85,6 +87,7 @@ def list(
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_get_batch_jobs",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -156,6 +159,8 @@ async def list_async(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesBatchGetBatchJobsRequest(
             page=page,
@@ -193,6 +198,7 @@ async def list_async(
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_get_batch_jobs",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -260,6 +266,8 @@ def create(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.BatchJobIn(
             input_files=input_files,
@@ -298,6 +306,7 @@ def create(
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_create_batch_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -365,6 +374,8 @@ async def create_async(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.BatchJobIn(
             input_files=input_files,
@@ -403,6 +414,7 @@ async def create_async(
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_create_batch_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -462,6 +474,8 @@ def get(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesBatchGetBatchJobRequest(
             job_id=job_id,
@@ -493,6 +507,7 @@ def get(
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_get_batch_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -552,6 +567,8 @@ async def get_async(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesBatchGetBatchJobRequest(
             job_id=job_id,
@@ -583,6 +600,7 @@ async def get_async(
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_get_batch_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -642,6 +660,8 @@ def cancel(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesBatchCancelBatchJobRequest(
             job_id=job_id,
@@ -673,6 +693,7 @@ def cancel(
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_cancel_batch_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -732,6 +753,8 @@ async def cancel_async(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesBatchCancelBatchJobRequest(
             job_id=job_id,
@@ -763,6 +786,7 @@ async def cancel_async(
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_cancel_batch_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
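The batch-job client gets the identical treatment. For orientation, a usage sketch against these operations; the `mistral.batch.jobs` accessor and every argument value are assumptions, since only the request models appear in this diff:

    from mistralai import Mistral

    mistral = Mistral(api_key="MY_API_KEY")
    page = mistral.batch.jobs.list(page=0, page_size=20)
    job = mistral.batch.jobs.create(
        input_files=["<file-id>"],          # placeholder file id
        endpoint="/v1/chat/completions",    # placeholder endpoint
        model="mistral-small-latest",       # placeholder model
    )
    mistral.batch.jobs.cancel(job_id=job.id)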
diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py
index 197f6e1f..0750906a 100644
--- a/src/mistralai/models/__init__.py
+++ b/src/mistralai/models/__init__.py
@@ -115,7 +115,11 @@
     DetailedJobOutStatus,
     DetailedJobOutTypedDict,
 )
-from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict
+from .documenturlchunk import (
+    DocumentURLChunk,
+    DocumentURLChunkType,
+    DocumentURLChunkTypedDict,
+)
 from .embeddingrequest import (
     EmbeddingRequest,
     EmbeddingRequestTypedDict,
@@ -455,6 +459,7 @@
     "Document",
     "DocumentTypedDict",
     "DocumentURLChunk",
+    "DocumentURLChunkType",
     "DocumentURLChunkTypedDict",
     "EmbeddingRequest",
     "EmbeddingRequestTypedDict",
diff --git a/src/mistralai/models/documenturlchunk.py b/src/mistralai/models/documenturlchunk.py
index 23622335..29945102 100644
--- a/src/mistralai/models/documenturlchunk.py
+++ b/src/mistralai/models/documenturlchunk.py
@@ -2,38 +2,32 @@
 
 from __future__ import annotations
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import validate_const
-import pydantic
 from pydantic import model_serializer
-from pydantic.functional_validators import AfterValidator
 from typing import Literal, Optional
-from typing_extensions import Annotated, NotRequired, TypedDict
+from typing_extensions import NotRequired, TypedDict
+
+
+DocumentURLChunkType = Literal["document_url"]
 
 
 class DocumentURLChunkTypedDict(TypedDict):
     document_url: str
-    type: Literal["document_url"]
     document_name: NotRequired[Nullable[str]]
     r"""The filename of the document"""
+    type: NotRequired[DocumentURLChunkType]
 
 
 class DocumentURLChunk(BaseModel):
     document_url: str
 
-    TYPE: Annotated[
-        Annotated[
-            Optional[Literal["document_url"]],
-            AfterValidator(validate_const("document_url")),
-        ],
-        pydantic.Field(alias="type"),
-    ] = "document_url"
-
     document_name: OptionalNullable[str] = UNSET
     r"""The filename of the document"""
 
+    type: Optional[DocumentURLChunkType] = "document_url"
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["type", "document_name"]
+        optional_fields = ["document_name", "type"]
         nullable_fields = ["document_name"]
         null_default_fields = []
diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py
index b5ccd26e..5c37fd48 100644
--- a/src/mistralai/models/embeddingrequest.py
+++ b/src/mistralai/models/embeddingrequest.py
@@ -3,8 +3,8 @@
 from __future__ import annotations
 from mistralai.types import BaseModel
 import pydantic
-from typing import List, Optional, Union
-from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
+from typing import List, Union
+from typing_extensions import Annotated, TypeAliasType, TypedDict
 
 
 InputsTypedDict = TypeAliasType("InputsTypedDict", Union[str, List[str]])
@@ -16,15 +16,15 @@
 
 
 class EmbeddingRequestTypedDict(TypedDict):
+    model: str
+    r"""ID of the model to use."""
     inputs: InputsTypedDict
     r"""Text to embed."""
-    model: NotRequired[str]
-    r"""ID of the model to use."""
 
 
 class EmbeddingRequest(BaseModel):
+    model: str
+    r"""ID of the model to use."""
+
     inputs: Annotated[Inputs, pydantic.Field(alias="input")]
     r"""Text to embed."""
-
-    model: Optional[str] = "mistral-embed"
-    r"""ID of the model to use."""
diff --git a/src/mistralai/models/filepurpose.py b/src/mistralai/models/filepurpose.py
index 8628b308..8599192b 100644
--- a/src/mistralai/models/filepurpose.py
+++ b/src/mistralai/models/filepurpose.py
@@ -5,4 +5,4 @@
 from typing import Literal, Union
 
 
-FilePurpose = Union[Literal["fine-tune", "batch"], UnrecognizedStr]
+FilePurpose = Union[Literal["fine-tune", "batch", "ocr"], UnrecognizedStr]
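Three schema changes ride along here: `DocumentURLChunk.type` becomes an optional `Literal["document_url"]` with a plain default (exported as `DocumentURLChunkType`, replacing the `validate_const` machinery), `EmbeddingRequest.model` becomes required with no implicit `"mistral-embed"` default, and `FilePurpose` gains `"ocr"`. A quick check of the new shapes:

    from mistralai import models

    chunk = models.DocumentURLChunk(document_url="https://example.com/doc.pdf")
    assert chunk.type == "document_url"      # defaulted; no validator needed

    req = models.EmbeddingRequest(
        model="mistral-embed",               # must now be passed explicitly
        input="Hello, world!",               # validation alias of the `inputs` field
    )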
diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py
index ec45eb36..0b04694d 100644
--- a/src/mistralai/models_.py
+++ b/src/mistralai/models_.py
@@ -35,6 +35,8 @@ def list(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         req = self._build_request(
             method="GET",
             path="/v1/models",
@@ -61,6 +63,7 @@ def list(
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="list_models_v1_models_get",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -72,12 +75,14 @@ def list(
             retry_config=retry_config,
         )
 
-        data: Any = None
+        response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
             return utils.unmarshal_json(http_res.text, models.ModelList)
         if utils.match_response(http_res, "422", "application/json"):
-            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
-            raise models.HTTPValidationError(data=data)
+            response_data = utils.unmarshal_json(
+                http_res.text, models.HTTPValidationErrorData
+            )
+            raise models.HTTPValidationError(data=response_data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
             raise models.SDKError(
@@ -122,6 +127,8 @@ async def list_async(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         req = self._build_request_async(
             method="GET",
             path="/v1/models",
@@ -148,6 +155,7 @@ async def list_async(
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="list_models_v1_models_get",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -159,12 +167,14 @@ async def list_async(
             retry_config=retry_config,
         )
 
-        data: Any = None
+        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
             return utils.unmarshal_json(http_res.text, models.ModelList)
         if utils.match_response(http_res, "422", "application/json"):
-            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
-            raise models.HTTPValidationError(data=data)
+            response_data = utils.unmarshal_json(
+                http_res.text, models.HTTPValidationErrorData
+            )
+            raise models.HTTPValidationError(data=response_data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
             raise models.SDKError(
@@ -211,6 +221,8 @@ def retrieve(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.RetrieveModelV1ModelsModelIDGetRequest(
             model_id=model_id,
@@ -242,6 +254,7 @@ def retrieve(
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="retrieve_model_v1_models__model_id__get",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -253,15 +266,17 @@ def retrieve(
             retry_config=retry_config,
         )
 
-        data: Any = None
+        response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
             return utils.unmarshal_json(
                 http_res.text,
                 models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet,
             )
         if utils.match_response(http_res, "422", "application/json"):
-            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
-            raise models.HTTPValidationError(data=data)
+            response_data = utils.unmarshal_json(
+                http_res.text, models.HTTPValidationErrorData
+            )
+            raise models.HTTPValidationError(data=response_data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
             raise models.SDKError(
@@ -308,6 +323,8 @@
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.RetrieveModelV1ModelsModelIDGetRequest(
             model_id=model_id,
@@ -339,6 +356,7 @@ async def retrieve_async(
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="retrieve_model_v1_models__model_id__get",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -350,15 +368,17 @@ async def retrieve_async(
             retry_config=retry_config,
         )
 
-        data: Any = None
+        response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
             return utils.unmarshal_json(
                 http_res.text,
                 models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet,
             )
         if utils.match_response(http_res, "422", "application/json"):
-            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
-            raise models.HTTPValidationError(data=data)
+            response_data = utils.unmarshal_json(
+                http_res.text, models.HTTPValidationErrorData
+            )
+            raise models.HTTPValidationError(data=response_data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
             raise models.SDKError(
@@ -405,6 +425,8 @@ def delete(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.DeleteModelV1ModelsModelIDDeleteRequest(
             model_id=model_id,
@@ -436,6 +458,7 @@ def delete(
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="delete_model_v1_models__model_id__delete",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -447,12 +470,14 @@ def delete(
             retry_config=retry_config,
         )
 
-        data: Any = None
+        response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
             return utils.unmarshal_json(http_res.text, models.DeleteModelOut)
         if utils.match_response(http_res, "422", "application/json"):
-            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
-            raise models.HTTPValidationError(data=data)
+            response_data = utils.unmarshal_json(
+                http_res.text, models.HTTPValidationErrorData
+            )
+            raise models.HTTPValidationError(data=response_data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
             raise models.SDKError(
@@ -499,6 +524,8 @@ async def delete_async(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.DeleteModelV1ModelsModelIDDeleteRequest(
             model_id=model_id,
@@ -530,6 +557,7 @@ async def delete_async(
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="delete_model_v1_models__model_id__delete",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -541,12 +569,14 @@ async def delete_async(
             retry_config=retry_config,
         )
 
-        data: Any = None
+        response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
             return utils.unmarshal_json(http_res.text, models.DeleteModelOut)
         if utils.match_response(http_res, "422", "application/json"):
-            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
-            raise models.HTTPValidationError(data=data)
+            response_data = utils.unmarshal_json(
+                http_res.text, models.HTTPValidationErrorData
+            )
+            raise models.HTTPValidationError(data=response_data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
             raise models.SDKError(
@@ -597,6 +627,8 @@ def update(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(
             model_id=model_id,
@@ -635,6 +667,7 @@ def update(
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -698,6 +731,8 @@ async def update_async(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(
             model_id=model_id,
@@ -736,6 +771,7 @@ async def update_async(
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -795,6 +831,8 @@ def archive(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest(
             model_id=model_id,
@@ -826,6 +864,7 @@ def archive(
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -885,6 +924,8 @@ async def archive_async(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest(
             model_id=model_id,
@@ -916,6 +957,7 @@ async def archive_async(
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -975,6 +1017,8 @@ def unarchive(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest(
             model_id=model_id,
@@ -1006,6 +1050,7 @@ def unarchive(
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -1065,6 +1110,8 @@ async def unarchive_async(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest(
             model_id=model_id,
@@ -1096,6 +1143,7 @@ async def unarchive_async(
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
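All models_.py operations now unmarshal 422 bodies into `response_data` before raising; the rename is internal, so callers keep handling the same exception types. A sketch, where the model id is a placeholder and the `SDKError` fields are assumed from the generated error class:

    from mistralai import Mistral, models

    mistral = Mistral(api_key="MY_API_KEY")
    try:
        m = mistral.models.retrieve(model_id="ft:open-mistral-7b:example")
    except models.HTTPValidationError as e:
        print(e.data)                        # the unmarshalled HTTPValidationErrorData
    except models.SDKError as e:
        print(e.status_code, e.message)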
diff --git a/src/mistralai/ocr.py b/src/mistralai/ocr.py
index 56c1da51..5d0e2414 100644
--- a/src/mistralai/ocr.py
+++ b/src/mistralai/ocr.py
@@ -47,6 +47,8 @@ def process(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.OCRRequest(
             model=model,
@@ -87,6 +89,7 @@ def process(
 
         http_res = self.do_request(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="ocr_v1_ocr_post",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -98,12 +101,14 @@ def process(
             retry_config=retry_config,
        )
 
-        data: Any = None
+        response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
             return utils.unmarshal_json(http_res.text, models.OCRResponse)
         if utils.match_response(http_res, "422", "application/json"):
-            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
-            raise models.HTTPValidationError(data=data)
+            response_data = utils.unmarshal_json(
+                http_res.text, models.HTTPValidationErrorData
+            )
+            raise models.HTTPValidationError(data=response_data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
             raise models.SDKError(
@@ -160,6 +165,8 @@ async def process_async(
         if server_url is not None:
             base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
 
         request = models.OCRRequest(
             model=model,
@@ -200,6 +207,7 @@ async def process_async(
 
         http_res = await self.do_request_async(
             hook_ctx=HookContext(
+                base_url=base_url or "",
                 operation_id="ocr_v1_ocr_post",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -211,12 +219,14 @@ async def process_async(
             retry_config=retry_config,
         )
 
-        data: Any = None
+        response_data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
             return utils.unmarshal_json(http_res.text, models.OCRResponse)
         if utils.match_response(http_res, "422", "application/json"):
-            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
-            raise models.HTTPValidationError(data=data)
+            response_data = utils.unmarshal_json(
+                http_res.text, models.HTTPValidationErrorData
+            )
+            raise models.HTTPValidationError(data=response_data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
             raise models.SDKError(
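The OCR client pairs naturally with the new `"ocr"` file purpose and the relaxed `DocumentURLChunk` above. A usage sketch; the `document` parameter name and the model name are assumptions, since the diff only shows `OCRRequest` being built from `model=model`:

    from mistralai import Mistral

    mistral = Mistral(api_key="MY_API_KEY")
    ocr_res = mistral.ocr.process(
        model="mistral-ocr-latest",          # placeholder model name
        document={"document_url": "https://example.com/doc.pdf"},  # "type" may now be omitted
    )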
diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py
index 00d8370a..e801eaf3 100644
--- a/src/mistralai/sdk.py
+++ b/src/mistralai/sdk.py
@@ -68,15 +68,19 @@ def __init__(
         :param retry_config: The retry configuration to use for all supported methods
         :param timeout_ms: Optional request timeout applied to each operation in milliseconds
         """
+        client_supplied = True
         if client is None:
             client = httpx.Client()
+            client_supplied = False
 
         assert issubclass(
             type(client), HttpClient
         ), "The provided client must implement the HttpClient protocol."
 
+        async_client_supplied = True
         if async_client is None:
             async_client = httpx.AsyncClient()
+            async_client_supplied = False
 
         if debug_logger is None:
             debug_logger = get_default_logger()
@@ -100,7 +104,9 @@ def __init__(
             self,
             SDKConfiguration(
                 client=client,
+                client_supplied=client_supplied,
                 async_client=async_client,
+                async_client_supplied=async_client_supplied,
                 security=security,
                 server_url=server_url,
                 server=server,
@@ -114,7 +120,7 @@ def __init__(
 
         current_server_url, *_ = self.sdk_configuration.get_server_details()
         server_url, self.sdk_configuration.client = hooks.sdk_init(
-            current_server_url, self.sdk_configuration.client
+            current_server_url, client
         )
         if current_server_url != server_url:
             self.sdk_configuration.server_url = server_url
@@ -127,7 +133,9 @@ def __init__(
             close_clients,
             cast(ClientOwner, self.sdk_configuration),
             self.sdk_configuration.client,
+            self.sdk_configuration.client_supplied,
             self.sdk_configuration.async_client,
+            self.sdk_configuration.async_client_supplied,
         )
 
         self._init_sdks()
@@ -151,9 +159,17 @@ async def __aenter__(self):
         return self
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        if self.sdk_configuration.client is not None:
+        if (
+            self.sdk_configuration.client is not None
+            and not self.sdk_configuration.client_supplied
+        ):
             self.sdk_configuration.client.close()
+        self.sdk_configuration.client = None
 
     async def __aexit__(self, exc_type, exc_val, exc_tb):
-        if self.sdk_configuration.async_client is not None:
+        if (
+            self.sdk_configuration.async_client is not None
+            and not self.sdk_configuration.async_client_supplied
+        ):
             await self.sdk_configuration.async_client.aclose()
+        self.sdk_configuration.async_client = None
diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py
index 2ccbcbe1..257ff01d 100644
--- a/src/mistralai/sdkconfiguration.py
+++ b/src/mistralai/sdkconfiguration.py
@@ -26,8 +26,10 @@
 
 @dataclass
 class SDKConfiguration:
-    client: HttpClient
-    async_client: AsyncHttpClient
+    client: Union[HttpClient, None]
+    client_supplied: bool
+    async_client: Union[AsyncHttpClient, None]
+    async_client_supplied: bool
     debug_logger: Logger
     security: Optional[Union[models.Security, Callable[[], models.Security]]] = None
     server_url: Optional[str] = ""
diff --git a/src/mistralai/utils/__init__.py b/src/mistralai/utils/__init__.py
index 151c87d4..d8b21128 100644
--- a/src/mistralai/utils/__init__.py
+++ b/src/mistralai/utils/__init__.py
@@ -43,6 +43,7 @@
     match_content_type,
     match_status_codes,
     match_response,
+    cast_partial,
 )
 from .logger import Logger, get_body_content, get_default_logger
 
@@ -96,4 +97,5 @@
     "validate_float",
     "validate_int",
     "validate_open_enum",
+    "cast_partial",
 ]
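Net effect in sdk.py: the SDK closes only the clients it created, and it always drops its references on exit, so an instance is not reusable after its `with` block. A sketch:

    import httpx
    from mistralai import Mistral

    external = httpx.Client()
    with Mistral(api_key="MY_API_KEY", client=external) as mistral:
        ...
    # __exit__ left `external` open (client_supplied=True) but cleared the
    # SDK's reference to it; the application closes it when done:
    external.close()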
diff --git a/src/mistralai/utils/serializers.py b/src/mistralai/utils/serializers.py
index c5eb3659..baa41fbd 100644
--- a/src/mistralai/utils/serializers.py
+++ b/src/mistralai/utils/serializers.py
@@ -7,14 +7,15 @@
 from typing_extensions import get_origin
 from pydantic import ConfigDict, create_model
 from pydantic_core import from_json
-from typing_inspect import is_optional_type
+from typing_inspection.typing_objects import is_union
 
 from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset
 
 
 def serialize_decimal(as_str: bool):
     def serialize(d):
-        if is_optional_type(type(d)) and d is None:
+        # Optional[T] is a Union[T, None]
+        if is_union(type(d)) and type(None) in get_args(type(d)) and d is None:
             return None
         if isinstance(d, Unset):
             return d
@@ -42,7 +43,8 @@ def validate_decimal(d):
 
 def serialize_float(as_str: bool):
     def serialize(f):
-        if is_optional_type(type(f)) and f is None:
+        # Optional[T] is a Union[T, None]
+        if is_union(type(f)) and type(None) in get_args(type(f)) and f is None:
             return None
         if isinstance(f, Unset):
             return f
@@ -70,7 +72,8 @@ def validate_float(f):
 
 def serialize_int(as_str: bool):
     def serialize(i):
-        if is_optional_type(type(i)) and i is None:
+        # Optional[T] is a Union[T, None]
+        if is_union(type(i)) and type(None) in get_args(type(i)) and i is None:
             return None
         if isinstance(i, Unset):
             return i
@@ -118,7 +121,8 @@ def validate(e):
 
 def validate_const(v):
     def validate(c):
-        if is_optional_type(type(c)) and c is None:
+        # Optional[T] is a Union[T, None]
+        if is_union(type(c)) and type(None) in get_args(type(c)) and c is None:
             return None
 
         if v != c:
@@ -163,7 +167,7 @@ def marshal_json(val, typ):
     if len(d) == 0:
         return ""
 
-    return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True)
+    return json.dumps(d[next(iter(d))], separators=(",", ":"))
 
 
 def is_nullable(field):
diff --git a/src/mistralai/utils/values.py b/src/mistralai/utils/values.py
index 2b4b6832..dae01a44 100644
--- a/src/mistralai/utils/values.py
+++ b/src/mistralai/utils/values.py
@@ -3,8 +3,9 @@
 from datetime import datetime
 from enum import Enum
 from email.message import Message
+from functools import partial
 import os
-from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union
+from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast
 
 from httpx import Response
 from pydantic import BaseModel
@@ -51,6 +52,8 @@ def match_status_codes(status_codes: List[str], status_code: int) -> bool:
 T = TypeVar("T")
 
+def cast_partial(typ):
+    return partial(cast, typ)
 
 def get_global_from_env(
     value: Optional[T], env_key: str, type_cast: Callable[[str], T]
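Two utility changes close out the diff: the serializers switch from `typing_inspect` to `typing_inspection` (already in pydantic's dependency tree), and `values.py` gains `cast_partial`, which is just `partial(cast, typ)`. A quick sanity check of both, assuming `typing_inspection` is installed:

    from functools import partial
    from typing import Optional, cast, get_args, get_origin
    from typing_inspection.typing_objects import is_union

    ann = Optional[int]                      # i.e. Union[int, None]
    assert is_union(get_origin(ann)) and type(None) in get_args(ann)

    to_str = partial(cast, str)              # what cast_partial(str) returns
    assert to_str(b"raw") == b"raw"          # cast is a typing no-op at runtime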