From aef3d4c049b614187796b5e41f8ef1b79fcee9bd Mon Sep 17 00:00:00 2001
From: gaspardBT
Date: Wed, 19 Mar 2025 09:31:49 +0100
Subject: [PATCH 1/3] =?UTF-8?q?Revert=20"chore:=20=F0=9F=90=9D=20Update=20?=
 =?UTF-8?q?SDK=20-=20Generate=20MISTRALAI=20MISTRALAI-SDK=201.5.2-rc.1=20(?=
 =?UTF-8?q?#202)"?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This reverts commit 1741021792af56d2f940b76dfd286d8f386e8894.
---
 .speakeasy/gen.lock | 45 ++-
 .speakeasy/gen.yaml | 5 +-
 .speakeasy/workflow.lock | 44 +--
 README.md | 73 +---
 RELEASES.md | 12 +-
 USAGE.md | 24 +-
 docs/models/documenturlchunk.md | 10 +-
 docs/models/documenturlchunktype.md | 8 -
 docs/models/embeddingrequest.md | 4 +-
 docs/models/filepurpose.md | 3 +-
 docs/sdks/agents/README.md | 6 +-
 docs/sdks/chat/README.md | 6 +-
 docs/sdks/classifiers/README.md | 4 +-
 docs/sdks/embeddings/README.md | 7 +-
 docs/sdks/files/README.md | 10 +-
 docs/sdks/fim/README.md | 6 +-
 docs/sdks/jobs/README.md | 11 +-
 docs/sdks/mistraljobs/README.md | 8 +-
 docs/sdks/models/README.md | 6 -
 docs/sdks/ocr/README.md | 1 -
 packages/mistralai_azure/.speakeasy/gen.lock | 33 +-
 packages/mistralai_azure/.speakeasy/gen.yaml | 4 -
 .../docs/models/assistantmessage.md | 12 +-
 .../docs/models/chatcompletionrequest.md | 3 +-
 .../models/chatcompletionstreamrequest.md | 3 +-
 .../mistralai_azure/docs/models/function.md | 3 +-
 .../mistralai_azure/docs/models/jsonschema.md | 11 -
 .../mistralai_azure/docs/models/prediction.md | 9 -
 .../docs/models/responseformat.md | 3 +-
 .../docs/models/responseformats.md | 3 +-
 .../mistralai_azure/docs/models/toolcall.md | 3 +-
 packages/mistralai_azure/pylintrc | 7 +-
 packages/mistralai_azure/pyproject.toml | 26 +-
 .../{prepare_readme.py => prepare-readme.py} | 0
 packages/mistralai_azure/scripts/publish.sh | 2 +-
 .../src/mistralai_azure/__init__.py | 11 +-
 .../src/mistralai_azure/_hooks/types.py | 18 +-
 .../src/mistralai_azure/_version.py | 3 -
 .../src/mistralai_azure/basesdk.py | 8 -
 .../src/mistralai_azure/chat.py | 108 ++----
 .../src/mistralai_azure/httpclient.py | 52 ---
 .../src/mistralai_azure/models/__init__.py | 7 -
 .../models/assistantmessage.py | 2 -
 .../models/chatcompletionrequest.py | 18 +-
 .../models/chatcompletionstreamrequest.py | 18 +-
 .../src/mistralai_azure/models/function.py | 3 -
 .../src/mistralai_azure/models/jsonschema.py | 61 ----
 .../src/mistralai_azure/models/prediction.py | 25 --
 .../mistralai_azure/models/responseformat.py | 43 +--
 .../mistralai_azure/models/responseformats.py | 2 +-
 .../src/mistralai_azure/models/toolcall.py | 3 -
 .../src/mistralai_azure/sdk.py | 70 +---
 .../src/mistralai_azure/sdkconfiguration.py | 20 +-
 .../src/mistralai_azure/utils/__init__.py | 2 -
 .../src/mistralai_azure/utils/serializers.py | 16 +-
 .../src/mistralai_azure/utils/values.py | 5 +-
 packages/mistralai_gcp/.speakeasy/gen.lock | 40 +--
 packages/mistralai_gcp/.speakeasy/gen.yaml | 4 -
 .../docs/models/assistantmessage.md | 12 +-
 .../docs/models/chatcompletionrequest.md | 5 +-
 .../models/chatcompletionstreamrequest.md | 5 +-
 .../docs/models/fimcompletionrequest.md | 2 +-
 .../docs/models/fimcompletionstreamrequest.md | 2 +-
 .../mistralai_gcp/docs/models/function.md | 3 +-
 .../mistralai_gcp/docs/models/jsonschema.md | 11 -
 .../mistralai_gcp/docs/models/prediction.md | 9 -
 .../docs/models/responseformat.md | 3 +-
 .../docs/models/responseformats.md | 3 +-
 .../mistralai_gcp/docs/models/toolcall.md | 3 +-
 packages/mistralai_gcp/pylintrc | 7 +-
 packages/mistralai_gcp/pyproject.toml | 30 +-
 .../{prepare_readme.py
=> prepare-readme.py} | 0 packages/mistralai_gcp/scripts/publish.sh | 2 +- .../src/mistralai_gcp/__init__.py | 11 +- .../src/mistralai_gcp/_hooks/types.py | 18 +- .../src/mistralai_gcp/_version.py | 3 - .../src/mistralai_gcp/basesdk.py | 8 - .../mistralai_gcp/src/mistralai_gcp/chat.py | 110 ++---- .../mistralai_gcp/src/mistralai_gcp/fim.py | 82 ++--- .../src/mistralai_gcp/httpclient.py | 52 --- .../src/mistralai_gcp/models/__init__.py | 7 - .../mistralai_gcp/models/assistantmessage.py | 2 - .../models/chatcompletionrequest.py | 18 +- .../models/chatcompletionstreamrequest.py | 18 +- .../models/fimcompletionrequest.py | 5 +- .../models/fimcompletionstreamrequest.py | 5 +- .../src/mistralai_gcp/models/function.py | 3 - .../src/mistralai_gcp/models/jsonschema.py | 61 ---- .../src/mistralai_gcp/models/prediction.py | 25 -- .../mistralai_gcp/models/responseformat.py | 43 +-- .../mistralai_gcp/models/responseformats.py | 2 +- .../src/mistralai_gcp/models/toolcall.py | 3 - .../mistralai_gcp/src/mistralai_gcp/sdk.py | 82 +---- .../src/mistralai_gcp/sdkconfiguration.py | 20 +- .../src/mistralai_gcp/utils/__init__.py | 2 - .../src/mistralai_gcp/utils/serializers.py | 16 +- .../src/mistralai_gcp/utils/values.py | 5 +- poetry.lock | 329 +++++++++--------- pylintrc | 5 +- pyproject.toml | 2 +- .../{prepare_readme.py => prepare-readme.py} | 8 +- scripts/publish.sh | 2 +- src/mistralai/_hooks/types.py | 18 +- src/mistralai/_version.py | 6 +- src/mistralai/agents.py | 44 +-- src/mistralai/basesdk.py | 8 - src/mistralai/chat.py | 44 +-- src/mistralai/classifiers.py | 44 +-- src/mistralai/embeddings.py | 30 +- src/mistralai/files.py | 36 -- src/mistralai/fim.py | 44 +-- src/mistralai/httpclient.py | 6 +- src/mistralai/jobs.py | 30 -- src/mistralai/mistral_jobs.py | 24 -- src/mistralai/models/__init__.py | 7 +- src/mistralai/models/documenturlchunk.py | 22 +- src/mistralai/models/embeddingrequest.py | 14 +- src/mistralai/models/filepurpose.py | 2 +- src/mistralai/models_.py | 84 +---- src/mistralai/ocr.py | 22 +- src/mistralai/sdk.py | 22 +- src/mistralai/sdkconfiguration.py | 6 +- src/mistralai/utils/__init__.py | 2 - src/mistralai/utils/serializers.py | 16 +- src/mistralai/utils/values.py | 5 +- 125 files changed, 662 insertions(+), 1872 deletions(-) delete mode 100644 docs/models/documenturlchunktype.md delete mode 100644 packages/mistralai_azure/docs/models/jsonschema.md delete mode 100644 packages/mistralai_azure/docs/models/prediction.md rename packages/mistralai_azure/scripts/{prepare_readme.py => prepare-readme.py} (100%) delete mode 100644 packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py delete mode 100644 packages/mistralai_azure/src/mistralai_azure/models/prediction.py delete mode 100644 packages/mistralai_gcp/docs/models/jsonschema.md delete mode 100644 packages/mistralai_gcp/docs/models/prediction.md rename packages/mistralai_gcp/scripts/{prepare_readme.py => prepare-readme.py} (100%) delete mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py delete mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py rename scripts/{prepare_readme.py => prepare-readme.py} (84%) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 59249dd4..6eb1248e 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 406e00c323dba0db26d6994620926af4 + docChecksum: 81cc8be96362e2f1cb145b08a2e6c4fa docVersion: 0.0.2 - speakeasyVersion: 1.517.3 - 
generationVersion: 2.548.6 - releaseVersion: 1.5.2-rc.1 - configChecksum: 321ba0a46c45c1b64b391afe2abe901c + speakeasyVersion: 1.477.0 + generationVersion: 2.497.0 + releaseVersion: 1.5.1 + configChecksum: ef3439d915c5d16e7cfb88fe2bf94907 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -14,7 +14,7 @@ features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.12.3 + core: 5.10.5 customCodeRegions: 0.1.1 defaultEnabledRetries: 0.2.0 downloadStreams: 1.0.1 @@ -22,19 +22,19 @@ features: envVarSecurityUsage: 0.3.2 examples: 3.0.1 flatRequests: 1.0.1 - flattening: 3.1.1 - globalSecurity: 3.0.3 + flattening: 3.1.0 + globalSecurity: 3.0.2 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.1.0 methodArguments: 1.0.2 multipartFileContentType: 1.0.0 nameOverrides: 3.0.1 - nullables: 1.0.1 + nullables: 1.0.0 openEnums: 1.0.0 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.1 + sdkHooks: 1.0.0 serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 @@ -101,7 +101,6 @@ generatedFiles: - docs/models/detailedjoboutstatus.md - docs/models/document.md - docs/models/documenturlchunk.md - - docs/models/documenturlchunktype.md - docs/models/embeddingrequest.md - docs/models/embeddingresponse.md - docs/models/embeddingresponsedata.md @@ -239,7 +238,7 @@ generatedFiles: - poetry.toml - py.typed - pylintrc - - scripts/prepare_readme.py + - scripts/prepare-readme.py - scripts/publish.sh - src/mistralai/__init__.py - src/mistralai/_hooks/__init__.py @@ -449,7 +448,7 @@ examples: files_api_routes_upload_file: speakeasy-default-files-api-routes-upload-file: requestBody: - multipart/form-data: {"file": {}} + multipart/form-data: {"file": {"": "x-file: example.file"}} responses: "200": application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "batch_request", "source": "repository"} @@ -505,14 +504,14 @@ examples: created_by_me: false responses: "200": - application/json: {"object": "list", "total": 768578} + application/json: {"total": 768578} jobs_api_routes_fine_tuning_create_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-create-fine-tuning-job: requestBody: application/json: {"model": "Fiesta", "hyperparameters": {"learning_rate": 0.0001}} responses: "200": - application/json: {"expected_duration_seconds": 220, "cost": 10, "cost_currency": "EUR", "train_tokens_per_step": 131072, "train_tokens": 1310720, "data_tokens": 305375, "deprecated": true, "details": "", "epochs": 4.2922, "training_steps": 10, "object": "job.metadata"} + application/json: {"expected_duration_seconds": 220, "cost": 10, "cost_currency": "EUR", "train_tokens_per_step": 131072, "train_tokens": 1310720, "data_tokens": 305375, "deprecated": true, "details": "", "epochs": 4.2922, "training_steps": 10} jobs_api_routes_fine_tuning_get_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-job: parameters: @@ -520,7 +519,7 @@ examples: job_id: "b888f774-3e7c-4135-a18c-6b985523c4bc" responses: "200": - application/json: {"id": "e50f7622-81da-484b-9c66-1c8a99c6b71b", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "Model Y", "status": "CANCELLED", "job_type": "", "created_at": 415305, "modified_at": 149108, "training_files": ["8f7112aa-f0ab-44e4-83b4-cca3716f6208", 
"7aa1f8cf-05d8-49d5-88ee-381f6b4b885c"], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 856562, "created_at": 1716963433}, {"metrics": {}, "step_number": 328633, "created_at": 1716963433}]} + application/json: {"id": "e50f7622-81da-484b-9c66-1c8a99c6b71b", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "Model Y", "status": "CANCELLED", "job_type": "", "created_at": 415305, "modified_at": 149108, "training_files": ["8f7112aa-f0ab-44e4-83b4-cca3716f6208", "7aa1f8cf-05d8-49d5-88ee-381f6b4b885c"], "checkpoints": [{"metrics": {}, "step_number": 856562, "created_at": 1716963433}, {"metrics": {}, "step_number": 328633, "created_at": 1716963433}]} jobs_api_routes_fine_tuning_cancel_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-cancel-fine-tuning-job: parameters: @@ -528,7 +527,7 @@ examples: job_id: "0f713502-9233-41c6-9ebd-c570b7edb496" responses: "200": - application/json: {"id": "d50fbe4e-3e32-4613-8574-4d82f3fd6b3c", "auto_start": true, "hyperparameters": {"learning_rate": 0.0001}, "model": "Taurus", "status": "SUCCESS", "job_type": "", "created_at": 251316, "modified_at": 342605, "training_files": ["247ac10c-dc31-412f-a7cc-924123024afa", "0f84bd49-4511-4689-9d07-a64aa724280b", "200aa489-3801-4d6e-a454-eb14cac448cb"], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 949854, "created_at": 1716963433}, {"metrics": {}, "step_number": 516599, "created_at": 1716963433}, {"metrics": {}, "step_number": 439590, "created_at": 1716963433}]} + application/json: {"id": "d50fbe4e-3e32-4613-8574-4d82f3fd6b3c", "auto_start": true, "hyperparameters": {"learning_rate": 0.0001}, "model": "Taurus", "status": "SUCCESS", "job_type": "", "created_at": 251316, "modified_at": 342605, "training_files": ["247ac10c-dc31-412f-a7cc-924123024afa", "0f84bd49-4511-4689-9d07-a64aa724280b", "200aa489-3801-4d6e-a454-eb14cac448cb"], "checkpoints": [{"metrics": {}, "step_number": 949854, "created_at": 1716963433}, {"metrics": {}, "step_number": 516599, "created_at": 1716963433}, {"metrics": {}, "step_number": 439590, "created_at": 1716963433}]} jobs_api_routes_fine_tuning_start_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-start-fine-tuning-job: parameters: @@ -536,7 +535,7 @@ examples: job_id: "0bf0f9e6-c3e5-4d61-aac8-0e36dcac0dfc" responses: "200": - application/json: {"id": "b676fe58-2c47-483e-831e-c71dbed4c90a", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "A4", "status": "CANCELLED", "job_type": "", "created_at": 874397, "modified_at": 483387, "training_files": [], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 331375, "created_at": 1716963433}, {"metrics": {}, "step_number": 590686, "created_at": 1716963433}, {"metrics": {}, "step_number": 543177, "created_at": 1716963433}]} + application/json: {"id": "b676fe58-2c47-483e-831e-c71dbed4c90a", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "A4", "status": "CANCELLED", "job_type": "", "created_at": 874397, "modified_at": 483387, "training_files": [], "checkpoints": [{"metrics": {}, "step_number": 331375, "created_at": 1716963433}, {"metrics": {}, "step_number": 590686, "created_at": 1716963433}, {"metrics": {}, "step_number": 543177, "created_at": 1716963433}]} jobs_api_routes_batch_get_batch_jobs: speakeasy-default-jobs-api-routes-batch-get-batch-jobs: parameters: @@ -546,14 +545,14 @@ examples: created_by_me: false responses: "200": - application/json: {"object": "list", "total": 768578} + application/json: 
{"total": 768578} jobs_api_routes_batch_create_batch_job: speakeasy-default-jobs-api-routes-batch-create-batch-job: requestBody: application/json: {"input_files": ["a621cf02-1cd9-4cf5-8403-315211a509a3"], "endpoint": "/v1/fim/completions", "model": "2", "timeout_hours": 24} responses: "200": - application/json: {"id": "", "object": "batch", "input_files": ["8e774c2b-ecc3-4769-b177-5e024985613d", "0ee803d5-6a1d-4f94-836b-fd39494798bc"], "endpoint": "", "model": "Impala", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}, {"message": "", "count": 1}], "status": "RUNNING", "created_at": 770370, "total_requests": 350586, "completed_requests": 95214, "succeeded_requests": 930830, "failed_requests": 617761} + application/json: {"id": "", "input_files": ["8e774c2b-ecc3-4769-b177-5e024985613d", "0ee803d5-6a1d-4f94-836b-fd39494798bc"], "endpoint": "", "model": "Impala", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}, {"message": "", "count": 1}], "status": "RUNNING", "created_at": 770370, "total_requests": 350586, "completed_requests": 95214, "succeeded_requests": 930830, "failed_requests": 617761} jobs_api_routes_batch_get_batch_job: speakeasy-default-jobs-api-routes-batch-get-batch-job: parameters: @@ -561,7 +560,7 @@ examples: job_id: "b888f774-3e7c-4135-a18c-6b985523c4bc" responses: "200": - application/json: {"id": "", "object": "batch", "input_files": ["50f76228-1da8-44bc-b661-c8a99c6b71b6", "cd62b8f7-112a-4af0-bab4-e43b4cca3716", "620807aa-1f8c-4f05-ad89-d58ee381f6b4"], "endpoint": "", "model": "Golf", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "SUCCESS", "created_at": 790898, "total_requests": 55097, "completed_requests": 578320, "succeeded_requests": 856562, "failed_requests": 328633} + application/json: {"id": "", "input_files": ["50f76228-1da8-44bc-b661-c8a99c6b71b6", "cd62b8f7-112a-4af0-bab4-e43b4cca3716", "620807aa-1f8c-4f05-ad89-d58ee381f6b4"], "endpoint": "", "model": "Golf", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "SUCCESS", "created_at": 790898, "total_requests": 55097, "completed_requests": 578320, "succeeded_requests": 856562, "failed_requests": 328633} jobs_api_routes_batch_cancel_batch_job: speakeasy-default-jobs-api-routes-batch-cancel-batch-job: parameters: @@ -569,7 +568,7 @@ examples: job_id: "0f713502-9233-41c6-9ebd-c570b7edb496" responses: "200": - application/json: {"id": "", "object": "batch", "input_files": ["50fbe4e3-e326-4135-8744-d82f3fd6b3c1", "eb45e247-ac10-4cdc-8311-2f7cc9241230", "4afaa0f8-4bd4-4945-9116-89d07a64aa72"], "endpoint": "", "model": "Alpine", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "QUEUED", "created_at": 709109, "total_requests": 275794, "completed_requests": 158938, "succeeded_requests": 12381, "failed_requests": 11864} + application/json: {"id": "", "input_files": ["50fbe4e3-e326-4135-8744-d82f3fd6b3c1", "eb45e247-ac10-4cdc-8311-2f7cc9241230", "4afaa0f8-4bd4-4945-9116-89d07a64aa72"], "endpoint": "", "model": "Alpine", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "QUEUED", "created_at": 709109, "total_requests": 275794, "completed_requests": 158938, "succeeded_requests": 12381, "failed_requests": 11864} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: @@ -651,7 +650,7 @@ examples: ocr_v1_ocr_post: speakeasy-default-ocr-v1-ocr-post: requestBody: - application/json: {"model": "Focus", "document": 
{"document_url": "https://dutiful-horst.org", "type": "document_url"}} + application/json: {"model": "Focus", "document": {"document_url": "https://dutiful-horst.org"}} responses: "200": application/json: {"pages": [], "model": "A4", "usage_info": {"pages_processed": 442675}} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 666982eb..f020895b 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -7,15 +7,13 @@ generation: useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true - nameResolutionFeb2025: false parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true - securityFeb2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.5.2-rc.1 + version: 1.5.1 additionalDependencies: dev: pytest: ^8.2.2 @@ -46,6 +44,5 @@ python: methodArguments: infer-optional-args outputModelSuffix: output packageName: mistralai - pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index ae45ec0e..21228dc5 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,49 +1,49 @@ -speakeasyVersion: 1.517.3 +speakeasyVersion: 1.477.0 sources: mistral-azure-source: - sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:b9be39effd24c50514ea00965c7b7089b6ae09d7aaacfb5f9eeafe465f131a62 - sourceBlobDigest: sha256:38505cbdf426ed228e4cce7667721237ddb32f72fb7df8f26c289082a568d7cb + sourceNamespace: mistral-azure-source + sourceRevisionDigest: sha256:ff181b1e0e3894a4925f7ae87415323058538a13bae9d9d508a8fe3b6ec0e333 + sourceBlobDigest: sha256:a2b4fd69298ebb9adb0d3c8dfb452db52defac512a7532203eebffb6a252df76 tags: - latest mistral-google-cloud-source: - sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:f4b8b9311a39f5c62deaed92c473eff44f139d784f122fa3c9e41d5236c96cd7 - sourceBlobDigest: sha256:cd5c242a598ba671e83fc7572ce7def4486cba035d2729d61cf7c64189a6fd89 + sourceNamespace: mistral-google-cloud-source + sourceRevisionDigest: sha256:36c7de11e35023dc8fa5f3c0fb0e486d2a102275a2df808c08cfe9d43089be04 + sourceBlobDigest: sha256:dd65bfa5d0448ad1851ebb18b57aa675533cd3e166beb86a390b0ab51d16a1c1 tags: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:8655bba5635f9f9bc3aa94690c26d6124c778e03478786074288cd63414a7a84 - sourceBlobDigest: sha256:cd9280b2b089ef5e0b06ba94ed2736b928f7e4e542d04f408df84e6912049ba6 + sourceRevisionDigest: sha256:bdfe3bd4e867529e1821e0f195c2d5832083f7699315f4a42d6b5551bd7847a3 + sourceBlobDigest: sha256:7e8a475b75404d724fc7936bd6f585b8e5226d3dca00ab4b69807b53fb63151b tags: - latest - - speakeasy-sdk-regen-1741964260 + - speakeasy-sdk-regen-1741279153 targets: mistralai-azure-sdk: source: mistral-azure-source - sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:b9be39effd24c50514ea00965c7b7089b6ae09d7aaacfb5f9eeafe465f131a62 - sourceBlobDigest: sha256:38505cbdf426ed228e4cce7667721237ddb32f72fb7df8f26c289082a568d7cb + sourceNamespace: mistral-azure-source + sourceRevisionDigest: sha256:ff181b1e0e3894a4925f7ae87415323058538a13bae9d9d508a8fe3b6ec0e333 + sourceBlobDigest: sha256:a2b4fd69298ebb9adb0d3c8dfb452db52defac512a7532203eebffb6a252df76 codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:6a21f785e0bc1861ae9bf237939c6252d4589f4b5ece596938bad54b3f5c1ac9 + codeSamplesRevisionDigest: sha256:28356dba7ea28436035e20182b8ce4d1951e19503b5accef6a128d860361e5c0 mistralai-gcp-sdk: source: 
mistral-google-cloud-source - sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:f4b8b9311a39f5c62deaed92c473eff44f139d784f122fa3c9e41d5236c96cd7 - sourceBlobDigest: sha256:cd5c242a598ba671e83fc7572ce7def4486cba035d2729d61cf7c64189a6fd89 + sourceNamespace: mistral-google-cloud-source + sourceRevisionDigest: sha256:36c7de11e35023dc8fa5f3c0fb0e486d2a102275a2df808c08cfe9d43089be04 + sourceBlobDigest: sha256:dd65bfa5d0448ad1851ebb18b57aa675533cd3e166beb86a390b0ab51d16a1c1 codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:a0d0890a8e87eac8ade9832883c7a129a749142696e01b1e611cf2d97fbeed9d + codeSamplesRevisionDigest: sha256:7de23f90d6543356f310f46375bef4db7f43eb22b2871ad4dfe1b7d0cc875bb4 mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:8655bba5635f9f9bc3aa94690c26d6124c778e03478786074288cd63414a7a84 - sourceBlobDigest: sha256:cd9280b2b089ef5e0b06ba94ed2736b928f7e4e542d04f408df84e6912049ba6 + sourceRevisionDigest: sha256:bdfe3bd4e867529e1821e0f195c2d5832083f7699315f4a42d6b5551bd7847a3 + sourceBlobDigest: sha256:7e8a475b75404d724fc7936bd6f585b8e5226d3dca00ab4b69807b53fb63151b codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:2a2f61e5c5e1eaad48e6a74164bd6249855c3ad7976ef83068199d57ebcdd055 + codeSamplesRevisionDigest: sha256:ba10be893f3e6dae275eb8fb09a688f3652de81eebd314427f28c274800edc48 workflow: workflowVersion: 1.0.0 - speakeasyVersion: 1.517.3 + speakeasyVersion: 1.477.0 sources: mistral-azure-source: inputs: diff --git a/README.md b/README.md index fd17da18..fd31bcd8 100644 --- a/README.md +++ b/README.md @@ -75,37 +75,6 @@ pip install mistralai ```bash poetry add mistralai ``` - -### Shell and script usage with `uv` - -You can use this SDK in a Python shell with [uv](https://docs.astral.sh/uv/) and the `uvx` command that comes with it like so: - -```shell -uvx --from mistralai python -``` - -It's also possible to write a standalone Python script without needing to set up a whole project like so: - -```python -#!/usr/bin/env -S uv run --script -# /// script -# requires-python = ">=3.9" -# dependencies = [ -# "mistralai", -# ] -# /// - -from mistralai import Mistral - -sdk = Mistral( - # SDK arguments -) - -# Rest of script here... -``` - -Once that is saved to a file, you can run it with `uv run script.py` where -`script.py` can be replaced with the actual file name. @@ -120,7 +89,6 @@ This example shows how to create chat completions. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -130,7 +98,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) + ], stream=False) # Handle response print(res) @@ -146,7 +114,6 @@ from mistralai import Mistral import os async def main(): - async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -156,7 +123,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) + ], stream=False) # Handle response print(res) @@ -173,7 +140,6 @@ This example shows how to upload a file. 
from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -197,7 +163,6 @@ from mistralai import Mistral import os async def main(): - async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -222,7 +187,6 @@ This example shows how to create agents completions. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -232,7 +196,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="") + ], agent_id="", stream=False) # Handle response print(res) @@ -248,7 +212,6 @@ from mistralai import Mistral import os async def main(): - async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -258,7 +221,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="") + ], agent_id="", stream=False) # Handle response print(res) @@ -275,15 +238,14 @@ This example shows how to create embedding request. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.embeddings.create(model="mistral-embed", inputs=[ + res = mistral.embeddings.create(inputs=[ "Embed this sentence.", "As well as this one.", - ]) + ], model="mistral-embed") # Handle response print(res) @@ -299,15 +261,14 @@ from mistralai import Mistral import os async def main(): - async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = await mistral.embeddings.create_async(model="mistral-embed", inputs=[ + res = await mistral.embeddings.create_async(inputs=[ "Embed this sentence.", "As well as this one.", - ]) + ], model="mistral-embed") # Handle response print(res) @@ -504,7 +465,6 @@ underlying connection when the context is exited. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -514,7 +474,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) + ], stream=True) with res as event_stream: for event in event_stream: @@ -542,7 +502,6 @@ Certain SDK methods accept file objects as part of a request body or multi-part from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -569,7 +528,6 @@ from mistralai import Mistral from mistralai.utils import BackoffStrategy, RetryConfig import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -588,7 +546,6 @@ from mistralai import Mistral from mistralai.utils import BackoffStrategy, RetryConfig import os - with Mistral( retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), api_key=os.getenv("MISTRAL_API_KEY", ""), @@ -629,7 +586,6 @@ When custom error responses are specified for an operation, the SDK may also rai from mistralai import Mistral, models import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -657,9 +613,9 @@ with Mistral( You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. 
This table lists the names associated with the available servers: -| Name | Server | Description | -| ---- | ------------------------ | -------------------- | -| `eu` | `https://api.mistral.ai` | EU Production server | +| Name | Server | +| ---- | ------------------------ | +| `eu` | `https://api.mistral.ai` | #### Example @@ -667,7 +623,6 @@ You can override the default server globally by passing a server name to the `se from mistralai import Mistral import os - with Mistral( server="eu", api_key=os.getenv("MISTRAL_API_KEY", ""), @@ -687,7 +642,6 @@ The default server can also be overridden globally by passing a URL to the `serv from mistralai import Mistral import os - with Mistral( server_url="https://api.mistral.ai", api_key=os.getenv("MISTRAL_API_KEY", ""), @@ -798,7 +752,6 @@ To authenticate with the API the `api_key` parameter must be set when initializi from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -822,7 +775,6 @@ The `Mistral` class implements the context manager protocol and registers a fina from mistralai import Mistral import os def main(): - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -831,7 +783,6 @@ def main(): # Or when using async: async def amain(): - async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/RELEASES.md b/RELEASES.md index 0ad3429c..d7b657bb 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -168,14 +168,4 @@ Based on: ### Generated - [python v1.5.1] . ### Releases -- [PyPI v1.5.1] https://pypi.org/project/mistralai/1.5.1 - . - -## 2025-03-14 15:08:57 -### Changes -Based on: -- OpenAPI Doc -- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy -### Generated -- [python v1.5.2-rc.1] . -### Releases -- [PyPI v1.5.2-rc.1] https://pypi.org/project/mistralai/1.5.2-rc.1 - . \ No newline at end of file +- [PyPI v1.5.1] https://pypi.org/project/mistralai/1.5.1 - . \ No newline at end of file diff --git a/USAGE.md b/USAGE.md index fa3a77de..3e1cae03 100644 --- a/USAGE.md +++ b/USAGE.md @@ -8,7 +8,6 @@ This example shows how to create chat completions. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -18,7 +17,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) + ], stream=False) # Handle response print(res) @@ -34,7 +33,6 @@ from mistralai import Mistral import os async def main(): - async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -44,7 +42,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) + ], stream=False) # Handle response print(res) @@ -61,7 +59,6 @@ This example shows how to upload a file. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -85,7 +82,6 @@ from mistralai import Mistral import os async def main(): - async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -110,7 +106,6 @@ This example shows how to create agents completions. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -120,7 +115,7 @@ with Mistral( "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], agent_id="") + ], agent_id="", stream=False) # Handle response print(res) @@ -136,7 +131,6 @@ from mistralai import Mistral import os async def main(): - async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -146,7 +140,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="") + ], agent_id="", stream=False) # Handle response print(res) @@ -163,15 +157,14 @@ This example shows how to create embedding request. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.embeddings.create(model="mistral-embed", inputs=[ + res = mistral.embeddings.create(inputs=[ "Embed this sentence.", "As well as this one.", - ]) + ], model="mistral-embed") # Handle response print(res) @@ -187,15 +180,14 @@ from mistralai import Mistral import os async def main(): - async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = await mistral.embeddings.create_async(model="mistral-embed", inputs=[ + res = await mistral.embeddings.create_async(inputs=[ "Embed this sentence.", "As well as this one.", - ]) + ], model="mistral-embed") # Handle response print(res) diff --git a/docs/models/documenturlchunk.md b/docs/models/documenturlchunk.md index 6c9a5b4d..33785c34 100644 --- a/docs/models/documenturlchunk.md +++ b/docs/models/documenturlchunk.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -| `document_url` | *str* | :heavy_check_mark: | N/A | -| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | -| `type` | [Optional[models.DocumentURLChunkType]](../models/documenturlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | +| `document_url` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[Literal["document_url"]]* | :heavy_minus_sign: | N/A | +| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | \ No newline at end of file diff --git a/docs/models/documenturlchunktype.md b/docs/models/documenturlchunktype.md deleted file mode 100644 index 32e1fa9e..00000000 --- a/docs/models/documenturlchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# DocumentURLChunkType - - -## Values - -| Name | Value | -| -------------- | -------------- | -| `DOCUMENT_URL` | document_url | \ No newline at end of file diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md index 3bdd79e8..07ab903a 100644 --- a/docs/models/embeddingrequest.md +++ b/docs/models/embeddingrequest.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | Example | | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -| `model` | *str* | 
:heavy_check_mark: | ID of the model to use. | mistral-embed | -| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Text to embed. | [ "Embed this sentence.", "As well as this one." ] | \ No newline at end of file +| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Text to embed. | [ "Embed this sentence.", "As well as this one."
] | +| `model` | *Optional[str]* | :heavy_minus_sign: | ID of the model to use. | | \ No newline at end of file diff --git a/docs/models/filepurpose.md b/docs/models/filepurpose.md index 14cab13e..5152aeeb 100644 --- a/docs/models/filepurpose.md +++ b/docs/models/filepurpose.md @@ -6,5 +6,4 @@ | Name | Value | | ----------- | ----------- | | `FINE_TUNE` | fine-tune | -| `BATCH` | batch | -| `OCR` | ocr | \ No newline at end of file +| `BATCH` | batch | \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 1e6f9069..00ca33ac 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -20,7 +20,6 @@ Agents Completion from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -30,7 +29,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="") + ], agent_id="", stream=False) # Handle response print(res) @@ -77,7 +76,6 @@ Mistral AI provides the ability to stream responses back to a client in order to from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -87,7 +85,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="") + ], agent_id="", stream=True) with res as event_stream: for event in event_stream: diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 8cac6db4..38e16adc 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -20,7 +20,6 @@ Chat Completion from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -30,7 +29,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) + ], stream=False) # Handle response print(res) @@ -80,7 +79,6 @@ Mistral AI provides the ability to stream responses back to a client in order to from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -90,7 +88,7 @@ with Mistral( "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ]) + ], stream=True) with res as event_stream: for event in event_stream: diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 6c7127af..6bcc68a9 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -20,7 +20,6 @@ Moderations from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -63,7 +62,6 @@ Moderations Chat from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -97,7 +95,7 @@ with Mistral( "role": "assistant", }, ], - ]) + ], truncate_for_context_length=False) # Handle response print(res) diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 8c386439..44fae4ac 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -19,15 +19,14 @@ Embeddings from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.embeddings.create(model="mistral-embed", inputs=[ + res = mistral.embeddings.create(inputs=[ "Embed this sentence.", "As well as this one.", - ]) + ], model="mistral-embed") # Handle response print(res) @@ -38,8 +37,8 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | | `inputs` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | +| `model` | *Optional[str]* | :heavy_minus_sign: | ID of the model to use. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index befa4d67..8f01a668 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -28,7 +28,6 @@ Please contact us if you need to increase these storage limits. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -71,12 +70,11 @@ Returns a list of files that belong to the user's organization. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.list() + res = mistral.files.list(page=0, page_size=100) # Handle response print(res) @@ -115,7 +113,6 @@ Returns information about a specific file. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -154,7 +151,6 @@ Delete a file. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -193,7 +189,6 @@ Download a file from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -232,12 +227,11 @@ Get Signed Url from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.get_signed_url(file_id="") + res = mistral.files.get_signed_url(file_id="", expiry=24) # Handle response print(res) diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index c70b3da4..28de6c02 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -20,12 +20,11 @@ FIM completion. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fim.complete(model="codestral-2405", prompt="def", suffix="return a+b") + res = mistral.fim.complete(model="codestral-2405", prompt="def", top_p=1, stream=False, suffix="return a+b") # Handle response print(res) @@ -69,12 +68,11 @@ Mistral AI provides the ability to stream responses back to a client in order to from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fim.stream(model="codestral-2405", prompt="def", suffix="return a+b") + res = mistral.fim.stream(model="codestral-2405", prompt="def", top_p=1, stream=True, suffix="return a+b") with res as event_stream: for event in event_stream: diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index ecb11def..06605877 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -21,12 +21,11 @@ Get a list of fine-tuning jobs for your organization and user. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.list() + res = mistral.fine_tuning.jobs.list(page=0, page_size=100, created_by_me=False) # Handle response print(res) @@ -68,12 +67,13 @@ Create a new fine-tuning job, it will be queued for processing. 
from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.create(model="Fiesta", hyperparameters={}) + res = mistral.fine_tuning.jobs.create(model="Fiesta", hyperparameters={ + "learning_rate": 0.0001, + }) # Handle response print(res) @@ -114,7 +114,6 @@ Get a fine-tuned job details by its UUID. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -153,7 +152,6 @@ Request the cancellation of a fine tuning job. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -192,7 +190,6 @@ Request the start of a validated fine tuning job. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index 5b80a45b..56a7f60b 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -20,12 +20,11 @@ Get a list of batch jobs for your organization and user. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.list() + res = mistral.batch.jobs.list(page=0, page_size=100, created_by_me=False) # Handle response print(res) @@ -65,14 +64,13 @@ Create a new batch job, it will be queued for processing. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: res = mistral.batch.jobs.create(input_files=[ "a621cf02-1cd9-4cf5-8403-315211a509a3", - ], endpoint="/v1/fim/completions", model="2") + ], endpoint="/v1/fim/completions", model="2", timeout_hours=24) # Handle response print(res) @@ -110,7 +108,6 @@ Get a batch job details by its UUID. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -149,7 +146,6 @@ Request the cancellation of a batch job. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index dd7baf50..e048d20e 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -24,7 +24,6 @@ List all models available to the user. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -63,7 +62,6 @@ Retrieve a model information. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -103,7 +101,6 @@ Delete a fine-tuned model. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -143,7 +140,6 @@ Update a model name or description. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -184,7 +180,6 @@ Archive a fine-tuned model. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -223,7 +218,6 @@ Un-archive a fine-tuned model. 
from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md index 61988ea6..54f8af96 100644 --- a/docs/sdks/ocr/README.md +++ b/docs/sdks/ocr/README.md @@ -19,7 +19,6 @@ OCR from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index 8b8ef6ae..16a5196b 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,34 +1,34 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: 3b99cf44527c23ce3298616720b00a62 + docChecksum: 4da7c33f650ddf206c58fa6c941d347f docVersion: 0.0.2 - speakeasyVersion: 1.517.3 - generationVersion: 2.548.6 + speakeasyVersion: 1.462.2 + generationVersion: 2.486.1 releaseVersion: 1.2.6 - configChecksum: 1a623455d46169b8a271df9cd9d58d86 + configChecksum: cc2ac1769a87215774fce0075ff2e77d published: true features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.12.3 + core: 5.7.4 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 - examples: 3.0.1 + examples: 3.0.0 flatRequests: 1.0.1 - globalSecurity: 3.0.3 + globalSecurity: 3.0.2 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.1.0 methodArguments: 1.0.2 nameOverrides: 3.0.1 - nullables: 1.0.1 + nullables: 1.0.0 openEnums: 1.0.0 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.1 + sdkHooks: 1.0.0 serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 @@ -61,10 +61,8 @@ generatedFiles: - docs/models/functioncall.md - docs/models/functionname.md - docs/models/httpvalidationerror.md - - docs/models/jsonschema.md - docs/models/loc.md - docs/models/messages.md - - docs/models/prediction.md - docs/models/referencechunk.md - docs/models/referencechunktype.md - docs/models/responseformat.md @@ -94,7 +92,7 @@ generatedFiles: - py.typed - pylintrc - pyproject.toml - - scripts/prepare_readme.py + - scripts/prepare-readme.py - scripts/publish.sh - src/mistralai_azure/__init__.py - src/mistralai_azure/_hooks/__init__.py @@ -119,8 +117,6 @@ generatedFiles: - src/mistralai_azure/models/functioncall.py - src/mistralai_azure/models/functionname.py - src/mistralai_azure/models/httpvalidationerror.py - - src/mistralai_azure/models/jsonschema.py - - src/mistralai_azure/models/prediction.py - src/mistralai_azure/models/referencechunk.py - src/mistralai_azure/models/responseformat.py - src/mistralai_azure/models/responseformats.py @@ -138,7 +134,6 @@ generatedFiles: - src/mistralai_azure/models/usermessage.py - src/mistralai_azure/models/validationerror.py - src/mistralai_azure/py.typed - - src/mistralai_azure/sdk.py - src/mistralai_azure/sdkconfiguration.py - src/mistralai_azure/types/__init__.py - src/mistralai_azure/types/basemodel.py @@ -161,19 +156,17 @@ examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "azureai", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "azureai", "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}]} responses: "422": application/json: {} - "200": {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "azureai", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "azureai", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} "422": application/json: {} -examplesVersion: 1.0.0 generatedTests: {} diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml index 04ed562b..17344d9b 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -7,10 +7,8 @@ generation: useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true - nameResolutionFeb2025: false parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true - securityFeb2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false @@ -25,7 +23,6 @@ python: clientServerStatusCodesAsErrors: true defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in Azure. 
- enableCustomCodeRegions: false enumFormat: union fixFlags: responseRequiredSep2024: false @@ -45,6 +42,5 @@ python: methodArguments: infer-optional-args outputModelSuffix: output packageName: mistralai_azure - pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/packages/mistralai_azure/docs/models/assistantmessage.md b/packages/mistralai_azure/docs/models/assistantmessage.md index 3d0bd90b..53f1cc76 100644 --- a/packages/mistralai_azure/docs/models/assistantmessage.md +++ b/packages/mistralai_azure/docs/models/assistantmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequest.md b/packages/mistralai_azure/docs/models/chatcompletionrequest.md index eb43a4da..68cef4a1 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionrequest.md @@ -6,7 +6,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{ "role": "user", "content": "Who is the best French painter? Answer in one short sentence." }
] | -| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | @@ -19,5 +19,4 @@ | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md index 78442736..c9c5c87b 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md @@ -6,7 +6,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | @@ -19,5 +19,4 @@ | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/function.md b/packages/mistralai_azure/docs/models/function.md index a166b7bb..8af398f5 100644 --- a/packages/mistralai_azure/docs/models/function.md +++ b/packages/mistralai_azure/docs/models/function.md @@ -7,5 +7,4 @@ | ------------------ | ------------------ | ------------------ | ------------------ | | `name` | *str* | :heavy_check_mark: | N/A | | `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | -| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/jsonschema.md b/packages/mistralai_azure/docs/models/jsonschema.md deleted file mode 100644 index ae387867..00000000 --- a/packages/mistralai_azure/docs/models/jsonschema.md +++ /dev/null @@ -1,11 +0,0 @@ -# JSONSchema - - -## Fields - -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `name` | *str* | :heavy_check_mark: | N/A | -| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/prediction.md b/packages/mistralai_azure/docs/models/prediction.md deleted file mode 100644 index 86e9c396..00000000 --- a/packages/mistralai_azure/docs/models/prediction.md +++ /dev/null @@ -1,9 +0,0 @@ -# Prediction - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | -| `type` | *Optional[Literal["content"]]* | :heavy_minus_sign: | N/A | -| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/responseformat.md b/packages/mistralai_azure/docs/models/responseformat.md index 23a1641b..9c627f55 100644 --- a/packages/mistralai_azure/docs/models/responseformat.md +++ b/packages/mistralai_azure/docs/models/responseformat.md @@ -5,5 +5,4 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | -| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/responseformats.md b/packages/mistralai_azure/docs/models/responseformats.md index 06886afe..ce35fbb3 100644 --- a/packages/mistralai_azure/docs/models/responseformats.md +++ b/packages/mistralai_azure/docs/models/responseformats.md @@ -8,5 +8,4 @@ An object specifying the format that the model must output. Setting to `{ "type" | Name | Value | | ------------- | ------------- | | `TEXT` | text | -| `JSON_OBJECT` | json_object | -| `JSON_SCHEMA` | json_schema | \ No newline at end of file +| `JSON_OBJECT` | json_object | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolcall.md b/packages/mistralai_azure/docs/models/toolcall.md index 574be1ea..7aca5fc9 100644 --- a/packages/mistralai_azure/docs/models/toolcall.md +++ b/packages/mistralai_azure/docs/models/toolcall.md @@ -7,5 +7,4 @@ | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | | `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | -| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/pylintrc b/packages/mistralai_azure/pylintrc index 266bc815..393d0f70 100644 --- a/packages/mistralai_azure/pylintrc +++ b/packages/mistralai_azure/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.9 +py-version=3.8 # Discover python modules and packages in the file system subtree. recursive=no @@ -455,10 +455,7 @@ disable=raw-checker-failed, bare-except, broad-exception-caught, fixme, - relative-beyond-top-level, - consider-using-with, - wildcard-import, - unused-wildcard-import + relative-beyond-top-level # Enable the message, report, category or checker with the given id(s). 
You can # either give multiple identifier separated by comma (,) or put this option diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index bf120e67..5c227f66 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -1,19 +1,9 @@ -[project] +[tool.poetry] name = "mistralai_azure" version = "1.2.6" description = "Python Client SDK for the Mistral AI API in Azure." -authors = [{ name = "Mistral" },] +authors = ["Mistral",] readme = "README-PYPI.md" -requires-python = ">=3.9" -dependencies = [ - "eval-type-backport >=0.2.0", - "httpx >=0.28.1", - "pydantic >=2.10.3", - "python-dateutil >=2.8.2", - "typing-inspection >=0.4.0", -] - -[tool.poetry] packages = [ { include = "mistralai_azure", from = "src" } ] @@ -25,8 +15,17 @@ include = ["py.typed", "src/mistralai_azure/py.typed"] [virtualenvs] in-project = true +[tool.poetry.dependencies] +python = "^3.8" +eval-type-backport = "^0.2.0" +httpx = "^0.28.1" +jsonpath-python = "^1.0.6" +pydantic = "~2.10.3" +python-dateutil = "^2.8.2" +typing-inspect = "^0.9.0" + [tool.poetry.group.dev.dependencies] -mypy = "==1.14.1" +mypy = "==1.13.0" pylint = "==3.2.3" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" @@ -37,7 +36,6 @@ requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" [tool.pytest.ini_options] -asyncio_default_fixture_loop_scope = "function" pythonpath = ["src"] [tool.mypy] diff --git a/packages/mistralai_azure/scripts/prepare_readme.py b/packages/mistralai_azure/scripts/prepare-readme.py similarity index 100% rename from packages/mistralai_azure/scripts/prepare_readme.py rename to packages/mistralai_azure/scripts/prepare-readme.py diff --git a/packages/mistralai_azure/scripts/publish.sh b/packages/mistralai_azure/scripts/publish.sh index f2f2cf2c..ab45b1f9 100755 --- a/packages/mistralai_azure/scripts/publish.sh +++ b/packages/mistralai_azure/scripts/publish.sh @@ -2,6 +2,6 @@ export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} -poetry run python scripts/prepare_readme.py +poetry run python scripts/prepare-readme.py poetry publish --build --skip-existing diff --git a/packages/mistralai_azure/src/mistralai_azure/__init__.py b/packages/mistralai_azure/src/mistralai_azure/__init__.py index dd02e42e..a1b7f626 100644 --- a/packages/mistralai_azure/src/mistralai_azure/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/__init__.py @@ -1,18 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). 
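The packaging hunk above trades the PEP 621 `[project]` table back for Poetry-native `[tool.poetry]` metadata and drops the floor from Python 3.9 to 3.8, which is why `eval-type-backport` stays a runtime dependency: it lets pydantic evaluate PEP 604 `X | Y` annotations on interpreters older than 3.10. A minimal sketch of the behaviour it enables (the `Message` model is hypothetical, not part of the SDK):

```python
from pydantic import BaseModel


class Message(BaseModel):
    # On Python 3.10+ the union below evaluates natively; on 3.8/3.9
    # pydantic can only resolve the string form "str | None" when
    # eval-type-backport is installed, hence the pinned dependency.
    content: "str | None" = None


print(Message(content="hello").content)  # hello
print(Message().content)                 # None
```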
DO NOT EDIT.""" -from ._version import ( - __title__, - __version__, - __openapi_doc_version__, - __gen_version__, - __user_agent__, -) +from ._version import __title__, __version__ from .sdk import * from .sdkconfiguration import * from .models import * VERSION: str = __version__ -OPENAPI_DOC_VERSION = __openapi_doc_version__ -SPEAKEASY_GENERATOR_VERSION = __gen_version__ -USER_AGENT = __user_agent__ diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py index 297dfa2f..5e34da26 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py @@ -7,19 +7,16 @@ class HookContext: - base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None security_source: Optional[Union[Any, Callable[[], Any]]] = None def __init__( self, - base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): - self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes self.security_source = security_source @@ -28,30 +25,21 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source ) class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source ) class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source ) diff --git a/packages/mistralai_azure/src/mistralai_azure/_version.py b/packages/mistralai_azure/src/mistralai_azure/_version.py index 23e2d1c2..c7215b89 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_version.py +++ b/packages/mistralai_azure/src/mistralai_azure/_version.py @@ -4,9 +4,6 @@ __title__: str = "mistralai_azure" __version__: str = "1.2.6" -__openapi_doc_version__: str = "0.0.2" -__gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.2.6 2.548.6 0.0.2 mistralai_azure" try: if __package__ is not None: diff --git a/packages/mistralai_azure/src/mistralai_azure/basesdk.py b/packages/mistralai_azure/src/mistralai_azure/basesdk.py index 24e4935e..05c100d4 100644 --- a/packages/mistralai_azure/src/mistralai_azure/basesdk.py +++ b/packages/mistralai_azure/src/mistralai_azure/basesdk.py @@ -231,10 +231,6 @@ def do(): req.headers, get_body_content(req), ) - - if client is None: - raise ValueError("client is required") - http_res = client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( @@ -307,10 +303,6 @@ async def do(): req.headers, get_body_content(req), ) - - if client is None: - raise ValueError("client is required") - http_res = await client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( diff --git a/packages/mistralai_azure/src/mistralai_azure/chat.py b/packages/mistralai_azure/src/mistralai_azure/chat.py index 6f126a4b..0ed464ba 100644 --- 
a/packages/mistralai_azure/src/mistralai_azure/chat.py +++ b/packages/mistralai_azure/src/mistralai_azure/chat.py @@ -15,7 +15,7 @@ def stream( self, *, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - model: Optional[str] = "azureai", + model: OptionalNullable[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, @@ -37,9 +37,6 @@ def stream( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -64,7 +61,6 @@ def stream( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -78,8 +74,6 @@ def stream( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -100,9 +94,6 @@ def stream( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), safe_prompt=safe_prompt, ) @@ -135,7 +126,6 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -146,7 +136,7 @@ def stream( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -155,16 +145,9 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -183,7 +166,7 @@ async def stream_async( self, *, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - model: Optional[str] = 
"azureai", + model: OptionalNullable[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, @@ -205,9 +188,6 @@ async def stream_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -232,7 +212,6 @@ async def stream_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -246,8 +225,6 @@ async def stream_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -268,9 +245,6 @@ async def stream_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), safe_prompt=safe_prompt, ) @@ -303,7 +277,6 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -314,7 +287,7 @@ async def stream_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -323,16 +296,9 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -354,7 +320,7 @@ def complete( List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], ], - model: Optional[str] = "azureai", + model: OptionalNullable[str] = "azureai", temperature: OptionalNullable[float] = UNSET, 
top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, @@ -381,9 +347,6 @@ def complete( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -406,7 +369,6 @@ def complete( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -420,8 +382,6 @@ def complete( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -444,9 +404,6 @@ def complete( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), safe_prompt=safe_prompt, ) @@ -479,7 +436,6 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -489,22 +445,15 @@ def complete( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.ChatCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -526,7 +475,7 @@ async def complete_async( List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], ], - model: Optional[str] = "azureai", + model: OptionalNullable[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, @@ -553,9 +502,6 @@ async def complete_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = 
UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -578,7 +524,6 @@ async def complete_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -592,8 +537,6 @@ async def complete_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -616,9 +559,6 @@ async def complete_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), safe_prompt=safe_prompt, ) @@ -651,7 +591,6 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -661,22 +600,15 @@ async def complete_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.ChatCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git a/packages/mistralai_azure/src/mistralai_azure/httpclient.py b/packages/mistralai_azure/src/mistralai_azure/httpclient.py index 1e426352..167cea4e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/httpclient.py +++ b/packages/mistralai_azure/src/mistralai_azure/httpclient.py @@ -1,8 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). 
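The `chat.py` hunks above also revert the response dispatch: the split `4XX` and `5XX` branches collapse back into a single `utils.match_response(http_res, ["4XX", "5XX"], "*")` check, while 422 bodies are still unmarshalled into `models.HTTPValidationErrorData` and raised as `models.HTTPValidationError`. A hedged caller-side sketch of what that means for error handling (endpoint and key are placeholders):

```python
from mistralai_azure import MistralAzure, models

sdk = MistralAzure(
    azure_api_key="<api-key>",                           # placeholder
    azure_endpoint="https://example-endpoint.example/",  # placeholder
)

try:
    res = sdk.chat.complete(
        messages=[{"role": "user", "content": "Say hi in one word."}],
    )
    if res is not None:
        print(res)
except models.HTTPValidationError as e:
    # 422: the request body failed server-side validation.
    print("validation error:", e.data)
except models.SDKError as e:
    # After the revert, every other 4XX or 5XX takes this one path.
    print("API error:", e.status_code, e.message)
```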
DO NOT EDIT.""" # pyright: reportReturnType = false -import asyncio -from concurrent.futures import ThreadPoolExecutor from typing_extensions import Protocol, runtime_checkable import httpx from typing import Any, Optional, Union @@ -84,53 +82,3 @@ def build_request( async def aclose(self) -> None: pass - - -class ClientOwner(Protocol): - client: Union[HttpClient, None] - async_client: Union[AsyncHttpClient, None] - - -def close_clients( - owner: ClientOwner, - sync_client: Union[HttpClient, None], - sync_client_supplied: bool, - async_client: Union[AsyncHttpClient, None], - async_client_supplied: bool, -) -> None: - """ - A finalizer function that is meant to be used with weakref.finalize to close - httpx clients used by an SDK so that underlying resources can be garbage - collected. - """ - - # Unset the client/async_client properties so there are no more references - # to them from the owning SDK instance and they can be reaped. - owner.client = None - owner.async_client = None - - if sync_client is not None and not sync_client_supplied: - try: - sync_client.close() - except Exception: - pass - - if async_client is not None and not async_client_supplied: - is_async = False - try: - asyncio.get_running_loop() - is_async = True - except RuntimeError: - pass - - try: - # If this function is called in an async loop then start another - # loop in a separate thread to close the async http client. - if is_async: - with ThreadPoolExecutor(max_workers=1) as executor: - future = executor.submit(asyncio.run, async_client.aclose()) - future.result() - else: - asyncio.run(async_client.aclose()) - except Exception: - pass diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py index ed9d9362..379a0dfe 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py @@ -54,8 +54,6 @@ ) from .functionname import FunctionName, FunctionNameTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData -from .jsonschema import JSONSchema, JSONSchemaTypedDict -from .prediction import Prediction, PredictionTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats @@ -96,7 +94,6 @@ ValidationErrorTypedDict, ) - __all__ = [ "Arguments", "ArgumentsTypedDict", @@ -143,14 +140,10 @@ "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", - "JSONSchema", - "JSONSchemaTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", - "Prediction", - "PredictionTypedDict", "ReferenceChunk", "ReferenceChunkType", "ReferenceChunkTypedDict", diff --git a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py index 530b33df..031677cf 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py @@ -32,7 +32,6 @@ class AssistantMessageTypedDict(TypedDict): content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" role: NotRequired[AssistantMessageRole] @@ -42,7 +41,6 @@ class AssistantMessage(BaseModel): tool_calls: OptionalNullable[List[ToolCall]] = UNSET prefix: Optional[bool] = False - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: Optional[AssistantMessageRole] = "assistant" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py index 08c66467..67c91bba 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py @@ -2,7 +2,6 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -71,7 +70,7 @@ class ChatCompletionRequestTypedDict(TypedDict): messages: List[ChatCompletionRequestMessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: NotRequired[str] + model: NotRequired[Nullable[str]] r"""The ID of the model to use for this request.""" temperature: NotRequired[Nullable[float]] r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" @@ -94,7 +93,6 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -103,7 +101,7 @@ class ChatCompletionRequest(BaseModel): messages: List[ChatCompletionRequestMessages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: Optional[str] = "azureai" + model: OptionalNullable[str] = "azureai" r"""The ID of the model to use for this request.""" temperature: OptionalNullable[float] = UNSET @@ -139,8 +137,6 @@ class ChatCompletionRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: Optional[Prediction] = None - safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -160,10 +156,16 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", - "prediction", "safe_prompt", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + ] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py index a2eec92b..465647eb 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py @@ -2,7 +2,6 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -67,7 +66,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): messages: List[MessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: NotRequired[str] + model: NotRequired[Nullable[str]] r"""The ID of the model to use for this request.""" temperature: NotRequired[Nullable[float]] r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" @@ -89,7 +88,6 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -98,7 +96,7 @@ class ChatCompletionStreamRequest(BaseModel): messages: List[Messages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: Optional[str] = "azureai" + model: OptionalNullable[str] = "azureai" r"""The ID of the model to use for this request.""" temperature: OptionalNullable[float] = UNSET @@ -133,8 +131,6 @@ class ChatCompletionStreamRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: Optional[Prediction] = None - safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -154,10 +150,16 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", - "prediction", "safe_prompt", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + ] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/function.py b/packages/mistralai_azure/src/mistralai_azure/models/function.py index e6ea8495..488cdcea 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/function.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/function.py @@ -10,7 +10,6 @@ class FunctionTypedDict(TypedDict): name: str parameters: Dict[str, Any] description: NotRequired[str] - strict: NotRequired[bool] class Function(BaseModel): @@ -19,5 +18,3 @@ class Function(BaseModel): parameters: Dict[str, Any] description: Optional[str] = "" - - strict: Optional[bool] = False diff --git a/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py b/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py deleted file mode 100644 index 210417c7..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py +++ /dev/null @@ -1,61 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
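Adding `"model"` to `nullable_fields` in both request models matters because the generated `serialize_model` hook distinguishes three states for an `OptionalNullable` field: never set (omitted from the payload), set to a value (emitted), and explicitly set to `None` (emitted as JSON `null`). A small sketch of that convention, assuming the reverted `ChatCompletionRequest` behaves like the serializers shown in this patch:

```python
from mistralai_azure import models

user_msg = {"role": "user", "content": "hi"}

# Left unset, "model" falls back to the "azureai" default and is emitted.
req = models.ChatCompletionRequest(messages=[user_msg])
print(req.model_dump()["model"])  # azureai

# Explicitly passed as None, the now-nullable field survives
# serialization as null instead of being dropped from the body.
req_null = models.ChatCompletionRequest(messages=[user_msg], model=None)
print("model" in req_null.model_dump())  # True (value is None)
```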
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_azure.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -import pydantic -from pydantic import model_serializer -from typing import Any, Dict, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class JSONSchemaTypedDict(TypedDict): - name: str - schema_definition: Dict[str, Any] - description: NotRequired[Nullable[str]] - strict: NotRequired[bool] - - -class JSONSchema(BaseModel): - name: str - - schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] - - description: OptionalNullable[str] = UNSET - - strict: Optional[bool] = False - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["description", "strict"] - nullable_fields = ["description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/prediction.py b/packages/mistralai_azure/src/mistralai_azure/models/prediction.py deleted file mode 100644 index 888337d3..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/prediction.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_azure.types import BaseModel -from mistralai_azure.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class PredictionTypedDict(TypedDict): - type: Literal["content"] - content: NotRequired[str] - - -class Prediction(BaseModel): - TYPE: Annotated[ - Annotated[ - Optional[Literal["content"]], AfterValidator(validate_const("content")) - ], - pydantic.Field(alias="type"), - ] = "content" - - content: Optional[str] = "" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py index cfd58dcf..e4a9d7dd 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py @@ -1,16 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from .jsonschema import JSONSchema, JSONSchemaTypedDict from .responseformats import ResponseFormats -from mistralai_azure.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer +from mistralai_azure.types import BaseModel from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -18,41 +10,8 @@ class ResponseFormatTypedDict(TypedDict): type: NotRequired[ResponseFormats] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] class ResponseFormat(BaseModel): type: Optional[ResponseFormats] = None r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - - json_schema: OptionalNullable[JSONSchema] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "json_schema"] - nullable_fields = ["json_schema"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py index 08c39951..2c06b812 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py @@ -4,5 +4,5 @@ from typing import Literal -ResponseFormats = Literal["text", "json_object", "json_schema"] +ResponseFormats = Literal["text", "json_object"] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py index 6ccdcaa2..69b47310 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py @@ -14,7 +14,6 @@ class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] - index: NotRequired[int] class ToolCall(BaseModel): @@ -25,5 +24,3 @@ class ToolCall(BaseModel): type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( None ) - - index: Optional[int] = 0 diff --git a/packages/mistralai_azure/src/mistralai_azure/sdk.py b/packages/mistralai_azure/src/mistralai_azure/sdk.py index 8379e55f..a83faa7b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdk.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdk.py @@ -1,19 +1,17 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +"""Code generated by Speakeasy (https://speakeasyapi.dev). 
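With `JSONSchema` deleted and `ResponseFormats` narrowed back to `Literal["text", "json_object"]`, JSON mode is the only structured-output option left, and the surviving docstring's warning still applies: the prompt itself must ask for JSON. A hedged usage sketch (the `sdk` client is assumed to be configured as in the earlier example, and the `response_format` keyword mirrors the request field of the same name):

```python
from mistralai_azure import models

# JSON mode guarantees syntactically valid JSON, but only if the
# messages themselves instruct the model to answer in JSON.
response_format = models.ResponseFormat(type="json_object")

messages = [
    {"role": "system", "content": "Reply only with a JSON object."},
    {"role": "user", "content": 'Give the capital of France as {"capital": ...}.'},
]

# res = sdk.chat.complete(messages=messages, response_format=response_format)
```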
DO NOT EDIT.""" -import weakref -from typing import Any, Callable, Dict, Optional, Union, cast +from typing import Any, Callable, Dict, Optional, Union import httpx - from mistralai_azure import models, utils from mistralai_azure._hooks import SDKHooks from mistralai_azure.chat import Chat -from mistralai_azure.types import UNSET, OptionalNullable +from mistralai_azure.types import Nullable from .basesdk import BaseSDK -from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients +from .httpclient import AsyncHttpClient, HttpClient from .sdkconfiguration import SDKConfiguration -from .utils.logger import Logger, get_default_logger +from .utils.logger import Logger, NoOpLogger from .utils.retries import RetryConfig @@ -21,7 +19,7 @@ class MistralAzure(BaseSDK): r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" chat: Chat - r"""Chat Completion API.""" + r"""Chat Completion API""" def __init__( self, @@ -30,8 +28,7 @@ def __init__( url_params: Optional[Dict[str, str]] = None, client: Optional[HttpClient] = None, async_client: Optional[AsyncHttpClient] = None, - retry_config: OptionalNullable[RetryConfig] = UNSET, - timeout_ms: Optional[int] = None, + retry_config: Optional[Nullable[RetryConfig]] = None, debug_logger: Optional[Logger] = None, ) -> None: r"""Instantiates the SDK configuring it with the provided parameters. @@ -42,9 +39,7 @@ def __init__( :param client: The HTTP client to use for all synchronous methods :param async_client: The Async HTTP client to use for all asynchronous methods :param retry_config: The retry configuration to use for all supported methods - :param timeout_ms: Optional request timeout applied to each operation in milliseconds """ - # if azure_endpoint doesn't end with `/v1` add it if not azure_endpoint.endswith("/"): azure_endpoint += "/" @@ -52,30 +47,28 @@ def __init__( azure_endpoint += "v1/" server_url = azure_endpoint - client_supplied = True if client is None: client = httpx.Client() - client_supplied = False assert issubclass( type(client), HttpClient ), "The provided client must implement the HttpClient protocol." - async_client_supplied = True if async_client is None: async_client = httpx.AsyncClient() - async_client_supplied = False - - if debug_logger is None: - debug_logger = get_default_logger() assert issubclass( type(async_client), AsyncHttpClient ), "The provided async_client must implement the AsyncHttpClient protocol." 
+ if debug_logger is None: + debug_logger = NoOpLogger() + security: Any = None if callable(azure_api_key): - security = lambda: models.Security(api_key=azure_api_key()) # pylint: disable=unnecessary-lambda-assignment + security = lambda: models.Security( # pylint: disable=unnecessary-lambda-assignment + api_key=azure_api_key() + ) else: security = models.Security(api_key=azure_api_key) @@ -87,14 +80,11 @@ def __init__( self, SDKConfiguration( client=client, - client_supplied=client_supplied, async_client=async_client, - async_client_supplied=async_client_supplied, security=security, server_url=server_url, server=None, retry_config=retry_config, - timeout_ms=timeout_ms, debug_logger=debug_logger, ), ) @@ -103,7 +93,7 @@ def __init__( current_server_url, *_ = self.sdk_configuration.get_server_details() server_url, self.sdk_configuration.client = hooks.sdk_init( - current_server_url, client + current_server_url, self.sdk_configuration.client ) if current_server_url != server_url: self.sdk_configuration.server_url = server_url @@ -111,39 +101,7 @@ def __init__( # pylint: disable=protected-access self.sdk_configuration.__dict__["_hooks"] = hooks - weakref.finalize( - self, - close_clients, - cast(ClientOwner, self.sdk_configuration), - self.sdk_configuration.client, - self.sdk_configuration.client_supplied, - self.sdk_configuration.async_client, - self.sdk_configuration.async_client_supplied, - ) - self._init_sdks() def _init_sdks(self): self.chat = Chat(self.sdk_configuration) - - def __enter__(self): - return self - - async def __aenter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.client is not None - and not self.sdk_configuration.client_supplied - ): - self.sdk_configuration.client.close() - self.sdk_configuration.client = None - - async def __aexit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.async_client is not None - and not self.sdk_configuration.async_client_supplied - ): - await self.sdk_configuration.async_client.aclose() - self.sdk_configuration.async_client = None diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py index 605e5d74..73b8d517 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py @@ -1,12 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). 
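The `sdk.py` hunk above removes both the `weakref.finalize` cleanup and the `__enter__`/`__exit__`/`__aenter__`/`__aexit__` methods, so after the revert the SDK neither closes the `httpx` clients it creates nor supports use as a context manager. One defensive pattern on this version is to own the transport yourself; a sketch with placeholder endpoint and key:

```python
import httpx

from mistralai_azure import MistralAzure

# Supply the client explicitly and close it yourself: the reverted SDK
# no longer finalizes clients, and `with MistralAzure(...)` is gone.
# (It still creates an internal httpx.AsyncClient that is left open.)
http_client = httpx.Client()
try:
    sdk = MistralAzure(
        azure_api_key="<api-key>",                           # placeholder
        azure_endpoint="https://example-endpoint.example/",  # placeholder
        client=http_client,
    )
    # ... use sdk.chat.complete(...) here ...
finally:
    http_client.close()
```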
DO NOT EDIT.""" from ._hooks import SDKHooks -from ._version import ( - __gen_version__, - __openapi_doc_version__, - __user_agent__, - __version__, -) from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix from dataclasses import dataclass @@ -26,19 +20,17 @@ @dataclass class SDKConfiguration: - client: Union[HttpClient, None] - client_supplied: bool - async_client: Union[AsyncHttpClient, None] - async_client_supplied: bool + client: HttpClient + async_client: AsyncHttpClient debug_logger: Logger security: Optional[Union[models.Security, Callable[[], models.Security]]] = None server_url: Optional[str] = "" server: Optional[str] = "" language: str = "python" - openapi_doc_version: str = __openapi_doc_version__ - sdk_version: str = __version__ - gen_version: str = __gen_version__ - user_agent: str = __user_agent__ + openapi_doc_version: str = "0.0.2" + sdk_version: str = "1.2.6" + gen_version: str = "2.486.1" + user_agent: str = "speakeasy-sdk/python 1.2.6 2.486.1 0.0.2 mistralai_azure" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py index 3cded8fe..26d51ae8 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py @@ -42,7 +42,6 @@ match_content_type, match_status_codes, match_response, - cast_partial, ) from .logger import Logger, get_body_content, get_default_logger @@ -95,5 +94,4 @@ "validate_float", "validate_int", "validate_open_enum", - "cast_partial", ] diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py index baa41fbd..c5eb3659 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py @@ -7,15 +7,14 @@ from typing_extensions import get_origin from pydantic import ConfigDict, create_model from pydantic_core import from_json -from typing_inspection.typing_objects import is_union +from typing_inspect import is_optional_type from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset def serialize_decimal(as_str: bool): def serialize(d): - # Optional[T] is a Union[T, None] - if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: + if is_optional_type(type(d)) and d is None: return None if isinstance(d, Unset): return d @@ -43,8 +42,7 @@ def validate_decimal(d): def serialize_float(as_str: bool): def serialize(f): - # Optional[T] is a Union[T, None] - if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: + if is_optional_type(type(f)) and f is None: return None if isinstance(f, Unset): return f @@ -72,8 +70,7 @@ def validate_float(f): def serialize_int(as_str: bool): def serialize(i): - # Optional[T] is a Union[T, None] - if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: + if is_optional_type(type(i)) and i is None: return None if isinstance(i, Unset): return i @@ -121,8 +118,7 @@ def validate(e): def validate_const(v): def validate(c): - # Optional[T] is a Union[T, None] - if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: + if is_optional_type(type(c)) and c is None: return None if v != c: @@ -167,7 +163,7 @@ def marshal_json(val, typ): if len(d) == 0: return "" - 
return json.dumps(d[next(iter(d))], separators=(",", ":")) + return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True) def is_nullable(field): diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/values.py b/packages/mistralai_azure/src/mistralai_azure/utils/values.py index dae01a44..2b4b6832 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/values.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/values.py @@ -3,9 +3,8 @@ from datetime import datetime from enum import Enum from email.message import Message -from functools import partial import os -from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union from httpx import Response from pydantic import BaseModel @@ -52,8 +51,6 @@ def match_status_codes(status_codes: List[str], status_code: int) -> bool: T = TypeVar("T") -def cast_partial(typ): - return partial(cast, typ) def get_global_from_env( value: Optional[T], env_key: str, type_cast: Callable[[str], T] diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index eab663b0..f74b9759 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,34 +1,34 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: 0e9c7ff94b487395628de6c99a5954ce + docChecksum: 849dde0ef239604ca71711ffc1220b54 docVersion: 0.0.2 - speakeasyVersion: 1.517.3 - generationVersion: 2.548.6 + speakeasyVersion: 1.462.2 + generationVersion: 2.486.1 releaseVersion: 1.2.6 - configChecksum: 31db65297a20fe8af5b30effb1421b52 + configChecksum: ba11718a5b49fb4a979ae9693a68b191 published: true features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.12.3 + core: 5.7.4 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 - examples: 3.0.1 + examples: 3.0.0 flatRequests: 1.0.1 - globalSecurity: 3.0.3 + globalSecurity: 3.0.2 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.1.0 methodArguments: 1.0.2 nameOverrides: 3.0.1 - nullables: 1.0.1 + nullables: 1.0.0 openEnums: 1.0.0 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.1 + sdkHooks: 1.0.0 serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 @@ -66,10 +66,8 @@ generatedFiles: - docs/models/functioncall.md - docs/models/functionname.md - docs/models/httpvalidationerror.md - - docs/models/jsonschema.md - docs/models/loc.md - docs/models/messages.md - - docs/models/prediction.md - docs/models/referencechunk.md - docs/models/referencechunktype.md - docs/models/responseformat.md @@ -99,7 +97,7 @@ generatedFiles: - py.typed - pylintrc - pyproject.toml - - scripts/prepare_readme.py + - scripts/prepare-readme.py - scripts/publish.sh - src/mistralai_gcp/__init__.py - src/mistralai_gcp/_hooks/__init__.py @@ -128,8 +126,6 @@ generatedFiles: - src/mistralai_gcp/models/functioncall.py - src/mistralai_gcp/models/functionname.py - src/mistralai_gcp/models/httpvalidationerror.py - - src/mistralai_gcp/models/jsonschema.py - - src/mistralai_gcp/models/prediction.py - src/mistralai_gcp/models/referencechunk.py - src/mistralai_gcp/models/responseformat.py - src/mistralai_gcp/models/responseformats.py @@ -147,7 +143,6 @@ generatedFiles: - src/mistralai_gcp/models/usermessage.py - src/mistralai_gcp/models/validationerror.py - src/mistralai_gcp/py.typed - - src/mistralai_gcp/sdk.py - src/mistralai_gcp/sdkconfiguration.py - 
src/mistralai_gcp/types/__init__.py - src/mistralai_gcp/types/basemodel.py @@ -170,36 +165,33 @@ examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "mistral-small-latest", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "422": application/json: {} - "200": {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "mistral-small-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} "422": application/json: {} stream_fim: speakeasy-default-stream-fim: requestBody: - application/json: {"model": "codestral-2405", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} + application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"} responses: "422": application/json: {} - "200": {} fim_completion_v1_fim_completions_post: speakeasy-default-fim-completion-v1-fim-completions-post: requestBody: - application/json: {"model": "codestral-2405", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} + application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} "422": application/json: {} -examplesVersion: 1.0.0 generatedTests: {} diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index 572d3ed1..afa4d1d0 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -7,10 +7,8 @@ generation: 
useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true - nameResolutionFeb2025: false parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true - securityFeb2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false @@ -28,7 +26,6 @@ python: clientServerStatusCodesAsErrors: true defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in GCP. - enableCustomCodeRegions: false enumFormat: union fixFlags: responseRequiredSep2024: false @@ -48,6 +45,5 @@ python: methodArguments: infer-optional-args outputModelSuffix: output packageName: mistralai-gcp - pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/packages/mistralai_gcp/docs/models/assistantmessage.md b/packages/mistralai_gcp/docs/models/assistantmessage.md index 3d0bd90b..53f1cc76 100644 --- a/packages/mistralai_gcp/docs/models/assistantmessage.md +++ b/packages/mistralai_gcp/docs/models/assistantmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md index 3e9de262..abc83281 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | | `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [<br/>  {<br/>    "role": "user",<br/>    "content": "Who is the best French painter? Answer in one short sentence."<br/>  }<br/>] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -18,5 +18,4 @@ | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md index 8200f8a3..863c0229 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | | `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
  {<br/>    "role": "user",<br/>    "content": "Who is the best French painter? Answer in one short sentence."<br/>  }<br/>] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -18,5 +18,4 @@ | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md index 7507b90c..236d2d21 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`<br/>- `codestral-latest` | codestral-2405 | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:<br/>- `codestral-2405`
- `codestral-latest` | codestral-2405 | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md index 6cc439c7..fa635932 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`<br/>- `codestral-latest` | codestral-2405 | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:<br/>- `codestral-2405`
- `codestral-latest` | codestral-2405 | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/packages/mistralai_gcp/docs/models/function.md b/packages/mistralai_gcp/docs/models/function.md index a166b7bb..8af398f5 100644 --- a/packages/mistralai_gcp/docs/models/function.md +++ b/packages/mistralai_gcp/docs/models/function.md @@ -7,5 +7,4 @@ | ------------------ | ------------------ | ------------------ | ------------------ | | `name` | *str* | :heavy_check_mark: | N/A | | `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | -| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/jsonschema.md b/packages/mistralai_gcp/docs/models/jsonschema.md deleted file mode 100644 index ae387867..00000000 --- a/packages/mistralai_gcp/docs/models/jsonschema.md +++ /dev/null @@ -1,11 +0,0 @@ -# JSONSchema - - -## Fields - -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `name` | *str* | :heavy_check_mark: | N/A | -| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/prediction.md b/packages/mistralai_gcp/docs/models/prediction.md deleted file mode 100644 index 86e9c396..00000000 --- a/packages/mistralai_gcp/docs/models/prediction.md +++ /dev/null @@ -1,9 +0,0 @@ -# Prediction - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | -| `type` | *Optional[Literal["content"]]* | :heavy_minus_sign: | N/A | -| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/responseformat.md b/packages/mistralai_gcp/docs/models/responseformat.md index 23a1641b..9c627f55 100644 --- a/packages/mistralai_gcp/docs/models/responseformat.md +++ b/packages/mistralai_gcp/docs/models/responseformat.md @@ -5,5 +5,4 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | -| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/responseformats.md b/packages/mistralai_gcp/docs/models/responseformats.md index 06886afe..ce35fbb3 100644 --- a/packages/mistralai_gcp/docs/models/responseformats.md +++ b/packages/mistralai_gcp/docs/models/responseformats.md @@ -8,5 +8,4 @@ An object specifying the format that the model must output. 
Setting to `{ "type" | Name | Value | | ------------- | ------------- | | `TEXT` | text | -| `JSON_OBJECT` | json_object | -| `JSON_SCHEMA` | json_schema | \ No newline at end of file +| `JSON_OBJECT` | json_object | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolcall.md b/packages/mistralai_gcp/docs/models/toolcall.md index 574be1ea..7aca5fc9 100644 --- a/packages/mistralai_gcp/docs/models/toolcall.md +++ b/packages/mistralai_gcp/docs/models/toolcall.md @@ -7,5 +7,4 @@ | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | | `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | -| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/pylintrc b/packages/mistralai_gcp/pylintrc index 266bc815..393d0f70 100644 --- a/packages/mistralai_gcp/pylintrc +++ b/packages/mistralai_gcp/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.9 +py-version=3.8 # Discover python modules and packages in the file system subtree. recursive=no @@ -455,10 +455,7 @@ disable=raw-checker-failed, bare-except, broad-exception-caught, fixme, - relative-beyond-top-level, - consider-using-with, - wildcard-import, - unused-wildcard-import + relative-beyond-top-level # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index 21cf7eb9..6692f1d5 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -1,21 +1,9 @@ -[project] +[tool.poetry] name = "mistralai-gcp" version = "1.2.6" description = "Python Client SDK for the Mistral AI API in GCP." 
-authors = [{ name = "Mistral" },] +authors = ["Mistral",] readme = "README-PYPI.md" -requires-python = ">=3.9" -dependencies = [ - "eval-type-backport >=0.2.0", - "google-auth (>=2.31.0,<3.0.0)", - "httpx >=0.28.1", - "pydantic >=2.10.3", - "python-dateutil >=2.8.2", - "requests (>=2.32.3,<3.0.0)", - "typing-inspection >=0.4.0", -] - -[tool.poetry] packages = [ { include = "mistralai_gcp", from = "src" } ] @@ -27,8 +15,19 @@ include = ["py.typed", "src/mistralai_gcp/py.typed"] [virtualenvs] in-project = true +[tool.poetry.dependencies] +python = "^3.8" +eval-type-backport = "^0.2.0" +google-auth = "2.27.0" +httpx = "^0.28.1" +jsonpath-python = "^1.0.6" +pydantic = "~2.10.3" +python-dateutil = "^2.8.2" +requests = "^2.32.3" +typing-inspect = "^0.9.0" + [tool.poetry.group.dev.dependencies] -mypy = "==1.14.1" +mypy = "==1.13.0" pylint = "==3.2.3" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" @@ -39,7 +38,6 @@ requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" [tool.pytest.ini_options] -asyncio_default_fixture_loop_scope = "function" pythonpath = ["src"] [tool.mypy] diff --git a/packages/mistralai_gcp/scripts/prepare_readme.py b/packages/mistralai_gcp/scripts/prepare-readme.py similarity index 100% rename from packages/mistralai_gcp/scripts/prepare_readme.py rename to packages/mistralai_gcp/scripts/prepare-readme.py diff --git a/packages/mistralai_gcp/scripts/publish.sh b/packages/mistralai_gcp/scripts/publish.sh index f2f2cf2c..ab45b1f9 100755 --- a/packages/mistralai_gcp/scripts/publish.sh +++ b/packages/mistralai_gcp/scripts/publish.sh @@ -2,6 +2,6 @@ export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} -poetry run python scripts/prepare_readme.py +poetry run python scripts/prepare-readme.py poetry publish --build --skip-existing diff --git a/packages/mistralai_gcp/src/mistralai_gcp/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/__init__.py index dd02e42e..a1b7f626 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/__init__.py @@ -1,18 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from ._version import ( - __title__, - __version__, - __openapi_doc_version__, - __gen_version__, - __user_agent__, -) +from ._version import __title__, __version__ from .sdk import * from .sdkconfiguration import * from .models import * VERSION: str = __version__ -OPENAPI_DOC_VERSION = __openapi_doc_version__ -SPEAKEASY_GENERATOR_VERSION = __gen_version__ -USER_AGENT = __user_agent__ diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py index bb867b5b..417126fd 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py @@ -7,19 +7,16 @@ class HookContext: - base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None security_source: Optional[Union[Any, Callable[[], Any]]] = None def __init__( self, - base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): - self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes self.security_source = security_source @@ -28,30 +25,21 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source ) class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source ) class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_version.py b/packages/mistralai_gcp/src/mistralai_gcp/_version.py index 32be746c..30081f34 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_version.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_version.py @@ -4,9 +4,6 @@ __title__: str = "mistralai-gcp" __version__: str = "1.2.6" -__openapi_doc_version__: str = "0.0.2" -__gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.2.6 2.548.6 0.0.2 mistralai-gcp" try: if __package__ is not None: diff --git a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py index bb0aab96..40620018 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py @@ -231,10 +231,6 @@ def do(): req.headers, get_body_content(req), ) - - if client is None: - raise ValueError("client is required") - http_res = client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( @@ -307,10 +303,6 @@ async def do(): req.headers, get_body_content(req), ) - - if client is None: - raise ValueError("client is required") - http_res = await client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py index f162d2f7..47e5b63a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ 
b/packages/mistralai_gcp/src/mistralai_gcp/chat.py @@ -3,7 +3,7 @@ from .basesdk import BaseSDK from mistralai_gcp import models, utils from mistralai_gcp._hooks import HookContext -from mistralai_gcp.types import OptionalNullable, UNSET +from mistralai_gcp.types import Nullable, OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming from typing import Any, List, Mapping, Optional, Union @@ -14,7 +14,7 @@ class Chat(BaseSDK): def stream( self, *, - model: str, + model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -37,9 +37,6 @@ def stream( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -63,7 +60,6 @@ def stream( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -76,8 +72,6 @@ def stream( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -98,9 +92,6 @@ def stream( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), ) req = self._build_request( @@ -132,7 +123,6 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -143,7 +133,7 @@ def stream( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -152,16 +142,9 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = 
utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -179,7 +162,7 @@ def stream( async def stream_async( self, *, - model: str, + model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -202,9 +185,6 @@ async def stream_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -228,7 +208,6 @@ async def stream_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -241,8 +220,6 @@ async def stream_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -263,9 +240,6 @@ async def stream_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), ) req = self._build_request_async( @@ -297,7 +271,6 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -308,7 +281,7 @@ async def stream_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -317,16 +290,9 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -344,7 +310,7 @@ async def 
stream_async( def complete( self, *, - model: str, + model: Nullable[str], messages: Union[ List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], @@ -375,9 +341,6 @@ def complete( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -399,7 +362,6 @@ def complete( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -412,8 +374,6 @@ def complete( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -436,9 +396,6 @@ def complete( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), ) req = self._build_request( @@ -470,7 +427,6 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -480,22 +436,15 @@ def complete( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.ChatCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -513,7 +462,7 @@ def complete( async def complete_async( self, *, - model: str, + model: Nullable[str], messages: Union[ List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], @@ -544,9 +493,6 @@ async def complete_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: 
OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -568,7 +514,6 @@ async def complete_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -581,8 +526,6 @@ async def complete_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -605,9 +548,6 @@ async def complete_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), ) req = self._build_request_async( @@ -639,7 +579,6 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -649,22 +588,15 @@ async def complete_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.ChatCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py b/packages/mistralai_gcp/src/mistralai_gcp/fim.py index 84821c6a..89146a4a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/fim.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/fim.py @@ -3,7 +3,7 @@ from .basesdk import BaseSDK from mistralai_gcp import models, utils from mistralai_gcp._hooks import HookContext -from mistralai_gcp.types import OptionalNullable, UNSET +from mistralai_gcp.types import Nullable, OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming from typing import Any, 
Mapping, Optional, Union @@ -14,7 +14,7 @@ class Fim(BaseSDK): def stream( self, *, - model: str, + model: Nullable[str], prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -60,8 +60,6 @@ def stream( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionStreamRequest( model=model, @@ -105,7 +103,6 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -116,7 +113,7 @@ def stream( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -125,16 +122,9 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -152,7 +142,7 @@ def stream( async def stream_async( self, *, - model: str, + model: Nullable[str], prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -198,8 +188,6 @@ async def stream_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionStreamRequest( model=model, @@ -243,7 +231,6 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -254,7 +241,7 @@ async def stream_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -263,16 +250,9 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -290,7 +270,7 @@ async def stream_async( def complete( self, *, - model: str, + model: Nullable[str], 
prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -336,8 +316,6 @@ def complete( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionRequest( model=model, @@ -381,7 +359,6 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -391,22 +368,15 @@ def complete( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.FIMCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -424,7 +394,7 @@ def complete( async def complete_async( self, *, - model: str, + model: Nullable[str], prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -470,8 +440,6 @@ async def complete_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionRequest( model=model, @@ -515,7 +483,6 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -525,22 +492,15 @@ async def complete_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.FIMCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py index 1e426352..167cea4e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py 
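Before the httpclient changes below, it is worth noting the error-handling shape that the chat and FIM hunks above restore: one branch unmarshals a 200 response, one raises a typed HTTPValidationError for 422, and a single combined ["4XX", "5XX"] match replaces the two separate catch-all branches. A minimal, self-contained sketch of that control flow, using toy stand-ins rather than the SDK's own types:

from dataclasses import dataclass


@dataclass
class FakeResponse:
    status_code: int
    text: str


class HTTPValidationError(Exception):
    pass


class SDKError(Exception):
    pass


def handle(http_res: FakeResponse) -> str:
    if http_res.status_code == 200:
        return http_res.text  # the real SDK unmarshals JSON here
    if http_res.status_code == 422:
        raise HTTPValidationError(http_res.text)
    if 400 <= http_res.status_code < 600:
        # one shared branch instead of separate 4XX and 5XX matches
        raise SDKError(f"API error occurred: {http_res.status_code}")
    raise SDKError(f"unexpected status: {http_res.status_code}")


print(handle(FakeResponse(200, '{"ok": true}')))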
@@ -1,8 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" # pyright: reportReturnType = false -import asyncio -from concurrent.futures import ThreadPoolExecutor from typing_extensions import Protocol, runtime_checkable import httpx from typing import Any, Optional, Union @@ -84,53 +82,3 @@ def build_request( async def aclose(self) -> None: pass - - -class ClientOwner(Protocol): - client: Union[HttpClient, None] - async_client: Union[AsyncHttpClient, None] - - -def close_clients( - owner: ClientOwner, - sync_client: Union[HttpClient, None], - sync_client_supplied: bool, - async_client: Union[AsyncHttpClient, None], - async_client_supplied: bool, -) -> None: - """ - A finalizer function that is meant to be used with weakref.finalize to close - httpx clients used by an SDK so that underlying resources can be garbage - collected. - """ - - # Unset the client/async_client properties so there are no more references - # to them from the owning SDK instance and they can be reaped. - owner.client = None - owner.async_client = None - - if sync_client is not None and not sync_client_supplied: - try: - sync_client.close() - except Exception: - pass - - if async_client is not None and not async_client_supplied: - is_async = False - try: - asyncio.get_running_loop() - is_async = True - except RuntimeError: - pass - - try: - # If this function is called in an async loop then start another - # loop in a separate thread to close the async http client. - if is_async: - with ThreadPoolExecutor(max_workers=1) as executor: - future = executor.submit(asyncio.run, async_client.aclose()) - future.result() - else: - asyncio.run(async_client.aclose()) - except Exception: - pass diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py index 154777da..f3c6ce7e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py @@ -67,8 +67,6 @@ ) from .functionname import FunctionName, FunctionNameTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData -from .jsonschema import JSONSchema, JSONSchemaTypedDict -from .prediction import Prediction, PredictionTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats @@ -109,7 +107,6 @@ ValidationErrorTypedDict, ) - __all__ = [ "Arguments", "ArgumentsTypedDict", @@ -166,14 +163,10 @@ "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", - "JSONSchema", - "JSONSchemaTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", - "Prediction", - "PredictionTypedDict", "ReferenceChunk", "ReferenceChunkType", "ReferenceChunkTypedDict", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py index 9147f566..6a9b58f2 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py @@ -32,7 +32,6 @@ class AssistantMessageTypedDict(TypedDict): content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
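Stepping back to the httpclient.py deletion above: close_clients existed so that a weakref.finalize callback, which is synchronous, could still close an httpx.AsyncClient even when an event loop was already running. A self-contained sketch of the trick it used, with a dummy client standing in for httpx:

import asyncio
from concurrent.futures import ThreadPoolExecutor


class DummyAsyncClient:
    async def aclose(self) -> None:
        print("async client closed")


def close_async_client(async_client: DummyAsyncClient) -> None:
    try:
        asyncio.get_running_loop()
        is_async = True
    except RuntimeError:
        is_async = False
    if is_async:
        # asyncio.run() cannot be called inside a running loop, so run the
        # coroutine to completion on a separate thread instead.
        with ThreadPoolExecutor(max_workers=1) as executor:
            executor.submit(asyncio.run, async_client.aclose()).result()
    else:
        asyncio.run(async_client.aclose())


close_async_client(DummyAsyncClient())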
The role of the prefix message is to force the model to start its answer by the content of the message.""" role: NotRequired[AssistantMessageRole] @@ -42,7 +41,6 @@ class AssistantMessage(BaseModel): tool_calls: OptionalNullable[List[ToolCall]] = UNSET prefix: Optional[bool] = False - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: Optional[AssistantMessageRole] = "assistant" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py index 60a37f2f..ab97e52a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py @@ -2,7 +2,6 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -69,7 +68,7 @@ class ChatCompletionRequestTypedDict(TypedDict): - model: str + model: Nullable[str] r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionRequestMessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" @@ -94,11 +93,10 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] class ChatCompletionRequest(BaseModel): - model: str + model: Nullable[str] r"""ID of the model to use. 
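The prefix docstring removed in the hunk above described a useful feature: a trailing assistant message can carry prefix=True so the model is forced to begin its answer with that content. A hedged construction sketch (the message text is made up; treat this as an illustration, not official usage):

from mistralai_gcp.models import AssistantMessage

msg = AssistantMessage(
    content="Here is the JSON you asked for: {",
    prefix=True,  # the model must start its answer with this content
)
print(msg.model_dump())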
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionRequestMessages] @@ -137,8 +135,6 @@ class ChatCompletionRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: Optional[Prediction] = None - @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -154,9 +150,15 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", - "prediction", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + ] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py index f2041426..e6c5429b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -2,7 +2,6 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -65,7 +64,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): - model: str + model: Nullable[str] r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[MessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" @@ -89,11 +88,10 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] class ChatCompletionStreamRequest(BaseModel): - model: str + model: Nullable[str] r"""ID of the model to use. 
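The serialize_model change above is the counterpart of making model nullable: fields listed in nullable_fields serialize as an explicit null when set to None, while OptionalNullable fields left UNSET are dropped from the payload altogether. A hedged round-trip sketch (the message dict is illustrative):

from mistralai_gcp import models

req = models.ChatCompletionRequest(
    model=None,  # permitted again now that model is Nullable[str]
    messages=[{"role": "user", "content": "Hello"}],
)
# "model" appears as null; unset fields such as "n" are omitted entirely
print(req.model_dump_json())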
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[Messages] @@ -131,8 +129,6 @@ class ChatCompletionStreamRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: Optional[Prediction] = None - @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -148,9 +144,15 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", - "prediction", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + ] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py index 6dfb7373..81c87b7e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py @@ -26,7 +26,7 @@ class FIMCompletionRequestTypedDict(TypedDict): - model: str + model: Nullable[str] r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -52,7 +52,7 @@ class FIMCompletionRequestTypedDict(TypedDict): class FIMCompletionRequest(BaseModel): - model: str + model: Nullable[str] r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -98,6 +98,7 @@ def serialize_model(self, handler): "min_tokens", ] nullable_fields = [ + "model", "temperature", "max_tokens", "random_seed", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py index 406749bb..356758d3 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py @@ -26,7 +26,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): - model: str + model: Nullable[str] r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -51,7 +51,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): class FIMCompletionStreamRequest(BaseModel): - model: str + model: Nullable[str] r"""ID of the model to use. 
Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -96,6 +96,7 @@ def serialize_model(self, handler): "min_tokens", ] nullable_fields = [ + "model", "temperature", "max_tokens", "random_seed", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py index 3d61e624..c3168eec 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py @@ -10,7 +10,6 @@ class FunctionTypedDict(TypedDict): name: str parameters: Dict[str, Any] description: NotRequired[str] - strict: NotRequired[bool] class Function(BaseModel): @@ -19,5 +18,3 @@ class Function(BaseModel): parameters: Dict[str, Any] description: Optional[str] = "" - - strict: Optional[bool] = False diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py b/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py deleted file mode 100644 index 2c6bd478..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py +++ /dev/null @@ -1,61 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -import pydantic -from pydantic import model_serializer -from typing import Any, Dict, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class JSONSchemaTypedDict(TypedDict): - name: str - schema_definition: Dict[str, Any] - description: NotRequired[Nullable[str]] - strict: NotRequired[bool] - - -class JSONSchema(BaseModel): - name: str - - schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] - - description: OptionalNullable[str] = UNSET - - strict: Optional[bool] = False - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["description", "strict"] - nullable_fields = ["description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py b/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py deleted file mode 100644 index 742aac0b..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
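For reference, the JSONSchema model deleted above is what backed the json_schema response format; after this revert only text and json_object remain valid (see the ResponseFormats change further down). A sketch of the wire payload the deleted model represented, with illustrative field values; note that schema_definition serialized under its "schema" alias:

response_format = {
    "type": "json_schema",
    "json_schema": {
        "name": "book",
        "schema": {  # the alias of schema_definition
            "type": "object",
            "properties": {"title": {"type": "string"}},
            "required": ["title"],
        },
        "strict": True,
    },
}
print(response_format)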
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import BaseModel -from mistralai_gcp.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class PredictionTypedDict(TypedDict): - type: Literal["content"] - content: NotRequired[str] - - -class Prediction(BaseModel): - TYPE: Annotated[ - Annotated[ - Optional[Literal["content"]], AfterValidator(validate_const("content")) - ], - pydantic.Field(alias="type"), - ] = "content" - - content: Optional[str] = "" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py index 5a24f644..fde89862 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py @@ -1,16 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from .jsonschema import JSONSchema, JSONSchemaTypedDict from .responseformats import ResponseFormats -from mistralai_gcp.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer +from mistralai_gcp.types import BaseModel from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -18,41 +10,8 @@ class ResponseFormatTypedDict(TypedDict): type: NotRequired[ResponseFormats] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] class ResponseFormat(BaseModel): type: Optional[ResponseFormats] = None r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
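The Prediction model deleted above appears to mirror OpenAI-style predicted outputs: a constant type of "content" plus the text the model is expected to largely reproduce. An illustrative payload matching the deleted model's shape:

prediction = {
    "type": "content",  # validated as a constant by the deleted model
    "content": "def add(a, b):\n    return a + b\n",
}
print(prediction)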
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - - json_schema: OptionalNullable[JSONSchema] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "json_schema"] - nullable_fields = ["json_schema"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py index 08c39951..2c06b812 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py @@ -4,5 +4,5 @@ from typing import Literal -ResponseFormats = Literal["text", "json_object", "json_schema"] +ResponseFormats = Literal["text", "json_object"] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py index ecbac8d6..5b4b217a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py @@ -14,7 +14,6 @@ class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] - index: NotRequired[int] class ToolCall(BaseModel): @@ -25,5 +24,3 @@ class ToolCall(BaseModel): type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( None ) - - index: Optional[int] = 0 diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py index dd93cc7f..abfea8db 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py @@ -1,25 +1,23 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +"""Code generated by Speakeasy (https://speakeasyapi.dev). 
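With index dropped from ToolCall in the hunk above, a tool call is described by its id, function, and type alone. A hedged construction sketch (identifiers and arguments are made up):

from mistralai_gcp.models import ToolCall

call = ToolCall(
    id="call_0",
    function={"name": "get_weather", "arguments": '{"city": "Paris"}'},
)
print(call.model_dump())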
DO NOT EDIT.""" import json -import weakref -from typing import Any, Optional, cast +from typing import Optional, Tuple, Union import google.auth import google.auth.credentials import google.auth.transport import google.auth.transport.requests import httpx - from mistralai_gcp import models from mistralai_gcp._hooks import BeforeRequestHook, SDKHooks from mistralai_gcp.chat import Chat from mistralai_gcp.fim import Fim -from mistralai_gcp.types import UNSET, OptionalNullable +from mistralai_gcp.types import Nullable from .basesdk import BaseSDK -from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients +from .httpclient import AsyncHttpClient, HttpClient from .sdkconfiguration import SDKConfiguration -from .utils.logger import Logger, get_default_logger +from .utils.logger import Logger, NoOpLogger from .utils.retries import RetryConfig LEGACY_MODEL_ID_FORMAT = { @@ -28,21 +26,20 @@ "mistral-nemo-2407": "mistral-nemo@2407", } - -def get_model_info(model: str) -> tuple[str, str]: +def get_model_info(model: str) -> Tuple[str, str]: # if the model requires the legacy format, use it, else do nothing. if model in LEGACY_MODEL_ID_FORMAT: return "-".join(model.split("-")[:-1]), LEGACY_MODEL_ID_FORMAT[model] return model, model + class MistralGoogleCloud(BaseSDK): r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" chat: Chat - r"""Chat Completion API.""" fim: Fim - r"""Fill-in-the-middle API.""" + r"""Chat Completion API""" def __init__( self, @@ -51,20 +48,16 @@ def __init__( access_token: Optional[str] = None, client: Optional[HttpClient] = None, async_client: Optional[AsyncHttpClient] = None, - retry_config: OptionalNullable[RetryConfig] = UNSET, - timeout_ms: Optional[int] = None, + retry_config: Optional[Nullable[RetryConfig]] = None, debug_logger: Optional[Logger] = None, ) -> None: r"""Instantiates the SDK configuring it with the provided parameters. - :param api_key: The api_key required for authentication - :param server: The server by name to use for all methods - :param server_url: The server URL to use for all methods - :param url_params: Parameters to optionally template the server URL with + :param region: The Google Cloud region to use for all methods + :param project_id: The project ID to use for all methods :param client: The HTTP client to use for all synchronous methods :param async_client: The Async HTTP client to use for all asynchronous methods :param retry_config: The retry configuration to use for all supported methods - :param timeout_ms: Optional request timeout applied to each operation in milliseconds """ if not access_token: @@ -79,42 +72,36 @@ def __init__( ) project_id = project_id or loaded_project_id - if project_id is None: raise models.SDKError("project_id must be provided") def auth_token() -> str: if access_token: return access_token - credentials.refresh(google.auth.transport.requests.Request()) token = credentials.token if not token: raise models.SDKError("Failed to get token from credentials") return token - client_supplied = True if client is None: client = httpx.Client() - client_supplied = False assert issubclass( type(client), HttpClient ), "The provided client must implement the HttpClient protocol."
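The restored get_model_info helper maps the legacy, versioned model IDs in LEGACY_MODEL_ID_FORMAT to their "@"-separated Vertex AI form and passes every other ID through unchanged:

from mistralai_gcp.sdk import get_model_info

print(get_model_info("mistral-nemo-2407"))  # ('mistral-nemo', 'mistral-nemo@2407')
print(get_model_info("my-tuned-model"))     # ('my-tuned-model', 'my-tuned-model')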
- async_client_supplied = True if async_client is None: async_client = httpx.AsyncClient() - async_client_supplied = False if debug_logger is None: - debug_logger = get_default_logger() + debug_logger = NoOpLogger() assert issubclass( type(async_client), AsyncHttpClient ), "The provided async_client must implement the AsyncHttpClient protocol." - security: Any = None + security = None if callable(auth_token): security = lambda: models.Security( # pylint: disable=unnecessary-lambda-assignment api_key=auth_token() @@ -126,24 +113,23 @@ def auth_token() -> str: self, SDKConfiguration( client=client, - client_supplied=client_supplied, async_client=async_client, - async_client_supplied=async_client_supplied, security=security, server_url=f"https://{region}-aiplatform.googleapis.com", server=None, retry_config=retry_config, - timeout_ms=timeout_ms, debug_logger=debug_logger, ), ) hooks = SDKHooks() + hook = GoogleCloudBeforeRequestHook(region, project_id) hooks.register_before_request_hook(hook) + current_server_url, *_ = self.sdk_configuration.get_server_details() server_url, self.sdk_configuration.client = hooks.sdk_init( - current_server_url, client + current_server_url, self.sdk_configuration.client ) if current_server_url != server_url: self.sdk_configuration.server_url = server_url @@ -151,53 +137,22 @@ def auth_token() -> str: # pylint: disable=protected-access self.sdk_configuration.__dict__["_hooks"] = hooks - weakref.finalize( - self, - close_clients, - cast(ClientOwner, self.sdk_configuration), - self.sdk_configuration.client, - self.sdk_configuration.client_supplied, - self.sdk_configuration.async_client, - self.sdk_configuration.async_client_supplied, - ) - self._init_sdks() def _init_sdks(self): self.chat = Chat(self.sdk_configuration) self.fim = Fim(self.sdk_configuration) - def __enter__(self): - return self - - async def __aenter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.client is not None - and not self.sdk_configuration.client_supplied - ): - self.sdk_configuration.client.close() - self.sdk_configuration.client = None - - async def __aexit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.async_client is not None - and not self.sdk_configuration.async_client_supplied - ): - await self.sdk_configuration.async_client.aclose() - self.sdk_configuration.async_client = None - class GoogleCloudBeforeRequestHook(BeforeRequestHook): + def __init__(self, region: str, project_id: str): self.region = region self.project_id = project_id def before_request( self, hook_ctx, request: httpx.Request - ) -> httpx.Request | Exception: + ) -> Union[httpx.Request, Exception]: # The goal of this function is to template in the region, project and model into the URL path # We do this here so that the API remains more user-friendly model_id = None @@ -212,6 +167,7 @@ def before_request( if model_id == "": raise models.SDKError("model must be provided") + stream = "streamRawPredict" in request.url.path specifier = "streamRawPredict" if stream else "rawPredict" url = f"/v1/projects/{self.project_id}/locations/{self.region}/publishers/mistralai/models/{model_id}:{specifier}" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py index c373d27d..3c149cc6 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -1,12 +1,6 @@ """Code generated by Speakeasy 
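As the end of the sdk.py hunk shows, GoogleCloudBeforeRequestHook rewrites each request URL, templating region, project, and model into the Vertex AI path and choosing streamRawPredict or rawPredict based on the original path. A standalone sketch of that substitution (the values are examples):

def build_vertex_path(region: str, project_id: str, model_id: str, stream: bool) -> str:
    # mirrors the f-string in GoogleCloudBeforeRequestHook.before_request
    specifier = "streamRawPredict" if stream else "rawPredict"
    return (
        f"/v1/projects/{project_id}/locations/{region}"
        f"/publishers/mistralai/models/{model_id}:{specifier}"
    )

print(build_vertex_path("europe-west4", "my-project", "mistral-nemo@2407", stream=True))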
(https://speakeasy.com). DO NOT EDIT.""" from ._hooks import SDKHooks -from ._version import ( - __gen_version__, - __openapi_doc_version__, - __user_agent__, - __version__, -) from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix from dataclasses import dataclass @@ -26,19 +20,17 @@ @dataclass class SDKConfiguration: - client: Union[HttpClient, None] - client_supplied: bool - async_client: Union[AsyncHttpClient, None] - async_client_supplied: bool + client: HttpClient + async_client: AsyncHttpClient debug_logger: Logger security: Optional[Union[models.Security, Callable[[], models.Security]]] = None server_url: Optional[str] = "" server: Optional[str] = "" language: str = "python" - openapi_doc_version: str = __openapi_doc_version__ - sdk_version: str = __version__ - gen_version: str = __gen_version__ - user_agent: str = __user_agent__ + openapi_doc_version: str = "0.0.2" + sdk_version: str = "1.2.6" + gen_version: str = "2.486.1" + user_agent: str = "speakeasy-sdk/python 1.2.6 2.486.1 0.0.2 mistralai-gcp" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py index 3cded8fe..26d51ae8 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py @@ -42,7 +42,6 @@ match_content_type, match_status_codes, match_response, - cast_partial, ) from .logger import Logger, get_body_content, get_default_logger @@ -95,5 +94,4 @@ "validate_float", "validate_int", "validate_open_enum", - "cast_partial", ] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py index baa41fbd..c5eb3659 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py @@ -7,15 +7,14 @@ from typing_extensions import get_origin from pydantic import ConfigDict, create_model from pydantic_core import from_json -from typing_inspection.typing_objects import is_union +from typing_inspect import is_optional_type from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset def serialize_decimal(as_str: bool): def serialize(d): - # Optional[T] is a Union[T, None] - if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: + if is_optional_type(type(d)) and d is None: return None if isinstance(d, Unset): return d @@ -43,8 +42,7 @@ def validate_decimal(d): def serialize_float(as_str: bool): def serialize(f): - # Optional[T] is a Union[T, None] - if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: + if is_optional_type(type(f)) and f is None: return None if isinstance(f, Unset): return f @@ -72,8 +70,7 @@ def validate_float(f): def serialize_int(as_str: bool): def serialize(i): - # Optional[T] is a Union[T, None] - if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: + if is_optional_type(type(i)) and i is None: return None if isinstance(i, Unset): return i @@ -121,8 +118,7 @@ def validate(e): def validate_const(v): def validate(c): - # Optional[T] is a Union[T, None] - if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: + if is_optional_type(type(c)) and c is None: return None if v != c: @@ -167,7 +163,7 @@ def marshal_json(val, typ): if len(d) == 0: return "" - return 
json.dumps(d[next(iter(d))], separators=(",", ":")) + return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True) def is_nullable(field): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py index dae01a44..2b4b6832 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py @@ -3,9 +3,8 @@ from datetime import datetime from enum import Enum from email.message import Message -from functools import partial import os -from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union from httpx import Response from pydantic import BaseModel @@ -52,8 +51,6 @@ def match_status_codes(status_codes: List[str], status_code: int) -> bool: T = TypeVar("T") -def cast_partial(typ): - return partial(cast, typ) def get_global_from_env( value: Optional[T], env_key: str, type_cast: Callable[[str], T] diff --git a/poetry.lock b/poetry.lock index 3d36b94f..78003ff1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -12,6 +12,9 @@ files = [ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + [[package]] name = "anyio" version = "4.5.2" @@ -312,14 +315,14 @@ trio = ["trio (>=0.22.0,<1.0)"] [[package]] name = "httpx" -version = "0.28.1" +version = "0.27.2" description = "The next generation HTTP client." optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, - {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, + {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, + {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, ] [package.dependencies] @@ -327,6 +330,7 @@ anyio = "*" certifi = "*" httpcore = "==1.*" idna = "*" +sniffio = "*" [package.extras] brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] @@ -377,6 +381,18 @@ files = [ [package.extras] colors = ["colorama (>=0.4.6)"] +[[package]] +name = "jsonpath-python" +version = "1.0.6" +description = "A more powerful JSONPath implementation in modern python" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "jsonpath-python-1.0.6.tar.gz", hash = "sha256:dd5be4a72d8a2995c3f583cf82bf3cd1a9544cfdabf2d22595b67aff07349666"}, + {file = "jsonpath_python-1.0.6-py3-none-any.whl", hash = "sha256:1e3b78df579f5efc23565293612decee04214609208a2335884b3ee3f786b575"}, +] + [[package]] name = "mccabe" version = "0.7.0" @@ -391,56 +407,50 @@ files = [ [[package]] name = "mypy" -version = "1.14.1" +version = "1.13.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" groups = ["dev"] files = [ - {file = "mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb"}, - {file = "mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0"}, - {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d"}, - {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b"}, - {file = "mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427"}, - {file = "mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f"}, - {file = "mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c"}, - {file = "mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1"}, - {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8"}, - {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f"}, - {file = "mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1"}, - {file = "mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae"}, - {file = "mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14"}, - {file = "mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9"}, - {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11"}, - {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e"}, - {file = "mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89"}, - {file = "mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b"}, - {file = "mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255"}, - {file = "mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34"}, - {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a"}, - {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9"}, - {file = "mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd"}, - {file = "mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = 
"sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107"}, - {file = "mypy-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7084fb8f1128c76cd9cf68fe5971b37072598e7c31b2f9f95586b65c741a9d31"}, - {file = "mypy-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f845a00b4f420f693f870eaee5f3e2692fa84cc8514496114649cfa8fd5e2c6"}, - {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44bf464499f0e3a2d14d58b54674dee25c031703b2ffc35064bd0df2e0fac319"}, - {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c99f27732c0b7dc847adb21c9d47ce57eb48fa33a17bc6d7d5c5e9f9e7ae5bac"}, - {file = "mypy-1.14.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:bce23c7377b43602baa0bd22ea3265c49b9ff0b76eb315d6c34721af4cdf1d9b"}, - {file = "mypy-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:8edc07eeade7ebc771ff9cf6b211b9a7d93687ff892150cb5692e4f4272b0837"}, - {file = "mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35"}, - {file = "mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc"}, - {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9"}, - {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb"}, - {file = "mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60"}, - {file = "mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c"}, - {file = "mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1"}, - {file = "mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6"}, + {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"}, + {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"}, + {file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"}, + {file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"}, + {file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"}, + {file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"}, + {file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"}, 
+ {file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"}, + {file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"}, + {file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"}, + {file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"}, + {file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"}, + {file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"}, + {file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"}, + {file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"}, + {file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"}, + {file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"}, + {file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"}, + {file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"}, + {file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"}, + {file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"}, + {file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"}, + {file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"}, + {file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"}, ] [package.dependencies] -mypy_extensions = ">=1.0.0" +mypy-extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing_extensions = ">=4.6.0" 
+typing-extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] @@ -455,7 +465,7 @@ version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." optional = false python-versions = ">=3.5" -groups = ["dev"] +groups = ["main", "dev"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, @@ -537,19 +547,19 @@ pyasn1 = ">=0.4.6,<0.7.0" [[package]] name = "pydantic" -version = "2.10.6" +version = "2.10.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, - {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, + {file = "pydantic-2.10.2-py3-none-any.whl", hash = "sha256:cfb96e45951117c3024e6b67b25cdc33a3cb7b2fa62e239f7af1378358a1d99e"}, + {file = "pydantic-2.10.2.tar.gz", hash = "sha256:2bc2d7f17232e0841cbba4641e65ba1eb6fafb3a08de3a091ff3ce14a197c4fa"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.27.2" +pydantic-core = "2.27.1" typing-extensions = ">=4.12.2" [package.extras] @@ -558,112 +568,112 @@ timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows [[package]] name = "pydantic-core" -version = "2.27.2" +version = "2.27.1" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, - {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, - {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, - {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, - {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, - {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, - {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, - {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, - {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, - 
{file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, - {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, - {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, - {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, - {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, - {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, - {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, - {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, - {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, + {file = "pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a"}, + {file = "pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:121ceb0e822f79163dd4699e4c54f5ad38b157084d97b34de8b232bcaad70278"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4603137322c18eaf2e06a4495f426aa8d8388940f3c457e7548145011bb68e05"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a33cd6ad9017bbeaa9ed78a2e0752c5e250eafb9534f308e7a5f7849b0b1bfb4"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15cc53a3179ba0fcefe1e3ae50beb2784dede4003ad2dfd24f81bba4b23a454f"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45d9c5eb9273aa50999ad6adc6be5e0ecea7e09dbd0d31bd0c65a55a2592ca08"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8bf7b66ce12a2ac52d16f776b31d16d91033150266eb796967a7e4621707e4f6"}, + {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:655d7dd86f26cb15ce8a431036f66ce0318648f8853d709b4167786ec2fa4807"}, + {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:5556470f1a2157031e676f776c2bc20acd34c1990ca5f7e56f1ebf938b9ab57c"}, + {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f69ed81ab24d5a3bd93861c8c4436f54afdf8e8cc421562b0c7504cf3be58206"}, + {file = "pydantic_core-2.27.1-cp310-none-win32.whl", hash = "sha256:f5a823165e6d04ccea61a9f0576f345f8ce40ed533013580e087bd4d7442b52c"}, + {file = "pydantic_core-2.27.1-cp310-none-win_amd64.whl", hash = "sha256:57866a76e0b3823e0b56692d1a0bf722bffb324839bb5b7226a7dbd6c9a40b17"}, + {file = "pydantic_core-2.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac3b20653bdbe160febbea8aa6c079d3df19310d50ac314911ed8cc4eb7f8cb8"}, + {file = "pydantic_core-2.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a5a8e19d7c707c4cadb8c18f5f60c843052ae83c20fa7d44f41594c644a1d330"}, + {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f7059ca8d64fea7f238994c97d91f75965216bcbe5f695bb44f354893f11d52"}, + {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bed0f8a0eeea9fb72937ba118f9db0cb7e90773462af7962d382445f3005e5a4"}, + {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3cb37038123447cf0f3ea4c74751f6a9d7afef0eb71aa07bf5f652b5e6a132c"}, + {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84286494f6c5d05243456e04223d5a9417d7f443c3b76065e75001beb26f88de"}, + {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acc07b2cfc5b835444b44a9956846b578d27beeacd4b52e45489e93276241025"}, + {file = 
"pydantic_core-2.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4fefee876e07a6e9aad7a8c8c9f85b0cdbe7df52b8a9552307b09050f7512c7e"}, + {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:258c57abf1188926c774a4c94dd29237e77eda19462e5bb901d88adcab6af919"}, + {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:35c14ac45fcfdf7167ca76cc80b2001205a8d5d16d80524e13508371fb8cdd9c"}, + {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1b26e1dff225c31897696cab7d4f0a315d4c0d9e8666dbffdb28216f3b17fdc"}, + {file = "pydantic_core-2.27.1-cp311-none-win32.whl", hash = "sha256:2cdf7d86886bc6982354862204ae3b2f7f96f21a3eb0ba5ca0ac42c7b38598b9"}, + {file = "pydantic_core-2.27.1-cp311-none-win_amd64.whl", hash = "sha256:3af385b0cee8df3746c3f406f38bcbfdc9041b5c2d5ce3e5fc6637256e60bbc5"}, + {file = "pydantic_core-2.27.1-cp311-none-win_arm64.whl", hash = "sha256:81f2ec23ddc1b476ff96563f2e8d723830b06dceae348ce02914a37cb4e74b89"}, + {file = "pydantic_core-2.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9cbd94fc661d2bab2bc702cddd2d3370bbdcc4cd0f8f57488a81bcce90c7a54f"}, + {file = "pydantic_core-2.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5f8c4718cd44ec1580e180cb739713ecda2bdee1341084c1467802a417fe0f02"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15aae984e46de8d376df515f00450d1522077254ef6b7ce189b38ecee7c9677c"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ba5e3963344ff25fc8c40da90f44b0afca8cfd89d12964feb79ac1411a260ac"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:992cea5f4f3b29d6b4f7f1726ed8ee46c8331c6b4eed6db5b40134c6fe1768bb"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0325336f348dbee6550d129b1627cb8f5351a9dc91aad141ffb96d4937bd9529"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7597c07fbd11515f654d6ece3d0e4e5093edc30a436c63142d9a4b8e22f19c35"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3bbd5d8cc692616d5ef6fbbbd50dbec142c7e6ad9beb66b78a96e9c16729b089"}, + {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:dc61505e73298a84a2f317255fcc72b710b72980f3a1f670447a21efc88f8381"}, + {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:e1f735dc43da318cad19b4173dd1ffce1d84aafd6c9b782b3abc04a0d5a6f5bb"}, + {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f4e5658dbffe8843a0f12366a4c2d1c316dbe09bb4dfbdc9d2d9cd6031de8aae"}, + {file = "pydantic_core-2.27.1-cp312-none-win32.whl", hash = "sha256:672ebbe820bb37988c4d136eca2652ee114992d5d41c7e4858cdd90ea94ffe5c"}, + {file = "pydantic_core-2.27.1-cp312-none-win_amd64.whl", hash = "sha256:66ff044fd0bb1768688aecbe28b6190f6e799349221fb0de0e6f4048eca14c16"}, + {file = "pydantic_core-2.27.1-cp312-none-win_arm64.whl", hash = "sha256:9a3b0793b1bbfd4146304e23d90045f2a9b5fd5823aa682665fbdaf2a6c28f3e"}, + {file = "pydantic_core-2.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f216dbce0e60e4d03e0c4353c7023b202d95cbaeff12e5fd2e82ea0a66905073"}, + {file = "pydantic_core-2.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:a2e02889071850bbfd36b56fd6bc98945e23670773bc7a76657e90e6b6603c08"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42b0e23f119b2b456d07ca91b307ae167cc3f6c846a7b169fca5326e32fdc6cf"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:764be71193f87d460a03f1f7385a82e226639732214b402f9aa61f0d025f0737"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c00666a3bd2f84920a4e94434f5974d7bbc57e461318d6bb34ce9cdbbc1f6b2"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ccaa88b24eebc0f849ce0a4d09e8a408ec5a94afff395eb69baf868f5183107"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c65af9088ac534313e1963443d0ec360bb2b9cba6c2909478d22c2e363d98a51"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206b5cf6f0c513baffaeae7bd817717140770c74528f3e4c3e1cec7871ddd61a"}, + {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:062f60e512fc7fff8b8a9d680ff0ddaaef0193dba9fa83e679c0c5f5fbd018bc"}, + {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:a0697803ed7d4af5e4c1adf1670af078f8fcab7a86350e969f454daf598c4960"}, + {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:58ca98a950171f3151c603aeea9303ef6c235f692fe555e883591103da709b23"}, + {file = "pydantic_core-2.27.1-cp313-none-win32.whl", hash = "sha256:8065914ff79f7eab1599bd80406681f0ad08f8e47c880f17b416c9f8f7a26d05"}, + {file = "pydantic_core-2.27.1-cp313-none-win_amd64.whl", hash = "sha256:ba630d5e3db74c79300d9a5bdaaf6200172b107f263c98a0539eeecb857b2337"}, + {file = "pydantic_core-2.27.1-cp313-none-win_arm64.whl", hash = "sha256:45cf8588c066860b623cd11c4ba687f8d7175d5f7ef65f7129df8a394c502de5"}, + {file = "pydantic_core-2.27.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5897bec80a09b4084aee23f9b73a9477a46c3304ad1d2d07acca19723fb1de62"}, + {file = "pydantic_core-2.27.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0165ab2914379bd56908c02294ed8405c252250668ebcb438a55494c69f44ab"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b9af86e1d8e4cfc82c2022bfaa6f459381a50b94a29e95dcdda8442d6d83864"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f6c8a66741c5f5447e047ab0ba7a1c61d1e95580d64bce852e3df1f895c4067"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a42d6a8156ff78981f8aa56eb6394114e0dedb217cf8b729f438f643608cbcd"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64c65f40b4cd8b0e049a8edde07e38b476da7e3aaebe63287c899d2cff253fa5"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdcf339322a3fae5cbd504edcefddd5a50d9ee00d968696846f089b4432cf78"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bf99c8404f008750c846cb4ac4667b798a9f7de673ff719d705d9b2d6de49c5f"}, + {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8f1edcea27918d748c7e5e4d917297b2a0ab80cad10f86631e488b7cddf76a36"}, + {file = 
"pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:159cac0a3d096f79ab6a44d77a961917219707e2a130739c64d4dd46281f5c2a"}, + {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:029d9757eb621cc6e1848fa0b0310310de7301057f623985698ed7ebb014391b"}, + {file = "pydantic_core-2.27.1-cp38-none-win32.whl", hash = "sha256:a28af0695a45f7060e6f9b7092558a928a28553366519f64083c63a44f70e618"}, + {file = "pydantic_core-2.27.1-cp38-none-win_amd64.whl", hash = "sha256:2d4567c850905d5eaaed2f7a404e61012a51caf288292e016360aa2b96ff38d4"}, + {file = "pydantic_core-2.27.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e9386266798d64eeb19dd3677051f5705bf873e98e15897ddb7d76f477131967"}, + {file = "pydantic_core-2.27.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4228b5b646caa73f119b1ae756216b59cc6e2267201c27d3912b592c5e323b60"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3dfe500de26c52abe0477dde16192ac39c98f05bf2d80e76102d394bd13854"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aee66be87825cdf72ac64cb03ad4c15ffef4143dbf5c113f64a5ff4f81477bf9"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b748c44bb9f53031c8cbc99a8a061bc181c1000c60a30f55393b6e9c45cc5bd"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ca038c7f6a0afd0b2448941b6ef9d5e1949e999f9e5517692eb6da58e9d44be"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e0bd57539da59a3e4671b90a502da9a28c72322a4f17866ba3ac63a82c4498e"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ac6c2c45c847bbf8f91930d88716a0fb924b51e0c6dad329b793d670ec5db792"}, + {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b94d4ba43739bbe8b0ce4262bcc3b7b9f31459ad120fb595627eaeb7f9b9ca01"}, + {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:00e6424f4b26fe82d44577b4c842d7df97c20be6439e8e685d0d715feceb9fb9"}, + {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:38de0a70160dd97540335b7ad3a74571b24f1dc3ed33f815f0880682e6880131"}, + {file = "pydantic_core-2.27.1-cp39-none-win32.whl", hash = "sha256:7ccebf51efc61634f6c2344da73e366c75e735960b5654b63d7e6f69a5885fa3"}, + {file = "pydantic_core-2.27.1-cp39-none-win_amd64.whl", hash = "sha256:a57847b090d7892f123726202b7daa20df6694cbd583b67a592e856bff603d6c"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3fa80ac2bd5856580e242dbc202db873c60a01b20309c8319b5c5986fbe53ce6"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d950caa237bb1954f1b8c9227b5065ba6875ac9771bb8ec790d956a699b78676"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e4216e64d203e39c62df627aa882f02a2438d18a5f21d7f721621f7a5d3611d"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a3d637bd387c41d46b002f0e49c52642281edacd2740e5a42f7017feea3f2c"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:161c27ccce13b6b0c8689418da3885d3220ed2eae2ea5e9b2f7f3d48f1d52c27"}, + {file = 
"pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19910754e4cc9c63bc1c7f6d73aa1cfee82f42007e407c0f413695c2f7ed777f"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:e173486019cc283dc9778315fa29a363579372fe67045e971e89b6365cc035ed"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:af52d26579b308921b73b956153066481f064875140ccd1dfd4e77db89dbb12f"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:981fb88516bd1ae8b0cbbd2034678a39dedc98752f264ac9bc5839d3923fa04c"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5fde892e6c697ce3e30c61b239330fc5d569a71fefd4eb6512fc6caec9dd9e2f"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:816f5aa087094099fff7edabb5e01cc370eb21aa1a1d44fe2d2aefdfb5599b31"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c10c309e18e443ddb108f0ef64e8729363adbfd92d6d57beec680f6261556f3"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98476c98b02c8e9b2eec76ac4156fd006628b1b2d0ef27e548ffa978393fd154"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c3027001c28434e7ca5a6e1e527487051136aa81803ac812be51802150d880dd"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7699b1df36a48169cdebda7ab5a2bac265204003f153b4bd17276153d997670a"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1c39b07d90be6b48968ddc8c19e7585052088fd7ec8d568bb31ff64c70ae3c97"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:46ccfe3032b3915586e469d4972973f893c0a2bb65669194a5bdea9bacc088c2"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:62ba45e21cf6571d7f716d903b5b7b6d2617e2d5d67c0923dc47b9d41369f840"}, + {file = "pydantic_core-2.27.1.tar.gz", hash = "sha256:62a763352879b84aa31058fc931884055fd75089cccbd9d58bb6afd01141b235"}, ] [package.dependencies] @@ -899,19 +909,20 @@ files = [ ] [[package]] -name = "typing-inspection" -version = "0.4.0" -description = "Runtime typing introspection tools" +name = "typing-inspect" +version = "0.9.0" +description = "Runtime inspection utilities for typing module." 
 optional = false
-python-versions = ">=3.9"
+python-versions = "*"
 groups = ["main"]
 files = [
-    {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"},
-    {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"},
+    {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"},
+    {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"},
 ]
 
 [package.dependencies]
-typing-extensions = ">=4.12.0"
+mypy-extensions = ">=0.3.0"
+typing-extensions = ">=3.7.4"
 
 [[package]]
 name = "urllib3"
@@ -937,5 +948,5 @@ gcp = ["google-auth", "requests"]
 
 [metadata]
 lock-version = "2.1"
-python-versions = ">=3.9"
-content-hash = "c3917a9114ca2a0c01aedf207fa1b59cc259bb07c4d2914fe2ed9a4cb3e1785e"
+python-versions = ">=3.8"
+content-hash = "f0f19d81d36ebe966895f21a0a9dd33118783904418f4103189c475e5903b958"
diff --git a/pylintrc b/pylintrc
index 266bc815..9d193c42 100644
--- a/pylintrc
+++ b/pylintrc
@@ -455,10 +455,7 @@ disable=raw-checker-failed,
         bare-except,
         broad-exception-caught,
         fixme,
-        relative-beyond-top-level,
-        consider-using-with,
-        wildcard-import,
-        unused-wildcard-import
+        relative-beyond-top-level
 
 # Enable the message, report, category or checker with the given id(s). You can
 # either give multiple identifier separated by comma (,) or put this option
diff --git a/pyproject.toml b/pyproject.toml
index 8edc7537..5e7ddb8c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "mistralai"
-version = "1.5.2-rc.1"
+version = "1.5.1"
 description = "Python Client SDK for the Mistral AI API."
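pyproject.toml and src/mistralai/_version.py (a few hunks below) are regenerated together, so the version string moves in lockstep in both places. A quick check of what is actually installed, assuming the distribution is present in the environment:

    import importlib.metadata

    # Prints the installed distribution's version,
    # e.g. "1.5.1" once this revert ships.
    print(importlib.metadata.version("mistralai"))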
 authors = [{ name = "Mistral" },]
 readme = "README-PYPI.md"
diff --git a/scripts/prepare_readme.py b/scripts/prepare-readme.py
similarity index 84%
rename from scripts/prepare_readme.py
rename to scripts/prepare-readme.py
index 16f6fc7e..9111d6cb 100644
--- a/scripts/prepare_readme.py
+++ b/scripts/prepare-readme.py
@@ -4,7 +4,7 @@
 import shutil
 
 try:
-    with open("README.md", "r", encoding="utf-8") as rh:
+    with open("README.md", "r") as rh:
         readme_contents = rh.read()
     GITHUB_URL = "https://github.com/mistralai/client-python.git"
     GITHUB_URL = (
@@ -21,13 +21,13 @@
             readme_contents,
         )
 
-    with open("README-PYPI.md", "w", encoding="utf-8") as wh:
+    with open("README-PYPI.md", "w") as wh:
         wh.write(readme_contents)
 except Exception as e:
     try:
         print("Failed to rewrite README.md to README-PYPI.md, copying original instead")
         print(e)
         shutil.copyfile("README.md", "README-PYPI.md")
-    except Exception as ie:
+    except Exception as e:
         print("Failed to copy README.md to README-PYPI.md")
-        print(ie)
+        print(e)
diff --git a/scripts/publish.sh b/scripts/publish.sh
index f2f2cf2c..ab45b1f9 100755
--- a/scripts/publish.sh
+++ b/scripts/publish.sh
@@ -2,6 +2,6 @@
 
 export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN}
 
-poetry run python scripts/prepare_readme.py
+poetry run python scripts/prepare-readme.py
 
 poetry publish --build --skip-existing
diff --git a/src/mistralai/_hooks/types.py b/src/mistralai/_hooks/types.py
index ebc789ff..fe448e94 100644
--- a/src/mistralai/_hooks/types.py
+++ b/src/mistralai/_hooks/types.py
@@ -7,19 +7,16 @@
 
 
 class HookContext:
-    base_url: str
     operation_id: str
     oauth2_scopes: Optional[List[str]] = None
     security_source: Optional[Union[Any, Callable[[], Any]]] = None
 
     def __init__(
         self,
-        base_url: str,
         operation_id: str,
         oauth2_scopes: Optional[List[str]],
         security_source: Optional[Union[Any, Callable[[], Any]]],
     ):
-        self.base_url = base_url
         self.operation_id = operation_id
         self.oauth2_scopes = oauth2_scopes
         self.security_source = security_source
@@ -28,30 +25,21 @@ def __init__(
 
 
 class BeforeRequestContext(HookContext):
     def __init__(self, hook_ctx: HookContext):
         super().__init__(
-            hook_ctx.base_url,
-            hook_ctx.operation_id,
-            hook_ctx.oauth2_scopes,
-            hook_ctx.security_source,
+            hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source
         )
 
 
 class AfterSuccessContext(HookContext):
     def __init__(self, hook_ctx: HookContext):
         super().__init__(
-            hook_ctx.base_url,
-            hook_ctx.operation_id,
-            hook_ctx.oauth2_scopes,
-            hook_ctx.security_source,
+            hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source
        )
 
 
 class AfterErrorContext(HookContext):
     def __init__(self, hook_ctx: HookContext):
         super().__init__(
-            hook_ctx.base_url,
-            hook_ctx.operation_id,
-            hook_ctx.oauth2_scopes,
-            hook_ctx.security_source,
+            hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source
         )
diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py
index 6b24498d..700c880e 100644
--- a/src/mistralai/_version.py
+++ b/src/mistralai/_version.py
@@ -3,10 +3,10 @@
 import importlib.metadata
 
 __title__: str = "mistralai"
-__version__: str = "1.5.2-rc.1"
+__version__: str = "1.5.1"
 __openapi_doc_version__: str = "0.0.2"
-__gen_version__: str = "2.548.6"
-__user_agent__: str = "speakeasy-sdk/python 1.5.2-rc.1 2.548.6 0.0.2 mistralai"
+__gen_version__: str = "2.497.0"
+__user_agent__: str = "speakeasy-sdk/python 1.5.1 2.497.0 0.0.2 mistralai"
 
 try:
     if __package__ is not None:
diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py
index 6d43b480..05fd165c 100644
---
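The _hooks/types.py hunk above returns HookContext to three fields, dropping base_url. A sketch of constructing the restored context and one of its wrappers; the operation id is one that appears in the chat.py hunks later in this diff:

    from mistralai._hooks.types import BeforeRequestContext, HookContext

    ctx = HookContext(
        operation_id="chat_completion_v1_chat_completions_post",
        oauth2_scopes=[],
        security_source=None,
    )
    # Each wrapper copies the same three fields from the parent context.
    before = BeforeRequestContext(ctx)
    assert before.operation_id == ctx.operation_id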
a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -78,8 +78,6 @@ def complete( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.AgentsCompletionRequest( max_tokens=max_tokens, @@ -134,7 +132,6 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="agents_completion_v1_agents_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -146,14 +143,12 @@ def complete( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -241,8 +236,6 @@ async def complete_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.AgentsCompletionRequest( max_tokens=max_tokens, @@ -297,7 +290,6 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="agents_completion_v1_agents_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -309,14 +301,12 @@ async def complete_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -406,8 +396,6 @@ def stream( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.AgentsCompletionStreamRequest( max_tokens=max_tokens, @@ -462,7 +450,6 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_agents", oauth2_scopes=[], security_source=get_security_from_env( @@ -475,7 +462,7 @@ def stream( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -484,10 +471,8 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -577,8 +562,6 
@@ async def stream_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.AgentsCompletionStreamRequest( max_tokens=max_tokens, @@ -633,7 +616,6 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_agents", oauth2_scopes=[], security_source=get_security_from_env( @@ -646,7 +628,7 @@ async def stream_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -655,10 +637,8 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/basesdk.py b/src/mistralai/basesdk.py index 512e3072..cda8adda 100644 --- a/src/mistralai/basesdk.py +++ b/src/mistralai/basesdk.py @@ -231,10 +231,6 @@ def do(): req.headers, get_body_content(req), ) - - if client is None: - raise ValueError("client is required") - http_res = client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( @@ -307,10 +303,6 @@ async def do(): req.headers, get_body_content(req), ) - - if client is None: - raise ValueError("client is required") - http_res = await client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 558796d8..67646ffe 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -158,8 +158,6 @@ def complete( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -215,7 +213,6 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -227,14 +224,12 @@ def complete( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -320,8 +315,6 @@ async def complete_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -377,7 +370,6 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", 
operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -389,14 +381,12 @@ async def complete_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -492,8 +482,6 @@ def stream( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -551,7 +539,6 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=get_security_from_env( @@ -564,7 +551,7 @@ def stream( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -573,10 +560,8 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -672,8 +657,6 @@ async def stream_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -731,7 +714,6 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=get_security_from_env( @@ -744,7 +726,7 @@ async def stream_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -753,10 +735,8 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/classifiers.py b/src/mistralai/classifiers.py index 7191df0c..6ff1d6a8 100644 --- a/src/mistralai/classifiers.py +++ b/src/mistralai/classifiers.py @@ -40,8 +40,6 @@ def moderate( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = 
models.ClassificationRequest( model=model, @@ -77,7 +75,6 @@ def moderate( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="moderations_v1_moderations_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -89,14 +86,12 @@ def moderate( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -146,8 +141,6 @@ async def moderate_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ClassificationRequest( model=model, @@ -183,7 +176,6 @@ async def moderate_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="moderations_v1_moderations_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -195,14 +187,12 @@ async def moderate_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -254,8 +244,6 @@ def moderate_chat( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatModerationRequest( model=model, @@ -292,7 +280,6 @@ def moderate_chat( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="moderations_chat_v1_chat_moderations_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -304,14 +291,12 @@ def moderate_chat( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -363,8 +348,6 @@ async def moderate_chat_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatModerationRequest( model=model, @@ -401,7 +384,6 @@ async def moderate_chat_async( http_res = await 
self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="moderations_chat_v1_chat_moderations_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -413,14 +395,12 @@ async def moderate_chat_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index b99ff0cf..f6f558b8 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -14,8 +14,8 @@ class Embeddings(BaseSDK): def create( self, *, - model: str, inputs: Union[models.Inputs, models.InputsTypedDict], + model: Optional[str] = "mistral-embed", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -25,8 +25,8 @@ def create( Embeddings - :param model: ID of the model to use. :param inputs: Text to embed. + :param model: ID of the model to use. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -39,8 +39,6 @@ def create( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.EmbeddingRequest( model=model, @@ -76,7 +74,6 @@ def create( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="embeddings_v1_embeddings_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -88,14 +85,12 @@ def create( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.EmbeddingResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -119,8 +114,8 @@ def create( async def create_async( self, *, - model: str, inputs: Union[models.Inputs, models.InputsTypedDict], + model: Optional[str] = "mistral-embed", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -130,8 +125,8 @@ async def create_async( Embeddings - :param model: ID of the model to use. :param inputs: Text to embed. + :param model: ID of the model to use. 
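Per the restored signature above, inputs becomes the leading argument and model is optional with a default of "mistral-embed". A usage sketch; the input text is arbitrary and the API key is read from the environment:

    import os

    from mistralai import Mistral

    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
    # model may be omitted; the restored default is "mistral-embed".
    # The per-call overrides documented above (server_url, retries,
    # timeout_ms) remain available as keyword arguments.
    res = client.embeddings.create(inputs=["Embed this sentence."])
    print(len(res.data))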
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -144,8 +139,6 @@ async def create_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.EmbeddingRequest( model=model, @@ -181,7 +174,6 @@ async def create_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="embeddings_v1_embeddings_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -193,14 +185,12 @@ async def create_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.EmbeddingResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/files.py b/src/mistralai/files.py index 0ffc4857..042e4aea 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -44,8 +44,6 @@ def upload( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( file=utils.get_pydantic_model(file, models.File), @@ -85,7 +83,6 @@ def upload( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_upload_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -151,8 +148,6 @@ async def upload_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( file=utils.get_pydantic_model(file, models.File), @@ -192,7 +187,6 @@ async def upload_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_upload_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -262,8 +256,6 @@ def list( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesListFilesRequest( page=page, @@ -300,7 +292,6 @@ def list( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_list_files", oauth2_scopes=[], security_source=get_security_from_env( @@ -370,8 +361,6 @@ async def list_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesListFilesRequest( page=page, @@ -408,7 +397,6 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_list_files", oauth2_scopes=[], security_source=get_security_from_env( @@ -468,8 +456,6 @@ def retrieve( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesRetrieveFileRequest( 
file_id=file_id, @@ -501,7 +487,6 @@ def retrieve( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_retrieve_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -561,8 +546,6 @@ async def retrieve_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesRetrieveFileRequest( file_id=file_id, @@ -594,7 +577,6 @@ async def retrieve_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_retrieve_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -654,8 +636,6 @@ def delete( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesDeleteFileRequest( file_id=file_id, @@ -687,7 +667,6 @@ def delete( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_delete_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -747,8 +726,6 @@ async def delete_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesDeleteFileRequest( file_id=file_id, @@ -780,7 +757,6 @@ async def delete_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_delete_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -840,8 +816,6 @@ def download( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesDownloadFileRequest( file_id=file_id, @@ -873,7 +847,6 @@ def download( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_download_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -934,8 +907,6 @@ async def download_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesDownloadFileRequest( file_id=file_id, @@ -967,7 +938,6 @@ async def download_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_download_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -1028,8 +998,6 @@ def get_signed_url( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesGetSignedURLRequest( file_id=file_id, @@ -1062,7 +1030,6 @@ def get_signed_url( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_get_signed_url", oauth2_scopes=[], security_source=get_security_from_env( @@ -1122,8 +1089,6 @@ async def get_signed_url_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesGetSignedURLRequest( file_id=file_id, @@ -1156,7 +1121,6 @@ async def get_signed_url_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_get_signed_url", oauth2_scopes=[], security_source=get_security_from_env( diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py index 032c722f..c11f6c99 100644 --- a/src/mistralai/fim.py +++ b/src/mistralai/fim.py @@ -60,8 +60,6 @@ def 
complete( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionRequest( model=model, @@ -105,7 +103,6 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -117,14 +114,12 @@ def complete( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.FIMCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -194,8 +189,6 @@ async def complete_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionRequest( model=model, @@ -239,7 +232,6 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -251,14 +243,12 @@ async def complete_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.FIMCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -328,8 +318,6 @@ def stream( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionStreamRequest( model=model, @@ -373,7 +361,6 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], security_source=get_security_from_env( @@ -386,7 +373,7 @@ def stream( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -395,10 +382,8 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -468,8 +453,6 @@ async def stream_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, 

         request = models.FIMCompletionStreamRequest(
             model=model,
@@ -513,7 +496,6 @@ async def stream_async(

         http_res = await self.do_request_async(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="stream_fim",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -526,7 +508,7 @@ async def stream_async(
             retry_config=retry_config,
         )

-        response_data: Any = None
+        data: Any = None
         if utils.match_response(http_res, "200", "text/event-stream"):
             return eventstreaming.EventStreamAsync(
                 http_res,
@@ -535,10 +517,8 @@ async def stream_async(
             )
         if utils.match_response(http_res, "422", "application/json"):
             http_res_text = await utils.stream_to_text_async(http_res)
-            response_data = utils.unmarshal_json(
-                http_res_text, models.HTTPValidationErrorData
-            )
-            raise models.HTTPValidationError(data=response_data)
+            data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData)
+            raise models.HTTPValidationError(data=data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
             raise models.SDKError(
diff --git a/src/mistralai/httpclient.py b/src/mistralai/httpclient.py
index 1e426352..9dc43cb0 100644
--- a/src/mistralai/httpclient.py
+++ b/src/mistralai/httpclient.py
@@ -94,9 +94,7 @@ class ClientOwner(Protocol):
 def close_clients(
     owner: ClientOwner,
     sync_client: Union[HttpClient, None],
-    sync_client_supplied: bool,
     async_client: Union[AsyncHttpClient, None],
-    async_client_supplied: bool,
 ) -> None:
     """
     A finalizer function that is meant to be used with weakref.finalize to close
@@ -109,13 +107,13 @@ def close_clients(
     owner.client = None
     owner.async_client = None

-    if sync_client is not None and not sync_client_supplied:
+    if sync_client is not None:
         try:
             sync_client.close()
         except Exception:
             pass

-    if async_client is not None and not async_client_supplied:
+    if async_client is not None:
         is_async = False
         try:
             asyncio.get_running_loop()
diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py
index 675ece0b..ea66bfc6 100644
--- a/src/mistralai/jobs.py
+++ b/src/mistralai/jobs.py
@@ -52,8 +52,6 @@ def list(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest(
             page=page,
@@ -93,7 +91,6 @@ def list(

         http_res = self.do_request(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -169,8 +166,6 @@ async def list_async(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest(
             page=page,
@@ -210,7 +205,6 @@ async def list_async(

         http_res = await self.do_request_async(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -296,8 +290,6 @@ def create(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobIn(
             model=model,
@@ -347,7 +339,6 @@ def create(

         http_res = self.do_request(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -435,8 +426,6 @@ async def create_async(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobIn(
             model=model,
@@ -486,7 +475,6 @@ async def create_async(

         http_res = await self.do_request_async(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -548,8 +536,6 @@ def get(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest(
             job_id=job_id,
@@ -581,7 +567,6 @@ def get(

         http_res = self.do_request(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -641,8 +626,6 @@ async def get_async(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest(
             job_id=job_id,
@@ -674,7 +657,6 @@ async def get_async(

         http_res = await self.do_request_async(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -734,8 +716,6 @@ def cancel(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest(
             job_id=job_id,
@@ -767,7 +747,6 @@ def cancel(

         http_res = self.do_request(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -827,8 +806,6 @@ async def cancel_async(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest(
             job_id=job_id,
@@ -860,7 +837,6 @@ async def cancel_async(

         http_res = await self.do_request_async(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -920,8 +896,6 @@ def start(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest(
             job_id=job_id,
@@ -953,7 +927,6 @@ def start(

         http_res = self.do_request(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -1013,8 +986,6 @@ async def start_async(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest(
             job_id=job_id,
@@ -1046,7 +1017,6 @@ async def start_async(

         http_res = await self.do_request_async(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py
index e0d3c616..fe6b266a 100644
--- a/src/mistralai/mistral_jobs.py
+++ b/src/mistralai/mistral_jobs.py
@@ -48,8 +48,6 @@ def list(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobsAPIRoutesBatchGetBatchJobsRequest(
             page=page,
@@ -87,7 +85,6 @@ def list(

         http_res = self.do_request(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_get_batch_jobs",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -159,8 +156,6 @@ async def list_async(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobsAPIRoutesBatchGetBatchJobsRequest(
             page=page,
@@ -198,7 +193,6 @@ async def list_async(

         http_res = await self.do_request_async(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_get_batch_jobs",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -266,8 +260,6 @@ def create(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.BatchJobIn(
             input_files=input_files,
@@ -306,7 +298,6 @@ def create(

         http_res = self.do_request(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_create_batch_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -374,8 +365,6 @@ async def create_async(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.BatchJobIn(
             input_files=input_files,
@@ -414,7 +403,6 @@ async def create_async(

         http_res = await self.do_request_async(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_create_batch_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -474,8 +462,6 @@ def get(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobsAPIRoutesBatchGetBatchJobRequest(
             job_id=job_id,
@@ -507,7 +493,6 @@ def get(

         http_res = self.do_request(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_get_batch_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -567,8 +552,6 @@ async def get_async(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobsAPIRoutesBatchGetBatchJobRequest(
             job_id=job_id,
@@ -600,7 +583,6 @@ async def get_async(

         http_res = await self.do_request_async(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_get_batch_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -660,8 +642,6 @@ def cancel(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobsAPIRoutesBatchCancelBatchJobRequest(
             job_id=job_id,
@@ -693,7 +673,6 @@ def cancel(

         http_res = self.do_request(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_cancel_batch_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -753,8 +732,6 @@ async def cancel_async(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobsAPIRoutesBatchCancelBatchJobRequest(
             job_id=job_id,
@@ -786,7 +763,6 @@ async def cancel_async(

         http_res = await self.do_request_async(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_batch_cancel_batch_job",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py
index 0750906a..197f6e1f 100644
--- a/src/mistralai/models/__init__.py
+++ b/src/mistralai/models/__init__.py
@@ -115,11 +115,7 @@
     DetailedJobOutStatus,
     DetailedJobOutTypedDict,
 )
-from .documenturlchunk import (
-    DocumentURLChunk,
-    DocumentURLChunkType,
-    DocumentURLChunkTypedDict,
-)
+from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict
 from .embeddingrequest import (
     EmbeddingRequest,
     EmbeddingRequestTypedDict,
@@ -459,7 +455,6 @@
     "Document",
     "DocumentTypedDict",
     "DocumentURLChunk",
-    "DocumentURLChunkType",
     "DocumentURLChunkTypedDict",
     "EmbeddingRequest",
     "EmbeddingRequestTypedDict",
diff --git a/src/mistralai/models/documenturlchunk.py b/src/mistralai/models/documenturlchunk.py
index 29945102..23622335 100644
--- a/src/mistralai/models/documenturlchunk.py
+++ b/src/mistralai/models/documenturlchunk.py
@@ -2,32 +2,38 @@

 from __future__ import annotations
 from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from mistralai.utils import validate_const
+import pydantic
 from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
 from typing import Literal, Optional
-from typing_extensions import NotRequired, TypedDict
-
-
-DocumentURLChunkType = Literal["document_url"]
+from typing_extensions import Annotated, NotRequired, TypedDict


 class DocumentURLChunkTypedDict(TypedDict):
     document_url: str
+    type: Literal["document_url"]
     document_name: NotRequired[Nullable[str]]
     r"""The filename of the document"""
-    type: NotRequired[DocumentURLChunkType]


 class DocumentURLChunk(BaseModel):
     document_url: str

+    TYPE: Annotated[
+        Annotated[
+            Optional[Literal["document_url"]],
+            AfterValidator(validate_const("document_url")),
+        ],
+        pydantic.Field(alias="type"),
+    ] = "document_url"
+
     document_name: OptionalNullable[str] = UNSET
     r"""The filename of the document"""

-    type: Optional[DocumentURLChunkType] = "document_url"
-
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["document_name", "type"]
+        optional_fields = ["type", "document_name"]
         nullable_fields = ["document_name"]
         null_default_fields = []
diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py
index 5c37fd48..b5ccd26e 100644
--- a/src/mistralai/models/embeddingrequest.py
+++ b/src/mistralai/models/embeddingrequest.py
@@ -3,8 +3,8 @@
 from __future__ import annotations
 from mistralai.types import BaseModel
 import pydantic
-from typing import List, Union
-from typing_extensions import Annotated, TypeAliasType, TypedDict
+from typing import List, Optional, Union
+from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict


 InputsTypedDict = TypeAliasType("InputsTypedDict", Union[str, List[str]])
@@ -16,15 +16,15 @@


 class EmbeddingRequestTypedDict(TypedDict):
-    model: str
-    r"""ID of the model to use."""
     inputs: InputsTypedDict
     r"""Text to embed."""
+    model: NotRequired[str]
+    r"""ID of the model to use."""


 class EmbeddingRequest(BaseModel):
-    model: str
-    r"""ID of the model to use."""
-
     inputs: Annotated[Inputs, pydantic.Field(alias="input")]
     r"""Text to embed."""
+
+    model: Optional[str] = "mistral-embed"
+    r"""ID of the model to use."""
diff --git a/src/mistralai/models/filepurpose.py b/src/mistralai/models/filepurpose.py
index 8599192b..8628b308 100644
--- a/src/mistralai/models/filepurpose.py
+++ b/src/mistralai/models/filepurpose.py
@@ -5,4 +5,4 @@
 from typing import Literal, Union


-FilePurpose = Union[Literal["fine-tune", "batch", "ocr"], UnrecognizedStr]
+FilePurpose = Union[Literal["fine-tune", "batch"], UnrecognizedStr]
diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py
index 0b04694d..ec45eb36 100644
--- a/src/mistralai/models_.py
+++ b/src/mistralai/models_.py
@@ -35,8 +35,6 @@ def list(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)
         req = self._build_request(
             method="GET",
             path="/v1/models",
@@ -63,7 +61,6 @@ def list(

         http_res = self.do_request(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="list_models_v1_models_get",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -75,14 +72,12 @@ def list(
             retry_config=retry_config,
         )

-        response_data: Any = None
+        data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
             return utils.unmarshal_json(http_res.text, models.ModelList)
         if utils.match_response(http_res, "422", "application/json"):
-            response_data = utils.unmarshal_json(
-                http_res.text, models.HTTPValidationErrorData
-            )
-            raise models.HTTPValidationError(data=response_data)
+            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
+            raise models.HTTPValidationError(data=data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
             raise models.SDKError(
@@ -127,8 +122,6 @@ async def list_async(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)
         req = self._build_request_async(
             method="GET",
             path="/v1/models",
@@ -155,7 +148,6 @@ async def list_async(

         http_res = await self.do_request_async(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="list_models_v1_models_get",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -167,14 +159,12 @@ async def list_async(
             retry_config=retry_config,
         )

-        response_data: Any = None
+        data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
             return utils.unmarshal_json(http_res.text, models.ModelList)
         if utils.match_response(http_res, "422", "application/json"):
-            response_data = utils.unmarshal_json(
-                http_res.text, models.HTTPValidationErrorData
-            )
-            raise models.HTTPValidationError(data=response_data)
+            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
+            raise models.HTTPValidationError(data=data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
             raise models.SDKError(
@@ -221,8 +211,6 @@ def retrieve(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.RetrieveModelV1ModelsModelIDGetRequest(
             model_id=model_id,
@@ -254,7 +242,6 @@ def retrieve(

         http_res = self.do_request(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="retrieve_model_v1_models__model_id__get",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -266,17 +253,15 @@ def retrieve(
             retry_config=retry_config,
         )

-        response_data: Any = None
+        data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
             return utils.unmarshal_json(
                 http_res.text,
                 models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet,
             )
         if utils.match_response(http_res, "422", "application/json"):
-            response_data = utils.unmarshal_json(
-                http_res.text, models.HTTPValidationErrorData
-            )
-            raise models.HTTPValidationError(data=response_data)
+            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
+            raise models.HTTPValidationError(data=data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
             raise models.SDKError(
@@ -323,8 +308,6 @@ async def retrieve_async(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.RetrieveModelV1ModelsModelIDGetRequest(
             model_id=model_id,
@@ -356,7 +339,6 @@ async def retrieve_async(

         http_res = await self.do_request_async(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="retrieve_model_v1_models__model_id__get",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -368,17 +350,15 @@ async def retrieve_async(
             retry_config=retry_config,
         )

-        response_data: Any = None
+        data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
             return utils.unmarshal_json(
                 http_res.text,
                 models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet,
             )
         if utils.match_response(http_res, "422", "application/json"):
-            response_data = utils.unmarshal_json(
-                http_res.text, models.HTTPValidationErrorData
-            )
-            raise models.HTTPValidationError(data=response_data)
+            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
+            raise models.HTTPValidationError(data=data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
             raise models.SDKError(
@@ -425,8 +405,6 @@ def delete(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.DeleteModelV1ModelsModelIDDeleteRequest(
             model_id=model_id,
@@ -458,7 +436,6 @@ def delete(

         http_res = self.do_request(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="delete_model_v1_models__model_id__delete",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -470,14 +447,12 @@ def delete(
             retry_config=retry_config,
         )

-        response_data: Any = None
+        data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
             return utils.unmarshal_json(http_res.text, models.DeleteModelOut)
         if utils.match_response(http_res, "422", "application/json"):
-            response_data = utils.unmarshal_json(
-                http_res.text, models.HTTPValidationErrorData
-            )
-            raise models.HTTPValidationError(data=response_data)
+            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
+            raise models.HTTPValidationError(data=data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
             raise models.SDKError(
@@ -524,8 +499,6 @@ async def delete_async(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.DeleteModelV1ModelsModelIDDeleteRequest(
             model_id=model_id,
@@ -557,7 +530,6 @@ async def delete_async(

         http_res = await self.do_request_async(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="delete_model_v1_models__model_id__delete",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -569,14 +541,12 @@ async def delete_async(
             retry_config=retry_config,
         )

-        response_data: Any = None
+        data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
             return utils.unmarshal_json(http_res.text, models.DeleteModelOut)
         if utils.match_response(http_res, "422", "application/json"):
-            response_data = utils.unmarshal_json(
-                http_res.text, models.HTTPValidationErrorData
-            )
-            raise models.HTTPValidationError(data=response_data)
+            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
+            raise models.HTTPValidationError(data=data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
             raise models.SDKError(
@@ -627,8 +597,6 @@ def update(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(
             model_id=model_id,
@@ -667,7 +635,6 @@ def update(

         http_res = self.do_request(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -731,8 +698,6 @@ async def update_async(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(
             model_id=model_id,
@@ -771,7 +736,6 @@ async def update_async(

         http_res = await self.do_request_async(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -831,8 +795,6 @@ def archive(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest(
             model_id=model_id,
@@ -864,7 +826,6 @@ def archive(

         http_res = self.do_request(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -924,8 +885,6 @@ async def archive_async(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest(
             model_id=model_id,
@@ -957,7 +916,6 @@ async def archive_async(

         http_res = await self.do_request_async(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -1017,8 +975,6 @@ def unarchive(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest(
             model_id=model_id,
@@ -1050,7 +1006,6 @@ def unarchive(

         http_res = self.do_request(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -1110,8 +1065,6 @@ async def unarchive_async(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest(
             model_id=model_id,
@@ -1143,7 +1096,6 @@ async def unarchive_async(

         http_res = await self.do_request_async(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
diff --git a/src/mistralai/ocr.py b/src/mistralai/ocr.py
index 5d0e2414..56c1da51 100644
--- a/src/mistralai/ocr.py
+++ b/src/mistralai/ocr.py
@@ -47,8 +47,6 @@ def process(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.OCRRequest(
             model=model,
@@ -89,7 +87,6 @@ def process(

         http_res = self.do_request(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="ocr_v1_ocr_post",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -101,14 +98,12 @@ def process(
             retry_config=retry_config,
         )

-        response_data: Any = None
+        data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
             return utils.unmarshal_json(http_res.text, models.OCRResponse)
         if utils.match_response(http_res, "422", "application/json"):
-            response_data = utils.unmarshal_json(
-                http_res.text, models.HTTPValidationErrorData
-            )
-            raise models.HTTPValidationError(data=response_data)
+            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
+            raise models.HTTPValidationError(data=data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = utils.stream_to_text(http_res)
             raise models.SDKError(
@@ -165,8 +160,6 @@ async def process_async(

         if server_url is not None:
             base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)

         request = models.OCRRequest(
             model=model,
@@ -207,7 +200,6 @@ async def process_async(

         http_res = await self.do_request_async(
             hook_ctx=HookContext(
-                base_url=base_url or "",
                 operation_id="ocr_v1_ocr_post",
                 oauth2_scopes=[],
                 security_source=get_security_from_env(
@@ -219,14 +211,12 @@ async def process_async(
             retry_config=retry_config,
         )

-        response_data: Any = None
+        data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
             return utils.unmarshal_json(http_res.text, models.OCRResponse)
         if utils.match_response(http_res, "422", "application/json"):
-            response_data = utils.unmarshal_json(
-                http_res.text, models.HTTPValidationErrorData
-            )
-            raise models.HTTPValidationError(data=response_data)
+            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
+            raise models.HTTPValidationError(data=data)
         if utils.match_response(http_res, "4XX", "*"):
             http_res_text = await utils.stream_to_text_async(http_res)
             raise models.SDKError(
diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py
index e801eaf3..00d8370a 100644
--- a/src/mistralai/sdk.py
+++ b/src/mistralai/sdk.py
@@ -68,19 +68,15 @@ def __init__(
         :param retry_config: The retry configuration to use for all supported methods
         :param timeout_ms: Optional request timeout applied to each operation in milliseconds
         """
-        client_supplied = True
         if client is None:
             client = httpx.Client()
-            client_supplied = False

         assert issubclass(
             type(client), HttpClient
         ), "The provided client must implement the HttpClient protocol."

-        async_client_supplied = True
         if async_client is None:
             async_client = httpx.AsyncClient()
-            async_client_supplied = False

         if debug_logger is None:
             debug_logger = get_default_logger()
@@ -104,9 +100,7 @@ def __init__(
             self,
             SDKConfiguration(
                 client=client,
-                client_supplied=client_supplied,
                 async_client=async_client,
-                async_client_supplied=async_client_supplied,
                 security=security,
                 server_url=server_url,
                 server=server,
@@ -120,7 +114,7 @@ def __init__(

         current_server_url, *_ = self.sdk_configuration.get_server_details()
         server_url, self.sdk_configuration.client = hooks.sdk_init(
-            current_server_url, client
+            current_server_url, self.sdk_configuration.client
         )
         if current_server_url != server_url:
             self.sdk_configuration.server_url = server_url
@@ -133,9 +127,7 @@ def __init__(
            close_clients,
            cast(ClientOwner, self.sdk_configuration),
            self.sdk_configuration.client,
-           self.sdk_configuration.client_supplied,
            self.sdk_configuration.async_client,
-           self.sdk_configuration.async_client_supplied,
         )

         self._init_sdks()
@@ -159,17 +151,9 @@ async def __aenter__(self):
         return self

     def __exit__(self, exc_type, exc_val, exc_tb):
-        if (
-            self.sdk_configuration.client is not None
-            and not self.sdk_configuration.client_supplied
-        ):
+        if self.sdk_configuration.client is not None:
             self.sdk_configuration.client.close()
-        self.sdk_configuration.client = None

     async def __aexit__(self, exc_type, exc_val, exc_tb):
-        if (
-            self.sdk_configuration.async_client is not None
-            and not self.sdk_configuration.async_client_supplied
-        ):
+        if self.sdk_configuration.async_client is not None:
             await self.sdk_configuration.async_client.aclose()
-        self.sdk_configuration.async_client = None
diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py
index 257ff01d..2ccbcbe1 100644
--- a/src/mistralai/sdkconfiguration.py
+++ b/src/mistralai/sdkconfiguration.py
@@ -26,10 +26,8 @@

 @dataclass
 class SDKConfiguration:
-    client: Union[HttpClient, None]
-    client_supplied: bool
-    async_client: Union[AsyncHttpClient, None]
-    async_client_supplied: bool
+    client: HttpClient
+    async_client: AsyncHttpClient
     debug_logger: Logger
     security: Optional[Union[models.Security, Callable[[], models.Security]]] = None
     server_url: Optional[str] = ""
diff --git a/src/mistralai/utils/__init__.py b/src/mistralai/utils/__init__.py
index d8b21128..151c87d4 100644
--- a/src/mistralai/utils/__init__.py
+++ b/src/mistralai/utils/__init__.py
@@ -43,7 +43,6 @@
     match_content_type,
     match_status_codes,
     match_response,
-    cast_partial,
 )
 from .logger import Logger, get_body_content, get_default_logger

@@ -97,5 +96,4 @@
     "validate_float",
     "validate_int",
     "validate_open_enum",
-    "cast_partial",
 ]
diff --git a/src/mistralai/utils/serializers.py b/src/mistralai/utils/serializers.py
index baa41fbd..c5eb3659 100644
--- a/src/mistralai/utils/serializers.py
+++ b/src/mistralai/utils/serializers.py
@@ -7,15 +7,14 @@
 from typing_extensions import get_origin
 from pydantic import ConfigDict, create_model
 from pydantic_core import from_json
-from typing_inspection.typing_objects import is_union
+from typing_inspect import is_optional_type

 from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset


 def serialize_decimal(as_str: bool):
     def serialize(d):
-        # Optional[T] is a Union[T, None]
-        if is_union(type(d)) and type(None) in get_args(type(d)) and d is None:
+        if is_optional_type(type(d)) and d is None:
             return None
         if isinstance(d, Unset):
             return d
@@ -43,8 +42,7 @@ def validate_decimal(d):

 def serialize_float(as_str: bool):
     def serialize(f):
-        # Optional[T] is a Union[T, None]
-        if is_union(type(f)) and type(None) in get_args(type(f)) and f is None:
+        if is_optional_type(type(f)) and f is None:
             return None
         if isinstance(f, Unset):
             return f
@@ -72,8 +70,7 @@ def validate_float(f):

 def serialize_int(as_str: bool):
     def serialize(i):
-        # Optional[T] is a Union[T, None]
-        if is_union(type(i)) and type(None) in get_args(type(i)) and i is None:
+        if is_optional_type(type(i)) and i is None:
             return None
         if isinstance(i, Unset):
             return i
@@ -121,8 +118,7 @@ def validate(e):

 def validate_const(v):
     def validate(c):
-        # Optional[T] is a Union[T, None]
-        if is_union(type(c)) and type(None) in get_args(type(c)) and c is None:
+        if is_optional_type(type(c)) and c is None:
             return None

         if v != c:
@@ -167,7 +163,7 @@ def marshal_json(val, typ):
     if len(d) == 0:
         return ""

-    return json.dumps(d[next(iter(d))], separators=(",", ":"))
+    return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True)


 def is_nullable(field):
diff --git a/src/mistralai/utils/values.py b/src/mistralai/utils/values.py
index dae01a44..2b4b6832 100644
--- a/src/mistralai/utils/values.py
+++ b/src/mistralai/utils/values.py
@@ -3,9 +3,8 @@
 from datetime import datetime
 from enum import Enum
 from email.message import Message
-from functools import partial
 import os
-from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast
+from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union

 from httpx import Response
 from pydantic import BaseModel
@@ -52,8 +51,6 @@ def match_status_codes(status_codes: List[str], status_code: int) -> bool:
 T = TypeVar("T")


-def cast_partial(typ):
-    return partial(cast, typ)

 def get_global_from_env(
     value: Optional[T], env_key: str, type_cast: Callable[[str], T]

From 67175023413584f0258c15183952978dfbdca696 Mon Sep 17 00:00:00 2001
From: gaspardBT
Date: Wed, 19 Mar 2025 09:31:54 +0100
Subject: [PATCH 2/3] Revert "chore: update pyproject (#201)"

This reverts commit 81f02dd942fb404d1afb8614a2f21e764a0e9487.
---
 .github/workflows/run_example_scripts.yaml |  4 +-
 pyproject.toml                             | 43 ++++++++++------------
 2 files changed, 23 insertions(+), 24 deletions(-)

diff --git a/.github/workflows/run_example_scripts.yaml b/.github/workflows/run_example_scripts.yaml
index b3cc08ac..db0a30aa 100644
--- a/.github/workflows/run_example_scripts.yaml
+++ b/.github/workflows/run_example_scripts.yaml
@@ -14,7 +14,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: [ '3.9', '3.10', '3.11', '3.12', '3.13']
+        python-version: ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13']

     steps:
       - name: Checkout code
@@ -27,6 +27,8 @@ jobs:

       - name: Install Poetry
        uses: snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1
+        with:
+          version: ${{ matrix.python-version == '3.8' && '1.8.5' || '2.1.1' }}

       - name: Build and install client
        run: |
diff --git a/pyproject.toml b/pyproject.toml
index 5e7ddb8c..8eec1a78 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,19 +1,9 @@
-[project]
+[tool.poetry]
 name = "mistralai"
 version = "1.5.1"
 description = "Python Client SDK for the Mistral AI API."
-authors = [{ name = "Mistral" },]
+authors = ["Mistral"]
 readme = "README-PYPI.md"
-requires-python = ">=3.9"
-dependencies = [
-    "eval-type-backport >=0.2.0",
-    "httpx >=0.28.1",
-    "pydantic >=2.10.3",
-    "python-dateutil >=2.8.2",
-    "typing-inspection >=0.4.0",
-]
-
-[tool.poetry]
 repository = "https://github.com/mistralai/client-python.git"
 packages = [
     { include = "mistralai", from = "src" },
@@ -28,25 +18,32 @@ include = ["py.typed", "src/mistralai/py.typed"]
 [virtualenvs]
 in-project = true

+[tool.poetry.dependencies]
+python = ">=3.8"
+eval-type-backport = ">=0.2.0"
+httpx = ">=0.27.0"
+jsonpath-python = ">=1.0.6"
+pydantic = ">=2.9.0"
+python-dateutil = ">=2.8.2"
+typing-inspect = ">=0.9.0"
+google-auth = { version = ">=2.27.0", optional = true }
+requests = { version = ">=2.32.3", optional = true }
+
 [tool.poetry.group.dev.dependencies]
-mypy = "==1.14.1"
-pylint = "==3.2.3"
-pytest = "^8.2.2"
-pytest-asyncio = "^0.23.7"
-types-python-dateutil = "^2.9.0.20240316"
+mypy = ">=1.13.0"
+pylint = ">=3.2.3"
+pytest = ">=8.2.2"
+pytest-asyncio = ">=0.23.7"
+types-python-dateutil = ">=2.9.0.20240316"

-[project.optional-dependencies]
-gcp = [
-    "google-auth >=2.27.0",
-    "requests >=2.32.3"
-]
+[tool.poetry.extras]
+gcp = ["google-auth", "requests"]

 [build-system]
 requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"

 [tool.pytest.ini_options]
-asyncio_default_fixture_loop_scope = "function"
 pythonpath = ["src"]

 [tool.mypy]

From 032650af2bc1d0f4dcb68b3f480ea5058c7476d1 Mon Sep 17 00:00:00 2001
From: gaspardBT
Date: Wed, 19 Mar 2025 09:31:56 +0100
Subject: [PATCH 3/3] Revert "pin to latest (#200)"

This reverts commit 32ff73893b9156f290b447da2a191ae9be4e184d.
---
 .speakeasy/workflow.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml
index 9f22c9f6..00aefc99 100644
--- a/.speakeasy/workflow.yaml
+++ b/.speakeasy/workflow.yaml
@@ -1,5 +1,5 @@
 workflowVersion: 1.0.0
-speakeasyVersion: 1.517.3
+speakeasyVersion: 1.477.0
 sources:
     mistral-azure-source:
         inputs: