From be7794950371311bbc4cf3f51a49bd96ab7ceeaf Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Mon, 9 Mar 2026 19:24:29 +0000 Subject: [PATCH 1/2] =?UTF-8?q?##=20Python=20SDK=20Changes:=20*=20`mistral?= =?UTF-8?q?.beta.conversations.start()`:=20=20=20*=20=20`request`=20**Chan?= =?UTF-8?q?ged**=20(Breaking=20=E2=9A=A0=EF=B8=8F)=20=20=20*=20=20`respons?= =?UTF-8?q?e`=20**Changed**=20(Breaking=20=E2=9A=A0=EF=B8=8F)=20=20=20*=20?= =?UTF-8?q?=20`error.detail[]`=20**Changed**=20*=20`mistral.beta.conversat?= =?UTF-8?q?ions.append()`:=20=20=20*=20=20`request.inputs.union(Array)[]`=20**Changed**=20(Breaking=20=E2=9A=A0=EF=B8=8F)?= =?UTF-8?q?=20=20=20*=20=20`response`=20**Changed**=20(Breaking=20?= =?UTF-8?q?=E2=9A=A0=EF=B8=8F)=20=20=20*=20=20`error.detail[]`=20**Changed?= =?UTF-8?q?**=20*=20`mistral.beta.conversations.get=5Fhistory()`:=20=20=20?= =?UTF-8?q?*=20=20`response.entries[]`=20**Changed**=20(Breaking=20?= =?UTF-8?q?=E2=9A=A0=EF=B8=8F)=20=20=20*=20=20`error.detail[]`=20**Changed?= =?UTF-8?q?**=20*=20`mistral.beta.conversations.get=5Fmessages()`:=20=20?= =?UTF-8?q?=20*=20=20`response.messages[]`=20**Changed**=20(Breaking=20?= =?UTF-8?q?=E2=9A=A0=EF=B8=8F)=20=20=20*=20=20`error.detail[]`=20**Changed?= =?UTF-8?q?**=20*=20`mistral.beta.conversations.restart()`:=20=20=20*=20?= =?UTF-8?q?=20`request`=20**Changed**=20(Breaking=20=E2=9A=A0=EF=B8=8F)=20?= =?UTF-8?q?=20=20*=20=20`response`=20**Changed**=20(Breaking=20=E2=9A=A0?= =?UTF-8?q?=EF=B8=8F)=20=20=20*=20=20`error.detail[]`=20**Changed**=20*=20?= =?UTF-8?q?`mistral.agents.stream()`:=20=20=20*=20=20`request.messages[].u?= =?UTF-8?q?nion(tool).content.union(Array)[]`=20**Changed**?= =?UTF-8?q?=20=20=20*=20=20`response.[].data.choices[].delta.content.union?= =?UTF-8?q?(Array)[]`=20**Changed**=20(Breaking=20=E2=9A=A0?= =?UTF-8?q?=EF=B8=8F)=20=20=20*=20=20`error.detail[]`=20**Changed**=20*=20?= =?UTF-8?q?`mistral.agents.complete()`:=20=20=20*=20=20`request.messages[]?= 
=?UTF-8?q?`=20**Changed**=20=20=20*=20=20`response.choices[].message.cont?= =?UTF-8?q?ent.union(Array)[]`=20**Changed**=20(Breaking=20?= =?UTF-8?q?=E2=9A=A0=EF=B8=8F)=20=20=20*=20=20`error.detail[]`=20**Changed?= =?UTF-8?q?**=20*=20`mistral.fim.stream()`:=20=20=20*=20=20`response.[].da?= =?UTF-8?q?ta.choices[].delta.content.union(Array)[]`=20**Ch?= =?UTF-8?q?anged**=20(Breaking=20=E2=9A=A0=EF=B8=8F)=20=20=20*=20=20`error?= =?UTF-8?q?.detail[]`=20**Changed**=20*=20`mistral.fim.complete()`:=20=20?= =?UTF-8?q?=20*=20=20`response.choices[].message.content.union(Array)[]`=20**Changed**=20(Breaking=20=E2=9A=A0=EF=B8=8F)=20?= =?UTF-8?q?=20=20*=20=20`error.detail[]`=20**Changed**=20*=20`mistral.chat?= =?UTF-8?q?.stream()`:=20=20=20*=20=20`request.messages[]`=20**Changed**?= =?UTF-8?q?=20=20=20*=20=20`response.[].data.choices[].delta.content.union?= =?UTF-8?q?(Array)[]`=20**Changed**=20(Breaking=20=E2=9A=A0?= =?UTF-8?q?=EF=B8=8F)=20=20=20*=20=20`error.detail[]`=20**Changed**=20*=20?= =?UTF-8?q?`mistral.chat.complete()`:=20=20=20*=20=20`request.messages[].u?= =?UTF-8?q?nion(tool).content.union(Array)[]`=20**Changed**?= =?UTF-8?q?=20=20=20*=20=20`response.choices[].message.content.union(Array?= =?UTF-8?q?)[]`=20**Changed**=20(Breaking=20=E2=9A=A0?= =?UTF-8?q?=EF=B8=8F)=20=20=20*=20=20`error.detail[]`=20**Changed**=20*=20?= =?UTF-8?q?`mistral.beta.conversations.restart=5Fstream()`:=20=20=20*=20?= =?UTF-8?q?=20`request`=20**Changed**=20(Breaking=20=E2=9A=A0=EF=B8=8F)=20?= =?UTF-8?q?=20=20*=20=20`response.[].data.union(message.output.delta).cont?= =?UTF-8?q?ent.union(OutputContentChunks)`=20**Changed**=20(Breaking=20?= =?UTF-8?q?=E2=9A=A0=EF=B8=8F)=20=20=20*=20=20`error.detail[]`=20**Changed?= =?UTF-8?q?**=20*=20`mistral.beta.conversations.append=5Fstream()`:=20=20?= =?UTF-8?q?=20*=20=20`request.inputs.union(Array)[]`=20**Cha?= =?UTF-8?q?nged**=20(Breaking=20=E2=9A=A0=EF=B8=8F)=20=20=20*=20=20`respon?= =?UTF-8?q?se.[].data.union(message.output.delta).content.union(OutputCont?= 
=?UTF-8?q?entChunks)`=20**Changed**=20(Breaking=20=E2=9A=A0=EF=B8=8F)=20?= =?UTF-8?q?=20=20*=20=20`error.detail[]`=20**Changed**=20*=20`mistral.beta?= =?UTF-8?q?.conversations.start=5Fstream()`:=20=20=20*=20=20`request`=20**?= =?UTF-8?q?Changed**=20(Breaking=20=E2=9A=A0=EF=B8=8F)=20=20=20*=20=20`res?= =?UTF-8?q?ponse.[].data.union(message.output.delta).content.union(OutputC?= =?UTF-8?q?ontentChunks)`=20**Changed**=20(Breaking=20=E2=9A=A0=EF=B8=8F)?= =?UTF-8?q?=20=20=20*=20=20`error.detail[]`=20**Changed**=20*=20`mistral.b?= =?UTF-8?q?eta.connectors.delete()`:=20**Added**=20*=20`mistral.beta.conve?= =?UTF-8?q?rsations.delete()`:=20=20`error.detail[]`=20**Changed**=20*=20`?= =?UTF-8?q?mistral.beta.observability.judges.delete()`:=20**Added**=20*=20?= =?UTF-8?q?`mistral.beta.observability.judges.update()`:=20**Added**=20*?= =?UTF-8?q?=20`mistral.beta.observability.campaigns.create()`:=20**Added**?= =?UTF-8?q?=20*=20`mistral.beta.observability.campaigns.list()`:=20**Added?= =?UTF-8?q?**=20*=20`mistral.beta.observability.campaigns.fetch()`:=20**Ad?= =?UTF-8?q?ded**=20*=20`mistral.beta.observability.campaigns.delete()`:=20?= =?UTF-8?q?**Added**=20*=20`mistral.beta.observability.campaigns.fetch=5Fs?= =?UTF-8?q?tatus()`:=20**Added**=20*=20`mistral.beta.observability.campaig?= =?UTF-8?q?ns.list=5Fevents()`:=20**Added**=20*=20`mistral.beta.observabil?= =?UTF-8?q?ity.datasets.create()`:=20**Added**=20*=20`mistral.beta.observa?= =?UTF-8?q?bility.datasets.list()`:=20**Added**=20*=20`mistral.beta.observ?= =?UTF-8?q?ability.datasets.fetch()`:=20**Added**=20*=20`mistral.beta.obse?= =?UTF-8?q?rvability.datasets.delete()`:=20**Added**=20*=20`mistral.beta.o?= =?UTF-8?q?bservability.datasets.update()`:=20**Added**=20*=20`mistral.bet?= =?UTF-8?q?a.observability.datasets.list=5Frecords()`:=20**Added**=20*=20`?= =?UTF-8?q?mistral.beta.observability.datasets.create=5Frecord()`:=20**Add?= =?UTF-8?q?ed**=20*=20`mistral.beta.observability.datasets.import=5Ffrom?= 
=?UTF-8?q?=5Fcampaign()`:=20**Added**=20*=20`mistral.beta.observability.d?= =?UTF-8?q?atasets.import=5Ffrom=5Fexplorer()`:=20**Added**=20*=20`mistral?= =?UTF-8?q?.beta.observability.datasets.import=5Ffrom=5Ffile()`:=20**Added?= =?UTF-8?q?**=20*=20`mistral.beta.observability.datasets.import=5Ffrom=5Fp?= =?UTF-8?q?layground()`:=20**Added**=20*=20`mistral.beta.observability.dat?= =?UTF-8?q?asets.import=5Ffrom=5Fdataset=5Frecords()`:=20**Added**=20*=20`?= =?UTF-8?q?mistral.beta.observability.datasets.export=5Fto=5Fjsonl()`:=20*?= =?UTF-8?q?*Added**=20*=20`mistral.beta.observability.datasets.fetch=5Ftas?= =?UTF-8?q?k()`:=20**Added**=20*=20`mistral.beta.observability.datasets.li?= =?UTF-8?q?st=5Ftasks()`:=20**Added**=20*=20`mistral.beta.observability.da?= =?UTF-8?q?tasets.records.fetch()`:=20**Added**=20*=20`mistral.beta.observ?= =?UTF-8?q?ability.datasets.records.delete()`:=20**Added**=20*=20`mistral.?= =?UTF-8?q?beta.observability.datasets.records.bulk=5Fdelete()`:=20**Added?= =?UTF-8?q?**=20*=20`mistral.beta.observability.datasets.records.judge()`:?= =?UTF-8?q?=20**Added**=20*=20`mistral.beta.observability.datasets.records?= =?UTF-8?q?.update=5Fpayload()`:=20**Added**=20*=20`mistral.beta.observabi?= =?UTF-8?q?lity.datasets.records.update=5Fproperties()`:=20**Added**=20*?= =?UTF-8?q?=20`mistral.beta.connectors.create()`:=20**Added**=20*=20`mistr?= =?UTF-8?q?al.beta.connectors.list()`:=20**Added**=20*=20`mistral.beta.con?= =?UTF-8?q?nectors.call=5Ftool()`:=20**Added**=20*=20`mistral.beta.connect?= =?UTF-8?q?ors.get()`:=20**Added**=20*=20`mistral.beta.connectors.update()?= =?UTF-8?q?`:=20**Added**=20*=20`mistral.beta.observability.judges.list()`?= =?UTF-8?q?:=20**Added**=20*=20`mistral.models.list()`:=20=20=20*=20=20`re?= =?UTF-8?q?quest`=20**Changed**=20=20=20*=20=20`error.status[422]`=20**Add?= =?UTF-8?q?ed**=20*=20`mistral.models.retrieve()`:=20=20`error.detail[]`?= =?UTF-8?q?=20**Changed**=20*=20`mistral.beta.observability.judges.create(?= 
=?UTF-8?q?)`:=20**Added**=20*=20`mistral.beta.observability.chat=5Fcomple?= =?UTF-8?q?tion=5Fevents.fields.fetch=5Foption=5Fcounts()`:=20**Added**=20?= =?UTF-8?q?*=20`mistral.beta.observability.chat=5Fcompletion=5Fevents.fiel?= =?UTF-8?q?ds.fetch=5Foptions()`:=20**Added**=20*=20`mistral.models.delete?= =?UTF-8?q?()`:=20=20`error.detail[]`=20**Changed**=20*=20`mistral.beta.co?= =?UTF-8?q?nversations.list()`:=20=20=20*=20=20`response.[].union(ModelCon?= =?UTF-8?q?versation).guardrails`=20**Added**=20=20=20*=20=20`error.detail?= =?UTF-8?q?[]`=20**Changed**=20*=20`mistral.beta.conversations.get()`:=20?= =?UTF-8?q?=20=20*=20=20`response.union(ModelConversation).guardrails`=20*?= =?UTF-8?q?*Added**=20=20=20*=20=20`error.detail[]`=20**Changed**=20*=20`m?= =?UTF-8?q?istral.beta.observability.judges.fetch()`:=20**Added**=20*=20`m?= =?UTF-8?q?istral.beta.agents.create()`:=20=20=20*=20=20`request.guardrail?= =?UTF-8?q?s`=20**Added**=20=20=20*=20=20`response.guardrails`=20**Added**?= =?UTF-8?q?=20=20=20*=20=20`error.detail[]`=20**Changed**=20*=20`mistral.b?= =?UTF-8?q?eta.agents.list()`:=20=20=20*=20=20`response.[].guardrails`=20*?= =?UTF-8?q?*Added**=20=20=20*=20=20`error.detail[]`=20**Changed**=20*=20`m?= =?UTF-8?q?istral.beta.agents.get()`:=20=20=20*=20=20`response.guardrails`?= =?UTF-8?q?=20**Added**=20=20=20*=20=20`error.detail[]`=20**Changed**=20*?= =?UTF-8?q?=20`mistral.beta.agents.update()`:=20=20=20*=20=20`request.guar?= =?UTF-8?q?drails`=20**Added**=20=20=20*=20=20`response.guardrails`=20**Ad?= =?UTF-8?q?ded**=20=20=20*=20=20`error.detail[]`=20**Changed**=20*=20`mist?= =?UTF-8?q?ral.beta.agents.delete()`:=20=20`error.detail[]`=20**Changed**?= =?UTF-8?q?=20*=20`mistral.beta.agents.update=5Fversion()`:=20=20=20*=20?= =?UTF-8?q?=20`response.guardrails`=20**Added**=20=20=20*=20=20`error.deta?= =?UTF-8?q?il[]`=20**Changed**=20*=20`mistral.beta.agents.list=5Fversions(?= =?UTF-8?q?)`:=20=20=20*=20=20`response.[].guardrails`=20**Added**=20=20?= 
=?UTF-8?q?=20*=20=20`error.detail[]`=20**Changed**=20*=20`mistral.beta.ag?= =?UTF-8?q?ents.get=5Fversion()`:=20=20=20*=20=20`response.guardrails`=20*?= =?UTF-8?q?*Added**=20=20=20*=20=20`error.detail[]`=20**Changed**=20*=20`m?= =?UTF-8?q?istral.beta.agents.create=5Fversion=5Falias()`:=20=20`error.det?= =?UTF-8?q?ail[]`=20**Changed**=20*=20`mistral.beta.agents.list=5Fversion?= =?UTF-8?q?=5Faliases()`:=20=20`error.detail[]`=20**Changed**=20*=20`mistr?= =?UTF-8?q?al.beta.agents.delete=5Fversion=5Falias()`:=20=20`error.detail[?= =?UTF-8?q?]`=20**Changed**=20*=20`mistral.beta.libraries.create()`:=20=20?= =?UTF-8?q?`error.detail[]`=20**Changed**=20*=20`mistral.beta.libraries.ge?= =?UTF-8?q?t()`:=20=20`error.detail[]`=20**Changed**=20*=20`mistral.beta.l?= =?UTF-8?q?ibraries.delete()`:=20=20`error.detail[]`=20**Changed**=20*=20`?= =?UTF-8?q?mistral.beta.libraries.update()`:=20=20`error.detail[]`=20**Cha?= =?UTF-8?q?nged**=20*=20`mistral.beta.libraries.documents.list()`:=20=20`e?= =?UTF-8?q?rror.detail[]`=20**Changed**=20*=20`mistral.beta.libraries.docu?= =?UTF-8?q?ments.upload()`:=20=20`error.detail[]`=20**Changed**=20*=20`mis?= =?UTF-8?q?tral.beta.libraries.documents.get()`:=20=20`error.detail[]`=20*?= =?UTF-8?q?*Changed**=20*=20`mistral.beta.libraries.documents.update()`:?= =?UTF-8?q?=20=20`error.detail[]`=20**Changed**=20*=20`mistral.beta.librar?= =?UTF-8?q?ies.documents.delete()`:=20=20`error.detail[]`=20**Changed**=20?= =?UTF-8?q?*=20`mistral.beta.libraries.documents.text=5Fcontent()`:=20=20`?= =?UTF-8?q?error.detail[]`=20**Changed**=20*=20`mistral.beta.libraries.doc?= =?UTF-8?q?uments.status()`:=20=20`error.detail[]`=20**Changed**=20*=20`mi?= =?UTF-8?q?stral.beta.libraries.documents.get=5Fsigned=5Furl()`:=20=20`err?= =?UTF-8?q?or.detail[]`=20**Changed**=20*=20`mistral.beta.libraries.docume?= =?UTF-8?q?nts.extracted=5Ftext=5Fsigned=5Furl()`:=20=20`error.detail[]`?= =?UTF-8?q?=20**Changed**=20*=20`mistral.beta.libraries.documents.reproces?= 
=?UTF-8?q?s()`:=20=20`error.detail[]`=20**Changed**=20*=20`mistral.beta.l?= =?UTF-8?q?ibraries.accesses.list()`:=20=20`error.detail[]`=20**Changed**?= =?UTF-8?q?=20*=20`mistral.beta.libraries.accesses.update=5For=5Fcreate()`?= =?UTF-8?q?:=20=20`error.detail[]`=20**Changed**=20*=20`mistral.beta.libra?= =?UTF-8?q?ries.accesses.delete()`:=20=20`error.detail[]`=20**Changed**=20?= =?UTF-8?q?*=20`mistral.files.upload()`:=20=20=20*=20=20`request`=20**Chan?= =?UTF-8?q?ged**=20=20=20*=20=20`response`=20**Changed**=20*=20`mistral.fi?= =?UTF-8?q?les.list()`:=20=20`response.data[]`=20**Changed**=20*=20`mistra?= =?UTF-8?q?l.files.retrieve()`:=20=20`response`=20**Changed**=20*=20`mistr?= =?UTF-8?q?al.beta.observability.chat=5Fcompletion=5Fevents.fields.list()`?= =?UTF-8?q?:=20**Added**=20*=20`mistral.beta.observability.chat=5Fcompleti?= =?UTF-8?q?on=5Fevents.judge()`:=20**Added**=20*=20`mistral.beta.observabi?= =?UTF-8?q?lity.chat=5Fcompletion=5Fevents.fetch=5Fsimilar=5Fevents()`:=20?= =?UTF-8?q?**Added**=20*=20`mistral.beta.observability.chat=5Fcompletion?= =?UTF-8?q?=5Fevents.fetch()`:=20**Added**=20*=20`mistral.beta.observabili?= =?UTF-8?q?ty.chat=5Fcompletion=5Fevents.search=5Fids()`:=20**Added**=20*?= =?UTF-8?q?=20`mistral.beta.observability.chat=5Fcompletion=5Fevents.searc?= =?UTF-8?q?h()`:=20**Added**=20*=20`mistral.embeddings.create()`:=20=20`er?= =?UTF-8?q?ror.detail[]`=20**Changed**=20*=20`mistral.classifiers.moderate?= =?UTF-8?q?()`:=20=20`error.detail[]`=20**Changed**=20*=20`mistral.classif?= =?UTF-8?q?iers.moderate=5Fchat()`:=20=20=20*=20=20`request.inputs.union(A?= =?UTF-8?q?rray<>)[]`=20**Changed**=20=20=20*=20=20`error.detail[]`=20**Ch?= =?UTF-8?q?anged**=20*=20`mistral.classifiers.classify()`:=20=20`error.det?= =?UTF-8?q?ail[]`=20**Changed**=20*=20`mistral.classifiers.classify=5Fchat?= =?UTF-8?q?()`:=20=20=20*=20=20`request.input.union(Array?= =?UTF-8?q?)[].messages[].union(tool).content.union(Array)[]?= 
=?UTF-8?q?`=20**Changed**=20=20=20*=20=20`error.detail[]`=20**Changed**?= =?UTF-8?q?=20*=20`mistral.ocr.process()`:=20=20`error.detail[]`=20**Chang?= =?UTF-8?q?ed**?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .speakeasy/gen.lock | 2109 ++++++++++-- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 12 +- README.md | 78 +- RELEASES.md | 12 +- USAGE.md | 4 +- docs/errors/observabilityerror.md | 8 + docs/models/agent.md | 1 + docs/models/and_.md | 17 + docs/models/annotations.md | 10 + docs/models/answer.md | 17 + docs/models/audience.md | 9 + docs/models/audiocontent.md | 15 + docs/models/audiourl.md | 13 + docs/models/audiourlchunk.md | 15 + docs/models/audiourlunion.md | 17 + docs/models/authdata.md | 9 + docs/models/basefielddefinition.md | 12 + docs/models/basetaskstatus.md | 15 + docs/models/blobresourcecontents.md | 14 + docs/models/campaign.md | 18 + docs/models/chatcompletionevent.md | 18 + docs/models/chatcompletioneventextrafields.md | 41 + docs/models/chatcompletioneventpreview.md | 13 + .../chatcompletioneventpreviewextrafields.md | 41 + docs/models/chattranscriptionevent.md | 10 + docs/models/connector.md | 15 + docs/models/connectorcalltoolrequest.md | 10 + docs/models/connectorcalltoolv1request.md | 10 + docs/models/connectordeletev1request.md | 8 + docs/models/connectorgetv1request.md | 10 + docs/models/connectorlistv1request.md | 10 + docs/models/connectorsqueryfilters.md | 9 + docs/models/connectortool.md | 18 + docs/models/connectortoolcallmetadata.md | 14 + docs/models/connectortoolcallresponse.md | 18 + .../connectortoolcallresponsecontent.md | 35 + docs/models/connectortoollocale.md | 10 + docs/models/connectortoolresultmetadata.md | 13 + docs/models/connectorupdatev1request.md | 9 + docs/models/contentchunk.md | 6 + docs/models/context.md | 7 + docs/models/conversationpayload.md | 9 + docs/models/conversationrequest.md | 1 + docs/models/conversationresponse.md | 13 +- ...utput.md => 
conversationresponseoutput.md} | 2 +- docs/models/conversationrestartrequest.md | 1 + .../conversationrestartstreamrequest.md | 1 + docs/models/conversationsource.md | 11 + docs/models/conversationstreamrequest.md | 1 + docs/models/createagentrequest.md | 1 + docs/models/createcampaignrequest.md | 12 + docs/models/createconnectorrequest.md | 15 + docs/models/createdatasetrecordrequest.md | 9 + ...litydatasetsdatasetidrecordspostrequest.md | 9 + docs/models/createdatasetrequest.md | 9 + docs/models/createfileresponse.md | 28 +- .../createfinetuningjobrequestrepository.md | 4 +- ...in.md => creategithubrepositoryrequest.md} | 2 +- docs/models/createjudgerequest.md | 13 + docs/models/createjudgerequestoutput.md | 17 + docs/models/dataset.md | 15 + docs/models/datasetimporttask.md | 17 + docs/models/datasetpreview.md | 15 + docs/models/datasetrecord.md | 15 + ...abilitycampaignscampaigniddeleterequest.md | 8 + docs/models/deletedatasetrecordsrequest.md | 8 + ...asetrecordsdatasetrecordiddeleterequest.md | 8 + ...rvabilitydatasetsdatasetiddeleterequest.md | 8 + ...observabilityjudgesjudgeiddeleterequest.md | 8 + ...letemodelout.md => deletemodelresponse.md} | 2 +- docs/models/embeddedresource.md | 17 + docs/models/executionconfig.md | 13 + docs/models/exportdatasetresponse.md | 8 + ...tasetsdatasetidexportstojsonlgetrequest.md | 8 + .../feedresultchatcompletioneventpreview.md | 10 + docs/models/fetchcampaignstatusresponse.md | 8 + ...fetchchatcompletionfieldoptionsresponse.md | 8 + docs/models/fetchfieldoptioncountsrequest.md | 8 + docs/models/fetchfieldoptioncountsresponse.md | 8 + docs/models/fieldgroup.md | 9 + docs/models/fieldoptioncountitem.md | 9 + .../filesapiroutesuploadfilefilevisibility.md | 9 + docs/models/fileschema.md | 28 +- docs/models/filevisibility.md | 9 + docs/models/filtercondition.md | 10 + docs/models/filtergroup.md | 9 + docs/models/filterpayload.md | 8 + docs/models/filters.md | 17 + ...ervabilitycampaignscampaignidgetrequest.md | 8 + 
...aignscampaignidselectedeventsgetrequest.md | 10 + ...litycampaignscampaignidstatusgetrequest.md | 8 + ...aignsv1observabilitycampaignsgetrequest.md | 10 + ...ychatcompletioneventssearchpostrequest.md} | 8 +- ...tychatcompletioneventseventidgetrequest.md | 8 + ...fieldsfieldnameoptionscountspostrequest.md | 9 + ...pletionfieldsfieldnameoptionsgetrequest.md | 9 + ...bservabilitydatasetsdatasetidgetrequest.md | 8 + ...abilitydatasetsdatasetidtasksgetrequest.md | 10 + ...ydatasetsdatasetidtaskstaskidgetrequest.md | 9 + ...ilitydatasetsdatasetidrecordsgetrequest.md | 10 + ...datasetrecordsdatasetrecordidgetrequest.md | 8 + ...tasetsv1observabilitydatasetsgetrequest.md | 10 + docs/models/getfileresponse.md | 30 +- ...dv1observabilityjudgesjudgeidgetrequest.md | 8 + ...etjudgesv1observabilityjudgesgetrequest.md | 12 + ...ioneventseventidsimilareventsgetrequest.md | 8 + docs/models/guardrail.md | 7 + docs/models/guardrailconfig.md | 9 + docs/models/imagecontent.md | 15 + .../importdatasetfromcampaignrequest.md | 8 + .../models/importdatasetfromdatasetrequest.md | 8 + .../importdatasetfromexplorerrequest.md | 8 + docs/models/importdatasetfromfilerequest.md | 8 + .../importdatasetfromplaygroundrequest.md | 8 + docs/models/judge.md | 22 + .../models/judgechatcompletioneventrequest.md | 8 + ...tioneventseventidlivejudgingpostrequest.md | 9 + docs/models/judgeclassificationoutput.md | 9 + .../models/judgeclassificationoutputoption.md | 9 + docs/models/judgedatasetrecordrequest.md | 8 + ...dsdatasetrecordidlivejudgingpostrequest.md | 9 + docs/models/judgeoutput.md | 9 + docs/models/judgeoutputtype.md | 9 + docs/models/judgeoutputunion.md | 17 + docs/models/judgeregressionoutput.md | 12 + docs/models/librariessharecreatev1request.md | 8 +- .../listcampaignselectedeventsresponse.md | 8 + docs/models/listcampaignsresponse.md | 8 + .../listchatcompletionfieldsresponse.md | 9 + docs/models/listdatasetimporttasksresponse.md | 8 + docs/models/listdatasetrecordsresponse.md | 8 + 
docs/models/listdatasetsresponse.md | 8 + docs/models/listjudgesresponse.md | 8 + docs/models/listmodelsv1modelsgetrequest.md | 9 + docs/models/listsharingresponse.md | 8 + docs/models/mcpservericon.md | 13 + docs/models/messageinputcontentchunks.md | 4 +- docs/models/messageoutputcontentchunks.md | 4 +- docs/models/messageresponse.md | 8 + docs/models/modelconversation.md | 1 + docs/models/moderationllmv1action.md | 9 + .../moderationllmv1categorythresholds.md | 16 + docs/models/moderationllmv1config.md | 11 + docs/models/multipartbodyparams.md | 2 + docs/models/observabilityerrorcode.md | 50 + docs/models/observabilityerrordetail.md | 9 + docs/models/op.md | 26 + docs/models/operator.md | 28 + docs/models/option.md | 17 + docs/models/or_.md | 17 + docs/models/outputcontentchunks.md | 4 +- docs/models/paginatedconnectors.md | 9 + docs/models/paginatedresultcampaignpreview.md | 11 + ...ginatedresultchatcompletioneventpreview.md | 11 + .../paginatedresultdatasetimporttask.md | 11 + docs/models/paginatedresultdatasetpreview.md | 11 + docs/models/paginatedresultdatasetrecord.md | 11 + docs/models/paginatedresultjudgepreview.md | 11 + docs/models/paginationresponse.md | 9 + ...datasetidimportsfromcampaignpostrequest.md | 9 + ...sdatasetidimportsfromdatasetpostrequest.md | 9 + ...datasetidimportsfromexplorerpostrequest.md | 9 + ...setsdatasetidimportsfromfilepostrequest.md | 9 + ...tasetidimportsfromplaygroundpostrequest.md | 9 + ...essingstatusout.md => processingstatus.md} | 2 +- docs/models/resource.md | 17 + docs/models/resourcelink.md | 22 + docs/models/resourcevisibility.md | 11 + .../searchchatcompletioneventidsrequest.md | 9 + .../searchchatcompletioneventidsresponse.md | 8 + ...d => searchchatcompletioneventsrequest.md} | 5 +- .../searchchatcompletioneventsresponse.md | 8 + docs/models/{sharingout.md => sharing.md} | 2 +- .../{sharingin.md => sharingrequest.md} | 2 +- docs/models/supportedoperator.md | 26 + docs/models/textcontent.md | 14 + 
docs/models/textresourcecontents.md | 14 + docs/models/thinkchunk.md | 4 +- docs/models/thinkchunkthinking.md | 17 - ...ationthinkchunkthinking.md => thinking.md} | 8 +- docs/models/typeenum.md | 14 + docs/models/updateagentrequest.md | 1 + docs/models/updateconnectorrequest.md | 16 + .../updatedatasetrecordpayloadrequest.md | 8 + ...recordsdatasetrecordidpayloadputrequest.md | 9 + .../updatedatasetrecordpropertiesrequest.md | 8 + ...ordsdatasetrecordidpropertiesputrequest.md | 9 + docs/models/updatedatasetrequest.md | 9 + ...ervabilitydatasetsdatasetidpatchrequest.md | 9 + docs/models/updatejudgerequest.md | 13 + docs/models/updatejudgerequestoutput.md | 17 + ...ev1observabilityjudgesjudgeidputrequest.md | 9 + docs/models/validationerror.md | 12 +- docs/sdks/accesses/README.md | 6 +- docs/sdks/betaagents/README.md | 2 + docs/sdks/campaigns/README.md | 267 ++ docs/sdks/chatcompletionevents/README.md | 244 ++ docs/sdks/connectors/README.md | 282 ++ docs/sdks/conversations/README.md | 4 + docs/sdks/datasets/README.md | 669 ++++ docs/sdks/documents/README.md | 2 +- docs/sdks/fields/README.md | 133 + docs/sdks/files/README.md | 4 +- docs/sdks/judges/README.md | 236 ++ docs/sdks/models/README.md | 11 +- docs/sdks/records/README.md | 277 ++ src/mistralai/client/_version.py | 4 +- src/mistralai/client/accesses.py | 32 +- src/mistralai/client/beta.py | 9 + src/mistralai/client/beta_agents.py | 28 + src/mistralai/client/campaigns.py | 1150 +++++++ .../client/chat_completion_events.py | 1030 ++++++ src/mistralai/client/connectors.py | 1292 +++++++ src/mistralai/client/conversations.py | 56 + src/mistralai/client/datasets.py | 2972 +++++++++++++++++ src/mistralai/client/documents.py | 8 +- src/mistralai/client/errors/__init__.py | 5 + .../client/errors/observabilityerror.py | 32 + src/mistralai/client/fields.py | 588 ++++ src/mistralai/client/files.py | 16 + src/mistralai/client/judges.py | 1038 ++++++ src/mistralai/client/models/__init__.py | 1188 ++++++- 
src/mistralai/client/models/agent.py | 14 +- src/mistralai/client/models/annotations.py | 77 + src/mistralai/client/models/audiocontent.py | 94 + src/mistralai/client/models/audiourl.py | 26 + src/mistralai/client/models/audiourlchunk.py | 53 + src/mistralai/client/models/authdata.py | 17 + .../client/models/basefielddefinition.py | 100 + src/mistralai/client/models/basetaskstatus.py | 21 + .../client/models/blobresourcecontents.py | 87 + src/mistralai/client/models/campaign.py | 62 + .../client/models/chatcompletionevent.py | 61 + .../models/chatcompletioneventpreview.py | 43 + .../client/models/chattranscriptionevent.py | 21 + src/mistralai/client/models/connector.py | 70 + .../client/models/connector_call_tool_v1op.py | 32 + .../client/models/connector_delete_v1op.py | 17 + .../client/models/connector_get_v1op.py | 51 + .../client/models/connector_list_v1op.py | 67 + .../client/models/connector_update_v1op.py | 27 + .../client/models/connectorcalltoolrequest.py | 36 + .../client/models/connectorsqueryfilters.py | 57 + src/mistralai/client/models/connectortool.py | 83 + .../models/connectortoolcallmetadata.py | 80 + .../models/connectortoolcallresponse.py | 149 + .../client/models/connectortoollocale.py | 21 + .../models/connectortoolresultmetadata.py | 84 + src/mistralai/client/models/contentchunk.py | 4 + .../client/models/conversationpayload.py | 30 + .../client/models/conversationrequest.py | 6 + .../client/models/conversationresponse.py | 46 +- .../models/conversationrestartrequest.py | 9 +- .../conversationrestartstreamrequest.py | 9 +- .../client/models/conversationsource.py | 17 + .../models/conversationstreamrequest.py | 6 + .../client/models/conversationthinkchunk.py | 65 - ...lity_datasets_dataset_id_records_postop.py | 29 + .../client/models/createagentrequest.py | 14 +- .../client/models/createcampaignrequest.py | 27 + .../client/models/createconnectorrequest.py | 86 + .../models/createdatasetrecordrequest.py | 19 + 
.../client/models/createdatasetrequest.py | 17 + .../client/models/createfileresponse.py | 15 +- .../models/createfinetuningjobrequest.py | 9 +- ...in.py => creategithubrepositoryrequest.py} | 8 +- .../client/models/createjudgerequest.py | 47 + src/mistralai/client/models/dataset.py | 51 + .../client/models/datasetimporttask.py | 75 + src/mistralai/client/models/datasetpreview.py | 51 + src/mistralai/client/models/datasetrecord.py | 54 + ...vability_campaigns_campaign_id_deleteop.py | 17 + ...aset_records_dataset_record_id_deleteop.py | 21 + ...ervability_datasets_dataset_id_deleteop.py | 17 + ..._observability_judges_judge_id_deleteop.py | 17 + .../models/deletedatasetrecordsrequest.py | 15 + ...letemodelout.py => deletemodelresponse.py} | 6 +- .../client/models/embeddedresource.py | 110 + .../client/models/executionconfig.py | 40 + ...asets_dataset_id_exports_to_jsonl_getop.py | 21 + .../client/models/exportdatasetresponse.py | 14 + .../feedresultchatcompletioneventpreview.py | 57 + .../models/fetchcampaignstatusresponse.py | 15 + ...fetchchatcompletionfieldoptionsresponse.py | 53 + .../models/fetchfieldoptioncountsrequest.py | 47 + .../models/fetchfieldoptioncountsresponse.py | 16 + src/mistralai/client/models/fieldgroup.py | 17 + .../client/models/fieldoptioncountitem.py | 17 + .../models/files_api_routes_upload_fileop.py | 37 +- src/mistralai/client/models/fileschema.py | 15 +- src/mistralai/client/models/filevisibility.py | 15 + .../client/models/filtercondition.py | 47 + src/mistralai/client/models/filtergroup.py | 74 + src/mistralai/client/models/filterpayload.py | 40 + ...servability_campaigns_campaign_id_getop.py | 17 + ...aigns_campaign_id_selected_events_getop.py | 51 + ...lity_campaigns_campaign_id_status_getop.py | 21 + ...paigns_v1_observability_campaigns_getop.py | 63 + ...y_chat_completion_events_event_id_getop.py | 21 + ...ty_chat_completion_events_search_postop.py | 71 + ...fields_field_name_options_counts_postop.py | 31 + 
...pletion_fields_field_name_options_getop.py | 53 + ...observability_datasets_dataset_id_getop.py | 17 + ...datasets_dataset_id_tasks_task_id_getop.py | 26 + ...ability_datasets_dataset_id_tasks_getop.py | 49 + ...dataset_records_dataset_record_id_getop.py | 19 + ...ility_datasets_dataset_id_records_getop.py | 49 + ...atasets_v1_observability_datasets_getop.py | 63 + ..._v1_observability_judges_judge_id_getop.py | 17 + ...et_judges_v1_observability_judges_getop.py | 80 + ...on_events_event_id_similar_events_getop.py | 21 + .../client/models/getfileresponse.py | 15 +- .../client/models/guardrailconfig.py | 47 + src/mistralai/client/models/imagecontent.py | 94 + .../importdatasetfromcampaignrequest.py | 14 + .../models/importdatasetfromdatasetrequest.py | 15 + .../importdatasetfromexplorerrequest.py | 15 + .../models/importdatasetfromfilerequest.py | 14 + .../importdatasetfromplaygroundrequest.py | 15 + src/mistralai/client/models/judge.py | 136 + ...ion_events_event_id_live_judging_postop.py | 31 + ...s_dataset_record_id_live_judging_postop.py | 31 + .../models/judgechatcompletioneventrequest.py | 15 + .../models/judgeclassificationoutput.py | 36 + .../models/judgeclassificationoutputoption.py | 17 + .../models/judgedatasetrecordrequest.py | 15 + src/mistralai/client/models/judgeoutput.py | 24 + .../client/models/judgeoutputtype.py | 11 + .../client/models/judgeregressionoutput.py | 56 + .../models/libraries_share_create_v1op.py | 9 +- .../models/list_models_v1_models_getop.py | 56 + .../listcampaignselectedeventsresponse.py | 18 + .../client/models/listcampaignsresponse.py | 18 + .../listchatcompletionfieldsresponse.py | 20 + .../models/listdatasetimporttasksresponse.py | 18 + .../models/listdatasetrecordsresponse.py | 18 + .../client/models/listdatasetsresponse.py | 18 + .../client/models/listjudgesresponse.py | 18 + src/mistralai/client/models/listsharingout.py | 16 - .../client/models/listsharingresponse.py | 16 + src/mistralai/client/models/mcpservericon.py | 
82 + .../models/messageinputcontentchunks.py | 15 +- .../models/messageoutputcontentchunks.py | 9 +- .../client/models/messageresponse.py | 14 + .../client/models/modelconversation.py | 9 +- .../client/models/moderationllmv1action.py | 15 + .../moderationllmv1categorythresholds.py | 94 + .../client/models/moderationllmv1config.py | 78 + .../client/models/observabilityerrorcode.py | 56 + .../client/models/observabilityerrordetail.py | 33 + .../client/models/outputcontentchunks.py | 9 +- .../client/models/paginatedconnectors.py | 20 + .../models/paginatedresultcampaignpreview.py | 57 + ...ginatedresultchatcompletioneventpreview.py | 60 + .../paginatedresultdatasetimporttask.py | 57 + .../models/paginatedresultdatasetpreview.py | 57 + .../models/paginatedresultdatasetrecord.py | 57 + .../models/paginatedresultjudgepreview.py | 57 + .../client/models/paginationresponse.py | 49 + ...dataset_id_imports_from_campaign_postop.py | 31 + ..._dataset_id_imports_from_dataset_postop.py | 31 + ...dataset_id_imports_from_explorer_postop.py | 31 + ...ets_dataset_id_imports_from_file_postop.py | 31 + ...taset_id_imports_from_playground_postop.py | 31 + ...essingstatusout.py => processingstatus.py} | 6 +- src/mistralai/client/models/resourcelink.py | 140 + .../client/models/resourcevisibility.py | 17 + .../searchchatcompletioneventidsrequest.py | 51 + .../searchchatcompletioneventidsresponse.py | 15 + .../searchchatcompletioneventsrequest.py | 51 + .../searchchatcompletioneventsresponse.py | 18 + .../models/{sharingout.py => sharing.py} | 6 +- .../{sharingin.py => sharingrequest.py} | 6 +- src/mistralai/client/models/textcontent.py | 91 + .../client/models/textresourcecontents.py | 87 + src/mistralai/client/models/thinkchunk.py | 20 +- ...records_dataset_record_id_payload_putop.py | 31 + ...ords_dataset_record_id_properties_putop.py | 33 + ...servability_datasets_dataset_id_patchop.py | 24 + ..._v1_observability_judges_judge_id_putop.py | 24 + .../client/models/updateagentrequest.py 
| 6 + .../client/models/updateconnectorrequest.py | 114 + .../updatedatasetrecordpayloadrequest.py | 15 + .../updatedatasetrecordpropertiesrequest.py | 15 + .../client/models/updatedatasetrequest.py | 49 + .../client/models/updatejudgerequest.py | 47 + .../client/models/validationerror.py | 37 +- src/mistralai/client/models_.py | 48 +- src/mistralai/client/observability.py | 32 + src/mistralai/client/records.py | 1178 +++++++ 391 files changed, 22819 insertions(+), 575 deletions(-) create mode 100644 docs/errors/observabilityerror.md create mode 100644 docs/models/and_.md create mode 100644 docs/models/annotations.md create mode 100644 docs/models/answer.md create mode 100644 docs/models/audience.md create mode 100644 docs/models/audiocontent.md create mode 100644 docs/models/audiourl.md create mode 100644 docs/models/audiourlchunk.md create mode 100644 docs/models/audiourlunion.md create mode 100644 docs/models/authdata.md create mode 100644 docs/models/basefielddefinition.md create mode 100644 docs/models/basetaskstatus.md create mode 100644 docs/models/blobresourcecontents.md create mode 100644 docs/models/campaign.md create mode 100644 docs/models/chatcompletionevent.md create mode 100644 docs/models/chatcompletioneventextrafields.md create mode 100644 docs/models/chatcompletioneventpreview.md create mode 100644 docs/models/chatcompletioneventpreviewextrafields.md create mode 100644 docs/models/chattranscriptionevent.md create mode 100644 docs/models/connector.md create mode 100644 docs/models/connectorcalltoolrequest.md create mode 100644 docs/models/connectorcalltoolv1request.md create mode 100644 docs/models/connectordeletev1request.md create mode 100644 docs/models/connectorgetv1request.md create mode 100644 docs/models/connectorlistv1request.md create mode 100644 docs/models/connectorsqueryfilters.md create mode 100644 docs/models/connectortool.md create mode 100644 docs/models/connectortoolcallmetadata.md create mode 100644 
docs/models/connectortoolcallresponse.md create mode 100644 docs/models/connectortoolcallresponsecontent.md create mode 100644 docs/models/connectortoollocale.md create mode 100644 docs/models/connectortoolresultmetadata.md create mode 100644 docs/models/connectorupdatev1request.md create mode 100644 docs/models/context.md create mode 100644 docs/models/conversationpayload.md rename docs/models/{output.md => conversationresponseoutput.md} (93%) create mode 100644 docs/models/conversationsource.md create mode 100644 docs/models/createcampaignrequest.md create mode 100644 docs/models/createconnectorrequest.md create mode 100644 docs/models/createdatasetrecordrequest.md create mode 100644 docs/models/createdatasetrecordv1observabilitydatasetsdatasetidrecordspostrequest.md create mode 100644 docs/models/createdatasetrequest.md rename docs/models/{githubrepositoryin.md => creategithubrepositoryrequest.md} (96%) create mode 100644 docs/models/createjudgerequest.md create mode 100644 docs/models/createjudgerequestoutput.md create mode 100644 docs/models/dataset.md create mode 100644 docs/models/datasetimporttask.md create mode 100644 docs/models/datasetpreview.md create mode 100644 docs/models/datasetrecord.md create mode 100644 docs/models/deletecampaignv1observabilitycampaignscampaigniddeleterequest.md create mode 100644 docs/models/deletedatasetrecordsrequest.md create mode 100644 docs/models/deletedatasetrecordv1observabilitydatasetrecordsdatasetrecordiddeleterequest.md create mode 100644 docs/models/deletedatasetv1observabilitydatasetsdatasetiddeleterequest.md create mode 100644 docs/models/deletejudgev1observabilityjudgesjudgeiddeleterequest.md rename docs/models/{deletemodelout.md => deletemodelresponse.md} (98%) create mode 100644 docs/models/embeddedresource.md create mode 100644 docs/models/executionconfig.md create mode 100644 docs/models/exportdatasetresponse.md create mode 100644 
docs/models/exportdatasettojsonlv1observabilitydatasetsdatasetidexportstojsonlgetrequest.md create mode 100644 docs/models/feedresultchatcompletioneventpreview.md create mode 100644 docs/models/fetchcampaignstatusresponse.md create mode 100644 docs/models/fetchchatcompletionfieldoptionsresponse.md create mode 100644 docs/models/fetchfieldoptioncountsrequest.md create mode 100644 docs/models/fetchfieldoptioncountsresponse.md create mode 100644 docs/models/fieldgroup.md create mode 100644 docs/models/fieldoptioncountitem.md create mode 100644 docs/models/filesapiroutesuploadfilefilevisibility.md create mode 100644 docs/models/filevisibility.md create mode 100644 docs/models/filtercondition.md create mode 100644 docs/models/filtergroup.md create mode 100644 docs/models/filterpayload.md create mode 100644 docs/models/filters.md create mode 100644 docs/models/getcampaignbyidv1observabilitycampaignscampaignidgetrequest.md create mode 100644 docs/models/getcampaignselectedeventsv1observabilitycampaignscampaignidselectedeventsgetrequest.md create mode 100644 docs/models/getcampaignstatusbyidv1observabilitycampaignscampaignidstatusgetrequest.md create mode 100644 docs/models/getcampaignsv1observabilitycampaignsgetrequest.md rename docs/models/{conversationthinkchunk.md => getchatcompletioneventsv1observabilitychatcompletioneventssearchpostrequest.md} (73%) create mode 100644 docs/models/getchatcompletioneventv1observabilitychatcompletioneventseventidgetrequest.md create mode 100644 docs/models/getchatcompletionfieldoptionscountsv1observabilitychatcompletionfieldsfieldnameoptionscountspostrequest.md create mode 100644 docs/models/getchatcompletionfieldoptionsv1observabilitychatcompletionfieldsfieldnameoptionsgetrequest.md create mode 100644 docs/models/getdatasetbyidv1observabilitydatasetsdatasetidgetrequest.md create mode 100644 docs/models/getdatasetimporttasksv1observabilitydatasetsdatasetidtasksgetrequest.md create mode 100644 
docs/models/getdatasetimporttaskv1observabilitydatasetsdatasetidtaskstaskidgetrequest.md create mode 100644 docs/models/getdatasetrecordsv1observabilitydatasetsdatasetidrecordsgetrequest.md create mode 100644 docs/models/getdatasetrecordv1observabilitydatasetrecordsdatasetrecordidgetrequest.md create mode 100644 docs/models/getdatasetsv1observabilitydatasetsgetrequest.md create mode 100644 docs/models/getjudgebyidv1observabilityjudgesjudgeidgetrequest.md create mode 100644 docs/models/getjudgesv1observabilityjudgesgetrequest.md create mode 100644 docs/models/getsimilarchatcompletioneventsv1observabilitychatcompletioneventseventidsimilareventsgetrequest.md create mode 100644 docs/models/guardrail.md create mode 100644 docs/models/guardrailconfig.md create mode 100644 docs/models/imagecontent.md create mode 100644 docs/models/importdatasetfromcampaignrequest.md create mode 100644 docs/models/importdatasetfromdatasetrequest.md create mode 100644 docs/models/importdatasetfromexplorerrequest.md create mode 100644 docs/models/importdatasetfromfilerequest.md create mode 100644 docs/models/importdatasetfromplaygroundrequest.md create mode 100644 docs/models/judge.md create mode 100644 docs/models/judgechatcompletioneventrequest.md create mode 100644 docs/models/judgechatcompletioneventv1observabilitychatcompletioneventseventidlivejudgingpostrequest.md create mode 100644 docs/models/judgeclassificationoutput.md create mode 100644 docs/models/judgeclassificationoutputoption.md create mode 100644 docs/models/judgedatasetrecordrequest.md create mode 100644 docs/models/judgedatasetrecordv1observabilitydatasetrecordsdatasetrecordidlivejudgingpostrequest.md create mode 100644 docs/models/judgeoutput.md create mode 100644 docs/models/judgeoutputtype.md create mode 100644 docs/models/judgeoutputunion.md create mode 100644 docs/models/judgeregressionoutput.md create mode 100644 docs/models/listcampaignselectedeventsresponse.md create mode 100644 docs/models/listcampaignsresponse.md 
create mode 100644 docs/models/listchatcompletionfieldsresponse.md create mode 100644 docs/models/listdatasetimporttasksresponse.md create mode 100644 docs/models/listdatasetrecordsresponse.md create mode 100644 docs/models/listdatasetsresponse.md create mode 100644 docs/models/listjudgesresponse.md create mode 100644 docs/models/listmodelsv1modelsgetrequest.md create mode 100644 docs/models/listsharingresponse.md create mode 100644 docs/models/mcpservericon.md create mode 100644 docs/models/messageresponse.md create mode 100644 docs/models/moderationllmv1action.md create mode 100644 docs/models/moderationllmv1categorythresholds.md create mode 100644 docs/models/moderationllmv1config.md create mode 100644 docs/models/observabilityerrorcode.md create mode 100644 docs/models/observabilityerrordetail.md create mode 100644 docs/models/op.md create mode 100644 docs/models/operator.md create mode 100644 docs/models/option.md create mode 100644 docs/models/or_.md create mode 100644 docs/models/paginatedconnectors.md create mode 100644 docs/models/paginatedresultcampaignpreview.md create mode 100644 docs/models/paginatedresultchatcompletioneventpreview.md create mode 100644 docs/models/paginatedresultdatasetimporttask.md create mode 100644 docs/models/paginatedresultdatasetpreview.md create mode 100644 docs/models/paginatedresultdatasetrecord.md create mode 100644 docs/models/paginatedresultjudgepreview.md create mode 100644 docs/models/paginationresponse.md create mode 100644 docs/models/postdatasetrecordsfromcampaignv1observabilitydatasetsdatasetidimportsfromcampaignpostrequest.md create mode 100644 docs/models/postdatasetrecordsfromdatasetv1observabilitydatasetsdatasetidimportsfromdatasetpostrequest.md create mode 100644 docs/models/postdatasetrecordsfromexplorerv1observabilitydatasetsdatasetidimportsfromexplorerpostrequest.md create mode 100644 docs/models/postdatasetrecordsfromfilev1observabilitydatasetsdatasetidimportsfromfilepostrequest.md create mode 100644 
docs/models/postdatasetrecordsfromplaygroundv1observabilitydatasetsdatasetidimportsfromplaygroundpostrequest.md rename docs/models/{processingstatusout.md => processingstatus.md} (98%) create mode 100644 docs/models/resource.md create mode 100644 docs/models/resourcelink.md create mode 100644 docs/models/resourcevisibility.md create mode 100644 docs/models/searchchatcompletioneventidsrequest.md create mode 100644 docs/models/searchchatcompletioneventidsresponse.md rename docs/models/{listsharingout.md => searchchatcompletioneventsrequest.md} (58%) create mode 100644 docs/models/searchchatcompletioneventsresponse.md rename docs/models/{sharingout.md => sharing.md} (98%) rename docs/models/{sharingin.md => sharingrequest.md} (99%) create mode 100644 docs/models/supportedoperator.md create mode 100644 docs/models/textcontent.md create mode 100644 docs/models/textresourcecontents.md delete mode 100644 docs/models/thinkchunkthinking.md rename docs/models/{conversationthinkchunkthinking.md => thinking.md} (66%) create mode 100644 docs/models/typeenum.md create mode 100644 docs/models/updateconnectorrequest.md create mode 100644 docs/models/updatedatasetrecordpayloadrequest.md create mode 100644 docs/models/updatedatasetrecordpayloadv1observabilitydatasetrecordsdatasetrecordidpayloadputrequest.md create mode 100644 docs/models/updatedatasetrecordpropertiesrequest.md create mode 100644 docs/models/updatedatasetrecordpropertiesv1observabilitydatasetrecordsdatasetrecordidpropertiesputrequest.md create mode 100644 docs/models/updatedatasetrequest.md create mode 100644 docs/models/updatedatasetv1observabilitydatasetsdatasetidpatchrequest.md create mode 100644 docs/models/updatejudgerequest.md create mode 100644 docs/models/updatejudgerequestoutput.md create mode 100644 docs/models/updatejudgev1observabilityjudgesjudgeidputrequest.md create mode 100644 docs/sdks/campaigns/README.md create mode 100644 docs/sdks/chatcompletionevents/README.md create mode 100644 
docs/sdks/connectors/README.md create mode 100644 docs/sdks/datasets/README.md create mode 100644 docs/sdks/fields/README.md create mode 100644 docs/sdks/judges/README.md create mode 100644 docs/sdks/records/README.md create mode 100644 src/mistralai/client/campaigns.py create mode 100644 src/mistralai/client/chat_completion_events.py create mode 100644 src/mistralai/client/connectors.py create mode 100644 src/mistralai/client/datasets.py create mode 100644 src/mistralai/client/errors/observabilityerror.py create mode 100644 src/mistralai/client/fields.py create mode 100644 src/mistralai/client/judges.py create mode 100644 src/mistralai/client/models/annotations.py create mode 100644 src/mistralai/client/models/audiocontent.py create mode 100644 src/mistralai/client/models/audiourl.py create mode 100644 src/mistralai/client/models/audiourlchunk.py create mode 100644 src/mistralai/client/models/authdata.py create mode 100644 src/mistralai/client/models/basefielddefinition.py create mode 100644 src/mistralai/client/models/basetaskstatus.py create mode 100644 src/mistralai/client/models/blobresourcecontents.py create mode 100644 src/mistralai/client/models/campaign.py create mode 100644 src/mistralai/client/models/chatcompletionevent.py create mode 100644 src/mistralai/client/models/chatcompletioneventpreview.py create mode 100644 src/mistralai/client/models/chattranscriptionevent.py create mode 100644 src/mistralai/client/models/connector.py create mode 100644 src/mistralai/client/models/connector_call_tool_v1op.py create mode 100644 src/mistralai/client/models/connector_delete_v1op.py create mode 100644 src/mistralai/client/models/connector_get_v1op.py create mode 100644 src/mistralai/client/models/connector_list_v1op.py create mode 100644 src/mistralai/client/models/connector_update_v1op.py create mode 100644 src/mistralai/client/models/connectorcalltoolrequest.py create mode 100644 src/mistralai/client/models/connectorsqueryfilters.py create mode 100644 
src/mistralai/client/models/connectortool.py create mode 100644 src/mistralai/client/models/connectortoolcallmetadata.py create mode 100644 src/mistralai/client/models/connectortoolcallresponse.py create mode 100644 src/mistralai/client/models/connectortoollocale.py create mode 100644 src/mistralai/client/models/connectortoolresultmetadata.py create mode 100644 src/mistralai/client/models/conversationpayload.py create mode 100644 src/mistralai/client/models/conversationsource.py delete mode 100644 src/mistralai/client/models/conversationthinkchunk.py create mode 100644 src/mistralai/client/models/create_dataset_record_v1_observability_datasets_dataset_id_records_postop.py create mode 100644 src/mistralai/client/models/createcampaignrequest.py create mode 100644 src/mistralai/client/models/createconnectorrequest.py create mode 100644 src/mistralai/client/models/createdatasetrecordrequest.py create mode 100644 src/mistralai/client/models/createdatasetrequest.py rename src/mistralai/client/models/{githubrepositoryin.py => creategithubrepositoryrequest.py} (90%) create mode 100644 src/mistralai/client/models/createjudgerequest.py create mode 100644 src/mistralai/client/models/dataset.py create mode 100644 src/mistralai/client/models/datasetimporttask.py create mode 100644 src/mistralai/client/models/datasetpreview.py create mode 100644 src/mistralai/client/models/datasetrecord.py create mode 100644 src/mistralai/client/models/delete_campaign_v1_observability_campaigns_campaign_id_deleteop.py create mode 100644 src/mistralai/client/models/delete_dataset_record_v1_observability_dataset_records_dataset_record_id_deleteop.py create mode 100644 src/mistralai/client/models/delete_dataset_v1_observability_datasets_dataset_id_deleteop.py create mode 100644 src/mistralai/client/models/delete_judge_v1_observability_judges_judge_id_deleteop.py create mode 100644 src/mistralai/client/models/deletedatasetrecordsrequest.py rename src/mistralai/client/models/{deletemodelout.py => 
deletemodelresponse.py} (90%) create mode 100644 src/mistralai/client/models/embeddedresource.py create mode 100644 src/mistralai/client/models/executionconfig.py create mode 100644 src/mistralai/client/models/export_dataset_to_jsonl_v1_observability_datasets_dataset_id_exports_to_jsonl_getop.py create mode 100644 src/mistralai/client/models/exportdatasetresponse.py create mode 100644 src/mistralai/client/models/feedresultchatcompletioneventpreview.py create mode 100644 src/mistralai/client/models/fetchcampaignstatusresponse.py create mode 100644 src/mistralai/client/models/fetchchatcompletionfieldoptionsresponse.py create mode 100644 src/mistralai/client/models/fetchfieldoptioncountsrequest.py create mode 100644 src/mistralai/client/models/fetchfieldoptioncountsresponse.py create mode 100644 src/mistralai/client/models/fieldgroup.py create mode 100644 src/mistralai/client/models/fieldoptioncountitem.py create mode 100644 src/mistralai/client/models/filevisibility.py create mode 100644 src/mistralai/client/models/filtercondition.py create mode 100644 src/mistralai/client/models/filtergroup.py create mode 100644 src/mistralai/client/models/filterpayload.py create mode 100644 src/mistralai/client/models/get_campaign_by_id_v1_observability_campaigns_campaign_id_getop.py create mode 100644 src/mistralai/client/models/get_campaign_selected_events_v1_observability_campaigns_campaign_id_selected_events_getop.py create mode 100644 src/mistralai/client/models/get_campaign_status_by_id_v1_observability_campaigns_campaign_id_status_getop.py create mode 100644 src/mistralai/client/models/get_campaigns_v1_observability_campaigns_getop.py create mode 100644 src/mistralai/client/models/get_chat_completion_event_v1_observability_chat_completion_events_event_id_getop.py create mode 100644 src/mistralai/client/models/get_chat_completion_events_v1_observability_chat_completion_events_search_postop.py create mode 100644 
src/mistralai/client/models/get_chat_completion_field_options_counts_v1_observability_chat_completion_fields_field_name_options_counts_postop.py create mode 100644 src/mistralai/client/models/get_chat_completion_field_options_v1_observability_chat_completion_fields_field_name_options_getop.py create mode 100644 src/mistralai/client/models/get_dataset_by_id_v1_observability_datasets_dataset_id_getop.py create mode 100644 src/mistralai/client/models/get_dataset_import_task_v1_observability_datasets_dataset_id_tasks_task_id_getop.py create mode 100644 src/mistralai/client/models/get_dataset_import_tasks_v1_observability_datasets_dataset_id_tasks_getop.py create mode 100644 src/mistralai/client/models/get_dataset_record_v1_observability_dataset_records_dataset_record_id_getop.py create mode 100644 src/mistralai/client/models/get_dataset_records_v1_observability_datasets_dataset_id_records_getop.py create mode 100644 src/mistralai/client/models/get_datasets_v1_observability_datasets_getop.py create mode 100644 src/mistralai/client/models/get_judge_by_id_v1_observability_judges_judge_id_getop.py create mode 100644 src/mistralai/client/models/get_judges_v1_observability_judges_getop.py create mode 100644 src/mistralai/client/models/get_similar_chat_completion_events_v1_observability_chat_completion_events_event_id_similar_events_getop.py create mode 100644 src/mistralai/client/models/guardrailconfig.py create mode 100644 src/mistralai/client/models/imagecontent.py create mode 100644 src/mistralai/client/models/importdatasetfromcampaignrequest.py create mode 100644 src/mistralai/client/models/importdatasetfromdatasetrequest.py create mode 100644 src/mistralai/client/models/importdatasetfromexplorerrequest.py create mode 100644 src/mistralai/client/models/importdatasetfromfilerequest.py create mode 100644 src/mistralai/client/models/importdatasetfromplaygroundrequest.py create mode 100644 src/mistralai/client/models/judge.py create mode 100644 
src/mistralai/client/models/judge_chat_completion_event_v1_observability_chat_completion_events_event_id_live_judging_postop.py create mode 100644 src/mistralai/client/models/judge_dataset_record_v1_observability_dataset_records_dataset_record_id_live_judging_postop.py create mode 100644 src/mistralai/client/models/judgechatcompletioneventrequest.py create mode 100644 src/mistralai/client/models/judgeclassificationoutput.py create mode 100644 src/mistralai/client/models/judgeclassificationoutputoption.py create mode 100644 src/mistralai/client/models/judgedatasetrecordrequest.py create mode 100644 src/mistralai/client/models/judgeoutput.py create mode 100644 src/mistralai/client/models/judgeoutputtype.py create mode 100644 src/mistralai/client/models/judgeregressionoutput.py create mode 100644 src/mistralai/client/models/list_models_v1_models_getop.py create mode 100644 src/mistralai/client/models/listcampaignselectedeventsresponse.py create mode 100644 src/mistralai/client/models/listcampaignsresponse.py create mode 100644 src/mistralai/client/models/listchatcompletionfieldsresponse.py create mode 100644 src/mistralai/client/models/listdatasetimporttasksresponse.py create mode 100644 src/mistralai/client/models/listdatasetrecordsresponse.py create mode 100644 src/mistralai/client/models/listdatasetsresponse.py create mode 100644 src/mistralai/client/models/listjudgesresponse.py delete mode 100644 src/mistralai/client/models/listsharingout.py create mode 100644 src/mistralai/client/models/listsharingresponse.py create mode 100644 src/mistralai/client/models/mcpservericon.py create mode 100644 src/mistralai/client/models/messageresponse.py create mode 100644 src/mistralai/client/models/moderationllmv1action.py create mode 100644 src/mistralai/client/models/moderationllmv1categorythresholds.py create mode 100644 src/mistralai/client/models/moderationllmv1config.py create mode 100644 src/mistralai/client/models/observabilityerrorcode.py create mode 100644 
src/mistralai/client/models/observabilityerrordetail.py create mode 100644 src/mistralai/client/models/paginatedconnectors.py create mode 100644 src/mistralai/client/models/paginatedresultcampaignpreview.py create mode 100644 src/mistralai/client/models/paginatedresultchatcompletioneventpreview.py create mode 100644 src/mistralai/client/models/paginatedresultdatasetimporttask.py create mode 100644 src/mistralai/client/models/paginatedresultdatasetpreview.py create mode 100644 src/mistralai/client/models/paginatedresultdatasetrecord.py create mode 100644 src/mistralai/client/models/paginatedresultjudgepreview.py create mode 100644 src/mistralai/client/models/paginationresponse.py create mode 100644 src/mistralai/client/models/post_dataset_records_from_campaign_v1_observability_datasets_dataset_id_imports_from_campaign_postop.py create mode 100644 src/mistralai/client/models/post_dataset_records_from_dataset_v1_observability_datasets_dataset_id_imports_from_dataset_postop.py create mode 100644 src/mistralai/client/models/post_dataset_records_from_explorer_v1_observability_datasets_dataset_id_imports_from_explorer_postop.py create mode 100644 src/mistralai/client/models/post_dataset_records_from_file_v1_observability_datasets_dataset_id_imports_from_file_postop.py create mode 100644 src/mistralai/client/models/post_dataset_records_from_playground_v1_observability_datasets_dataset_id_imports_from_playground_postop.py rename src/mistralai/client/models/{processingstatusout.py => processingstatus.py} (77%) create mode 100644 src/mistralai/client/models/resourcelink.py create mode 100644 src/mistralai/client/models/resourcevisibility.py create mode 100644 src/mistralai/client/models/searchchatcompletioneventidsrequest.py create mode 100644 src/mistralai/client/models/searchchatcompletioneventidsresponse.py create mode 100644 src/mistralai/client/models/searchchatcompletioneventsrequest.py create mode 100644 src/mistralai/client/models/searchchatcompletioneventsresponse.py 
rename src/mistralai/client/models/{sharingout.py => sharing.py} (93%) rename src/mistralai/client/models/{sharingin.py => sharingrequest.py} (94%) create mode 100644 src/mistralai/client/models/textcontent.py create mode 100644 src/mistralai/client/models/textresourcecontents.py create mode 100644 src/mistralai/client/models/update_dataset_record_payload_v1_observability_dataset_records_dataset_record_id_payload_putop.py create mode 100644 src/mistralai/client/models/update_dataset_record_properties_v1_observability_dataset_records_dataset_record_id_properties_putop.py create mode 100644 src/mistralai/client/models/update_dataset_v1_observability_datasets_dataset_id_patchop.py create mode 100644 src/mistralai/client/models/update_judge_v1_observability_judges_judge_id_putop.py create mode 100644 src/mistralai/client/models/updateconnectorrequest.py create mode 100644 src/mistralai/client/models/updatedatasetrecordpayloadrequest.py create mode 100644 src/mistralai/client/models/updatedatasetrecordpropertiesrequest.py create mode 100644 src/mistralai/client/models/updatedatasetrequest.py create mode 100644 src/mistralai/client/models/updatejudgerequest.py create mode 100644 src/mistralai/client/observability.py create mode 100644 src/mistralai/client/records.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 8e89c12b..dd18eda6 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,19 +1,19 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 9ea68a20ee2ef4565df16947f204034b + docChecksum: ae098de142fd81aa02c9a5a26a6591b8 docVersion: 1.0.0 speakeasyVersion: 1.729.0 generationVersion: 2.841.0 - releaseVersion: 2.0.0rc1 - configChecksum: ba30d47e402a93dc30b5001c33116a3d + releaseVersion: 2.0.0 + configChecksum: 60e6aae39454140823667a495d507fca repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true persistentEdits: - generation_id: 
92ab8a00-49e7-471b-bca6-d18f761863df - pristine_commit_hash: 5eb9662433e80c22603fb3a3bf921f6b285fa2d4 - pristine_tree_hash: 9e781b9b07960a689815c5fa6008765ae4a60716 + generation_id: 1fa80ee6-4130-423f-9d6e-8b865b18cc7f + pristine_commit_hash: f2c5b1b11c5e7496fda888469f22e3dd22af1603 + pristine_tree_hash: 4bf96963cbd890c55a9d591243f7aea7311db58b features: python: additionalDependencies: 1.0.0 @@ -26,6 +26,7 @@ features: downloadStreams: 1.0.1 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 + errors: 3.3.5 examples: 3.0.2 flatRequests: 1.0.1 flattening: 3.1.1 @@ -59,16 +60,20 @@ trackedFiles: deleted: true USAGE.md: id: 3aed33ce6e6f - last_write_checksum: sha1:50cc0351d6145a805d1d5ae8be4dfce58178e648 - pristine_git_object: f71bbabc223b8cef8d923816fce8d572f3901884 + last_write_checksum: sha1:d172deb3ee1630f16b279de22aec1f8f68d7565f + pristine_git_object: 9d32240a60c8885479c95388bb81c8024b5604ff docs/errors/httpvalidationerror.md: id: 7fe2e5327e07 last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc + docs/errors/observabilityerror.md: + id: 750ead4ce7ee + last_write_checksum: sha1:a3fa64f01329ca02fdd8d7a6470c3d2f051985e5 + pristine_git_object: 615552c32013c702b3e7476b1691952b0a4a65df docs/models/agent.md: id: ffdbb4c53c87 - last_write_checksum: sha1:c87b05a17785cd83fdfc58cb2d55b6d77d3bc23e - pristine_git_object: 4de5a901d120b85ba5940490a2ec3fd4f1a91136 + last_write_checksum: sha1:a4438b1b0db1d00df3aacfc432acc523a3ca57da + pristine_git_object: 94e8b035a5a9f61d3a0e345daf977a83e352ba07 docs/models/agentaliasresponse.md: id: 5ac4721d8947 last_write_checksum: sha1:15dcc6820e89d2c6bb799e331463419ce29ec167 @@ -213,6 +218,18 @@ trackedFiles: id: 513b8b7bc0b7 last_write_checksum: sha1:9154d0ac6b0ab8970a10a8ad7716009d62e80ce7 pristine_git_object: 022f7e10edb22cb1b1d741c13ac586bd136d03b5 + docs/models/and_.md: + id: a4692aa4d8e7 + last_write_checksum: sha1:86773c7178b6c04a7adc487d860c39a51116b0cb + 
pristine_git_object: 591b7bb362df8c1544b51c935c78be2111c0e18a + docs/models/annotations.md: + id: f3368d2e0511 + last_write_checksum: sha1:2ffa9e219b9cf7b28e8591ddbc7c27d92d07022e + pristine_git_object: 2a23157c270b9f7a5fa4872aae5b541a3fa25051 + docs/models/answer.md: + id: 9ac735faad2d + last_write_checksum: sha1:4364632d9aad9c1a5ff2dfb7381d2b0db01efbb9 + pristine_git_object: 324a8e4e8656b3eef77355cc24c70ac0a2e1aa9c docs/models/apiendpoint.md: id: be613fd9b947 last_write_checksum: sha1:4d984c11248f7da42c949164e69b53995d5942c4 @@ -237,10 +254,18 @@ trackedFiles: id: ececf40457de last_write_checksum: sha1:9f23adf16a682cc43346d157f7e971c596b416ef pristine_git_object: 147708d9238e40e1cdb222beee15fbe8c1603050 + docs/models/audience.md: + id: 6a718c512c7f + last_write_checksum: sha1:9d46fde31c3f8f43af7288359d6de60bfa8dfe6c + pristine_git_object: ac325076feae9be8b54492d4bd85cb5c52828bc0 docs/models/audiochunk.md: id: 88315a758fd4 last_write_checksum: sha1:b47b295122cea28d66212d75a1f0eccd70a248cc pristine_git_object: 1ba8b0f578fa94b4f8dddf559798e033a1704e7b + docs/models/audiocontent.md: + id: f0b2c5004edf + last_write_checksum: sha1:ce23944b8c8556d20c1b3a9b7a7715dea232e124 + pristine_git_object: 64ab6d26b5561e15722cab1eab3134c5a0688a91 docs/models/audioencoding.md: id: 1e0dfee9c2a0 last_write_checksum: sha1:5d47cfaca916d7a47adbea71748595b3ab69a478 @@ -257,10 +282,34 @@ trackedFiles: id: 79b5f721b753 last_write_checksum: sha1:df6825c05b5a02dcf904ebaa40fb97e9186248cc pristine_git_object: 5d64964d1a635da912f2553c306fb8654ebfca2e + docs/models/audiourl.md: + id: 5cf317caac89 + last_write_checksum: sha1:2ab877ab65fd947d3d3833266d64167a78f83350 + pristine_git_object: 45ad74938a74eeed51440b4b62db941cac19e095 + docs/models/audiourlchunk.md: + id: dc995cbb4163 + last_write_checksum: sha1:f85ceef0dd3cf36caf7889eaf645c5fc6a329baf + pristine_git_object: 56af9df98b9d948575f733b88bfaf6c9202832b3 + docs/models/audiourlunion.md: + id: fe1a2fe0351a + last_write_checksum: 
sha1:9d271e3711f30e7715720e1d68b7b5d2a6a74cc1 + pristine_git_object: 9666906674288fa3e251549d6edb355eabbce1d9 + docs/models/authdata.md: + id: e510ac107a56 + last_write_checksum: sha1:1cfbc35a6112a6eca7b4199c37959a0fef390ca8 + pristine_git_object: d0784e66112b8f79036d1acff2a7fc242058e4a0 + docs/models/basefielddefinition.md: + id: f9e0bbae859b + last_write_checksum: sha1:28a4d2774231873af97debedcf2fba4f49e83bf2 + pristine_git_object: 3f7abea981c142e1fb1ba993a10dbd0e347b2df2 docs/models/basemodelcard.md: id: 2f62bfbd650e last_write_checksum: sha1:4b29e0d24060b6724e82aeee05befe1cddb316f4 pristine_git_object: 0f42504fd6446c0baf4686bfbb8481658b6789cd + docs/models/basetaskstatus.md: + id: b709621d5c71 + last_write_checksum: sha1:3f78b970cb0c8ba22f100af3b8b6e1730a4f86f7 + pristine_git_object: 8fad1e10f1b631d75303dbd8a977f37cacd26a33 docs/models/batcherror.md: id: 8053e29a3f26 last_write_checksum: sha1:23a12dc2e95f92a7a3691bd65a1b05012c669f0f @@ -277,10 +326,18 @@ trackedFiles: id: b113ca846594 last_write_checksum: sha1:f9dc702c27b8257e008390519df744290e09c4b4 pristine_git_object: 6ee3b394a8b1125769a355359b5a44bc7c3224ea + docs/models/blobresourcecontents.md: + id: 19b9f897373e + last_write_checksum: sha1:0a40334024da0f41ccab87e10add590ea87a8b01 + pristine_git_object: c862e53730393d65a061ed63a96640859fe74adc docs/models/builtinconnectors.md: id: 9d14e972f08a last_write_checksum: sha1:1f32eb515e32c58685d0bdc15de09656194c508c pristine_git_object: f96f50444aaa23ca291db2fd0dc69db0d9d149d9 + docs/models/campaign.md: + id: cc1272bc909c + last_write_checksum: sha1:291f6152431f3f14c16df9005a4392907dbf03e2 + pristine_git_object: 1f2a7a365ac4a3811ccf9760e56eabbf9106638d docs/models/chatclassificationrequest.md: id: 57b86771c870 last_write_checksum: sha1:bfd2fb8e2c83578ca0cea5209ea3f18c3bcd2ae5 @@ -293,6 +350,22 @@ trackedFiles: id: 225764da91d3 last_write_checksum: sha1:b894d3408cb801e072c3c302a5676ff939d59284 pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b + 
docs/models/chatcompletionevent.md: + id: fc1097c5bfe2 + last_write_checksum: sha1:ef1f417581e943503acb38b503aedde3e157d934 + pristine_git_object: 500192f661b0657f594f85da4d38896954426db5 + docs/models/chatcompletioneventextrafields.md: + id: 686e5af33206 + last_write_checksum: sha1:1f9951574bf3f554beb8b1273af5fa7cf3c6381e + pristine_git_object: c2b7f855e76a35a3f61d7f11c7488245d8d99707 + docs/models/chatcompletioneventpreview.md: + id: 89dcfcc3bd32 + last_write_checksum: sha1:ea5a4776fd299a1c8208392e54061615ddb19ad2 + pristine_git_object: 855e8ab0ccf2851aad23067cc6386211bd1e80f0 + docs/models/chatcompletioneventpreviewextrafields.md: + id: 6562107fac56 + last_write_checksum: sha1:5bd3841a27c41dcc345a216e36b2bd1f90596d24 + pristine_git_object: dd2138278eb73abffc15fdc2583fe61c6c1f17d1 docs/models/chatcompletionrequest.md: id: adffe90369d0 last_write_checksum: sha1:4980b698006c641b1c84495c5b601cc8662b05f6 @@ -345,6 +418,10 @@ trackedFiles: id: aec173bca43b last_write_checksum: sha1:14ce49ace5845bc467fe1559b12374bfd36bc9a7 pristine_git_object: ff1c6ea32233d5c5e8d6292c62f9e8eacd3340c3 + docs/models/chattranscriptionevent.md: + id: 3932410b4664 + last_write_checksum: sha1:db91f483894b9dadc0baa66358c3b81874fea59c + pristine_git_object: c2a38ed5a167c698a22b332bca4bf3dc3b69f204 docs/models/checkpoint.md: id: 9c97119961cf last_write_checksum: sha1:0e7732d9c30f67d59fe4d9ad1d165ad0cd80c790 @@ -477,10 +554,70 @@ trackedFiles: id: 19b9e48a3c2e last_write_checksum: sha1:eb6494cb19f23c6df62afb009cc88ce38d24af86 pristine_git_object: fd6e6aaa58cabba0cdec1b76ac50fb6e46f91b07 + docs/models/connector.md: + id: cbf7c2c53983 + last_write_checksum: sha1:477e81b9e908f8c40e22cffcf4e7cd6ae38fe34e + pristine_git_object: a5d6073ea8da1284bea490d74fec8e6224fc3910 + docs/models/connectorcalltoolrequest.md: + id: d0f10cee6826 + last_write_checksum: sha1:81fa16df68355ba258c66189a2c9b6d0cdb2dfb8 + pristine_git_object: 9ef7a35154327673593f323a142fe63ec706e799 + 
docs/models/connectorcalltoolv1request.md: + id: a7a294c4280c + last_write_checksum: sha1:296a9c31a7b9dc7621b871000f17f6ced3d3a288 + pristine_git_object: cdda08cf9a0c30e7b2e8e1017cd633472ea398c7 + docs/models/connectordeletev1request.md: + id: 8d5621ba6395 + last_write_checksum: sha1:881b88b2e0788f7c16938115a6cada5f0ebe144b + pristine_git_object: e50c7296f28bbf48daf724af3c848a50e8424ad7 + docs/models/connectorgetv1request.md: + id: 844c1f489684 + last_write_checksum: sha1:e36ac8ff28ee258340868f8aa4f3172ed3b82ef3 + pristine_git_object: c45148b97aad128744e0e6ccebb00bf22d400eff + docs/models/connectorlistv1request.md: + id: 68b7a11faff6 + last_write_checksum: sha1:e08effc0f17a95383c3ba96b06b1dba80f4dc767 + pristine_git_object: 6b9a287ead2996c402ade5c18368eaff92c76b2a + docs/models/connectorsqueryfilters.md: + id: d6fb981cad1e + last_write_checksum: sha1:3a33088e1e9332318aff437fba7d01239d417d84 + pristine_git_object: aea47e14f02b61dbbd726bc11f6197b6854bb358 + docs/models/connectortool.md: + id: 1c640bab9aeb + last_write_checksum: sha1:ab4ad137330bda6699043da2337aea477c33241e + pristine_git_object: af5cc03b98b973030b6e037165ce725d25293284 + docs/models/connectortoolcallmetadata.md: + id: 9ce26ae7e772 + last_write_checksum: sha1:e43e147bd2729139d6186ade29b851570797c9c0 + pristine_git_object: 4d44a2d0a8d1d4ffa8f66075391e514623c55278 + docs/models/connectortoolcallresponse.md: + id: 7d00da4bf949 + last_write_checksum: sha1:3bbbeffaba03f8eaf98725c7cd8e1d20ff1f3de6 + pristine_git_object: 1c51b9ac1fed0e1993f9d794fe07784d898ccf27 + docs/models/connectortoolcallresponsecontent.md: + id: 0bfb1af7b63a + last_write_checksum: sha1:d1d7453c72a0eb79ccb8e2b36e0963067ce13487 + pristine_git_object: d7f93edac88369164136b3eeb3a707adda9489bd + docs/models/connectortoollocale.md: + id: 44c1e21a5647 + last_write_checksum: sha1:7901679d8cc601169927e1b7e5808456149145de + pristine_git_object: b882c419a94971d7b8c8041ffca4a6b640b06751 + docs/models/connectortoolresultmetadata.md: + id: 
18ff112e8af2 + last_write_checksum: sha1:f4d7703c0a1ce40ebd8434e96f1758a5c0b24b45 + pristine_git_object: 77a327dc7782d538c467095c9e15fdcf575334ad + docs/models/connectorupdatev1request.md: + id: fdbeb5fee83e + last_write_checksum: sha1:9082d8f0cfe9f950d129971b152d37f936f7aaea + pristine_git_object: db9cc9b4c4379865ad15292c5385dce864b5935b docs/models/contentchunk.md: id: d2d3a32080cd - last_write_checksum: sha1:b253e4b802adb5b66d896bfc6245ac4d21a0c67c - pristine_git_object: cb7e51d3a6e05f197fceff4a4999594f3e340dac + last_write_checksum: sha1:26f4ec3431c023af6ef3da732f753b66a52bdeac + pristine_git_object: 5a8ef58c968cd6d4e29a8c6c02fbc5e8bacd6839 + docs/models/context.md: + id: e8f2f9e54b8e + last_write_checksum: sha1:4c14b386f9f89d98f4dd9487ab030f10478c0e63 + pristine_git_object: 107f1bd428b1f39b36f75e5a10532bc462967ce5 docs/models/conversationappendrequest.md: id: 722746e5065c last_write_checksum: sha1:c8a4a49f0a1fe5cdd2ef6264ef9c600cfc8f7beb @@ -517,10 +654,14 @@ trackedFiles: id: 46684ffdf874 last_write_checksum: sha1:5b10a9f3f19591a2675979c21dd8383d5249d728 pristine_git_object: 8fa51571697ee375bfbc708de854bc0b1129eec7 + docs/models/conversationpayload.md: + id: df641edf7b7e + last_write_checksum: sha1:ffb2c51b5e7d06dcffd41cede9fd24b6aee34518 + pristine_git_object: 481f18e67f70226c928522726c73b3e07b2a7c4c docs/models/conversationrequest.md: id: dd7f4d6807f2 - last_write_checksum: sha1:e4da423f9eb7a8a5d0c21948b50e8df08a63552c - pristine_git_object: bd7823a88a07d4bc8fe1da82e51f843e70480ee1 + last_write_checksum: sha1:093de161c340171af6f89228f7e248668e09b3a1 + pristine_git_object: 9aabc52a331b638ce4633834328fe50ffb8d09eb docs/models/conversationrequestagentversion.md: id: 68aad87b1459 last_write_checksum: sha1:fd2e9cd7ed2499b5843c592505ec5e0596a50b33 @@ -535,12 +676,16 @@ trackedFiles: pristine_git_object: 2e4e8d01b5482c4e0644be52e55bf6912aeff69e docs/models/conversationresponse.md: id: 2eccf42d48af - last_write_checksum: 
sha1:8a86a4d0df6d13b121d5e41a8ee45555b69bf927 - pristine_git_object: 2732f785cdd706274ec5ff383f25fc201e6d0f78 + last_write_checksum: sha1:55493c0b639cb8bda3b5f3bca57a7eec6060b148 + pristine_git_object: 3f0340ba8f382754589a29758d06e7822446488f + docs/models/conversationresponseoutput.md: + id: b4663dc32fc3 + last_write_checksum: sha1:5f188564b4b1fd9460a5155bc433c9adf7df031a + pristine_git_object: fe8ad1b5f8f3a48b48b9e2d09814af7df0ed3788 docs/models/conversationrestartrequest.md: id: 558e9daa00bd - last_write_checksum: sha1:434e6c94b5d6c37b9026d536308cd1d3ff56e8d6 - pristine_git_object: ad3ff3624f533e4d4f751264d9bc6dd1849b3b69 + last_write_checksum: sha1:40ca7ee3ad13bf14f19816a6f54e44a4088f8317 + pristine_git_object: 08f47bba0663f5d7ea4d9c8d604814de095c1c87 docs/models/conversationrestartrequestagentversion.md: id: e6ea289c6b23 last_write_checksum: sha1:a5abf95a81b7e080bd3cadf65c2db38ca458573f @@ -551,8 +696,8 @@ trackedFiles: pristine_git_object: 5790624b82ce47ea99e5c25c825fbc25145bfb8e docs/models/conversationrestartstreamrequest.md: id: 01b92ab1b56d - last_write_checksum: sha1:e9755598b5be197a938f1f74aa77ac24ccac8457 - pristine_git_object: 865a1e8f666d7f6878c40eb70fe5ab1c63da3066 + last_write_checksum: sha1:4f1ed18522112ed6b2c2f5def38b418d0c04dab3 + pristine_git_object: 3ad739aa8d254dba746590245e8490268c71dc51 docs/models/conversationrestartstreamrequestagentversion.md: id: 395265f34ff6 last_write_checksum: sha1:ebf4e89a478ab40e1f8cd3f9a000e179426bda47 @@ -561,10 +706,14 @@ trackedFiles: id: 3e9c4a9ab94d last_write_checksum: sha1:300e197f11ad5efc654b51198b75049890258eef pristine_git_object: 97266b43444f5ed50eeedf574abd99cb201199fd + docs/models/conversationsource.md: + id: b50fdc875f45 + last_write_checksum: sha1:7a3fd66a8fa02456465f6218559a73bf9ae5413e + pristine_git_object: 12bf7c1fa98223675c6b27f6010d05c9b78e7017 docs/models/conversationstreamrequest.md: id: 833f266c4f96 - last_write_checksum: sha1:5cb58852d393eb6cc504b45d8b238fc2f3eecd2a - pristine_git_object: 
8b74f9e7cdea83a5622df2c3b79debe3c4427288 + last_write_checksum: sha1:19602f1f58e594960340559d57733bc11223c494 + pristine_git_object: f792c26369b64508b8aa6b5032cedcd16aa556e9 docs/models/conversationstreamrequestagentversion.md: id: e99ccc842929 last_write_checksum: sha1:0ba5fca217681cdc5e08e0d82db67884bed076a6 @@ -577,22 +726,14 @@ trackedFiles: id: 71df6212ff44 last_write_checksum: sha1:f2882742a74dd2b4f74383efa444c7ab968249dc pristine_git_object: 0f75f82b38f224340bed468ceecfe622066740ba - docs/models/conversationthinkchunk.md: - id: b9a8324da8f1 - last_write_checksum: sha1:80aed188198434ceca134e7aa7351ddba82c92c9 - pristine_git_object: 1fb16bd99f2b6277f87cd40d5c1eca389819d725 - docs/models/conversationthinkchunkthinking.md: - id: 477db2d543bd - last_write_checksum: sha1:d9f8c37fe933a3e52e2adb3ffe283d79c187cd36 - pristine_git_object: 84b800188b248166aac0043994fa27d4d79aad9d docs/models/conversationusageinfo.md: id: 57ef89d3ab83 last_write_checksum: sha1:d92408ad37d7261b0f83588e6216871074a50225 pristine_git_object: 57e260335959c605a0b9b4eaa8bf1f8272f73ae0 docs/models/createagentrequest.md: id: 9484bab389c1 - last_write_checksum: sha1:b3228a622081b6f4b2a8bdaa60ca16049517d819 - pristine_git_object: cca3a079c532d3426f65a15bb0affdd34fd1d3ac + last_write_checksum: sha1:5c24b11438c77dbbdb3a6e5275952889e4da7cb9 + pristine_git_object: e9665545ae14415bb0c61dc0312136dd2930ee13 docs/models/createagentrequesttool.md: id: 72e5f99878c5 last_write_checksum: sha1:a90ad01c15da321f0c8ec700ba359a5371c5dcbb @@ -601,10 +742,30 @@ trackedFiles: id: e79afe8f495c last_write_checksum: sha1:6cedce49f3108b9d5bc80e6d11712c594f2d9e50 pristine_git_object: d094e2d518b31ada68c282241af3aa1483e98ff6 + docs/models/createcampaignrequest.md: + id: fe54d07fe49b + last_write_checksum: sha1:fa1ea12391459548b455fb5ad22c4704415d6e5c + pristine_git_object: 2e81d26d19950ef6c1bc7186b8497e1f739e8f0b + docs/models/createconnectorrequest.md: + id: 7a0ef9d82658 + last_write_checksum: 
sha1:f536d97ffe5535b0d43f3390d4e90fe0ea74b1e1 + pristine_git_object: 4668e1a61f0b140599e283bf1144b29591df760c + docs/models/createdatasetrecordrequest.md: + id: e2c8a858a8e6 + last_write_checksum: sha1:6848c7398d763f7a046dbb41cda8bc33cbb98230 + pristine_git_object: 3ea0d68db2cc15e0dcdb51ca77a90823557604f2 + docs/models/createdatasetrecordv1observabilitydatasetsdatasetidrecordspostrequest.md: + id: 599b04b8f392 + last_write_checksum: sha1:022456acf96559cefa8cdec2c006e13107241261 + pristine_git_object: 7fe42faac9ff2f46fb8e7c0a62068cd2e8f674eb + docs/models/createdatasetrequest.md: + id: ff55db5edd8c + last_write_checksum: sha1:840cddba3faaee46a0a65a92e3f09b6079ac410d + pristine_git_object: d4c166436a4d3cdafcc5990215b0ee2ea2ff9571 docs/models/createfileresponse.md: id: ea1396cebae8 - last_write_checksum: sha1:7b26d0a466004aca5cefaeb29f84dafc405c51ff - pristine_git_object: 8152922b0d4ce199e269df955e5a25d4acf71e28 + last_write_checksum: sha1:cdda40f3d8165c27ac40a0548b79a9ffafaef4e0 + pristine_git_object: 84be4dc64637998c25005d98ef335576a3db7f00 docs/models/createfinetuningjobrequest.md: id: 36824ba035ff last_write_checksum: sha1:78f019530e9f5deace91c454c91ec6c4d0d23a20 @@ -615,20 +776,68 @@ trackedFiles: pristine_git_object: 0054a4a683a88fe67f92c1659bcb8c792ca8d286 docs/models/createfinetuningjobrequestrepository.md: id: e113eb1929b5 - last_write_checksum: sha1:6bd504d3ecb219f3245a83d306c1792133b96769 - pristine_git_object: 32be1b6dc3fcf7f6ee1a1d71abee4c81493655c2 + last_write_checksum: sha1:41eee36f6a027638e2c2cf087734d4dd91ac3839 + pristine_git_object: 42b6c6cab36dc523926e319cdfbb3937bfdbd3f6 + docs/models/creategithubrepositoryrequest.md: + id: c12cba110d76 + last_write_checksum: sha1:38c125d3aff11137dbab5b6b1c430c892ff897cb + pristine_git_object: 502afa7b708f40698f443dc76e504cc6324916a9 + docs/models/createjudgerequest.md: + id: 13683829ce5d + last_write_checksum: sha1:84122cf178b65c87a11a9c012d8d066441e2401d + pristine_git_object: 
ccb1119a99d87cc229681d75fde017e8917642a3 + docs/models/createjudgerequestoutput.md: + id: afddf2bb224f + last_write_checksum: sha1:64b6a44de253d40c4e3f0e133b31b4c5a9cd13a5 + pristine_git_object: a72676719b90a7b840a2b21d043c61bdf83ce294 docs/models/createlibraryrequest.md: id: 8935b2ed9d13 last_write_checksum: sha1:c00abfe1abb0f0323e434b084dafa0d451eb3e51 pristine_git_object: 71562806dbec6444dcdd0a19852a31ca00b6229a + docs/models/dataset.md: + id: 31af4d237e19 + last_write_checksum: sha1:073fa9a0b7891ccd9b6ae64ba960d74eeb5c9007 + pristine_git_object: 9d235433850f4024ea920e85567d5c2d6df13fb0 + docs/models/datasetimporttask.md: + id: 32402ad24d69 + last_write_checksum: sha1:8a41338cc24d1a48d4495e1be3124960bad09fef + pristine_git_object: 21e6130fcd7d2e1e4c303b4bc4292491fcd9ce81 + docs/models/datasetpreview.md: + id: 5e080239e3e1 + last_write_checksum: sha1:5914284c85165976de7d53ac06fd30ebdc5a8c15 + pristine_git_object: f729908d470767dd2bde499fcfb198e5587c7d40 + docs/models/datasetrecord.md: + id: 449ed69d3872 + last_write_checksum: sha1:55a649fc627e297da420482599680da87989c297 + pristine_git_object: dbc7c3d0046af0423eebab4d0d982e55a3ed1d38 + docs/models/deletecampaignv1observabilitycampaignscampaigniddeleterequest.md: + id: "853118278484" + last_write_checksum: sha1:b8dc39c31de60ed218a063efd7e8f07fcbb1c63f + pristine_git_object: 4114509d79e30e560f0fe2fb02bb37742bf68acd + docs/models/deletedatasetrecordsrequest.md: + id: eb3d7e70be8c + last_write_checksum: sha1:71a7cbb957cf75206f4e5783bf7b2ff11294d5c2 + pristine_git_object: 1afc46d6a9ff00ff2f002a1671809bbe9a677280 + docs/models/deletedatasetrecordv1observabilitydatasetrecordsdatasetrecordiddeleterequest.md: + id: 6ec026edf3f3 + last_write_checksum: sha1:c23fce1ecb41e7d523b9ad2d9145d72c6ae94ff2 + pristine_git_object: b2425068d8d53047c0f74b316ad456dfdcb5b18c + docs/models/deletedatasetv1observabilitydatasetsdatasetiddeleterequest.md: + id: 1324ff11f62a + last_write_checksum: sha1:b4e444986d7185513653466f232ce378a45ea98e + 
pristine_git_object: 4557d3c603d6a0b6ae2873e2ac11cca58604849b docs/models/deletefileresponse.md: id: ab3aa44589a0 last_write_checksum: sha1:47ebc2474e4725e9ecb0f0d5940c604d9a82a4df pristine_git_object: 188e2504606b051674352339c6aa999116a43b61 - docs/models/deletemodelout.md: - id: 5643e76768d5 - last_write_checksum: sha1:1593c64f7673e59b7ef1f4ae9f5f6b556dd6a269 - pristine_git_object: 5fd4df7a7013dcd4f6489ad29cdc664714d32efd + docs/models/deletejudgev1observabilityjudgesjudgeiddeleterequest.md: + id: 9031fd7f46f4 + last_write_checksum: sha1:6879788359096a08d1adc32320e63f3657f1a2dd + pristine_git_object: 44042efca294d38b53502e18fc574cf5dc67ab8a + docs/models/deletemodelresponse.md: + id: a5a703ee49a7 + last_write_checksum: sha1:cb8ef720c216ec0f9ae5a8663052e0602d3a2536 + pristine_git_object: d8bd2f32f83840769942c8c27dd9401b34515ea8 docs/models/deletemodelv1modelsmodeliddeleterequest.md: id: c838cee0f093 last_write_checksum: sha1:e5b6d18b4f8ab91630ae34a4f50f01e536e08d99 @@ -665,6 +874,10 @@ trackedFiles: id: 48437d297408 last_write_checksum: sha1:5f9294355929d66834c52c67990ba36a7f81387d pristine_git_object: 9dbfbe5074de81b9fcf6f5bae8a0423fb2c82f71 + docs/models/embeddedresource.md: + id: f7ea3888f520 + last_write_checksum: sha1:8f0869b4403767c45c464cdd7b6c640994c65ab9 + pristine_git_object: 102fca261d4ada514a7df14415383c1da9e59782 docs/models/embeddingdtype.md: id: 22786e732e28 last_write_checksum: sha1:dbd16968cdecf706c890769d8d1557298f41ef71 @@ -701,6 +914,46 @@ trackedFiles: id: 311c22a8574a last_write_checksum: sha1:627793d6aed5e378e3f2eeb4087808eb50e948d5 pristine_git_object: 3eebffca874b8614a5be3d75be3cb7b0e52c2339 + docs/models/executionconfig.md: + id: c96b31c33dcd + last_write_checksum: sha1:971187596dde6a53f9e7f4c26cb0f37d5cbafb40 + pristine_git_object: 1033b7ea323917f0ffadb8cb3854426a1956f904 + docs/models/exportdatasetresponse.md: + id: f4594898de85 + last_write_checksum: sha1:2e68bd840ee44b5b61aac0945d608060bd6506f4 + pristine_git_object: 
7187ee5c0121f42e1ebbb490563e7e11afb1f812 + docs/models/exportdatasettojsonlv1observabilitydatasetsdatasetidexportstojsonlgetrequest.md: + id: 16660f92d7d1 + last_write_checksum: sha1:fa22e53a929291e6b057283482ca7871fb6d2062 + pristine_git_object: efe4bbb058c168ad531be27cc135f09464d0da13 + docs/models/feedresultchatcompletioneventpreview.md: + id: 5ed9f0e8db01 + last_write_checksum: sha1:c5950602d174d49f293cb85047d871d360e6af16 + pristine_git_object: 08a6e2f837872ee058b2c73089cf151fa944ff90 + docs/models/fetchcampaignstatusresponse.md: + id: 175907eb768d + last_write_checksum: sha1:537f33701542d6cbab3d9bb9fc661339a9e9c748 + pristine_git_object: 7aac0f2537d4282f9a75ece71e57f2a71ce66af3 + docs/models/fetchchatcompletionfieldoptionsresponse.md: + id: f8cabbbafbbe + last_write_checksum: sha1:5e7731de2cc4fa729599a77809970675697d2823 + pristine_git_object: 86beebc1aedaa03019136eeb8842ec31354f32ac + docs/models/fetchfieldoptioncountsrequest.md: + id: 88e23bf278ee + last_write_checksum: sha1:019472cecf84c50fb2a23ff25598dfb075b4505c + pristine_git_object: b13f6312e98e8a2e59b9c0890880a06cf496ea9d + docs/models/fetchfieldoptioncountsresponse.md: + id: 0de8d989349c + last_write_checksum: sha1:800612d664838b6cc7330f5c1430a4b8a83bf830 + pristine_git_object: dabf6b5ebfe86b50a80f4986efb05fea6256bc6b + docs/models/fieldgroup.md: + id: 792d72891fc5 + last_write_checksum: sha1:599b1c695c4a0d4be6a090f06ded45be41392bc7 + pristine_git_object: 8fff281d48e4ccc4b870adb66a4b63dcac1aa853 + docs/models/fieldoptioncountitem.md: + id: "945553556487" + last_write_checksum: sha1:b18c6c530c025bebd9fa0ecb121cf34c3f2b714d + pristine_git_object: 92bc971fc5a0244482ca6107e3751dd75f923ea7 docs/models/file.md: id: 4ad31355bd1c last_write_checksum: sha1:ade4d3c908c664a07a3c333cc24bc1bfb43ab88b @@ -733,10 +986,34 @@ trackedFiles: id: 2783bfd9c4b9 last_write_checksum: sha1:a1249ef0aedb3056e613078488832c96b91f8cab pristine_git_object: 961bae1f51a4ae9df21b28fd7a5ca91dc7b3888b + 
docs/models/filesapiroutesuploadfilefilevisibility.md: + id: 7b934c9d9d25 + last_write_checksum: sha1:10c16a937f01a3eaa6b508ef2b8b4c99d968ed9e + pristine_git_object: 055f071b90f9a9d0e744f4fe1b4bead3e4647bf2 docs/models/fileschema.md: id: 9a05a660399d - last_write_checksum: sha1:97987d64285ff3092635754c78ad7b68d863e197 - pristine_git_object: 4f3e72dba17a964155007755ad9d69f0304b2adb + last_write_checksum: sha1:e69c38d738bfba7e1d7cb5938c17fcbc56fecf4c + pristine_git_object: 154548404d257f0b7339e2e3e74d2dcc23a98a4d + docs/models/filevisibility.md: + id: 2cdcd7761007 + last_write_checksum: sha1:61af0b14a6b504e9383cd7d68ad66eb0abc84e5f + pristine_git_object: 4ed11692a8773cb06edbfcd0b4afb14766d6807a + docs/models/filtercondition.md: + id: dd561c647715 + last_write_checksum: sha1:5ead258c0e3905e8a10b16e4e5b9b267e3a12747 + pristine_git_object: ba2eea6046e66d6d8101917120aef5e966aa40bd + docs/models/filtergroup.md: + id: abf06b0bda42 + last_write_checksum: sha1:a1bfdc1c9127d8cd3474ec1fccd60fe5cfaac79f + pristine_git_object: 974c724cdf274a3ba70fb7dc140395e24dd0572c + docs/models/filterpayload.md: + id: 770a71875018 + last_write_checksum: sha1:f8328419c79b46907206e7c810ad192f6dfc9154 + pristine_git_object: 49f273c0b46c968ce7097611001b097d383c52a7 + docs/models/filters.md: + id: 1370bfdd2199 + last_write_checksum: sha1:684f242882f17c17766db11e5a21bae1b5c74dcd + pristine_git_object: 4595b82b40fe37dc093bee73a4ffa9c8ac61d77d docs/models/fimcompletionrequest.md: id: b44677ecc293 last_write_checksum: sha1:24bcb54d39b3fabd487549a27b4c0a65dd5ffe50 @@ -817,26 +1094,102 @@ trackedFiles: id: 5fb499088cdf last_write_checksum: sha1:a9a3b6530b1c48a8575402b48cde7b65efb33a7d pristine_git_object: 0226b7045c9d82186e1111bb2025e96a4de90bd6 + docs/models/getcampaignbyidv1observabilitycampaignscampaignidgetrequest.md: + id: 159acf508d84 + last_write_checksum: sha1:e26a519f087140a799466e9fb530d1d37211dd7e + pristine_git_object: 9e781961e6894d501e781e829291b8b60c780555 + 
docs/models/getcampaignselectedeventsv1observabilitycampaignscampaignidselectedeventsgetrequest.md: + id: 81929b823585 + last_write_checksum: sha1:b21949b1919bd150dba84dd40ef9162014edc7d5 + pristine_git_object: 18de3f10971bf6a9e9232d357a8217cb64306ec5 + docs/models/getcampaignstatusbyidv1observabilitycampaignscampaignidstatusgetrequest.md: + id: cc2d0a9ea54a + last_write_checksum: sha1:a84735430e0c59c05cc729442a035e8f13b4223c + pristine_git_object: 947c4d64a47021cebd5daafd00e6686e890431d2 + docs/models/getcampaignsv1observabilitycampaignsgetrequest.md: + id: 58b33ee9275c + last_write_checksum: sha1:d2bba9d68540a37ee5f29843d74ba0a54f3d1503 + pristine_git_object: f83c3b64f5d8086e0c718c49d1c82481143810cb + docs/models/getchatcompletioneventsv1observabilitychatcompletioneventssearchpostrequest.md: + id: 20e3ca9fea94 + last_write_checksum: sha1:b845ba4a23ca227905dd67d7c29dec16085093c0 + pristine_git_object: b7b92279a6210655a82d5e4c773c3577a151c22c + docs/models/getchatcompletioneventv1observabilitychatcompletioneventseventidgetrequest.md: + id: 85fe583b6dfe + last_write_checksum: sha1:a186d8c7c77536e9df6bbb851497fbab0de86578 + pristine_git_object: 8fe18c47ee45a040a6118da180ea580ae1edef94 + docs/models/getchatcompletionfieldoptionscountsv1observabilitychatcompletionfieldsfieldnameoptionscountspostrequest.md: + id: d76d58e32375 + last_write_checksum: sha1:b878094387e88d9ec1d5916e3eb373e92bc8d956 + pristine_git_object: 339a1de84030de59425436a670578e3713d5a9d5 + docs/models/getchatcompletionfieldoptionsv1observabilitychatcompletionfieldsfieldnameoptionsgetrequest.md: + id: 38d3a71b425c + last_write_checksum: sha1:f8bd7fc9913588f4883cb6ecad475fcbaaa467f9 + pristine_git_object: 973a1a4bbb1502b566ff2edd221dfa24e309257f + docs/models/getdatasetbyidv1observabilitydatasetsdatasetidgetrequest.md: + id: a5811667ed1e + last_write_checksum: sha1:91f96f1449ad0adf83fccf11fdf270682a3e3cb4 + pristine_git_object: 60f2d162d96fe498dc01d844973236130d5eead1 + 
docs/models/getdatasetimporttasksv1observabilitydatasetsdatasetidtasksgetrequest.md: + id: d02f57214a55 + last_write_checksum: sha1:0e9ee3c15dba7d6e2081f75174ff6bdac73cbc67 + pristine_git_object: d4226eca158b7d346e49c1982a200c7fb65a35c8 + docs/models/getdatasetimporttaskv1observabilitydatasetsdatasetidtaskstaskidgetrequest.md: + id: 04bc3bb2be8f + last_write_checksum: sha1:6400ca140ed4d79169abdae516fb82423f83f018 + pristine_git_object: 98ffc3c62fa10a89ab5fbd7fad53d11dc489b13d + docs/models/getdatasetrecordsv1observabilitydatasetsdatasetidrecordsgetrequest.md: + id: 544765ca9499 + last_write_checksum: sha1:030efa16a9ec71b6adf325d8a7e083611e345e43 + pristine_git_object: 82453f0c4169758ccb48c465bf95677f712dc257 + docs/models/getdatasetrecordv1observabilitydatasetrecordsdatasetrecordidgetrequest.md: + id: fedf1584cb11 + last_write_checksum: sha1:1f6ad7fd1efccb3bdef3748bec748d23d00e3821 + pristine_git_object: 6b9eb3bc4793595d73416994c3537bfad4d7ab70 + docs/models/getdatasetsv1observabilitydatasetsgetrequest.md: + id: c8c1559b454f + last_write_checksum: sha1:98b580deaae5c4720cf905907db49e966b5e294a + pristine_git_object: 073ab76999d4f1b8b0b6b89feacc5137b156c4e5 docs/models/getfileresponse.md: id: a983b3c8acd6 - last_write_checksum: sha1:5ca732ae5b384937473c04de6736fbab34deca24 - pristine_git_object: 0edd13e0818fc70c9c4db1e08b1490c1e146ea63 + last_write_checksum: sha1:643a3c92ce26f21a915bd485fc4af7817e79f864 + pristine_git_object: 38ad49438c06b48ed8eeecf773d4bed51cdb1c9d + docs/models/getjudgebyidv1observabilityjudgesjudgeidgetrequest.md: + id: 06b097dd2a61 + last_write_checksum: sha1:089d9b646a90f2180dbfc85271804e0857204b34 + pristine_git_object: f9f1a2487032af6a3b9a0f7a4d7d4da61a4b854d + docs/models/getjudgesv1observabilityjudgesgetrequest.md: + id: 5a7a03200f1f + last_write_checksum: sha1:f9bbebd7b36957b6d9807063f2926b4a37c73a7e + pristine_git_object: 154ece82c1932053d4764d7d8fb2ab0f394027b2 docs/models/getsignedurlresponse.md: id: 5539e5d7c3d4 last_write_checksum: 
sha1:7198474f48bfba6d47326cd436e4a00a8ba70ce3 pristine_git_object: bde693236406fe092f48c315e3b68a2fbbe6f9a4 + docs/models/getsimilarchatcompletioneventsv1observabilitychatcompletioneventseventidsimilareventsgetrequest.md: + id: 38d58bb7d102 + last_write_checksum: sha1:63b80dff98aa4fc959f71d1e41faa0eeec4801aa + pristine_git_object: cf276b5e4297eec4d5d4c6996fde7144d54dd8c0 docs/models/githubrepository.md: id: 66c120df624b last_write_checksum: sha1:045e538dd7faffc1c6c6e6816563c5c8e776a276 pristine_git_object: 827b6f34ae68ace7b8b4811764f59de2e0fcdd22 - docs/models/githubrepositoryin.md: - id: b42209ef8423 - last_write_checksum: sha1:5ab33fc1b0b5513086b1cae07f416d502441db23 - pristine_git_object: 241cf584d5e2425e46e065f47a18bea50fa624db + docs/models/guardrail.md: + id: 34d09568ab25 + last_write_checksum: sha1:8c95d17ab908018f763f74cc79e14bd5137f5544 + pristine_git_object: 319e1415d9396412c455056135189d817e2fc473 + docs/models/guardrailconfig.md: + id: f60bf2eefb45 + last_write_checksum: sha1:517e5d142f8e259795cc0083522041ec006baeb7 + pristine_git_object: 235d7204080f3aef0b7910ed32d009cae6d4247c docs/models/hyperparameters.md: id: c167bad5b302 last_write_checksum: sha1:e391cf72690e6cd01a2878081b8d87938e1c6639 pristine_git_object: b6c00c3647d21789c92ad7d32dd29c3089ca134f + docs/models/imagecontent.md: + id: afb5d93d3b4a + last_write_checksum: sha1:bde8cb2d1020d49ef07d97d2b4503add29f8fe2c + pristine_git_object: 5145469e26759e7df9477dce921c800b70af424b docs/models/imagedetail.md: id: f8217529b496 last_write_checksum: sha1:fdf19ac9459f64616240955cb81a84ef03e775c8 @@ -857,6 +1210,26 @@ trackedFiles: id: 9d3c691a9db0 last_write_checksum: sha1:4e32bcd7d44746d2ddbfafbef96152bb2bdb2a15 pristine_git_object: db97130f26199dcb354ecb7469d09530b035daa2 + docs/models/importdatasetfromcampaignrequest.md: + id: 11d5444a3e05 + last_write_checksum: sha1:33c9d7848282c89068abc219f1a208f1fa17d4e5 + pristine_git_object: aa1ecc85ed49d1c20391ff4e6c9e98328e64494d + 
docs/models/importdatasetfromdatasetrequest.md: + id: aae16ea119a6 + last_write_checksum: sha1:5e2ac86cbab1183ef936a75d4c903be1fb227b1d + pristine_git_object: cf8d373fea2e3e0403ba1e048d4466fdd5b36c08 + docs/models/importdatasetfromexplorerrequest.md: + id: b6aefe9a5761 + last_write_checksum: sha1:e49a21dce39376c4ecbf6df8a178f668f26fa7ce + pristine_git_object: 668cdcbf88b1d0d0f506f4166fb97ee97b685527 + docs/models/importdatasetfromfilerequest.md: + id: 72ced1c6e5e8 + last_write_checksum: sha1:8c59196fc4645fbaf5c735f26f2fbb2e1f6109a2 + pristine_git_object: a05e97258f266440456381634538cb6d97ac015f + docs/models/importdatasetfromplaygroundrequest.md: + id: 74bd521abc26 + last_write_checksum: sha1:a4efe4ff8c52137af47d6ab750d1d2f7cba77e7c + pristine_git_object: 72a586bc65f6219abed50105d9df9cb8b6ec8d02 docs/models/inputentries.md: id: a5c647d5ad90 last_write_checksum: sha1:4231bb97837bdcff4515ae1b00ff5e7712256e53 @@ -945,6 +1318,50 @@ trackedFiles: id: a6b15ed6fac8 last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f pristine_git_object: 7ff7c070353c58290416aff5b01d1dfc43905269 + docs/models/judge.md: + id: adc2c7c01fd5 + last_write_checksum: sha1:f248bf99d7d0a2087facec363c59c9d7718b575b + pristine_git_object: 34bcce9ba75ec20bcd3bb990df2a52be55cbbeb8 + docs/models/judgechatcompletioneventrequest.md: + id: dade3360cf3e + last_write_checksum: sha1:22fbf9ccc5ace7a491a119d6db7638a2f642d49c + pristine_git_object: b91a649246eae2ecdc25f30b560d30d20f626eb8 + docs/models/judgechatcompletioneventv1observabilitychatcompletioneventseventidlivejudgingpostrequest.md: + id: 7c7732afc4a2 + last_write_checksum: sha1:c61572bd803bcf7b65858f1e3c45c8896ec8daa0 + pristine_git_object: 6a9d93c95ba59811f58494685090d2f8a0cd37d1 + docs/models/judgeclassificationoutput.md: + id: 387d35df2462 + last_write_checksum: sha1:668c821fa926cef50a53d0de38c281b72040dd88 + pristine_git_object: 44d8462e378bbabea4083bf35631de6985bb8715 + docs/models/judgeclassificationoutputoption.md: + id: 
41dde6c5cf31 + last_write_checksum: sha1:25cc262d748a2c9285cccdc8ed6c3d1572016a2b + pristine_git_object: 67e08ed21124756e4fe471bba5deccfc9bc7ebd2 + docs/models/judgedatasetrecordrequest.md: + id: 8b53c13e8af5 + last_write_checksum: sha1:6f636ff57171c21f214b012dbde5400be91057c4 + pristine_git_object: d82aabd9ecf595703eb43679502733483cb1d536 + docs/models/judgedatasetrecordv1observabilitydatasetrecordsdatasetrecordidlivejudgingpostrequest.md: + id: 41e358ee6a41 + last_write_checksum: sha1:cc831164ee1feb176796ca2c8ac54356ec7ed646 + pristine_git_object: 9ce4f011d6ab9f26b7bf3c720fa48ab0ec65dcf7 + docs/models/judgeoutput.md: + id: 7a436a0118b9 + last_write_checksum: sha1:fff6b2d6ce5f958a68bcc75ae19718fef7d5a49e + pristine_git_object: 4abeffa50be5eb60802391062302e9106f1ccfd0 + docs/models/judgeoutputtype.md: + id: 49623b360404 + last_write_checksum: sha1:2bb675f4824785fdce67918e698eeec682a59e5d + pristine_git_object: 6e9dfd7200de7457590b9f68c2e95361e4ebf128 + docs/models/judgeoutputunion.md: + id: 636200cea9aa + last_write_checksum: sha1:382aa75d840423705f9477cb77ca2a5832a1ee69 + pristine_git_object: 618295642a13f3c153084efa1146ddaab9d17e2b + docs/models/judgeregressionoutput.md: + id: fb323d9098ab + last_write_checksum: sha1:e3dc55fef190040b70467ec96c724b9150b41461 + pristine_git_object: 8f020dfba2e068d3b0c3cb7e355f9705a00b0b76 docs/models/legacyjobmetadata.md: id: 50ac14d9b270 last_write_checksum: sha1:ebe37a176ca318e797fee7ebf4eef73fb9938a12 @@ -999,8 +1416,8 @@ trackedFiles: pristine_git_object: 6e1e04c39c15a85d96710f8d3a8ed11a22412816 docs/models/librariessharecreatev1request.md: id: 99e7bb8f7fed - last_write_checksum: sha1:e40d710ad1023768a0574b3283ef35544f6b0088 - pristine_git_object: 4c05241de4ee5a76df335ae9ea71004bd02b8669 + last_write_checksum: sha1:f37578c7882eab83cca3cb2aaf1ac17b9a21934c + pristine_git_object: 8af7cc9dcd387d209343cab071551542a9829ebe docs/models/librariessharedeletev1request.md: id: bc8adba83f39 last_write_checksum: 
sha1:79fc5a9a3cee5b060f29edd95f00e0fea32579cf @@ -1021,6 +1438,30 @@ trackedFiles: id: e03025d58630 last_write_checksum: sha1:de42c9396546fc8487d0bd6ed15b4076599fa83f pristine_git_object: c23e32201d12a2594f97a493f63b2b7b42b9e337 + docs/models/listcampaignselectedeventsresponse.md: + id: e004d0b21d0d + last_write_checksum: sha1:3ff807be72c465b8de127a031e82c2a0ce03422d + pristine_git_object: eb6ea27f8e2966624da05ebcec56909fd0f0f4b7 + docs/models/listcampaignsresponse.md: + id: b81dfede5d8c + last_write_checksum: sha1:09b8a734298d398cba8be04a544e9f3d20841c40 + pristine_git_object: 2fcc7d194b4a94267dae86380769b76a11834e4d + docs/models/listchatcompletionfieldsresponse.md: + id: 379ca3b00d2d + last_write_checksum: sha1:82d8a580b417766ad34b31f19a47136b38fcb1d6 + pristine_git_object: c552805e2b9d96d202377244e0936e28bbc5ff75 + docs/models/listdatasetimporttasksresponse.md: + id: 8a4a3c88410e + last_write_checksum: sha1:87fba295324e978949ba8f9172662e3610367ec7 + pristine_git_object: ce2e9057176f3054466c9d4fdf2aa5ce0ae3f1a5 + docs/models/listdatasetrecordsresponse.md: + id: ce200ac11b8c + last_write_checksum: sha1:6b55916029922e18bdcc9abb5c1fec188bb8d66b + pristine_git_object: 25d2618ae300874654cb3209117cb1ffb737763d + docs/models/listdatasetsresponse.md: + id: a776dbfbc267 + last_write_checksum: sha1:abba75226b6bc439381777ea05d83bc8e910e53c + pristine_git_object: af046696ebac829f4d7f6333075a4e5b26dd855e docs/models/listdocumentsresponse.md: id: f2091cee0405 last_write_checksum: sha1:335d0ccd3a448e65739d5a0cfa2c67614daec031 @@ -1037,26 +1478,38 @@ trackedFiles: id: 59c80de4086d last_write_checksum: sha1:5a0d91c251b4b9283895d9f19f7b9416f93d4468 pristine_git_object: adb0644475841c6a4686e8c42790dd44eed43dc1 + docs/models/listjudgesresponse.md: + id: dd610841e2f0 + last_write_checksum: sha1:d90e6af66b36c914af2c478b5e3cf7d4fb79e096 + pristine_git_object: 66883d64d0e040f5eef0976a88c1a83b7a3137ed docs/models/listlibrariesresponse.md: id: 87e3bec10745 last_write_checksum: 
sha1:00522e685ec71a54f5f272d66b82e650848eaf36 pristine_git_object: e21b9ced628f6fd5ae891d4a46666ebc94546859 - docs/models/listsharingout.md: - id: a3249129f37e - last_write_checksum: sha1:4831e4f02e1d5e86f138c7bb6b04d095aa4df30f - pristine_git_object: bcac4834f3bd008868435189f40bbf9e368da0d2 + docs/models/listmodelsv1modelsgetrequest.md: + id: ade37f6d014a + last_write_checksum: sha1:10d4e1242cdac6cdc7597881e0d25ce06760971f + pristine_git_object: 537269f7e774b31c45ac75c82c096530c0bd2b4e + docs/models/listsharingresponse.md: + id: 165871ba2e7d + last_write_checksum: sha1:2c6c18123e297829dde6e877f3df984ce20aeef3 + pristine_git_object: 4c29d4d4fc8087424104ff7d5312177ec4940094 docs/models/loc.md: id: b071d5a509cc last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1 pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7 + docs/models/mcpservericon.md: + id: 0b20f03adab8 + last_write_checksum: sha1:d66113a4b0486f144b1f73f423559360ec751631 + pristine_git_object: b0ae7da069119f7477ffa5ad756f353f56f213f1 docs/models/messageentries.md: id: 9af3a27b862b last_write_checksum: sha1:a3eb6e37b780644313738f84e6c5ac653b4686bc pristine_git_object: 76256fb913376a15d5bcd2531b18f1a78b980c9d docs/models/messageinputcontentchunks.md: id: 34aac9c271db - last_write_checksum: sha1:d8ffdfd8b5458497e2cb6a32f52900c3ca2a6ddf - pristine_git_object: 0561785082c741f39f930ab7ded5b6c6a9ade6ad + last_write_checksum: sha1:641cd1dba3721f85b049c5ee514879f067483949 + pristine_git_object: 4fd18a0dcb4f6af4a9c3956116f8958dc2fa78d1 docs/models/messageinputentry.md: id: eb74af2b9341 last_write_checksum: sha1:c91bfdf9426c51236b6ff33d127dbe62b051a9da @@ -1067,8 +1520,8 @@ trackedFiles: pristine_git_object: 65e55d97606cf6f3119b7b297074587e88d3d01e docs/models/messageoutputcontentchunks.md: id: 802048198dc0 - last_write_checksum: sha1:8cf4e4ea6b6988e22c117d8f689bbfb0869816ad - pristine_git_object: c4a7777e7675ebf2384311ec82b2713da69e5900 + last_write_checksum: 
sha1:d70a638af21ee46126aa0434bf2d66c8dd8e43ff + pristine_git_object: d9c3d50e295b50618f106ef5f6b40929a28164df docs/models/messageoutputentry.md: id: f969119c8134 last_write_checksum: sha1:f50b955cd622a6160c0ada34b0e14bff612802b7 @@ -1085,6 +1538,10 @@ trackedFiles: id: cecea075d823 last_write_checksum: sha1:16dac25382642cf2614e24cb8dcef6538be34914 pristine_git_object: 16d8d52f6ff9f43798a94e96c5219314731ab5fb + docs/models/messageresponse.md: + id: 3dd409ff05ba + last_write_checksum: sha1:f7dd5b03360f01417c32a33fd8131e6899ec6430 + pristine_git_object: 504aa9de165ea21ffd7178dd092b7532fc3f9e6a docs/models/metric.md: id: a812a3e37338 last_write_checksum: sha1:14016848dcfaba90014b482634ed6d5715caa860 @@ -1099,8 +1556,8 @@ trackedFiles: pristine_git_object: c7dd2710011451c2db15f53ebc659770e786c4ca docs/models/modelconversation.md: id: 497521ee9bd6 - last_write_checksum: sha1:22a8d7502eeaf176fbd1c7b22b512b4f9e4e043f - pristine_git_object: af2e5c6149339a561b03b954cd0e71f9d9aeffd6 + last_write_checksum: sha1:aea6f51b6276c5e36447c828e78a30367f15a978 + pristine_git_object: 190a6f6eb48191b2507a4861525707f041961538 docs/models/modelconversationtool.md: id: 2dd28167bc36 last_write_checksum: sha1:9b33f73330e5ae31de877a904954efe342e99c4f @@ -1113,6 +1570,18 @@ trackedFiles: id: e2eb639c646f last_write_checksum: sha1:7394ba5645f990163c4d777ebbfc71f24c5d3a74 pristine_git_object: b44e84a00d0c54f8df78650d45de0a409c901048 + docs/models/moderationllmv1action.md: + id: ebbde3bb4ffb + last_write_checksum: sha1:4b8a7001254c7c1f56bba10e4aa5445a41def160 + pristine_git_object: e24d6a2296ded90c15dffccf636c6628d0de6c4f + docs/models/moderationllmv1categorythresholds.md: + id: b474ca28a6e7 + last_write_checksum: sha1:9080b6f5a7a6a00ed04c1cb32a43cb997cbeb5d5 + pristine_git_object: 90ae213feda5e63ad71fbd7bb0bfb07ee7b1e2c1 + docs/models/moderationllmv1config.md: + id: e39a45c3e458 + last_write_checksum: sha1:7bc088d2563fa4bb89175423b94908da4aaf7a54 + pristine_git_object: 
0c410947c9554348a77fd02d48a1a5d7e40f0abb docs/models/moderationobject.md: id: 4e84364835f5 last_write_checksum: sha1:2831033dcc3d93d32b8813498f6eb3082e2d3c4e @@ -1123,8 +1592,16 @@ trackedFiles: pristine_git_object: 75a5eec74071fdd0d330c9f3e10dac0873077f20 docs/models/multipartbodyparams.md: id: f5be2d861921 - last_write_checksum: sha1:34e68e3795c7987138abd152177fa07198d2f6f6 - pristine_git_object: f14b95737fde09a120b35e2f922568ca31825bd5 + last_write_checksum: sha1:624f30759b7b7de1913b1ef3b8bb2187a95b9570 + pristine_git_object: 9d7a00c44e1507edd12f16fbb6b3864f1c0bbd81 + docs/models/observabilityerrorcode.md: + id: 61d16ff95b87 + last_write_checksum: sha1:cba4ebf5b8b3007046c124504e1472bcd464f109 + pristine_git_object: 0c387f570c7b96c402c0e3d60108ee4cdcb22764 + docs/models/observabilityerrordetail.md: + id: 38061447dfbb + last_write_checksum: sha1:1da7bf6e72c2f972b0e22f0201a1cb9b76f53ccd + pristine_git_object: a95e25c91c54edc4948ef82c1626773975190110 docs/models/ocrimageobject.md: id: b72f3c5853b2 last_write_checksum: sha1:90c5158dec6a7b31c858677b6a8efa1e3cabd504 @@ -1153,30 +1630,94 @@ trackedFiles: id: 419abbb8353a last_write_checksum: sha1:6e717a3f3de3c464e8b3237f06867cdfecec339e pristine_git_object: d9d79125cb02bc2b09d8dc543a5e2d4a6c55571c + docs/models/op.md: + id: 4f953e6b890f + last_write_checksum: sha1:ca76665f125d37edbcf87a36a2aaafdb722d03a4 + pristine_git_object: fa3e2f3a9e437f6bd82ad87e1589484392ccb68c + docs/models/operator.md: + id: 1b6d3fc58add + last_write_checksum: sha1:e1008f48087ee8da75dd25be6207f7b2098fe131 + pristine_git_object: ca54debcf338366f66c540fdb2020e85959ef5b9 + docs/models/option.md: + id: b70901c8beab + last_write_checksum: sha1:aeac15a068312b38625c3efcd08f5f179f607f92 + pristine_git_object: 0f600be4b142e088e916a243e196d57ffb3dd76e + docs/models/or_.md: + id: 4062b9894f72 + last_write_checksum: sha1:cbf92175e3a4f829d7ad84da51301a42bbfa80cd + pristine_git_object: 2a2172695146be50782a3ef32815950d4829cf64 docs/models/orderby.md: id: 
9e749ed80f72 last_write_checksum: sha1:6ec002e3e59f37002ccb14e347b790ca4daef773 pristine_git_object: bba50df10855a8d6acdf4b061ec2ffeb0279fd7f - docs/models/output.md: - id: 376633b966cd - last_write_checksum: sha1:600058f0b0f589d8688e9589762c45a0dd18cc9b - pristine_git_object: d0ee0db93f56c40f6684fcfdb5873aba586bc876 docs/models/outputcontentchunks.md: id: f7e175c8e002 - last_write_checksum: sha1:5adb0733a8ca9b224155dfef66dfb37b7f416972 - pristine_git_object: e5185014faa41b6e6d1567d713fc390f551fad01 + last_write_checksum: sha1:5094466110028801726cc825e8809f524fe1ee24 + pristine_git_object: c76bc31d4d8791b7bef4dc6cbff6671b38a7927d + docs/models/paginatedconnectors.md: + id: 108e699a33a1 + last_write_checksum: sha1:bbec5d2256b55b26ea1429439ff5d7abe744ab04 + pristine_git_object: 3fff5b95a6ccbed2356e4fbfe907ed2fb8334ea2 + docs/models/paginatedresultcampaignpreview.md: + id: cbc69a4c26bd + last_write_checksum: sha1:032f0179ec659ed0f1401b8fd9bbef10637d8afc + pristine_git_object: 7e8e5715b373b00ac7ea06a568da27dc777173a9 + docs/models/paginatedresultchatcompletioneventpreview.md: + id: bc3919eaf32b + last_write_checksum: sha1:1a537bafaa390ee5e8dd7869c5b8905a35526f29 + pristine_git_object: 96b4b7ae6913846f39be80a3732b85eeddfeb93f + docs/models/paginatedresultdatasetimporttask.md: + id: 461b15377067 + last_write_checksum: sha1:6f916fe2ffc98bfabbc8905a470513016211ba4c + pristine_git_object: ce067d528e53e859577217e60e83b63eb8d5199c + docs/models/paginatedresultdatasetpreview.md: + id: 509ba8c49460 + last_write_checksum: sha1:b3f072c50ace6b4c52d0e8a43a39bc84767391dc + pristine_git_object: f6ca903772591eefbdd02d836fb2227635380858 + docs/models/paginatedresultdatasetrecord.md: + id: 79bed183ed1c + last_write_checksum: sha1:9f3d08b3f0b2618d66047f198121d762e8782a17 + pristine_git_object: 31b086eb6ce8db231af2ca60d07b10d20a99ce9a + docs/models/paginatedresultjudgepreview.md: + id: 90506e511844 + last_write_checksum: sha1:01368f99123b7db0e8dee1f03640f8258f89ea8b + pristine_git_object: 
91bf0c35876a6dcc2bf5cd34c8cbf1ade82b4d0c docs/models/paginationinfo.md: id: 3d2b61cbbf88 last_write_checksum: sha1:1da38e172024fe703f3180ea3c6ec91fe3c51ed0 pristine_git_object: ad1fbb86c714c152a5e6e99d8a741e7346884e55 + docs/models/paginationresponse.md: + id: 592cf80526b8 + last_write_checksum: sha1:e72a2238fe984cc4fa0d2522b462aa9ad247af78 + pristine_git_object: 7ed17a697771f10febe2b893f092865b09dc04c7 + docs/models/postdatasetrecordsfromcampaignv1observabilitydatasetsdatasetidimportsfromcampaignpostrequest.md: + id: d5158d7e7b7f + last_write_checksum: sha1:7e425ce967a5d92b14b2efa1b98e9fcf578d9c92 + pristine_git_object: b3bf2a61d1e14f54b53e746326649a51beb3419b + docs/models/postdatasetrecordsfromdatasetv1observabilitydatasetsdatasetidimportsfromdatasetpostrequest.md: + id: 6a1b28e01fd6 + last_write_checksum: sha1:63acf2f14db16c43da0347c0b5547cf90bf91e55 + pristine_git_object: 33dafda2ecabe8d4d92b973ae78932c6037809f5 + docs/models/postdatasetrecordsfromexplorerv1observabilitydatasetsdatasetidimportsfromexplorerpostrequest.md: + id: 92f8f5c18f8e + last_write_checksum: sha1:0de74481df2b1114c0c6e5bae6c8b83e09e65c7d + pristine_git_object: ba25323e33b0ab40287aa944ae77fe79148a9459 + docs/models/postdatasetrecordsfromfilev1observabilitydatasetsdatasetidimportsfromfilepostrequest.md: + id: ffc31f79f514 + last_write_checksum: sha1:f07eefba3eff14afa69786c37beacaac8484a99a + pristine_git_object: 0bff196b59382962b6e49884c547354c2f71af03 + docs/models/postdatasetrecordsfromplaygroundv1observabilitydatasetsdatasetidimportsfromplaygroundpostrequest.md: + id: a3c711a50d05 + last_write_checksum: sha1:130c16a00743f65394279d00af233e6ee4337451 + pristine_git_object: 86ab87b8d016644f31b762a9010b368337c3b141 docs/models/prediction.md: id: 3c70b2262201 last_write_checksum: sha1:ca8a77219e6113f2358a5363e935288d90df0725 pristine_git_object: fae3c1ca4ba2c2ddb3b7de401ecdc8d56dcc7740 - docs/models/processingstatusout.md: - id: 83c8c59c1802 - last_write_checksum: 
sha1:7dbbfe790616ab4388e532bd78ffc1a5183b332d - pristine_git_object: bc40d3209c4c641dd7416c925b965c1bf7b73b1b + docs/models/processingstatus.md: + id: 06047222d2ff + last_write_checksum: sha1:fd2f5d605c6e5f8aa86461e8442c50db3fbea07d + pristine_git_object: 514caa50e524ae5afab802b8394cb27189b2bfbe docs/models/processstatus.md: id: "336054835357" last_write_checksum: sha1:9b87de1980428307af6c29c2086c0e1f612ebd72 @@ -1233,6 +1774,18 @@ trackedFiles: id: 8857ab6025c4 last_write_checksum: sha1:4b7ecc7c5327c74e46e2b98bd6e3814935cdecdf pristine_git_object: c81c115992439350d56c91d2e3351a13df40676b + docs/models/resource.md: + id: 94a32a903140 + last_write_checksum: sha1:0c141e37497f5b07d2da77a41777617ed3653582 + pristine_git_object: 30d74c40f3add81023ad86271bb02dfe018c1013 + docs/models/resourcelink.md: + id: 1086204e8a19 + last_write_checksum: sha1:f1a1c228a16e8e99cd5ff04622455e080418dd6d + pristine_git_object: 074c573e64e98cbaf177fde279364f26df04a2bf + docs/models/resourcevisibility.md: + id: 891d464e9c47 + last_write_checksum: sha1:acf48cc74f27035f479f976e73897ea02d7eaaa8 + pristine_git_object: 9c06af2d034cb7de730aa39aa3a3188aac4bc764 docs/models/response.md: id: 583c991c7a30 last_write_checksum: sha1:0791cb4aa4045708ab64d42bf67bd6ab74bc7752 @@ -1277,6 +1830,22 @@ trackedFiles: id: 0e09775cd9d3 last_write_checksum: sha1:33cef5c5b097ab7a9cd6232fe3f7bca65cd1187a pristine_git_object: 34a6a012b1daeeb22626417650269e9376cc9170 + docs/models/searchchatcompletioneventidsrequest.md: + id: ace22de1b563 + last_write_checksum: sha1:0368a82e5b6399806252670aec01714e5bb78331 + pristine_git_object: 7d0c4a509c8388cb9762724badd4c20a4494bf6c + docs/models/searchchatcompletioneventidsresponse.md: + id: d35dba44c6e3 + last_write_checksum: sha1:ecaf8fbe6e098c63df02d016fa391320e4bb2f92 + pristine_git_object: 6e429684ac0246438d1b1c5ae799ec499a5bbe64 + docs/models/searchchatcompletioneventsrequest.md: + id: 429f092e3dcb + last_write_checksum: sha1:caacd88fdf6e4c28c0c1a6a33357ab0f728996ef + 
pristine_git_object: 11bc3ab9d2c7096775e68fed59aecab0c3d25834 + docs/models/searchchatcompletioneventsresponse.md: + id: e7cb0de862bc + last_write_checksum: sha1:d8e860c666fe6b152d3944e9182e90be539c3ce2 + pristine_git_object: 9474c1109d62fc5f7ff0bae32d386d54951d7fa2 docs/models/security.md: id: 452e4d4eb67a last_write_checksum: sha1:45b7b8881a6560a468153662d61b99605a492edf @@ -1285,18 +1854,18 @@ trackedFiles: id: 53a713500576 last_write_checksum: sha1:9d45d4bd272e6c146c3a8a21fd759acf2ae22148 pristine_git_object: dc5d2b68a810c2983b5a47fbff747dfc2cc17598 + docs/models/sharing.md: + id: 9f1e88417a87 + last_write_checksum: sha1:876fd514d08a6987646bf0fec636bf3fef89155b + pristine_git_object: fc718632594e04c682c6d6a3931a94d8b1fd4059 docs/models/sharingdelete.md: id: 165cac179416 last_write_checksum: sha1:1a0b3c95f4b56173510e234d7a76df85c593f360 pristine_git_object: 1dcec0950c7fcd264ea9369c24244b54ba2bcfbf - docs/models/sharingin.md: - id: 08d396ee70ad - last_write_checksum: sha1:662edfc07a007e94fe1e54a07cf89d7c83c08df5 - pristine_git_object: bac18c8d43f801e8b5cf5b3cd089f9da0ee2281a - docs/models/sharingout.md: - id: 5db4547c7c56 - last_write_checksum: sha1:bd15c318d1a3f5bee7d7104d34cbd8ba6233bbb8 - pristine_git_object: 35aeff43593f3c9067c22a2f8b1468d7faa5af34 + docs/models/sharingrequest.md: + id: cd53ce3913a5 + last_write_checksum: sha1:380d0621a0a8ec9cd4be2b53a6e326b8c9c3d201 + pristine_git_object: 21b8ec1f74c1f903aa087cd3b4d13918c0ea9dad docs/models/source.md: id: 6541ef7b41e7 last_write_checksum: sha1:d0015be42fe759d818ebd75b0cec9f83535a3b89 @@ -1305,6 +1874,10 @@ trackedFiles: id: 6a902241137c last_write_checksum: sha1:567027284c7572c0fa24132cd119e956386ff9d0 pristine_git_object: ae06b5e870d31b10f17224c99af1628a7252bbc3 + docs/models/supportedoperator.md: + id: 000f0770e0f9 + last_write_checksum: sha1:7702df7346f66b739b5a7e1d26b2d5ebc9d19c74 + pristine_git_object: 74cf9b8e6705059a24e7d1d0e311943ed1ce5c40 docs/models/systemmessage.md: id: fdb7963e1cdf 
last_write_checksum: sha1:c7603c5ce77ba2bcbda9eff65eeafdb1e9ecbec7 @@ -1325,14 +1898,22 @@ trackedFiles: id: 6cd12e0ef110 last_write_checksum: sha1:d9fe94c670c5e0578212752c11a0c405a9da8518 pristine_git_object: df0e61c32bc93ef17dbba50d026edace139fee6a + docs/models/textcontent.md: + id: 82aa22f226fc + last_write_checksum: sha1:b359ace0ab7d8960b9c0a205a788fb26189cc6ac + pristine_git_object: ddd2df027b7f0cca132c69fa47bb76e5e8afdc87 + docs/models/textresourcecontents.md: + id: aa82a3efa4ec + last_write_checksum: sha1:58468c602ae4e85c860b909f39384f49f7471a4b + pristine_git_object: daa531e1bf60f55962069ef586b4f1ae56c32662 docs/models/thinkchunk.md: id: bca24d7153f6 - last_write_checksum: sha1:0f861f1653035dea2018be9a977c15f54add9531 - pristine_git_object: 70c0369f16465e1b1f5f46e8cd799e5db536cdde - docs/models/thinkchunkthinking.md: - id: 22de7b5060fb - last_write_checksum: sha1:5e0722b8d513b38d60fbfe28635bdea40b951593 - pristine_git_object: dd1ecca12b5cda76a51b1e13335f1757a9dd7a68 + last_write_checksum: sha1:7fa91717a3c5678d33bac70c758635c50f72be47 + pristine_git_object: 98603d8f97b3e432014be8e30b0418d1d462c016 + docs/models/thinking.md: + id: 07234f8dd364 + last_write_checksum: sha1:90c0b34284137712678b0671e9f4bfb319548cbf + pristine_git_object: d9e51d7dc93b24edd807b018393eab38143d46f4 docs/models/timestampgranularity.md: id: eb4d5a8e6f08 last_write_checksum: sha1:e256a5e8c6010d500841295b89d88d0eface3b88 @@ -1461,22 +2042,66 @@ trackedFiles: id: 69a13554b554 last_write_checksum: sha1:d969f462034ed356f2c8713b601ee7d873d4ce07 pristine_git_object: 77bd0ddcf8a1d95707fa9e041de3a47bb9e7f56d + docs/models/typeenum.md: + id: d306d1d601a4 + last_write_checksum: sha1:506e72f893c4cebf7bbeb59fd05fbe5caafdb68c + pristine_git_object: 80a66af314d5ba9f5a99b07f3db9760849edad53 docs/models/unarchivemodelresponse.md: id: a690f43df567 last_write_checksum: sha1:5c9d4b78c92d30bb4835cb724d1ea22a19bf5327 pristine_git_object: 375962a7110f814288ea9f72323383bd8194e843 
docs/models/updateagentrequest.md: id: 371bfedd9f89 - last_write_checksum: sha1:97170995ed40391023f0dce5096cfebe83fa7dc8 - pristine_git_object: d3428d92a8f23670a6b587a6017a353d2c12a815 + last_write_checksum: sha1:73936f78ae35e5f84ebe32aabb150d6792ce40d1 + pristine_git_object: b8aa01f66b13d050839bd0aacf1550f669bc8d13 docs/models/updateagentrequesttool.md: id: bdf961d2c886 last_write_checksum: sha1:5355f8c97b2aef98aebff251e1f4830ddbaa7881 pristine_git_object: e358b1edb9035667104700dde890bb0b43074543 + docs/models/updateconnectorrequest.md: + id: 40e38d9c7c2a + last_write_checksum: sha1:183567baed17e6b95628cfe17455e1861fd48758 + pristine_git_object: d6d76631ab4841a1bd6a75a978f4d3837449d7e9 + docs/models/updatedatasetrecordpayloadrequest.md: + id: ada11a6c544f + last_write_checksum: sha1:220ce1184c9f6b6c6d1e5cdf2dc18197778f7af2 + pristine_git_object: f152d84329ffd358b56392025a69f13f6d2b7546 + docs/models/updatedatasetrecordpayloadv1observabilitydatasetrecordsdatasetrecordidpayloadputrequest.md: + id: abb1793448c5 + last_write_checksum: sha1:4bc30c277c9d2fa5f8fdf8938f65c61e1da8abf8 + pristine_git_object: fbb4b308362ac1f8157554ab6deeae1658231879 + docs/models/updatedatasetrecordpropertiesrequest.md: + id: 8e3f0136f445 + last_write_checksum: sha1:afa0832f5a234c6e7434a4414cb16f9b2c9ae45d + pristine_git_object: 6e98944d42f9adc02dd45c0e152dd28ba7ad6cdf + docs/models/updatedatasetrecordpropertiesv1observabilitydatasetrecordsdatasetrecordidpropertiesputrequest.md: + id: 2f433ceabf6d + last_write_checksum: sha1:18a646d17ae785ab5813eb36c43dfda857429558 + pristine_git_object: 6b09e479247a14a4c4e5d4786c2705a8c01ba5c8 + docs/models/updatedatasetrequest.md: + id: c8f941dea81b + last_write_checksum: sha1:3b4276274ab943411faf51b2a788226f0ac8343d + pristine_git_object: 2a5194e493808629a9031f518a36bbe68d2c04a6 + docs/models/updatedatasetv1observabilitydatasetsdatasetidpatchrequest.md: + id: 4be4c812536f + last_write_checksum: sha1:600212552869daf41c872ff6ec7a4cbff1e00b63 + 
pristine_git_object: 78eae8fb8193b201064416858d719fc28bb964b1 docs/models/updatedocumentrequest.md: id: ee4e094a6aa7 last_write_checksum: sha1:4c4d774c67449402eb7e1476b9d0fef5b63f2b99 pristine_git_object: 7e0b41b7be9f559b27a3430f46ed53d0453f6e03 + docs/models/updatejudgerequest.md: + id: c220d8a2a289 + last_write_checksum: sha1:7c85b0f8b6ca133ca9e4b435322e50f97afd8fbd + pristine_git_object: bf28ae7acdb67cf7b2beb0bd34403ff33fc7e477 + docs/models/updatejudgerequestoutput.md: + id: 3d8371657f6e + last_write_checksum: sha1:ba478cb39ba8ebc739477e4ac763bbe87b0f0a8e + pristine_git_object: a8db68ab3266b42741f22348e54524a2134f081a + docs/models/updatejudgev1observabilityjudgesjudgeidputrequest.md: + id: e900c1258e5e + last_write_checksum: sha1:4125c640bf8271131c8e57c0c617aa9348f29e67 + pristine_git_object: bfe3648d706b6c1f95b8c5515d4ee999d556fa2d docs/models/updatelibraryrequest.md: id: 2eda82f12f31 last_write_checksum: sha1:436e08988daa8ca04ece36a4790ed84e0629b81a @@ -1503,8 +2128,8 @@ trackedFiles: pristine_git_object: 69dd549ec7f5f885101d08dd502e25748183aebf docs/models/validationerror.md: id: 304bdf06ef8b - last_write_checksum: sha1:1889f608099577e6a116c14b211a6811d6b22786 - pristine_git_object: 7a1654a1a5cfb3ab92360b361e8e962bf2db4582 + last_write_checksum: sha1:1f297f34069668f6107f2c0389606efe413ff5a8 + pristine_git_object: 5bcea5b5d12e72222720af2c014d64ec4bdfee4a docs/models/wandbintegration.md: id: ba1f7fe1b1a3 last_write_checksum: sha1:ef35648cec304e58ccd804eafaebe9547d78ddcf @@ -1523,8 +2148,8 @@ trackedFiles: pristine_git_object: 4ca7333c412ad819e3e02c61debe402e3f9b0af9 docs/sdks/accesses/README.md: id: 2ea167c2eff2 - last_write_checksum: sha1:279d3b3a4f625b89b25e9a2a47886ac6008b3ca0 - pristine_git_object: c50456df9ea2bb71f78a83ad28f90e089d2e2cd7 + last_write_checksum: sha1:663516c8c94ca324b938a5d5bd1196663cb1de88 + pristine_git_object: 51051e2f8def9bfd6032617530ba9bead989404f docs/sdks/agents/README.md: id: 5965d8232fd8 last_write_checksum: 
sha1:a73ae6719acef32b47be55ea5c5684e91f7eda68 @@ -1535,32 +2160,52 @@ trackedFiles: pristine_git_object: 3633fe4ee136c1ac90f9446425f62a0d68fa4f90 docs/sdks/betaagents/README.md: id: 5df79b1612d8 - last_write_checksum: sha1:9ec1c7a967bc653fe175a7986ddec74d5feb0714 - pristine_git_object: aaa5110e6db30f5450877b67d70d46e53b98996b + last_write_checksum: sha1:6d14ffeace9a068f45ed9c62ee6663dc3768239f + pristine_git_object: b936538c54d38e52a67ffdb8c65ec35999197a3a + docs/sdks/campaigns/README.md: + id: 18dcc3c717ca + last_write_checksum: sha1:758413376668e53883ded94d41b1167cbc00fa04 + pristine_git_object: d5d7e4d4681811419874f58c294cb59688e595aa docs/sdks/chat/README.md: id: 393193527c2c last_write_checksum: sha1:5e7a43def5636140d70a7c781ed417e527ce9819 pristine_git_object: 1bf4aeadc762f5d696c278eefaa759f35993e9d5 + docs/sdks/chatcompletionevents/README.md: + id: 6965539e0525 + last_write_checksum: sha1:ae7c0a0e7576a1114a85b9e607cb91c2ac1181a2 + pristine_git_object: c3f19868c6707a94f0efaeeb456c09221105ff74 docs/sdks/classifiers/README.md: id: 74eb09b8d620 last_write_checksum: sha1:9f11740f8cf1a3af44fff15b63916305f1882505 pristine_git_object: dc0f4984380b5b137266421e87a1505af5260e89 + docs/sdks/connectors/README.md: + id: 7633a87d946d + last_write_checksum: sha1:1c6650e6d5511bc5eaafb0d05d78805d333f3704 + pristine_git_object: b8150eb85d182af6e7ed8216cc4b4a535271a53a docs/sdks/conversations/README.md: id: e22a9d2c5424 - last_write_checksum: sha1:4c5f8ea93d560956cb23c26e0d5f6d7cbc129e07 - pristine_git_object: e77d329b735dc21f620470bcf82220a79bc34e18 + last_write_checksum: sha1:5c75be39ef1f2a15249e363e33ebf8f3e964301c + pristine_git_object: 083b293d12fd00ba9b74a00666a9f9542cca9f4c + docs/sdks/datasets/README.md: + id: deb5d90f4faf + last_write_checksum: sha1:cf41aa56b5fe1296961ddb769b96cb0f451ed2f2 + pristine_git_object: c04ced0cf5e5f7774ba9b1d25722085b92b8f0c2 docs/sdks/documents/README.md: id: 9758e88a0a9d - last_write_checksum: sha1:ac7ab2598066971e8b371a3e73aa266ec697df1b - 
pristine_git_object: 9c219b6709d5d5bfa28113efca92012e8c5a5112 + last_write_checksum: sha1:a77fb7acf2be6e18a3017855b30f5ad58576698f + pristine_git_object: 2c9440f93d68a220c2cb2cf03c5cd656eece3be8 docs/sdks/embeddings/README.md: id: 15b5b04486c1 last_write_checksum: sha1:4a279bf9bcd84a9878ef979c78b8b75af3d52f02 pristine_git_object: cb207d8be2ca86b00dc797fc06eabd1498adb770 + docs/sdks/fields/README.md: + id: fdb6c4f3bd69 + last_write_checksum: sha1:ea6dea75f85d25fd0ccdd2c659ecec43d0e5242d + pristine_git_object: 3c8424419bae5c7e6e47b4cb23e35064531a8345 docs/sdks/files/README.md: id: e576d7a117f0 - last_write_checksum: sha1:f5861c42227b901742fd8afe7155ed6d634b1b4c - pristine_git_object: 9507326be83eaf750daa12c0b1421d819b72340d + last_write_checksum: sha1:7022f6e63afea7c7988b251351d18361fd1b5e4a + pristine_git_object: 7db76611c5788e2a0440f7c95d07928851e37d6a docs/sdks/fim/README.md: id: 499b227bf6ca last_write_checksum: sha1:5b2ce811df8d867d14fe0126f2c9619cca779f56 @@ -1569,18 +2214,26 @@ trackedFiles: id: 03d609f6ebdd last_write_checksum: sha1:2d7ff255c1462d5f1dff617a1993e730ec3911ea pristine_git_object: 4262b3a9833180ce86da43a26ee7ab27403f2cd0 + docs/sdks/judges/README.md: + id: 330e067ff8b7 + last_write_checksum: sha1:270582ab7a4200c44b0c865afb6b82d6e0c95182 + pristine_git_object: f1187518e46b00d8fb1b2bb0c4c72f51cd828a2b docs/sdks/libraries/README.md: id: df9a982905a3 last_write_checksum: sha1:e3eb0e9efb3f758fdf830aa1752c942d59a4f72b pristine_git_object: 7df1ef4e26449af572412f052ee7ad189039544f docs/sdks/models/README.md: id: b35bdf4bc7ed - last_write_checksum: sha1:2aa91ffe637c049aed0d63d24ac39688b6ecb270 - pristine_git_object: 311a2db6e213902ac5a2c27acf19f856dae2c264 + last_write_checksum: sha1:ca04fe883c5440abf402640cf26a1a0e9799a55f + pristine_git_object: f585dcbeadeb87d5634a505450f3599af21df155 docs/sdks/ocr/README.md: id: 545e35d2613e last_write_checksum: sha1:da377d75b6b7480c335d7f721bb06fe11492be38 pristine_git_object: fde2a82339e10c74aca6d1b4168b62501d7bbf83 
+ docs/sdks/records/README.md: + id: db86bab024d3 + last_write_checksum: sha1:2ef5b164016e9b40c27d8a4915aeb8d3d2ed42c7 + pristine_git_object: ce8f1f689512a9eac118c05ec1e9acf17e931556 docs/sdks/transcriptions/README.md: id: 089cf94ecf47 last_write_checksum: sha1:15d118796f147bc5b0bf4146ba39bfa9edfbc996 @@ -1611,12 +2264,12 @@ trackedFiles: pristine_git_object: 036d44b8cfc51599873bd5c401a6aed30450536c src/mistralai/client/_version.py: id: cc807b30de19 - last_write_checksum: sha1:a48c2cc43ac028eb2e31a129a9551ad2fda3d33f - pristine_git_object: 805648e42e48831658907f664d6536e8bdcd98c0 + last_write_checksum: sha1:9ca227af0848f7e58917b55ae2433118ff5f073d + pristine_git_object: c7251c9ce55d10a4ef89ce5ab58a05332b9b21ff src/mistralai/client/accesses.py: id: 76fc53bfcf59 - last_write_checksum: sha1:ed94623aa8a2bd502572a699a2f54c9281ec283e - pristine_git_object: 0761b0bc6080ab0d891be70089a1908d435559fa + last_write_checksum: sha1:4b1cf5d760f690d35582f9037df44c97c11e7e14 + pristine_git_object: 2cfdc7da91be41177f62b8ac7b2fe48aba42e496 src/mistralai/client/agents.py: id: e946546e3eaa last_write_checksum: sha1:7049cab7c308888c88b0341fb29f0132e154e3cb @@ -1639,36 +2292,52 @@ trackedFiles: pristine_git_object: 0e135b30cd122d1a813ee67bf2f9037953448e73 src/mistralai/client/beta.py: id: 981417f45147 - last_write_checksum: sha1:85f42fc6c2318eef94c90405b985120220c9c617 - pristine_git_object: 65b761d18f7274cc33162a83efa5b33211f78952 + last_write_checksum: sha1:0971bda6a9024dcbdf8b4aaad1086417b01ee40f + pristine_git_object: 83b8fc8792469efbd03f0bab240d4a6dc8135db5 src/mistralai/client/beta_agents.py: id: b64ad29b7174 - last_write_checksum: sha1:7c900a6b1483108a367050440667c069b08fbb92 - pristine_git_object: 157c5de4c66273e6df468f8a12b4399f9efb32fb + last_write_checksum: sha1:b63c905fbf5b8f85379d3df719964fc7c9675a51 + pristine_git_object: 19f8d2b261505870509a5f751988f99980385b5f + src/mistralai/client/campaigns.py: + id: 9e64fcf4e60e + last_write_checksum: 
sha1:0139096765ca8b44ef209ee0fc62e72040f3a6ff + pristine_git_object: a1ffcebc7c98a3c889ccace55c9a6c9cf3ff28df src/mistralai/client/chat.py: id: 7eba0f088d47 last_write_checksum: sha1:520b0da011d63c60bd0d3a960a410a8f4a6a3e22 pristine_git_object: 13b9c01f035c4fd6f60b78f20a1801bedf3b582b + src/mistralai/client/chat_completion_events.py: + id: 1813f339625b + last_write_checksum: sha1:cde21c8338e4fd60143fdc6ca361c0c42ec54775 + pristine_git_object: c060235c2c673b41c5fe0d1c8ac3a5c2a3d44e62 src/mistralai/client/classifiers.py: id: 26e773725732 last_write_checksum: sha1:ee94a4e50cda893f9c19c2304adda8b23fc2de9e pristine_git_object: 67199b601e38dff6fc6a4317eb845fbde6c25de0 + src/mistralai/client/connectors.py: + id: 39da03126050 + last_write_checksum: sha1:e407ace631bdafb1a0e62e292a4c27e5ce00bc38 + pristine_git_object: 37e018330e85109692a83d97d62a5efe7a15ee6f src/mistralai/client/conversations.py: id: 40692a878064 - last_write_checksum: sha1:1101b9e374010ba9cb080c30789672cfcfc45c55 - pristine_git_object: ec33b1fb12d1923ef5f686ed09c5fe5ae889e40c + last_write_checksum: sha1:eba234effe5c649b220c370a1c9098ba9a4c686d + pristine_git_object: a4af31f3abe3ece7791a76d20de9146bb83e1fcc + src/mistralai/client/datasets.py: + id: e5a6ae2a2d85 + last_write_checksum: sha1:2483bd56b90599039573c2c152dcbffa8ba8b3b8 + pristine_git_object: 48ecbdd8c1b2f42fa77a033aa0e4b4f49d20f088 src/mistralai/client/documents.py: id: bcc17286c31c - last_write_checksum: sha1:37669f51eba1b352a5e3c7f3a17d79c27c7ea772 - pristine_git_object: b3130364c0f3cc90ed1e4407a070bd99e3cce606 + last_write_checksum: sha1:29e7edd6f45d8a1a7ec8e9c9734bebe56ff97e7e + pristine_git_object: 95ea432390fcf4c8c07b3e99c7c1f5776007ce10 src/mistralai/client/embeddings.py: id: f9c17258207e last_write_checksum: sha1:d1610bf12dba8b2f8cb27d2f0aa592594dfe6b3a pristine_git_object: 5d55ffc43c0c98d46e04b238ab23a08d1b9e6a6a src/mistralai/client/errors/__init__.py: id: 0b2db51246df - last_write_checksum: sha1:0befddc505c9c47388683126750c7ad0e3fbef52 - 
pristine_git_object: 58a591a1cc2896f26df2075ffca378ca6c982d1e + last_write_checksum: sha1:0032f4eb3501c71fafb8f68113a8c80e677311a4 + pristine_git_object: f6fd99a097e71ec901f14f3c726a50f6351939de src/mistralai/client/errors/httpvalidationerror.py: id: ac3de4a52bb6 last_write_checksum: sha1:73251adb99a07d11b56d0bc0399a2362ff9ccdba @@ -1681,6 +2350,10 @@ trackedFiles: id: 8b469ecb0906 last_write_checksum: sha1:0b3fdb1136472c41a4a739a5cbf9e2a4ce0c63a4 pristine_git_object: d71dfa7b24146f1390ac6830e61acf337b99ca83 + src/mistralai/client/errors/observabilityerror.py: + id: 4946ae06717e + last_write_checksum: sha1:dc65670ba779ef19f015f65d133ec595ba002878 + pristine_git_object: a360bac4c0c6620e19d3b27f4de6d722b2d420c2 src/mistralai/client/errors/responsevalidationerror.py: id: 6cfaa3147abe last_write_checksum: sha1:6862d178d4d1964bc03db47b76709aa406546981 @@ -1689,10 +2362,14 @@ trackedFiles: id: c489ffe1e9ca last_write_checksum: sha1:f708168e46c2960dd51896083aee75ccdb36f9dd pristine_git_object: 25b87255a51021079f8ba5cc60b43509e12f9a4d + src/mistralai/client/fields.py: + id: 862335210b20 + last_write_checksum: sha1:789ac221e3aec61d0d31abd1761e368e383fba88 + pristine_git_object: a5b8003ca5fc766a7db89d6c95b9d0e42952af86 src/mistralai/client/files.py: id: f12df4b2ce43 - last_write_checksum: sha1:a16c8702d15339200b09c62948c06f79e720d79c - pristine_git_object: a5f3adf6dd9b60a202c70edf7d2a148a626ce471 + last_write_checksum: sha1:e60cfc2841ef8888a6a52479adf76ec32ecae6ac + pristine_git_object: a843cb7b2b1b55ba20093c4d43faddb972d8f645 src/mistralai/client/fim.py: id: 217bea5d701d last_write_checksum: sha1:dc427c9e954dfb9a7fe2df8b5c544877a28cdc73 @@ -1709,18 +2386,22 @@ trackedFiles: id: 3e46bde74327 last_write_checksum: sha1:0f4ecc805be1dc3d6e0ca090f0feb7d988f6eb9d pristine_git_object: 544af7f87d6b7097935290bebd08e30e5f485672 + src/mistralai/client/judges.py: + id: b6024a41ecb4 + last_write_checksum: sha1:83cf149e1f7ccd75799da72bcc11f6ec99515c13 + pristine_git_object: 
e5dacd7fad62473a2b946afc4b05f34e3789c722 src/mistralai/client/libraries.py: id: d43a5f78045f last_write_checksum: sha1:6440b3df71fe557ecba5c23768d115efd4ceb26f pristine_git_object: b8728362b87349118ac6f163f50613dd18c43340 src/mistralai/client/models/__init__.py: id: e0e8dad92725 - last_write_checksum: sha1:0ac0c956f0f87979e871a00c32884ee3102b6d2b - pristine_git_object: 7d2dfd970d48d54d798f1661206abdc697134434 + last_write_checksum: sha1:d776a15e3fa0fd11027c50d0f3c8e77882eb29b5 + pristine_git_object: 741c80f30cdaac0f0f03c440f2436bbb453af2f6 src/mistralai/client/models/agent.py: id: 1336849c84fb - last_write_checksum: sha1:6090ddf2b5b40656dfbf3325f1022a40ae418948 - pristine_git_object: 686a6eb84ecd27e725e3773b3f7773dddac1c10c + last_write_checksum: sha1:7fede9394ca174a3f0ee3d5a01d95ec265783e2c + pristine_git_object: 1fa27f08eaf0a7737a0f505aeae920f598f6968e src/mistralai/client/models/agentaliasresponse.py: id: 3899a98a55dd last_write_checksum: sha1:d7e12ea05431361ad0219f5c8dee11273cd60397 @@ -1825,6 +2506,10 @@ trackedFiles: id: 1b73f90befc2 last_write_checksum: sha1:b46298a653359bca205b6b1975bcd1909e563dff pristine_git_object: c2cf35522236f29ca1b9f2a438dfc79a59ca3e2a + src/mistralai/client/models/annotations.py: + id: 3ae9e07de11d + last_write_checksum: sha1:a67ea607358e534448fe556c6008105a8cf89a07 + pristine_git_object: ba085e2c7784ca442c4a5da72dddf3561dda0d9d src/mistralai/client/models/apiendpoint.py: id: 00b34ce0a24d last_write_checksum: sha1:733e852bf75956acd2c72a23443627abfa090b7b @@ -1841,6 +2526,10 @@ trackedFiles: id: ce5dce4dced2 last_write_checksum: sha1:d3c2e28583d661a9583c40c237430a1f63ea7631 pristine_git_object: 68866cd2c3c640cf56258f2f98b8a2385ea6fcdb + src/mistralai/client/models/audiocontent.py: + id: 8e39736e73f0 + last_write_checksum: sha1:9f96ac9f1b3035b66f0920df1b46a59df7c6bca8 + pristine_git_object: b0a9505b05c9fc27a5562aca4bf2d1de37eb3100 src/mistralai/client/models/audioencoding.py: id: b14e6a50f730 last_write_checksum: 
sha1:92ca06dce513cd39b2c7d9e5848cf426b40598ce @@ -1857,10 +2546,30 @@ trackedFiles: id: 33a07317a3b3 last_write_checksum: sha1:6e648ff58a70a0a3bd63a24676122b80eba4baf7 pristine_git_object: 2d1e9269b51d84cd8b21643fe04accd00839b013 + src/mistralai/client/models/audiourl.py: + id: f62d73a27faa + last_write_checksum: sha1:25e4a2f40630d678779befb4802a70b7fea582c1 + pristine_git_object: 98fef9770865c6ead929902a16286e7e697291c0 + src/mistralai/client/models/audiourlchunk.py: + id: f582b26db67b + last_write_checksum: sha1:dbfedb012b00140bc3ea91ac3f7ebf7b2a17ed17 + pristine_git_object: 89ebce795e95ab55e59652468bf55690e980a697 + src/mistralai/client/models/authdata.py: + id: b4d3fb07196e + last_write_checksum: sha1:70f8bbaa8d72e5684eb5cda5d95c0f17ed61a8f7 + pristine_git_object: fb8b79723f3f0f4485c0c6bb1c52b0a5db2fa1fe + src/mistralai/client/models/basefielddefinition.py: + id: ffa42818fea3 + last_write_checksum: sha1:0cd6a43a558734d38fbc39f1120a54c4cc825fe6 + pristine_git_object: f8b8faf759b795c4a9d072c2e4b50a4d22c80f7c src/mistralai/client/models/basemodelcard.py: id: 556ebdc33276 last_write_checksum: sha1:e2c3d1effee5b434fea9b958c0dd54fa96143924 pristine_git_object: 9c9e9a2045a10f4606f11ee5886a19ccf03bbf0e + src/mistralai/client/models/basetaskstatus.py: + id: 7b381554d5c7 + last_write_checksum: sha1:8124cb3871ff7565865cd8b06181b5b0a8eb3428 + pristine_git_object: ff2f1c63a6dceceeabe6b8c6bc8bf1a7c64c70a0 src/mistralai/client/models/batcherror.py: id: 1563e2a576ec last_write_checksum: sha1:51c9e9a4d306c2de45dc0879ade62daed3fc2972 @@ -1877,10 +2586,18 @@ trackedFiles: id: 6f36819eeb46 last_write_checksum: sha1:b2a71163e37a9483e172dc13b6320749bee38f2f pristine_git_object: 911a9a0554b9b8cb6dedcb3a86a06c39890b875e + src/mistralai/client/models/blobresourcecontents.py: + id: fa924bc295ad + last_write_checksum: sha1:2d0d6f84e274e544e2d1535513726f547fc2d1b7 + pristine_git_object: c2a55f95a156006e86f5f945c70bee8025e38fea src/mistralai/client/models/builtinconnectors.py: id: 
2d276ce938dc last_write_checksum: sha1:4ceb3182009b6535c07d652ccf46661b553b6272 pristine_git_object: ecf60d3c1a83028d9cf755d4c9d5459f6b56e72a + src/mistralai/client/models/campaign.py: + id: c91d862fb405 + last_write_checksum: sha1:def484a2eefa9be72db2bfa63d93201e2a07f554 + pristine_git_object: a52bfcb90d6d8d29f4e23f71c5e95564bf3c95ec src/mistralai/client/models/chatclassificationrequest.py: id: afd9cdc71834 last_write_checksum: sha1:a29088359142ebd6409f45569168b2096014119e @@ -1889,6 +2606,14 @@ trackedFiles: id: 7e6a512f6a04 last_write_checksum: sha1:de0281a258140f081012b303e3c14e0b42acdf63 pristine_git_object: 2c515f6e9a290ebab43bae41e07493e4b99afe8f + src/mistralai/client/models/chatcompletionevent.py: + id: d85484d0205e + last_write_checksum: sha1:b709e6f710b62fee646eb7d12b24d69125522088 + pristine_git_object: 86253f5d97139c8b9043b9d9f2a71aba53b961c0 + src/mistralai/client/models/chatcompletioneventpreview.py: + id: 1cd843828e99 + last_write_checksum: sha1:f42767c2d344bc9cd9431acd7b911c81cafa9bc7 + pristine_git_object: e7fef9d08ca3d8ea1fa1f63c5847a5f8a6d74201 src/mistralai/client/models/chatcompletionrequest.py: id: 9979805d8c38 last_write_checksum: sha1:1f0390718ab06126a05e06797ef6af310ccab543 @@ -1905,6 +2630,10 @@ trackedFiles: id: 057aecb07275 last_write_checksum: sha1:7677494c0e36ccbc201384cb587abeb852a1a924 pristine_git_object: 228e7d26b8b172c3e11f01d4f260bf6e5195b318 + src/mistralai/client/models/chattranscriptionevent.py: + id: 8ca679b2c39a + last_write_checksum: sha1:b49bd47918ea1f954f3c789c8ea0579ec893c49f + pristine_git_object: b23adf744944631e00f376c3dadf6f17a24a7df0 src/mistralai/client/models/checkpoint.py: id: 1a530d3674d8 last_write_checksum: sha1:418f08c61b64fa7ffb053c6f5912e211acab1330 @@ -1985,10 +2714,62 @@ trackedFiles: id: be202ea0d5a6 last_write_checksum: sha1:1a797019770795edcd911ff5b3580bedb83c05f4 pristine_git_object: ca50a7ad521b46f275dd3a39c98911f13ee527c8 + src/mistralai/client/models/connector.py: + id: 1a4facac922d + 
last_write_checksum: sha1:4acd40b285482f549ac76ef120aa32cd4ec2f171 + pristine_git_object: 967e454f870a44c9354a8ab1493800daeff35cac + src/mistralai/client/models/connector_call_tool_v1op.py: + id: 7948899b3068 + last_write_checksum: sha1:09dfd8f2d560f33fb12cba74cadcd505831d2389 + pristine_git_object: df5783d0a78128863d32c86230e6413bdf80ead9 + src/mistralai/client/models/connector_delete_v1op.py: + id: a377930b1435 + last_write_checksum: sha1:2c9a501ab2e2b05829f2e3fd838f88a610d56781 + pristine_git_object: 74134361aeeaf83af57d7edc7d9ae26ab09d96fb + src/mistralai/client/models/connector_get_v1op.py: + id: 73ca3a446dcc + last_write_checksum: sha1:f3a7480774e8f826ba8bc509a0dce280a18ee855 + pristine_git_object: 5770ee096fe7924384a8cd31ed1e40a11eff3e35 + src/mistralai/client/models/connector_list_v1op.py: + id: 5ec0889995f5 + last_write_checksum: sha1:4460df62af1ad834656484e9d34dc3f612fa4d84 + pristine_git_object: a89da710092dbcd14522b0190ef9dbefe49ff76a + src/mistralai/client/models/connector_update_v1op.py: + id: 6f884d18ac56 + last_write_checksum: sha1:be8044958ac76ecfc486dc5cb5f0876e595dcc38 + pristine_git_object: 6b00d0b0ca6d83b4ac1fce1ab204ac09ca5ceeb5 + src/mistralai/client/models/connectorcalltoolrequest.py: + id: 7dc7ec295301 + last_write_checksum: sha1:59caca0ee1ec5c43b8191087b1c202ea3ba3e2bc + pristine_git_object: 4e9a4d45dc4eb242ceda1c31736901e451dec862 + src/mistralai/client/models/connectorsqueryfilters.py: + id: 3b9fc81aa726 + last_write_checksum: sha1:dc90b518462978d5bdba53a1cfcea59f8afe8612 + pristine_git_object: 1aea18b01273aa5b1321e138f1b79e5c50edee07 + src/mistralai/client/models/connectortool.py: + id: 41ca596b44f8 + last_write_checksum: sha1:ead8ba36ca4a3b7c0edd06d38aff4a30aaddded2 + pristine_git_object: 26a8b7b75cb8283b00b379902b717c9e7d6e6329 + src/mistralai/client/models/connectortoolcallmetadata.py: + id: 2d27189e58e6 + last_write_checksum: sha1:3faa513be03ab9be9d9081976dab971e0dc2f231 + pristine_git_object: 
9f3be6c4e6604f7ed2d2deeaf78569b61697ad45 + src/mistralai/client/models/connectortoolcallresponse.py: + id: c4f7a932bd2e + last_write_checksum: sha1:2666a1fb1c6b6a67f9e3147ab84a0ad7340740fe + pristine_git_object: 47071a944e80a79053a14dc57f7a3dee28a8d173 + src/mistralai/client/models/connectortoollocale.py: + id: 247ebe411537 + last_write_checksum: sha1:e68370e4bcf2a1f5f2f9036416fc889821668244 + pristine_git_object: ec1359c8ccf883bff066fd1500503f5e28913e05 + src/mistralai/client/models/connectortoolresultmetadata.py: + id: 86730e16aa67 + last_write_checksum: sha1:0b49abdb7cfdf461d225387ef03ed5e2413fdb2b + pristine_git_object: 520fb99b6430b317c326bdd9be86b63bd3822bd1 src/mistralai/client/models/contentchunk.py: id: c007f5ee0325 - last_write_checksum: sha1:b921b03b4c1e300b0e3f51ea9eadd4d7c4b7a0ea - pristine_git_object: e3de7591a089a3739af17108cecdc2d4240f10bf + last_write_checksum: sha1:872274333d03c88665b115a2eb99c2650ac7e3bb + pristine_git_object: 6d521e1a49c4ed1cdf6a3a3b85bfb17aa631f994 src/mistralai/client/models/conversationappendrequest.py: id: 81ce529e0865 last_write_checksum: sha1:bdae860241893ec3ab3f22bd57c45dede2927da3 @@ -2013,66 +2794,134 @@ trackedFiles: id: 011c39501c26 last_write_checksum: sha1:95e3abe55199f2118e6fb7e5d8520af6a929449a pristine_git_object: 84664b62337dcdc408bb01e0494fa598e6a86832 + src/mistralai/client/models/conversationpayload.py: + id: 4581218c84c1 + last_write_checksum: sha1:51c3787091d7d2a09a0625d854a74db3c46d6a1a + pristine_git_object: 6bfc5894b329531c7845d0457209a0d5c35c0145 src/mistralai/client/models/conversationrequest.py: id: 58e3ae67f149 - last_write_checksum: sha1:f7a67082e06c1789f4c6a4c56bfef5f21cce5034 - pristine_git_object: 83d599ebf984f1df2390d97dbe651881f7dee0e2 + last_write_checksum: sha1:56e87a8f6e4670392e10f392bec44a923f3e14f8 + pristine_git_object: cb5dc6a5f2d69331263c12870ff5603e703d6060 src/mistralai/client/models/conversationresponse.py: id: ad7a8472c7bf - last_write_checksum: 
sha1:99148d75abcb18c91ba0a801174461346508f5fb - pristine_git_object: f6c10969a931eaf1a4667b0fcff3765f57658b15 + last_write_checksum: sha1:86c11bbd5003fc1a6838636257e07404da981ca7 + pristine_git_object: 2fc8504dbc7e2157623c0a6077e18ca96a0a0f5c src/mistralai/client/models/conversationrestartrequest.py: id: 681d90d50514 - last_write_checksum: sha1:99123cee7c54f44c02b56111305af399143b4e5a - pristine_git_object: 7ae16aff4de36a91093d3021b66283e657b00897 + last_write_checksum: sha1:9a015a4eb700aa97311231704da0f0eef4b4c7c3 + pristine_git_object: d5079689256f25d2d252aa3ad01ece94994b5abd src/mistralai/client/models/conversationrestartstreamrequest.py: id: 521c2b5bfb2b - last_write_checksum: sha1:abfd14652b4785c36de84a59593b55f7a6a2d613 - pristine_git_object: 0e247261d997ac3d8ff0155ba54cc4cafe9ac65a + last_write_checksum: sha1:42f885e4419853a2d5ee9766147b6c2c80cd1faf + pristine_git_object: 5cea41465634f71912cc2fdf79b63c7221939be0 + src/mistralai/client/models/conversationsource.py: + id: 24d6a0861d4b + last_write_checksum: sha1:e28921788ab1b6d2b6daf08b714bf34b2047eb6c + pristine_git_object: a3b93b61b842002fc9fbb05815b800fb1294cb52 src/mistralai/client/models/conversationstreamrequest.py: id: 58d633507527 - last_write_checksum: sha1:7dc25a12979f4082ed7d7e37584bb9c30297f196 - pristine_git_object: a20dccae1a60753ed95f59da0df78c204c19d515 - src/mistralai/client/models/conversationthinkchunk.py: - id: 77e59cde5c0f - last_write_checksum: sha1:5db067661a5d4b0c13db92ad93da1aab9e0e7a34 - pristine_git_object: e0e172e3edbe46c000e82e712c135b96a65312e9 + last_write_checksum: sha1:c349ea248a85a9a329fbf878507b32fc761c4639 + pristine_git_object: 2559a8872df6a9bf9dd088f05259c5464df19a7c src/mistralai/client/models/conversationusageinfo.py: id: 6685e3b50b50 last_write_checksum: sha1:3e0489836936a7a77fa3b41adde1eb459ecd176d pristine_git_object: 1e80f89ee4f7a3d464df2bf39990b467029e86c1 + src/mistralai/client/models/create_dataset_record_v1_observability_datasets_dataset_id_records_postop.py: + 
id: 1ddc53a46c74 + last_write_checksum: sha1:5df5b6676f6c115da0b1c242a6f3cc7f8c077927 + pristine_git_object: 03156fa31f51655dfd9bda21082710b1d1f42907 src/mistralai/client/models/createagentrequest.py: id: 442629bd914b - last_write_checksum: sha1:273dde9338cc1eb166ee40f4c6215f90cae908ab - pristine_git_object: 54b09880eefe348d2e003ed1b238b67cb58b8e34 + last_write_checksum: sha1:28dd507f0d965b6e4ede3fb2248681e034b2bb4a + pristine_git_object: 8acbbd1b90bce84275d5e76ca74f2a2105c65f3f src/mistralai/client/models/createbatchjobrequest.py: id: 56e24cd24e98 last_write_checksum: sha1:e648017622cd6e860cb15e5dd2b29bf9f2a00572 pristine_git_object: 9a901fefee0ea6a825274af6fd0aa5775a61c521 + src/mistralai/client/models/createcampaignrequest.py: + id: 60012b559aee + last_write_checksum: sha1:b45a2ee90bf901bec844aa20e9184ca1afba4172 + pristine_git_object: b3957df9ef369ecc1746ee3efd1b463bc10b4be7 + src/mistralai/client/models/createconnectorrequest.py: + id: 3da192d6491a + last_write_checksum: sha1:c0ea3626315e622bb5ef9207662b2c58c420d9e2 + pristine_git_object: 8fd3d3abfdb32742bc8658b9f36a1463768da16d + src/mistralai/client/models/createdatasetrecordrequest.py: + id: 9455e38a8c31 + last_write_checksum: sha1:df3e8d7425500f23101c25267738e617eeb2d467 + pristine_git_object: 6fd2bf96a28dceb4096d4e9bebe51495ef7a2103 + src/mistralai/client/models/createdatasetrequest.py: + id: 046a094d3ef9 + last_write_checksum: sha1:5c38317b544af6ca66fb895c627634d160bc3975 + pristine_git_object: ecffc52a82e3e0baca00cf94de047ff5124e3a04 src/mistralai/client/models/createfileresponse.py: id: fea5e4832dcc - last_write_checksum: sha1:b7f3ba95a09a3225eae80b53152fe2b7d3806fbe - pristine_git_object: 768212803bc3535ac8a27a9c0d48f147e3d536b7 + last_write_checksum: sha1:934437a8c7b19b2642c1979f876ee49fc23c2529 + pristine_git_object: 1f03b275c79e47051f90b1b4cd471f3d291a714f src/mistralai/client/models/createfinetuningjobrequest.py: id: c60d2a45d66b - last_write_checksum: sha1:2e8e608140860bba9ecfa9498d61cf807f96680a 
- pristine_git_object: e328d944ce2a71ffbec027965d31075070647dbc + last_write_checksum: sha1:0f1f4f7ad97884415de5e340e8192df9ebd2fd34 + pristine_git_object: be97bd07039197523769ac30de2c42deb8c9a7eb + src/mistralai/client/models/creategithubrepositoryrequest.py: + id: 0814afcf63bb + last_write_checksum: sha1:22826d609c93b24b5088ac945f08f4af2ab9f3ea + pristine_git_object: 61ffde27d5b5dfbbf4c57c6bc8ae825ab866c0df + src/mistralai/client/models/createjudgerequest.py: + id: e606837a626e + last_write_checksum: sha1:d5c16104089075af1b8b2fe922006aea9d74b0e3 + pristine_git_object: 7c30aa5ffde4e23ce9ebcb0d67a0e1a3c875c698 src/mistralai/client/models/createlibraryrequest.py: id: 1c489bec2f53 last_write_checksum: sha1:45fa65be82712ce99304027c88f953f0932bdae4 pristine_git_object: 58874e014275b06ce19d145aaa34a48d11ca0950 + src/mistralai/client/models/dataset.py: + id: cbf14670ee00 + last_write_checksum: sha1:4636e54c4ca7825b93d55ef1f69b640557b26467 + pristine_git_object: 2acd911d010b2274f09061fa48c6849d0f5160d2 + src/mistralai/client/models/datasetimporttask.py: + id: c20f7db9633c + last_write_checksum: sha1:b288850d2a124aae3287ede5269d741a8b644d34 + pristine_git_object: 4ef52e9ec7ecd356360035bfcded7fbc9eec4f32 + src/mistralai/client/models/datasetpreview.py: + id: 128c29db3f37 + last_write_checksum: sha1:2e659243a524db189fe496969187c40986b3e87f + pristine_git_object: 55493568244a888f16b1c17dafd5f44f2629ed58 + src/mistralai/client/models/datasetrecord.py: + id: 87ddebf2a0ce + last_write_checksum: sha1:07bbb2c5c2023511cc83ef27edc442300701b790 + pristine_git_object: 9c9f42e55ea1b26071c158b095f33815ac6b55a1 + src/mistralai/client/models/delete_campaign_v1_observability_campaigns_campaign_id_deleteop.py: + id: 3d1cd35fecc6 + last_write_checksum: sha1:276676f082fa84f14ab74502302d32361277d4c8 + pristine_git_object: f33a3747bcc06b8131e178ea86fa5256e2f156a5 + src/mistralai/client/models/delete_dataset_record_v1_observability_dataset_records_dataset_record_id_deleteop.py: + id: 
66b2054bda8c + last_write_checksum: sha1:06f6736ce3d02ea899edc1e90b2f1e6af053a9bf + pristine_git_object: ffeefca659a79f77480655bc960cb8c74dc4899d + src/mistralai/client/models/delete_dataset_v1_observability_datasets_dataset_id_deleteop.py: + id: 446419cd07d2 + last_write_checksum: sha1:fc4e716afa0920b4c07fb9bf90c69c3e50a3f958 + pristine_git_object: af8697706726676862d2b27317b64ebd52510282 + src/mistralai/client/models/delete_judge_v1_observability_judges_judge_id_deleteop.py: + id: 415724e139bd + last_write_checksum: sha1:0fc88a1a4edefd593e8b2d6f18bdd77383142209 + pristine_git_object: c2f7b79701af71ecf95839c1037e50776469ea41 src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py: id: 767aba526e43 last_write_checksum: sha1:73568f2f450bf9c23aca3649372a92e1b9a2fc54 pristine_git_object: 199614f53501f34088cb112d6fe1114e1e588d8a + src/mistralai/client/models/deletedatasetrecordsrequest.py: + id: e7ef16596e54 + last_write_checksum: sha1:971e81be9064277df5dd656e912b9f319474e742 + pristine_git_object: 8c33a9634f99fcf30475545c9af175434b5f41cf src/mistralai/client/models/deletefileresponse.py: id: 3ee464763a32 last_write_checksum: sha1:2c0df66fc8c4384d50e54ac03577da3da2997cf5 pristine_git_object: ffd0e0d015e38e5f6113da036ebeba98441444f4 - src/mistralai/client/models/deletemodelout.py: - id: ef6a1671c739 - last_write_checksum: sha1:d67ac7c3fa143be40c74455c7206c94bfb5a2134 - pristine_git_object: fa0c20a419c59b8fc168c150b28d703398ea7f40 + src/mistralai/client/models/deletemodelresponse.py: + id: 8957175b7482 + last_write_checksum: sha1:486183140d7fde18c05ae8a6c59303a23bd8627a + pristine_git_object: fac884bc1d833dbaeccc130873f3fea75ef851f5 src/mistralai/client/models/deltamessage.py: id: 68f53d67a140 last_write_checksum: sha1:b18350de03a8685bea5ac52e1441415b5e58bdf4 @@ -2093,6 +2942,10 @@ trackedFiles: id: 4309807f6048 last_write_checksum: sha1:33cdaccb3a4f231730c7fa1db9f338a71e6311b2 pristine_git_object: 43444d98b8b7fb430f9c33562c35072d9c79a263 + 
src/mistralai/client/models/embeddedresource.py: + id: 94a23f656f72 + last_write_checksum: sha1:7984f6ed695b8df81ad70b72a27d481950453abe + pristine_git_object: 54cb17fdface0703cbe64079543bd399d7abebe0 src/mistralai/client/models/embeddingdtype.py: id: 77f9526a78df last_write_checksum: sha1:a4e2ce6d00e6d1db287a5d9f4254b0947227f337 @@ -2121,6 +2974,46 @@ trackedFiles: id: e5a68ac2dd57 last_write_checksum: sha1:8ed848fe2e74c7f18ee8f4dcba39ad1c951c16d2 pristine_git_object: c40ae2b1a1b8131a90c637e3268872b97b22683e + src/mistralai/client/models/executionconfig.py: + id: 14518c40a13b + last_write_checksum: sha1:351fb4a74622cb70969b728ac65b62ca670fc7e5 + pristine_git_object: 56b58ae369ffb75c9556ef20af79b985fe3db710 + src/mistralai/client/models/export_dataset_to_jsonl_v1_observability_datasets_dataset_id_exports_to_jsonl_getop.py: + id: 74f5f3183b64 + last_write_checksum: sha1:8d7dde90d9c55b520aa9300a25c843a0b866638b + pristine_git_object: 07b4e58fe2a7065e1f61e571355a7e6997abcb56 + src/mistralai/client/models/exportdatasetresponse.py: + id: 22cc29d258db + last_write_checksum: sha1:83a787b852101f1862f0aabc1638f68753b5d607 + pristine_git_object: f1600cac74e1d50979277d3eb6830ea5e84b69ec + src/mistralai/client/models/feedresultchatcompletioneventpreview.py: + id: 19109368b436 + last_write_checksum: sha1:7ed72c5afeaa35a68bef880405b122db6ca1b57b + pristine_git_object: e87e007e571df43ab2453a5506b19836ed23698a + src/mistralai/client/models/fetchcampaignstatusresponse.py: + id: b74b57603a4c + last_write_checksum: sha1:62b1fd7d42f79518e15abdb07b11cac817837e1e + pristine_git_object: 6004892e377a9852e406bf911bd91afe891ea7b3 + src/mistralai/client/models/fetchchatcompletionfieldoptionsresponse.py: + id: 2191cab4638b + last_write_checksum: sha1:971b32a645c582a4a2cccb5329481f29ed5e6792 + pristine_git_object: d3cbd653cc62ef4712fc41e2e64d74a12f413d17 + src/mistralai/client/models/fetchfieldoptioncountsrequest.py: + id: 2fd8caa0697d + last_write_checksum: 
sha1:c3798f54e192e91552d0d8004002ede779899ce9 + pristine_git_object: 19698b9b475bd281ebc5488f8d529b0111afefc3 + src/mistralai/client/models/fetchfieldoptioncountsresponse.py: + id: 16ce9431fc7b + last_write_checksum: sha1:a633398597cb0ccf0c3e36aca8566f679b527850 + pristine_git_object: 4c9f9d0f4027655c4a9e496321314241b6949296 + src/mistralai/client/models/fieldgroup.py: + id: 201353e45f89 + last_write_checksum: sha1:58c9c30558556e8b5f24f8b342d55e998d6154d9 + pristine_git_object: 07f9e7c7bb798338a593e59c737398b3595574e1 + src/mistralai/client/models/fieldoptioncountitem.py: + id: 5b707a343930 + last_write_checksum: sha1:20f2d847c42591c222d06ac5fce04d75ad9d90d8 + pristine_git_object: 0b50c148c524f59fbc02d2b22ee961dc4f7a2d6f src/mistralai/client/models/file.py: id: f972c39edfcf last_write_checksum: sha1:609381a40a4bfdda2e7e750a848cd2bb38d6ac0f @@ -2155,12 +3048,28 @@ trackedFiles: pristine_git_object: 5f8de05f1bba07517dc2ee33a4f05122503b54b5 src/mistralai/client/models/files_api_routes_upload_fileop.py: id: f13b84de6fa7 - last_write_checksum: sha1:d38a86b9e7d338278e14c68756654d85bc330070 - pristine_git_object: 54ff4e4951a58e13993be0f5d2c16b0cb11c0978 + last_write_checksum: sha1:de49ee09350784e9d5c7adcafdfbf86c9ad7d659 + pristine_git_object: b66836391c8bc43e8535688c5829ad9cab04af49 src/mistralai/client/models/fileschema.py: id: 19cde41ca32a - last_write_checksum: sha1:0b3acb889a2c70998da4076e2f4eef3698e8b117 - pristine_git_object: e99066a9eb19daebcf29f356225635a297c444e1 + last_write_checksum: sha1:f65bf25e367c4e9be2be84af9b41e9d9c6719d0e + pristine_git_object: 11caa083108941befa3bc1e499bbcbf578811884 + src/mistralai/client/models/filevisibility.py: + id: 11a670fa3b71 + last_write_checksum: sha1:8d44761845eea1d9bb239864863fc058e7c09bf2 + pristine_git_object: 56cfe8810059e7fcb0eae8f50dcdc1f91e2db157 + src/mistralai/client/models/filtercondition.py: + id: ba62f90873c5 + last_write_checksum: sha1:f8a602ea82374e57ba80e3e1ad1eff572dcf2936 + pristine_git_object: 
fe62d6ddc1187e1177ac834a85bd4e1b6a86ee52 + src/mistralai/client/models/filtergroup.py: + id: dbc0c34fbc2f + last_write_checksum: sha1:074e2088caee0c3af4698927d840f5b00123074d + pristine_git_object: 06616a010b984e17c2a73aa6164120aa1a2ab5bf + src/mistralai/client/models/filterpayload.py: + id: 56757b849f7a + last_write_checksum: sha1:4ef55aaa20ccfc57cad906558886d0920220ffcd + pristine_git_object: 188375f1070630f46f3950271322fb9bc1b64fed src/mistralai/client/models/fimcompletionrequest.py: id: cf3558adc3ab last_write_checksum: sha1:20bca1f6a0ab6e84f48b6e332f0c3242da84ae45 @@ -2221,10 +3130,78 @@ trackedFiles: id: 2e9ef5800117 last_write_checksum: sha1:bce744d77a3dac92d4776a37be497311674bdc7d pristine_git_object: eae872643c85115a825c2feda11d9a6c12a06b99 + src/mistralai/client/models/get_campaign_by_id_v1_observability_campaigns_campaign_id_getop.py: + id: "288520184035" + last_write_checksum: sha1:ba2f5a6ef8e1f7a2e859f014510ea4fffe4fd7ea + pristine_git_object: a8446737ead106f400900bc6b73d57db306bb47f + src/mistralai/client/models/get_campaign_selected_events_v1_observability_campaigns_campaign_id_selected_events_getop.py: + id: 270800e2c264 + last_write_checksum: sha1:ceea6d622e9c62e38241a4819b771e78bf553b46 + pristine_git_object: e1b04950b7d7d5b738ce0244cbb04261fd7ee4b3 + src/mistralai/client/models/get_campaign_status_by_id_v1_observability_campaigns_campaign_id_status_getop.py: + id: 853a43ee6b98 + last_write_checksum: sha1:9e3fd0b27e1ff30e596fb2a81b0770cb6afbcf39 + pristine_git_object: 57ac44905fd7db69e96533092c6d6e8bbe0eb083 + src/mistralai/client/models/get_campaigns_v1_observability_campaigns_getop.py: + id: 598a7340fc98 + last_write_checksum: sha1:ca0f08deb402ee817dafbcd3bf7b05b9d3b7c5a3 + pristine_git_object: a6a329c729b68f11c211f570beaab63c539fc257 + src/mistralai/client/models/get_chat_completion_event_v1_observability_chat_completion_events_event_id_getop.py: + id: 98aff68bc7c7 + last_write_checksum: sha1:dcdfebf2d374e5bbaa61e34c985d100b4a736581 + 
pristine_git_object: f39b0f562ee8a75e611f6ad2929c43c83c230e99 + src/mistralai/client/models/get_chat_completion_events_v1_observability_chat_completion_events_search_postop.py: + id: 36957d0f73aa + last_write_checksum: sha1:cfd5144c778defd849d029ad13625e4674660a69 + pristine_git_object: 5a8b7450a685ade70cc728dc269a0f2577ba600d + ? src/mistralai/client/models/get_chat_completion_field_options_counts_v1_observability_chat_completion_fields_field_name_options_counts_postop.py + : id: 0b1bd06b24af + last_write_checksum: sha1:e3489181ee8230c478e5785e93c4f5270950fd33 + pristine_git_object: f579260de403280d3768115d185d60090203a721 + ? src/mistralai/client/models/get_chat_completion_field_options_v1_observability_chat_completion_fields_field_name_options_getop.py + : id: 4fb7f3c0e51b + last_write_checksum: sha1:534ea0a18c71be6c04c64817c4e2029e6a8b83a8 + pristine_git_object: c044bb92fbc871852bb358ade46f1157d01b7e65 + src/mistralai/client/models/get_dataset_by_id_v1_observability_datasets_dataset_id_getop.py: + id: cfd3282e7f33 + last_write_checksum: sha1:5f3de85d18b2cbd085a01946ac1c193d41e1ab71 + pristine_git_object: 0259485d3d2d36f1042d3807abf38f4b122b9b5e + src/mistralai/client/models/get_dataset_import_task_v1_observability_datasets_dataset_id_tasks_task_id_getop.py: + id: b45f77cb328c + last_write_checksum: sha1:70df627813ed0c25aa8d4d2d51a3861b942a014c + pristine_git_object: cf900d0e4ef6eff4114c5e7737d03d0ab0e07d6f + src/mistralai/client/models/get_dataset_import_tasks_v1_observability_datasets_dataset_id_tasks_getop.py: + id: 07ece48f664d + last_write_checksum: sha1:4cfeaf15f32193c8278759bf0274f5e4426ff057 + pristine_git_object: 43e0dc5b2f37ef1624547cd6bcc63e3780837629 + src/mistralai/client/models/get_dataset_record_v1_observability_dataset_records_dataset_record_id_getop.py: + id: 6ea6a0dab32f + last_write_checksum: sha1:8a8027a201a49fa1fe6717a1dbae4a6c242b8dbe + pristine_git_object: 0cb239dfd04d98488b8ce7ee143bb3cc589326e8 + 
src/mistralai/client/models/get_dataset_records_v1_observability_datasets_dataset_id_records_getop.py: + id: 77967c965aea + last_write_checksum: sha1:69e3467aa6225ce25c1f008649b683c43f6a34b8 + pristine_git_object: 7617a5d204f4f2fa66610ca711b803b1b5fbc0ad + src/mistralai/client/models/get_datasets_v1_observability_datasets_getop.py: + id: 3e4f4e2447ac + last_write_checksum: sha1:dc0305b848582ea310b935c15da10f044a832978 + pristine_git_object: ea52dd5180f5580cc5b94ecdbaa5ef67d89e5621 + src/mistralai/client/models/get_judge_by_id_v1_observability_judges_judge_id_getop.py: + id: 4201c3c5a891 + last_write_checksum: sha1:44c9a7d21ea727e0f849cfc5dc4a02220bcf6e74 + pristine_git_object: 375db2e9b8ff839f6ec3bdfdf06de9b7a24fbe7c + src/mistralai/client/models/get_judges_v1_observability_judges_getop.py: + id: fa04e3db7781 + last_write_checksum: sha1:bfc31bc3de6ddacf3a01cf3f5a74be3fea83b3a0 + pristine_git_object: 7228cfa13211d7bca8ccbca8f9a3137e7d6689d6 + ? src/mistralai/client/models/get_similar_chat_completion_events_v1_observability_chat_completion_events_event_id_similar_events_getop.py + : id: d651bdc06c1b + last_write_checksum: sha1:25331ac322d230cb7d9fc2a6aff2d7db561fdf2f + pristine_git_object: 7689415dd70da6eec8d0b416cbf020c4b9adeecf src/mistralai/client/models/getfileresponse.py: id: 81919086e371 - last_write_checksum: sha1:fc0232e54c0de355058c5bd82e424953b1659b56 - pristine_git_object: f625c153799dcd38e4990504d48371112b65cd15 + last_write_checksum: sha1:5a70dc35ee1dc2360eacda5e0dbe08b346f8b480 + pristine_git_object: 82280899020739d61aca44ef731d175e6a66f325 src/mistralai/client/models/getsignedurlresponse.py: id: cee4e4197372 last_write_checksum: sha1:ab9adbc06e7f02e791dc549ad1850ce1b1a250a7 @@ -2233,10 +3210,14 @@ trackedFiles: id: 4bc83ce18378 last_write_checksum: sha1:21aa04bc426158ccbe1ded3bc65b46e6869e897d pristine_git_object: 84b01078c2192de5d6668a6943d416a2ff30db5f - src/mistralai/client/models/githubrepositoryin.py: - id: eef26fbd2876 - last_write_checksum: 
sha1:18bd07155fff4b99d114353fee95e6bd828aeacd - pristine_git_object: 38bcc2087630f2fd4e9e5fa149449c32e21fdb07 + src/mistralai/client/models/guardrailconfig.py: + id: c72b74412547 + last_write_checksum: sha1:0054fd2ef6b4b169d5eddd9863b893ee160a86e8 + pristine_git_object: 9af986cf73791a19aeecc6d3b9cc28326b7b45f2 + src/mistralai/client/models/imagecontent.py: + id: 3abe7faee278 + last_write_checksum: sha1:22229b74dd00ebe78ea3589964367e0785f8bcf0 + pristine_git_object: 13c3f449b20aff0da0325cb3c955c8e64aec61c6 src/mistralai/client/models/imagedetail.py: id: c1084b549abb last_write_checksum: sha1:375db5c8fa87712dc37e46d0bf72283ae6cd6400 @@ -2253,6 +3234,26 @@ trackedFiles: id: 746fde62f637 last_write_checksum: sha1:0ac388d25cae5348ffb3821706c3a8b64e716ff5 pristine_git_object: 7134b46e7428cee52eda859cb78387c99f7e1f5a + src/mistralai/client/models/importdatasetfromcampaignrequest.py: + id: ee475b85bfc7 + last_write_checksum: sha1:34367bb23d1528b87e67f36d06cc362a05a02c6b + pristine_git_object: 5db45e8ea9a0e431eb1cb28638e2f4c2342a37f6 + src/mistralai/client/models/importdatasetfromdatasetrequest.py: + id: 77aea4882ccb + last_write_checksum: sha1:89fe7ea38a0b073ec539c88dc6b0865f28caeb15 + pristine_git_object: ebf2a649f3b00f4472bd9db0c65cc1a53140cf49 + src/mistralai/client/models/importdatasetfromexplorerrequest.py: + id: 852a3cbc1631 + last_write_checksum: sha1:ee8f1a1d6ebb5b450a00e37724a9d497da22fe10 + pristine_git_object: 85df7af6a855581a03bd745105e8f8134e66aee4 + src/mistralai/client/models/importdatasetfromfilerequest.py: + id: b2882fa57029 + last_write_checksum: sha1:fb867b50ff6d9a8ab14ec32592c951f834854d32 + pristine_git_object: 9a4867765aab522a33f3846a9e2bf0162270c916 + src/mistralai/client/models/importdatasetfromplaygroundrequest.py: + id: 8d809b14b144 + last_write_checksum: sha1:3c858d0475e826d072c96a84d868e97593a399a3 + pristine_git_object: f5bd720db16c37504e1993cc6e651712551d2612 src/mistralai/client/models/inputentries.py: id: 44727997dacb last_write_checksum: 
sha1:9e2a776be59c5043ea4179a60ac082faf064cc3d @@ -2317,6 +3318,46 @@ trackedFiles: id: e1fc1d8a434a last_write_checksum: sha1:d01507ab0a1f6067cbc65aaba199de340ccc68aa pristine_git_object: dfababa694305c96f98ddebf2f09e448e737c855 + src/mistralai/client/models/judge.py: + id: d6adc687c2d9 + last_write_checksum: sha1:bce628dde18d918345c497037d6f2a9c2e60dda0 + pristine_git_object: 1fb1d3866bb719696e5c95d9cefc834f828c65e9 + src/mistralai/client/models/judge_chat_completion_event_v1_observability_chat_completion_events_event_id_live_judging_postop.py: + id: 98c823e7cc1b + last_write_checksum: sha1:74dd8fba663c0338a24c497949572b5e5df7e494 + pristine_git_object: a6b9c969d506481b3c3bf5c86611905f30f6ec88 + src/mistralai/client/models/judge_dataset_record_v1_observability_dataset_records_dataset_record_id_live_judging_postop.py: + id: 4749566fd16d + last_write_checksum: sha1:cc2201d706b884e41bd83333d4db37970cf7bcd5 + pristine_git_object: 4d54fa42aaad05d1deb44b16bd93a0eaa1948a1d + src/mistralai/client/models/judgechatcompletioneventrequest.py: + id: 4fad8a510f7d + last_write_checksum: sha1:eb193a8ee840c016a383eb0c33b569396222e763 + pristine_git_object: 59c68801ce1270a8058a4d775a4aa8f7b8c96e2b + src/mistralai/client/models/judgeclassificationoutput.py: + id: 683ae72d0efa + last_write_checksum: sha1:36cae71fa78777aad28bba3dacbfe931169fc5cc + pristine_git_object: aa799682d6a6cd360856525d174621a88e73f8bd + src/mistralai/client/models/judgeclassificationoutputoption.py: + id: c238f17d786b + last_write_checksum: sha1:1adbac09caf15374b52b61f257d838db2e486690 + pristine_git_object: 64fad49fa2336b0cbaa69dac7493a18593021275 + src/mistralai/client/models/judgedatasetrecordrequest.py: + id: 9ad8915328dd + last_write_checksum: sha1:c37f9c158061e4d3c6c6eda2d7ee8a5d5e7f5afa + pristine_git_object: 11499067e594adae0510334e455b38b23b58ca20 + src/mistralai/client/models/judgeoutput.py: + id: 8c8099403e62 + last_write_checksum: sha1:7eee16a1cbff3978eca638e6d74ada983244d8ec + pristine_git_object: 
2224a797049eb11cd05dde1376cab7fbdb779168 + src/mistralai/client/models/judgeoutputtype.py: + id: 3f07e1eb25f9 + last_write_checksum: sha1:35de2d9f5d03caa87d4a19cccf325a9fb67f543c + pristine_git_object: 4fa99498fb6a60e9d4d8358660eb0ecd57d34178 + src/mistralai/client/models/judgeregressionoutput.py: + id: c61d451066dc + last_write_checksum: sha1:0687cabd00d205ec50e238872e57547ee4a3dc3a + pristine_git_object: 63394d8bdc8aa95fa8bfe99b39ae3863aee01ff4 src/mistralai/client/models/legacyjobmetadata.py: id: 0330b8930f65 last_write_checksum: sha1:3c2f669a05cc01227f62d6a8da1840d9c458d52f @@ -2371,8 +3412,8 @@ trackedFiles: pristine_git_object: 7a51d6053aa2cf2e6524a80487fe9549eec3dfa1 src/mistralai/client/models/libraries_share_create_v1op.py: id: feaacfd46dd3 - last_write_checksum: sha1:72e07fb60edbe1989865ba2ac90349edeb183f7e - pristine_git_object: 00ea74824b2efc4150d2e547e2eee416e5f6f2ee + last_write_checksum: sha1:cdb7e60f1aceb2c7aa54fe2b9ba5dafc2bb70995 + pristine_git_object: 9751045c01e389ad5b77891b8a311dc0a9e04e09 src/mistralai/client/models/libraries_share_delete_v1op.py: id: 7f3a679ca384 last_write_checksum: sha1:897857c11cf0c14a0a81ef122dec4395dc16c0ce @@ -2389,10 +3430,38 @@ trackedFiles: id: 028a34b08f9c last_write_checksum: sha1:65f02f963a0540385681b88c7c7fba98d0d704f4 pristine_git_object: 1953b6fbc6d7ad245ccacd9d665fb29853b00af7 + src/mistralai/client/models/list_models_v1_models_getop.py: + id: 1843a7aa68e5 + last_write_checksum: sha1:7058318c4642e630886ba60d3a6dda7ff5dd909b + pristine_git_object: 70b0ab82a26bdbc6c2ea63e6ecb2b3b3046d7eb3 src/mistralai/client/models/listbatchjobsresponse.py: id: 99d94c86a871 last_write_checksum: sha1:7530be5f80a0756527be94758e800e8118e53210 pristine_git_object: 35a348a1160dcf6d82d58c70cea07e11730359fb + src/mistralai/client/models/listcampaignselectedeventsresponse.py: + id: 8e28cb9aff1a + last_write_checksum: sha1:7ada7958f9c87fa6ae7a329fd03cd626a279ca93 + pristine_git_object: a6133eccdfd4de563b829e0e3ce99fee7f38c2c3 + 
src/mistralai/client/models/listcampaignsresponse.py: + id: 307695cde5c2 + last_write_checksum: sha1:bb03fc71d3c940945c928a9dd150b06edbdd34f6 + pristine_git_object: 741b1b21ee3f6c5a50e7553329d3be021f916c41 + src/mistralai/client/models/listchatcompletionfieldsresponse.py: + id: 2f8837a6a5c3 + last_write_checksum: sha1:49aca9a0bc6779464db07a229253dd505483b639 + pristine_git_object: d260463aad003c4755c3ee9125d05ef3ae5bbd4c + src/mistralai/client/models/listdatasetimporttasksresponse.py: + id: f635c1a3d02b + last_write_checksum: sha1:5df6977a3511d0f03c2a550d266a3ed6115d5a0e + pristine_git_object: 15bea396f3dfea85101efb4b8ef7fa19efbf8770 + src/mistralai/client/models/listdatasetrecordsresponse.py: + id: 13b97e8095c4 + last_write_checksum: sha1:02ed64f409f2d266685baaee0d81fc921a522303 + pristine_git_object: 2341577a0847bd02d2f458db3c47d9502dfbfd0d + src/mistralai/client/models/listdatasetsresponse.py: + id: e18de4849423 + last_write_checksum: sha1:746cbde66531f4ec9f526d28a3e4206016dc795b + pristine_git_object: a35e9a73e4e7c63fd5cbd85cb1c51c4804249254 src/mistralai/client/models/listdocumentsresponse.py: id: f593d8e66833 last_write_checksum: sha1:0d842168856056ff681b2a1c36b87df8e0d96570 @@ -2405,30 +3474,38 @@ trackedFiles: id: 118e05dbfbbd last_write_checksum: sha1:f0582740a6777039e9695d97f072b5a3c34b483e pristine_git_object: 1e434c5986bf577e2b42cca943cc6896a83d1fa2 + src/mistralai/client/models/listjudgesresponse.py: + id: db389a8abc34 + last_write_checksum: sha1:4ba57c7b7966b7eb81472d8ecc0b5ce726458360 + pristine_git_object: 0284cb99375d1205339a5de99a527c212ac2dfbd src/mistralai/client/models/listlibrariesresponse.py: id: df556a618365 last_write_checksum: sha1:55afb46b1fa797bc46574e5256cd063574c6fcbf pristine_git_object: 337fe105731d8f3ced1f8f1299ff4081b9d5bfbe - src/mistralai/client/models/listsharingout.py: - id: ee708a7ccdad - last_write_checksum: sha1:18e6501b00a566121dfd6a1ce7b0e23fef297e45 - pristine_git_object: 443ad0d6a275c1c8bae4adda3e67621b068c0412 + 
src/mistralai/client/models/listsharingresponse.py: + id: 487c6addf089 + last_write_checksum: sha1:cebb9e6ab7db8c067a3403211765ebfffec0190a + pristine_git_object: f3e6dc8714311989d1e6c7275c8944e228f3f0c5 + src/mistralai/client/models/mcpservericon.py: + id: a5b508a322d7 + last_write_checksum: sha1:090bcf0f2856c453d32f00fd434988300572b811 + pristine_git_object: 6068c1c4b3b205b8e7e7db0c31a9b2dfbfbe1f7e src/mistralai/client/models/messageentries.py: id: e13f9009902b last_write_checksum: sha1:43aebdc9eaecc8341298dc6b281d0d57edf4e9e6 pristine_git_object: a95098e01843fe3b4087319881967dc42c6e4fef src/mistralai/client/models/messageinputcontentchunks.py: id: 01025c12866a - last_write_checksum: sha1:6a0988d4e52aa2e9f7b09ae1e3266ecf9639c22b - pristine_git_object: 1e04ce24d62db6667129b35eb28dabcfd4135ea8 + last_write_checksum: sha1:9eab6d7734dcd4bf9da5222c1927f5f40ef45db0 + pristine_git_object: 63cf14e7fcbc7c3969220b4f07109473b246bf49 src/mistralai/client/models/messageinputentry.py: id: c0a4b5179095 last_write_checksum: sha1:b5bad18b88c0bfbbddfdafa6dc50a09e40a6ebd7 pristine_git_object: c948a13e3cc2071dd1b3d11c419ea61d51470152 src/mistralai/client/models/messageoutputcontentchunks.py: id: 2ed248515035 - last_write_checksum: sha1:dc7456e44084cba9cc6a46553fd64b1eb25f8d77 - pristine_git_object: bf455d17db16e4bc11da0ebb105a9f6ad4d63c01 + last_write_checksum: sha1:df4ef4d17ce48df271ff2b8cab297ae305aa08ec + pristine_git_object: def7a4d27cd3d1479864a1d6af19e89bd57bff70 src/mistralai/client/models/messageoutputentry.py: id: a07577d2268d last_write_checksum: sha1:38ad03422407925087835ab888c0be40bf5fa7fa @@ -2437,6 +3514,10 @@ trackedFiles: id: a2bbf63615c6 last_write_checksum: sha1:c3317ab9279c499dd7fb26f45799ca9369676ac7 pristine_git_object: d765f4fd3c4e43c37063833368e4b21cc0bfbcf2 + src/mistralai/client/models/messageresponse.py: + id: 6b388bc155dd + last_write_checksum: sha1:14ce5842084a2086505dd80eaa6200638a278000 + pristine_git_object: c8fbdff77b3464b630b0cd18f25d9179c5ff9eca 
src/mistralai/client/models/metric.py: id: c6a65acdd1a2 last_write_checksum: sha1:5ef7c75b278f16b412b42889ff0f2fc19d87cb7d @@ -2451,12 +3532,24 @@ trackedFiles: pristine_git_object: d9293ccc163995cfe0419d05c90fe1ae8e75cf57 src/mistralai/client/models/modelconversation.py: id: fea0a651f888 - last_write_checksum: sha1:4c1b31d95351dea877e24bd452b32d8e22edf42e - pristine_git_object: bb33d2e0e047bc075cb7ae284958b80a5b5ee657 + last_write_checksum: sha1:87bcbce66e719b06fa229edd875ff952cadb4ae9 + pristine_git_object: 74e113bf84fff58b037b1140e950c251801e347b src/mistralai/client/models/modellist.py: id: 00693c7eec60 last_write_checksum: sha1:de62fc6787f482e5df0ff0e70415f493f177b9a1 pristine_git_object: 5fd835f24cd1098a153ebfb3e958038a183d28a7 + src/mistralai/client/models/moderationllmv1action.py: + id: c95110c21e79 + last_write_checksum: sha1:53e2b0e06df3890cf9a1822dd19551adcc2f1a22 + pristine_git_object: ae3a6ee957e7e9a7a94e56a48204367e2b776a2d + src/mistralai/client/models/moderationllmv1categorythresholds.py: + id: 0c51d6766440 + last_write_checksum: sha1:17f74a67d19a59214e93c54e5cabc4132bdbee48 + pristine_git_object: 0451ba3afcbff22d1cc41b784c896419bf49e5f5 + src/mistralai/client/models/moderationllmv1config.py: + id: 483378b56394 + last_write_checksum: sha1:be0b266e84480aa46ca1c5048357a6afd22d1ddb + pristine_git_object: b34dbbd34b61990962b67e7d8d8dea18297bd7dc src/mistralai/client/models/moderationobject.py: id: 132faad0549a last_write_checksum: sha1:a8c1454a533e466216ef98dd198ae8959f51fa76 @@ -2465,6 +3558,14 @@ trackedFiles: id: 06bab279cb31 last_write_checksum: sha1:b9158e575276c1e0a510c129347b9a98c5a70567 pristine_git_object: a8a8ec3d8d8a58deb3c1f8358c6dce5a9734f89c + src/mistralai/client/models/observabilityerrorcode.py: + id: ae572b470a30 + last_write_checksum: sha1:f52abcb9e8f504fd9993679611912d3b64270452 + pristine_git_object: 99360d410f7128756a7a7f6be6772238fa15c12f + src/mistralai/client/models/observabilityerrordetail.py: + id: cb6e8a484a38 + 
last_write_checksum: sha1:c3c57f0c1c7de0a90900ade464caebfc0b78694b + pristine_git_object: a6236b661f20569cba1ee394ebae24219e82f2d7 src/mistralai/client/models/ocrimageobject.py: id: 685faeb41a80 last_write_checksum: sha1:13f4e4d33d8fb5b0ee842695d4cc8329bd7ca382 @@ -2495,20 +3596,72 @@ trackedFiles: pristine_git_object: 2ec1322b29d7fe5246b9ad355a4997222b37970f src/mistralai/client/models/outputcontentchunks.py: id: 9ad9741f4975 - last_write_checksum: sha1:16c43816ac7b7afd134bce1cda5bb44485d9fafe - pristine_git_object: fab7907b105cc9d9c738c5cca9c09eba9d5c4781 + last_write_checksum: sha1:afb76f3af2952c2afab5397e348ddfd6dbb56c4f + pristine_git_object: 1a115fe8b4874a6bd86719d91332cd3db6d95b46 + src/mistralai/client/models/paginatedconnectors.py: + id: 17e125b1022c + last_write_checksum: sha1:b9daf0fd7f3241b03f91e9f7edcb792b91962592 + pristine_git_object: 291da1115294da17b7ca7babc8dc2d7ef672d709 + src/mistralai/client/models/paginatedresultcampaignpreview.py: + id: 6653cba0f982 + last_write_checksum: sha1:8b3d00eec103d4ee7b27cda1e08af9656c2c49c1 + pristine_git_object: d82ee741f4673fd96baaba619f437e348fc02379 + src/mistralai/client/models/paginatedresultchatcompletioneventpreview.py: + id: 8c640682ccf9 + last_write_checksum: sha1:3b7e4de0836bccf0b3247962c17acc55a6caab35 + pristine_git_object: 97bbfea7c0901cf5487cf3a1743417a81232c254 + src/mistralai/client/models/paginatedresultdatasetimporttask.py: + id: 8fd0a8bad4e7 + last_write_checksum: sha1:f16cfea21df8209caea5b9f8e786a8793b2def50 + pristine_git_object: 9d6dcd052bbedb3e8cb618ce28db9bba1da3c54e + src/mistralai/client/models/paginatedresultdatasetpreview.py: + id: bf20489474ce + last_write_checksum: sha1:80544bb270892461e255c9697decace639c1f8ec + pristine_git_object: 51566cc5899b587d7005aadaa05314429285585f + src/mistralai/client/models/paginatedresultdatasetrecord.py: + id: 2556a91b48c4 + last_write_checksum: sha1:3e797b6e84b02e62b7db248cbc605a7c723ed370 + pristine_git_object: 1d6e15c1fb7df86cb66cf4263907b9a5ac19d8b6 + 
src/mistralai/client/models/paginatedresultjudgepreview.py: + id: 4d5aab2705a0 + last_write_checksum: sha1:77371c66e748507206fe1b00b228ba378e89f387 + pristine_git_object: 57dbc1e5dea2e462036ec2b5f48bf3fa18099edb src/mistralai/client/models/paginationinfo.py: id: 48851e82d67e last_write_checksum: sha1:166961e2c0f573ba0677ee803820bb944a8a5efb pristine_git_object: 2b9dab6258249f7be87e1d4a73a2502e21fe1f0d + src/mistralai/client/models/paginationresponse.py: + id: d64678967bf0 + last_write_checksum: sha1:683972d8248b032e3f30489c8aa3cdcdcc728862 + pristine_git_object: f05ee380b8a7e95f67545a61d333fe28956585ae + ? src/mistralai/client/models/post_dataset_records_from_campaign_v1_observability_datasets_dataset_id_imports_from_campaign_postop.py + : id: 3e8e390b7fa1 + last_write_checksum: sha1:7785e0e41ffed189357a2f5682a0158fbcfe80ca + pristine_git_object: 3d57251729621b291e32ae4055a756583905de38 + ? src/mistralai/client/models/post_dataset_records_from_dataset_v1_observability_datasets_dataset_id_imports_from_dataset_postop.py + : id: d396e018c804 + last_write_checksum: sha1:6b4a9b21f1443c40be7298058117d589a0ffd8b2 + pristine_git_object: aac48bd62ab8cd7d32caf4ae5fd190159e1c7580 + ? src/mistralai/client/models/post_dataset_records_from_explorer_v1_observability_datasets_dataset_id_imports_from_explorer_postop.py + : id: 046c79ed47c7 + last_write_checksum: sha1:630755741aac13b4d8fca9028e9e4bb82bfe56da + pristine_git_object: 6524c4d9c34e763fe9c3a186c352a64c51f24dc2 + src/mistralai/client/models/post_dataset_records_from_file_v1_observability_datasets_dataset_id_imports_from_file_postop.py: + id: 6e93e5363630 + last_write_checksum: sha1:e7c66567203632bddbd6d50b8f0bbd730180d4e7 + pristine_git_object: 17f90d48656e697e74195236c3a7b295d63c1a81 + ? 
src/mistralai/client/models/post_dataset_records_from_playground_v1_observability_datasets_dataset_id_imports_from_playground_postop.py + : id: 09334d96c26d + last_write_checksum: sha1:db69f60fa1e25cd24b958e997c8b4c635b2202ea + pristine_git_object: 7423375af779911d2842ff9736452fc19a385daf src/mistralai/client/models/prediction.py: id: 1cc842a069a5 last_write_checksum: sha1:3ee24375eb7f00cea0c9db6eebc564ce7067f295 pristine_git_object: 0c6f4182ca8140e595f601b12fbd582034257587 - src/mistralai/client/models/processingstatusout.py: - id: 3df842c4140f - last_write_checksum: sha1:d5acc98adcfc76cdc4fc26e090ecfc4d7835a438 - pristine_git_object: ed2a4f22dcffe787ce69bb9c6011a95216cf3928 + src/mistralai/client/models/processingstatus.py: + id: 28146eaecfcf + last_write_checksum: sha1:a3e3ebeae169441cf430c302c9fe278de79ce215 + pristine_git_object: 7e93308f4cf6af8f968e93f20765c41694b8fad9 src/mistralai/client/models/processstatus.py: id: "0205512146e6" last_write_checksum: sha1:22ad3d5fc80fbf3f83db61512e7bc79295c5fc91 @@ -2561,6 +3714,14 @@ trackedFiles: id: 3f2774d9e609 last_write_checksum: sha1:1ce68530a46793968f1122d29df722f0a5c9d267 pristine_git_object: fc4433cb4e657b06aa6a4c078094c2df342810e2 + src/mistralai/client/models/resourcelink.py: + id: 4251cc3c7797 + last_write_checksum: sha1:f2a70cf2edb98dc0ab7e71fc3bf6f26d28cfbba7 + pristine_git_object: 8ed5613cb6db09f80e7bd0fcb1d8b06c231390ac + src/mistralai/client/models/resourcevisibility.py: + id: b5819dd5f981 + last_write_checksum: sha1:3c7e91baa8c8a0bb2ba55a6b7c14db04350278d7 + pristine_git_object: 56f91f154594a121c988497332e5b978cee4264c src/mistralai/client/models/responsedoneevent.py: id: cf8a686bf82c last_write_checksum: sha1:144a8bf407391948946f3f5362db78a33c45ee6c @@ -2589,6 +3750,22 @@ trackedFiles: id: a9309422fed7 last_write_checksum: sha1:86a61340a647696f6c35a82d945509b1c85aa6f7 pristine_git_object: dfec7cce1e22ab607b6a9e947fa940284426086d + src/mistralai/client/models/searchchatcompletioneventidsrequest.py: + id: 
cabc8ef82d67 + last_write_checksum: sha1:39384c10e2da9806d984be7176a19eec16ed5482 + pristine_git_object: f84760811493458509eb34462254f6cfd09ac851 + src/mistralai/client/models/searchchatcompletioneventidsresponse.py: + id: a5f0bad3ba10 + last_write_checksum: sha1:a521e3466db8258702fe5c286698d9404cc22b8c + pristine_git_object: f4751159faff30b92a979d6f03ecbfc0f780261e + src/mistralai/client/models/searchchatcompletioneventsrequest.py: + id: a437333780bc + last_write_checksum: sha1:5147c9776bb4d6f18b1af6fd6d4bf584710357e7 + pristine_git_object: 95524c5b6f497e4d2c0c58622661469e88dd550b + src/mistralai/client/models/searchchatcompletioneventsresponse.py: + id: f96acbcd45f7 + last_write_checksum: sha1:539caf622ef9e7579fdc82e7986dcc3a2aedae53 + pristine_git_object: 8b9b10b51fe76de43895168df11e5bbcdfc29a62 src/mistralai/client/models/security.py: id: c2ca0e2a36b7 last_write_checksum: sha1:d74333517caae2a1aa58517e8e935e46913bcc66 @@ -2597,18 +3774,18 @@ trackedFiles: id: a0e2a7a16bf8 last_write_checksum: sha1:15a84d57ceeb74cfb37275f714954e42d8e9b3ba pristine_git_object: 08ffeb7e46fbbc28b7c93ef2aa4a49aff7c0d35e + src/mistralai/client/models/sharing.py: + id: 324f5ac27249 + last_write_checksum: sha1:be77302c873a36d561fb4594fcb7addac923c9c6 + pristine_git_object: d6142e839545fb6be9bbac5d53c34e333112d467 src/mistralai/client/models/sharingdelete.py: id: f5ecce372e06 last_write_checksum: sha1:247d793bd1ddc0ad35d010c17e5b32eba826e3a1 pristine_git_object: 33ccd7e71b8f65d2a9329d8632b5446ca0431d0a - src/mistralai/client/models/sharingin.py: - id: e953dda09c02 - last_write_checksum: sha1:7c2b5333c634ed7889fc907edbf89c6066db5928 - pristine_git_object: 7c1a52b049db4afbd6a06b5f39966dbec4f862ba - src/mistralai/client/models/sharingout.py: - id: 0b8804effb5c - last_write_checksum: sha1:a78e4f6bf2f49ae8250787e1680b5004563b32ac - pristine_git_object: ab3679a4cbcc2826ff2672a09e4eaf4990b5c6a9 + src/mistralai/client/models/sharingrequest.py: + id: 2439b732dfae + last_write_checksum: 
sha1:bb79913b3b243cd32b13ae012f0fc3fcc1b457d8 + pristine_git_object: 76424b8ee6c9cc0cff7d96649cc7695be6e3ceec src/mistralai/client/models/source.py: id: fcee60a4ea0d last_write_checksum: sha1:4d4277d75f7ce001780a069898b38afa7c8addc0 @@ -2629,10 +3806,18 @@ trackedFiles: id: 9c96fb86a9ab last_write_checksum: sha1:89cbb66753d7a3585ce58c70219a349f770909cc pristine_git_object: ac9f3137dddc15e1cd10aa6385b76510e6c23e33 + src/mistralai/client/models/textcontent.py: + id: 60805b9f7050 + last_write_checksum: sha1:23e396fda502398565dfd996b39107332584f52a + pristine_git_object: 7468b046de5d72085e804e774575519732d60742 + src/mistralai/client/models/textresourcecontents.py: + id: 58fe427f427f + last_write_checksum: sha1:56009344486b16740ba6569103ec8d20a15f9601 + pristine_git_object: c497bb4dc9177047100914169fb0e5358efa2292 src/mistralai/client/models/thinkchunk.py: id: 294bfce193a4 - last_write_checksum: sha1:9126c530e93ae7532235d4bfa3e2b202423a0f24 - pristine_git_object: 5995e6010bfb63d0ab2ded6e0f55b7dca23f769a + last_write_checksum: sha1:9107800bae26e36436296efa317e7b00197f105a + pristine_git_object: 03573f8e1d1a470ba6dc545089beb9b40cebad2a src/mistralai/client/models/timestampgranularity.py: id: 68ddf8d702ea last_write_checksum: sha1:64e7b198a75f026590e26758112651d31984076f @@ -2733,14 +3918,50 @@ trackedFiles: id: 22e2ccbb0c80 last_write_checksum: sha1:a69d8dc8636f3326eb61892b85a9b60044b457fe pristine_git_object: 5c75d30edaade853f085533da0f9f5de221b6e44 + src/mistralai/client/models/update_dataset_record_payload_v1_observability_dataset_records_dataset_record_id_payload_putop.py: + id: fa5d55a9d6cf + last_write_checksum: sha1:5cf094a6617b1900745deece2e34edfb1b040905 + pristine_git_object: d5a2bd9dabc6b10c0cfd5ac1aa6ff56b3abf803c + ? 
src/mistralai/client/models/update_dataset_record_properties_v1_observability_dataset_records_dataset_record_id_properties_putop.py + : id: b0af26e00bfa + last_write_checksum: sha1:5cd8dc574dd7e063fc725ab97185ffce58345087 + pristine_git_object: b828aa80dd6c0293ac783802470fbec1a01560ce + src/mistralai/client/models/update_dataset_v1_observability_datasets_dataset_id_patchop.py: + id: 7bb459765cba + last_write_checksum: sha1:56ae41144cef62f4c6384994f30dbaede3d30925 + pristine_git_object: 5a44056454e368ae54485532875e72750fe82e27 + src/mistralai/client/models/update_judge_v1_observability_judges_judge_id_putop.py: + id: a1bace5342e6 + last_write_checksum: sha1:2fd7c698f8fe16e5df54e49c7aba85e247d3f19b + pristine_git_object: 41c3a36574b60207207535a6930ae094c23a8fff src/mistralai/client/models/updateagentrequest.py: id: 914b4b2be67a - last_write_checksum: sha1:f37178288254e905ce298befbe801fa6ba63ec0e - pristine_git_object: b751ff74396ca0e74411a7a1549c6e0b4988fc49 + last_write_checksum: sha1:940fbc17f2b76bacbdc38b1ad29abeb5eda33ed9 + pristine_git_object: dbe3499ed323b995453cd06ec54b55f75d2c4241 + src/mistralai/client/models/updateconnectorrequest.py: + id: a761cd154109 + last_write_checksum: sha1:cd461734ab9df9ca0d254c69a552b74b8f63f20e + pristine_git_object: 2fe42ba8b72ab5d851fed49ddbe5d5cfeeb022bd + src/mistralai/client/models/updatedatasetrecordpayloadrequest.py: + id: bd45f357a538 + last_write_checksum: sha1:774ba70d9f417e84b546b9f4c5c85eb2876dd56d + pristine_git_object: 155ea78df17aeda6f1e7f70e4ae6d18dfdae1499 + src/mistralai/client/models/updatedatasetrecordpropertiesrequest.py: + id: c457ead40a69 + last_write_checksum: sha1:ef265db7817eb0f59ee182e327482918aefbf36c + pristine_git_object: ccba4a5c71aa8717c4fcb1af61f4f10c17b84ebd + src/mistralai/client/models/updatedatasetrequest.py: + id: bbb067caa23f + last_write_checksum: sha1:1045b7f77b20e9ede79240467927bac20b9d54f5 + pristine_git_object: 0297408982cd4204c15447204e539cef16b82a01 
src/mistralai/client/models/updatedocumentrequest.py: id: a8cfda07d337 last_write_checksum: sha1:c644725ae379f22550d00b42baefb511d1cc3667 pristine_git_object: 61e696555c0654208b0d9dcd63fc475ad85297d4 + src/mistralai/client/models/updatejudgerequest.py: + id: f6ad6fb901a0 + last_write_checksum: sha1:0cc5d951aa36d1ba6cf82020d9ade4ac85bc3a94 + pristine_git_object: 04c86ab65599b96478f9b6a0cfa6ce3af1a489bb src/mistralai/client/models/updatelibraryrequest.py: id: 51bc63885337 last_write_checksum: sha1:622d6a7af58d2e86d7d2dd4e312883d11ce5a8a8 @@ -2759,8 +3980,8 @@ trackedFiles: pristine_git_object: 63e7679246a11fe8e7a3db06e382779c05c64366 src/mistralai/client/models/validationerror.py: id: 15df3c7368ab - last_write_checksum: sha1:63df5739d68f984470d4d1b8661a875201cc301d - pristine_git_object: 385714c8cb80a8afbca6d5142a2d378d0d165cf9 + last_write_checksum: sha1:feacaef605de97f7eec36e69c5dda2a10e5b75ac + pristine_git_object: d856e24c3ac25fb6d2802ca7038ab6947ac6a2da src/mistralai/client/models/wandbintegration.py: id: 4823c1e80942 last_write_checksum: sha1:cc0a7ce49756928f4d261375526a3498b9e4f05d @@ -2779,8 +4000,12 @@ trackedFiles: pristine_git_object: 6871080f6279ef42a0525c1e26368baafc98fbb7 src/mistralai/client/models_.py: id: 1d277958a843 - last_write_checksum: sha1:b9ea906a7704aa57efe5d13ac547e502d961d3b5 - pristine_git_object: a287c413ddf48bd5ff7fc0a685e05d4bcdabb6e5 + last_write_checksum: sha1:f68fc105aca375b135a00026dbbec818cd55cd73 + pristine_git_object: 7bb6dc1de1309509b5b49e743d5b5ac80cf976f3 + src/mistralai/client/observability.py: + id: 453a1d06d130 + last_write_checksum: sha1:62d4d03a08807271b404a8684b1153a739d70b05 + pristine_git_object: 4057909ebc43fc419396582fd69e62ca5135ecb3 src/mistralai/client/ocr.py: id: 2f804a12fc62 last_write_checksum: sha1:707d91582149e76a3109df8b1a58bfd44111a93d @@ -2789,6 +4014,10 @@ trackedFiles: id: d95cd1565e33 last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 pristine_git_object: 
3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + src/mistralai/client/records.py: + id: 10f90c990bd8 + last_write_checksum: sha1:47018c027eb2c9d9235f399b939073e396bd52ad + pristine_git_object: ceb8de4fe3edfbd818f6002381c365ea8437ac2c src/mistralai/client/sdk.py: id: 48edbcb38d7e last_write_checksum: sha1:365709e35dc4e450a2c4931e75dcbd04568ab361 @@ -3121,7 +4350,7 @@ examples: application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "batch", "sample_type": "batch_result", "source": "upload"} userExample: requestBody: - multipart/form-data: {"file": "x-file: example.file"} + multipart/form-data: {"visibility": "workspace", "file": "x-file: example.file"} responses: "200": application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "bytes": 13000, "created_at": 1759500189, "filename": "example.file.jsonl", "purpose": "fine-tune", "sample_type": "instruct", "num_lines": 2, "mimetype": "application/jsonl", "source": "upload", "signature": "d4821d2de1917341"} @@ -4308,15 +5537,495 @@ examples: responses: "422": application/json: {} + get_chat_completion_events_v1_observability_chat_completion_events_search_post: + speakeasy-default-get-chat-completion-events-v1-observability-chat-completion-events-search-post: + parameters: + query: + page_size: 50 + requestBody: + application/json: {"search_params": {"filters": null}} + responses: + "200": + application/json: {"completion_events": {}} + "400": + application/json: {"detail": {"message": "", "error_code": "AUTH_FORBIDDEN_WORKSPACE_NOT_FOUND"}} + get_chat_completion_event_ids_v1_observability_chat_completion_events_search_ids_post: + speakeasy-default-get-chat-completion-event-ids-v1-observability-chat-completion-events-search-ids-post: + requestBody: + application/json: {"search_params": {"filters": {"field": "", "op": "lt", "value": ""}}} + responses: + "200": + application/json: 
{"completion_event_ids": [""]} + "400": + application/json: {"detail": {"message": "", "error_code": null}} + get_chat_completion_event_v1_observability_chat_completion_events__event_id__get: + speakeasy-default-get-chat-completion-event-v1-observability-chat-completion-events-event-id-get: + parameters: + path: + event_id: "e79bf81b-b37f-425e-9dff-071a54592e44" + responses: + "200": + application/json: {"event_id": "", "correlation_id": "", "created_at": "2025-08-04T07:31:12.646Z", "extra_fields": {}, "nb_input_tokens": 337803, "nb_output_tokens": 297257, "enabled_tools": [{"key": ""}, {"key": "", "key1": "", "key2": ""}], "request_messages": [{"key": ""}, {"key": "", "key1": "", "key2": ""}], "response_messages": [{"key": "", "key1": "", "key2": ""}], "nb_messages": 229575, "chat_transcription_events": [{"audio_url": "https://considerate-tinderbox.com/", "model": "Cruze", "response_message": {}}]} + "400": + application/json: {"detail": {"message": "", "error_code": "AUTH_FORBIDDEN_ROLE_NOT_FOUND"}} + get_similar_chat_completion_events_v1_observability_chat_completion_events__event_id__similar_events_get: + speakeasy-default-get-similar-chat-completion-events-v1-observability-chat-completion-events-event-id-similar-events-get: + parameters: + path: + event_id: "b7be6e08-d068-45fc-b77a-966232e92fd6" + responses: + "200": + application/json: {"completion_events": {}} + "400": + application/json: {"detail": {"message": "", "error_code": "EVALUATION_RUN_TRANSITION_ERROR"}} + judge_chat_completion_event_v1_observability_chat_completion_events__event_id__live_judging_post: + speakeasy-default-judge-chat-completion-event-v1-observability-chat-completion-events-event-id-live-judging-post: + parameters: + path: + event_id: "dfcd5582-1373-4de5-af51-987464da561c" + requestBody: + application/json: {"judge_definition": {"name": "", "description": "total plain self-confidence candid hungrily partial astride cruelly brr", "model_name": "", "output": {"type": "CLASSIFICATION", 
"options": [{"value": "", "description": "indeed insolence delightfully following"}]}, "instructions": "", "tools": []}} + responses: + "200": + application/json: {"analysis": "", "answer": ""} + "400": + application/json: {"detail": {"message": "", "error_code": "DATASET_TASK_NOT_FOUND"}} + get_chat_completion_fields_v1_observability_chat_completion_fields_get: + speakeasy-default-get-chat-completion-fields-v1-observability-chat-completion-fields-get: + responses: + "200": + application/json: {"field_definitions": [{"name": "", "label": "", "type": "FLOAT", "supported_operators": []}], "field_groups": []} + "400": + application/json: {"detail": {"message": "", "error_code": "JUDGE_NOT_FOUND"}} + get_chat_completion_field_options_v1_observability_chat_completion_fields__field_name__options_get: + speakeasy-default-get-chat-completion-field-options-v1-observability-chat-completion-fields-field-name-options-get: + parameters: + path: + field_name: "" + query: + operator: "startswith" + responses: + "200": + application/json: {} + "400": + application/json: {"detail": {"message": "", "error_code": "TEMPLATE_SYNTAX_ERROR"}} + get_chat_completion_field_options_counts_v1_observability_chat_completion_fields__field_name__options_counts_post: + ? 
speakeasy-default-get-chat-completion-field-options-counts-v1-observability-chat-completion-fields-field-name-options-counts-post + : parameters: + path: + field_name: "" + requestBody: + application/json: {} + responses: + "200": + application/json: {"counts": []} + "400": + application/json: {"detail": {"message": "", "error_code": "DATASET_RECORD_NOT_FOUND"}} + create_judge_v1_observability_judges_post: + speakeasy-default-create-judge-v1-observability-judges-post: + requestBody: + application/json: {"name": "", "description": "border freely down whenever broadly whenever restructure catalyze after", "model_name": "", "output": {"type": "REGRESSION", "min": 0, "min_description": "", "max": 1, "max_description": ""}, "instructions": "", "tools": ["", ""]} + responses: + "201": + application/json: {"id": "64b223b0-e106-43bb-8cc9-77162463295b", "created_at": "2026-03-11T13:01:21.858Z", "updated_at": "2024-02-24T09:21:22.966Z", "deleted_at": "2026-02-16T14:45:07.865Z", "owner_id": "cf47c12a-77ac-4d6f-aa5e-9c791792f5bb", "workspace_id": "268fd016-1a79-4091-9ec2-104ca1ea5adf", "name": "", "description": "typify intervention seemingly", "model_name": "", "output": {"type": "CLASSIFICATION", "options": [{"value": "", "description": "masquerade readily fen"}]}, "instructions": "", "tools": [""]} + "400": + application/json: {"detail": {"message": "", "error_code": "DATASET_RECORD_NOT_FOUND"}} + get_judges_v1_observability_judges_get: + speakeasy-default-get-judges-v1-observability-judges-get: + parameters: + query: + page_size: 50 + page: 1 + responses: + "200": + application/json: {"judges": {"count": 165283}} + "400": + application/json: {"detail": {"message": "", "error_code": "CAMPAIGN_NOT_FOUND"}} + get_judge_by_id_v1_observability_judges__judge_id__get: + speakeasy-default-get-judge-by-id-v1-observability-judges-judge-id-get: + parameters: + path: + judge_id: "19ae5cf8-2ade-4a40-b9d2-730aaebe8429" + responses: + "200": + application/json: {"id": 
"88a908a1-116d-4887-b5ba-d57fd96451d3", "created_at": "2026-01-20T19:57:20.448Z", "updated_at": "2025-04-15T17:15:10.529Z", "deleted_at": "2026-05-08T22:17:53.176Z", "owner_id": "0fc619ee-fd86-480d-98da-9dc726fb311a", "workspace_id": "aa9b970b-e897-462a-8a4c-60902a0e81d3", "name": "", "description": "catalyst honestly brr underachieve per until free", "model_name": "", "output": {"type": "CLASSIFICATION", "options": [{"value": "", "description": "irritably for likewise wise linseed unethically handy"}]}, "instructions": "", "tools": ["", ""]} + "400": + application/json: {"detail": {"message": "", "error_code": "SEARCH_FILTER_TO_SQL_CONVERSION_ERROR"}} + delete_judge_v1_observability_judges__judge_id__delete: + speakeasy-default-delete-judge-v1-observability-judges-judge-id-delete: + parameters: + path: + judge_id: "80deecde-e10f-409c-a13a-c242d3760f6e" + responses: + "400": + application/json: {"detail": {"message": "", "error_code": "EVALUATION_RECORD_NOT_FOUND"}} + update_judge_v1_observability_judges__judge_id__put: + speakeasy-default-update-judge-v1-observability-judges-judge-id-put: + parameters: + path: + judge_id: "9f28c7db-1fb7-4e1c-b137-d7039561ddb7" + requestBody: + application/json: {"name": "", "description": "noteworthy and unless", "model_name": "", "output": {"type": "REGRESSION", "min": 0, "min_description": "", "max": 1, "max_description": ""}, "instructions": "", "tools": []} + responses: + "400": + application/json: {"detail": {"message": "", "error_code": "AUTH_FORBIDDEN_WORKSPACE_NOT_FOUND"}} + create_campaign_v1_observability_campaigns_post: + speakeasy-default-create-campaign-v1-observability-campaigns-post: + requestBody: + application/json: {"search_params": {"filters": {"field": "", "op": "lt", "value": ""}}, "judge_id": "9b501b9f-3525-44a7-a51a-5352679be9ed", "name": "", "description": "shakily triangular scotch requirement whether once oh", "max_nb_events": 232889} + responses: + "201": + application/json: {"id": 
"2c13806e-5da0-449e-aa8c-2efe20d619f9", "created_at": "2026-03-23T12:46:40.529Z", "updated_at": "2024-12-09T10:33:58.511Z", "deleted_at": "2024-03-23T06:04:33.659Z", "name": "", "owner_id": "92d6e73a-fda5-475c-8483-6f2477d27cb8", "workspace_id": "731e5ff9-f8e6-4f2e-9ec8-fa28e59f044e", "description": "artistic closely resolve yet untimely which", "max_nb_events": 562269, "search_params": {"filters": {"field": "", "op": "lt", "value": ""}}, "judge": {"id": "a4291d77-2586-4ec3-874f-fde6b8a25ea6", "created_at": "2024-10-20T09:21:44.933Z", "updated_at": "2026-12-21T16:40:13.059Z", "deleted_at": "2025-11-13T11:08:56.633Z", "owner_id": "f054cff6-2a46-43c8-bf5f-34726c713700", "workspace_id": "5141df7d-8618-4c38-971a-fbae043ac71a", "name": "", "description": "and brr striking save give", "model_name": "", "output": {"type": "CLASSIFICATION", "options": []}, "instructions": "", "tools": [""]}} + "400": + application/json: {"detail": {"message": "", "error_code": "SEARCH_FILTER_TO_SQL_CONVERSION_ERROR"}} + get_campaigns_v1_observability_campaigns_get: + speakeasy-default-get-campaigns-v1-observability-campaigns-get: + parameters: + query: + page_size: 50 + page: 1 + responses: + "200": + application/json: {"campaigns": {"count": 267257}} + "400": + application/json: {"detail": {"message": "", "error_code": "JUDGE_ALREADY_HAS_NEW_VERSION"}} + get_campaign_by_id_v1_observability_campaigns__campaign_id__get: + speakeasy-default-get-campaign-by-id-v1-observability-campaigns-campaign-id-get: + parameters: + path: + campaign_id: "fd7945d6-00e2-4852-9054-bcbb968d7f98" + responses: + "200": + application/json: {"id": "471e682e-fdeb-4e35-9d85-e02663dac589", "created_at": "2024-09-14T10:06:32.327Z", "updated_at": "2025-10-10T18:13:43.173Z", "deleted_at": "2026-05-08T12:03:39.691Z", "name": "", "owner_id": "86cda873-2f96-4f74-bcd0-92e6c505be69", "workspace_id": "4bfb0db9-d23f-432b-a2f3-1e36f1fb077c", "description": "event yet clearly yummy plus institute catalog", "max_nb_events": 
567256, "search_params": {"filters": {}}, "judge": {"id": "1242c0b2-6c6b-463c-9e64-53616f48a9cd", "created_at": "2026-06-13T02:11:55.473Z", "updated_at": "2024-12-08T02:12:43.788Z", "deleted_at": "2025-05-03T05:53:54.100Z", "owner_id": "5a2445c5-eb01-490e-8653-b35964e59b07", "workspace_id": "308e0ead-b563-4544-9254-9c9ac1f4f270", "name": "", "description": "regularly carnival besides lone yippee woot softly besides even", "model_name": "", "output": {"type": "REGRESSION", "min": 0, "min_description": "", "max": 1, "max_description": ""}, "instructions": "", "tools": [""]}} + "400": + application/json: {"detail": {"message": "", "error_code": null}} + delete_campaign_v1_observability_campaigns__campaign_id__delete: + speakeasy-default-delete-campaign-v1-observability-campaigns-campaign-id-delete: + parameters: + path: + campaign_id: "90e07b45-8cf7-4081-8558-a786779e039d" + responses: + "400": + application/json: {"detail": {"message": "", "error_code": "JUDGE_MISTRAL_API_TIMEOUT"}} + get_campaign_status_by_id_v1_observability_campaigns__campaign_id__status_get: + speakeasy-default-get-campaign-status-by-id-v1-observability-campaigns-campaign-id-status-get: + parameters: + path: + campaign_id: "4b1dd9a5-8dc9-48e1-bd11-29443e959902" + responses: + "200": + application/json: {"status": "TIMED_OUT"} + "400": + application/json: {"detail": {"message": "", "error_code": "DATABASE_UNAVAILABLE"}} + get_campaign_selected_events_v1_observability_campaigns__campaign_id__selected_events_get: + speakeasy-default-get-campaign-selected-events-v1-observability-campaigns-campaign-id-selected-events-get: + parameters: + path: + campaign_id: "305b5e46-a650-4d8a-8b5b-d23ef90ec831" + query: + page_size: 50 + page: 1 + responses: + "200": + application/json: {"completion_events": {"count": 538671}} + "400": + application/json: {"detail": {"message": "", "error_code": null}} + create_dataset_v1_observability_datasets_post: + speakeasy-default-create-dataset-v1-observability-datasets-post: 
+ requestBody: + application/json: {"name": "", "description": "citizen whoever sustenance necessary vibrant openly"} + responses: + "201": + application/json: {"id": "e86e57b5-7c22-4325-bd9a-1aa080190810", "created_at": "2026-02-21T13:59:16.796Z", "updated_at": "2026-05-02T06:33:42.170Z", "deleted_at": "2024-10-04T09:38:22.311Z", "name": "", "description": "faraway stained despite obtrude er whereas rough", "owner_id": "c9748a93-c271-44dd-8be4-eae95777dcf1", "workspace_id": "1c770558-393b-49ef-a65e-6953eebd09fe"} + "400": + application/json: {"detail": {"message": "", "error_code": "AGENT_MISTRAL_API_ERROR"}} + get_datasets_v1_observability_datasets_get: + speakeasy-default-get-datasets-v1-observability-datasets-get: + parameters: + query: + page_size: 50 + page: 1 + responses: + "200": + application/json: {"datasets": {"count": 734678}} + "400": + application/json: {"detail": {"message": "", "error_code": "DATABASE_TIMEOUT"}} + get_dataset_by_id_v1_observability_datasets__dataset_id__get: + speakeasy-default-get-dataset-by-id-v1-observability-datasets-dataset-id-get: + parameters: + path: + dataset_id: "036fa362-e080-4fa5-beff-a334a70efb58" + responses: + "200": + application/json: {"id": "a4355b62-5f75-44af-86e3-398b02d79718", "created_at": "2024-09-22T05:11:33.455Z", "updated_at": "2024-08-05T11:16:32.114Z", "deleted_at": "2024-11-17T02:39:46.669Z", "name": "", "description": "brr dearly under pish brr however careless ferret", "owner_id": "3500df1e-5139-4573-90c4-c40cc1553997", "workspace_id": "98f44a49-7d9c-44e0-819c-b87b8b697b40"} + "400": + application/json: {"detail": {"message": "", "error_code": "TEMPLATE_SYNTAX_ERROR"}} + delete_dataset_v1_observability_datasets__dataset_id__delete: + speakeasy-default-delete-dataset-v1-observability-datasets-dataset-id-delete: + parameters: + path: + dataset_id: "baf961a3-bb8e-4085-89ef-de9c5d8c4e77" + responses: + "400": + application/json: {"detail": {"message": "", "error_code": "JUDGE_MISTRAL_API_ERROR"}} + 
update_dataset_v1_observability_datasets__dataset_id__patch: + speakeasy-default-update-dataset-v1-observability-datasets-dataset-id-patch: + parameters: + path: + dataset_id: "95be9afc-fc05-44a6-af9f-2362de1224f9" + requestBody: + application/json: {} + responses: + "200": + application/json: {"id": "321a1909-2e5b-44a4-9344-3f6730da59f3", "created_at": "2024-02-08T01:14:12.266Z", "updated_at": "2026-01-30T21:57:19.199Z", "deleted_at": "2024-09-01T12:04:00.540Z", "name": "", "description": "hm vivaciously grandiose ditch", "owner_id": "dd537278-359d-4d34-aa69-9fbf9968649e", "workspace_id": "4f07d08f-2c7c-489c-abf9-f4f9c6d4d1b5"} + "400": + application/json: {"detail": {"message": "", "error_code": "FEATURE_NOT_SUPPORTED"}} + get_dataset_records_v1_observability_datasets__dataset_id__records_get: + speakeasy-default-get-dataset-records-v1-observability-datasets-dataset-id-records-get: + parameters: + path: + dataset_id: "444d2a88-e636-4bc0-ab6c-919bedaed112" + query: + page_size: 50 + page: 1 + responses: + "200": + application/json: {"records": {"count": 428843}} + "400": + application/json: {"detail": {"message": "", "error_code": null}} + create_dataset_record_v1_observability_datasets__dataset_id__records_post: + speakeasy-default-create-dataset-record-v1-observability-datasets-dataset-id-records-post: + parameters: + path: + dataset_id: "4c54ed13-1459-44e1-8696-1a6df06f7177" + requestBody: + application/json: {"payload": {"messages": [{"key": ""}, {"key": "", "key1": ""}]}, "properties": {"key": "", "key1": "", "key2": ""}} + responses: + "201": + application/json: {"id": "dd3d2088-f021-41f6-aa26-6b86d0352e3b", "created_at": "2025-08-03T16:57:36.654Z", "updated_at": "2024-08-13T19:23:55.490Z", "deleted_at": "2026-06-12T19:29:36.465Z", "dataset_id": "fa142dc2-521b-4396-b33d-045371fdfec0", "payload": {"messages": [{"key": ""}, {"key": "", "key1": ""}]}, "properties": {"key": "", "key1": "", "key2": ""}, "source": "DIRECT_INPUT"} + "400": + application/json: 
{"detail": {"message": "", "error_code": "SEARCH_SERVICE_UNAVAILABLE"}} + post_dataset_records_from_campaign_v1_observability_datasets__dataset_id__imports_from_campaign_post: + speakeasy-default-post-dataset-records-from-campaign-v1-observability-datasets-dataset-id-imports-from-campaign-post: + parameters: + path: + dataset_id: "306b5f31-e31c-4e06-9220-e3008c61bf1b" + requestBody: + application/json: {"campaign_id": "71a2e42d-7414-4fe6-89cb-44a2122b6f6b"} + responses: + "202": + application/json: {"id": "db513b3b-341e-4e14-953f-13e588b8c106", "created_at": "2026-07-10T21:30:19.189Z", "updated_at": "2025-08-15T09:09:46.978Z", "deleted_at": "2025-01-04T02:06:39.691Z", "creator_id": "9c74cc15-e7f7-4c15-903f-2485fcc52a87", "dataset_id": "d16b3225-1373-4338-b5d9-032cac7fed30", "workspace_id": "bd24c970-f6df-4a69-9911-63b52ad02f60", "status": "CANCELED"} + "400": + application/json: {"detail": {"message": "", "error_code": "JUDGE_CONVERSATION_FORMAT_ERROR"}} + post_dataset_records_from_explorer_v1_observability_datasets__dataset_id__imports_from_explorer_post: + speakeasy-default-post-dataset-records-from-explorer-v1-observability-datasets-dataset-id-imports-from-explorer-post: + parameters: + path: + dataset_id: "ee1930e9-54f7-4c68-aa8a-40fe5d2a3485" + requestBody: + application/json: {"completion_event_ids": ["", "", ""]} + responses: + "202": + application/json: {"id": "8c71ef83-5f8d-4ee8-8a8d-e4d0c5abc8e2", "created_at": "2026-05-31T08:20:12.578Z", "updated_at": "2025-02-03T03:31:36.033Z", "deleted_at": null, "creator_id": "7c5e163b-8c0d-4f60-9f6d-d44369ad1803", "dataset_id": "78461345-1124-43d7-943b-91676e5fb7a4", "workspace_id": "c97e6603-92cf-4635-95cf-00423feddaff", "status": "TERMINATED"} + "400": + application/json: {"detail": {"message": "", "error_code": "SEARCH_NOT_FOUND"}} + post_dataset_records_from_file_v1_observability_datasets__dataset_id__imports_from_file_post: + 
speakeasy-default-post-dataset-records-from-file-v1-observability-datasets-dataset-id-imports-from-file-post: + parameters: + path: + dataset_id: "1c96c925-cc58-4529-863d-9fe66a6f1924" + requestBody: + application/json: {"file_id": ""} + responses: + "202": + application/json: {"id": "8654123f-341f-49c3-8447-98828248facd", "created_at": "2026-12-16T17:56:34.179Z", "updated_at": "2026-04-20T22:41:58.691Z", "deleted_at": null, "creator_id": "eabdcb9a-cc81-4a95-a488-996d13c5a3b4", "dataset_id": "6a4ee4af-5d20-4a2a-a33f-1e652ecab674", "workspace_id": "a92c0b3a-fb84-4231-a417-58dd87ad39a2", "status": "COMPLETED"} + "400": + application/json: {"detail": {"message": "", "error_code": "SEARCH_SERVICE_UNAVAILABLE"}} + post_dataset_records_from_playground_v1_observability_datasets__dataset_id__imports_from_playground_post: + speakeasy-default-post-dataset-records-from-playground-v1-observability-datasets-dataset-id-imports-from-playground-post: + parameters: + path: + dataset_id: "5cb42584-5fcf-4837-997a-6a67c5e6900d" + requestBody: + application/json: {"conversation_ids": []} + responses: + "202": + application/json: {"id": "2cf93c3a-cd05-4a36-b3a6-e89bb5c172d6", "created_at": "2026-11-28T19:05:01.247Z", "updated_at": "2024-12-22T22:01:15.271Z", "deleted_at": "2026-09-14T02:10:46.354Z", "creator_id": "1b3ecd64-2756-4a66-9d66-9f22de0766e0", "dataset_id": "efac04d5-91fe-4307-9995-d43f9354c865", "workspace_id": "c838e342-2ead-4e36-82a1-c36678946451", "status": "COMPLETED"} + "400": + application/json: {"detail": {"message": "", "error_code": "EVALUATION_RECORD_NOT_FOUND"}} + post_dataset_records_from_dataset_v1_observability_datasets__dataset_id__imports_from_dataset_post: + speakeasy-default-post-dataset-records-from-dataset-v1-observability-datasets-dataset-id-imports-from-dataset-post: + parameters: + path: + dataset_id: "ada96a08-d724-4e5c-9111-aaf1bdb7d588" + requestBody: + application/json: {"dataset_record_ids": ["58fe798a-537b-4c61-9efc-d1d96d5d264a", 
"cfa1d197-deda-456e-906b-dd84dccfcd17"]} + responses: + "202": + application/json: {"id": "b97f7456-333f-481a-b1d7-b2fd0cabb81f", "created_at": "2026-07-07T08:57:39.299Z", "updated_at": "2025-04-06T16:16:54.540Z", "deleted_at": "2026-02-08T12:22:57.154Z", "creator_id": "008cae3e-ab6c-440d-a500-e1c0c6bfafea", "dataset_id": "92ce387e-43fe-4327-b337-87400761f84f", "workspace_id": "a231f2e2-3f68-4b37-9675-5bce4be8ce71", "status": "FAILED"} + "400": + application/json: {"detail": {"message": "", "error_code": "DATABASE_UNAVAILABLE"}} + export_dataset_to_jsonl_v1_observability_datasets__dataset_id__exports_to_jsonl_get: + speakeasy-default-export-dataset-to-jsonl-v1-observability-datasets-dataset-id-exports-to-jsonl-get: + parameters: + path: + dataset_id: "d521add6-d909-4a69-a460-cb880d87b773" + responses: + "200": + application/json: {"file_url": "https://indolent-reasoning.net/"} + "400": + application/json: {"detail": {"message": "", "error_code": null}} + get_dataset_import_task_v1_observability_datasets__dataset_id__tasks__task_id__get: + speakeasy-default-get-dataset-import-task-v1-observability-datasets-dataset-id-tasks-task-id-get: + parameters: + path: + dataset_id: "b64b504e-58a2-4d52-979b-e2634b301235" + task_id: "1713cde2-dea1-410d-851e-8cea964ffa14" + responses: + "200": + application/json: {"id": "dc7499d8-7f09-4d2a-8e29-0a4d065145f5", "created_at": "2025-01-07T06:23:24.736Z", "updated_at": "2025-05-24T23:56:27.316Z", "deleted_at": "2026-05-23T10:58:23.820Z", "creator_id": "0e259d11-14ba-43d1-adfe-3d3e8a7b4e8d", "dataset_id": "58388b6a-c7f3-4124-9259-e560e7c0d1ad", "workspace_id": "9c1ec3b6-b03d-421a-824b-b1731697acd3", "status": "RUNNING"} + "400": + application/json: {"detail": {"message": "", "error_code": "DATABASE_ERROR"}} + get_dataset_import_tasks_v1_observability_datasets__dataset_id__tasks_get: + speakeasy-default-get-dataset-import-tasks-v1-observability-datasets-dataset-id-tasks-get: + parameters: + path: + dataset_id: 
"29903443-7f9c-42a6-9b6b-fc5cbef4191a" + query: + page_size: 50 + page: 1 + responses: + "200": + application/json: {"tasks": {"count": 578476}} + "400": + application/json: {"detail": {"message": "", "error_code": "EVALUATION_RUN_TRANSITION_IS_INVALID"}} + get_dataset_record_v1_observability_dataset_records__dataset_record_id__get: + speakeasy-default-get-dataset-record-v1-observability-dataset-records-dataset-record-id-get: + parameters: + path: + dataset_record_id: "ce995349-abbf-45c0-be75-885fc1c4b4c0" + responses: + "200": + application/json: {"id": "c96bcf85-ee78-44b5-a9dc-d8f146764c9a", "created_at": "2025-01-04T07:24:23.318Z", "updated_at": "2024-05-08T06:55:20.351Z", "deleted_at": null, "dataset_id": "6689327c-88fd-4189-8dcb-00bb54335fe3", "payload": {"messages": []}, "properties": {"key": ""}, "source": "DIRECT_INPUT"} + "400": + application/json: {"detail": {"message": "", "error_code": "DATABASE_ERROR"}} + delete_dataset_record_v1_observability_dataset_records__dataset_record_id__delete: + speakeasy-default-delete-dataset-record-v1-observability-dataset-records-dataset-record-id-delete: + parameters: + path: + dataset_record_id: "799fed99-80b4-4a9a-a15e-05352b811702" + responses: + "400": + application/json: {"detail": {"message": "", "error_code": "FEATURE_NOT_SUPPORTED"}} + delete_dataset_records_v1_observability_dataset_records_bulk_delete_post: + speakeasy-default-delete-dataset-records-v1-observability-dataset-records-bulk-delete-post: + requestBody: + application/json: {"dataset_record_ids": ["22fc78f7-e774-4ab5-b1ea-63852992ef31", "1c533b4f-882e-4bd0-9ef6-9933b825f8b1"]} + responses: + "400": + application/json: {"detail": {"message": "", "error_code": "JUDGE_CONVERSATION_FORMAT_ERROR"}} + judge_dataset_record_v1_observability_dataset_records__dataset_record_id__live_judging_post: + speakeasy-default-judge-dataset-record-v1-observability-dataset-records-dataset-record-id-live-judging-post: + parameters: + path: + dataset_record_id: 
"9de5d7a1-787a-45dd-b668-9f3407e76d8b" + requestBody: + application/json: {"judge_definition": {"name": "", "description": "wisely railway deceivingly arcade minion back what yowza outrun service", "model_name": "", "output": {"type": "CLASSIFICATION", "options": [{"value": "", "description": "spork excluding without retrospectivity bah next yearly"}]}, "instructions": "", "tools": [""]}} + responses: + "200": + application/json: {"analysis": "", "answer": ""} + "400": + application/json: {"detail": {"message": "", "error_code": "DATASET_TASK_NOT_FOUND"}} + update_dataset_record_payload_v1_observability_dataset_records__dataset_record_id__payload_put: + speakeasy-default-update-dataset-record-payload-v1-observability-dataset-records-dataset-record-id-payload-put: + parameters: + path: + dataset_record_id: "17506b15-748e-4e7c-9737-c97c44d04b0f" + requestBody: + application/json: {"payload": {"messages": [{"key": ""}, {}, {"key": ""}]}} + responses: + "400": + application/json: {"detail": {"message": "", "error_code": "EVALUATION_RUN_TRANSITION_IS_RUNNING_ALREADY"}} + update_dataset_record_properties_v1_observability_dataset_records__dataset_record_id__properties_put: + speakeasy-default-update-dataset-record-properties-v1-observability-dataset-records-dataset-record-id-properties-put: + parameters: + path: + dataset_record_id: "a4deefc5-0905-427e-ad15-1090ef9e216d" + requestBody: + application/json: {"properties": {"key": "", "key1": "", "key2": ""}} + responses: + "400": + application/json: {"detail": {"message": "", "error_code": null}} + connector_create_v1: + speakeasy-default-connector-create-v1: + requestBody: + application/json: {"name": "", "description": "unibody usually despite slushy wherever reward stingy from", "server": "https://royal-majority.net/"} + responses: + "201": + application/json: {"id": "3633ef72-f5c6-4354-b510-7167244672aa", "name": "", "description": "why yahoo zany potentially", "created_at": "2024-05-03T05:17:01.129Z", "modified_at": 
"2025-09-26T20:42:01.861Z"} + "422": + application/json: {} + connector_list_v1: + speakeasy-default-connector-list-v1: + parameters: + query: + page_size: 100 + responses: + "200": + application/json: {"items": [], "pagination": {"page_size": 600083}} + "422": + application/json: {} + connector_call_tool_v1: + speakeasy-default-connector-call-tool-v1: + parameters: + path: + tool_name: "" + connector_id_or_name: "" + requestBody: + application/json: {} + responses: + "200": + application/json: {"content": [{"type": "resource", "resource": {"uri": "https://good-certification.net", "text": ""}}]} + "422": + application/json: {} + connector_get_v1: + speakeasy-default-connector-get-v1: + parameters: + path: + connector_id_or_name: "" + query: + fetch_customer_data: false + fetch_connection_secrets: false + responses: + "200": + application/json: {"id": "f6d4a827-9530-4a4c-af7f-ac5d6264abbf", "name": "", "description": "diversity nudge pulverize offensively", "created_at": "2024-12-11T18:18:39.930Z", "modified_at": "2026-11-21T21:28:01.899Z"} + "422": + application/json: {} + connector_update_v1: + speakeasy-default-connector-update-v1: + parameters: + path: + connector_id: "81d30634-113f-4dce-a89e-7786be2d8693" + requestBody: + application/json: {} + responses: + "200": + application/json: {"id": "987e4f32-6930-41a7-8adf-6e893eddc3d9", "name": "", "description": "especially usually collaborate likewise svelte tightly very", "created_at": "2026-09-13T13:33:05.718Z", "modified_at": "2026-06-23T06:11:22.023Z"} + "422": + application/json: {} + connector_delete_v1: + speakeasy-default-connector-delete-v1: + parameters: + path: + connector_id: "5c3269fe-6a18-4216-b1fb-b093005874cd" + responses: + "200": + application/json: {"message": ""} + "422": + application/json: {} examplesVersion: 1.0.2 generatedTests: {} -releaseNotes: | - ## Python SDK Changes: - * `mistral.beta.libraries.documents.list()`: `response.data[].process_status` **Added** - * 
`mistral.beta.libraries.documents.upload()`: `response.process_status` **Added** - * `mistral.beta.libraries.documents.get()`: `response.process_status` **Added** - * `mistral.beta.libraries.documents.update()`: `response.process_status` **Added** - * `mistral.beta.libraries.documents.status()`: `response.process_status` **Added** +releaseNotes: "## Python SDK Changes:\n* `mistral.beta.conversations.start()`: \n * `request` **Changed** (Breaking ⚠️)\n * `response` **Changed** (Breaking ⚠️)\n * `error.detail[]` **Changed**\n* `mistral.beta.conversations.append()`: \n * `request.inputs.union(Array)[]` **Changed** (Breaking ⚠️)\n * `response` **Changed** (Breaking ⚠️)\n * `error.detail[]` **Changed**\n* `mistral.beta.conversations.get_history()`: \n * `response.entries[]` **Changed** (Breaking ⚠️)\n * `error.detail[]` **Changed**\n* `mistral.beta.conversations.get_messages()`: \n * `response.messages[]` **Changed** (Breaking ⚠️)\n * `error.detail[]` **Changed**\n* `mistral.beta.conversations.restart()`: \n * `request` **Changed** (Breaking ⚠️)\n * `response` **Changed** (Breaking ⚠️)\n * `error.detail[]` **Changed**\n* `mistral.agents.stream()`: \n * `request.messages[].union(tool).content.union(Array)[]` **Changed**\n * `response.[].data.choices[].delta.content.union(Array)[]` **Changed** (Breaking ⚠️)\n * `error.detail[]` **Changed**\n* `mistral.agents.complete()`: \n * `request.messages[]` **Changed**\n * `response.choices[].message.content.union(Array)[]` **Changed** (Breaking ⚠️)\n * `error.detail[]` **Changed**\n* `mistral.fim.stream()`: \n * `response.[].data.choices[].delta.content.union(Array)[]` **Changed** (Breaking ⚠️)\n * `error.detail[]` **Changed**\n* `mistral.fim.complete()`: \n * `response.choices[].message.content.union(Array)[]` **Changed** (Breaking ⚠️)\n * `error.detail[]` **Changed**\n* `mistral.chat.stream()`: \n * `request.messages[]` **Changed**\n * `response.[].data.choices[].delta.content.union(Array)[]` **Changed** (Breaking ⚠️)\n * 
`error.detail[]` **Changed**\n* `mistral.chat.complete()`: \n * `request.messages[].union(tool).content.union(Array)[]` **Changed**\n * `response.choices[].message.content.union(Array)[]` **Changed** (Breaking ⚠️)\n * `error.detail[]` **Changed**\n* `mistral.beta.conversations.restart_stream()`: \n * `request` **Changed** (Breaking ⚠️)\n * `response.[].data.union(message.output.delta).content.union(OutputContentChunks)` **Changed** (Breaking ⚠️)\n * `error.detail[]` **Changed**\n* `mistral.beta.conversations.append_stream()`: \n * `request.inputs.union(Array)[]` **Changed** (Breaking ⚠️)\n * `response.[].data.union(message.output.delta).content.union(OutputContentChunks)` **Changed** (Breaking ⚠️)\n * `error.detail[]` **Changed**\n* `mistral.beta.conversations.start_stream()`: \n * `request` **Changed** (Breaking ⚠️)\n * `response.[].data.union(message.output.delta).content.union(OutputContentChunks)` **Changed** (Breaking ⚠️)\n * `error.detail[]` **Changed**\n* `mistral.beta.connectors.delete()`: **Added**\n* `mistral.beta.conversations.delete()`: `error.detail[]` **Changed**\n* `mistral.beta.observability.judges.delete()`: **Added**\n* `mistral.beta.observability.judges.update()`: **Added**\n* `mistral.beta.observability.campaigns.create()`: **Added**\n* `mistral.beta.observability.campaigns.list()`: **Added**\n* `mistral.beta.observability.campaigns.fetch()`: **Added**\n* `mistral.beta.observability.campaigns.delete()`: **Added**\n* `mistral.beta.observability.campaigns.fetch_status()`: **Added**\n* `mistral.beta.observability.campaigns.list_events()`: **Added**\n* `mistral.beta.observability.datasets.create()`: **Added**\n* `mistral.beta.observability.datasets.list()`: **Added**\n* `mistral.beta.observability.datasets.fetch()`: **Added**\n* `mistral.beta.observability.datasets.delete()`: **Added**\n* `mistral.beta.observability.datasets.update()`: **Added**\n* `mistral.beta.observability.datasets.list_records()`: **Added**\n* 
`mistral.beta.observability.datasets.create_record()`: **Added**\n* `mistral.beta.observability.datasets.import_from_campaign()`: **Added**\n* `mistral.beta.observability.datasets.import_from_explorer()`: **Added**\n* `mistral.beta.observability.datasets.import_from_file()`: **Added**\n* `mistral.beta.observability.datasets.import_from_playground()`: **Added**\n* `mistral.beta.observability.datasets.import_from_dataset_records()`: **Added**\n* `mistral.beta.observability.datasets.export_to_jsonl()`: **Added**\n* `mistral.beta.observability.datasets.fetch_task()`: **Added**\n* `mistral.beta.observability.datasets.list_tasks()`: **Added**\n* `mistral.beta.observability.datasets.records.fetch()`: **Added**\n* `mistral.beta.observability.datasets.records.delete()`: **Added**\n* `mistral.beta.observability.datasets.records.bulk_delete()`: **Added**\n* `mistral.beta.observability.datasets.records.judge()`: **Added**\n* `mistral.beta.observability.datasets.records.update_payload()`: **Added**\n* `mistral.beta.observability.datasets.records.update_properties()`: **Added**\n* `mistral.beta.connectors.create()`: **Added**\n* `mistral.beta.connectors.list()`: **Added**\n* `mistral.beta.connectors.call_tool()`: **Added**\n* `mistral.beta.connectors.get()`: **Added**\n* `mistral.beta.connectors.update()`: **Added**\n* `mistral.beta.observability.judges.list()`: **Added**\n* `mistral.models.list()`: \n * `request` **Changed**\n * `error.status[422]` **Added**\n* `mistral.models.retrieve()`: `error.detail[]` **Changed**\n* `mistral.beta.observability.judges.create()`: **Added**\n* `mistral.beta.observability.chat_completion_events.fields.fetch_option_counts()`: **Added**\n* `mistral.beta.observability.chat_completion_events.fields.fetch_options()`: **Added**\n* `mistral.models.delete()`: `error.detail[]` **Changed**\n* `mistral.beta.conversations.list()`: \n * `response.[].union(ModelConversation).guardrails` **Added**\n * `error.detail[]` **Changed**\n* 
`mistral.beta.conversations.get()`: \n * `response.union(ModelConversation).guardrails` **Added**\n * `error.detail[]` **Changed**\n* `mistral.beta.observability.judges.fetch()`: **Added**\n* `mistral.beta.agents.create()`: \n * `request.guardrails` **Added**\n * `response.guardrails` **Added**\n * `error.detail[]` **Changed**\n* `mistral.beta.agents.list()`: \n * `response.[].guardrails` **Added**\n * `error.detail[]` **Changed**\n* `mistral.beta.agents.get()`: \n * `response.guardrails` **Added**\n * `error.detail[]` **Changed**\n* `mistral.beta.agents.update()`: \n * `request.guardrails` **Added**\n * `response.guardrails` **Added**\n * `error.detail[]` **Changed**\n* `mistral.beta.agents.delete()`: `error.detail[]` **Changed**\n* `mistral.beta.agents.update_version()`: \n * `response.guardrails` **Added**\n * `error.detail[]` **Changed**\n* `mistral.beta.agents.list_versions()`: \n * `response.[].guardrails` **Added**\n * `error.detail[]` **Changed**\n* `mistral.beta.agents.get_version()`: \n * `response.guardrails` **Added**\n * `error.detail[]` **Changed**\n* `mistral.beta.agents.create_version_alias()`: `error.detail[]` **Changed**\n* `mistral.beta.agents.list_version_aliases()`: `error.detail[]` **Changed**\n* `mistral.beta.agents.delete_version_alias()`: `error.detail[]` **Changed**\n* `mistral.beta.libraries.create()`: `error.detail[]` **Changed**\n* `mistral.beta.libraries.get()`: `error.detail[]` **Changed**\n* `mistral.beta.libraries.delete()`: `error.detail[]` **Changed**\n* `mistral.beta.libraries.update()`: `error.detail[]` **Changed**\n* `mistral.beta.libraries.documents.list()`: `error.detail[]` **Changed**\n* `mistral.beta.libraries.documents.upload()`: `error.detail[]` **Changed**\n* `mistral.beta.libraries.documents.get()`: `error.detail[]` **Changed**\n* `mistral.beta.libraries.documents.update()`: `error.detail[]` **Changed**\n* `mistral.beta.libraries.documents.delete()`: `error.detail[]` **Changed**\n* 
`mistral.beta.libraries.documents.text_content()`: `error.detail[]` **Changed**\n* `mistral.beta.libraries.documents.status()`: `error.detail[]` **Changed**\n* `mistral.beta.libraries.documents.get_signed_url()`: `error.detail[]` **Changed**\n* `mistral.beta.libraries.documents.extracted_text_signed_url()`: `error.detail[]` **Changed**\n* `mistral.beta.libraries.documents.reprocess()`: `error.detail[]` **Changed**\n* `mistral.beta.libraries.accesses.list()`: `error.detail[]` **Changed**\n* `mistral.beta.libraries.accesses.update_or_create()`: `error.detail[]` **Changed**\n* `mistral.beta.libraries.accesses.delete()`: `error.detail[]` **Changed**\n* `mistral.files.upload()`: \n * `request` **Changed**\n * `response` **Changed**\n* `mistral.files.list()`: `response.data[]` **Changed**\n* `mistral.files.retrieve()`: `response` **Changed**\n* `mistral.beta.observability.chat_completion_events.fields.list()`: **Added**\n* `mistral.beta.observability.chat_completion_events.judge()`: **Added**\n* `mistral.beta.observability.chat_completion_events.fetch_similar_events()`: **Added**\n* `mistral.beta.observability.chat_completion_events.fetch()`: **Added**\n* `mistral.beta.observability.chat_completion_events.search_ids()`: **Added**\n* `mistral.beta.observability.chat_completion_events.search()`: **Added**\n* `mistral.embeddings.create()`: `error.detail[]` **Changed**\n* `mistral.classifiers.moderate()`: `error.detail[]` **Changed**\n* `mistral.classifiers.moderate_chat()`: \n * `request.inputs.union(Array<>)[]` **Changed**\n * `error.detail[]` **Changed**\n* `mistral.classifiers.classify()`: `error.detail[]` **Changed**\n* `mistral.classifiers.classify_chat()`: \n * `request.input.union(Array)[].messages[].union(tool).content.union(Array)[]` **Changed**\n * `error.detail[]` **Changed**\n* `mistral.ocr.process()`: `error.detail[]` **Changed**\n" generatedFiles: - .gitattributes - .vscode/settings.json diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 
1c82d91a..1205fef8 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -31,7 +31,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 2.0.0rc1 + version: 2.0.0 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 0e0db8ba..15c359ef 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -16,11 +16,11 @@ sources: - speakeasy-sdk-regen-1772455561 mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:52cd52dd6272c5afe08072790b36e34de9a65c41357bab87a45cf6635dc30db0 - sourceBlobDigest: sha256:7eb63e6d0b2226456aad34b5ae9edd75cc8e015643d478c09b717852e2852065 + sourceRevisionDigest: sha256:640293da26384ecedd813123997d7946815084924b8cbd91069aea0829d28b53 + sourceBlobDigest: sha256:940626b1345a63b95ae840bd523458ea2b418767d64b864ad06ded4d38c6ea4d tags: - latest - - speakeasy-sdk-regen-1772205200 + - speakeasy-sdk-regen-1773084143 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -39,10 +39,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:52cd52dd6272c5afe08072790b36e34de9a65c41357bab87a45cf6635dc30db0 - sourceBlobDigest: sha256:7eb63e6d0b2226456aad34b5ae9edd75cc8e015643d478c09b717852e2852065 + sourceRevisionDigest: sha256:640293da26384ecedd813123997d7946815084924b8cbd91069aea0829d28b53 + sourceBlobDigest: sha256:940626b1345a63b95ae840bd523458ea2b418767d64b864ad06ded4d38c6ea4d codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:534088a1428d166f80e9669ec6bc67d277e22113c745ef8904789f0c6e6381d9 + codeSamplesRevisionDigest: sha256:57a8f791effc62a15982c2ca9560bddbf4a9d03ae9cb0171f352e73a34e7cf61 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.729.0 diff --git a/README.md b/README.md index dd98b5cc..a2289844 100644 --- a/README.md +++ b/README.md @@ -218,7 +218,7 @@ with Mistral( res = 
mistral.files.upload(file={ "file_name": "example.file", "content": open("example.file", "rb"), - }) + }, visibility="workspace") # Handle response print(res) @@ -243,7 +243,7 @@ async def main(): res = await mistral.files.upload_async(file={ "file_name": "example.file", "content": open("example.file", "rb"), - }) + }, visibility="workspace") # Handle response print(res) @@ -498,6 +498,15 @@ print(res.choices[0].message.content) * [list_version_aliases](docs/sdks/betaagents/README.md#list_version_aliases) - List all aliases for an agent. * [delete_version_alias](docs/sdks/betaagents/README.md#delete_version_alias) - Delete an agent version alias. +### [Beta.Connectors](docs/sdks/connectors/README.md) + +* [create](docs/sdks/connectors/README.md#create) - Create a new connector. +* [list](docs/sdks/connectors/README.md#list) - List all connectors. +* [call_tool](docs/sdks/connectors/README.md#call_tool) - Call Connector Tool +* [get](docs/sdks/connectors/README.md#get) - Get a connector. +* [update](docs/sdks/connectors/README.md#update) - Update a connector. +* [delete](docs/sdks/connectors/README.md#delete) - Delete a connector. + ### [Beta.Conversations](docs/sdks/conversations/README.md) * [start](docs/sdks/conversations/README.md#start) - Create a conversation and append entries to it. @@ -539,6 +548,64 @@ print(res.choices[0].message.content) * [extracted_text_signed_url](docs/sdks/documents/README.md#extracted_text_signed_url) - Retrieve the signed URL of text extracted from a given document. * [reprocess](docs/sdks/documents/README.md#reprocess) - Reprocess a document. 
+### [Beta.Observability.Campaigns](docs/sdks/campaigns/README.md) + +* [create](docs/sdks/campaigns/README.md#create) - Create and start a new campaign +* [list](docs/sdks/campaigns/README.md#list) - Get all campaigns +* [fetch](docs/sdks/campaigns/README.md#fetch) - Get campaign by id +* [delete](docs/sdks/campaigns/README.md#delete) - Delete a campaign +* [fetch_status](docs/sdks/campaigns/README.md#fetch_status) - Get campaign status by campaign id +* [list_events](docs/sdks/campaigns/README.md#list_events) - Get event ids that were selected by the given campaign + +### [Beta.Observability.ChatCompletionEvents](docs/sdks/chatcompletionevents/README.md) + +* [search](docs/sdks/chatcompletionevents/README.md#search) - Get Chat Completion Events +* [search_ids](docs/sdks/chatcompletionevents/README.md#search_ids) - Alternative to /search that returns only the IDs and that can return many IDs at once +* [fetch](docs/sdks/chatcompletionevents/README.md#fetch) - Get Chat Completion Event +* [fetch_similar_events](docs/sdks/chatcompletionevents/README.md#fetch_similar_events) - Get Similar Chat Completion Events +* [judge](docs/sdks/chatcompletionevents/README.md#judge) - Run Judge on an event based on the given options + +#### [Beta.Observability.ChatCompletionEvents.Fields](docs/sdks/fields/README.md) + +* [list](docs/sdks/fields/README.md#list) - Get Chat Completion Fields +* [fetch_options](docs/sdks/fields/README.md#fetch_options) - Get Chat Completion Field Options +* [fetch_option_counts](docs/sdks/fields/README.md#fetch_option_counts) - Get Chat Completion Field Options Counts + +### [Beta.Observability.Datasets](docs/sdks/datasets/README.md) + +* [create](docs/sdks/datasets/README.md#create) - Create a new empty dataset +* [list](docs/sdks/datasets/README.md#list) - List existing datasets +* [fetch](docs/sdks/datasets/README.md#fetch) - Get dataset by id +* [delete](docs/sdks/datasets/README.md#delete) - Delete a dataset +* 
[update](docs/sdks/datasets/README.md#update) - Patch dataset +* [list_records](docs/sdks/datasets/README.md#list_records) - List existing records in the dataset +* [create_record](docs/sdks/datasets/README.md#create_record) - Add a conversation to the dataset +* [import_from_campaign](docs/sdks/datasets/README.md#import_from_campaign) - Populate the dataset with a campaign +* [import_from_explorer](docs/sdks/datasets/README.md#import_from_explorer) - Populate the dataset with samples from the explorer +* [import_from_file](docs/sdks/datasets/README.md#import_from_file) - Populate the dataset with samples from an uploaded file +* [import_from_playground](docs/sdks/datasets/README.md#import_from_playground) - Populate the dataset with samples from the playground +* [import_from_dataset_records](docs/sdks/datasets/README.md#import_from_dataset_records) - Populate the dataset with samples from another dataset +* [export_to_jsonl](docs/sdks/datasets/README.md#export_to_jsonl) - Export to the Files API and retrieve presigned URL to download the resulting JSONL file +* [fetch_task](docs/sdks/datasets/README.md#fetch_task) - Get status of a dataset import task +* [list_tasks](docs/sdks/datasets/README.md#list_tasks) - List import tasks for the given dataset + +#### [Beta.Observability.Datasets.Records](docs/sdks/records/README.md) + +* [fetch](docs/sdks/records/README.md#fetch) - Get the content of a given conversation from a dataset +* [delete](docs/sdks/records/README.md#delete) - Delete a record from a dataset +* [bulk_delete](docs/sdks/records/README.md#bulk_delete) - Delete multiple records from datasets +* [judge](docs/sdks/records/README.md#judge) - Run Judge on a dataset record based on the given options +* [update_payload](docs/sdks/records/README.md#update_payload) - Update a dataset record conversation payload +* [update_properties](docs/sdks/records/README.md#update_properties) - Update conversation properties + +### 
[Beta.Observability.Judges](docs/sdks/judges/README.md) + +* [create](docs/sdks/judges/README.md#create) - Create a new judge +* [list](docs/sdks/judges/README.md#list) - Get judges with optional filtering and search +* [fetch](docs/sdks/judges/README.md#fetch) - Get judge by id +* [delete](docs/sdks/judges/README.md#delete) - Delete a judge +* [update](docs/sdks/judges/README.md#update) - Update a judge + ### [Chat](docs/sdks/chat/README.md) * [complete](docs/sdks/chat/README.md#complete) - Chat Completion @@ -739,7 +806,7 @@ with Mistral( res = None try: - res = mistral.models.retrieve(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") + res = mistral.models.list() # Handle response print(res) @@ -762,7 +829,7 @@ with Mistral( **Primary error:** * [`MistralError`](./src/mistralai/client/errors/mistralerror.py): The base class for HTTP error responses. -
Less common errors (6) +
Less common errors (7)
@@ -773,7 +840,8 @@ with Mistral( **Inherit from [`MistralError`](./src/mistralai/client/errors/mistralerror.py)**: -* [`HTTPValidationError`](./src/mistralai/client/errors/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 53 of 75 methods.* +* [`HTTPValidationError`](./src/mistralai/client/errors/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 60 of 121 methods.* +* [`ObservabilityError`](./src/mistralai/client/errors/observabilityerror.py): Bad Request - Invalid request parameters or data. Applicable to 40 of 121 methods.* * [`ResponseValidationError`](./src/mistralai/client/errors/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute.
diff --git a/RELEASES.md b/RELEASES.md index 1a631692..99c72716 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -388,4 +388,14 @@ Based on: ### Generated - [python v2.0.0rc1] . ### Releases -- [PyPI v2.0.0rc1] https://pypi.org/project/mistralai/2.0.0rc1 - . \ No newline at end of file +- [PyPI v2.0.0rc1] https://pypi.org/project/mistralai/2.0.0rc1 - . + +## 2026-03-09 19:22:03 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0] . +### Releases +- [PyPI v2.0.0] https://pypi.org/project/mistralai/2.0.0 - . \ No newline at end of file diff --git a/USAGE.md b/USAGE.md index f71bbabc..9d32240a 100644 --- a/USAGE.md +++ b/USAGE.md @@ -74,7 +74,7 @@ with Mistral( res = mistral.files.upload(file={ "file_name": "example.file", "content": open("example.file", "rb"), - }) + }, visibility="workspace") # Handle response print(res) @@ -99,7 +99,7 @@ async def main(): res = await mistral.files.upload_async(file={ "file_name": "example.file", "content": open("example.file", "rb"), - }) + }, visibility="workspace") # Handle response print(res) diff --git a/docs/errors/observabilityerror.md b/docs/errors/observabilityerror.md new file mode 100644 index 00000000..615552c3 --- /dev/null +++ b/docs/errors/observabilityerror.md @@ -0,0 +1,8 @@ +# ObservabilityError + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `detail` | [models.ObservabilityErrorDetail](../models/observabilityerrordetail.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agent.md b/docs/models/agent.md index 4de5a901..94e8b035 100644 --- a/docs/models/agent.md +++ 
b/docs/models/agent.md @@ -8,6 +8,7 @@ | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | | `tools` | List[[models.AgentTool](../models/agenttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `guardrails` | List[[models.GuardrailConfig](../models/guardrailconfig.md)] | :heavy_minus_sign: | N/A | | `model` | *str* | :heavy_check_mark: | N/A | | `name` | *str* | :heavy_check_mark: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/and_.md b/docs/models/and_.md new file mode 100644 index 00000000..591b7bb3 --- /dev/null +++ b/docs/models/and_.md @@ -0,0 +1,17 @@ +# And + + +## Supported Types + +### `models.FilterGroup` + +```python +value: models.FilterGroup = /* values here */ +``` + +### `models.FilterCondition` + +```python +value: models.FilterCondition = /* values here */ +``` + diff --git a/docs/models/annotations.md b/docs/models/annotations.md new file mode 100644 index 00000000..2a23157c --- /dev/null +++ b/docs/models/annotations.md @@ -0,0 +1,10 @@ +# Annotations + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `audience` | List[[models.Audience](../models/audience.md)] | :heavy_minus_sign: | N/A | +| `priority` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/answer.md b/docs/models/answer.md new file mode 100644 index 00000000..324a8e4e --- /dev/null +++ 
b/docs/models/answer.md @@ -0,0 +1,17 @@ +# Answer + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `float` + +```python +value: float = /* values here */ +``` + diff --git a/docs/models/audience.md b/docs/models/audience.md new file mode 100644 index 00000000..ac325076 --- /dev/null +++ b/docs/models/audience.md @@ -0,0 +1,9 @@ +# Audience + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `USER` | user | +| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/docs/models/audiocontent.md b/docs/models/audiocontent.md new file mode 100644 index 00000000..64ab6d26 --- /dev/null +++ b/docs/models/audiocontent.md @@ -0,0 +1,15 @@ +# AudioContent + +Audio content for a message. + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | *Literal["audio"]* | :heavy_check_mark: | N/A | +| `data` | *str* | :heavy_check_mark: | N/A | +| `mime_type` | *str* | :heavy_check_mark: | N/A | +| `annotations` | [OptionalNullable[models.Annotations]](../models/annotations.md) | :heavy_minus_sign: | N/A | +| `meta` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/audiourl.md b/docs/models/audiourl.md new file mode 100644 index 00000000..45ad7493 --- /dev/null +++ b/docs/models/audiourl.md @@ -0,0 +1,13 @@ +# AudioURL + +Audio URL. + +Attributes: + url: The URL of the audio file. 
+ + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `url` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/audiourlchunk.md b/docs/models/audiourlchunk.md new file mode 100644 index 00000000..56af9df9 --- /dev/null +++ b/docs/models/audiourlchunk.md @@ -0,0 +1,15 @@ +# AudioURLChunk + +Audio URL chunk. + +Attributes: + type: The type of the chunk, which is always `ChunkTypes.audio_url`. + audio_url: The URL of the audio file. + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `type` | *Literal["audio_url"]* | :heavy_check_mark: | N/A | +| `audio_url` | [models.AudioURLUnion](../models/audiourlunion.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/audiourlunion.md b/docs/models/audiourlunion.md new file mode 100644 index 00000000..96669066 --- /dev/null +++ b/docs/models/audiourlunion.md @@ -0,0 +1,17 @@ +# AudioURLUnion + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `models.AudioURL` + +```python +value: models.AudioURL = /* values here */ +``` + diff --git a/docs/models/authdata.md b/docs/models/authdata.md new file mode 100644 index 00000000..d0784e66 --- /dev/null +++ b/docs/models/authdata.md @@ -0,0 +1,9 @@ +# AuthData + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `client_id` | *str* | :heavy_check_mark: | N/A | +| `client_secret` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/basefielddefinition.md b/docs/models/basefielddefinition.md new file mode 100644 index 
00000000..3f7abea9 --- /dev/null +++ b/docs/models/basefielddefinition.md @@ -0,0 +1,12 @@ +# BaseFieldDefinition + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `label` | *str* | :heavy_check_mark: | N/A | +| `type` | [models.TypeEnum](../models/typeenum.md) | :heavy_check_mark: | N/A | +| `group` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `supported_operators` | List[[models.SupportedOperator](../models/supportedoperator.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/basetaskstatus.md b/docs/models/basetaskstatus.md new file mode 100644 index 00000000..8fad1e10 --- /dev/null +++ b/docs/models/basetaskstatus.md @@ -0,0 +1,15 @@ +# BaseTaskStatus + + +## Values + +| Name | Value | +| ------------------ | ------------------ | +| `RUNNING` | RUNNING | +| `COMPLETED` | COMPLETED | +| `FAILED` | FAILED | +| `CANCELED` | CANCELED | +| `TERMINATED` | TERMINATED | +| `CONTINUED_AS_NEW` | CONTINUED_AS_NEW | +| `TIMED_OUT` | TIMED_OUT | +| `UNKNOWN` | UNKNOWN | \ No newline at end of file diff --git a/docs/models/blobresourcecontents.md b/docs/models/blobresourcecontents.md new file mode 100644 index 00000000..c862e537 --- /dev/null +++ b/docs/models/blobresourcecontents.md @@ -0,0 +1,14 @@ +# BlobResourceContents + +Binary contents of a resource. 
+ + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `uri` | *str* | :heavy_check_mark: | N/A | +| `mime_type` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `meta` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `blob` | *str* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/campaign.md b/docs/models/campaign.md new file mode 100644 index 00000000..1f2a7a36 --- /dev/null +++ b/docs/models/campaign.md @@ -0,0 +1,18 @@ +# Campaign + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `deleted_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `owner_id` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | +| `description` | *str* | :heavy_check_mark: | N/A | +| `max_nb_events` | *int* | :heavy_check_mark: | N/A | +| `search_params` | [models.FilterPayload](../models/filterpayload.md) | :heavy_check_mark: | N/A | +| `judge` | [models.Judge](../models/judge.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/chatcompletionevent.md b/docs/models/chatcompletionevent.md new file mode 100644 index 
00000000..500192f6 --- /dev/null +++ b/docs/models/chatcompletionevent.md @@ -0,0 +1,18 @@ +# ChatCompletionEvent + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | +| `event_id` | *str* | :heavy_check_mark: | N/A | +| `correlation_id` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `extra_fields` | Dict[str, [Nullable[models.ChatCompletionEventExtraFields]](../models/chatcompletioneventextrafields.md)] | :heavy_check_mark: | N/A | +| `nb_input_tokens` | *int* | :heavy_check_mark: | N/A | +| `nb_output_tokens` | *int* | :heavy_check_mark: | N/A | +| `enabled_tools` | List[Dict[str, *Any*]] | :heavy_check_mark: | N/A | +| `request_messages` | List[Dict[str, *Any*]] | :heavy_check_mark: | N/A | +| `response_messages` | List[Dict[str, *Any*]] | :heavy_check_mark: | N/A | +| `nb_messages` | *int* | :heavy_check_mark: | N/A | +| `chat_transcription_events` | List[[models.ChatTranscriptionEvent](../models/chattranscriptionevent.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/chatcompletioneventextrafields.md b/docs/models/chatcompletioneventextrafields.md new file mode 100644 index 00000000..c2b7f855 --- /dev/null +++ b/docs/models/chatcompletioneventextrafields.md @@ -0,0 +1,41 @@ +# ChatCompletionEventExtraFields + + +## Supported Types + +### `bool` + +```python +value: bool = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + +### `float` + +```python 
+value: float = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + +### `datetime` + +```python +value: datetime = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/chatcompletioneventpreview.md b/docs/models/chatcompletioneventpreview.md new file mode 100644 index 00000000..855e8ab0 --- /dev/null +++ b/docs/models/chatcompletioneventpreview.md @@ -0,0 +1,13 @@ +# ChatCompletionEventPreview + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | +| `event_id` | *str* | :heavy_check_mark: | N/A | +| `correlation_id` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `extra_fields` | Dict[str, [Nullable[models.ChatCompletionEventPreviewExtraFields]](../models/chatcompletioneventpreviewextrafields.md)] | :heavy_check_mark: | N/A | +| `nb_input_tokens` | *int* | :heavy_check_mark: | N/A | +| `nb_output_tokens` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/chatcompletioneventpreviewextrafields.md b/docs/models/chatcompletioneventpreviewextrafields.md new file mode 100644 index 00000000..dd213827 --- /dev/null +++ b/docs/models/chatcompletioneventpreviewextrafields.md @@ -0,0 +1,41 @@ +# ChatCompletionEventPreviewExtraFields + + +## Supported Types + +### `bool` + +```python +value: bool = /* values here */ +``` + +### `int` + 
+```python +value: int = /* values here */ +``` + +### `float` + +```python +value: float = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + +### `datetime` + +```python +value: datetime = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/chattranscriptionevent.md b/docs/models/chattranscriptionevent.md new file mode 100644 index 00000000..c2a38ed5 --- /dev/null +++ b/docs/models/chattranscriptionevent.md @@ -0,0 +1,10 @@ +# ChatTranscriptionEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `audio_url` | *str* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `response_message` | Dict[str, *Any*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/connector.md b/docs/models/connector.md new file mode 100644 index 00000000..a5d6073e --- /dev/null +++ b/docs/models/connector.md @@ -0,0 +1,15 @@ +# Connector + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `modified_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `server` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `auth_type` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tools` | 
List[[models.ConnectorTool](../models/connectortool.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/connectorcalltoolrequest.md b/docs/models/connectorcalltoolrequest.md new file mode 100644 index 00000000..9ef7a351 --- /dev/null +++ b/docs/models/connectorcalltoolrequest.md @@ -0,0 +1,10 @@ +# ConnectorCallToolRequest + +Request body for calling an MCP tool. + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `arguments` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/connectorcalltoolv1request.md b/docs/models/connectorcalltoolv1request.md new file mode 100644 index 00000000..cdda08cf --- /dev/null +++ b/docs/models/connectorcalltoolv1request.md @@ -0,0 +1,10 @@ +# ConnectorCallToolV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `tool_name` | *str* | :heavy_check_mark: | N/A | +| `connector_id_or_name` | *str* | :heavy_check_mark: | N/A | +| `connector_call_tool_request` | [models.ConnectorCallToolRequest](../models/connectorcalltoolrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/connectordeletev1request.md b/docs/models/connectordeletev1request.md new file mode 100644 index 00000000..e50c7296 --- /dev/null +++ b/docs/models/connectordeletev1request.md @@ -0,0 +1,8 @@ +# ConnectorDeleteV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `connector_id` | *str* | :heavy_check_mark: | N/A | \ No newline at 
end of file diff --git a/docs/models/connectorgetv1request.md b/docs/models/connectorgetv1request.md new file mode 100644 index 00000000..c45148b9 --- /dev/null +++ b/docs/models/connectorgetv1request.md @@ -0,0 +1,10 @@ +# ConnectorGetV1Request + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | +| `fetch_customer_data` | *Optional[bool]* | :heavy_minus_sign: | Fetch the customer data associated with the connector (e.g. customer secrets / config). | +| `fetch_connection_secrets` | *Optional[bool]* | :heavy_minus_sign: | Fetch the general connection secrets associated with the connector. | +| `connector_id_or_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/connectorlistv1request.md b/docs/models/connectorlistv1request.md new file mode 100644 index 00000000..6b9a287e --- /dev/null +++ b/docs/models/connectorlistv1request.md @@ -0,0 +1,10 @@ +# ConnectorListV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `query_filters` | [Optional[models.ConnectorsQueryFilters]](../models/connectorsqueryfilters.md) | :heavy_minus_sign: | N/A | +| `cursor` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git 
a/docs/models/connectorsqueryfilters.md b/docs/models/connectorsqueryfilters.md new file mode 100644 index 00000000..aea47e14 --- /dev/null +++ b/docs/models/connectorsqueryfilters.md @@ -0,0 +1,9 @@ +# ConnectorsQueryFilters + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `active` | *OptionalNullable[bool]* | :heavy_minus_sign: | Filter for active connectors for a given user, workspace and organization. | +| `fetch_connection_secrets` | *Optional[bool]* | :heavy_minus_sign: | Fetch connection secrets. | \ No newline at end of file diff --git a/docs/models/connectortool.md b/docs/models/connectortool.md new file mode 100644 index 00000000..af5cc03b --- /dev/null +++ b/docs/models/connectortool.md @@ -0,0 +1,18 @@ +# ConnectorTool + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *str* | :heavy_check_mark: | N/A | +| `system_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `locale` | [OptionalNullable[models.ConnectorToolLocale]](../models/connectortoollocale.md) | :heavy_minus_sign: | N/A | +| `jsonschema` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `execution_config` | [Nullable[models.ExecutionConfig]](../models/executionconfig.md) | :heavy_check_mark: | N/A | +| 
`visibility` | [models.ResourceVisibility](../models/resourcevisibility.md) | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `modified_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `active` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/connectortoolcallmetadata.md b/docs/models/connectortoolcallmetadata.md new file mode 100644 index 00000000..4d44a2d0 --- /dev/null +++ b/docs/models/connectortoolcallmetadata.md @@ -0,0 +1,14 @@ +# ConnectorToolCallMetadata + +Metadata wrapper for MCP tool call responses. + +Nests MCP-specific fields under `mcp_meta` to avoid collisions with other +metadata keys (e.g. `tool_call_result`) in Harmattan's streaming deltas. + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | +| `mcp_meta` | [OptionalNullable[models.ConnectorToolResultMetadata]](../models/connectortoolresultmetadata.md) | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/connectortoolcallresponse.md b/docs/models/connectortoolcallresponse.md new file mode 100644 index 00000000..1c51b9ac --- /dev/null +++ b/docs/models/connectortoolcallresponse.md @@ -0,0 +1,18 @@ +# ConnectorToolCallResponse + +Response from calling an MCP tool. 
+ +We override mcp_types.CallToolResult because: +- Models only support `content`, not `structuredContent` at top level +- Downstream consumers (le-chat, etc.) need structuredContent/isError/_meta via metadata + +SYNC: Keep in sync with Harmattan (orchestrator) for harmonized tool result processing. + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | +| `content` | List[[models.ConnectorToolCallResponseContent](../models/connectortoolcallresponsecontent.md)] | :heavy_check_mark: | N/A | +| `metadata` | [OptionalNullable[models.ConnectorToolCallMetadata]](../models/connectortoolcallmetadata.md) | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/connectortoolcallresponsecontent.md b/docs/models/connectortoolcallresponsecontent.md new file mode 100644 index 00000000..d7f93eda --- /dev/null +++ b/docs/models/connectortoolcallresponsecontent.md @@ -0,0 +1,35 @@ +# ConnectorToolCallResponseContent + + +## Supported Types + +### `models.TextContent` + +```python +value: models.TextContent = /* values here */ +``` + +### `models.ImageContent` + +```python +value: models.ImageContent = /* values here */ +``` + +### `models.AudioContent` + +```python +value: models.AudioContent = /* values here */ +``` + +### `models.ResourceLink` + +```python +value: models.ResourceLink = /* values here */ +``` + +### `models.EmbeddedResource` + +```python +value: models.EmbeddedResource = /* values here */ +``` + diff --git a/docs/models/connectortoollocale.md b/docs/models/connectortoollocale.md 
new file mode 100644 index 00000000..b882c419 --- /dev/null +++ b/docs/models/connectortoollocale.md @@ -0,0 +1,10 @@ +# ConnectorToolLocale + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `name` | Dict[str, *str*] | :heavy_check_mark: | N/A | +| `description` | Dict[str, *str*] | :heavy_check_mark: | N/A | +| `usage_sentence` | Dict[str, *str*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/connectortoolresultmetadata.md b/docs/models/connectortoolresultmetadata.md new file mode 100644 index 00000000..77a327dc --- /dev/null +++ b/docs/models/connectortoolresultmetadata.md @@ -0,0 +1,13 @@ +# ConnectorToolResultMetadata + +MCP-specific result metadata (isError, structuredContent, _meta). + + +## Fields + +| Field | Type | Required | Description | +| -------------------- | -------------------- | -------------------- | -------------------- | +| `is_error` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `structured_content` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `meta` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/connectorupdatev1request.md b/docs/models/connectorupdatev1request.md new file mode 100644 index 00000000..db9cc9b4 --- /dev/null +++ b/docs/models/connectorupdatev1request.md @@ -0,0 +1,9 @@ +# ConnectorUpdateV1Request + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `connector_id` | *str* | :heavy_check_mark: | N/A | +| `update_connector_request` | 
[models.UpdateConnectorRequest](../models/updateconnectorrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/contentchunk.md b/docs/models/contentchunk.md index cb7e51d3..5a8ef58c 100644 --- a/docs/models/contentchunk.md +++ b/docs/models/contentchunk.md @@ -45,3 +45,9 @@ value: models.ThinkChunk = /* values here */ value: models.AudioChunk = /* values here */ ``` +### `models.AudioURLChunk` + +```python +value: models.AudioURLChunk = /* values here */ +``` + diff --git a/docs/models/context.md b/docs/models/context.md new file mode 100644 index 00000000..107f1bd4 --- /dev/null +++ b/docs/models/context.md @@ -0,0 +1,7 @@ +# Context + + +## Fields + +| Field | Type | Required | Description | +| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/docs/models/conversationpayload.md b/docs/models/conversationpayload.md new file mode 100644 index 00000000..481f18e6 --- /dev/null +++ b/docs/models/conversationpayload.md @@ -0,0 +1,9 @@ +# ConversationPayload + + +## Fields + +| Field | Type | Required | Description | +| ---------------------- | ---------------------- | ---------------------- | ---------------------- | +| `messages` | List[Dict[str, *Any*]] | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationrequest.md b/docs/models/conversationrequest.md index bd7823a8..9aabc52a 100644 --- a/docs/models/conversationrequest.md +++ b/docs/models/conversationrequest.md @@ -12,6 +12,7 @@ | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `tools` | List[[models.ConversationRequestTool](../models/conversationrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| | `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `guardrails` | List[[models.GuardrailConfig](../models/guardrailconfig.md)] | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | diff --git a/docs/models/conversationresponse.md b/docs/models/conversationresponse.md index 2732f785..3f0340ba 100644 --- a/docs/models/conversationresponse.md +++ b/docs/models/conversationresponse.md @@ -5,9 +5,10 @@ The response after appending new entries to the conversation. ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | -| `object` | *Optional[Literal["conversation.response"]]* | :heavy_minus_sign: | N/A | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | -| `outputs` | List[[models.Output](../models/output.md)] | :heavy_check_mark: | N/A | -| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `object` | *Optional[Literal["conversation.response"]]* | :heavy_minus_sign: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `outputs` | 
List[[models.ConversationResponseOutput](../models/conversationresponseoutput.md)] | :heavy_check_mark: | N/A | +| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | +| `guardrails` | List[[models.Guardrail](../models/guardrail.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/output.md b/docs/models/conversationresponseoutput.md similarity index 93% rename from docs/models/output.md rename to docs/models/conversationresponseoutput.md index d0ee0db9..fe8ad1b5 100644 --- a/docs/models/output.md +++ b/docs/models/conversationresponseoutput.md @@ -1,4 +1,4 @@ -# Output +# ConversationResponseOutput ## Supported Types diff --git a/docs/models/conversationrestartrequest.md b/docs/models/conversationrestartrequest.md index ad3ff362..08f47bba 100644 --- a/docs/models/conversationrestartrequest.md +++ b/docs/models/conversationrestartrequest.md @@ -12,6 +12,7 @@ Request to restart a new conversation from a given entry in the conversation. | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `guardrails` | List[[models.GuardrailConfig](../models/guardrailconfig.md)] | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | | `from_entry_id` | *str* | :heavy_check_mark: | N/A | | `agent_version` | [OptionalNullable[models.ConversationRestartRequestAgentVersion]](../models/conversationrestartrequestagentversion.md) | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. 
| \ No newline at end of file diff --git a/docs/models/conversationrestartstreamrequest.md b/docs/models/conversationrestartstreamrequest.md index 865a1e8f..3ad739aa 100644 --- a/docs/models/conversationrestartstreamrequest.md +++ b/docs/models/conversationrestartstreamrequest.md @@ -12,6 +12,7 @@ Request to restart a new conversation from a given entry in the conversation. | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `guardrails` | List[[models.GuardrailConfig](../models/guardrailconfig.md)] | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | | `from_entry_id` | *str* | :heavy_check_mark: | N/A | | `agent_version` | [OptionalNullable[models.ConversationRestartStreamRequestAgentVersion]](../models/conversationrestartstreamrequestagentversion.md) | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. 
| \ No newline at end of file diff --git a/docs/models/conversationsource.md b/docs/models/conversationsource.md new file mode 100644 index 00000000..12bf7c1f --- /dev/null +++ b/docs/models/conversationsource.md @@ -0,0 +1,11 @@ +# ConversationSource + + +## Values + +| Name | Value | +| --------------- | --------------- | +| `EXPLORER` | EXPLORER | +| `UPLOADED_FILE` | UPLOADED_FILE | +| `DIRECT_INPUT` | DIRECT_INPUT | +| `PLAYGROUND` | PLAYGROUND | \ No newline at end of file diff --git a/docs/models/conversationstreamrequest.md b/docs/models/conversationstreamrequest.md index 8b74f9e7..f792c263 100644 --- a/docs/models/conversationstreamrequest.md +++ b/docs/models/conversationstreamrequest.md @@ -12,6 +12,7 @@ | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `tools` | List[[models.ConversationStreamRequestTool](../models/conversationstreamrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `guardrails` | List[[models.GuardrailConfig](../models/guardrailconfig.md)] | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | diff --git a/docs/models/createagentrequest.md b/docs/models/createagentrequest.md index cca3a079..e9665545 100644 --- a/docs/models/createagentrequest.md +++ b/docs/models/createagentrequest.md @@ -8,6 +8,7 @@ | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | | `tools` | List[[models.CreateAgentRequestTool](../models/createagentrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `guardrails` | List[[models.GuardrailConfig](../models/guardrailconfig.md)] | :heavy_minus_sign: | N/A | | `model` | *str* | :heavy_check_mark: | N/A | | `name` | *str* | :heavy_check_mark: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/createcampaignrequest.md b/docs/models/createcampaignrequest.md new file mode 100644 index 00000000..2e81d26d --- /dev/null +++ b/docs/models/createcampaignrequest.md @@ -0,0 +1,12 @@ +# CreateCampaignRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `search_params` | [models.FilterPayload](../models/filterpayload.md) | :heavy_check_mark: | N/A | +| `judge_id` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *str* | :heavy_check_mark: | N/A | +| `max_nb_events` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/createconnectorrequest.md b/docs/models/createconnectorrequest.md new file mode 100644 index 00000000..4668e1a6 --- /dev/null +++ b/docs/models/createconnectorrequest.md @@ -0,0 +1,15 @@ +# CreateConnectorRequest + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | +| `name` | 
*str* | :heavy_check_mark: | The name of the connector. Should be 64 char length maximum, alphanumeric, only underscores/dashes. | +| `description` | *str* | :heavy_check_mark: | The description of the connector. | +| `icon_url` | *OptionalNullable[str]* | :heavy_minus_sign: | The optional url of the icon you want to associate to the connector. | +| `visibility` | [Optional[models.ResourceVisibility]](../models/resourcevisibility.md) | :heavy_minus_sign: | N/A | +| `server` | *str* | :heavy_check_mark: | The url of the MCP server. | +| `headers` | Dict[str, *Any*] | :heavy_minus_sign: | Optional organization-level headers to be sent with the request to the mcp server. | +| `auth_data` | [OptionalNullable[models.AuthData]](../models/authdata.md) | :heavy_minus_sign: | Optional additional authentication data for the connector. | +| `system_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional system prompt for the connector. | \ No newline at end of file diff --git a/docs/models/createdatasetrecordrequest.md b/docs/models/createdatasetrecordrequest.md new file mode 100644 index 00000000..3ea0d68d --- /dev/null +++ b/docs/models/createdatasetrecordrequest.md @@ -0,0 +1,9 @@ +# CreateDatasetRecordRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | +| `payload` | [models.ConversationPayload](../models/conversationpayload.md) | :heavy_check_mark: | N/A | +| `properties` | Dict[str, *Any*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/createdatasetrecordv1observabilitydatasetsdatasetidrecordspostrequest.md b/docs/models/createdatasetrecordv1observabilitydatasetsdatasetidrecordspostrequest.md new file mode 100644 index 00000000..7fe42faa --- /dev/null 
+++ b/docs/models/createdatasetrecordv1observabilitydatasetsdatasetidrecordspostrequest.md @@ -0,0 +1,9 @@ +# CreateDatasetRecordV1ObservabilityDatasetsDatasetIDRecordsPostRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `create_dataset_record_request` | [models.CreateDatasetRecordRequest](../models/createdatasetrecordrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/createdatasetrequest.md b/docs/models/createdatasetrequest.md new file mode 100644 index 00000000..d4c16643 --- /dev/null +++ b/docs/models/createdatasetrequest.md @@ -0,0 +1,9 @@ +# CreateDatasetRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/createfileresponse.md b/docs/models/createfileresponse.md index 8152922b..84be4dc6 100644 --- a/docs/models/createfileresponse.md +++ b/docs/models/createfileresponse.md @@ -3,16 +3,18 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. 
| 497f6eca-6276-4993-bfeb-53cbbbba6f09 | -| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | -| `size_bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | -| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | -| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | -| `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | -| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | -| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | -| `mimetype` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | -| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | -| `signature` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | +| `size_bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | +| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. 
| files_upload.jsonl | +| `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | +| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `mimetype` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `signature` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `expires_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `visibility` | [OptionalNullable[models.FileVisibility]](../models/filevisibility.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/createfinetuningjobrequestrepository.md b/docs/models/createfinetuningjobrequestrepository.md index 32be1b6d..42b6c6ca 100644 --- a/docs/models/createfinetuningjobrequestrepository.md +++ b/docs/models/createfinetuningjobrequestrepository.md @@ -3,9 +3,9 @@ ## Supported Types -### `models.GithubRepositoryIn` +### `models.CreateGithubRepositoryRequest` ```python -value: models.GithubRepositoryIn = /* values here */ +value: models.CreateGithubRepositoryRequest = /* values here */ ``` diff --git a/docs/models/githubrepositoryin.md b/docs/models/creategithubrepositoryrequest.md similarity index 96% rename from docs/models/githubrepositoryin.md rename to docs/models/creategithubrepositoryrequest.md index 241cf584..502afa7b 100644 --- a/docs/models/githubrepositoryin.md +++ b/docs/models/creategithubrepositoryrequest.md @@ -1,4 +1,4 @@ -# GithubRepositoryIn +# CreateGithubRepositoryRequest ## Fields diff --git a/docs/models/createjudgerequest.md b/docs/models/createjudgerequest.md new file mode 100644 index 00000000..ccb1119a --- /dev/null +++ b/docs/models/createjudgerequest.md @@ -0,0 +1,13 @@ +# CreateJudgeRequest + + +## Fields + +| Field | Type | Required | Description | +| 
------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *str* | :heavy_check_mark: | N/A | +| `model_name` | *str* | :heavy_check_mark: | N/A | +| `output` | [models.CreateJudgeRequestOutput](../models/createjudgerequestoutput.md) | :heavy_check_mark: | N/A | +| `instructions` | *str* | :heavy_check_mark: | N/A | +| `tools` | List[*str*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/createjudgerequestoutput.md b/docs/models/createjudgerequestoutput.md new file mode 100644 index 00000000..a7267671 --- /dev/null +++ b/docs/models/createjudgerequestoutput.md @@ -0,0 +1,17 @@ +# CreateJudgeRequestOutput + + +## Supported Types + +### `models.JudgeClassificationOutput` + +```python +value: models.JudgeClassificationOutput = /* values here */ +``` + +### `models.JudgeRegressionOutput` + +```python +value: models.JudgeRegressionOutput = /* values here */ +``` + diff --git a/docs/models/dataset.md b/docs/models/dataset.md new file mode 100644 index 00000000..9d235433 --- /dev/null +++ b/docs/models/dataset.md @@ -0,0 +1,15 @@ +# Dataset + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | 
:heavy_check_mark: | N/A | +| `deleted_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *str* | :heavy_check_mark: | N/A | +| `owner_id` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/datasetimporttask.md b/docs/models/datasetimporttask.md new file mode 100644 index 00000000..21e6130f --- /dev/null +++ b/docs/models/datasetimporttask.md @@ -0,0 +1,17 @@ +# DatasetImportTask + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `deleted_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `creator_id` | *str* | :heavy_check_mark: | N/A | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | +| `status` | [models.BaseTaskStatus](../models/basetaskstatus.md) | :heavy_check_mark: | N/A | +| `progress` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/datasetpreview.md b/docs/models/datasetpreview.md new file mode 100644 index 00000000..f729908d --- /dev/null +++ b/docs/models/datasetpreview.md @@ -0,0 +1,15 @@ +# DatasetPreview + + +## Fields + +| Field | Type | 
Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `deleted_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *str* | :heavy_check_mark: | N/A | +| `owner_id` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/datasetrecord.md b/docs/models/datasetrecord.md new file mode 100644 index 00000000..dbc7c3d0 --- /dev/null +++ b/docs/models/datasetrecord.md @@ -0,0 +1,15 @@ +# DatasetRecord + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `deleted_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `payload` | 
[models.ConversationPayload](../models/conversationpayload.md) | :heavy_check_mark: | N/A | +| `properties` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `source` | [models.ConversationSource](../models/conversationsource.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/deletecampaignv1observabilitycampaignscampaigniddeleterequest.md b/docs/models/deletecampaignv1observabilitycampaignscampaigniddeleterequest.md new file mode 100644 index 00000000..4114509d --- /dev/null +++ b/docs/models/deletecampaignv1observabilitycampaignscampaigniddeleterequest.md @@ -0,0 +1,8 @@ +# DeleteCampaignV1ObservabilityCampaignsCampaignIDDeleteRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `campaign_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/deletedatasetrecordsrequest.md b/docs/models/deletedatasetrecordsrequest.md new file mode 100644 index 00000000..1afc46d6 --- /dev/null +++ b/docs/models/deletedatasetrecordsrequest.md @@ -0,0 +1,8 @@ +# DeleteDatasetRecordsRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------- | -------------------- | -------------------- | -------------------- | +| `dataset_record_ids` | List[*str*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/deletedatasetrecordv1observabilitydatasetrecordsdatasetrecordiddeleterequest.md b/docs/models/deletedatasetrecordv1observabilitydatasetrecordsdatasetrecordiddeleterequest.md new file mode 100644 index 00000000..b2425068 --- /dev/null +++ b/docs/models/deletedatasetrecordv1observabilitydatasetrecordsdatasetrecordiddeleterequest.md @@ -0,0 +1,8 @@ +# DeleteDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDDeleteRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------- | ------------------- | ------------------- | 
------------------- | +| `dataset_record_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/deletedatasetv1observabilitydatasetsdatasetiddeleterequest.md b/docs/models/deletedatasetv1observabilitydatasetsdatasetiddeleterequest.md new file mode 100644 index 00000000..4557d3c6 --- /dev/null +++ b/docs/models/deletedatasetv1observabilitydatasetsdatasetiddeleterequest.md @@ -0,0 +1,8 @@ +# DeleteDatasetV1ObservabilityDatasetsDatasetIDDeleteRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/deletejudgev1observabilityjudgesjudgeiddeleterequest.md b/docs/models/deletejudgev1observabilityjudgesjudgeiddeleterequest.md new file mode 100644 index 00000000..44042efc --- /dev/null +++ b/docs/models/deletejudgev1observabilityjudgesjudgeiddeleterequest.md @@ -0,0 +1,8 @@ +# DeleteJudgeV1ObservabilityJudgesJudgeIDDeleteRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `judge_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/deletemodelout.md b/docs/models/deletemodelresponse.md similarity index 98% rename from docs/models/deletemodelout.md rename to docs/models/deletemodelresponse.md index 5fd4df7a..d8bd2f32 100644 --- a/docs/models/deletemodelout.md +++ b/docs/models/deletemodelresponse.md @@ -1,4 +1,4 @@ -# DeleteModelOut +# DeleteModelResponse ## Fields diff --git a/docs/models/embeddedresource.md b/docs/models/embeddedresource.md new file mode 100644 index 00000000..102fca26 --- /dev/null +++ b/docs/models/embeddedresource.md @@ -0,0 +1,17 @@ +# EmbeddedResource + +The contents of a resource, embedded into a prompt or tool call result. 
+ +It is up to the client how best to render embedded resources for the benefit +of the LLM and/or the user. + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | *Literal["resource"]* | :heavy_check_mark: | N/A | +| `resource` | [models.Resource](../models/resource.md) | :heavy_check_mark: | N/A | +| `annotations` | [OptionalNullable[models.Annotations]](../models/annotations.md) | :heavy_minus_sign: | N/A | +| `meta` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/executionconfig.md b/docs/models/executionconfig.md new file mode 100644 index 00000000..1033b7ea --- /dev/null +++ b/docs/models/executionconfig.md @@ -0,0 +1,13 @@ +# ExecutionConfig + +Not typed since mcp config can changed / not stable +we allow all extra fields and this is a dict +TODO: once mcp is stable, we need to type this + + +## Fields + +| Field | Type | Required | Description | +| -------------------- | -------------------- | -------------------- | -------------------- | +| `type` | *str* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/exportdatasetresponse.md b/docs/models/exportdatasetresponse.md new file mode 100644 index 00000000..7187ee5c --- /dev/null +++ b/docs/models/exportdatasetresponse.md @@ -0,0 +1,8 @@ +# ExportDatasetResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `file_url` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git 
a/docs/models/exportdatasettojsonlv1observabilitydatasetsdatasetidexportstojsonlgetrequest.md b/docs/models/exportdatasettojsonlv1observabilitydatasetsdatasetidexportstojsonlgetrequest.md new file mode 100644 index 00000000..efe4bbb0 --- /dev/null +++ b/docs/models/exportdatasettojsonlv1observabilitydatasetsdatasetidexportstojsonlgetrequest.md @@ -0,0 +1,8 @@ +# ExportDatasetToJsonlV1ObservabilityDatasetsDatasetIDExportsToJsonlGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/feedresultchatcompletioneventpreview.md b/docs/models/feedresultchatcompletioneventpreview.md new file mode 100644 index 00000000..08a6e2f8 --- /dev/null +++ b/docs/models/feedresultchatcompletioneventpreview.md @@ -0,0 +1,10 @@ +# FeedResultChatCompletionEventPreview + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `results` | List[[models.ChatCompletionEventPreview](../models/chatcompletioneventpreview.md)] | :heavy_minus_sign: | N/A | +| `next` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `cursor` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/fetchcampaignstatusresponse.md b/docs/models/fetchcampaignstatusresponse.md new file mode 100644 index 00000000..7aac0f25 --- /dev/null +++ b/docs/models/fetchcampaignstatusresponse.md @@ -0,0 +1,8 @@ +# FetchCampaignStatusResponse + + +## Fields + +| Field | Type | Required | Description | +| 
---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `status` | [models.BaseTaskStatus](../models/basetaskstatus.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/fetchchatcompletionfieldoptionsresponse.md b/docs/models/fetchchatcompletionfieldoptionsresponse.md new file mode 100644 index 00000000..86beebc1 --- /dev/null +++ b/docs/models/fetchchatcompletionfieldoptionsresponse.md @@ -0,0 +1,8 @@ +# FetchChatCompletionFieldOptionsResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `options` | List[[Nullable[models.Option]](../models/option.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/fetchfieldoptioncountsrequest.md b/docs/models/fetchfieldoptioncountsrequest.md new file mode 100644 index 00000000..b13f6312 --- /dev/null +++ b/docs/models/fetchfieldoptioncountsrequest.md @@ -0,0 +1,8 @@ +# FetchFieldOptionCountsRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `filter_params` | [OptionalNullable[models.FilterPayload]](../models/filterpayload.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/fetchfieldoptioncountsresponse.md b/docs/models/fetchfieldoptioncountsresponse.md new file mode 100644 index 00000000..dabf6b5e --- /dev/null +++ 
b/docs/models/fetchfieldoptioncountsresponse.md @@ -0,0 +1,8 @@ +# FetchFieldOptionCountsResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `counts` | List[[models.FieldOptionCountItem](../models/fieldoptioncountitem.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/fieldgroup.md b/docs/models/fieldgroup.md new file mode 100644 index 00000000..8fff281d --- /dev/null +++ b/docs/models/fieldgroup.md @@ -0,0 +1,9 @@ +# FieldGroup + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `label` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/fieldoptioncountitem.md b/docs/models/fieldoptioncountitem.md new file mode 100644 index 00000000..92bc971f --- /dev/null +++ b/docs/models/fieldoptioncountitem.md @@ -0,0 +1,9 @@ +# FieldOptionCountItem + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `value` | *str* | :heavy_check_mark: | N/A | +| `count` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/filesapiroutesuploadfilefilevisibility.md b/docs/models/filesapiroutesuploadfilefilevisibility.md new file mode 100644 index 00000000..055f071b --- /dev/null +++ b/docs/models/filesapiroutesuploadfilefilevisibility.md @@ -0,0 +1,9 @@ +# FilesAPIRoutesUploadFileFileVisibility + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `WORKSPACE` | workspace | +| `USER` | user | \ No newline at end of file 
diff --git a/docs/models/fileschema.md b/docs/models/fileschema.md index 4f3e72db..15454840 100644 --- a/docs/models/fileschema.md +++ b/docs/models/fileschema.md @@ -3,16 +3,18 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | -| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | -| `size_bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | -| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | -| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | -| `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | -| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | -| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | -| `mimetype` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | -| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | -| `signature` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The unique identifier of the 
file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | +| `size_bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | +| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | +| `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | +| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `mimetype` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `signature` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `expires_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `visibility` | [OptionalNullable[models.FileVisibility]](../models/filevisibility.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/filevisibility.md b/docs/models/filevisibility.md new file mode 100644 index 00000000..4ed11692 --- /dev/null +++ b/docs/models/filevisibility.md @@ -0,0 +1,9 @@ +# FileVisibility + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `WORKSPACE` | workspace | +| `USER` | user | \ No newline at end of file diff --git a/docs/models/filtercondition.md b/docs/models/filtercondition.md new file mode 100644 index 00000000..ba2eea60 --- /dev/null +++ b/docs/models/filtercondition.md @@ -0,0 +1,10 @@ +# FilterCondition + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------- | ---------------------------- | ---------------------------- | ---------------------------- | +| `field` | *str* | :heavy_check_mark: | N/A | +| `op` | [models.Op](../models/op.md) | :heavy_check_mark: | N/A 
| +| `value` | *Any* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/filtergroup.md b/docs/models/filtergroup.md new file mode 100644 index 00000000..974c724c --- /dev/null +++ b/docs/models/filtergroup.md @@ -0,0 +1,9 @@ +# FilterGroup + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------- | ------------------------------------- | ------------------------------------- | ------------------------------------- | +| `and_` | List[[models.And](../models/and_.md)] | :heavy_minus_sign: | N/A | +| `or_` | List[[models.Or](../models/or_.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/filterpayload.md b/docs/models/filterpayload.md new file mode 100644 index 00000000..49f273c0 --- /dev/null +++ b/docs/models/filterpayload.md @@ -0,0 +1,8 @@ +# FilterPayload + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | +| `filters` | [Nullable[models.Filters]](../models/filters.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/filters.md b/docs/models/filters.md new file mode 100644 index 00000000..4595b82b --- /dev/null +++ b/docs/models/filters.md @@ -0,0 +1,17 @@ +# Filters + + +## Supported Types + +### `models.FilterGroup` + +```python +value: models.FilterGroup = /* values here */ +``` + +### `models.FilterCondition` + +```python +value: models.FilterCondition = /* values here */ +``` + diff --git a/docs/models/getcampaignbyidv1observabilitycampaignscampaignidgetrequest.md b/docs/models/getcampaignbyidv1observabilitycampaignscampaignidgetrequest.md new file mode 100644 index 00000000..9e781961 --- /dev/null +++ b/docs/models/getcampaignbyidv1observabilitycampaignscampaignidgetrequest.md @@ -0,0 +1,8 @@ +# 
GetCampaignByIDV1ObservabilityCampaignsCampaignIDGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `campaign_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/getcampaignselectedeventsv1observabilitycampaignscampaignidselectedeventsgetrequest.md b/docs/models/getcampaignselectedeventsv1observabilitycampaignscampaignidselectedeventsgetrequest.md new file mode 100644 index 00000000..18de3f10 --- /dev/null +++ b/docs/models/getcampaignselectedeventsv1observabilitycampaignscampaignidselectedeventsgetrequest.md @@ -0,0 +1,10 @@ +# GetCampaignSelectedEventsV1ObservabilityCampaignsCampaignIDSelectedEventsGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `campaign_id` | *str* | :heavy_check_mark: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/getcampaignstatusbyidv1observabilitycampaignscampaignidstatusgetrequest.md b/docs/models/getcampaignstatusbyidv1observabilitycampaignscampaignidstatusgetrequest.md new file mode 100644 index 00000000..947c4d64 --- /dev/null +++ b/docs/models/getcampaignstatusbyidv1observabilitycampaignscampaignidstatusgetrequest.md @@ -0,0 +1,8 @@ +# GetCampaignStatusByIDV1ObservabilityCampaignsCampaignIDStatusGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `campaign_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/getcampaignsv1observabilitycampaignsgetrequest.md b/docs/models/getcampaignsv1observabilitycampaignsgetrequest.md new file mode 100644 index 00000000..f83c3b64 --- /dev/null +++ 
b/docs/models/getcampaignsv1observabilitycampaignsgetrequest.md @@ -0,0 +1,10 @@ +# GetCampaignsV1ObservabilityCampaignsGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `q` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationthinkchunk.md b/docs/models/getchatcompletioneventsv1observabilitychatcompletioneventssearchpostrequest.md similarity index 73% rename from docs/models/conversationthinkchunk.md rename to docs/models/getchatcompletioneventsv1observabilitychatcompletioneventssearchpostrequest.md index 1fb16bd9..b7b92279 100644 --- a/docs/models/conversationthinkchunk.md +++ b/docs/models/getchatcompletioneventsv1observabilitychatcompletioneventssearchpostrequest.md @@ -1,10 +1,10 @@ -# ConversationThinkChunk +# GetChatCompletionEventsV1ObservabilityChatCompletionEventsSearchPostRequest ## Fields | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | -| `type` | *Optional[Literal["thinking"]]* | :heavy_minus_sign: | N/A | -| `thinking` | List[[models.ConversationThinkChunkThinking](../models/conversationthinkchunkthinking.md)] | :heavy_check_mark: | N/A | -| `closed` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `cursor` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| 
`search_chat_completion_events_request` | [models.SearchChatCompletionEventsRequest](../models/searchchatcompletioneventsrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/getchatcompletioneventv1observabilitychatcompletioneventseventidgetrequest.md b/docs/models/getchatcompletioneventv1observabilitychatcompletioneventseventidgetrequest.md new file mode 100644 index 00000000..8fe18c47 --- /dev/null +++ b/docs/models/getchatcompletioneventv1observabilitychatcompletioneventseventidgetrequest.md @@ -0,0 +1,8 @@ +# GetChatCompletionEventV1ObservabilityChatCompletionEventsEventIDGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `event_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/getchatcompletionfieldoptionscountsv1observabilitychatcompletionfieldsfieldnameoptionscountspostrequest.md b/docs/models/getchatcompletionfieldoptionscountsv1observabilitychatcompletionfieldsfieldnameoptionscountspostrequest.md new file mode 100644 index 00000000..339a1de8 --- /dev/null +++ b/docs/models/getchatcompletionfieldoptionscountsv1observabilitychatcompletionfieldsfieldnameoptionscountspostrequest.md @@ -0,0 +1,9 @@ +# GetChatCompletionFieldOptionsCountsV1ObservabilityChatCompletionFieldsFieldNameOptionsCountsPostRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `field_name` | *str* | :heavy_check_mark: | N/A | +| `fetch_field_option_counts_request` | 
[models.FetchFieldOptionCountsRequest](../models/fetchfieldoptioncountsrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/getchatcompletionfieldoptionsv1observabilitychatcompletionfieldsfieldnameoptionsgetrequest.md b/docs/models/getchatcompletionfieldoptionsv1observabilitychatcompletionfieldsfieldnameoptionsgetrequest.md new file mode 100644 index 00000000..973a1a4b --- /dev/null +++ b/docs/models/getchatcompletionfieldoptionsv1observabilitychatcompletionfieldsfieldnameoptionsgetrequest.md @@ -0,0 +1,9 @@ +# GetChatCompletionFieldOptionsV1ObservabilityChatCompletionFieldsFieldNameOptionsGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------- | ----------------------------------------- | ----------------------------------------- | ----------------------------------------- | +| `field_name` | *str* | :heavy_check_mark: | N/A | +| `operator` | [models.Operator](../models/operator.md) | :heavy_check_mark: | The operator to use for filtering options | \ No newline at end of file diff --git a/docs/models/getdatasetbyidv1observabilitydatasetsdatasetidgetrequest.md b/docs/models/getdatasetbyidv1observabilitydatasetsdatasetidgetrequest.md new file mode 100644 index 00000000..60f2d162 --- /dev/null +++ b/docs/models/getdatasetbyidv1observabilitydatasetsdatasetidgetrequest.md @@ -0,0 +1,8 @@ +# GetDatasetByIDV1ObservabilityDatasetsDatasetIDGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/getdatasetimporttasksv1observabilitydatasetsdatasetidtasksgetrequest.md b/docs/models/getdatasetimporttasksv1observabilitydatasetsdatasetidtasksgetrequest.md new file mode 100644 index 00000000..d4226eca --- /dev/null +++ 
b/docs/models/getdatasetimporttasksv1observabilitydatasetsdatasetidtasksgetrequest.md @@ -0,0 +1,10 @@ +# GetDatasetImportTasksV1ObservabilityDatasetsDatasetIDTasksGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/getdatasetimporttaskv1observabilitydatasetsdatasetidtaskstaskidgetrequest.md b/docs/models/getdatasetimporttaskv1observabilitydatasetsdatasetidtaskstaskidgetrequest.md new file mode 100644 index 00000000..98ffc3c6 --- /dev/null +++ b/docs/models/getdatasetimporttaskv1observabilitydatasetsdatasetidtaskstaskidgetrequest.md @@ -0,0 +1,9 @@ +# GetDatasetImportTaskV1ObservabilityDatasetsDatasetIDTasksTaskIDGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `task_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/getdatasetrecordsv1observabilitydatasetsdatasetidrecordsgetrequest.md b/docs/models/getdatasetrecordsv1observabilitydatasetsdatasetidrecordsgetrequest.md new file mode 100644 index 00000000..82453f0c --- /dev/null +++ b/docs/models/getdatasetrecordsv1observabilitydatasetsdatasetidrecordsgetrequest.md @@ -0,0 +1,10 @@ +# GetDatasetRecordsV1ObservabilityDatasetsDatasetIDRecordsGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of 
file diff --git a/docs/models/getdatasetrecordv1observabilitydatasetrecordsdatasetrecordidgetrequest.md b/docs/models/getdatasetrecordv1observabilitydatasetrecordsdatasetrecordidgetrequest.md new file mode 100644 index 00000000..6b9eb3bc --- /dev/null +++ b/docs/models/getdatasetrecordv1observabilitydatasetrecordsdatasetrecordidgetrequest.md @@ -0,0 +1,8 @@ +# GetDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------- | ------------------- | ------------------- | ------------------- | +| `dataset_record_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/getdatasetsv1observabilitydatasetsgetrequest.md b/docs/models/getdatasetsv1observabilitydatasetsgetrequest.md new file mode 100644 index 00000000..073ab769 --- /dev/null +++ b/docs/models/getdatasetsv1observabilitydatasetsgetrequest.md @@ -0,0 +1,10 @@ +# GetDatasetsV1ObservabilityDatasetsGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `q` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/getfileresponse.md b/docs/models/getfileresponse.md index 0edd13e0..38ad4943 100644 --- a/docs/models/getfileresponse.md +++ b/docs/models/getfileresponse.md @@ -3,17 +3,19 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. 
| 497f6eca-6276-4993-bfeb-53cbbbba6f09 | -| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | -| `size_bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | -| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | -| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | -| `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | -| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | -| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | -| `mimetype` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | -| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | -| `signature` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | -| `deleted` | *bool* | :heavy_check_mark: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | +| `size_bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | +| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. 
| files_upload.jsonl | +| `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | +| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `mimetype` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `signature` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `expires_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `visibility` | [OptionalNullable[models.FileVisibility]](../models/filevisibility.md) | :heavy_minus_sign: | N/A | | +| `deleted` | *bool* | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/getjudgebyidv1observabilityjudgesjudgeidgetrequest.md b/docs/models/getjudgebyidv1observabilityjudgesjudgeidgetrequest.md new file mode 100644 index 00000000..f9f1a248 --- /dev/null +++ b/docs/models/getjudgebyidv1observabilityjudgesjudgeidgetrequest.md @@ -0,0 +1,8 @@ +# GetJudgeByIDV1ObservabilityJudgesJudgeIDGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `judge_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/getjudgesv1observabilityjudgesgetrequest.md b/docs/models/getjudgesv1observabilityjudgesgetrequest.md new file mode 100644 index 00000000..154ece82 --- /dev/null +++ b/docs/models/getjudgesv1observabilityjudgesgetrequest.md @@ -0,0 +1,12 @@ +# GetJudgesV1ObservabilityJudgesGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `type_filter` 
| List[[models.JudgeOutputType](../models/judgeoutputtype.md)] | :heavy_minus_sign: | Filter by judge output types | +| `model_filter` | List[*str*] | :heavy_minus_sign: | Filter by model names | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `q` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/getsimilarchatcompletioneventsv1observabilitychatcompletioneventseventidsimilareventsgetrequest.md b/docs/models/getsimilarchatcompletioneventsv1observabilitychatcompletioneventseventidsimilareventsgetrequest.md new file mode 100644 index 00000000..cf276b5e --- /dev/null +++ b/docs/models/getsimilarchatcompletioneventsv1observabilitychatcompletioneventseventidsimilareventsgetrequest.md @@ -0,0 +1,8 @@ +# GetSimilarChatCompletionEventsV1ObservabilityChatCompletionEventsEventIDSimilarEventsGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `event_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/guardrail.md b/docs/models/guardrail.md new file mode 100644 index 00000000..319e1415 --- /dev/null +++ b/docs/models/guardrail.md @@ -0,0 +1,7 @@ +# Guardrail + + +## Fields + +| Field | Type | Required | Description | +| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/docs/models/guardrailconfig.md b/docs/models/guardrailconfig.md new file mode 100644 index 00000000..235d7204 --- /dev/null +++ b/docs/models/guardrailconfig.md @@ -0,0 +1,9 @@ +# GuardrailConfig + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `block_on_error` | *Optional[bool]* | :heavy_minus_sign: | If true, return HTTP 403 and block request in the event of a server-side error | +| `moderation_llm_v1` | [Nullable[models.ModerationLlmv1Config]](../models/moderationllmv1config.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/imagecontent.md b/docs/models/imagecontent.md new file mode 100644 index 00000000..5145469e --- /dev/null +++ b/docs/models/imagecontent.md @@ -0,0 +1,15 @@ +# ImageContent + +Image content for a message. + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | *Literal["image"]* | :heavy_check_mark: | N/A | +| `data` | *str* | :heavy_check_mark: | N/A | +| `mime_type` | *str* | :heavy_check_mark: | N/A | +| `annotations` | [OptionalNullable[models.Annotations]](../models/annotations.md) | :heavy_minus_sign: | N/A | +| `meta` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/importdatasetfromcampaignrequest.md b/docs/models/importdatasetfromcampaignrequest.md new file mode 100644 index 00000000..aa1ecc85 --- /dev/null +++ b/docs/models/importdatasetfromcampaignrequest.md @@ -0,0 +1,8 @@ +# ImportDatasetFromCampaignRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `campaign_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git 
a/docs/models/importdatasetfromdatasetrequest.md b/docs/models/importdatasetfromdatasetrequest.md new file mode 100644 index 00000000..cf8d373f --- /dev/null +++ b/docs/models/importdatasetfromdatasetrequest.md @@ -0,0 +1,8 @@ +# ImportDatasetFromDatasetRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------- | -------------------- | -------------------- | -------------------- | +| `dataset_record_ids` | List[*str*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/importdatasetfromexplorerrequest.md b/docs/models/importdatasetfromexplorerrequest.md new file mode 100644 index 00000000..668cdcbf --- /dev/null +++ b/docs/models/importdatasetfromexplorerrequest.md @@ -0,0 +1,8 @@ +# ImportDatasetFromExplorerRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------- | ---------------------- | ---------------------- | ---------------------- | +| `completion_event_ids` | List[*str*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/importdatasetfromfilerequest.md b/docs/models/importdatasetfromfilerequest.md new file mode 100644 index 00000000..a05e9725 --- /dev/null +++ b/docs/models/importdatasetfromfilerequest.md @@ -0,0 +1,8 @@ +# ImportDatasetFromFileRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `file_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/importdatasetfromplaygroundrequest.md b/docs/models/importdatasetfromplaygroundrequest.md new file mode 100644 index 00000000..72a586bc --- /dev/null +++ b/docs/models/importdatasetfromplaygroundrequest.md @@ -0,0 +1,8 @@ +# ImportDatasetFromPlaygroundRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `conversation_ids` | 
List[*str*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/judge.md b/docs/models/judge.md new file mode 100644 index 00000000..34bcce9b --- /dev/null +++ b/docs/models/judge.md @@ -0,0 +1,22 @@ +# Judge + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `deleted_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `owner_id` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *str* | :heavy_check_mark: | N/A | +| `model_name` | *str* | :heavy_check_mark: | N/A | +| `output` | [models.JudgeOutputUnion](../models/judgeoutputunion.md) | :heavy_check_mark: | N/A | +| `instructions` | *str* | :heavy_check_mark: | N/A | +| `tools` | List[*str*] | :heavy_check_mark: | N/A | +| `up_revision` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `down_revision` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `base_revision` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/judgechatcompletioneventrequest.md b/docs/models/judgechatcompletioneventrequest.md new file mode 100644 index 00000000..b91a6492 --- /dev/null +++ b/docs/models/judgechatcompletioneventrequest.md @@ -0,0 +1,8 @@ +# JudgeChatCompletionEventRequest + + +## Fields + +| Field | 
Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `judge_definition` | [models.CreateJudgeRequest](../models/createjudgerequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/judgechatcompletioneventv1observabilitychatcompletioneventseventidlivejudgingpostrequest.md b/docs/models/judgechatcompletioneventv1observabilitychatcompletioneventseventidlivejudgingpostrequest.md new file mode 100644 index 00000000..6a9d93c9 --- /dev/null +++ b/docs/models/judgechatcompletioneventv1observabilitychatcompletioneventseventidlivejudgingpostrequest.md @@ -0,0 +1,9 @@ +# JudgeChatCompletionEventV1ObservabilityChatCompletionEventsEventIDLiveJudgingPostRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `event_id` | *str* | :heavy_check_mark: | N/A | +| `judge_chat_completion_event_request` | [models.JudgeChatCompletionEventRequest](../models/judgechatcompletioneventrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/judgeclassificationoutput.md b/docs/models/judgeclassificationoutput.md new file mode 100644 index 00000000..44d8462e --- /dev/null +++ b/docs/models/judgeclassificationoutput.md @@ -0,0 +1,9 @@ +# JudgeClassificationOutput + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | +| `type` | *Literal["CLASSIFICATION"]* | :heavy_check_mark: | N/A | +| `options` | List[[models.JudgeClassificationOutputOption](../models/judgeclassificationoutputoption.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/judgeclassificationoutputoption.md b/docs/models/judgeclassificationoutputoption.md new file mode 100644 index 00000000..67e08ed2 --- /dev/null +++ b/docs/models/judgeclassificationoutputoption.md @@ -0,0 +1,9 @@ +# JudgeClassificationOutputOption + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `value` | *str* | :heavy_check_mark: | N/A | +| `description` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/judgedatasetrecordrequest.md b/docs/models/judgedatasetrecordrequest.md new file mode 100644 index 00000000..d82aabd9 --- /dev/null +++ b/docs/models/judgedatasetrecordrequest.md @@ -0,0 +1,8 @@ +# JudgeDatasetRecordRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `judge_definition` | [models.CreateJudgeRequest](../models/createjudgerequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/judgedatasetrecordv1observabilitydatasetrecordsdatasetrecordidlivejudgingpostrequest.md b/docs/models/judgedatasetrecordv1observabilitydatasetrecordsdatasetrecordidlivejudgingpostrequest.md new file 
mode 100644 index 00000000..9ce4f011 --- /dev/null +++ b/docs/models/judgedatasetrecordv1observabilitydatasetrecordsdatasetrecordidlivejudgingpostrequest.md @@ -0,0 +1,9 @@ +# JudgeDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDLiveJudgingPostRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `dataset_record_id` | *str* | :heavy_check_mark: | N/A | +| `judge_dataset_record_request` | [models.JudgeDatasetRecordRequest](../models/judgedatasetrecordrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/judgeoutput.md b/docs/models/judgeoutput.md new file mode 100644 index 00000000..4abeffa5 --- /dev/null +++ b/docs/models/judgeoutput.md @@ -0,0 +1,9 @@ +# JudgeOutput + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | +| `analysis` | *str* | :heavy_check_mark: | N/A | +| `answer` | [models.Answer](../models/answer.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/judgeoutputtype.md b/docs/models/judgeoutputtype.md new file mode 100644 index 00000000..6e9dfd72 --- /dev/null +++ b/docs/models/judgeoutputtype.md @@ -0,0 +1,9 @@ +# JudgeOutputType + + +## Values + +| Name | Value | +| ---------------- | ---------------- | +| `REGRESSION` | REGRESSION | +| `CLASSIFICATION` | CLASSIFICATION | \ No newline at end of file diff --git a/docs/models/judgeoutputunion.md b/docs/models/judgeoutputunion.md new file mode 100644 index 00000000..61829564 --- /dev/null +++ b/docs/models/judgeoutputunion.md @@ -0,0 
+1,17 @@ +# JudgeOutputUnion + + +## Supported Types + +### `models.JudgeClassificationOutput` + +```python +value: models.JudgeClassificationOutput = /* values here */ +``` + +### `models.JudgeRegressionOutput` + +```python +value: models.JudgeRegressionOutput = /* values here */ +``` + diff --git a/docs/models/judgeregressionoutput.md b/docs/models/judgeregressionoutput.md new file mode 100644 index 00000000..8f020dfb --- /dev/null +++ b/docs/models/judgeregressionoutput.md @@ -0,0 +1,12 @@ +# JudgeRegressionOutput + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `type` | *Literal["REGRESSION"]* | :heavy_check_mark: | N/A | +| `min` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `min_description` | *str* | :heavy_check_mark: | N/A | +| `max` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `max_description` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariessharecreatev1request.md b/docs/models/librariessharecreatev1request.md index 4c05241d..8af7cc9d 100644 --- a/docs/models/librariessharecreatev1request.md +++ b/docs/models/librariessharecreatev1request.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `library_id` | *str* | :heavy_check_mark: | N/A | -| `sharing_in` | [models.SharingIn](../models/sharingin.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| 
`sharing_request` | [models.SharingRequest](../models/sharingrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listcampaignselectedeventsresponse.md b/docs/models/listcampaignselectedeventsresponse.md new file mode 100644 index 00000000..eb6ea27f --- /dev/null +++ b/docs/models/listcampaignselectedeventsresponse.md @@ -0,0 +1,8 @@ +# ListCampaignSelectedEventsResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `completion_events` | [models.PaginatedResultChatCompletionEventPreview](../models/paginatedresultchatcompletioneventpreview.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listcampaignsresponse.md b/docs/models/listcampaignsresponse.md new file mode 100644 index 00000000..2fcc7d19 --- /dev/null +++ b/docs/models/listcampaignsresponse.md @@ -0,0 +1,8 @@ +# ListCampaignsResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | +| `campaigns` | [models.PaginatedResultCampaignPreview](../models/paginatedresultcampaignpreview.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listchatcompletionfieldsresponse.md 
b/docs/models/listchatcompletionfieldsresponse.md new file mode 100644 index 00000000..c552805e --- /dev/null +++ b/docs/models/listchatcompletionfieldsresponse.md @@ -0,0 +1,9 @@ +# ListChatCompletionFieldsResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `field_definitions` | List[[models.BaseFieldDefinition](../models/basefielddefinition.md)] | :heavy_check_mark: | N/A | +| `field_groups` | List[[models.FieldGroup](../models/fieldgroup.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listdatasetimporttasksresponse.md b/docs/models/listdatasetimporttasksresponse.md new file mode 100644 index 00000000..ce2e9057 --- /dev/null +++ b/docs/models/listdatasetimporttasksresponse.md @@ -0,0 +1,8 @@ +# ListDatasetImportTasksResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `tasks` | [models.PaginatedResultDatasetImportTask](../models/paginatedresultdatasetimporttask.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listdatasetrecordsresponse.md b/docs/models/listdatasetrecordsresponse.md new file mode 100644 index 00000000..25d2618a --- /dev/null +++ b/docs/models/listdatasetrecordsresponse.md @@ -0,0 +1,8 @@ +# ListDatasetRecordsResponse + + +## Fields + +| Field | Type | Required | Description | +| 
-------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `records` | [models.PaginatedResultDatasetRecord](../models/paginatedresultdatasetrecord.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listdatasetsresponse.md b/docs/models/listdatasetsresponse.md new file mode 100644 index 00000000..af046696 --- /dev/null +++ b/docs/models/listdatasetsresponse.md @@ -0,0 +1,8 @@ +# ListDatasetsResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `datasets` | [models.PaginatedResultDatasetPreview](../models/paginatedresultdatasetpreview.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listjudgesresponse.md b/docs/models/listjudgesresponse.md new file mode 100644 index 00000000..66883d64 --- /dev/null +++ b/docs/models/listjudgesresponse.md @@ -0,0 +1,8 @@ +# ListJudgesResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `judges` | [models.PaginatedResultJudgePreview](../models/paginatedresultjudgepreview.md) | :heavy_check_mark: | 
N/A | \ No newline at end of file diff --git a/docs/models/listmodelsv1modelsgetrequest.md b/docs/models/listmodelsv1modelsgetrequest.md new file mode 100644 index 00000000..537269f7 --- /dev/null +++ b/docs/models/listmodelsv1modelsgetrequest.md @@ -0,0 +1,9 @@ +# ListModelsV1ModelsGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `provider` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/listsharingresponse.md b/docs/models/listsharingresponse.md new file mode 100644 index 00000000..4c29d4d4 --- /dev/null +++ b/docs/models/listsharingresponse.md @@ -0,0 +1,8 @@ +# ListSharingResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | +| `data` | List[[models.Sharing](../models/sharing.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/mcpservericon.md b/docs/models/mcpservericon.md new file mode 100644 index 00000000..b0ae7da0 --- /dev/null +++ b/docs/models/mcpservericon.md @@ -0,0 +1,13 @@ +# MCPServerIcon + +An icon for display in user interfaces. 
+ + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `src` | *str* | :heavy_check_mark: | N/A | +| `mime_type` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `sizes` | List[*str*] | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/messageinputcontentchunks.md b/docs/models/messageinputcontentchunks.md index 05617850..4fd18a0d 100644 --- a/docs/models/messageinputcontentchunks.md +++ b/docs/models/messageinputcontentchunks.md @@ -27,9 +27,9 @@ value: models.ToolFileChunk = /* values here */ value: models.DocumentURLChunk = /* values here */ ``` -### `models.ConversationThinkChunk` +### `models.ThinkChunk` ```python -value: models.ConversationThinkChunk = /* values here */ +value: models.ThinkChunk = /* values here */ ``` diff --git a/docs/models/messageoutputcontentchunks.md b/docs/models/messageoutputcontentchunks.md index c4a7777e..d9c3d50e 100644 --- a/docs/models/messageoutputcontentchunks.md +++ b/docs/models/messageoutputcontentchunks.md @@ -27,10 +27,10 @@ value: models.ToolFileChunk = /* values here */ value: models.DocumentURLChunk = /* values here */ ``` -### `models.ConversationThinkChunk` +### `models.ThinkChunk` ```python -value: models.ConversationThinkChunk = /* values here */ +value: models.ThinkChunk = /* values here */ ``` ### `models.ToolReferenceChunk` diff --git a/docs/models/messageresponse.md b/docs/models/messageresponse.md new file mode 100644 index 00000000..504aa9de --- /dev/null +++ b/docs/models/messageresponse.md @@ -0,0 +1,8 @@ +# MessageResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `message` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git 
a/docs/models/modelconversation.md b/docs/models/modelconversation.md index af2e5c61..190a6f6e 100644 --- a/docs/models/modelconversation.md +++ b/docs/models/modelconversation.md @@ -8,6 +8,7 @@ | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | | `tools` | List[[models.ModelConversationTool](../models/modelconversationtool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `guardrails` | List[[models.GuardrailConfig](../models/guardrailconfig.md)] | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. 
| diff --git a/docs/models/moderationllmv1action.md b/docs/models/moderationllmv1action.md new file mode 100644 index 00000000..e24d6a22 --- /dev/null +++ b/docs/models/moderationllmv1action.md @@ -0,0 +1,9 @@ +# ModerationLlmv1Action + + +## Values + +| Name | Value | +| ------- | ------- | +| `NONE` | none | +| `BLOCK` | block | \ No newline at end of file diff --git a/docs/models/moderationllmv1categorythresholds.md b/docs/models/moderationllmv1categorythresholds.md new file mode 100644 index 00000000..90ae213f --- /dev/null +++ b/docs/models/moderationllmv1categorythresholds.md @@ -0,0 +1,16 @@ +# ModerationLlmv1CategoryThresholds + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------- | -------------------------------- | -------------------------------- | -------------------------------- | +| `sexual` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `hate_and_discrimination` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `violence_and_threats` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `dangerous_and_criminal_content` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `selfharm` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `health` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `financial` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `law` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `pii` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/moderationllmv1config.md b/docs/models/moderationllmv1config.md new file mode 100644 index 00000000..0c410947 --- /dev/null +++ b/docs/models/moderationllmv1config.md @@ -0,0 +1,11 @@ +# ModerationLlmv1Config + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------ | +| `model_name` | *Optional[str]* | :heavy_minus_sign: | Override model name. Should be omitted in general. | +| `custom_category_thresholds` | [OptionalNullable[models.ModerationLlmv1CategoryThresholds]](../models/moderationllmv1categorythresholds.md) | :heavy_minus_sign: | Override default thresholds for specific categories. | +| `ignore_other_categories` | *Optional[bool]* | :heavy_minus_sign: | If true, only evaluate categories in custom_category_thresholds; others are ignored. | +| `action` | [Optional[models.ModerationLlmv1Action]](../models/moderationllmv1action.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/multipartbodyparams.md b/docs/models/multipartbodyparams.md index f14b9573..9d7a00c4 100644 --- a/docs/models/multipartbodyparams.md +++ b/docs/models/multipartbodyparams.md @@ -5,5 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `expiry` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `visibility` | [Optional[models.FilesAPIRoutesUploadFileFileVisibility]](../models/filesapiroutesuploadfilefilevisibility.md) | :heavy_minus_sign: | N/A | | `purpose` | [Optional[models.FilePurpose]](../models/filepurpose.md) | :heavy_minus_sign: | N/A | | `file` | [models.File](../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name you should format your request as such:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | \ No newline at end of file diff --git a/docs/models/observabilityerrorcode.md b/docs/models/observabilityerrorcode.md new file mode 100644 index 00000000..0c387f57 --- /dev/null +++ b/docs/models/observabilityerrorcode.md @@ -0,0 +1,50 @@ +# ObservabilityErrorCode + + +## Values + +| Name | Value | +| ---------------------------------------------- | ---------------------------------------------- | +| `UNKNOWN_ERROR` | UNKNOWN_ERROR | +| `VALIDATION_ERROR` | VALIDATION_ERROR | +| `AUTH_FORBIDDEN` | AUTH_FORBIDDEN | +| `AUTH_FORBIDDEN_NOT_WORKSPACE_ADMIN` | AUTH_FORBIDDEN_NOT_WORKSPACE_ADMIN | +| `AUTH_FORBIDDEN_WORKSPACE_NOT_FOUND` | AUTH_FORBIDDEN_WORKSPACE_NOT_FOUND | +| `AUTH_FORBIDDEN_ROLE_NOT_FOUND` | AUTH_FORBIDDEN_ROLE_NOT_FOUND | +| `AUTH_FORBIDDEN_ORG_NOT_WHITELISTED` | AUTH_FORBIDDEN_ORG_NOT_WHITELISTED | +| `AUTH_UNAUTHORIZED` | AUTH_UNAUTHORIZED | +| `FEATURE_NOT_SUPPORTED` | FEATURE_NOT_SUPPORTED | +| `FIELDS_BAD_REQUEST` | FIELDS_BAD_REQUEST | +| `FIELDS_NOT_FOUND` | FIELDS_NOT_FOUND | +| `SEARCH_NOT_FOUND` | SEARCH_NOT_FOUND | +| `SEARCH_BAD_REQUEST` | SEARCH_BAD_REQUEST | +| `SEARCH_SERVICE_UNAVAILABLE` | SEARCH_SERVICE_UNAVAILABLE | +| `DATABASE_ERROR` | DATABASE_ERROR | +| `DATABASE_TIMEOUT` | DATABASE_TIMEOUT | +| `DATABASE_UNAVAILABLE` | DATABASE_UNAVAILABLE | +| `DATABASE_QUERY_ERROR` | DATABASE_QUERY_ERROR | +| `SEARCH_FILTER_TO_SQL_CONVERSION_ERROR` | SEARCH_FILTER_TO_SQL_CONVERSION_ERROR | +| `JUDGE_CONVERSATION_FORMAT_ERROR` | JUDGE_CONVERSATION_FORMAT_ERROR | +| `JUDGE_MISTRAL_API_ERROR` | JUDGE_MISTRAL_API_ERROR | +| `JUDGE_MISTRAL_API_TIMEOUT` | JUDGE_MISTRAL_API_TIMEOUT | +| `JUDGE_NAME_ALREADY_EXISTS` | JUDGE_NAME_ALREADY_EXISTS | +| `JUDGE_NOT_FOUND` | JUDGE_NOT_FOUND | +| `JUDGE_ALREADY_HAS_NEW_VERSION` | JUDGE_ALREADY_HAS_NEW_VERSION | +| `JUDGE_USED_IN_CAMPAIGN_CANNOT_BE_UPDATED` | JUDGE_USED_IN_CAMPAIGN_CANNOT_BE_UPDATED | +| `JUDGE_DID_NOT_CHANGE` | JUDGE_DID_NOT_CHANGE | +| `CAMPAIGN_NOT_FOUND` | CAMPAIGN_NOT_FOUND | +| 
`CAMPAIGN_NO_MATCHING_EVENTS` | CAMPAIGN_NO_MATCHING_EVENTS | +| `DATASET_NOT_FOUND` | DATASET_NOT_FOUND | +| `DATASET_TASK_NOT_FOUND` | DATASET_TASK_NOT_FOUND | +| `DATASET_RECORD_NOT_FOUND` | DATASET_RECORD_NOT_FOUND | +| `DATASET_RECORD_FORMAT_ERROR` | DATASET_RECORD_FORMAT_ERROR | +| `AGENT_NOT_FOUND` | AGENT_NOT_FOUND | +| `AGENT_MISTRAL_API_ERROR` | AGENT_MISTRAL_API_ERROR | +| `EVALUATION_NOT_FOUND` | EVALUATION_NOT_FOUND | +| `EVALUATION_CURRENTLY_RUNNING` | EVALUATION_CURRENTLY_RUNNING | +| `EVALUATION_RECORD_NOT_FOUND` | EVALUATION_RECORD_NOT_FOUND | +| `EVALUATION_RUN_NOT_FOUND` | EVALUATION_RUN_NOT_FOUND | +| `EVALUATION_RUN_TRANSITION_IS_INVALID` | EVALUATION_RUN_TRANSITION_IS_INVALID | +| `EVALUATION_RUN_TRANSITION_IS_RUNNING_ALREADY` | EVALUATION_RUN_TRANSITION_IS_RUNNING_ALREADY | +| `EVALUATION_RUN_TRANSITION_ERROR` | EVALUATION_RUN_TRANSITION_ERROR | +| `TEMPLATE_SYNTAX_ERROR` | TEMPLATE_SYNTAX_ERROR | \ No newline at end of file diff --git a/docs/models/observabilityerrordetail.md b/docs/models/observabilityerrordetail.md new file mode 100644 index 00000000..a95e25c9 --- /dev/null +++ b/docs/models/observabilityerrordetail.md @@ -0,0 +1,9 @@ +# ObservabilityErrorDetail + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `message` | *str* | :heavy_check_mark: | N/A | +| `error_code` | [Nullable[models.ObservabilityErrorCode]](../models/observabilityerrorcode.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/op.md b/docs/models/op.md new file mode 100644 index 00000000..fa3e2f3a --- /dev/null +++ b/docs/models/op.md @@ -0,0 +1,26 @@ +# Op + + +## Values + +| Name | Value | +| 
-------------- | -------------- | +| `LT` | lt | +| `LTE` | lte | +| `GT` | gt | +| `GTE` | gte | +| `STARTSWITH` | startswith | +| `ISTARTSWITH` | istartswith | +| `ENDSWITH` | endswith | +| `IENDSWITH` | iendswith | +| `CONTAINS` | contains | +| `ICONTAINS` | icontains | +| `MATCHES` | matches | +| `NOTCONTAINS` | notcontains | +| `INOTCONTAINS` | inotcontains | +| `EQ` | eq | +| `NEQ` | neq | +| `ISNULL` | isnull | +| `INCLUDES` | includes | +| `EXCLUDES` | excludes | +| `LEN_EQ` | len_eq | \ No newline at end of file diff --git a/docs/models/operator.md b/docs/models/operator.md new file mode 100644 index 00000000..ca54debc --- /dev/null +++ b/docs/models/operator.md @@ -0,0 +1,28 @@ +# Operator + +The operator to use for filtering options + + +## Values + +| Name | Value | +| -------------- | -------------- | +| `LT` | lt | +| `LTE` | lte | +| `GT` | gt | +| `GTE` | gte | +| `STARTSWITH` | startswith | +| `ISTARTSWITH` | istartswith | +| `ENDSWITH` | endswith | +| `IENDSWITH` | iendswith | +| `CONTAINS` | contains | +| `ICONTAINS` | icontains | +| `MATCHES` | matches | +| `NOTCONTAINS` | notcontains | +| `INOTCONTAINS` | inotcontains | +| `EQ` | eq | +| `NEQ` | neq | +| `ISNULL` | isnull | +| `INCLUDES` | includes | +| `EXCLUDES` | excludes | +| `LEN_EQ` | len_eq | \ No newline at end of file diff --git a/docs/models/option.md b/docs/models/option.md new file mode 100644 index 00000000..0f600be4 --- /dev/null +++ b/docs/models/option.md @@ -0,0 +1,17 @@ +# Option + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `bool` + +```python +value: bool = /* values here */ +``` + diff --git a/docs/models/or_.md b/docs/models/or_.md new file mode 100644 index 00000000..2a217269 --- /dev/null +++ b/docs/models/or_.md @@ -0,0 +1,17 @@ +# Or + + +## Supported Types + +### `models.FilterGroup` + +```python +value: models.FilterGroup = /* values here */ +``` + +### `models.FilterCondition` + +```python +value: 
models.FilterCondition = /* values here */ +``` + diff --git a/docs/models/outputcontentchunks.md b/docs/models/outputcontentchunks.md index e5185014..c76bc31d 100644 --- a/docs/models/outputcontentchunks.md +++ b/docs/models/outputcontentchunks.md @@ -27,10 +27,10 @@ value: models.ToolFileChunk = /* values here */ value: models.DocumentURLChunk = /* values here */ ``` -### `models.ConversationThinkChunk` +### `models.ThinkChunk` ```python -value: models.ConversationThinkChunk = /* values here */ +value: models.ThinkChunk = /* values here */ ``` ### `models.ToolReferenceChunk` diff --git a/docs/models/paginatedconnectors.md b/docs/models/paginatedconnectors.md new file mode 100644 index 00000000..3fff5b95 --- /dev/null +++ b/docs/models/paginatedconnectors.md @@ -0,0 +1,9 @@ +# PaginatedConnectors + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `items` | List[[models.Connector](../models/connector.md)] | :heavy_check_mark: | N/A | +| `pagination` | [models.PaginationResponse](../models/paginationresponse.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/paginatedresultcampaignpreview.md b/docs/models/paginatedresultcampaignpreview.md new file mode 100644 index 00000000..7e8e5715 --- /dev/null +++ b/docs/models/paginatedresultcampaignpreview.md @@ -0,0 +1,11 @@ +# PaginatedResultCampaignPreview + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `results` | List[[models.Campaign](../models/campaign.md)] | :heavy_minus_sign: | N/A | +| `count` | *int* | 
:heavy_check_mark: | N/A | +| `next` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `previous` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/paginatedresultchatcompletioneventpreview.md b/docs/models/paginatedresultchatcompletioneventpreview.md new file mode 100644 index 00000000..96b4b7ae --- /dev/null +++ b/docs/models/paginatedresultchatcompletioneventpreview.md @@ -0,0 +1,11 @@ +# PaginatedResultChatCompletionEventPreview + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `results` | List[[models.ChatCompletionEventPreview](../models/chatcompletioneventpreview.md)] | :heavy_minus_sign: | N/A | +| `count` | *int* | :heavy_check_mark: | N/A | +| `next` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `previous` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/paginatedresultdatasetimporttask.md b/docs/models/paginatedresultdatasetimporttask.md new file mode 100644 index 00000000..ce067d52 --- /dev/null +++ b/docs/models/paginatedresultdatasetimporttask.md @@ -0,0 +1,11 @@ +# PaginatedResultDatasetImportTask + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `results` | List[[models.DatasetImportTask](../models/datasetimporttask.md)] | :heavy_minus_sign: | N/A | +| `count` | *int* | 
:heavy_check_mark: | N/A | +| `next` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `previous` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/paginatedresultdatasetpreview.md b/docs/models/paginatedresultdatasetpreview.md new file mode 100644 index 00000000..f6ca9037 --- /dev/null +++ b/docs/models/paginatedresultdatasetpreview.md @@ -0,0 +1,11 @@ +# PaginatedResultDatasetPreview + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `results` | List[[models.DatasetPreview](../models/datasetpreview.md)] | :heavy_minus_sign: | N/A | +| `count` | *int* | :heavy_check_mark: | N/A | +| `next` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `previous` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/paginatedresultdatasetrecord.md b/docs/models/paginatedresultdatasetrecord.md new file mode 100644 index 00000000..31b086eb --- /dev/null +++ b/docs/models/paginatedresultdatasetrecord.md @@ -0,0 +1,11 @@ +# PaginatedResultDatasetRecord + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `results` | List[[models.DatasetRecord](../models/datasetrecord.md)] | :heavy_minus_sign: | N/A | +| `count` | *int* | :heavy_check_mark: | N/A | +| `next` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `previous` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git 
a/docs/models/paginatedresultjudgepreview.md b/docs/models/paginatedresultjudgepreview.md new file mode 100644 index 00000000..91bf0c35 --- /dev/null +++ b/docs/models/paginatedresultjudgepreview.md @@ -0,0 +1,11 @@ +# PaginatedResultJudgePreview + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `results` | List[[models.Judge](../models/judge.md)] | :heavy_minus_sign: | N/A | +| `count` | *int* | :heavy_check_mark: | N/A | +| `next` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `previous` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/paginationresponse.md b/docs/models/paginationresponse.md new file mode 100644 index 00000000..7ed17a69 --- /dev/null +++ b/docs/models/paginationresponse.md @@ -0,0 +1,9 @@ +# PaginationResponse + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `next_cursor` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `page_size` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/postdatasetrecordsfromcampaignv1observabilitydatasetsdatasetidimportsfromcampaignpostrequest.md b/docs/models/postdatasetrecordsfromcampaignv1observabilitydatasetsdatasetidimportsfromcampaignpostrequest.md new file mode 100644 index 00000000..b3bf2a61 --- /dev/null +++ b/docs/models/postdatasetrecordsfromcampaignv1observabilitydatasetsdatasetidimportsfromcampaignpostrequest.md @@ -0,0 +1,9 @@ +# PostDatasetRecordsFromCampaignV1ObservabilityDatasetsDatasetIDImportsFromCampaignPostRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `import_dataset_from_campaign_request` | [models.ImportDatasetFromCampaignRequest](../models/importdatasetfromcampaignrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/postdatasetrecordsfromdatasetv1observabilitydatasetsdatasetidimportsfromdatasetpostrequest.md b/docs/models/postdatasetrecordsfromdatasetv1observabilitydatasetsdatasetidimportsfromdatasetpostrequest.md new file mode 100644 index 00000000..33dafda2 --- /dev/null +++ b/docs/models/postdatasetrecordsfromdatasetv1observabilitydatasetsdatasetidimportsfromdatasetpostrequest.md @@ -0,0 +1,9 @@ +# PostDatasetRecordsFromDatasetV1ObservabilityDatasetsDatasetIDImportsFromDatasetPostRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `import_dataset_from_dataset_request` | [models.ImportDatasetFromDatasetRequest](../models/importdatasetfromdatasetrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/postdatasetrecordsfromexplorerv1observabilitydatasetsdatasetidimportsfromexplorerpostrequest.md b/docs/models/postdatasetrecordsfromexplorerv1observabilitydatasetsdatasetidimportsfromexplorerpostrequest.md new file mode 100644 index 00000000..ba25323e --- /dev/null +++ 
b/docs/models/postdatasetrecordsfromexplorerv1observabilitydatasetsdatasetidimportsfromexplorerpostrequest.md @@ -0,0 +1,9 @@ +# PostDatasetRecordsFromExplorerV1ObservabilityDatasetsDatasetIDImportsFromExplorerPostRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `import_dataset_from_explorer_request` | [models.ImportDatasetFromExplorerRequest](../models/importdatasetfromexplorerrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/postdatasetrecordsfromfilev1observabilitydatasetsdatasetidimportsfromfilepostrequest.md b/docs/models/postdatasetrecordsfromfilev1observabilitydatasetsdatasetidimportsfromfilepostrequest.md new file mode 100644 index 00000000..0bff196b --- /dev/null +++ b/docs/models/postdatasetrecordsfromfilev1observabilitydatasetsdatasetidimportsfromfilepostrequest.md @@ -0,0 +1,9 @@ +# PostDatasetRecordsFromFileV1ObservabilityDatasetsDatasetIDImportsFromFilePostRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `import_dataset_from_file_request` | [models.ImportDatasetFromFileRequest](../models/importdatasetfromfilerequest.md) | :heavy_check_mark: | N/A | \ No newline at end of 
file diff --git a/docs/models/postdatasetrecordsfromplaygroundv1observabilitydatasetsdatasetidimportsfromplaygroundpostrequest.md b/docs/models/postdatasetrecordsfromplaygroundv1observabilitydatasetsdatasetidimportsfromplaygroundpostrequest.md new file mode 100644 index 00000000..86ab87b8 --- /dev/null +++ b/docs/models/postdatasetrecordsfromplaygroundv1observabilitydatasetsdatasetidimportsfromplaygroundpostrequest.md @@ -0,0 +1,9 @@ +# PostDatasetRecordsFromPlaygroundV1ObservabilityDatasetsDatasetIDImportsFromPlaygroundPostRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `import_dataset_from_playground_request` | [models.ImportDatasetFromPlaygroundRequest](../models/importdatasetfromplaygroundrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/processingstatusout.md b/docs/models/processingstatus.md similarity index 98% rename from docs/models/processingstatusout.md rename to docs/models/processingstatus.md index bc40d320..514caa50 100644 --- a/docs/models/processingstatusout.md +++ b/docs/models/processingstatus.md @@ -1,4 +1,4 @@ -# ProcessingStatusOut +# ProcessingStatus ## Fields diff --git a/docs/models/resource.md b/docs/models/resource.md new file mode 100644 index 00000000..30d74c40 --- /dev/null +++ b/docs/models/resource.md @@ -0,0 +1,17 @@ +# Resource + + +## Supported Types + +### `models.TextResourceContents` + +```python +value: models.TextResourceContents = /* values here */ +``` + +### `models.BlobResourceContents` + +```python +value: 
models.BlobResourceContents = /* values here */ +``` + diff --git a/docs/models/resourcelink.md b/docs/models/resourcelink.md new file mode 100644 index 00000000..074c573e --- /dev/null +++ b/docs/models/resourcelink.md @@ -0,0 +1,22 @@ +# ResourceLink + +A resource that the server is capable of reading, included in a prompt or tool call result. + +Note: resource links returned by tools are not guaranteed to appear in the results of `resources/list` requests. + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `title` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `uri` | *str* | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `mime_type` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `size` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `icons` | List[[models.MCPServerIcon](../models/mcpservericon.md)] | :heavy_minus_sign: | N/A | +| `annotations` | [OptionalNullable[models.Annotations]](../models/annotations.md) | :heavy_minus_sign: | N/A | +| `meta` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `type` | *Literal["resource_link"]* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/resourcevisibility.md b/docs/models/resourcevisibility.md new file mode 100644 index 00000000..9c06af2d --- /dev/null +++ b/docs/models/resourcevisibility.md @@ -0,0 +1,11 @@ +# ResourceVisibility + + +## Values + +| Name | Value | +| ------------------ | ------------------ | +| `SHARED_GLOBAL` | shared_global | +| `SHARED_ORG` | shared_org | +| `SHARED_WORKSPACE` | 
shared_workspace | +| `PRIVATE` | private | \ No newline at end of file diff --git a/docs/models/searchchatcompletioneventidsrequest.md b/docs/models/searchchatcompletioneventidsrequest.md new file mode 100644 index 00000000..7d0c4a50 --- /dev/null +++ b/docs/models/searchchatcompletioneventidsrequest.md @@ -0,0 +1,9 @@ +# SearchChatCompletionEventIdsRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `search_params` | [models.FilterPayload](../models/filterpayload.md) | :heavy_check_mark: | N/A | +| `extra_fields` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/searchchatcompletioneventidsresponse.md b/docs/models/searchchatcompletioneventidsresponse.md new file mode 100644 index 00000000..6e429684 --- /dev/null +++ b/docs/models/searchchatcompletioneventidsresponse.md @@ -0,0 +1,8 @@ +# SearchChatCompletionEventIdsResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------- | ---------------------- | ---------------------- | ---------------------- | +| `completion_event_ids` | List[*str*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listsharingout.md b/docs/models/searchchatcompletioneventsrequest.md similarity index 58% rename from docs/models/listsharingout.md rename to docs/models/searchchatcompletioneventsrequest.md index bcac4834..11bc3ab9 100644 --- a/docs/models/listsharingout.md +++ b/docs/models/searchchatcompletioneventsrequest.md @@ -1,8 +1,9 @@ -# ListSharingOut +# SearchChatCompletionEventsRequest ## Fields | Field | Type | Required | Description | | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- 
| -------------------------------------------------- | -| `data` | List[[models.SharingOut](../models/sharingout.md)] | :heavy_check_mark: | N/A | \ No newline at end of file +| `search_params` | [models.FilterPayload](../models/filterpayload.md) | :heavy_check_mark: | N/A | +| `extra_fields` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/searchchatcompletioneventsresponse.md b/docs/models/searchchatcompletioneventsresponse.md new file mode 100644 index 00000000..9474c110 --- /dev/null +++ b/docs/models/searchchatcompletioneventsresponse.md @@ -0,0 +1,8 @@ +# SearchChatCompletionEventsResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | +| `completion_events` | [models.FeedResultChatCompletionEventPreview](../models/feedresultchatcompletioneventpreview.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/sharingout.md b/docs/models/sharing.md similarity index 98% rename from docs/models/sharingout.md rename to docs/models/sharing.md index 35aeff43..fc718632 100644 --- a/docs/models/sharingout.md +++ b/docs/models/sharing.md @@ -1,4 +1,4 @@ -# SharingOut +# Sharing ## Fields diff --git a/docs/models/sharingin.md b/docs/models/sharingrequest.md similarity index 99% rename from docs/models/sharingin.md rename to docs/models/sharingrequest.md index bac18c8d..21b8ec1f 100644 --- a/docs/models/sharingin.md +++ b/docs/models/sharingrequest.md @@ -1,4 +1,4 @@ -# SharingIn +# SharingRequest ## Fields diff --git a/docs/models/supportedoperator.md b/docs/models/supportedoperator.md new 
file mode 100644 index 00000000..74cf9b8e --- /dev/null +++ b/docs/models/supportedoperator.md @@ -0,0 +1,26 @@ +# SupportedOperator + + +## Values + +| Name | Value | +| -------------- | -------------- | +| `LT` | lt | +| `LTE` | lte | +| `GT` | gt | +| `GTE` | gte | +| `STARTSWITH` | startswith | +| `ISTARTSWITH` | istartswith | +| `ENDSWITH` | endswith | +| `IENDSWITH` | iendswith | +| `CONTAINS` | contains | +| `ICONTAINS` | icontains | +| `MATCHES` | matches | +| `NOTCONTAINS` | notcontains | +| `INOTCONTAINS` | inotcontains | +| `EQ` | eq | +| `NEQ` | neq | +| `ISNULL` | isnull | +| `INCLUDES` | includes | +| `EXCLUDES` | excludes | +| `LEN_EQ` | len_eq | \ No newline at end of file diff --git a/docs/models/textcontent.md b/docs/models/textcontent.md new file mode 100644 index 00000000..ddd2df02 --- /dev/null +++ b/docs/models/textcontent.md @@ -0,0 +1,14 @@ +# TextContent + +Text content for a message. + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | *Literal["text"]* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | +| `annotations` | [OptionalNullable[models.Annotations]](../models/annotations.md) | :heavy_minus_sign: | N/A | +| `meta` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/textresourcecontents.md b/docs/models/textresourcecontents.md new file mode 100644 index 00000000..daa531e1 --- /dev/null +++ b/docs/models/textresourcecontents.md @@ -0,0 +1,14 @@ +# TextResourceContents + +Text contents of a resource. 
+ + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `uri` | *str* | :heavy_check_mark: | N/A | +| `mime_type` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `meta` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/thinkchunk.md b/docs/models/thinkchunk.md index 70c0369f..98603d8f 100644 --- a/docs/models/thinkchunk.md +++ b/docs/models/thinkchunk.md @@ -5,6 +5,6 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | -| `type` | *Literal["thinking"]* | :heavy_check_mark: | N/A | -| `thinking` | List[[models.ThinkChunkThinking](../models/thinkchunkthinking.md)] | :heavy_check_mark: | N/A | +| `type` | *Optional[Literal["thinking"]]* | :heavy_minus_sign: | N/A | +| `thinking` | List[[models.Thinking](../models/thinking.md)] | :heavy_check_mark: | N/A | | `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. 
| \ No newline at end of file diff --git a/docs/models/thinkchunkthinking.md b/docs/models/thinkchunkthinking.md deleted file mode 100644 index dd1ecca1..00000000 --- a/docs/models/thinkchunkthinking.md +++ /dev/null @@ -1,17 +0,0 @@ -# ThinkChunkThinking - - -## Supported Types - -### `models.ReferenceChunk` - -```python -value: models.ReferenceChunk = /* values here */ -``` - -### `models.TextChunk` - -```python -value: models.TextChunk = /* values here */ -``` - diff --git a/docs/models/conversationthinkchunkthinking.md b/docs/models/thinking.md similarity index 66% rename from docs/models/conversationthinkchunkthinking.md rename to docs/models/thinking.md index 84b80018..d9e51d7d 100644 --- a/docs/models/conversationthinkchunkthinking.md +++ b/docs/models/thinking.md @@ -1,4 +1,4 @@ -# ConversationThinkChunkThinking +# Thinking ## Supported Types @@ -15,3 +15,9 @@ value: models.TextChunk = /* values here */ value: models.ToolReferenceChunk = /* values here */ ``` +### `models.ReferenceChunk` + +```python +value: models.ReferenceChunk = /* values here */ +``` + diff --git a/docs/models/typeenum.md b/docs/models/typeenum.md new file mode 100644 index 00000000..80a66af3 --- /dev/null +++ b/docs/models/typeenum.md @@ -0,0 +1,14 @@ +# TypeEnum + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `ENUM` | ENUM | +| `TEXT` | TEXT | +| `INT` | INT | +| `FLOAT` | FLOAT | +| `BOOL` | BOOL | +| `TIMESTAMP` | TIMESTAMP | +| `ARRAY` | ARRAY | \ No newline at end of file diff --git a/docs/models/updateagentrequest.md b/docs/models/updateagentrequest.md index d3428d92..b8aa01f6 100644 --- a/docs/models/updateagentrequest.md +++ b/docs/models/updateagentrequest.md @@ -8,6 +8,7 @@ | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. 
| | `tools` | List[[models.UpdateAgentRequestTool](../models/updateagentrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `guardrails` | List[[models.GuardrailConfig](../models/guardrailconfig.md)] | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/updateconnectorrequest.md b/docs/models/updateconnectorrequest.md new file mode 100644 index 00000000..d6d76631 --- /dev/null +++ b/docs/models/updateconnectorrequest.md @@ -0,0 +1,16 @@ +# UpdateConnectorRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the connector. | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | The description of the connector. | +| `icon_url` | *OptionalNullable[str]* | :heavy_minus_sign: | The optional url of the icon you want to associate to the connector. | +| `system_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional system prompt for the connector. | +| `connection_config` | Dict[str, *Any*] | :heavy_minus_sign: | Optional new connection config. | +| `connection_secrets` | Dict[str, *Any*] | :heavy_minus_sign: | Optional new connection secrets | +| `server` | *OptionalNullable[str]* | :heavy_minus_sign: | New server url for your mcp connector. 
| +| `headers` | Dict[str, *Any*] | :heavy_minus_sign: | New headers for your mcp connector. | +| `auth_data` | [OptionalNullable[models.AuthData]](../models/authdata.md) | :heavy_minus_sign: | New authentication data for your mcp connector. | \ No newline at end of file diff --git a/docs/models/updatedatasetrecordpayloadrequest.md b/docs/models/updatedatasetrecordpayloadrequest.md new file mode 100644 index 00000000..f152d843 --- /dev/null +++ b/docs/models/updatedatasetrecordpayloadrequest.md @@ -0,0 +1,8 @@ +# UpdateDatasetRecordPayloadRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | +| `payload` | [models.ConversationPayload](../models/conversationpayload.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/updatedatasetrecordpayloadv1observabilitydatasetrecordsdatasetrecordidpayloadputrequest.md b/docs/models/updatedatasetrecordpayloadv1observabilitydatasetrecordsdatasetrecordidpayloadputrequest.md new file mode 100644 index 00000000..fbb4b308 --- /dev/null +++ b/docs/models/updatedatasetrecordpayloadv1observabilitydatasetrecordsdatasetrecordidpayloadputrequest.md @@ -0,0 +1,9 @@ +# UpdateDatasetRecordPayloadV1ObservabilityDatasetRecordsDatasetRecordIDPayloadPutRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `dataset_record_id` | *str* | :heavy_check_mark: | 
N/A | +| `update_dataset_record_payload_request` | [models.UpdateDatasetRecordPayloadRequest](../models/updatedatasetrecordpayloadrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/updatedatasetrecordpropertiesrequest.md b/docs/models/updatedatasetrecordpropertiesrequest.md new file mode 100644 index 00000000..6e98944d --- /dev/null +++ b/docs/models/updatedatasetrecordpropertiesrequest.md @@ -0,0 +1,8 @@ +# UpdateDatasetRecordPropertiesRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `properties` | Dict[str, *Any*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/updatedatasetrecordpropertiesv1observabilitydatasetrecordsdatasetrecordidpropertiesputrequest.md b/docs/models/updatedatasetrecordpropertiesv1observabilitydatasetrecordsdatasetrecordidpropertiesputrequest.md new file mode 100644 index 00000000..6b09e479 --- /dev/null +++ b/docs/models/updatedatasetrecordpropertiesv1observabilitydatasetrecordsdatasetrecordidpropertiesputrequest.md @@ -0,0 +1,9 @@ +# UpdateDatasetRecordPropertiesV1ObservabilityDatasetRecordsDatasetRecordIDPropertiesPutRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | +| `dataset_record_id` | *str* | :heavy_check_mark: | N/A | +| `update_dataset_record_properties_request` | [models.UpdateDatasetRecordPropertiesRequest](../models/updatedatasetrecordpropertiesrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git 
a/docs/models/updatedatasetrequest.md b/docs/models/updatedatasetrequest.md new file mode 100644 index 00000000..2a5194e4 --- /dev/null +++ b/docs/models/updatedatasetrequest.md @@ -0,0 +1,9 @@ +# UpdateDatasetRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/updatedatasetv1observabilitydatasetsdatasetidpatchrequest.md b/docs/models/updatedatasetv1observabilitydatasetsdatasetidpatchrequest.md new file mode 100644 index 00000000..78eae8fb --- /dev/null +++ b/docs/models/updatedatasetv1observabilitydatasetsdatasetidpatchrequest.md @@ -0,0 +1,9 @@ +# UpdateDatasetV1ObservabilityDatasetsDatasetIDPatchRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `update_dataset_request` | [models.UpdateDatasetRequest](../models/updatedatasetrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/updatejudgerequest.md b/docs/models/updatejudgerequest.md new file mode 100644 index 00000000..bf28ae7a --- /dev/null +++ b/docs/models/updatejudgerequest.md @@ -0,0 +1,13 @@ +# UpdateJudgeRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | 
------------------------------------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *str* | :heavy_check_mark: | N/A | +| `model_name` | *str* | :heavy_check_mark: | N/A | +| `output` | [models.UpdateJudgeRequestOutput](../models/updatejudgerequestoutput.md) | :heavy_check_mark: | N/A | +| `instructions` | *str* | :heavy_check_mark: | N/A | +| `tools` | List[*str*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/updatejudgerequestoutput.md b/docs/models/updatejudgerequestoutput.md new file mode 100644 index 00000000..a8db68ab --- /dev/null +++ b/docs/models/updatejudgerequestoutput.md @@ -0,0 +1,17 @@ +# UpdateJudgeRequestOutput + + +## Supported Types + +### `models.JudgeClassificationOutput` + +```python +value: models.JudgeClassificationOutput = /* values here */ +``` + +### `models.JudgeRegressionOutput` + +```python +value: models.JudgeRegressionOutput = /* values here */ +``` + diff --git a/docs/models/updatejudgev1observabilityjudgesjudgeidputrequest.md b/docs/models/updatejudgev1observabilityjudgesjudgeidputrequest.md new file mode 100644 index 00000000..bfe3648d --- /dev/null +++ b/docs/models/updatejudgev1observabilityjudgesjudgeidputrequest.md @@ -0,0 +1,9 @@ +# UpdateJudgeV1ObservabilityJudgesJudgeIDPutRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `judge_id` | *str* | :heavy_check_mark: | N/A | +| `update_judge_request` | [models.UpdateJudgeRequest](../models/updatejudgerequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/validationerror.md b/docs/models/validationerror.md index 7a1654a1..5bcea5b5 100644 --- a/docs/models/validationerror.md +++ 
b/docs/models/validationerror.md @@ -3,8 +3,10 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | -| `loc` | List[[models.Loc](../models/loc.md)] | :heavy_check_mark: | N/A | -| `msg` | *str* | :heavy_check_mark: | N/A | -| `type` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | +| `loc` | List[[models.Loc](../models/loc.md)] | :heavy_check_mark: | N/A | +| `msg` | *str* | :heavy_check_mark: | N/A | +| `type` | *str* | :heavy_check_mark: | N/A | +| `input` | *Optional[Any]* | :heavy_minus_sign: | N/A | +| `ctx` | [Optional[models.Context]](../models/context.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/sdks/accesses/README.md b/docs/sdks/accesses/README.md index c50456df..51051e2f 100644 --- a/docs/sdks/accesses/README.md +++ b/docs/sdks/accesses/README.md @@ -42,7 +42,7 @@ with Mistral( ### Response -**[models.ListSharingOut](../../models/listsharingout.md)** +**[models.ListSharingResponse](../../models/listsharingresponse.md)** ### Errors @@ -87,7 +87,7 @@ with Mistral( ### Response -**[models.SharingOut](../../models/sharingout.md)** +**[models.Sharing](../../models/sharing.md)** ### Errors @@ -131,7 +131,7 @@ with Mistral( ### Response -**[models.SharingOut](../../models/sharingout.md)** +**[models.Sharing](../../models/sharing.md)** ### Errors diff --git a/docs/sdks/betaagents/README.md b/docs/sdks/betaagents/README.md index aaa5110e..b936538c 100644 --- a/docs/sdks/betaagents/README.md +++ b/docs/sdks/betaagents/README.md @@ -54,6 +54,7 @@ with Mistral( | `instructions` | *OptionalNullable[str]* | 
:heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | | `tools` | List[[models.CreateAgentRequestTool](../../models/createagentrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `guardrails` | List[[models.GuardrailConfig](../../models/guardrailconfig.md)] | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | @@ -196,6 +197,7 @@ with Mistral( | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | | `tools` | List[[models.UpdateAgentRequestTool](../../models/updateagentrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `guardrails` | List[[models.GuardrailConfig](../../models/guardrailconfig.md)] | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | diff --git a/docs/sdks/campaigns/README.md b/docs/sdks/campaigns/README.md new file mode 100644 index 00000000..d5d7e4d4 --- /dev/null +++ b/docs/sdks/campaigns/README.md @@ -0,0 +1,267 @@ +# Beta.Observability.Campaigns + +## Overview + +### Available Operations + +* [create](#create) - Create and start a new campaign +* [list](#list) - Get all campaigns +* [fetch](#fetch) - Get campaign by id +* [delete](#delete) - Delete a campaign +* [fetch_status](#fetch_status) - Get campaign status by campaign id +* [list_events](#list_events) - Get event ids that were selected by the given campaign + +## create + +Create and start a new campaign + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.campaigns.create(search_params={ + "filters": { + "field": "", + "op": "lt", + "value": "", + }, + }, judge_id="9b501b9f-3525-44a7-a51a-5352679be9ed", name="", description="shakily triangular scotch requirement whether once oh", max_nb_events=232889) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `search_params` | 
[models.FilterPayload](../../models/filterpayload.md) | :heavy_check_mark: | N/A | +| `judge_id` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *str* | :heavy_check_mark: | N/A | +| `max_nb_events` | *int* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Campaign](../../models/campaign.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## list + +Get all campaigns + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.campaigns.list(page_size=50, page=1) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `q` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.ListCampaignsResponse](../../models/listcampaignsresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## fetch + +Get campaign by id + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.campaigns.fetch(campaign_id="fd7945d6-00e2-4852-9054-bcbb968d7f98") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `campaign_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Campaign](../../models/campaign.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## delete + +Delete a campaign + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.observability.campaigns.delete(campaign_id="90e07b45-8cf7-4081-8558-a786779e039d") + + # Use the SDK ... 
+ +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `campaign_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## fetch_status + +Get campaign status by campaign id + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.campaigns.fetch_status(campaign_id="4b1dd9a5-8dc9-48e1-bd11-29443e959902") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `campaign_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.FetchCampaignStatusResponse](../../models/fetchcampaignstatusresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## list_events + +Get event ids that were selected by the given campaign + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.campaigns.list_events(campaign_id="305b5e46-a650-4d8a-8b5b-d23ef90ec831", page_size=50, page=1) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `campaign_id` | *str* | :heavy_check_mark: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.ListCampaignSelectedEventsResponse](../../models/listcampaignselectedeventsresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/chatcompletionevents/README.md b/docs/sdks/chatcompletionevents/README.md new file mode 100644 index 00000000..c3f19868 --- /dev/null +++ b/docs/sdks/chatcompletionevents/README.md @@ -0,0 +1,244 @@ +# Beta.Observability.ChatCompletionEvents + +## Overview + +### Available Operations + +* [search](#search) - Get Chat Completion Events +* [search_ids](#search_ids) - Alternative to /search that returns only the IDs and that can return many IDs at once +* [fetch](#fetch) - Get Chat Completion Event +* [fetch_similar_events](#fetch_similar_events) - Get Similar Chat Completion Events +* [judge](#judge) - Run Judge on an event based on the given options + +## search + +Get Chat Completion Events + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.chat_completion_events.search(search_params={ + "filters": None, + }, page_size=50) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `search_params` | [models.FilterPayload](../../models/filterpayload.md) | :heavy_check_mark: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `cursor` | 
*OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `extra_fields` | List[*str*] | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.SearchChatCompletionEventsResponse](../../models/searchchatcompletioneventsresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## search_ids + +Alternative to /search that returns only the IDs and that can return many IDs at once + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.chat_completion_events.search_ids(search_params={ + "filters": { + "field": "", + "op": "lt", + "value": "", + }, + }) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `search_params` | [models.FilterPayload](../../models/filterpayload.md) | :heavy_check_mark: | N/A | +| `extra_fields` | List[*str*] | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.SearchChatCompletionEventIdsResponse](../../models/searchchatcompletioneventidsresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## fetch + +Get Chat Completion Event + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.chat_completion_events.fetch(event_id="e79bf81b-b37f-425e-9dff-071a54592e44") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `event_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.ChatCompletionEvent](../../models/chatcompletionevent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## fetch_similar_events + +Get Similar Chat Completion Events + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.chat_completion_events.fetch_similar_events(event_id="b7be6e08-d068-45fc-b77a-966232e92fd6") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `event_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.SearchChatCompletionEventsResponse](../../models/searchchatcompletioneventsresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## judge + +Run Judge on an event based on the given options + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.chat_completion_events.judge(event_id="dfcd5582-1373-4de5-af51-987464da561c", judge_definition={ + "name": "", + "description": "total plain self-confidence candid hungrily partial astride cruelly brr", + "model_name": "", + "output": { + "type": "CLASSIFICATION", + "options": [ + { + "value": "", + "description": "indeed insolence delightfully following", + }, + ], + }, + "instructions": "", + "tools": [], + }) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `event_id` | *str* | :heavy_check_mark: | N/A | +| `judge_definition` | [models.CreateJudgeRequest](../../models/createjudgerequest.md) | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.JudgeOutput](../../models/judgeoutput.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/connectors/README.md b/docs/sdks/connectors/README.md new file mode 100644 index 00000000..b8150eb8 --- /dev/null +++ b/docs/sdks/connectors/README.md @@ -0,0 +1,282 @@ +# Beta.Connectors + +## Overview + +(beta) Connectors API - manage your connectors + +### Available Operations + +* [create](#create) - Create a new connector. +* [list](#list) - List all connectors. +* [call_tool](#call_tool) - Call Connector Tool +* [get](#get) - Get a connector. +* [update](#update) - Update a connector. +* [delete](#delete) - Delete a connector. + +## create + +Create a new MCP connector. You can customize its visibility, url and auth type. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.connectors.create(name="", description="unibody usually despite slushy wherever reward stingy from", server="https://royal-majority.net/") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | +| `name` | *str* | :heavy_check_mark: | The name of the connector. 
Should be 64 char length maximum, alphanumeric, only underscores/dashes. | +| `description` | *str* | :heavy_check_mark: | The description of the connector. | +| `server` | *str* | :heavy_check_mark: | The url of the MCP server. | +| `icon_url` | *OptionalNullable[str]* | :heavy_minus_sign: | The optional url of the icon you want to associate to the connector. | +| `visibility` | [Optional[models.ResourceVisibility]](../../models/resourcevisibility.md) | :heavy_minus_sign: | N/A | +| `headers` | Dict[str, *Any*] | :heavy_minus_sign: | Optional organization-level headers to be sent with the request to the mcp server. | +| `auth_data` | [OptionalNullable[models.AuthData]](../../models/authdata.md) | :heavy_minus_sign: | Optional additional authentication data for the connector. | +| `system_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional system prompt for the connector. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Connector](../../models/connector.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## list + +List all your custom connectors with keyset pagination and filters. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.connectors.list(page_size=100) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | +| `query_filters` | [Optional[models.ConnectorsQueryFilters]](../../models/connectorsqueryfilters.md) | :heavy_minus_sign: | N/A | +| `cursor` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.PaginatedConnectors](../../models/paginatedconnectors.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## call_tool + +Call a tool on an MCP connector. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.connectors.call_tool(tool_name="", connector_id_or_name="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `tool_name` | *str* | :heavy_check_mark: | N/A | +| `connector_id_or_name` | *str* | :heavy_check_mark: | N/A | +| `arguments` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ConnectorToolCallResponse](../../models/connectortoolcallresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## get + +Get a connector by its ID or name. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.connectors.get(connector_id_or_name="", fetch_customer_data=False, fetch_connection_secrets=False) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| --------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | +| `connector_id_or_name` | *str* | :heavy_check_mark: | N/A | +| `fetch_customer_data` | *Optional[bool]* | :heavy_minus_sign: | Fetch the customer data associated with the connector (e.g. customer secrets / config). | +| `fetch_connection_secrets` | *Optional[bool]* | :heavy_minus_sign: | Fetch the general connection secrets associated with the connector. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Connector](../../models/connector.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## update + +Update a connector by its ID. 
+ +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.connectors.update(connector_id="81d30634-113f-4dce-a89e-7786be2d8693") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `connector_id` | *str* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the connector. | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | The description of the connector. | +| `icon_url` | *OptionalNullable[str]* | :heavy_minus_sign: | The optional url of the icon you want to associate to the connector. | +| `system_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional system prompt for the connector. | +| `connection_config` | Dict[str, *Any*] | :heavy_minus_sign: | Optional new connection config. | +| `connection_secrets` | Dict[str, *Any*] | :heavy_minus_sign: | Optional new connection secrets | +| `server` | *OptionalNullable[str]* | :heavy_minus_sign: | New server url for your mcp connector. | +| `headers` | Dict[str, *Any*] | :heavy_minus_sign: | New headers for your mcp connector. | +| `auth_data` | [OptionalNullable[models.AuthData]](../../models/authdata.md) | :heavy_minus_sign: | New authentication data for your mcp connector. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.Connector](../../models/connector.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## delete + +Delete a connector by its ID. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.connectors.delete(connector_id="5c3269fe-6a18-4216-b1fb-b093005874cd") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `connector_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.MessageResponse](../../models/messageresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index e77d329b..083b293d 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -56,6 +56,7 @@ with Mistral( | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `tools` | List[[models.ConversationRequestTool](../../models/conversationrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `guardrails` | List[[models.GuardrailConfig](../../models/guardrailconfig.md)] | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | @@ -366,6 +367,7 @@ with Mistral( | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `guardrails` | List[[models.GuardrailConfig](../../models/guardrailconfig.md)] | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. 
| | `agent_version` | [OptionalNullable[models.ConversationRestartRequestAgentVersion]](../../models/conversationrestartrequestagentversion.md) | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | @@ -428,6 +430,7 @@ with Mistral( | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `tools` | List[[models.ConversationStreamRequestTool](../../models/conversationstreamrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `guardrails` | List[[models.GuardrailConfig](../../models/guardrailconfig.md)] | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | @@ -540,6 +543,7 @@ with Mistral( | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `guardrails` | List[[models.GuardrailConfig](../../models/guardrailconfig.md)] | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. 
| | `agent_version` | [OptionalNullable[models.ConversationRestartStreamRequestAgentVersion]](../../models/conversationrestartstreamrequestagentversion.md) | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | diff --git a/docs/sdks/datasets/README.md b/docs/sdks/datasets/README.md new file mode 100644 index 00000000..c04ced0c --- /dev/null +++ b/docs/sdks/datasets/README.md @@ -0,0 +1,669 @@ +# Beta.Observability.Datasets + +## Overview + +### Available Operations + +* [create](#create) - Create a new empty dataset +* [list](#list) - List existing datasets +* [fetch](#fetch) - Get dataset by id +* [delete](#delete) - Delete a dataset +* [update](#update) - Patch dataset +* [list_records](#list_records) - List existing records in the dataset +* [create_record](#create_record) - Add a conversation to the dataset +* [import_from_campaign](#import_from_campaign) - Populate the dataset with a campaign +* [import_from_explorer](#import_from_explorer) - Populate the dataset with samples from the explorer +* [import_from_file](#import_from_file) - Populate the dataset with samples from an uploaded file +* [import_from_playground](#import_from_playground) - Populate the dataset with samples from the playground +* [import_from_dataset_records](#import_from_dataset_records) - Populate the dataset with samples from another dataset +* [export_to_jsonl](#export_to_jsonl) - Export to the Files API and retrieve presigned URL to download the resulting JSONL file +* [fetch_task](#fetch_task) - Get status of a dataset import task +* [list_tasks](#list_tasks) - List import tasks for the given dataset + +## create + +Create a new empty dataset + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + 
api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.datasets.create(name="", description="citizen whoever sustenance necessary vibrant openly") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Dataset](../../models/dataset.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## list + +List existing datasets + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.datasets.list(page_size=50, page=1) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `q` | *OptionalNullable[str]* | 
:heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ListDatasetsResponse](../../models/listdatasetsresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## fetch + +Get dataset by id + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.datasets.fetch(dataset_id="036fa362-e080-4fa5-beff-a334a70efb58") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.DatasetPreview](../../models/datasetpreview.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## delete + +Delete a dataset + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.observability.datasets.delete(dataset_id="baf961a3-bb8e-4085-89ef-de9c5d8c4e77") + + # Use the SDK ... + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## update + +Patch dataset + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.datasets.update(dataset_id="95be9afc-fc05-44a6-af9f-2362de1224f9") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.DatasetPreview](../../models/datasetpreview.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## list_records + +List existing records in the dataset + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.datasets.list_records(dataset_id="444d2a88-e636-4bc0-ab6c-919bedaed112", page_size=50, page=1) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.ListDatasetRecordsResponse](../../models/listdatasetrecordsresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## create_record + +Add a conversation to the dataset + +### Example Usage + + +```python +from mistralai.client import Mistral, models +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.datasets.create_record(dataset_id="4c54ed13-1459-44e1-8696-1a6df06f7177", payload=models.ConversationPayload( + messages=[ + { + "key": "", + }, + { + "key": "", + "key1": "", + }, + ], + ), properties={ + "key": "", + "key1": "", + "key2": "", + }) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `payload` | [models.ConversationPayload](../../models/conversationpayload.md) | :heavy_check_mark: | N/A | +| `properties` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.DatasetRecord](../../models/datasetrecord.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## import_from_campaign + +Populate the dataset with a campaign + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.datasets.import_from_campaign(dataset_id="306b5f31-e31c-4e06-9220-e3008c61bf1b", campaign_id="71a2e42d-7414-4fe6-89cb-44a2122b6f6b") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `campaign_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.DatasetImportTask](../../models/datasetimporttask.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## import_from_explorer + +Populate the dataset with samples from the explorer + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.datasets.import_from_explorer(dataset_id="ee1930e9-54f7-4c68-aa8a-40fe5d2a3485", completion_event_ids=[ + "", + "", + "", + ]) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `completion_event_ids` | List[*str*] | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.DatasetImportTask](../../models/datasetimporttask.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## import_from_file + +Populate the dataset with samples from an uploaded file + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.datasets.import_from_file(dataset_id="1c96c925-cc58-4529-863d-9fe66a6f1924", file_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.DatasetImportTask](../../models/datasetimporttask.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## import_from_playground + +Populate the dataset with samples from the playground + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.datasets.import_from_playground(dataset_id="5cb42584-5fcf-4837-997a-6a67c5e6900d", conversation_ids=[]) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_ids` | List[*str*] | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.DatasetImportTask](../../models/datasetimporttask.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## import_from_dataset_records + +Populate the dataset with samples from another dataset + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.datasets.import_from_dataset_records(dataset_id="ada96a08-d724-4e5c-9111-aaf1bdb7d588", dataset_record_ids=[ + "58fe798a-537b-4c61-9efc-d1d96d5d264a", + "cfa1d197-deda-456e-906b-dd84dccfcd17", + ]) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `dataset_record_ids` | List[*str*] | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.DatasetImportTask](../../models/datasetimporttask.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## export_to_jsonl + +Export to the Files API and retrieve presigned URL to download the resulting JSONL file + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.datasets.export_to_jsonl(dataset_id="d521add6-d909-4a69-a460-cb880d87b773") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.ExportDatasetResponse](../../models/exportdatasetresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## fetch_task + +Get status of a dataset import task + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.datasets.fetch_task(dataset_id="b64b504e-58a2-4d52-979b-e2634b301235", task_id="1713cde2-dea1-410d-851e-8cea964ffa14") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `task_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.DatasetImportTask](../../models/datasetimporttask.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## list_tasks + +List import tasks for the given dataset + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.datasets.list_tasks(dataset_id="29903443-7f9c-42a6-9b6b-fc5cbef4191a", page_size=50, page=1) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `dataset_id` | *str* | :heavy_check_mark: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.ListDatasetImportTasksResponse](../../models/listdatasetimporttasksresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/documents/README.md b/docs/sdks/documents/README.md index 9c219b67..2c9440f9 100644 --- a/docs/sdks/documents/README.md +++ b/docs/sdks/documents/README.md @@ -307,7 +307,7 @@ with Mistral( ### Response -**[models.ProcessingStatusOut](../../models/processingstatusout.md)** +**[models.ProcessingStatus](../../models/processingstatus.md)** ### Errors diff --git a/docs/sdks/fields/README.md b/docs/sdks/fields/README.md new file mode 100644 index 00000000..3c842441 --- /dev/null +++ b/docs/sdks/fields/README.md @@ -0,0 +1,133 @@ +# Beta.Observability.ChatCompletionEvents.Fields + +## Overview + +### Available Operations + +* [list](#list) - Get Chat Completion Fields +* [fetch_options](#fetch_options) - Get Chat Completion Field Options +* [fetch_option_counts](#fetch_option_counts) - Get Chat Completion Field Options Counts + +## list + +Get Chat Completion Fields + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.chat_completion_events.fields.list() + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `retries` | 
[Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ListChatCompletionFieldsResponse](../../models/listchatcompletionfieldsresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## fetch_options + +Get Chat Completion Field Options + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.chat_completion_events.fields.fetch_options(field_name="", operator="startswith") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `field_name` | *str* | :heavy_check_mark: | N/A | +| `operator` | [models.Operator](../../models/operator.md) | :heavy_check_mark: | The operator to use for filtering options | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.FetchChatCompletionFieldOptionsResponse](../../models/fetchchatcompletionfieldoptionsresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## fetch_option_counts + +Get Chat Completion Field Options Counts + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.chat_completion_events.fields.fetch_option_counts(field_name="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | +| `field_name` | *str* | :heavy_check_mark: | N/A | +| `filter_params` | [OptionalNullable[models.FilterPayload]](../../models/filterpayload.md) | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.FetchFieldOptionCountsResponse](../../models/fetchfieldoptioncountsresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index 9507326b..7db76611 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -36,7 +36,7 @@ with Mistral( res = mistral.files.upload(file={ "file_name": "example.file", "content": open("example.file", "rb"), - }) + }, visibility="workspace") # Handle response print(res) @@ -48,6 +48,8 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `file` | [models.File](../../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name you should format your request as such:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | +| `expiry` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `visibility` | [Optional[models.FilesAPIRoutesUploadFileFileVisibility]](../../models/filesapiroutesuploadfilefilevisibility.md) | :heavy_minus_sign: | N/A | | `purpose` | [Optional[models.FilePurpose]](../../models/filepurpose.md) | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | diff --git a/docs/sdks/judges/README.md b/docs/sdks/judges/README.md new file mode 100644 index 00000000..f1187518 --- /dev/null +++ b/docs/sdks/judges/README.md @@ -0,0 +1,236 @@ +# Beta.Observability.Judges + +## Overview + +### Available Operations + +* [create](#create) - Create a new judge +* [list](#list) - Get judges with optional filtering and search +* [fetch](#fetch) - Get judge by id +* [delete](#delete) - Delete a judge +* [update](#update) - Update a judge + +## create + +Create a new judge + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.judges.create(name="", description="border freely down whenever broadly whenever restructure catalyze after", model_name="", output={ + "type": "REGRESSION", + "min": 0, + "min_description": "", + "max": 1, + "max_description": "", + }, instructions="", tools=[ + "", + "", + ]) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| --------------------------------------------------------------------------- | --------------------------------------------------------------------------- | --------------------------------------------------------------------------- | --------------------------------------------------------------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *str* | 
:heavy_check_mark: | N/A | +| `model_name` | *str* | :heavy_check_mark: | N/A | +| `output` | [models.CreateJudgeRequestOutput](../../models/createjudgerequestoutput.md) | :heavy_check_mark: | N/A | +| `instructions` | *str* | :heavy_check_mark: | N/A | +| `tools` | List[*str*] | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Judge](../../models/judge.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## list + +Get judges with optional filtering and search + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.judges.list(page_size=50, page=1) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `type_filter` | List[[models.JudgeOutputType](../../models/judgeoutputtype.md)] | :heavy_minus_sign: | Filter by judge output types | +| `model_filter` | List[*str*] | :heavy_minus_sign: | Filter by model names | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `q` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | 
Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ListJudgesResponse](../../models/listjudgesresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## fetch + +Get judge by id + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.judges.fetch(judge_id="19ae5cf8-2ade-4a40-b9d2-730aaebe8429") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `judge_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Judge](../../models/judge.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## delete + +Delete a judge + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.observability.judges.delete(judge_id="80deecde-e10f-409c-a13a-c242d3760f6e") + + # Use the SDK ... 
+ +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `judge_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## update + +Update a judge + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.observability.judges.update(judge_id="9f28c7db-1fb7-4e1c-b137-d7039561ddb7", name="", description="noteworthy and unless", model_name="", output={ + "type": "REGRESSION", + "min": 0, + "min_description": "", + "max": 1, + "max_description": "", + }, instructions="", tools=[]) + + # Use the SDK ... 
+ +``` + +### Parameters + +| Parameter | Type | Required | Description | +| --------------------------------------------------------------------------- | --------------------------------------------------------------------------- | --------------------------------------------------------------------------- | --------------------------------------------------------------------------- | +| `judge_id` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *str* | :heavy_check_mark: | N/A | +| `model_name` | *str* | :heavy_check_mark: | N/A | +| `output` | [models.UpdateJudgeRequestOutput](../../models/updatejudgerequestoutput.md) | :heavy_check_mark: | N/A | +| `instructions` | *str* | :heavy_check_mark: | N/A | +| `tools` | List[*str*] | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 311a2db6..f585dcbe 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -40,6 +40,8 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `provider` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | 
[Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -48,9 +50,10 @@ with Mistral( ### Errors -| Error Type | Status Code | Content Type | -| --------------- | --------------- | --------------- | -| errors.SDKError | 4XX, 5XX | \*/\* | +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## retrieve @@ -125,7 +128,7 @@ with Mistral( ### Response -**[models.DeleteModelOut](../../models/deletemodelout.md)** +**[models.DeleteModelResponse](../../models/deletemodelresponse.md)** ### Errors diff --git a/docs/sdks/records/README.md b/docs/sdks/records/README.md new file mode 100644 index 00000000..ce8f1f68 --- /dev/null +++ b/docs/sdks/records/README.md @@ -0,0 +1,277 @@ +# Beta.Observability.Datasets.Records + +## Overview + +### Available Operations + +* [fetch](#fetch) - Get the content of a given conversation from a dataset +* [delete](#delete) - Delete a record from a dataset +* [bulk_delete](#bulk_delete) - Delete multiple records from datasets +* [judge](#judge) - Run Judge on a dataset record based on the given options +* [update_payload](#update_payload) - Update a dataset record conversation payload +* [update_properties](#update_properties) - Update conversation properties + +## fetch + +Get the content of a given conversation from a dataset + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.datasets.records.fetch(dataset_record_id="ce995349-abbf-45c0-be75-885fc1c4b4c0") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| 
------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `dataset_record_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.DatasetRecord](../../models/datasetrecord.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## delete + +Delete a record from a dataset + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.observability.datasets.records.delete(dataset_record_id="799fed99-80b4-4a9a-a15e-05352b811702") + + # Use the SDK ... + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `dataset_record_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## bulk_delete + +Delete multiple records from datasets + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.observability.datasets.records.bulk_delete(dataset_record_ids=[ + "22fc78f7-e774-4ab5-b1ea-63852992ef31", + "1c533b4f-882e-4bd0-9ef6-9933b825f8b1", + ]) + + # Use the SDK ... + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `dataset_record_ids` | List[*str*] | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## judge + +Run Judge on a dataset record based on the given options + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.observability.datasets.records.judge(dataset_record_id="9de5d7a1-787a-45dd-b668-9f3407e76d8b", judge_definition={ + "name": "", + "description": "wisely railway deceivingly arcade minion back what yowza outrun service", + "model_name": "", + "output": { + "type": "CLASSIFICATION", + "options": [ + { + "value": "", + "description": "spork excluding without retrospectivity bah next yearly", + }, + ], + }, + "instructions": "", + "tools": [ + "", + ], + }) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `dataset_record_id` | *str* | :heavy_check_mark: | N/A | +| `judge_definition` | [models.CreateJudgeRequest](../../models/createjudgerequest.md) | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.JudgeOutput](../../models/judgeoutput.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## update_payload + +Update a dataset record conversation payload + +### Example Usage + + +```python +from mistralai.client import Mistral, models +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.observability.datasets.records.update_payload(dataset_record_id="17506b15-748e-4e7c-9737-c97c44d04b0f", payload=models.ConversationPayload( + messages=[ + { + "key": "", + }, + { + + }, + { + "key": "", + }, + ], + )) + + # Use the SDK ... + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `dataset_record_id` | *str* | :heavy_check_mark: | N/A | +| `payload` | [models.ConversationPayload](../../models/conversationpayload.md) | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | + +## update_properties + +Update conversation properties + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.observability.datasets.records.update_properties(dataset_record_id="a4deefc5-0905-427e-ad15-1090ef9e216d", properties={ + "key": "", + "key1": "", + "key2": "", + }) + + # Use the SDK ... + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `dataset_record_id` | *str* | :heavy_check_mark: | N/A | +| `properties` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Errors + +| Error Type | Status Code | Content Type | +| ------------------------- | ------------------------- | ------------------------- | +| errors.ObservabilityError | 400, 404, 408, 409, 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/src/mistralai/client/_version.py b/src/mistralai/client/_version.py index 805648e4..c7251c9c 100644 --- a/src/mistralai/client/_version.py +++ b/src/mistralai/client/_version.py @@ -4,10 +4,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "2.0.0rc1" +__version__: str = "2.0.0" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.841.0" -__user_agent__: str = "speakeasy-sdk/python 2.0.0rc1 2.841.0 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 2.0.0 2.841.0 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/client/accesses.py b/src/mistralai/client/accesses.py index 0761b0bc..2cfdc7da 100644 --- a/src/mistralai/client/accesses.py +++ b/src/mistralai/client/accesses.py @@ -21,7 +21,7 @@ def list( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListSharingOut: + ) -> models.ListSharingResponse: r"""List all of the access to this library. Given a library, list all of the Entity that have access and to what level. 
@@ -88,7 +88,7 @@ def list( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListSharingOut, http_res) + return unmarshal_json_response(models.ListSharingResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( errors.HTTPValidationErrorData, http_res @@ -111,7 +111,7 @@ async def list_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListSharingOut: + ) -> models.ListSharingResponse: r"""List all of the access to this library. Given a library, list all of the Entity that have access and to what level. @@ -178,7 +178,7 @@ async def list_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListSharingOut, http_res) + return unmarshal_json_response(models.ListSharingResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( errors.HTTPValidationErrorData, http_res @@ -205,7 +205,7 @@ def update_or_create( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.SharingOut: + ) -> models.Sharing: r"""Create or update an access level. Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. 
@@ -232,7 +232,7 @@ def update_or_create( request = models.LibrariesShareCreateV1Request( library_id=library_id, - sharing_in=models.SharingIn( + sharing_request=models.SharingRequest( org_id=org_id, level=level, share_with_uuid=share_with_uuid, @@ -254,7 +254,7 @@ def update_or_create( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.sharing_in, False, False, "json", models.SharingIn + request.sharing_request, False, False, "json", models.SharingRequest ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -285,7 +285,7 @@ def update_or_create( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.SharingOut, http_res) + return unmarshal_json_response(models.Sharing, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( errors.HTTPValidationErrorData, http_res @@ -312,7 +312,7 @@ async def update_or_create_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.SharingOut: + ) -> models.Sharing: r"""Create or update an access level. Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. 
@@ -339,7 +339,7 @@ async def update_or_create_async( request = models.LibrariesShareCreateV1Request( library_id=library_id, - sharing_in=models.SharingIn( + sharing_request=models.SharingRequest( org_id=org_id, level=level, share_with_uuid=share_with_uuid, @@ -361,7 +361,7 @@ async def update_or_create_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.sharing_in, False, False, "json", models.SharingIn + request.sharing_request, False, False, "json", models.SharingRequest ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -392,7 +392,7 @@ async def update_or_create_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.SharingOut, http_res) + return unmarshal_json_response(models.Sharing, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( errors.HTTPValidationErrorData, http_res @@ -418,7 +418,7 @@ def delete( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.SharingOut: + ) -> models.Sharing: r"""Delete an access level. Given a library id, you can delete the access level of an entity. An owner cannot delete it's own access. You have to be the owner of the library to delete an acces other than yours. 
@@ -496,7 +496,7 @@ def delete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.SharingOut, http_res) + return unmarshal_json_response(models.Sharing, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( errors.HTTPValidationErrorData, http_res @@ -522,7 +522,7 @@ async def delete_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.SharingOut: + ) -> models.Sharing: r"""Delete an access level. Given a library id, you can delete the access level of an entity. An owner cannot delete it's own access. You have to be the owner of the library to delete an acces other than yours. @@ -600,7 +600,7 @@ async def delete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.SharingOut, http_res) + return unmarshal_json_response(models.Sharing, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( errors.HTTPValidationErrorData, http_res diff --git a/src/mistralai/client/beta.py b/src/mistralai/client/beta.py index 65b761d1..83b8fc87 100644 --- a/src/mistralai/client/beta.py +++ b/src/mistralai/client/beta.py @@ -4,8 +4,10 @@ from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration from mistralai.client.beta_agents import BetaAgents +from mistralai.client.connectors import Connectors from mistralai.client.conversations import Conversations from mistralai.client.libraries import Libraries +from mistralai.client.observability import Observability from typing import Optional @@ -16,6 +18,9 @@ class Beta(BaseSDK): r"""(beta) Agents API""" libraries: Libraries r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities.""" + observability: Observability + 
connectors: Connectors + r"""(beta) Connectors API - manage your connectors""" def __init__( self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None @@ -30,3 +35,7 @@ def _init_sdks(self): ) self.agents = BetaAgents(self.sdk_configuration, parent_ref=self.parent_ref) self.libraries = Libraries(self.sdk_configuration, parent_ref=self.parent_ref) + self.observability = Observability( + self.sdk_configuration, parent_ref=self.parent_ref + ) + self.connectors = Connectors(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/client/beta_agents.py b/src/mistralai/client/beta_agents.py index 157c5de4..19f8d2b2 100644 --- a/src/mistralai/client/beta_agents.py +++ b/src/mistralai/client/beta_agents.py @@ -28,6 +28,9 @@ def create( completion_args: Optional[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, + guardrails: OptionalNullable[ + Union[List[models.GuardrailConfig], List[models.GuardrailConfigTypedDict]] + ] = UNSET, description: OptionalNullable[str] = UNSET, handoffs: OptionalNullable[List[str]] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, @@ -46,6 +49,7 @@ def create( :param instructions: Instruction prompt the model will follow during the conversation. :param tools: List of tools which are available to the model during the conversation. 
:param completion_args: White-listed arguments from the completion API + :param guardrails: :param description: :param handoffs: :param metadata: @@ -73,6 +77,9 @@ def create( completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + guardrails=utils.get_pydantic_model( + guardrails, OptionalNullable[List[models.GuardrailConfig]] + ), model=model, name=name, description=description, @@ -156,6 +163,9 @@ async def create_async( completion_args: Optional[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, + guardrails: OptionalNullable[ + Union[List[models.GuardrailConfig], List[models.GuardrailConfigTypedDict]] + ] = UNSET, description: OptionalNullable[str] = UNSET, handoffs: OptionalNullable[List[str]] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, @@ -174,6 +184,7 @@ async def create_async( :param instructions: Instruction prompt the model will follow during the conversation. :param tools: List of tools which are available to the model during the conversation. :param completion_args: White-listed arguments from the completion API + :param guardrails: :param description: :param handoffs: :param metadata: @@ -201,6 +212,9 @@ async def create_async( completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + guardrails=utils.get_pydantic_model( + guardrails, OptionalNullable[List[models.GuardrailConfig]] + ), model=model, name=name, description=description, @@ -701,6 +715,9 @@ def update( completion_args: Optional[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, + guardrails: OptionalNullable[ + Union[List[models.GuardrailConfig], List[models.GuardrailConfigTypedDict]] + ] = UNSET, model: OptionalNullable[str] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, @@ -721,6 +738,7 @@ def update( :param instructions: Instruction prompt the model will follow during the conversation. 
:param tools: List of tools which are available to the model during the conversation. :param completion_args: White-listed arguments from the completion API + :param guardrails: :param model: :param name: :param description: @@ -753,6 +771,9 @@ def update( completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + guardrails=utils.get_pydantic_model( + guardrails, OptionalNullable[List[models.GuardrailConfig]] + ), model=model, name=name, description=description, @@ -841,6 +862,9 @@ async def update_async( completion_args: Optional[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, + guardrails: OptionalNullable[ + Union[List[models.GuardrailConfig], List[models.GuardrailConfigTypedDict]] + ] = UNSET, model: OptionalNullable[str] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, @@ -861,6 +885,7 @@ async def update_async( :param instructions: Instruction prompt the model will follow during the conversation. :param tools: List of tools which are available to the model during the conversation. :param completion_args: White-listed arguments from the completion API + :param guardrails: :param model: :param name: :param description: @@ -893,6 +918,9 @@ async def update_async( completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + guardrails=utils.get_pydantic_model( + guardrails, OptionalNullable[List[models.GuardrailConfig]] + ), model=model, name=name, description=description, diff --git a/src/mistralai/client/campaigns.py b/src/mistralai/client/campaigns.py new file mode 100644 index 00000000..a1ffcebc --- /dev/null +++ b/src/mistralai/client/campaigns.py @@ -0,0 +1,1150 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 9e64fcf4e60e + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Mapping, Optional, Union + + +class Campaigns(BaseSDK): + def create( + self, + *, + search_params: Union[models.FilterPayload, models.FilterPayloadTypedDict], + judge_id: str, + name: str, + description: str, + max_nb_events: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Campaign: + r"""Create and start a new campaign + + :param search_params: + :param judge_id: + :param name: + :param description: + :param max_nb_events: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateCampaignRequest( + search_params=utils.get_pydantic_model(search_params, models.FilterPayload), + judge_id=judge_id, + name=name, + description=description, + max_nb_events=max_nb_events, + ) + + req = self._build_request( + method="POST", + path="/v1/observability/campaigns", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.CreateCampaignRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="create_campaign_v1_observability_campaigns_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "201", "application/json"): + return unmarshal_json_response(models.Campaign, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = 
unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + search_params: Union[models.FilterPayload, models.FilterPayloadTypedDict], + judge_id: str, + name: str, + description: str, + max_nb_events: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Campaign: + r"""Create and start a new campaign + + :param search_params: + :param judge_id: + :param name: + :param description: + :param max_nb_events: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateCampaignRequest( + search_params=utils.get_pydantic_model(search_params, models.FilterPayload), + judge_id=judge_id, + name=name, + description=description, + max_nb_events=max_nb_events, + ) + + req = self._build_request_async( + method="POST", + path="/v1/observability/campaigns", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.CreateCampaignRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="create_campaign_v1_observability_campaigns_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "201", "application/json"): + return unmarshal_json_response(models.Campaign, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + 
response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def list( + self, + *, + page_size: Optional[int] = 50, + page: Optional[int] = 1, + q: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListCampaignsResponse: + r"""Get all campaigns + + :param page_size: + :param page: + :param q: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetCampaignsV1ObservabilityCampaignsGetRequest( + page_size=page_size, + page=page, + q=q, + ) + + req = self._build_request( + method="GET", + path="/v1/observability/campaigns", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_campaigns_v1_observability_campaigns_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListCampaignsResponse, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = 
utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + page_size: Optional[int] = 50, + page: Optional[int] = 1, + q: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListCampaignsResponse: + r"""Get all campaigns + + :param page_size: + :param page: + :param q: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetCampaignsV1ObservabilityCampaignsGetRequest( + page_size=page_size, + page=page, + q=q, + ) + + req = self._build_request_async( + method="GET", + path="/v1/observability/campaigns", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_campaigns_v1_observability_campaigns_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListCampaignsResponse, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await 
utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def fetch( + self, + *, + campaign_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Campaign: + r"""Get campaign by id + + :param campaign_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetCampaignByIDV1ObservabilityCampaignsCampaignIDGetRequest( + campaign_id=campaign_id, + ) + + req = self._build_request( + method="GET", + path="/v1/observability/campaigns/{campaign_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + 
retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_campaign_by_id_v1_observability_campaigns__campaign_id__get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Campaign, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def fetch_async( + self, + *, + campaign_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Campaign: + r"""Get campaign by id + + :param campaign_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetCampaignByIDV1ObservabilityCampaignsCampaignIDGetRequest( + campaign_id=campaign_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/observability/campaigns/{campaign_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_campaign_by_id_v1_observability_campaigns__campaign_id__get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Campaign, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + 
http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + campaign_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a campaign + + :param campaign_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.DeleteCampaignV1ObservabilityCampaignsCampaignIDDeleteRequest( + campaign_id=campaign_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/observability/campaigns/{campaign_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, 
utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="delete_campaign_v1_observability_campaigns__campaign_id__delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + campaign_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a campaign + + :param campaign_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.DeleteCampaignV1ObservabilityCampaignsCampaignIDDeleteRequest( + campaign_id=campaign_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/observability/campaigns/{campaign_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="delete_campaign_v1_observability_campaigns__campaign_id__delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def fetch_status( + self, + *, + campaign_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.FetchCampaignStatusResponse: + r"""Get campaign status by campaign id + + :param campaign_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetCampaignStatusByIDV1ObservabilityCampaignsCampaignIDStatusGetRequest( + campaign_id=campaign_id, + ) + + req = self._build_request( + method="GET", + path="/v1/observability/campaigns/{campaign_id}/status", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, 
utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_campaign_status_by_id_v1_observability_campaigns__campaign_id__status_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.FetchCampaignStatusResponse, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def fetch_status_async( + self, + *, + campaign_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.FetchCampaignStatusResponse: + r"""Get campaign status by campaign id + + :param campaign_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: 
Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetCampaignStatusByIDV1ObservabilityCampaignsCampaignIDStatusGetRequest( + campaign_id=campaign_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/observability/campaigns/{campaign_id}/status", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_campaign_status_by_id_v1_observability_campaigns__campaign_id__status_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.FetchCampaignStatusResponse, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise 
errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def list_events( + self, + *, + campaign_id: str, + page_size: Optional[int] = 50, + page: Optional[int] = 1, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListCampaignSelectedEventsResponse: + r"""Get event ids that were selected by the given campaign + + :param campaign_id: + :param page_size: + :param page: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetCampaignSelectedEventsV1ObservabilityCampaignsCampaignIDSelectedEventsGetRequest( + campaign_id=campaign_id, + page_size=page_size, + page=page, + ) + + req = self._build_request( + method="GET", + path="/v1/observability/campaigns/{campaign_id}/selected-events", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_campaign_selected_events_v1_observability_campaigns__campaign_id__selected_events_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.ListCampaignSelectedEventsResponse, http_res + ) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) 
+ raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_events_async( + self, + *, + campaign_id: str, + page_size: Optional[int] = 50, + page: Optional[int] = 1, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListCampaignSelectedEventsResponse: + r"""Get event ids that were selected by the given campaign + + :param campaign_id: + :param page_size: + :param page: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetCampaignSelectedEventsV1ObservabilityCampaignsCampaignIDSelectedEventsGetRequest( + campaign_id=campaign_id, + page_size=page_size, + page=page, + ) + + req = self._build_request_async( + method="GET", + path="/v1/observability/campaigns/{campaign_id}/selected-events", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_campaign_selected_events_v1_observability_campaigns__campaign_id__selected_events_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.ListCampaignSelectedEventsResponse, http_res + ) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + 
errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/chat_completion_events.py b/src/mistralai/client/chat_completion_events.py new file mode 100644 index 00000000..c060235c --- /dev/null +++ b/src/mistralai/client/chat_completion_events.py @@ -0,0 +1,1030 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1813f339625b + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.fields import Fields +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, List, Mapping, Optional, Union + + +class ChatCompletionEvents(BaseSDK): + fields: Fields + + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.fields = Fields(self.sdk_configuration, parent_ref=self.parent_ref) + + def search( + self, + *, + search_params: Union[models.FilterPayload, models.FilterPayloadTypedDict], + page_size: Optional[int] = 50, + cursor: OptionalNullable[str] = UNSET, + extra_fields: OptionalNullable[List[str]] = UNSET, + retries: 
OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SearchChatCompletionEventsResponse: + r"""Get Chat Completion Events + + :param search_params: + :param page_size: + :param cursor: + :param extra_fields: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetChatCompletionEventsV1ObservabilityChatCompletionEventsSearchPostRequest( + page_size=page_size, + cursor=cursor, + search_chat_completion_events_request=models.SearchChatCompletionEventsRequest( + search_params=utils.get_pydantic_model( + search_params, models.FilterPayload + ), + extra_fields=extra_fields, + ), + ) + + req = self._build_request( + method="POST", + path="/v1/observability/chat-completion-events/search", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.search_chat_completion_events_request, + False, + False, + "json", + models.SearchChatCompletionEventsRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = 
self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_chat_completion_events_v1_observability_chat_completion_events_search_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.SearchChatCompletionEventsResponse, http_res + ) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def search_async( + self, + *, + search_params: Union[models.FilterPayload, models.FilterPayloadTypedDict], + page_size: Optional[int] = 50, + cursor: OptionalNullable[str] = UNSET, + extra_fields: OptionalNullable[List[str]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SearchChatCompletionEventsResponse: + r"""Get Chat Completion Events + + :param search_params: 
+ :param page_size: + :param cursor: + :param extra_fields: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetChatCompletionEventsV1ObservabilityChatCompletionEventsSearchPostRequest( + page_size=page_size, + cursor=cursor, + search_chat_completion_events_request=models.SearchChatCompletionEventsRequest( + search_params=utils.get_pydantic_model( + search_params, models.FilterPayload + ), + extra_fields=extra_fields, + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/observability/chat-completion-events/search", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.search_chat_completion_events_request, + False, + False, + "json", + models.SearchChatCompletionEventsRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + 
base_url=base_url or "", + operation_id="get_chat_completion_events_v1_observability_chat_completion_events_search_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.SearchChatCompletionEventsResponse, http_res + ) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def search_ids( + self, + *, + search_params: Union[models.FilterPayload, models.FilterPayloadTypedDict], + extra_fields: OptionalNullable[List[str]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SearchChatCompletionEventIdsResponse: + r"""Alternative to /search that returns only the IDs and that can return many IDs at once + + :param search_params: + :param extra_fields: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method 
in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.SearchChatCompletionEventIdsRequest( + search_params=utils.get_pydantic_model(search_params, models.FilterPayload), + extra_fields=extra_fields, + ) + + req = self._build_request( + method="POST", + path="/v1/observability/chat-completion-events/search-ids", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, + False, + False, + "json", + models.SearchChatCompletionEventIdsRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_chat_completion_event_ids_v1_observability_chat_completion_events_search_ids_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + 
models.SearchChatCompletionEventIdsResponse, http_res + ) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def search_ids_async( + self, + *, + search_params: Union[models.FilterPayload, models.FilterPayloadTypedDict], + extra_fields: OptionalNullable[List[str]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SearchChatCompletionEventIdsResponse: + r"""Alternative to /search that returns only the IDs and that can return many IDs at once + + :param search_params: + :param extra_fields: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.SearchChatCompletionEventIdsRequest( + search_params=utils.get_pydantic_model(search_params, models.FilterPayload), + extra_fields=extra_fields, + ) + + req = self._build_request_async( + method="POST", + path="/v1/observability/chat-completion-events/search-ids", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, + False, + False, + "json", + models.SearchChatCompletionEventIdsRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_chat_completion_event_ids_v1_observability_chat_completion_events_search_ids_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.SearchChatCompletionEventIdsResponse, http_res + ) + if utils.match_response( + 
http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def fetch( + self, + *, + event_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ChatCompletionEvent: + r"""Get Chat Completion Event + + :param event_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetChatCompletionEventV1ObservabilityChatCompletionEventsEventIDGetRequest( + event_id=event_id, + ) + + req = self._build_request( + method="GET", + path="/v1/observability/chat-completion-events/{event_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_chat_completion_event_v1_observability_chat_completion_events__event_id__get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ChatCompletionEvent, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if 
utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def fetch_async( + self, + *, + event_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ChatCompletionEvent: + r"""Get Chat Completion Event + + :param event_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetChatCompletionEventV1ObservabilityChatCompletionEventsEventIDGetRequest( + event_id=event_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/observability/chat-completion-events/{event_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_chat_completion_event_v1_observability_chat_completion_events__event_id__get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ChatCompletionEvent, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if 
utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def fetch_similar_events( + self, + *, + event_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SearchChatCompletionEventsResponse: + r"""Get Similar Chat Completion Events + + :param event_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetSimilarChatCompletionEventsV1ObservabilityChatCompletionEventsEventIDSimilarEventsGetRequest( + event_id=event_id, + ) + + req = self._build_request( + method="GET", + path="/v1/observability/chat-completion-events/{event_id}/similar-events", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_similar_chat_completion_events_v1_observability_chat_completion_events__event_id__similar_events_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.SearchChatCompletionEventsResponse, http_res + ) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + 
raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def fetch_similar_events_async( + self, + *, + event_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SearchChatCompletionEventsResponse: + r"""Get Similar Chat Completion Events + + :param event_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetSimilarChatCompletionEventsV1ObservabilityChatCompletionEventsEventIDSimilarEventsGetRequest( + event_id=event_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/observability/chat-completion-events/{event_id}/similar-events", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_similar_chat_completion_events_v1_observability_chat_completion_events__event_id__similar_events_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.SearchChatCompletionEventsResponse, http_res + ) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, 
http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def judge( + self, + *, + event_id: str, + judge_definition: Union[ + models.CreateJudgeRequest, models.CreateJudgeRequestTypedDict + ], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JudgeOutput: + r"""Run Judge on an event based on the given options + + :param event_id: + :param judge_definition: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JudgeChatCompletionEventV1ObservabilityChatCompletionEventsEventIDLiveJudgingPostRequest( + event_id=event_id, + judge_chat_completion_event_request=models.JudgeChatCompletionEventRequest( + judge_definition=utils.get_pydantic_model( + judge_definition, models.CreateJudgeRequest + ), + ), + ) + + req = self._build_request( + method="POST", + path="/v1/observability/chat-completion-events/{event_id}/live-judging", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.judge_chat_completion_event_request, + False, + False, + "json", + models.JudgeChatCompletionEventRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="judge_chat_completion_event_v1_observability_chat_completion_events__event_id__live_judging_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if 
utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.JudgeOutput, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def judge_async( + self, + *, + event_id: str, + judge_definition: Union[ + models.CreateJudgeRequest, models.CreateJudgeRequestTypedDict + ], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JudgeOutput: + r"""Run Judge on an event based on the given options + + :param event_id: + :param judge_definition: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JudgeChatCompletionEventV1ObservabilityChatCompletionEventsEventIDLiveJudgingPostRequest( + event_id=event_id, + judge_chat_completion_event_request=models.JudgeChatCompletionEventRequest( + judge_definition=utils.get_pydantic_model( + judge_definition, models.CreateJudgeRequest + ), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/observability/chat-completion-events/{event_id}/live-judging", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.judge_chat_completion_event_request, + False, + False, + "json", + models.JudgeChatCompletionEventRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="judge_chat_completion_event_v1_observability_chat_completion_events__event_id__live_judging_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: 
Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.JudgeOutput, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/connectors.py b/src/mistralai/client/connectors.py new file mode 100644 index 00000000..37e01833 --- /dev/null +++ b/src/mistralai/client/connectors.py @@ -0,0 +1,1292 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 39da03126050 + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, Mapping, Optional, Union + + +class Connectors(BaseSDK): + r"""(beta) Connectors API - manage your connectors""" + + def create( + self, + *, + name: str, + description: str, + server: str, + icon_url: OptionalNullable[str] = UNSET, + visibility: Optional[models.ResourceVisibility] = None, + headers: OptionalNullable[Dict[str, Any]] = UNSET, + auth_data: OptionalNullable[ + Union[models.AuthData, models.AuthDataTypedDict] + ] = UNSET, + system_prompt: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Connector: + r"""Create a new connector. + + Create a new MCP connector. You can customize its visibility, url and auth type. + + :param name: The name of the connector. Should be 64 char length maximum, alphanumeric, only underscores/dashes. + :param description: The description of the connector. + :param server: The url of the MCP server. + :param icon_url: The optional url of the icon you want to associate to the connector. + :param visibility: + :param headers: Optional organization-level headers to be sent with the request to the mcp server. + :param auth_data: Optional additional authentication data for the connector. + :param system_prompt: Optional system prompt for the connector. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateConnectorRequest( + name=name, + description=description, + icon_url=icon_url, + visibility=visibility, + server=server, + headers=headers, + auth_data=utils.get_pydantic_model( + auth_data, OptionalNullable[models.AuthData] + ), + system_prompt=system_prompt, + ) + + req = self._build_request( + method="POST", + path="/v1/connectors", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.CreateConnectorRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="connector_create_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + 
error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "201", "application/json"): + return unmarshal_json_response(models.Connector, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + name: str, + description: str, + server: str, + icon_url: OptionalNullable[str] = UNSET, + visibility: Optional[models.ResourceVisibility] = None, + headers: OptionalNullable[Dict[str, Any]] = UNSET, + auth_data: OptionalNullable[ + Union[models.AuthData, models.AuthDataTypedDict] + ] = UNSET, + system_prompt: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Connector: + r"""Create a new connector. + + Create a new MCP connector. You can customize its visibility, url and auth type. + + :param name: The name of the connector. Should be 64 char length maximum, alphanumeric, only underscores/dashes. + :param description: The description of the connector. + :param server: The url of the MCP server. + :param icon_url: The optional url of the icon you want to associate to the connector. + :param visibility: + :param headers: Optional organization-level headers to be sent with the request to the mcp server. 
+ :param auth_data: Optional additional authentication data for the connector. + :param system_prompt: Optional system prompt for the connector. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateConnectorRequest( + name=name, + description=description, + icon_url=icon_url, + visibility=visibility, + server=server, + headers=headers, + auth_data=utils.get_pydantic_model( + auth_data, OptionalNullable[models.AuthData] + ), + system_prompt=system_prompt, + ) + + req = self._build_request_async( + method="POST", + path="/v1/connectors", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.CreateConnectorRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="connector_create_v1", 
+ oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "201", "application/json"): + return unmarshal_json_response(models.Connector, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def list( + self, + *, + query_filters: Optional[ + Union[models.ConnectorsQueryFilters, models.ConnectorsQueryFiltersTypedDict] + ] = None, + cursor: OptionalNullable[str] = UNSET, + page_size: Optional[int] = 100, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.PaginatedConnectors: + r"""List all connectors. + + List all your custom connectors with keyset pagination and filters. + + :param query_filters: + :param cursor: + :param page_size: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConnectorListV1Request( + query_filters=utils.get_pydantic_model( + query_filters, Optional[models.ConnectorsQueryFilters] + ), + cursor=cursor, + page_size=page_size, + ) + + req = self._build_request( + method="GET", + path="/v1/connectors", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="connector_list_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.PaginatedConnectors, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise 
errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + query_filters: Optional[ + Union[models.ConnectorsQueryFilters, models.ConnectorsQueryFiltersTypedDict] + ] = None, + cursor: OptionalNullable[str] = UNSET, + page_size: Optional[int] = 100, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.PaginatedConnectors: + r"""List all connectors. + + List all your custom connectors with keyset pagination and filters. + + :param query_filters: + :param cursor: + :param page_size: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConnectorListV1Request( + query_filters=utils.get_pydantic_model( + query_filters, Optional[models.ConnectorsQueryFilters] + ), + cursor=cursor, + page_size=page_size, + ) + + req = self._build_request_async( + method="GET", + path="/v1/connectors", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="connector_list_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.PaginatedConnectors, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await 
utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def call_tool( + self, + *, + tool_name: str, + connector_id_or_name: str, + arguments: Optional[Dict[str, Any]] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConnectorToolCallResponse: + r"""Call Connector Tool + + Call a tool on an MCP connector. + + :param tool_name: + :param connector_id_or_name: + :param arguments: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConnectorCallToolV1Request( + tool_name=tool_name, + connector_id_or_name=connector_id_or_name, + connector_call_tool_request=models.ConnectorCallToolRequest( + arguments=arguments, + ), + ) + + req = self._build_request( + method="POST", + path="/v1/connectors/{connector_id_or_name}/tools/{tool_name}/call", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.connector_call_tool_request, + False, + False, + "json", + models.ConnectorCallToolRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="connector_call_tool_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConnectorToolCallResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = 
unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def call_tool_async( + self, + *, + tool_name: str, + connector_id_or_name: str, + arguments: Optional[Dict[str, Any]] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConnectorToolCallResponse: + r"""Call Connector Tool + + Call a tool on an MCP connector. + + :param tool_name: + :param connector_id_or_name: + :param arguments: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConnectorCallToolV1Request( + tool_name=tool_name, + connector_id_or_name=connector_id_or_name, + connector_call_tool_request=models.ConnectorCallToolRequest( + arguments=arguments, + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/connectors/{connector_id_or_name}/tools/{tool_name}/call", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.connector_call_tool_request, + False, + False, + "json", + models.ConnectorCallToolRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="connector_call_tool_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConnectorToolCallResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + 
response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + connector_id_or_name: str, + fetch_customer_data: Optional[bool] = False, + fetch_connection_secrets: Optional[bool] = False, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Connector: + r"""Get a connector. + + Get a connector by its ID or name. + + :param connector_id_or_name: + :param fetch_customer_data: Fetch the customer data associated with the connector (e.g. customer secrets / config). + :param fetch_connection_secrets: Fetch the general connection secrets associated with the connector. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConnectorGetV1Request( + fetch_customer_data=fetch_customer_data, + fetch_connection_secrets=fetch_connection_secrets, + connector_id_or_name=connector_id_or_name, + ) + + req = self._build_request( + method="GET", + path="/v1/connectors/{connector_id_or_name}#idOrName", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="connector_get_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Connector, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + 
raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def get_async( + self, + *, + connector_id_or_name: str, + fetch_customer_data: Optional[bool] = False, + fetch_connection_secrets: Optional[bool] = False, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Connector: + r"""Get a connector. + + Get a connector by its ID or name. + + :param connector_id_or_name: + :param fetch_customer_data: Fetch the customer data associated with the connector (e.g. customer secrets / config). + :param fetch_connection_secrets: Fetch the general connection secrets associated with the connector. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConnectorGetV1Request( + fetch_customer_data=fetch_customer_data, + fetch_connection_secrets=fetch_connection_secrets, + connector_id_or_name=connector_id_or_name, + ) + + req = self._build_request_async( + method="GET", + path="/v1/connectors/{connector_id_or_name}#idOrName", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="connector_get_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Connector, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await 
utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def update( + self, + *, + connector_id: str, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + icon_url: OptionalNullable[str] = UNSET, + system_prompt: OptionalNullable[str] = UNSET, + connection_config: OptionalNullable[Dict[str, Any]] = UNSET, + connection_secrets: OptionalNullable[Dict[str, Any]] = UNSET, + server: OptionalNullable[str] = UNSET, + headers: OptionalNullable[Dict[str, Any]] = UNSET, + auth_data: OptionalNullable[ + Union[models.AuthData, models.AuthDataTypedDict] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Connector: + r"""Update a connector. + + Update a connector by its ID. + + :param connector_id: + :param name: The name of the connector. + :param description: The description of the connector. + :param icon_url: The optional url of the icon you want to associate to the connector. + :param system_prompt: Optional system prompt for the connector. + :param connection_config: Optional new connection config. + :param connection_secrets: Optional new connection secrets + :param server: New server url for your mcp connector. + :param headers: New headers for your mcp connector. + :param auth_data: New authentication data for your mcp connector. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConnectorUpdateV1Request( + connector_id=connector_id, + update_connector_request=models.UpdateConnectorRequest( + name=name, + description=description, + icon_url=icon_url, + system_prompt=system_prompt, + connection_config=connection_config, + connection_secrets=connection_secrets, + server=server, + headers=headers, + auth_data=utils.get_pydantic_model( + auth_data, OptionalNullable[models.AuthData] + ), + ), + ) + + req = self._build_request( + method="PATCH", + path="/v1/connectors/{connector_id}#id", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_connector_request, + False, + False, + "json", + models.UpdateConnectorRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", 
+ operation_id="connector_update_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Connector, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def update_async( + self, + *, + connector_id: str, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + icon_url: OptionalNullable[str] = UNSET, + system_prompt: OptionalNullable[str] = UNSET, + connection_config: OptionalNullable[Dict[str, Any]] = UNSET, + connection_secrets: OptionalNullable[Dict[str, Any]] = UNSET, + server: OptionalNullable[str] = UNSET, + headers: OptionalNullable[Dict[str, Any]] = UNSET, + auth_data: OptionalNullable[ + Union[models.AuthData, models.AuthDataTypedDict] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Connector: + r"""Update a connector. + + Update a connector by its ID. + + :param connector_id: + :param name: The name of the connector. + :param description: The description of the connector. 
+ :param icon_url: The optional url of the icon you want to associate to the connector. + :param system_prompt: Optional system prompt for the connector. + :param connection_config: Optional new connection config. + :param connection_secrets: Optional new connection secrets + :param server: New server url for your mcp connector. + :param headers: New headers for your mcp connector. + :param auth_data: New authentication data for your mcp connector. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConnectorUpdateV1Request( + connector_id=connector_id, + update_connector_request=models.UpdateConnectorRequest( + name=name, + description=description, + icon_url=icon_url, + system_prompt=system_prompt, + connection_config=connection_config, + connection_secrets=connection_secrets, + server=server, + headers=headers, + auth_data=utils.get_pydantic_model( + auth_data, OptionalNullable[models.AuthData] + ), + ), + ) + + req = self._build_request_async( + method="PATCH", + path="/v1/connectors/{connector_id}#id", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_connector_request, + False, + False, + "json", + 
models.UpdateConnectorRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="connector_update_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Connector, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + connector_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.MessageResponse: + r"""Delete a connector. + + Delete a connector by its ID. 
+ + :param connector_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConnectorDeleteV1Request( + connector_id=connector_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/connectors/{connector_id}#id", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="connector_delete_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.MessageResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = 
    async def delete_async(
        self,
        *,
        connector_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.MessageResponse:
        r"""Delete a connector.

        Delete a connector by its ID.

        :param connector_id: ID of the connector to delete.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        :raises errors.HTTPValidationError: on a 422 validation response.
        :raises errors.SDKError: on any other 4XX/5XX or unexpected response.
        """
        base_url = None
        url_variables = None
        # Fall back to the client-wide timeout when none is supplied per call.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # A per-call server_url overrides the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.ConnectorDeleteV1Request(
            connector_id=connector_id,
        )

        # "#id" suffix is an operation disambiguator for the shared path template.
        req = self._build_request_async(
            method="DELETE",
            path="/v1/connectors/{connector_id}#id",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retries override the SDK-wide retry configuration.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry only on rate limits and transient server errors.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="connector_delete_v1",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.MessageResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                errors.HTTPValidationErrorData, http_res
            )
            raise errors.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
:param completion_args: + :param guardrails: :param name: :param description: :param metadata: @@ -451,6 +462,9 @@ async def start_async( completion_args=utils.get_pydantic_model( completion_args, OptionalNullable[models.CompletionArgs] ), + guardrails=utils.get_pydantic_model( + guardrails, OptionalNullable[List[models.GuardrailConfig]] + ), name=name, description=description, metadata=metadata, @@ -1721,6 +1735,9 @@ def restart( completion_args: Optional[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, + guardrails: OptionalNullable[ + Union[List[models.GuardrailConfig], List[models.GuardrailConfigTypedDict]] + ] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_version: OptionalNullable[ Union[ @@ -1744,6 +1761,7 @@ def restart( :param store: Whether to store the results into our servers or not. :param handoff_execution: :param completion_args: White-listed arguments from the completion API + :param guardrails: :param metadata: Custom metadata for the conversation. :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. :param retries: Override the default retry configuration for this method @@ -1773,6 +1791,9 @@ def restart( completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + guardrails=utils.get_pydantic_model( + guardrails, OptionalNullable[List[models.GuardrailConfig]] + ), metadata=metadata, from_entry_id=from_entry_id, agent_version=agent_version, @@ -1859,6 +1880,9 @@ async def restart_async( completion_args: Optional[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, + guardrails: OptionalNullable[ + Union[List[models.GuardrailConfig], List[models.GuardrailConfigTypedDict]] + ] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_version: OptionalNullable[ Union[ @@ -1882,6 +1906,7 @@ async def restart_async( :param store: Whether to store the results into our servers or not. 
:param handoff_execution: :param completion_args: White-listed arguments from the completion API + :param guardrails: :param metadata: Custom metadata for the conversation. :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. :param retries: Override the default retry configuration for this method @@ -1911,6 +1936,9 @@ async def restart_async( completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + guardrails=utils.get_pydantic_model( + guardrails, OptionalNullable[List[models.GuardrailConfig]] + ), metadata=metadata, from_entry_id=from_entry_id, agent_version=agent_version, @@ -2000,6 +2028,9 @@ def start_stream( completion_args: OptionalNullable[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = UNSET, + guardrails: OptionalNullable[ + Union[List[models.GuardrailConfig], List[models.GuardrailConfigTypedDict]] + ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, @@ -2027,6 +2058,7 @@ def start_stream( :param instructions: :param tools: List of tools which are available to the model during the conversation. 
:param completion_args: + :param guardrails: :param name: :param description: :param metadata: @@ -2060,6 +2092,9 @@ def start_stream( completion_args=utils.get_pydantic_model( completion_args, OptionalNullable[models.CompletionArgs] ), + guardrails=utils.get_pydantic_model( + guardrails, OptionalNullable[List[models.GuardrailConfig]] + ), name=name, description=description, metadata=metadata, @@ -2154,6 +2189,9 @@ async def start_stream_async( completion_args: OptionalNullable[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = UNSET, + guardrails: OptionalNullable[ + Union[List[models.GuardrailConfig], List[models.GuardrailConfigTypedDict]] + ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, @@ -2181,6 +2219,7 @@ async def start_stream_async( :param instructions: :param tools: List of tools which are available to the model during the conversation. :param completion_args: + :param guardrails: :param name: :param description: :param metadata: @@ -2214,6 +2253,9 @@ async def start_stream_async( completion_args=utils.get_pydantic_model( completion_args, OptionalNullable[models.CompletionArgs] ), + guardrails=utils.get_pydantic_model( + guardrails, OptionalNullable[List[models.GuardrailConfig]] + ), name=name, description=description, metadata=metadata, @@ -2589,6 +2631,9 @@ def restart_stream( completion_args: Optional[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, + guardrails: OptionalNullable[ + Union[List[models.GuardrailConfig], List[models.GuardrailConfigTypedDict]] + ] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_version: OptionalNullable[ Union[ @@ -2612,6 +2657,7 @@ def restart_stream( :param store: Whether to store the results into our servers or not. 
:param handoff_execution: :param completion_args: White-listed arguments from the completion API + :param guardrails: :param metadata: Custom metadata for the conversation. :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. :param retries: Override the default retry configuration for this method @@ -2641,6 +2687,9 @@ def restart_stream( completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + guardrails=utils.get_pydantic_model( + guardrails, OptionalNullable[List[models.GuardrailConfig]] + ), metadata=metadata, from_entry_id=from_entry_id, agent_version=agent_version, @@ -2734,6 +2783,9 @@ async def restart_stream_async( completion_args: Optional[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, + guardrails: OptionalNullable[ + Union[List[models.GuardrailConfig], List[models.GuardrailConfigTypedDict]] + ] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_version: OptionalNullable[ Union[ @@ -2757,6 +2809,7 @@ async def restart_stream_async( :param store: Whether to store the results into our servers or not. :param handoff_execution: :param completion_args: White-listed arguments from the completion API + :param guardrails: :param metadata: Custom metadata for the conversation. :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. 
    def create(
        self,
        *,
        name: str,
        description: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.Dataset:
        r"""Create a new empty dataset

        :param name: Name of the dataset to create.
        :param description: Human-readable description of the dataset.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        :raises errors.ObservabilityError: on 400/404/408/409/422 responses.
        :raises errors.SDKError: on any other 4XX/5XX or unexpected response.
        """
        base_url = None
        url_variables = None
        # Fall back to the client-wide timeout when none is supplied per call.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # A per-call server_url overrides the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.CreateDatasetRequest(
            name=name,
            description=description,
        )

        req = self._build_request(
            method="POST",
            path="/v1/observability/datasets",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Serialized lazily, only when the request is actually built.
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.CreateDatasetRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retries override the SDK-wide retry configuration.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry only on rate limits and transient server errors.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="create_dataset_v1_observability_datasets_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        # 201 Created is the success status for dataset creation.
        if utils.match_response(http_res, "201", "application/json"):
            return unmarshal_json_response(models.Dataset, http_res)
        if utils.match_response(
            http_res, ["400", "404", "408", "409", "422"], "application/json"
        ):
            response_data = unmarshal_json_response(
                errors.ObservabilityErrorData, http_res
            )
            raise errors.ObservabilityError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
    async def create_async(
        self,
        *,
        name: str,
        description: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.Dataset:
        r"""Create a new empty dataset

        :param name: Name of the dataset to create.
        :param description: Human-readable description of the dataset.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        :raises errors.ObservabilityError: on 400/404/408/409/422 responses.
        :raises errors.SDKError: on any other 4XX/5XX or unexpected response.
        """
        base_url = None
        url_variables = None
        # Fall back to the client-wide timeout when none is supplied per call.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # A per-call server_url overrides the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.CreateDatasetRequest(
            name=name,
            description=description,
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/observability/datasets",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Serialized lazily, only when the request is actually built.
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.CreateDatasetRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retries override the SDK-wide retry configuration.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry only on rate limits and transient server errors.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="create_dataset_v1_observability_datasets_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        # 201 Created is the success status for dataset creation.
        if utils.match_response(http_res, "201", "application/json"):
            return unmarshal_json_response(models.Dataset, http_res)
        if utils.match_response(
            http_res, ["400", "404", "408", "409", "422"], "application/json"
        ):
            response_data = unmarshal_json_response(
                errors.ObservabilityErrorData, http_res
            )
            raise errors.ObservabilityError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def list( + self, + *, + page_size: Optional[int] = 50, + page: Optional[int] = 1, + q: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListDatasetsResponse: + r"""List existing datasets + + :param page_size: + :param page: + :param q: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetDatasetsV1ObservabilityDatasetsGetRequest( + page_size=page_size, + page=page, + q=q, + ) + + req = self._build_request( + method="GET", + path="/v1/observability/datasets", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_datasets_v1_observability_datasets_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListDatasetsResponse, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) 
    async def list_async(
        self,
        *,
        page_size: Optional[int] = 50,
        page: Optional[int] = 1,
        q: OptionalNullable[str] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ListDatasetsResponse:
        r"""List existing datasets

        :param page_size: Number of datasets returned per page.
        :param page: 1-based page index.
        :param q: Optional search query used to filter datasets.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        :raises errors.ObservabilityError: on 400/404/408/409/422 responses.
        :raises errors.SDKError: on any other 4XX/5XX or unexpected response.
        """
        base_url = None
        url_variables = None
        # Fall back to the client-wide timeout when none is supplied per call.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # A per-call server_url overrides the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.GetDatasetsV1ObservabilityDatasetsGetRequest(
            page_size=page_size,
            page=page,
            q=q,
        )

        req = self._build_request_async(
            method="GET",
            path="/v1/observability/datasets",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retries override the SDK-wide retry configuration.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry only on rate limits and transient server errors.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="get_datasets_v1_observability_datasets_get",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ListDatasetsResponse, http_res)
        if utils.match_response(
            http_res, ["400", "404", "408", "409", "422"], "application/json"
        ):
            response_data = unmarshal_json_response(
                errors.ObservabilityErrorData, http_res
            )
            raise errors.ObservabilityError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
    def fetch(
        self,
        *,
        dataset_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.DatasetPreview:
        r"""Get dataset by id

        :param dataset_id: ID of the dataset to fetch.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        :raises errors.ObservabilityError: on 400/404/408/409/422 responses.
        :raises errors.SDKError: on any other 4XX/5XX or unexpected response.
        """
        base_url = None
        url_variables = None
        # Fall back to the client-wide timeout when none is supplied per call.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # A per-call server_url overrides the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.GetDatasetByIDV1ObservabilityDatasetsDatasetIDGetRequest(
            dataset_id=dataset_id,
        )

        req = self._build_request(
            method="GET",
            path="/v1/observability/datasets/{dataset_id}",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retries override the SDK-wide retry configuration.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry only on rate limits and transient server errors.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="get_dataset_by_id_v1_observability_datasets__dataset_id__get",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.DatasetPreview, http_res)
        if utils.match_response(
            http_res, ["400", "404", "408", "409", "422"], "application/json"
        ):
            response_data = unmarshal_json_response(
                errors.ObservabilityErrorData, http_res
            )
            raise errors.ObservabilityError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetDatasetByIDV1ObservabilityDatasetsDatasetIDGetRequest( + dataset_id=dataset_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/observability/datasets/{dataset_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_dataset_by_id_v1_observability_datasets__dataset_id__get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DatasetPreview, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + 
    def delete(
        self,
        *,
        dataset_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> None:
        r"""Delete a dataset

        :param dataset_id: ID of the dataset to delete.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        :raises errors.ObservabilityError: on 400/404/408/409/422 responses.
        :raises errors.SDKError: on any other 4XX/5XX or unexpected response.
        """
        base_url = None
        url_variables = None
        # Fall back to the client-wide timeout when none is supplied per call.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # A per-call server_url overrides the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.DeleteDatasetV1ObservabilityDatasetsDatasetIDDeleteRequest(
            dataset_id=dataset_id,
        )

        req = self._build_request(
            method="DELETE",
            path="/v1/observability/datasets/{dataset_id}",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retries override the SDK-wide retry configuration.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry only on rate limits and transient server errors.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="delete_dataset_v1_observability_datasets__dataset_id__delete",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        # 204 No Content is the success status: nothing to unmarshal.
        if utils.match_response(http_res, "204", "*"):
            return
        if utils.match_response(
            http_res, ["400", "404", "408", "409", "422"], "application/json"
        ):
            response_data = unmarshal_json_response(
                errors.ObservabilityErrorData, http_res
            )
            raise errors.ObservabilityError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise errors.SDKError("API error occurred", http_res, http_res_text)

        raise errors.SDKError("Unexpected response received", http_res)
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.DeleteDatasetV1ObservabilityDatasetsDatasetIDDeleteRequest( + dataset_id=dataset_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/observability/datasets/{dataset_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="delete_dataset_v1_observability_datasets__dataset_id__delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def update( + self, + *, + dataset_id: str, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DatasetPreview: + r"""Patch dataset + + :param dataset_id: + :param name: + :param description: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.UpdateDatasetV1ObservabilityDatasetsDatasetIDPatchRequest( + dataset_id=dataset_id, + update_dataset_request=models.UpdateDatasetRequest( + name=name, + description=description, + ), + ) + + req = self._build_request( + method="PATCH", + path="/v1/observability/datasets/{dataset_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_dataset_request, + False, + False, + "json", + models.UpdateDatasetRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="update_dataset_v1_observability_datasets__dataset_id__patch", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DatasetPreview, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", 
"422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def update_async( + self, + *, + dataset_id: str, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DatasetPreview: + r"""Patch dataset + + :param dataset_id: + :param name: + :param description: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.UpdateDatasetV1ObservabilityDatasetsDatasetIDPatchRequest( + dataset_id=dataset_id, + update_dataset_request=models.UpdateDatasetRequest( + name=name, + description=description, + ), + ) + + req = self._build_request_async( + method="PATCH", + path="/v1/observability/datasets/{dataset_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_dataset_request, + False, + False, + "json", + models.UpdateDatasetRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="update_dataset_v1_observability_datasets__dataset_id__patch", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DatasetPreview, http_res) + if utils.match_response( + http_res, ["400", 
"404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def list_records( + self, + *, + dataset_id: str, + page_size: Optional[int] = 50, + page: Optional[int] = 1, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListDatasetRecordsResponse: + r"""List existing records in the dataset + + :param dataset_id: + :param page_size: + :param page: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = ( + models.GetDatasetRecordsV1ObservabilityDatasetsDatasetIDRecordsGetRequest( + dataset_id=dataset_id, + page_size=page_size, + page=page, + ) + ) + + req = self._build_request( + method="GET", + path="/v1/observability/datasets/{dataset_id}/records", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_dataset_records_v1_observability_datasets__dataset_id__records_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListDatasetRecordsResponse, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, 
http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_records_async( + self, + *, + dataset_id: str, + page_size: Optional[int] = 50, + page: Optional[int] = 1, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListDatasetRecordsResponse: + r"""List existing records in the dataset + + :param dataset_id: + :param page_size: + :param page: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = ( + models.GetDatasetRecordsV1ObservabilityDatasetsDatasetIDRecordsGetRequest( + dataset_id=dataset_id, + page_size=page_size, + page=page, + ) + ) + + req = self._build_request_async( + method="GET", + path="/v1/observability/datasets/{dataset_id}/records", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_dataset_records_v1_observability_datasets__dataset_id__records_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListDatasetRecordsResponse, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise 
errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def create_record( + self, + *, + dataset_id: str, + payload: Union[models.ConversationPayload, models.ConversationPayloadTypedDict], + properties: Dict[str, Any], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DatasetRecord: + r"""Add a conversation to the dataset + + :param dataset_id: + :param payload: + :param properties: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateDatasetRecordV1ObservabilityDatasetsDatasetIDRecordsPostRequest( + dataset_id=dataset_id, + create_dataset_record_request=models.CreateDatasetRecordRequest( + payload=utils.get_pydantic_model(payload, models.ConversationPayload), + properties=properties, + ), + ) + + req = self._build_request( + method="POST", + path="/v1/observability/datasets/{dataset_id}/records", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.create_dataset_record_request, + False, + False, + "json", + models.CreateDatasetRecordRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="create_dataset_record_v1_observability_datasets__dataset_id__records_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "201", "application/json"): + return 
unmarshal_json_response(models.DatasetRecord, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def create_record_async( + self, + *, + dataset_id: str, + payload: Union[models.ConversationPayload, models.ConversationPayloadTypedDict], + properties: Dict[str, Any], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DatasetRecord: + r"""Add a conversation to the dataset + + :param dataset_id: + :param payload: + :param properties: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateDatasetRecordV1ObservabilityDatasetsDatasetIDRecordsPostRequest( + dataset_id=dataset_id, + create_dataset_record_request=models.CreateDatasetRecordRequest( + payload=utils.get_pydantic_model(payload, models.ConversationPayload), + properties=properties, + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/observability/datasets/{dataset_id}/records", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.create_dataset_record_request, + False, + False, + "json", + models.CreateDatasetRecordRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="create_dataset_record_v1_observability_datasets__dataset_id__records_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "201", "application/json"): 
+ return unmarshal_json_response(models.DatasetRecord, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def import_from_campaign( + self, + *, + dataset_id: str, + campaign_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DatasetImportTask: + r"""Populate the dataset with a campaign + + :param dataset_id: + :param campaign_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.PostDatasetRecordsFromCampaignV1ObservabilityDatasetsDatasetIDImportsFromCampaignPostRequest( + dataset_id=dataset_id, + import_dataset_from_campaign_request=models.ImportDatasetFromCampaignRequest( + campaign_id=campaign_id, + ), + ) + + req = self._build_request( + method="POST", + path="/v1/observability/datasets/{dataset_id}/imports/from-campaign", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.import_dataset_from_campaign_request, + False, + False, + "json", + models.ImportDatasetFromCampaignRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="post_dataset_records_from_campaign_v1_observability_datasets__dataset_id__imports_from_campaign_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "202", "application/json"): 
+ return unmarshal_json_response(models.DatasetImportTask, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def import_from_campaign_async( + self, + *, + dataset_id: str, + campaign_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DatasetImportTask: + r"""Populate the dataset with a campaign + + :param dataset_id: + :param campaign_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.PostDatasetRecordsFromCampaignV1ObservabilityDatasetsDatasetIDImportsFromCampaignPostRequest( + dataset_id=dataset_id, + import_dataset_from_campaign_request=models.ImportDatasetFromCampaignRequest( + campaign_id=campaign_id, + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/observability/datasets/{dataset_id}/imports/from-campaign", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.import_dataset_from_campaign_request, + False, + False, + "json", + models.ImportDatasetFromCampaignRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="post_dataset_records_from_campaign_v1_observability_datasets__dataset_id__imports_from_campaign_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "202", 
"application/json"): + return unmarshal_json_response(models.DatasetImportTask, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def import_from_explorer( + self, + *, + dataset_id: str, + completion_event_ids: List[str], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DatasetImportTask: + r"""Populate the dataset with samples from the explorer + + :param dataset_id: + :param completion_event_ids: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.PostDatasetRecordsFromExplorerV1ObservabilityDatasetsDatasetIDImportsFromExplorerPostRequest( + dataset_id=dataset_id, + import_dataset_from_explorer_request=models.ImportDatasetFromExplorerRequest( + completion_event_ids=completion_event_ids, + ), + ) + + req = self._build_request( + method="POST", + path="/v1/observability/datasets/{dataset_id}/imports/from-explorer", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.import_dataset_from_explorer_request, + False, + False, + "json", + models.ImportDatasetFromExplorerRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="post_dataset_records_from_explorer_v1_observability_datasets__dataset_id__imports_from_explorer_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "202", 
"application/json"): + return unmarshal_json_response(models.DatasetImportTask, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def import_from_explorer_async( + self, + *, + dataset_id: str, + completion_event_ids: List[str], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DatasetImportTask: + r"""Populate the dataset with samples from the explorer + + :param dataset_id: + :param completion_event_ids: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.PostDatasetRecordsFromExplorerV1ObservabilityDatasetsDatasetIDImportsFromExplorerPostRequest( + dataset_id=dataset_id, + import_dataset_from_explorer_request=models.ImportDatasetFromExplorerRequest( + completion_event_ids=completion_event_ids, + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/observability/datasets/{dataset_id}/imports/from-explorer", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.import_dataset_from_explorer_request, + False, + False, + "json", + models.ImportDatasetFromExplorerRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="post_dataset_records_from_explorer_v1_observability_datasets__dataset_id__imports_from_explorer_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if 
utils.match_response(http_res, "202", "application/json"): + return unmarshal_json_response(models.DatasetImportTask, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def import_from_file( + self, + *, + dataset_id: str, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DatasetImportTask: + r"""Populate the dataset with samples from an uploaded file + + :param dataset_id: + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.PostDatasetRecordsFromFileV1ObservabilityDatasetsDatasetIDImportsFromFilePostRequest( + dataset_id=dataset_id, + import_dataset_from_file_request=models.ImportDatasetFromFileRequest( + file_id=file_id, + ), + ) + + req = self._build_request( + method="POST", + path="/v1/observability/datasets/{dataset_id}/imports/from-file", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.import_dataset_from_file_request, + False, + False, + "json", + models.ImportDatasetFromFileRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="post_dataset_records_from_file_v1_observability_datasets__dataset_id__imports_from_file_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "202", "application/json"): + return 
unmarshal_json_response(models.DatasetImportTask, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def import_from_file_async( + self, + *, + dataset_id: str, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DatasetImportTask: + r"""Populate the dataset with samples from an uploaded file + + :param dataset_id: + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.PostDatasetRecordsFromFileV1ObservabilityDatasetsDatasetIDImportsFromFilePostRequest( + dataset_id=dataset_id, + import_dataset_from_file_request=models.ImportDatasetFromFileRequest( + file_id=file_id, + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/observability/datasets/{dataset_id}/imports/from-file", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.import_dataset_from_file_request, + False, + False, + "json", + models.ImportDatasetFromFileRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="post_dataset_records_from_file_v1_observability_datasets__dataset_id__imports_from_file_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "202", "application/json"): + return 
unmarshal_json_response(models.DatasetImportTask, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def import_from_playground( + self, + *, + dataset_id: str, + conversation_ids: List[str], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DatasetImportTask: + r"""Populate the dataset with samples from the playground + + :param dataset_id: + :param conversation_ids: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.PostDatasetRecordsFromPlaygroundV1ObservabilityDatasetsDatasetIDImportsFromPlaygroundPostRequest( + dataset_id=dataset_id, + import_dataset_from_playground_request=models.ImportDatasetFromPlaygroundRequest( + conversation_ids=conversation_ids, + ), + ) + + req = self._build_request( + method="POST", + path="/v1/observability/datasets/{dataset_id}/imports/from-playground", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.import_dataset_from_playground_request, + False, + False, + "json", + models.ImportDatasetFromPlaygroundRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="post_dataset_records_from_playground_v1_observability_datasets__dataset_id__imports_from_playground_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, 
"202", "application/json"): + return unmarshal_json_response(models.DatasetImportTask, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def import_from_playground_async( + self, + *, + dataset_id: str, + conversation_ids: List[str], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DatasetImportTask: + r"""Populate the dataset with samples from the playground + + :param dataset_id: + :param conversation_ids: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.PostDatasetRecordsFromPlaygroundV1ObservabilityDatasetsDatasetIDImportsFromPlaygroundPostRequest( + dataset_id=dataset_id, + import_dataset_from_playground_request=models.ImportDatasetFromPlaygroundRequest( + conversation_ids=conversation_ids, + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/observability/datasets/{dataset_id}/imports/from-playground", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.import_dataset_from_playground_request, + False, + False, + "json", + models.ImportDatasetFromPlaygroundRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="post_dataset_records_from_playground_v1_observability_datasets__dataset_id__imports_from_playground_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if 
utils.match_response(http_res, "202", "application/json"): + return unmarshal_json_response(models.DatasetImportTask, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def import_from_dataset_records( + self, + *, + dataset_id: str, + dataset_record_ids: List[str], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DatasetImportTask: + r"""Populate the dataset with samples from another dataset + + :param dataset_id: + :param dataset_record_ids: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.PostDatasetRecordsFromDatasetV1ObservabilityDatasetsDatasetIDImportsFromDatasetPostRequest( + dataset_id=dataset_id, + import_dataset_from_dataset_request=models.ImportDatasetFromDatasetRequest( + dataset_record_ids=dataset_record_ids, + ), + ) + + req = self._build_request( + method="POST", + path="/v1/observability/datasets/{dataset_id}/imports/from-dataset", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.import_dataset_from_dataset_request, + False, + False, + "json", + models.ImportDatasetFromDatasetRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="post_dataset_records_from_dataset_v1_observability_datasets__dataset_id__imports_from_dataset_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "202", 
"application/json"): + return unmarshal_json_response(models.DatasetImportTask, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def import_from_dataset_records_async( + self, + *, + dataset_id: str, + dataset_record_ids: List[str], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DatasetImportTask: + r"""Populate the dataset with samples from another dataset + + :param dataset_id: + :param dataset_record_ids: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.PostDatasetRecordsFromDatasetV1ObservabilityDatasetsDatasetIDImportsFromDatasetPostRequest( + dataset_id=dataset_id, + import_dataset_from_dataset_request=models.ImportDatasetFromDatasetRequest( + dataset_record_ids=dataset_record_ids, + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/observability/datasets/{dataset_id}/imports/from-dataset", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.import_dataset_from_dataset_request, + False, + False, + "json", + models.ImportDatasetFromDatasetRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="post_dataset_records_from_dataset_v1_observability_datasets__dataset_id__imports_from_dataset_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, 
"202", "application/json"): + return unmarshal_json_response(models.DatasetImportTask, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def export_to_jsonl( + self, + *, + dataset_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ExportDatasetResponse: + r"""Export to the Files API and retrieve presigned URL to download the resulting JSONL file + + :param dataset_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ExportDatasetToJsonlV1ObservabilityDatasetsDatasetIDExportsToJsonlGetRequest( + dataset_id=dataset_id, + ) + + req = self._build_request( + method="GET", + path="/v1/observability/datasets/{dataset_id}/exports/to-jsonl", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="export_dataset_to_jsonl_v1_observability_datasets__dataset_id__exports_to_jsonl_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ExportDatasetResponse, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if 
utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def export_to_jsonl_async( + self, + *, + dataset_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ExportDatasetResponse: + r"""Export to the Files API and retrieve presigned URL to download the resulting JSONL file + + :param dataset_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ExportDatasetToJsonlV1ObservabilityDatasetsDatasetIDExportsToJsonlGetRequest( + dataset_id=dataset_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/observability/datasets/{dataset_id}/exports/to-jsonl", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="export_dataset_to_jsonl_v1_observability_datasets__dataset_id__exports_to_jsonl_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ExportDatasetResponse, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, 
http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def fetch_task( + self, + *, + dataset_id: str, + task_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DatasetImportTask: + r"""Get status of a dataset import task + + :param dataset_id: + :param task_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetDatasetImportTaskV1ObservabilityDatasetsDatasetIDTasksTaskIDGetRequest( + dataset_id=dataset_id, + task_id=task_id, + ) + + req = self._build_request( + method="GET", + path="/v1/observability/datasets/{dataset_id}/tasks/{task_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_dataset_import_task_v1_observability_datasets__dataset_id__tasks__task_id__get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DatasetImportTask, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + 
if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def fetch_task_async( + self, + *, + dataset_id: str, + task_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DatasetImportTask: + r"""Get status of a dataset import task + + :param dataset_id: + :param task_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetDatasetImportTaskV1ObservabilityDatasetsDatasetIDTasksTaskIDGetRequest( + dataset_id=dataset_id, + task_id=task_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/observability/datasets/{dataset_id}/tasks/{task_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_dataset_import_task_v1_observability_datasets__dataset_id__tasks__task_id__get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DatasetImportTask, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise 
errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def list_tasks( + self, + *, + dataset_id: str, + page_size: Optional[int] = 50, + page: Optional[int] = 1, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListDatasetImportTasksResponse: + r"""List import tasks for the given dataset + + :param dataset_id: + :param page_size: + :param page: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = ( + models.GetDatasetImportTasksV1ObservabilityDatasetsDatasetIDTasksGetRequest( + dataset_id=dataset_id, + page_size=page_size, + page=page, + ) + ) + + req = self._build_request( + method="GET", + path="/v1/observability/datasets/{dataset_id}/tasks", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_dataset_import_tasks_v1_observability_datasets__dataset_id__tasks_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.ListDatasetImportTasksResponse, http_res + ) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise 
errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_tasks_async( + self, + *, + dataset_id: str, + page_size: Optional[int] = 50, + page: Optional[int] = 1, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListDatasetImportTasksResponse: + r"""List import tasks for the given dataset + + :param dataset_id: + :param page_size: + :param page: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = ( + models.GetDatasetImportTasksV1ObservabilityDatasetsDatasetIDTasksGetRequest( + dataset_id=dataset_id, + page_size=page_size, + page=page, + ) + ) + + req = self._build_request_async( + method="GET", + path="/v1/observability/datasets/{dataset_id}/tasks", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_dataset_import_tasks_v1_observability_datasets__dataset_id__tasks_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.ListDatasetImportTasksResponse, http_res + ) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise 
errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/documents.py b/src/mistralai/client/documents.py index b3130364..95ea4323 100644 --- a/src/mistralai/client/documents.py +++ b/src/mistralai/client/documents.py @@ -1228,7 +1228,7 @@ def status( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ProcessingStatusOut: + ) -> models.ProcessingStatus: r"""Retrieve the processing status of a specific document. Given a library and a document in that library, retrieve the processing status of that document. @@ -1297,7 +1297,7 @@ def status( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ProcessingStatusOut, http_res) + return unmarshal_json_response(models.ProcessingStatus, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( errors.HTTPValidationErrorData, http_res @@ -1321,7 +1321,7 @@ async def status_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ProcessingStatusOut: + ) -> models.ProcessingStatus: r"""Retrieve the processing status of a specific document. Given a library and a document in that library, retrieve the processing status of that document. 
@@ -1390,7 +1390,7 @@ async def status_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ProcessingStatusOut, http_res) + return unmarshal_json_response(models.ProcessingStatus, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( errors.HTTPValidationErrorData, http_res diff --git a/src/mistralai/client/errors/__init__.py b/src/mistralai/client/errors/__init__.py index 58a591a1..f6fd99a0 100644 --- a/src/mistralai/client/errors/__init__.py +++ b/src/mistralai/client/errors/__init__.py @@ -9,6 +9,7 @@ if TYPE_CHECKING: from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData from .no_response_error import NoResponseError + from .observabilityerror import ObservabilityError, ObservabilityErrorData from .responsevalidationerror import ResponseValidationError from .sdkerror import SDKError @@ -17,6 +18,8 @@ "HTTPValidationErrorData", "MistralError", "NoResponseError", + "ObservabilityError", + "ObservabilityErrorData", "ResponseValidationError", "SDKError", ] @@ -25,6 +28,8 @@ "HTTPValidationError": ".httpvalidationerror", "HTTPValidationErrorData": ".httpvalidationerror", "NoResponseError": ".no_response_error", + "ObservabilityError": ".observabilityerror", + "ObservabilityErrorData": ".observabilityerror", "ResponseValidationError": ".responsevalidationerror", "SDKError": ".sdkerror", } diff --git a/src/mistralai/client/errors/observabilityerror.py b/src/mistralai/client/errors/observabilityerror.py new file mode 100644 index 00000000..a360bac4 --- /dev/null +++ b/src/mistralai/client/errors/observabilityerror.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 4946ae06717e + +from __future__ import annotations +from dataclasses import dataclass, field +import httpx +from mistralai.client.errors import MistralError +from mistralai.client.models import ( + observabilityerrordetail as models_observabilityerrordetail, +) +from mistralai.client.types import BaseModel +from typing import Optional + + +class ObservabilityErrorData(BaseModel): + detail: models_observabilityerrordetail.ObservabilityErrorDetail + + +@dataclass(unsafe_hash=True) +class ObservabilityError(MistralError): + data: ObservabilityErrorData = field(hash=False) + + def __init__( + self, + data: ObservabilityErrorData, + raw_response: httpx.Response, + body: Optional[str] = None, + ): + fallback = body or raw_response.text + message = str(data.detail.message) or fallback + super().__init__(message, raw_response, body) + object.__setattr__(self, "data", data) diff --git a/src/mistralai/client/fields.py b/src/mistralai/client/fields.py new file mode 100644 index 00000000..a5b8003c --- /dev/null +++ b/src/mistralai/client/fields.py @@ -0,0 +1,588 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 862335210b20 + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Mapping, Optional, Union + + +class Fields(BaseSDK): + def list( + self, + *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListChatCompletionFieldsResponse: + r"""Get Chat Completion Fields + + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + req = self._build_request( + method="GET", + path="/v1/observability/chat-completion-fields", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_chat_completion_fields_v1_observability_chat_completion_fields_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.ListChatCompletionFieldsResponse, http_res + ) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, 
http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListChatCompletionFieldsResponse: + r"""Get Chat Completion Fields + + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + req = self._build_request_async( + method="GET", + path="/v1/observability/chat-completion-fields", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + 
operation_id="get_chat_completion_fields_v1_observability_chat_completion_fields_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.ListChatCompletionFieldsResponse, http_res + ) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def fetch_options( + self, + *, + field_name: str, + operator: models.Operator, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.FetchChatCompletionFieldOptionsResponse: + r"""Get Chat Completion Field Options + + :param field_name: + :param operator: The operator to use for filtering options + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetChatCompletionFieldOptionsV1ObservabilityChatCompletionFieldsFieldNameOptionsGetRequest( + field_name=field_name, + operator=operator, + ) + + req = self._build_request( + method="GET", + path="/v1/observability/chat-completion-fields/{field_name}/options", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_chat_completion_field_options_v1_observability_chat_completion_fields__field_name__options_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.FetchChatCompletionFieldOptionsResponse, http_res + ) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, 
http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def fetch_options_async( + self, + *, + field_name: str, + operator: models.Operator, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.FetchChatCompletionFieldOptionsResponse: + r"""Get Chat Completion Field Options + + :param field_name: + :param operator: The operator to use for filtering options + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetChatCompletionFieldOptionsV1ObservabilityChatCompletionFieldsFieldNameOptionsGetRequest( + field_name=field_name, + operator=operator, + ) + + req = self._build_request_async( + method="GET", + path="/v1/observability/chat-completion-fields/{field_name}/options", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_chat_completion_field_options_v1_observability_chat_completion_fields__field_name__options_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.FetchChatCompletionFieldOptionsResponse, http_res + ) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + 
errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def fetch_option_counts( + self, + *, + field_name: str, + filter_params: OptionalNullable[ + Union[models.FilterPayload, models.FilterPayloadTypedDict] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.FetchFieldOptionCountsResponse: + r"""Get Chat Completion Field Options Counts + + :param field_name: + :param filter_params: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetChatCompletionFieldOptionsCountsV1ObservabilityChatCompletionFieldsFieldNameOptionsCountsPostRequest( + field_name=field_name, + fetch_field_option_counts_request=models.FetchFieldOptionCountsRequest( + filter_params=utils.get_pydantic_model( + filter_params, OptionalNullable[models.FilterPayload] + ), + ), + ) + + req = self._build_request( + method="POST", + path="/v1/observability/chat-completion-fields/{field_name}/options-counts", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.fetch_field_option_counts_request, + False, + False, + "json", + models.FetchFieldOptionCountsRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_chat_completion_field_options_counts_v1_observability_chat_completion_fields__field_name__options_counts_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + 
) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.FetchFieldOptionCountsResponse, http_res + ) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def fetch_option_counts_async( + self, + *, + field_name: str, + filter_params: OptionalNullable[ + Union[models.FilterPayload, models.FilterPayloadTypedDict] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.FetchFieldOptionCountsResponse: + r"""Get Chat Completion Field Options Counts + + :param field_name: + :param filter_params: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetChatCompletionFieldOptionsCountsV1ObservabilityChatCompletionFieldsFieldNameOptionsCountsPostRequest( + field_name=field_name, + fetch_field_option_counts_request=models.FetchFieldOptionCountsRequest( + filter_params=utils.get_pydantic_model( + filter_params, OptionalNullable[models.FilterPayload] + ), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/observability/chat-completion-fields/{field_name}/options-counts", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.fetch_field_option_counts_request, + False, + False, + "json", + models.FetchFieldOptionCountsRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_chat_completion_field_options_counts_v1_observability_chat_completion_fields__field_name__options_counts_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + 
retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.FetchFieldOptionCountsResponse, http_res + ) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/files.py b/src/mistralai/client/files.py index a5f3adf6..a843cb7b 100644 --- a/src/mistralai/client/files.py +++ b/src/mistralai/client/files.py @@ -18,6 +18,10 @@ def upload( self, *, file: Union[models.File, models.FileTypedDict], + expiry: OptionalNullable[int] = UNSET, + visibility: Optional[ + models.FilesAPIRoutesUploadFileFileVisibility + ] = "workspace", purpose: Optional[models.FilePurpose] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -41,6 +45,8 @@ def upload( ```bash file=@path/to/your/file.jsonl ``` + :param expiry: + :param visibility: :param purpose: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -58,6 +64,8 @@ def upload( base_url = self._get_url(base_url, url_variables) request = models.MultiPartBodyParams( + expiry=expiry, + visibility=visibility, purpose=purpose, file=utils.get_pydantic_model(file, models.File), ) @@ -120,6 +128,10 @@ async def upload_async( self, *, file: Union[models.File, 
models.FileTypedDict], + expiry: OptionalNullable[int] = UNSET, + visibility: Optional[ + models.FilesAPIRoutesUploadFileFileVisibility + ] = "workspace", purpose: Optional[models.FilePurpose] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -143,6 +155,8 @@ async def upload_async( ```bash file=@path/to/your/file.jsonl ``` + :param expiry: + :param visibility: :param purpose: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -160,6 +174,8 @@ async def upload_async( base_url = self._get_url(base_url, url_variables) request = models.MultiPartBodyParams( + expiry=expiry, + visibility=visibility, purpose=purpose, file=utils.get_pydantic_model(file, models.File), ) diff --git a/src/mistralai/client/judges.py b/src/mistralai/client/judges.py new file mode 100644 index 00000000..e5dacd7f --- /dev/null +++ b/src/mistralai/client/judges.py @@ -0,0 +1,1038 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b6024a41ecb4 + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, List, Mapping, Optional, Union + + +class Judges(BaseSDK): + def create( + self, + *, + name: str, + description: str, + model_name: str, + output: Union[ + models.CreateJudgeRequestOutput, models.CreateJudgeRequestOutputTypedDict + ], + instructions: str, + tools: List[str], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Judge: + r"""Create a new judge + + :param name: + :param description: + :param model_name: + :param output: + :param instructions: + :param tools: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateJudgeRequest( + name=name, + description=description, + model_name=model_name, + output=utils.get_pydantic_model(output, models.CreateJudgeRequestOutput), + instructions=instructions, + tools=tools, + ) + + req = self._build_request( + method="POST", + path="/v1/observability/judges", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.CreateJudgeRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="create_judge_v1_observability_judges_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "201", "application/json"): + return unmarshal_json_response(models.Judge, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = 
unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + name: str, + description: str, + model_name: str, + output: Union[ + models.CreateJudgeRequestOutput, models.CreateJudgeRequestOutputTypedDict + ], + instructions: str, + tools: List[str], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Judge: + r"""Create a new judge + + :param name: + :param description: + :param model_name: + :param output: + :param instructions: + :param tools: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.CreateJudgeRequest( + name=name, + description=description, + model_name=model_name, + output=utils.get_pydantic_model(output, models.CreateJudgeRequestOutput), + instructions=instructions, + tools=tools, + ) + + req = self._build_request_async( + method="POST", + path="/v1/observability/judges", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.CreateJudgeRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="create_judge_v1_observability_judges_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "201", "application/json"): + return unmarshal_json_response(models.Judge, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + 
response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def list( + self, + *, + type_filter: OptionalNullable[List[models.JudgeOutputType]] = UNSET, + model_filter: OptionalNullable[List[str]] = UNSET, + page_size: Optional[int] = 50, + page: Optional[int] = 1, + q: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListJudgesResponse: + r"""Get judges with optional filtering and search + + :param type_filter: Filter by judge output types + :param model_filter: Filter by model names + :param page_size: + :param page: + :param q: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetJudgesV1ObservabilityJudgesGetRequest( + type_filter=type_filter, + model_filter=model_filter, + page_size=page_size, + page=page, + q=q, + ) + + req = self._build_request( + method="GET", + path="/v1/observability/judges", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_judges_v1_observability_judges_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListJudgesResponse, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + 
http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + type_filter: OptionalNullable[List[models.JudgeOutputType]] = UNSET, + model_filter: OptionalNullable[List[str]] = UNSET, + page_size: Optional[int] = 50, + page: Optional[int] = 1, + q: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListJudgesResponse: + r"""Get judges with optional filtering and search + + :param type_filter: Filter by judge output types + :param model_filter: Filter by model names + :param page_size: + :param page: + :param q: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetJudgesV1ObservabilityJudgesGetRequest( + type_filter=type_filter, + model_filter=model_filter, + page_size=page_size, + page=page, + q=q, + ) + + req = self._build_request_async( + method="GET", + path="/v1/observability/judges", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_judges_v1_observability_judges_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListJudgesResponse, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, 
"4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def fetch( + self, + *, + judge_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Judge: + r"""Get judge by id + + :param judge_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetJudgeByIDV1ObservabilityJudgesJudgeIDGetRequest( + judge_id=judge_id, + ) + + req = self._build_request( + method="GET", + path="/v1/observability/judges/{judge_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_judge_by_id_v1_observability_judges__judge_id__get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Judge, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise 
errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def fetch_async( + self, + *, + judge_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Judge: + r"""Get judge by id + + :param judge_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetJudgeByIDV1ObservabilityJudgesJudgeIDGetRequest( + judge_id=judge_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/observability/judges/{judge_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res 
= await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_judge_by_id_v1_observability_judges__judge_id__get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Judge, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + judge_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a judge + + :param judge_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.DeleteJudgeV1ObservabilityJudgesJudgeIDDeleteRequest( + judge_id=judge_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/observability/judges/{judge_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="delete_judge_v1_observability_judges__judge_id__delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, 
http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + judge_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a judge + + :param judge_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.DeleteJudgeV1ObservabilityJudgesJudgeIDDeleteRequest( + judge_id=judge_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/observability/judges/{judge_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + 
config=self.sdk_configuration, + base_url=base_url or "", + operation_id="delete_judge_v1_observability_judges__judge_id__delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def update( + self, + *, + judge_id: str, + name: str, + description: str, + model_name: str, + output: Union[ + models.UpdateJudgeRequestOutput, models.UpdateJudgeRequestOutputTypedDict + ], + instructions: str, + tools: List[str], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Update a judge + + :param judge_id: + :param name: + :param description: + :param model_name: + :param output: + :param instructions: + :param tools: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: 
Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.UpdateJudgeV1ObservabilityJudgesJudgeIDPutRequest( + judge_id=judge_id, + update_judge_request=models.UpdateJudgeRequest( + name=name, + description=description, + model_name=model_name, + output=utils.get_pydantic_model( + output, models.UpdateJudgeRequestOutput + ), + instructions=instructions, + tools=tools, + ), + ) + + req = self._build_request( + method="PUT", + path="/v1/observability/judges/{judge_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_judge_request, + False, + False, + "json", + models.UpdateJudgeRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="update_judge_v1_observability_judges__judge_id__put", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if 
utils.match_response(http_res, "204", "*"): + return + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def update_async( + self, + *, + judge_id: str, + name: str, + description: str, + model_name: str, + output: Union[ + models.UpdateJudgeRequestOutput, models.UpdateJudgeRequestOutputTypedDict + ], + instructions: str, + tools: List[str], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Update a judge + + :param judge_id: + :param name: + :param description: + :param model_name: + :param output: + :param instructions: + :param tools: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.UpdateJudgeV1ObservabilityJudgesJudgeIDPutRequest( + judge_id=judge_id, + update_judge_request=models.UpdateJudgeRequest( + name=name, + description=description, + model_name=model_name, + output=utils.get_pydantic_model( + output, models.UpdateJudgeRequestOutput + ), + instructions=instructions, + tools=tools, + ), + ) + + req = self._build_request_async( + method="PUT", + path="/v1/observability/judges/{judge_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_judge_request, + False, + False, + "json", + models.UpdateJudgeRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="update_judge_v1_observability_judges__judge_id__put", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if 
utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/models/__init__.py b/src/mistralai/client/models/__init__.py index 7d2dfd97..741c80f3 100644 --- a/src/mistralai/client/models/__init__.py +++ b/src/mistralai/client/models/__init__.py @@ -1,6 +1,14 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" # @generated-id: e0e8dad92725 +from .filtergroup import ( + And, + AndTypedDict, + FilterGroup, + FilterGroupTypedDict, + Or, + OrTypedDict, +) from typing import Any, TYPE_CHECKING from mistralai.client.utils.dynamic_imports import lazy_getattr, lazy_dir @@ -131,6 +139,7 @@ AgentsCompletionStreamRequestToolChoiceTypedDict, AgentsCompletionStreamRequestTypedDict, ) + from .annotations import Annotations, AnnotationsTypedDict, Audience from .apiendpoint import APIEndpoint from .archivemodelresponse import ( ArchiveModelResponse, @@ -143,6 +152,7 @@ AssistantMessageTypedDict, ) from .audiochunk import AudioChunk, AudioChunkTypedDict + from .audiocontent import AudioContent, AudioContentTypedDict from .audioencoding import AudioEncoding from .audioformat import AudioFormat, AudioFormatTypedDict from .audiotranscriptionrequest import ( @@ -153,12 +163,32 @@ AudioTranscriptionRequestStream, AudioTranscriptionRequestStreamTypedDict, ) + from .audiourl import AudioURL, AudioURLTypedDict + from .audiourlchunk 
import ( + AudioURLChunk, + AudioURLChunkTypedDict, + AudioURLUnion, + AudioURLUnionTypedDict, + ) + from .authdata import AuthData, AuthDataTypedDict + from .basefielddefinition import ( + BaseFieldDefinition, + BaseFieldDefinitionTypedDict, + SupportedOperator, + TypeEnum, + ) from .basemodelcard import BaseModelCard, BaseModelCardTypedDict + from .basetaskstatus import BaseTaskStatus from .batcherror import BatchError, BatchErrorTypedDict from .batchjob import BatchJob, BatchJobTypedDict from .batchjobstatus import BatchJobStatus from .batchrequest import BatchRequest, BatchRequestTypedDict + from .blobresourcecontents import ( + BlobResourceContents, + BlobResourceContentsTypedDict, + ) from .builtinconnectors import BuiltInConnectors + from .campaign import Campaign, CampaignTypedDict from .chatclassificationrequest import ( ChatClassificationRequest, ChatClassificationRequestTypedDict, @@ -168,6 +198,18 @@ ChatCompletionChoiceFinishReason, ChatCompletionChoiceTypedDict, ) + from .chatcompletionevent import ( + ChatCompletionEvent, + ChatCompletionEventExtraFields, + ChatCompletionEventExtraFieldsTypedDict, + ChatCompletionEventTypedDict, + ) + from .chatcompletioneventpreview import ( + ChatCompletionEventPreview, + ChatCompletionEventPreviewExtraFields, + ChatCompletionEventPreviewExtraFieldsTypedDict, + ChatCompletionEventPreviewTypedDict, + ) from .chatcompletionrequest import ( ChatCompletionRequest, ChatCompletionRequestMessage, @@ -202,6 +244,10 @@ ChatModerationRequestInputs3TypedDict, ChatModerationRequestTypedDict, ) + from .chattranscriptionevent import ( + ChatTranscriptionEvent, + ChatTranscriptionEventTypedDict, + ) from .checkpoint import Checkpoint, CheckpointTypedDict from .classificationrequest import ( ClassificationRequest, @@ -286,6 +332,52 @@ CompletionTrainingParameters, CompletionTrainingParametersTypedDict, ) + from .connector import Connector, ConnectorTypedDict + from .connector_call_tool_v1op import ( + ConnectorCallToolV1Request, + 
ConnectorCallToolV1RequestTypedDict, + ) + from .connector_delete_v1op import ( + ConnectorDeleteV1Request, + ConnectorDeleteV1RequestTypedDict, + ) + from .connector_get_v1op import ( + ConnectorGetV1Request, + ConnectorGetV1RequestTypedDict, + ) + from .connector_list_v1op import ( + ConnectorListV1Request, + ConnectorListV1RequestTypedDict, + ) + from .connector_update_v1op import ( + ConnectorUpdateV1Request, + ConnectorUpdateV1RequestTypedDict, + ) + from .connectorcalltoolrequest import ( + ConnectorCallToolRequest, + ConnectorCallToolRequestTypedDict, + ) + from .connectorsqueryfilters import ( + ConnectorsQueryFilters, + ConnectorsQueryFiltersTypedDict, + ) + from .connectortool import ConnectorTool, ConnectorToolTypedDict + from .connectortoolcallmetadata import ( + ConnectorToolCallMetadata, + ConnectorToolCallMetadataTypedDict, + ) + from .connectortoolcallresponse import ( + ConnectorToolCallResponse, + ConnectorToolCallResponseContent, + ConnectorToolCallResponseContentTypedDict, + ConnectorToolCallResponseTypedDict, + UnknownConnectorToolCallResponseContent, + ) + from .connectortoollocale import ConnectorToolLocale, ConnectorToolLocaleTypedDict + from .connectortoolresultmetadata import ( + ConnectorToolResultMetadata, + ConnectorToolResultMetadataTypedDict, + ) from .contentchunk import ContentChunk, ContentChunkTypedDict, UnknownContentChunk from .conversationappendrequest import ( ConversationAppendRequest, @@ -315,6 +407,7 @@ ConversationMessages, ConversationMessagesTypedDict, ) + from .conversationpayload import ConversationPayload, ConversationPayloadTypedDict from .conversationrequest import ( ConversationRequest, ConversationRequestAgentVersion, @@ -326,9 +419,11 @@ ) from .conversationresponse import ( ConversationResponse, + ConversationResponseOutput, + ConversationResponseOutputTypedDict, ConversationResponseTypedDict, - Output, - OutputTypedDict, + Guardrail, + GuardrailTypedDict, ) from .conversationrestartrequest import ( 
ConversationRestartRequest, @@ -344,6 +439,7 @@ ConversationRestartStreamRequestHandoffExecution, ConversationRestartStreamRequestTypedDict, ) + from .conversationsource import ConversationSource from .conversationstreamrequest import ( ConversationStreamRequest, ConversationStreamRequestAgentVersion, @@ -353,16 +449,14 @@ ConversationStreamRequestToolTypedDict, ConversationStreamRequestTypedDict, ) - from .conversationthinkchunk import ( - ConversationThinkChunk, - ConversationThinkChunkThinking, - ConversationThinkChunkThinkingTypedDict, - ConversationThinkChunkTypedDict, - ) from .conversationusageinfo import ( ConversationUsageInfo, ConversationUsageInfoTypedDict, ) + from .create_dataset_record_v1_observability_datasets_dataset_id_records_postop import ( + CreateDatasetRecordV1ObservabilityDatasetsDatasetIDRecordsPostRequest, + CreateDatasetRecordV1ObservabilityDatasetsDatasetIDRecordsPostRequestTypedDict, + ) from .createagentrequest import ( CreateAgentRequest, CreateAgentRequestTool, @@ -373,6 +467,22 @@ CreateBatchJobRequest, CreateBatchJobRequestTypedDict, ) + from .createcampaignrequest import ( + CreateCampaignRequest, + CreateCampaignRequestTypedDict, + ) + from .createconnectorrequest import ( + CreateConnectorRequest, + CreateConnectorRequestTypedDict, + ) + from .createdatasetrecordrequest import ( + CreateDatasetRecordRequest, + CreateDatasetRecordRequestTypedDict, + ) + from .createdatasetrequest import ( + CreateDatasetRequest, + CreateDatasetRequestTypedDict, + ) from .createfileresponse import CreateFileResponse, CreateFileResponseTypedDict from .createfinetuningjobrequest import ( CreateFineTuningJobRequest, @@ -384,16 +494,50 @@ Hyperparameters, HyperparametersTypedDict, ) + from .creategithubrepositoryrequest import ( + CreateGithubRepositoryRequest, + CreateGithubRepositoryRequestTypedDict, + ) + from .createjudgerequest import ( + CreateJudgeRequest, + CreateJudgeRequestOutput, + CreateJudgeRequestOutputTypedDict, + 
CreateJudgeRequestTypedDict, + ) from .createlibraryrequest import ( CreateLibraryRequest, CreateLibraryRequestTypedDict, ) + from .dataset import Dataset, DatasetTypedDict + from .datasetimporttask import DatasetImportTask, DatasetImportTaskTypedDict + from .datasetpreview import DatasetPreview, DatasetPreviewTypedDict + from .datasetrecord import DatasetRecord, DatasetRecordTypedDict + from .delete_campaign_v1_observability_campaigns_campaign_id_deleteop import ( + DeleteCampaignV1ObservabilityCampaignsCampaignIDDeleteRequest, + DeleteCampaignV1ObservabilityCampaignsCampaignIDDeleteRequestTypedDict, + ) + from .delete_dataset_record_v1_observability_dataset_records_dataset_record_id_deleteop import ( + DeleteDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDDeleteRequest, + DeleteDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDDeleteRequestTypedDict, + ) + from .delete_dataset_v1_observability_datasets_dataset_id_deleteop import ( + DeleteDatasetV1ObservabilityDatasetsDatasetIDDeleteRequest, + DeleteDatasetV1ObservabilityDatasetsDatasetIDDeleteRequestTypedDict, + ) + from .delete_judge_v1_observability_judges_judge_id_deleteop import ( + DeleteJudgeV1ObservabilityJudgesJudgeIDDeleteRequest, + DeleteJudgeV1ObservabilityJudgesJudgeIDDeleteRequestTypedDict, + ) from .delete_model_v1_models_model_id_deleteop import ( DeleteModelV1ModelsModelIDDeleteRequest, DeleteModelV1ModelsModelIDDeleteRequestTypedDict, ) + from .deletedatasetrecordsrequest import ( + DeleteDatasetRecordsRequest, + DeleteDatasetRecordsRequestTypedDict, + ) from .deletefileresponse import DeleteFileResponse, DeleteFileResponseTypedDict - from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict + from .deletemodelresponse import DeleteModelResponse, DeleteModelResponseTypedDict from .deltamessage import ( DeltaMessage, DeltaMessageContent, @@ -404,6 +548,12 @@ from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict from .documenttextcontent 
import DocumentTextContent, DocumentTextContentTypedDict from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict + from .embeddedresource import ( + EmbeddedResource, + EmbeddedResourceTypedDict, + Resource, + ResourceTypedDict, + ) from .embeddingdtype import EmbeddingDtype from .embeddingrequest import ( EmbeddingRequest, @@ -419,6 +569,42 @@ from .encodingformat import EncodingFormat from .entitytype import EntityType from .event import Event, EventTypedDict + from .executionconfig import ExecutionConfig, ExecutionConfigTypedDict + from .export_dataset_to_jsonl_v1_observability_datasets_dataset_id_exports_to_jsonl_getop import ( + ExportDatasetToJsonlV1ObservabilityDatasetsDatasetIDExportsToJsonlGetRequest, + ExportDatasetToJsonlV1ObservabilityDatasetsDatasetIDExportsToJsonlGetRequestTypedDict, + ) + from .exportdatasetresponse import ( + ExportDatasetResponse, + ExportDatasetResponseTypedDict, + ) + from .feedresultchatcompletioneventpreview import ( + FeedResultChatCompletionEventPreview, + FeedResultChatCompletionEventPreviewTypedDict, + ) + from .fetchcampaignstatusresponse import ( + FetchCampaignStatusResponse, + FetchCampaignStatusResponseTypedDict, + ) + from .fetchchatcompletionfieldoptionsresponse import ( + FetchChatCompletionFieldOptionsResponse, + FetchChatCompletionFieldOptionsResponseTypedDict, + Option, + OptionTypedDict, + ) + from .fetchfieldoptioncountsrequest import ( + FetchFieldOptionCountsRequest, + FetchFieldOptionCountsRequestTypedDict, + ) + from .fetchfieldoptioncountsresponse import ( + FetchFieldOptionCountsResponse, + FetchFieldOptionCountsResponseTypedDict, + ) + from .fieldgroup import FieldGroup, FieldGroupTypedDict + from .fieldoptioncountitem import ( + FieldOptionCountItem, + FieldOptionCountItemTypedDict, + ) from .file import File, FileTypedDict from .filechunk import FileChunk, FileChunkTypedDict from .filepurpose import FilePurpose @@ -443,10 +629,19 @@ FilesAPIRoutesRetrieveFileRequestTypedDict, ) from 
.files_api_routes_upload_fileop import ( + FilesAPIRoutesUploadFileFileVisibility, MultiPartBodyParams, MultiPartBodyParamsTypedDict, ) from .fileschema import FileSchema, FileSchemaTypedDict + from .filevisibility import FileVisibility + from .filtercondition import FilterCondition, FilterConditionTypedDict, Op + from .filterpayload import ( + FilterPayload, + FilterPayloadTypedDict, + Filters, + FiltersTypedDict, + ) from .fimcompletionrequest import ( FIMCompletionRequest, FIMCompletionRequestStop, @@ -494,13 +689,83 @@ from .functionname import FunctionName, FunctionNameTypedDict from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict from .functiontool import FunctionTool, FunctionToolTypedDict + from .get_campaign_by_id_v1_observability_campaigns_campaign_id_getop import ( + GetCampaignByIDV1ObservabilityCampaignsCampaignIDGetRequest, + GetCampaignByIDV1ObservabilityCampaignsCampaignIDGetRequestTypedDict, + ) + from .get_campaign_selected_events_v1_observability_campaigns_campaign_id_selected_events_getop import ( + GetCampaignSelectedEventsV1ObservabilityCampaignsCampaignIDSelectedEventsGetRequest, + GetCampaignSelectedEventsV1ObservabilityCampaignsCampaignIDSelectedEventsGetRequestTypedDict, + ) + from .get_campaign_status_by_id_v1_observability_campaigns_campaign_id_status_getop import ( + GetCampaignStatusByIDV1ObservabilityCampaignsCampaignIDStatusGetRequest, + GetCampaignStatusByIDV1ObservabilityCampaignsCampaignIDStatusGetRequestTypedDict, + ) + from .get_campaigns_v1_observability_campaigns_getop import ( + GetCampaignsV1ObservabilityCampaignsGetRequest, + GetCampaignsV1ObservabilityCampaignsGetRequestTypedDict, + ) + from .get_chat_completion_event_v1_observability_chat_completion_events_event_id_getop import ( + GetChatCompletionEventV1ObservabilityChatCompletionEventsEventIDGetRequest, + GetChatCompletionEventV1ObservabilityChatCompletionEventsEventIDGetRequestTypedDict, + ) + from 
.get_chat_completion_events_v1_observability_chat_completion_events_search_postop import ( + GetChatCompletionEventsV1ObservabilityChatCompletionEventsSearchPostRequest, + GetChatCompletionEventsV1ObservabilityChatCompletionEventsSearchPostRequestTypedDict, + ) + from .get_chat_completion_field_options_counts_v1_observability_chat_completion_fields_field_name_options_counts_postop import ( + GetChatCompletionFieldOptionsCountsV1ObservabilityChatCompletionFieldsFieldNameOptionsCountsPostRequest, + GetChatCompletionFieldOptionsCountsV1ObservabilityChatCompletionFieldsFieldNameOptionsCountsPostRequestTypedDict, + ) + from .get_chat_completion_field_options_v1_observability_chat_completion_fields_field_name_options_getop import ( + GetChatCompletionFieldOptionsV1ObservabilityChatCompletionFieldsFieldNameOptionsGetRequest, + GetChatCompletionFieldOptionsV1ObservabilityChatCompletionFieldsFieldNameOptionsGetRequestTypedDict, + Operator, + ) + from .get_dataset_by_id_v1_observability_datasets_dataset_id_getop import ( + GetDatasetByIDV1ObservabilityDatasetsDatasetIDGetRequest, + GetDatasetByIDV1ObservabilityDatasetsDatasetIDGetRequestTypedDict, + ) + from .get_dataset_import_task_v1_observability_datasets_dataset_id_tasks_task_id_getop import ( + GetDatasetImportTaskV1ObservabilityDatasetsDatasetIDTasksTaskIDGetRequest, + GetDatasetImportTaskV1ObservabilityDatasetsDatasetIDTasksTaskIDGetRequestTypedDict, + ) + from .get_dataset_import_tasks_v1_observability_datasets_dataset_id_tasks_getop import ( + GetDatasetImportTasksV1ObservabilityDatasetsDatasetIDTasksGetRequest, + GetDatasetImportTasksV1ObservabilityDatasetsDatasetIDTasksGetRequestTypedDict, + ) + from .get_dataset_record_v1_observability_dataset_records_dataset_record_id_getop import ( + GetDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDGetRequest, + GetDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDGetRequestTypedDict, + ) + from 
.get_dataset_records_v1_observability_datasets_dataset_id_records_getop import ( + GetDatasetRecordsV1ObservabilityDatasetsDatasetIDRecordsGetRequest, + GetDatasetRecordsV1ObservabilityDatasetsDatasetIDRecordsGetRequestTypedDict, + ) + from .get_datasets_v1_observability_datasets_getop import ( + GetDatasetsV1ObservabilityDatasetsGetRequest, + GetDatasetsV1ObservabilityDatasetsGetRequestTypedDict, + ) + from .get_judge_by_id_v1_observability_judges_judge_id_getop import ( + GetJudgeByIDV1ObservabilityJudgesJudgeIDGetRequest, + GetJudgeByIDV1ObservabilityJudgesJudgeIDGetRequestTypedDict, + ) + from .get_judges_v1_observability_judges_getop import ( + GetJudgesV1ObservabilityJudgesGetRequest, + GetJudgesV1ObservabilityJudgesGetRequestTypedDict, + ) + from .get_similar_chat_completion_events_v1_observability_chat_completion_events_event_id_similar_events_getop import ( + GetSimilarChatCompletionEventsV1ObservabilityChatCompletionEventsEventIDSimilarEventsGetRequest, + GetSimilarChatCompletionEventsV1ObservabilityChatCompletionEventsEventIDSimilarEventsGetRequestTypedDict, + ) from .getfileresponse import GetFileResponse, GetFileResponseTypedDict from .getsignedurlresponse import ( GetSignedURLResponse, GetSignedURLResponseTypedDict, ) from .githubrepository import GithubRepository, GithubRepositoryTypedDict - from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict + from .guardrailconfig import GuardrailConfig, GuardrailConfigTypedDict + from .imagecontent import ImageContent, ImageContentTypedDict from .imagedetail import ImageDetail from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict from .imageurl import ImageURL, ImageURLTypedDict @@ -510,6 +775,26 @@ ImageURLUnion, ImageURLUnionTypedDict, ) + from .importdatasetfromcampaignrequest import ( + ImportDatasetFromCampaignRequest, + ImportDatasetFromCampaignRequestTypedDict, + ) + from .importdatasetfromdatasetrequest import ( + ImportDatasetFromDatasetRequest, + 
ImportDatasetFromDatasetRequestTypedDict, + ) + from .importdatasetfromexplorerrequest import ( + ImportDatasetFromExplorerRequest, + ImportDatasetFromExplorerRequestTypedDict, + ) + from .importdatasetfromfilerequest import ( + ImportDatasetFromFileRequest, + ImportDatasetFromFileRequestTypedDict, + ) + from .importdatasetfromplaygroundrequest import ( + ImportDatasetFromPlaygroundRequest, + ImportDatasetFromPlaygroundRequestTypedDict, + ) from .inputentries import InputEntries, InputEntriesTypedDict from .inputs import Inputs, InputsTypedDict from .instructrequest import ( @@ -581,6 +866,43 @@ UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse, ) from .jsonschema import JSONSchema, JSONSchemaTypedDict + from .judge import ( + Judge, + JudgeOutputUnion, + JudgeOutputUnionTypedDict, + JudgeTypedDict, + UnknownJudgeOutputUnion, + ) + from .judge_chat_completion_event_v1_observability_chat_completion_events_event_id_live_judging_postop import ( + JudgeChatCompletionEventV1ObservabilityChatCompletionEventsEventIDLiveJudgingPostRequest, + JudgeChatCompletionEventV1ObservabilityChatCompletionEventsEventIDLiveJudgingPostRequestTypedDict, + ) + from .judge_dataset_record_v1_observability_dataset_records_dataset_record_id_live_judging_postop import ( + JudgeDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDLiveJudgingPostRequest, + JudgeDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDLiveJudgingPostRequestTypedDict, + ) + from .judgechatcompletioneventrequest import ( + JudgeChatCompletionEventRequest, + JudgeChatCompletionEventRequestTypedDict, + ) + from .judgeclassificationoutput import ( + JudgeClassificationOutput, + JudgeClassificationOutputTypedDict, + ) + from .judgeclassificationoutputoption import ( + JudgeClassificationOutputOption, + JudgeClassificationOutputOptionTypedDict, + ) + from .judgedatasetrecordrequest import ( + JudgeDatasetRecordRequest, + JudgeDatasetRecordRequestTypedDict, + ) + from .judgeoutput import Answer, 
AnswerTypedDict, JudgeOutput, JudgeOutputTypedDict + from .judgeoutputtype import JudgeOutputType + from .judgeregressionoutput import ( + JudgeRegressionOutput, + JudgeRegressionOutputTypedDict, + ) from .legacyjobmetadata import LegacyJobMetadata, LegacyJobMetadataTypedDict from .libraries_delete_v1op import ( LibrariesDeleteV1Request, @@ -649,10 +971,38 @@ LibrariesUpdateV1RequestTypedDict, ) from .library import Library, LibraryTypedDict + from .list_models_v1_models_getop import ( + ListModelsV1ModelsGetRequest, + ListModelsV1ModelsGetRequestTypedDict, + ) from .listbatchjobsresponse import ( ListBatchJobsResponse, ListBatchJobsResponseTypedDict, ) + from .listcampaignselectedeventsresponse import ( + ListCampaignSelectedEventsResponse, + ListCampaignSelectedEventsResponseTypedDict, + ) + from .listcampaignsresponse import ( + ListCampaignsResponse, + ListCampaignsResponseTypedDict, + ) + from .listchatcompletionfieldsresponse import ( + ListChatCompletionFieldsResponse, + ListChatCompletionFieldsResponseTypedDict, + ) + from .listdatasetimporttasksresponse import ( + ListDatasetImportTasksResponse, + ListDatasetImportTasksResponseTypedDict, + ) + from .listdatasetrecordsresponse import ( + ListDatasetRecordsResponse, + ListDatasetRecordsResponseTypedDict, + ) + from .listdatasetsresponse import ( + ListDatasetsResponse, + ListDatasetsResponseTypedDict, + ) from .listdocumentsresponse import ( ListDocumentsResponse, ListDocumentsResponseTypedDict, @@ -665,11 +1015,13 @@ ListFineTuningJobsResponseTypedDict, UnknownListFineTuningJobsResponseData, ) + from .listjudgesresponse import ListJudgesResponse, ListJudgesResponseTypedDict from .listlibrariesresponse import ( ListLibrariesResponse, ListLibrariesResponseTypedDict, ) - from .listsharingout import ListSharingOut, ListSharingOutTypedDict + from .listsharingresponse import ListSharingResponse, ListSharingResponseTypedDict + from .mcpservericon import MCPServerIcon, MCPServerIconTypedDict from .messageentries 
import MessageEntries, MessageEntriesTypedDict from .messageinputcontentchunks import ( MessageInputContentChunks, @@ -698,6 +1050,7 @@ MessageOutputEventContentTypedDict, MessageOutputEventTypedDict, ) + from .messageresponse import MessageResponse, MessageResponseTypedDict from .metric import Metric, MetricTypedDict from .mistralpromptmode import MistralPromptMode from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict @@ -715,8 +1068,22 @@ ModelListTypedDict, UnknownModelListData, ) + from .moderationllmv1action import ModerationLlmv1Action + from .moderationllmv1categorythresholds import ( + ModerationLlmv1CategoryThresholds, + ModerationLlmv1CategoryThresholdsTypedDict, + ) + from .moderationllmv1config import ( + ModerationLlmv1Config, + ModerationLlmv1ConfigTypedDict, + ) from .moderationobject import ModerationObject, ModerationObjectTypedDict from .moderationresponse import ModerationResponse, ModerationResponseTypedDict + from .observabilityerrorcode import ObservabilityErrorCode + from .observabilityerrordetail import ( + ObservabilityErrorDetail, + ObservabilityErrorDetailTypedDict, + ) from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict @@ -731,9 +1098,55 @@ from .ocrtableobject import Format, OCRTableObject, OCRTableObjectTypedDict from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict + from .paginatedconnectors import PaginatedConnectors, PaginatedConnectorsTypedDict + from .paginatedresultcampaignpreview import ( + PaginatedResultCampaignPreview, + PaginatedResultCampaignPreviewTypedDict, + ) + from .paginatedresultchatcompletioneventpreview import ( + PaginatedResultChatCompletionEventPreview, + PaginatedResultChatCompletionEventPreviewTypedDict, + ) + from 
.paginatedresultdatasetimporttask import ( + PaginatedResultDatasetImportTask, + PaginatedResultDatasetImportTaskTypedDict, + ) + from .paginatedresultdatasetpreview import ( + PaginatedResultDatasetPreview, + PaginatedResultDatasetPreviewTypedDict, + ) + from .paginatedresultdatasetrecord import ( + PaginatedResultDatasetRecord, + PaginatedResultDatasetRecordTypedDict, + ) + from .paginatedresultjudgepreview import ( + PaginatedResultJudgePreview, + PaginatedResultJudgePreviewTypedDict, + ) from .paginationinfo import PaginationInfo, PaginationInfoTypedDict + from .paginationresponse import PaginationResponse, PaginationResponseTypedDict + from .post_dataset_records_from_campaign_v1_observability_datasets_dataset_id_imports_from_campaign_postop import ( + PostDatasetRecordsFromCampaignV1ObservabilityDatasetsDatasetIDImportsFromCampaignPostRequest, + PostDatasetRecordsFromCampaignV1ObservabilityDatasetsDatasetIDImportsFromCampaignPostRequestTypedDict, + ) + from .post_dataset_records_from_dataset_v1_observability_datasets_dataset_id_imports_from_dataset_postop import ( + PostDatasetRecordsFromDatasetV1ObservabilityDatasetsDatasetIDImportsFromDatasetPostRequest, + PostDatasetRecordsFromDatasetV1ObservabilityDatasetsDatasetIDImportsFromDatasetPostRequestTypedDict, + ) + from .post_dataset_records_from_explorer_v1_observability_datasets_dataset_id_imports_from_explorer_postop import ( + PostDatasetRecordsFromExplorerV1ObservabilityDatasetsDatasetIDImportsFromExplorerPostRequest, + PostDatasetRecordsFromExplorerV1ObservabilityDatasetsDatasetIDImportsFromExplorerPostRequestTypedDict, + ) + from .post_dataset_records_from_file_v1_observability_datasets_dataset_id_imports_from_file_postop import ( + PostDatasetRecordsFromFileV1ObservabilityDatasetsDatasetIDImportsFromFilePostRequest, + PostDatasetRecordsFromFileV1ObservabilityDatasetsDatasetIDImportsFromFilePostRequestTypedDict, + ) + from 
.post_dataset_records_from_playground_v1_observability_datasets_dataset_id_imports_from_playground_postop import ( + PostDatasetRecordsFromPlaygroundV1ObservabilityDatasetsDatasetIDImportsFromPlaygroundPostRequest, + PostDatasetRecordsFromPlaygroundV1ObservabilityDatasetsDatasetIDImportsFromPlaygroundPostRequestTypedDict, + ) from .prediction import Prediction, PredictionTypedDict - from .processingstatusout import ProcessingStatusOut, ProcessingStatusOutTypedDict + from .processingstatus import ProcessingStatus, ProcessingStatusTypedDict from .processstatus import ProcessStatus from .realtimetranscriptionerror import ( RealtimeTranscriptionError, @@ -779,6 +1192,8 @@ ) from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .requestsource import RequestSource + from .resourcelink import ResourceLink, ResourceLinkTypedDict + from .resourcevisibility import ResourceVisibility from .responsedoneevent import ResponseDoneEvent, ResponseDoneEventTypedDict from .responseerrorevent import ResponseErrorEvent, ResponseErrorEventTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict @@ -795,11 +1210,27 @@ UnknownResponseRetrieveModelV1ModelsModelIDGet, ) from .sampletype import SampleType + from .searchchatcompletioneventidsrequest import ( + SearchChatCompletionEventIdsRequest, + SearchChatCompletionEventIdsRequestTypedDict, + ) + from .searchchatcompletioneventidsresponse import ( + SearchChatCompletionEventIdsResponse, + SearchChatCompletionEventIdsResponseTypedDict, + ) + from .searchchatcompletioneventsrequest import ( + SearchChatCompletionEventsRequest, + SearchChatCompletionEventsRequestTypedDict, + ) + from .searchchatcompletioneventsresponse import ( + SearchChatCompletionEventsResponse, + SearchChatCompletionEventsResponseTypedDict, + ) from .security import Security, SecurityTypedDict from .shareenum import ShareEnum + from .sharing import Sharing, SharingTypedDict from .sharingdelete import SharingDelete, 
SharingDeleteTypedDict - from .sharingin import SharingIn, SharingInTypedDict - from .sharingout import SharingOut, SharingOutTypedDict + from .sharingrequest import SharingRequest, SharingRequestTypedDict from .source import Source from .ssetypes import SSETypes from .systemmessage import ( @@ -813,12 +1244,12 @@ SystemMessageContentChunksTypedDict, ) from .textchunk import TextChunk, TextChunkTypedDict - from .thinkchunk import ( - ThinkChunk, - ThinkChunkThinking, - ThinkChunkThinkingTypedDict, - ThinkChunkTypedDict, + from .textcontent import TextContent, TextContentTypedDict + from .textresourcecontents import ( + TextResourceContents, + TextResourceContentsTypedDict, ) + from .thinkchunk import ThinkChunk, ThinkChunkTypedDict, Thinking, ThinkingTypedDict from .timestampgranularity import TimestampGranularity from .tool import Tool, ToolTypedDict from .toolcall import ToolCall, ToolCallTypedDict @@ -910,18 +1341,56 @@ UnarchiveModelResponse, UnarchiveModelResponseTypedDict, ) + from .update_dataset_record_payload_v1_observability_dataset_records_dataset_record_id_payload_putop import ( + UpdateDatasetRecordPayloadV1ObservabilityDatasetRecordsDatasetRecordIDPayloadPutRequest, + UpdateDatasetRecordPayloadV1ObservabilityDatasetRecordsDatasetRecordIDPayloadPutRequestTypedDict, + ) + from .update_dataset_record_properties_v1_observability_dataset_records_dataset_record_id_properties_putop import ( + UpdateDatasetRecordPropertiesV1ObservabilityDatasetRecordsDatasetRecordIDPropertiesPutRequest, + UpdateDatasetRecordPropertiesV1ObservabilityDatasetRecordsDatasetRecordIDPropertiesPutRequestTypedDict, + ) + from .update_dataset_v1_observability_datasets_dataset_id_patchop import ( + UpdateDatasetV1ObservabilityDatasetsDatasetIDPatchRequest, + UpdateDatasetV1ObservabilityDatasetsDatasetIDPatchRequestTypedDict, + ) + from .update_judge_v1_observability_judges_judge_id_putop import ( + UpdateJudgeV1ObservabilityJudgesJudgeIDPutRequest, + 
UpdateJudgeV1ObservabilityJudgesJudgeIDPutRequestTypedDict, + ) from .updateagentrequest import ( UpdateAgentRequest, UpdateAgentRequestTool, UpdateAgentRequestToolTypedDict, UpdateAgentRequestTypedDict, ) + from .updateconnectorrequest import ( + UpdateConnectorRequest, + UpdateConnectorRequestTypedDict, + ) + from .updatedatasetrecordpayloadrequest import ( + UpdateDatasetRecordPayloadRequest, + UpdateDatasetRecordPayloadRequestTypedDict, + ) + from .updatedatasetrecordpropertiesrequest import ( + UpdateDatasetRecordPropertiesRequest, + UpdateDatasetRecordPropertiesRequestTypedDict, + ) + from .updatedatasetrequest import ( + UpdateDatasetRequest, + UpdateDatasetRequestTypedDict, + ) from .updatedocumentrequest import ( Attributes, AttributesTypedDict, UpdateDocumentRequest, UpdateDocumentRequestTypedDict, ) + from .updatejudgerequest import ( + UpdateJudgeRequest, + UpdateJudgeRequestOutput, + UpdateJudgeRequestOutputTypedDict, + UpdateJudgeRequestTypedDict, + ) from .updatelibraryrequest import ( UpdateLibraryRequest, UpdateLibraryRequestTypedDict, @@ -935,6 +1404,8 @@ UserMessageTypedDict, ) from .validationerror import ( + Context, + ContextTypedDict, Loc, LocTypedDict, ValidationError, @@ -949,7 +1420,12 @@ WebSearchPremiumTool, WebSearchPremiumToolTypedDict, ) - from .websearchtool import WebSearchTool, WebSearchToolTypedDict + from .websearchtool import ( + WebSearchTool, + WebSearchToolTypedDict, + ) # Pydantic models with forward references +FilterGroup.model_rebuild() + __all__ = [ "APIEndpoint", @@ -1027,6 +1503,12 @@ "AgentsCompletionStreamRequestToolChoice", "AgentsCompletionStreamRequestToolChoiceTypedDict", "AgentsCompletionStreamRequestTypedDict", + "And", + "AndTypedDict", + "Annotations", + "AnnotationsTypedDict", + "Answer", + "AnswerTypedDict", "ArchiveModelResponse", "ArchiveModelResponseTypedDict", "Arguments", @@ -1037,8 +1519,11 @@ "AssistantMessageTypedDict", "Attributes", "AttributesTypedDict", + "Audience", "AudioChunk", 
"AudioChunkTypedDict", + "AudioContent", + "AudioContentTypedDict", "AudioEncoding", "AudioFormat", "AudioFormatTypedDict", @@ -1046,8 +1531,19 @@ "AudioTranscriptionRequestStream", "AudioTranscriptionRequestStreamTypedDict", "AudioTranscriptionRequestTypedDict", + "AudioURL", + "AudioURLChunk", + "AudioURLChunkTypedDict", + "AudioURLTypedDict", + "AudioURLUnion", + "AudioURLUnionTypedDict", + "AuthData", + "AuthDataTypedDict", + "BaseFieldDefinition", + "BaseFieldDefinitionTypedDict", "BaseModelCard", "BaseModelCardTypedDict", + "BaseTaskStatus", "BatchError", "BatchErrorTypedDict", "BatchJob", @@ -1055,12 +1551,24 @@ "BatchJobTypedDict", "BatchRequest", "BatchRequestTypedDict", + "BlobResourceContents", + "BlobResourceContentsTypedDict", "BuiltInConnectors", + "Campaign", + "CampaignTypedDict", "ChatClassificationRequest", "ChatClassificationRequestTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", + "ChatCompletionEvent", + "ChatCompletionEventExtraFields", + "ChatCompletionEventExtraFieldsTypedDict", + "ChatCompletionEventPreview", + "ChatCompletionEventPreviewExtraFields", + "ChatCompletionEventPreviewExtraFieldsTypedDict", + "ChatCompletionEventPreviewTypedDict", + "ChatCompletionEventTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessage", "ChatCompletionRequestMessageTypedDict", @@ -1087,6 +1595,8 @@ "ChatModerationRequestInputs3", "ChatModerationRequestInputs3TypedDict", "ChatModerationRequestTypedDict", + "ChatTranscriptionEvent", + "ChatTranscriptionEventTypedDict", "Checkpoint", "CheckpointTypedDict", "ClassificationRequest", @@ -1147,8 +1657,38 @@ "CompletionTrainingParameters", "CompletionTrainingParametersTypedDict", "Confirmation", + "Connector", + "ConnectorCallToolRequest", + "ConnectorCallToolRequestTypedDict", + "ConnectorCallToolV1Request", + "ConnectorCallToolV1RequestTypedDict", + "ConnectorDeleteV1Request", + "ConnectorDeleteV1RequestTypedDict", + "ConnectorGetV1Request", + 
"ConnectorGetV1RequestTypedDict", + "ConnectorListV1Request", + "ConnectorListV1RequestTypedDict", + "ConnectorTool", + "ConnectorToolCallMetadata", + "ConnectorToolCallMetadataTypedDict", + "ConnectorToolCallResponse", + "ConnectorToolCallResponseContent", + "ConnectorToolCallResponseContentTypedDict", + "ConnectorToolCallResponseTypedDict", + "ConnectorToolLocale", + "ConnectorToolLocaleTypedDict", + "ConnectorToolResultMetadata", + "ConnectorToolResultMetadataTypedDict", + "ConnectorToolTypedDict", + "ConnectorTypedDict", + "ConnectorUpdateV1Request", + "ConnectorUpdateV1RequestTypedDict", + "ConnectorsQueryFilters", + "ConnectorsQueryFiltersTypedDict", "ContentChunk", "ContentChunkTypedDict", + "Context", + "ContextTypedDict", "ConversationAppendRequest", "ConversationAppendRequestHandoffExecution", "ConversationAppendRequestTypedDict", @@ -1165,6 +1705,8 @@ "ConversationInputsTypedDict", "ConversationMessages", "ConversationMessagesTypedDict", + "ConversationPayload", + "ConversationPayloadTypedDict", "ConversationRequest", "ConversationRequestAgentVersion", "ConversationRequestAgentVersionTypedDict", @@ -1173,6 +1715,8 @@ "ConversationRequestToolTypedDict", "ConversationRequestTypedDict", "ConversationResponse", + "ConversationResponseOutput", + "ConversationResponseOutputTypedDict", "ConversationResponseTypedDict", "ConversationRestartRequest", "ConversationRestartRequestAgentVersion", @@ -1184,6 +1728,7 @@ "ConversationRestartStreamRequestAgentVersionTypedDict", "ConversationRestartStreamRequestHandoffExecution", "ConversationRestartStreamRequestTypedDict", + "ConversationSource", "ConversationStreamRequest", "ConversationStreamRequestAgentVersion", "ConversationStreamRequestAgentVersionTypedDict", @@ -1191,10 +1736,6 @@ "ConversationStreamRequestTool", "ConversationStreamRequestToolTypedDict", "ConversationStreamRequestTypedDict", - "ConversationThinkChunk", - "ConversationThinkChunkThinking", - "ConversationThinkChunkThinkingTypedDict", - 
"ConversationThinkChunkTypedDict", "ConversationUsageInfo", "ConversationUsageInfoTypedDict", "CreateAgentRequest", @@ -1203,6 +1744,16 @@ "CreateAgentRequestTypedDict", "CreateBatchJobRequest", "CreateBatchJobRequestTypedDict", + "CreateCampaignRequest", + "CreateCampaignRequestTypedDict", + "CreateConnectorRequest", + "CreateConnectorRequestTypedDict", + "CreateDatasetRecordRequest", + "CreateDatasetRecordRequestTypedDict", + "CreateDatasetRecordV1ObservabilityDatasetsDatasetIDRecordsPostRequest", + "CreateDatasetRecordV1ObservabilityDatasetsDatasetIDRecordsPostRequestTypedDict", + "CreateDatasetRequest", + "CreateDatasetRequestTypedDict", "CreateFileResponse", "CreateFileResponseTypedDict", "CreateFineTuningJobRequest", @@ -1211,12 +1762,36 @@ "CreateFineTuningJobRequestRepository", "CreateFineTuningJobRequestRepositoryTypedDict", "CreateFineTuningJobRequestTypedDict", + "CreateGithubRepositoryRequest", + "CreateGithubRepositoryRequestTypedDict", + "CreateJudgeRequest", + "CreateJudgeRequestOutput", + "CreateJudgeRequestOutputTypedDict", + "CreateJudgeRequestTypedDict", "CreateLibraryRequest", "CreateLibraryRequestTypedDict", + "Dataset", + "DatasetImportTask", + "DatasetImportTaskTypedDict", + "DatasetPreview", + "DatasetPreviewTypedDict", + "DatasetRecord", + "DatasetRecordTypedDict", + "DatasetTypedDict", + "DeleteCampaignV1ObservabilityCampaignsCampaignIDDeleteRequest", + "DeleteCampaignV1ObservabilityCampaignsCampaignIDDeleteRequestTypedDict", + "DeleteDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDDeleteRequest", + "DeleteDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDDeleteRequestTypedDict", + "DeleteDatasetRecordsRequest", + "DeleteDatasetRecordsRequestTypedDict", + "DeleteDatasetV1ObservabilityDatasetsDatasetIDDeleteRequest", + "DeleteDatasetV1ObservabilityDatasetsDatasetIDDeleteRequestTypedDict", "DeleteFileResponse", "DeleteFileResponseTypedDict", - "DeleteModelOut", - "DeleteModelOutTypedDict", + 
"DeleteJudgeV1ObservabilityJudgesJudgeIDDeleteRequest", + "DeleteJudgeV1ObservabilityJudgesJudgeIDDeleteRequestTypedDict", + "DeleteModelResponse", + "DeleteModelResponseTypedDict", "DeleteModelV1ModelsModelIDDeleteRequest", "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", "DeltaMessage", @@ -1235,6 +1810,8 @@ "DocumentUnionTypedDict", "DocumentUpload", "DocumentUploadTypedDict", + "EmbeddedResource", + "EmbeddedResourceTypedDict", "EmbeddingDtype", "EmbeddingRequest", "EmbeddingRequestInputs", @@ -1250,6 +1827,12 @@ "EntryTypedDict", "Event", "EventTypedDict", + "ExecutionConfig", + "ExecutionConfigTypedDict", + "ExportDatasetResponse", + "ExportDatasetResponseTypedDict", + "ExportDatasetToJsonlV1ObservabilityDatasetsDatasetIDExportsToJsonlGetRequest", + "ExportDatasetToJsonlV1ObservabilityDatasetsDatasetIDExportsToJsonlGetRequestTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", @@ -1263,6 +1846,20 @@ "FTClassifierLossFunction", "FTModelCard", "FTModelCardTypedDict", + "FeedResultChatCompletionEventPreview", + "FeedResultChatCompletionEventPreviewTypedDict", + "FetchCampaignStatusResponse", + "FetchCampaignStatusResponseTypedDict", + "FetchChatCompletionFieldOptionsResponse", + "FetchChatCompletionFieldOptionsResponseTypedDict", + "FetchFieldOptionCountsRequest", + "FetchFieldOptionCountsRequestTypedDict", + "FetchFieldOptionCountsResponse", + "FetchFieldOptionCountsResponseTypedDict", + "FieldGroup", + "FieldGroupTypedDict", + "FieldOptionCountItem", + "FieldOptionCountItemTypedDict", "File", "FileChunk", "FileChunkTypedDict", @@ -1270,6 +1867,7 @@ "FileSchema", "FileSchemaTypedDict", "FileTypedDict", + "FileVisibility", "FilesAPIRoutesDeleteFileRequest", "FilesAPIRoutesDeleteFileRequestTypedDict", "FilesAPIRoutesDownloadFileRequest", @@ -1280,6 +1878,15 @@ "FilesAPIRoutesListFilesRequestTypedDict", "FilesAPIRoutesRetrieveFileRequest", "FilesAPIRoutesRetrieveFileRequestTypedDict", + 
"FilesAPIRoutesUploadFileFileVisibility", + "FilterCondition", + "FilterConditionTypedDict", + "FilterGroup", + "FilterGroupTypedDict", + "FilterPayload", + "FilterPayloadTypedDict", + "Filters", + "FiltersTypedDict", "FineTuneableModelType", "FineTunedModelCapabilities", "FineTunedModelCapabilitiesTypedDict", @@ -1302,16 +1909,54 @@ "FunctionTool", "FunctionToolTypedDict", "FunctionTypedDict", + "GetCampaignByIDV1ObservabilityCampaignsCampaignIDGetRequest", + "GetCampaignByIDV1ObservabilityCampaignsCampaignIDGetRequestTypedDict", + "GetCampaignSelectedEventsV1ObservabilityCampaignsCampaignIDSelectedEventsGetRequest", + "GetCampaignSelectedEventsV1ObservabilityCampaignsCampaignIDSelectedEventsGetRequestTypedDict", + "GetCampaignStatusByIDV1ObservabilityCampaignsCampaignIDStatusGetRequest", + "GetCampaignStatusByIDV1ObservabilityCampaignsCampaignIDStatusGetRequestTypedDict", + "GetCampaignsV1ObservabilityCampaignsGetRequest", + "GetCampaignsV1ObservabilityCampaignsGetRequestTypedDict", + "GetChatCompletionEventV1ObservabilityChatCompletionEventsEventIDGetRequest", + "GetChatCompletionEventV1ObservabilityChatCompletionEventsEventIDGetRequestTypedDict", + "GetChatCompletionEventsV1ObservabilityChatCompletionEventsSearchPostRequest", + "GetChatCompletionEventsV1ObservabilityChatCompletionEventsSearchPostRequestTypedDict", + "GetChatCompletionFieldOptionsCountsV1ObservabilityChatCompletionFieldsFieldNameOptionsCountsPostRequest", + "GetChatCompletionFieldOptionsCountsV1ObservabilityChatCompletionFieldsFieldNameOptionsCountsPostRequestTypedDict", + "GetChatCompletionFieldOptionsV1ObservabilityChatCompletionFieldsFieldNameOptionsGetRequest", + "GetChatCompletionFieldOptionsV1ObservabilityChatCompletionFieldsFieldNameOptionsGetRequestTypedDict", + "GetDatasetByIDV1ObservabilityDatasetsDatasetIDGetRequest", + "GetDatasetByIDV1ObservabilityDatasetsDatasetIDGetRequestTypedDict", + "GetDatasetImportTaskV1ObservabilityDatasetsDatasetIDTasksTaskIDGetRequest", + 
"GetDatasetImportTaskV1ObservabilityDatasetsDatasetIDTasksTaskIDGetRequestTypedDict", + "GetDatasetImportTasksV1ObservabilityDatasetsDatasetIDTasksGetRequest", + "GetDatasetImportTasksV1ObservabilityDatasetsDatasetIDTasksGetRequestTypedDict", + "GetDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDGetRequest", + "GetDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDGetRequestTypedDict", + "GetDatasetRecordsV1ObservabilityDatasetsDatasetIDRecordsGetRequest", + "GetDatasetRecordsV1ObservabilityDatasetsDatasetIDRecordsGetRequestTypedDict", + "GetDatasetsV1ObservabilityDatasetsGetRequest", + "GetDatasetsV1ObservabilityDatasetsGetRequestTypedDict", "GetFileResponse", "GetFileResponseTypedDict", + "GetJudgeByIDV1ObservabilityJudgesJudgeIDGetRequest", + "GetJudgeByIDV1ObservabilityJudgesJudgeIDGetRequestTypedDict", + "GetJudgesV1ObservabilityJudgesGetRequest", + "GetJudgesV1ObservabilityJudgesGetRequestTypedDict", "GetSignedURLResponse", "GetSignedURLResponseTypedDict", + "GetSimilarChatCompletionEventsV1ObservabilityChatCompletionEventsEventIDSimilarEventsGetRequest", + "GetSimilarChatCompletionEventsV1ObservabilityChatCompletionEventsEventIDSimilarEventsGetRequestTypedDict", "GithubRepository", - "GithubRepositoryIn", - "GithubRepositoryInTypedDict", "GithubRepositoryTypedDict", + "Guardrail", + "GuardrailConfig", + "GuardrailConfigTypedDict", + "GuardrailTypedDict", "Hyperparameters", "HyperparametersTypedDict", + "ImageContent", + "ImageContentTypedDict", "ImageDetail", "ImageGenerationTool", "ImageGenerationToolTypedDict", @@ -1321,6 +1966,16 @@ "ImageURLTypedDict", "ImageURLUnion", "ImageURLUnionTypedDict", + "ImportDatasetFromCampaignRequest", + "ImportDatasetFromCampaignRequestTypedDict", + "ImportDatasetFromDatasetRequest", + "ImportDatasetFromDatasetRequestTypedDict", + "ImportDatasetFromExplorerRequest", + "ImportDatasetFromExplorerRequestTypedDict", + "ImportDatasetFromFileRequest", + "ImportDatasetFromFileRequestTypedDict", + 
"ImportDatasetFromPlaygroundRequest", + "ImportDatasetFromPlaygroundRequestTypedDict", "InputEntries", "InputEntriesTypedDict", "Inputs", @@ -1364,6 +2019,27 @@ "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse", "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", + "Judge", + "JudgeChatCompletionEventRequest", + "JudgeChatCompletionEventRequestTypedDict", + "JudgeChatCompletionEventV1ObservabilityChatCompletionEventsEventIDLiveJudgingPostRequest", + "JudgeChatCompletionEventV1ObservabilityChatCompletionEventsEventIDLiveJudgingPostRequestTypedDict", + "JudgeClassificationOutput", + "JudgeClassificationOutputOption", + "JudgeClassificationOutputOptionTypedDict", + "JudgeClassificationOutputTypedDict", + "JudgeDatasetRecordRequest", + "JudgeDatasetRecordRequestTypedDict", + "JudgeDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDLiveJudgingPostRequest", + "JudgeDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDLiveJudgingPostRequestTypedDict", + "JudgeOutput", + "JudgeOutputType", + "JudgeOutputTypedDict", + "JudgeOutputUnion", + "JudgeOutputUnionTypedDict", + "JudgeRegressionOutput", + "JudgeRegressionOutputTypedDict", + "JudgeTypedDict", "LegacyJobMetadata", "LegacyJobMetadataTypedDict", "LibrariesDeleteV1Request", @@ -1402,6 +2078,18 @@ "LibraryTypedDict", "ListBatchJobsResponse", "ListBatchJobsResponseTypedDict", + "ListCampaignSelectedEventsResponse", + "ListCampaignSelectedEventsResponseTypedDict", + "ListCampaignsResponse", + "ListCampaignsResponseTypedDict", + "ListChatCompletionFieldsResponse", + "ListChatCompletionFieldsResponseTypedDict", + "ListDatasetImportTasksResponse", + "ListDatasetImportTasksResponseTypedDict", + "ListDatasetRecordsResponse", + "ListDatasetRecordsResponseTypedDict", + "ListDatasetsResponse", + "ListDatasetsResponseTypedDict", "ListDocumentsResponse", "ListDocumentsResponseTypedDict", "ListFilesResponse", @@ -1410,12 +2098,18 @@ 
"ListFineTuningJobsResponseData", "ListFineTuningJobsResponseDataTypedDict", "ListFineTuningJobsResponseTypedDict", + "ListJudgesResponse", + "ListJudgesResponseTypedDict", "ListLibrariesResponse", "ListLibrariesResponseTypedDict", - "ListSharingOut", - "ListSharingOutTypedDict", + "ListModelsV1ModelsGetRequest", + "ListModelsV1ModelsGetRequestTypedDict", + "ListSharingResponse", + "ListSharingResponseTypedDict", "Loc", "LocTypedDict", + "MCPServerIcon", + "MCPServerIconTypedDict", "MessageEntries", "MessageEntriesTypedDict", "MessageInputContentChunks", @@ -1434,6 +2128,8 @@ "MessageOutputEventContent", "MessageOutputEventContentTypedDict", "MessageOutputEventTypedDict", + "MessageResponse", + "MessageResponseTypedDict", "Metric", "MetricTypedDict", "MistralPromptMode", @@ -1447,6 +2143,11 @@ "ModelListData", "ModelListDataTypedDict", "ModelListTypedDict", + "ModerationLlmv1Action", + "ModerationLlmv1CategoryThresholds", + "ModerationLlmv1CategoryThresholdsTypedDict", + "ModerationLlmv1Config", + "ModerationLlmv1ConfigTypedDict", "ModerationObject", "ModerationObjectTypedDict", "ModerationResponse", @@ -1467,18 +2168,51 @@ "OCRTableObjectTypedDict", "OCRUsageInfo", "OCRUsageInfoTypedDict", + "ObservabilityErrorCode", + "ObservabilityErrorDetail", + "ObservabilityErrorDetailTypedDict", + "Op", + "Operator", + "Option", + "OptionTypedDict", + "Or", + "OrTypedDict", "OrderBy", - "Output", "OutputContentChunks", "OutputContentChunksTypedDict", - "OutputTypedDict", + "PaginatedConnectors", + "PaginatedConnectorsTypedDict", + "PaginatedResultCampaignPreview", + "PaginatedResultCampaignPreviewTypedDict", + "PaginatedResultChatCompletionEventPreview", + "PaginatedResultChatCompletionEventPreviewTypedDict", + "PaginatedResultDatasetImportTask", + "PaginatedResultDatasetImportTaskTypedDict", + "PaginatedResultDatasetPreview", + "PaginatedResultDatasetPreviewTypedDict", + "PaginatedResultDatasetRecord", + "PaginatedResultDatasetRecordTypedDict", + 
"PaginatedResultJudgePreview", + "PaginatedResultJudgePreviewTypedDict", "PaginationInfo", "PaginationInfoTypedDict", + "PaginationResponse", + "PaginationResponseTypedDict", + "PostDatasetRecordsFromCampaignV1ObservabilityDatasetsDatasetIDImportsFromCampaignPostRequest", + "PostDatasetRecordsFromCampaignV1ObservabilityDatasetsDatasetIDImportsFromCampaignPostRequestTypedDict", + "PostDatasetRecordsFromDatasetV1ObservabilityDatasetsDatasetIDImportsFromDatasetPostRequest", + "PostDatasetRecordsFromDatasetV1ObservabilityDatasetsDatasetIDImportsFromDatasetPostRequestTypedDict", + "PostDatasetRecordsFromExplorerV1ObservabilityDatasetsDatasetIDImportsFromExplorerPostRequest", + "PostDatasetRecordsFromExplorerV1ObservabilityDatasetsDatasetIDImportsFromExplorerPostRequestTypedDict", + "PostDatasetRecordsFromFileV1ObservabilityDatasetsDatasetIDImportsFromFilePostRequest", + "PostDatasetRecordsFromFileV1ObservabilityDatasetsDatasetIDImportsFromFilePostRequestTypedDict", + "PostDatasetRecordsFromPlaygroundV1ObservabilityDatasetsDatasetIDImportsFromPlaygroundPostRequest", + "PostDatasetRecordsFromPlaygroundV1ObservabilityDatasetsDatasetIDImportsFromPlaygroundPostRequestTypedDict", "Prediction", "PredictionTypedDict", "ProcessStatus", - "ProcessingStatusOut", - "ProcessingStatusOutTypedDict", + "ProcessingStatus", + "ProcessingStatusTypedDict", "RealtimeTranscriptionError", "RealtimeTranscriptionErrorDetail", "RealtimeTranscriptionErrorDetailMessage", @@ -1504,6 +2238,11 @@ "ReferenceChunk", "ReferenceChunkTypedDict", "RequestSource", + "Resource", + "ResourceLink", + "ResourceLinkTypedDict", + "ResourceTypedDict", + "ResourceVisibility", "Response", "ResponseDoneEvent", "ResponseDoneEventTypedDict", @@ -1524,16 +2263,25 @@ "Role", "SSETypes", "SampleType", + "SearchChatCompletionEventIdsRequest", + "SearchChatCompletionEventIdsRequestTypedDict", + "SearchChatCompletionEventIdsResponse", + "SearchChatCompletionEventIdsResponseTypedDict", + "SearchChatCompletionEventsRequest", + 
"SearchChatCompletionEventsRequestTypedDict", + "SearchChatCompletionEventsResponse", + "SearchChatCompletionEventsResponseTypedDict", "Security", "SecurityTypedDict", "ShareEnum", + "Sharing", "SharingDelete", "SharingDeleteTypedDict", - "SharingIn", - "SharingInTypedDict", - "SharingOut", - "SharingOutTypedDict", + "SharingRequest", + "SharingRequestTypedDict", + "SharingTypedDict", "Source", + "SupportedOperator", "SystemMessage", "SystemMessageContent", "SystemMessageContentChunks", @@ -1543,10 +2291,14 @@ "TableFormat", "TextChunk", "TextChunkTypedDict", + "TextContent", + "TextContentTypedDict", + "TextResourceContents", + "TextResourceContentsTypedDict", "ThinkChunk", - "ThinkChunkThinking", - "ThinkChunkThinkingTypedDict", "ThinkChunkTypedDict", + "Thinking", + "ThinkingTypedDict", "TimestampGranularity", "Tool", "ToolCall", @@ -1607,6 +2359,7 @@ "TranscriptionStreamSegmentDeltaTypedDict", "TranscriptionStreamTextDelta", "TranscriptionStreamTextDeltaTypedDict", + "TypeEnum", "UnarchiveModelResponse", "UnarchiveModelResponseTypedDict", "UnknownAgentTool", @@ -1616,12 +2369,14 @@ "UnknownCompletionFineTuningJobDetailsRepository", "UnknownCompletionFineTuningJobIntegration", "UnknownCompletionFineTuningJobRepository", + "UnknownConnectorToolCallResponseContent", "UnknownContentChunk", "UnknownConversationEventsData", "UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse", "UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse", "UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse", "UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse", + "UnknownJudgeOutputUnion", "UnknownListFineTuningJobsResponseData", "UnknownModelConversationTool", "UnknownModelListData", @@ -1632,8 +2387,28 @@ "UpdateAgentRequestTool", "UpdateAgentRequestToolTypedDict", "UpdateAgentRequestTypedDict", + "UpdateConnectorRequest", + "UpdateConnectorRequestTypedDict", + "UpdateDatasetRecordPayloadRequest", + "UpdateDatasetRecordPayloadRequestTypedDict", + 
"UpdateDatasetRecordPayloadV1ObservabilityDatasetRecordsDatasetRecordIDPayloadPutRequest", + "UpdateDatasetRecordPayloadV1ObservabilityDatasetRecordsDatasetRecordIDPayloadPutRequestTypedDict", + "UpdateDatasetRecordPropertiesRequest", + "UpdateDatasetRecordPropertiesRequestTypedDict", + "UpdateDatasetRecordPropertiesV1ObservabilityDatasetRecordsDatasetRecordIDPropertiesPutRequest", + "UpdateDatasetRecordPropertiesV1ObservabilityDatasetRecordsDatasetRecordIDPropertiesPutRequestTypedDict", + "UpdateDatasetRequest", + "UpdateDatasetRequestTypedDict", + "UpdateDatasetV1ObservabilityDatasetsDatasetIDPatchRequest", + "UpdateDatasetV1ObservabilityDatasetsDatasetIDPatchRequestTypedDict", "UpdateDocumentRequest", "UpdateDocumentRequestTypedDict", + "UpdateJudgeRequest", + "UpdateJudgeRequestOutput", + "UpdateJudgeRequestOutputTypedDict", + "UpdateJudgeRequestTypedDict", + "UpdateJudgeV1ObservabilityJudgesJudgeIDPutRequest", + "UpdateJudgeV1ObservabilityJudgesJudgeIDPutRequestTypedDict", "UpdateLibraryRequest", "UpdateLibraryRequestTypedDict", "UpdateModelRequest", @@ -1734,6 +2509,9 @@ "AgentsCompletionStreamRequestToolChoice": ".agentscompletionstreamrequest", "AgentsCompletionStreamRequestToolChoiceTypedDict": ".agentscompletionstreamrequest", "AgentsCompletionStreamRequestTypedDict": ".agentscompletionstreamrequest", + "Annotations": ".annotations", + "AnnotationsTypedDict": ".annotations", + "Audience": ".annotations", "APIEndpoint": ".apiendpoint", "ArchiveModelResponse": ".archivemodelresponse", "ArchiveModelResponseTypedDict": ".archivemodelresponse", @@ -1743,6 +2521,8 @@ "AssistantMessageTypedDict": ".assistantmessage", "AudioChunk": ".audiochunk", "AudioChunkTypedDict": ".audiochunk", + "AudioContent": ".audiocontent", + "AudioContentTypedDict": ".audiocontent", "AudioEncoding": ".audioencoding", "AudioFormat": ".audioformat", "AudioFormatTypedDict": ".audioformat", @@ -1750,8 +2530,21 @@ "AudioTranscriptionRequestTypedDict": ".audiotranscriptionrequest", 
"AudioTranscriptionRequestStream": ".audiotranscriptionrequeststream", "AudioTranscriptionRequestStreamTypedDict": ".audiotranscriptionrequeststream", + "AudioURL": ".audiourl", + "AudioURLTypedDict": ".audiourl", + "AudioURLChunk": ".audiourlchunk", + "AudioURLChunkTypedDict": ".audiourlchunk", + "AudioURLUnion": ".audiourlchunk", + "AudioURLUnionTypedDict": ".audiourlchunk", + "AuthData": ".authdata", + "AuthDataTypedDict": ".authdata", + "BaseFieldDefinition": ".basefielddefinition", + "BaseFieldDefinitionTypedDict": ".basefielddefinition", + "SupportedOperator": ".basefielddefinition", + "TypeEnum": ".basefielddefinition", "BaseModelCard": ".basemodelcard", "BaseModelCardTypedDict": ".basemodelcard", + "BaseTaskStatus": ".basetaskstatus", "BatchError": ".batcherror", "BatchErrorTypedDict": ".batcherror", "BatchJob": ".batchjob", @@ -1759,12 +2552,24 @@ "BatchJobStatus": ".batchjobstatus", "BatchRequest": ".batchrequest", "BatchRequestTypedDict": ".batchrequest", + "BlobResourceContents": ".blobresourcecontents", + "BlobResourceContentsTypedDict": ".blobresourcecontents", "BuiltInConnectors": ".builtinconnectors", + "Campaign": ".campaign", + "CampaignTypedDict": ".campaign", "ChatClassificationRequest": ".chatclassificationrequest", "ChatClassificationRequestTypedDict": ".chatclassificationrequest", "ChatCompletionChoice": ".chatcompletionchoice", "ChatCompletionChoiceFinishReason": ".chatcompletionchoice", "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", + "ChatCompletionEvent": ".chatcompletionevent", + "ChatCompletionEventExtraFields": ".chatcompletionevent", + "ChatCompletionEventExtraFieldsTypedDict": ".chatcompletionevent", + "ChatCompletionEventTypedDict": ".chatcompletionevent", + "ChatCompletionEventPreview": ".chatcompletioneventpreview", + "ChatCompletionEventPreviewExtraFields": ".chatcompletioneventpreview", + "ChatCompletionEventPreviewExtraFieldsTypedDict": ".chatcompletioneventpreview", + "ChatCompletionEventPreviewTypedDict": 
".chatcompletioneventpreview", "ChatCompletionRequest": ".chatcompletionrequest", "ChatCompletionRequestMessage": ".chatcompletionrequest", "ChatCompletionRequestMessageTypedDict": ".chatcompletionrequest", @@ -1791,6 +2596,8 @@ "ChatModerationRequestInputs3": ".chatmoderationrequest", "ChatModerationRequestInputs3TypedDict": ".chatmoderationrequest", "ChatModerationRequestTypedDict": ".chatmoderationrequest", + "ChatTranscriptionEvent": ".chattranscriptionevent", + "ChatTranscriptionEventTypedDict": ".chattranscriptionevent", "Checkpoint": ".checkpoint", "CheckpointTypedDict": ".checkpoint", "ClassificationRequest": ".classificationrequest", @@ -1856,6 +2663,35 @@ "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", "CompletionTrainingParameters": ".completiontrainingparameters", "CompletionTrainingParametersTypedDict": ".completiontrainingparameters", + "Connector": ".connector", + "ConnectorTypedDict": ".connector", + "ConnectorCallToolV1Request": ".connector_call_tool_v1op", + "ConnectorCallToolV1RequestTypedDict": ".connector_call_tool_v1op", + "ConnectorDeleteV1Request": ".connector_delete_v1op", + "ConnectorDeleteV1RequestTypedDict": ".connector_delete_v1op", + "ConnectorGetV1Request": ".connector_get_v1op", + "ConnectorGetV1RequestTypedDict": ".connector_get_v1op", + "ConnectorListV1Request": ".connector_list_v1op", + "ConnectorListV1RequestTypedDict": ".connector_list_v1op", + "ConnectorUpdateV1Request": ".connector_update_v1op", + "ConnectorUpdateV1RequestTypedDict": ".connector_update_v1op", + "ConnectorCallToolRequest": ".connectorcalltoolrequest", + "ConnectorCallToolRequestTypedDict": ".connectorcalltoolrequest", + "ConnectorsQueryFilters": ".connectorsqueryfilters", + "ConnectorsQueryFiltersTypedDict": ".connectorsqueryfilters", + "ConnectorTool": ".connectortool", + "ConnectorToolTypedDict": ".connectortool", + "ConnectorToolCallMetadata": ".connectortoolcallmetadata", + "ConnectorToolCallMetadataTypedDict": 
".connectortoolcallmetadata", + "ConnectorToolCallResponse": ".connectortoolcallresponse", + "ConnectorToolCallResponseContent": ".connectortoolcallresponse", + "ConnectorToolCallResponseContentTypedDict": ".connectortoolcallresponse", + "ConnectorToolCallResponseTypedDict": ".connectortoolcallresponse", + "UnknownConnectorToolCallResponseContent": ".connectortoolcallresponse", + "ConnectorToolLocale": ".connectortoollocale", + "ConnectorToolLocaleTypedDict": ".connectortoollocale", + "ConnectorToolResultMetadata": ".connectortoolresultmetadata", + "ConnectorToolResultMetadataTypedDict": ".connectortoolresultmetadata", "ContentChunk": ".contentchunk", "ContentChunkTypedDict": ".contentchunk", "UnknownContentChunk": ".contentchunk", @@ -1878,6 +2714,8 @@ "ConversationInputsTypedDict": ".conversationinputs", "ConversationMessages": ".conversationmessages", "ConversationMessagesTypedDict": ".conversationmessages", + "ConversationPayload": ".conversationpayload", + "ConversationPayloadTypedDict": ".conversationpayload", "ConversationRequest": ".conversationrequest", "ConversationRequestAgentVersion": ".conversationrequest", "ConversationRequestAgentVersionTypedDict": ".conversationrequest", @@ -1886,9 +2724,11 @@ "ConversationRequestToolTypedDict": ".conversationrequest", "ConversationRequestTypedDict": ".conversationrequest", "ConversationResponse": ".conversationresponse", + "ConversationResponseOutput": ".conversationresponse", + "ConversationResponseOutputTypedDict": ".conversationresponse", "ConversationResponseTypedDict": ".conversationresponse", - "Output": ".conversationresponse", - "OutputTypedDict": ".conversationresponse", + "Guardrail": ".conversationresponse", + "GuardrailTypedDict": ".conversationresponse", "ConversationRestartRequest": ".conversationrestartrequest", "ConversationRestartRequestAgentVersion": ".conversationrestartrequest", "ConversationRestartRequestAgentVersionTypedDict": ".conversationrestartrequest", @@ -1899,6 +2739,7 @@ 
"ConversationRestartStreamRequestAgentVersionTypedDict": ".conversationrestartstreamrequest", "ConversationRestartStreamRequestHandoffExecution": ".conversationrestartstreamrequest", "ConversationRestartStreamRequestTypedDict": ".conversationrestartstreamrequest", + "ConversationSource": ".conversationsource", "ConversationStreamRequest": ".conversationstreamrequest", "ConversationStreamRequestAgentVersion": ".conversationstreamrequest", "ConversationStreamRequestAgentVersionTypedDict": ".conversationstreamrequest", @@ -1906,18 +2747,24 @@ "ConversationStreamRequestTool": ".conversationstreamrequest", "ConversationStreamRequestToolTypedDict": ".conversationstreamrequest", "ConversationStreamRequestTypedDict": ".conversationstreamrequest", - "ConversationThinkChunk": ".conversationthinkchunk", - "ConversationThinkChunkThinking": ".conversationthinkchunk", - "ConversationThinkChunkThinkingTypedDict": ".conversationthinkchunk", - "ConversationThinkChunkTypedDict": ".conversationthinkchunk", "ConversationUsageInfo": ".conversationusageinfo", "ConversationUsageInfoTypedDict": ".conversationusageinfo", + "CreateDatasetRecordV1ObservabilityDatasetsDatasetIDRecordsPostRequest": ".create_dataset_record_v1_observability_datasets_dataset_id_records_postop", + "CreateDatasetRecordV1ObservabilityDatasetsDatasetIDRecordsPostRequestTypedDict": ".create_dataset_record_v1_observability_datasets_dataset_id_records_postop", "CreateAgentRequest": ".createagentrequest", "CreateAgentRequestTool": ".createagentrequest", "CreateAgentRequestToolTypedDict": ".createagentrequest", "CreateAgentRequestTypedDict": ".createagentrequest", "CreateBatchJobRequest": ".createbatchjobrequest", "CreateBatchJobRequestTypedDict": ".createbatchjobrequest", + "CreateCampaignRequest": ".createcampaignrequest", + "CreateCampaignRequestTypedDict": ".createcampaignrequest", + "CreateConnectorRequest": ".createconnectorrequest", + "CreateConnectorRequestTypedDict": ".createconnectorrequest", + 
"CreateDatasetRecordRequest": ".createdatasetrecordrequest", + "CreateDatasetRecordRequestTypedDict": ".createdatasetrecordrequest", + "CreateDatasetRequest": ".createdatasetrequest", + "CreateDatasetRequestTypedDict": ".createdatasetrequest", "CreateFileResponse": ".createfileresponse", "CreateFileResponseTypedDict": ".createfileresponse", "CreateFineTuningJobRequest": ".createfinetuningjobrequest", @@ -1928,14 +2775,38 @@ "CreateFineTuningJobRequestTypedDict": ".createfinetuningjobrequest", "Hyperparameters": ".createfinetuningjobrequest", "HyperparametersTypedDict": ".createfinetuningjobrequest", + "CreateGithubRepositoryRequest": ".creategithubrepositoryrequest", + "CreateGithubRepositoryRequestTypedDict": ".creategithubrepositoryrequest", + "CreateJudgeRequest": ".createjudgerequest", + "CreateJudgeRequestOutput": ".createjudgerequest", + "CreateJudgeRequestOutputTypedDict": ".createjudgerequest", + "CreateJudgeRequestTypedDict": ".createjudgerequest", "CreateLibraryRequest": ".createlibraryrequest", "CreateLibraryRequestTypedDict": ".createlibraryrequest", + "Dataset": ".dataset", + "DatasetTypedDict": ".dataset", + "DatasetImportTask": ".datasetimporttask", + "DatasetImportTaskTypedDict": ".datasetimporttask", + "DatasetPreview": ".datasetpreview", + "DatasetPreviewTypedDict": ".datasetpreview", + "DatasetRecord": ".datasetrecord", + "DatasetRecordTypedDict": ".datasetrecord", + "DeleteCampaignV1ObservabilityCampaignsCampaignIDDeleteRequest": ".delete_campaign_v1_observability_campaigns_campaign_id_deleteop", + "DeleteCampaignV1ObservabilityCampaignsCampaignIDDeleteRequestTypedDict": ".delete_campaign_v1_observability_campaigns_campaign_id_deleteop", + "DeleteDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDDeleteRequest": ".delete_dataset_record_v1_observability_dataset_records_dataset_record_id_deleteop", + "DeleteDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDDeleteRequestTypedDict": 
".delete_dataset_record_v1_observability_dataset_records_dataset_record_id_deleteop", + "DeleteDatasetV1ObservabilityDatasetsDatasetIDDeleteRequest": ".delete_dataset_v1_observability_datasets_dataset_id_deleteop", + "DeleteDatasetV1ObservabilityDatasetsDatasetIDDeleteRequestTypedDict": ".delete_dataset_v1_observability_datasets_dataset_id_deleteop", + "DeleteJudgeV1ObservabilityJudgesJudgeIDDeleteRequest": ".delete_judge_v1_observability_judges_judge_id_deleteop", + "DeleteJudgeV1ObservabilityJudgesJudgeIDDeleteRequestTypedDict": ".delete_judge_v1_observability_judges_judge_id_deleteop", "DeleteModelV1ModelsModelIDDeleteRequest": ".delete_model_v1_models_model_id_deleteop", "DeleteModelV1ModelsModelIDDeleteRequestTypedDict": ".delete_model_v1_models_model_id_deleteop", + "DeleteDatasetRecordsRequest": ".deletedatasetrecordsrequest", + "DeleteDatasetRecordsRequestTypedDict": ".deletedatasetrecordsrequest", "DeleteFileResponse": ".deletefileresponse", "DeleteFileResponseTypedDict": ".deletefileresponse", - "DeleteModelOut": ".deletemodelout", - "DeleteModelOutTypedDict": ".deletemodelout", + "DeleteModelResponse": ".deletemodelresponse", + "DeleteModelResponseTypedDict": ".deletemodelresponse", "DeltaMessage": ".deltamessage", "DeltaMessageContent": ".deltamessage", "DeltaMessageContentTypedDict": ".deltamessage", @@ -1948,6 +2819,10 @@ "DocumentTextContentTypedDict": ".documenttextcontent", "DocumentURLChunk": ".documenturlchunk", "DocumentURLChunkTypedDict": ".documenturlchunk", + "EmbeddedResource": ".embeddedresource", + "EmbeddedResourceTypedDict": ".embeddedresource", + "Resource": ".embeddedresource", + "ResourceTypedDict": ".embeddedresource", "EmbeddingDtype": ".embeddingdtype", "EmbeddingRequest": ".embeddingrequest", "EmbeddingRequestInputs": ".embeddingrequest", @@ -1961,6 +2836,28 @@ "EntityType": ".entitytype", "Event": ".event", "EventTypedDict": ".event", + "ExecutionConfig": ".executionconfig", + "ExecutionConfigTypedDict": ".executionconfig", + 
"ExportDatasetToJsonlV1ObservabilityDatasetsDatasetIDExportsToJsonlGetRequest": ".export_dataset_to_jsonl_v1_observability_datasets_dataset_id_exports_to_jsonl_getop", + "ExportDatasetToJsonlV1ObservabilityDatasetsDatasetIDExportsToJsonlGetRequestTypedDict": ".export_dataset_to_jsonl_v1_observability_datasets_dataset_id_exports_to_jsonl_getop", + "ExportDatasetResponse": ".exportdatasetresponse", + "ExportDatasetResponseTypedDict": ".exportdatasetresponse", + "FeedResultChatCompletionEventPreview": ".feedresultchatcompletioneventpreview", + "FeedResultChatCompletionEventPreviewTypedDict": ".feedresultchatcompletioneventpreview", + "FetchCampaignStatusResponse": ".fetchcampaignstatusresponse", + "FetchCampaignStatusResponseTypedDict": ".fetchcampaignstatusresponse", + "FetchChatCompletionFieldOptionsResponse": ".fetchchatcompletionfieldoptionsresponse", + "FetchChatCompletionFieldOptionsResponseTypedDict": ".fetchchatcompletionfieldoptionsresponse", + "Option": ".fetchchatcompletionfieldoptionsresponse", + "OptionTypedDict": ".fetchchatcompletionfieldoptionsresponse", + "FetchFieldOptionCountsRequest": ".fetchfieldoptioncountsrequest", + "FetchFieldOptionCountsRequestTypedDict": ".fetchfieldoptioncountsrequest", + "FetchFieldOptionCountsResponse": ".fetchfieldoptioncountsresponse", + "FetchFieldOptionCountsResponseTypedDict": ".fetchfieldoptioncountsresponse", + "FieldGroup": ".fieldgroup", + "FieldGroupTypedDict": ".fieldgroup", + "FieldOptionCountItem": ".fieldoptioncountitem", + "FieldOptionCountItemTypedDict": ".fieldoptioncountitem", "File": ".file", "FileTypedDict": ".file", "FileChunk": ".filechunk", @@ -1976,10 +2873,19 @@ "FilesAPIRoutesListFilesRequestTypedDict": ".files_api_routes_list_filesop", "FilesAPIRoutesRetrieveFileRequest": ".files_api_routes_retrieve_fileop", "FilesAPIRoutesRetrieveFileRequestTypedDict": ".files_api_routes_retrieve_fileop", + "FilesAPIRoutesUploadFileFileVisibility": ".files_api_routes_upload_fileop", "MultiPartBodyParams": 
".files_api_routes_upload_fileop", "MultiPartBodyParamsTypedDict": ".files_api_routes_upload_fileop", "FileSchema": ".fileschema", "FileSchemaTypedDict": ".fileschema", + "FileVisibility": ".filevisibility", + "FilterCondition": ".filtercondition", + "FilterConditionTypedDict": ".filtercondition", + "Op": ".filtercondition", + "FilterPayload": ".filterpayload", + "FilterPayloadTypedDict": ".filterpayload", + "Filters": ".filterpayload", + "FiltersTypedDict": ".filterpayload", "FIMCompletionRequest": ".fimcompletionrequest", "FIMCompletionRequestStop": ".fimcompletionrequest", "FIMCompletionRequestStopTypedDict": ".fimcompletionrequest", @@ -2016,14 +2922,51 @@ "FunctionResultEntryTypedDict": ".functionresultentry", "FunctionTool": ".functiontool", "FunctionToolTypedDict": ".functiontool", + "GetCampaignByIDV1ObservabilityCampaignsCampaignIDGetRequest": ".get_campaign_by_id_v1_observability_campaigns_campaign_id_getop", + "GetCampaignByIDV1ObservabilityCampaignsCampaignIDGetRequestTypedDict": ".get_campaign_by_id_v1_observability_campaigns_campaign_id_getop", + "GetCampaignSelectedEventsV1ObservabilityCampaignsCampaignIDSelectedEventsGetRequest": ".get_campaign_selected_events_v1_observability_campaigns_campaign_id_selected_events_getop", + "GetCampaignSelectedEventsV1ObservabilityCampaignsCampaignIDSelectedEventsGetRequestTypedDict": ".get_campaign_selected_events_v1_observability_campaigns_campaign_id_selected_events_getop", + "GetCampaignStatusByIDV1ObservabilityCampaignsCampaignIDStatusGetRequest": ".get_campaign_status_by_id_v1_observability_campaigns_campaign_id_status_getop", + "GetCampaignStatusByIDV1ObservabilityCampaignsCampaignIDStatusGetRequestTypedDict": ".get_campaign_status_by_id_v1_observability_campaigns_campaign_id_status_getop", + "GetCampaignsV1ObservabilityCampaignsGetRequest": ".get_campaigns_v1_observability_campaigns_getop", + "GetCampaignsV1ObservabilityCampaignsGetRequestTypedDict": ".get_campaigns_v1_observability_campaigns_getop", + 
"GetChatCompletionEventV1ObservabilityChatCompletionEventsEventIDGetRequest": ".get_chat_completion_event_v1_observability_chat_completion_events_event_id_getop", + "GetChatCompletionEventV1ObservabilityChatCompletionEventsEventIDGetRequestTypedDict": ".get_chat_completion_event_v1_observability_chat_completion_events_event_id_getop", + "GetChatCompletionEventsV1ObservabilityChatCompletionEventsSearchPostRequest": ".get_chat_completion_events_v1_observability_chat_completion_events_search_postop", + "GetChatCompletionEventsV1ObservabilityChatCompletionEventsSearchPostRequestTypedDict": ".get_chat_completion_events_v1_observability_chat_completion_events_search_postop", + "GetChatCompletionFieldOptionsCountsV1ObservabilityChatCompletionFieldsFieldNameOptionsCountsPostRequest": ".get_chat_completion_field_options_counts_v1_observability_chat_completion_fields_field_name_options_counts_postop", + "GetChatCompletionFieldOptionsCountsV1ObservabilityChatCompletionFieldsFieldNameOptionsCountsPostRequestTypedDict": ".get_chat_completion_field_options_counts_v1_observability_chat_completion_fields_field_name_options_counts_postop", + "GetChatCompletionFieldOptionsV1ObservabilityChatCompletionFieldsFieldNameOptionsGetRequest": ".get_chat_completion_field_options_v1_observability_chat_completion_fields_field_name_options_getop", + "GetChatCompletionFieldOptionsV1ObservabilityChatCompletionFieldsFieldNameOptionsGetRequestTypedDict": ".get_chat_completion_field_options_v1_observability_chat_completion_fields_field_name_options_getop", + "Operator": ".get_chat_completion_field_options_v1_observability_chat_completion_fields_field_name_options_getop", + "GetDatasetByIDV1ObservabilityDatasetsDatasetIDGetRequest": ".get_dataset_by_id_v1_observability_datasets_dataset_id_getop", + "GetDatasetByIDV1ObservabilityDatasetsDatasetIDGetRequestTypedDict": ".get_dataset_by_id_v1_observability_datasets_dataset_id_getop", + 
"GetDatasetImportTaskV1ObservabilityDatasetsDatasetIDTasksTaskIDGetRequest": ".get_dataset_import_task_v1_observability_datasets_dataset_id_tasks_task_id_getop", + "GetDatasetImportTaskV1ObservabilityDatasetsDatasetIDTasksTaskIDGetRequestTypedDict": ".get_dataset_import_task_v1_observability_datasets_dataset_id_tasks_task_id_getop", + "GetDatasetImportTasksV1ObservabilityDatasetsDatasetIDTasksGetRequest": ".get_dataset_import_tasks_v1_observability_datasets_dataset_id_tasks_getop", + "GetDatasetImportTasksV1ObservabilityDatasetsDatasetIDTasksGetRequestTypedDict": ".get_dataset_import_tasks_v1_observability_datasets_dataset_id_tasks_getop", + "GetDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDGetRequest": ".get_dataset_record_v1_observability_dataset_records_dataset_record_id_getop", + "GetDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDGetRequestTypedDict": ".get_dataset_record_v1_observability_dataset_records_dataset_record_id_getop", + "GetDatasetRecordsV1ObservabilityDatasetsDatasetIDRecordsGetRequest": ".get_dataset_records_v1_observability_datasets_dataset_id_records_getop", + "GetDatasetRecordsV1ObservabilityDatasetsDatasetIDRecordsGetRequestTypedDict": ".get_dataset_records_v1_observability_datasets_dataset_id_records_getop", + "GetDatasetsV1ObservabilityDatasetsGetRequest": ".get_datasets_v1_observability_datasets_getop", + "GetDatasetsV1ObservabilityDatasetsGetRequestTypedDict": ".get_datasets_v1_observability_datasets_getop", + "GetJudgeByIDV1ObservabilityJudgesJudgeIDGetRequest": ".get_judge_by_id_v1_observability_judges_judge_id_getop", + "GetJudgeByIDV1ObservabilityJudgesJudgeIDGetRequestTypedDict": ".get_judge_by_id_v1_observability_judges_judge_id_getop", + "GetJudgesV1ObservabilityJudgesGetRequest": ".get_judges_v1_observability_judges_getop", + "GetJudgesV1ObservabilityJudgesGetRequestTypedDict": ".get_judges_v1_observability_judges_getop", + 
"GetSimilarChatCompletionEventsV1ObservabilityChatCompletionEventsEventIDSimilarEventsGetRequest": ".get_similar_chat_completion_events_v1_observability_chat_completion_events_event_id_similar_events_getop", + "GetSimilarChatCompletionEventsV1ObservabilityChatCompletionEventsEventIDSimilarEventsGetRequestTypedDict": ".get_similar_chat_completion_events_v1_observability_chat_completion_events_event_id_similar_events_getop", "GetFileResponse": ".getfileresponse", "GetFileResponseTypedDict": ".getfileresponse", "GetSignedURLResponse": ".getsignedurlresponse", "GetSignedURLResponseTypedDict": ".getsignedurlresponse", "GithubRepository": ".githubrepository", "GithubRepositoryTypedDict": ".githubrepository", - "GithubRepositoryIn": ".githubrepositoryin", - "GithubRepositoryInTypedDict": ".githubrepositoryin", + "GuardrailConfig": ".guardrailconfig", + "GuardrailConfigTypedDict": ".guardrailconfig", + "ImageContent": ".imagecontent", + "ImageContentTypedDict": ".imagecontent", "ImageDetail": ".imagedetail", "ImageGenerationTool": ".imagegenerationtool", "ImageGenerationToolTypedDict": ".imagegenerationtool", @@ -2033,6 +2976,16 @@ "ImageURLChunkTypedDict": ".imageurlchunk", "ImageURLUnion": ".imageurlchunk", "ImageURLUnionTypedDict": ".imageurlchunk", + "ImportDatasetFromCampaignRequest": ".importdatasetfromcampaignrequest", + "ImportDatasetFromCampaignRequestTypedDict": ".importdatasetfromcampaignrequest", + "ImportDatasetFromDatasetRequest": ".importdatasetfromdatasetrequest", + "ImportDatasetFromDatasetRequestTypedDict": ".importdatasetfromdatasetrequest", + "ImportDatasetFromExplorerRequest": ".importdatasetfromexplorerrequest", + "ImportDatasetFromExplorerRequestTypedDict": ".importdatasetfromexplorerrequest", + "ImportDatasetFromFileRequest": ".importdatasetfromfilerequest", + "ImportDatasetFromFileRequestTypedDict": ".importdatasetfromfilerequest", + "ImportDatasetFromPlaygroundRequest": ".importdatasetfromplaygroundrequest", + 
"ImportDatasetFromPlaygroundRequestTypedDict": ".importdatasetfromplaygroundrequest", "InputEntries": ".inputentries", "InputEntriesTypedDict": ".inputentries", "Inputs": ".inputs", @@ -2084,6 +3037,30 @@ "UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", "JSONSchema": ".jsonschema", "JSONSchemaTypedDict": ".jsonschema", + "Judge": ".judge", + "JudgeOutputUnion": ".judge", + "JudgeOutputUnionTypedDict": ".judge", + "JudgeTypedDict": ".judge", + "UnknownJudgeOutputUnion": ".judge", + "JudgeChatCompletionEventV1ObservabilityChatCompletionEventsEventIDLiveJudgingPostRequest": ".judge_chat_completion_event_v1_observability_chat_completion_events_event_id_live_judging_postop", + "JudgeChatCompletionEventV1ObservabilityChatCompletionEventsEventIDLiveJudgingPostRequestTypedDict": ".judge_chat_completion_event_v1_observability_chat_completion_events_event_id_live_judging_postop", + "JudgeDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDLiveJudgingPostRequest": ".judge_dataset_record_v1_observability_dataset_records_dataset_record_id_live_judging_postop", + "JudgeDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDLiveJudgingPostRequestTypedDict": ".judge_dataset_record_v1_observability_dataset_records_dataset_record_id_live_judging_postop", + "JudgeChatCompletionEventRequest": ".judgechatcompletioneventrequest", + "JudgeChatCompletionEventRequestTypedDict": ".judgechatcompletioneventrequest", + "JudgeClassificationOutput": ".judgeclassificationoutput", + "JudgeClassificationOutputTypedDict": ".judgeclassificationoutput", + "JudgeClassificationOutputOption": ".judgeclassificationoutputoption", + "JudgeClassificationOutputOptionTypedDict": ".judgeclassificationoutputoption", + "JudgeDatasetRecordRequest": ".judgedatasetrecordrequest", + "JudgeDatasetRecordRequestTypedDict": ".judgedatasetrecordrequest", + "Answer": ".judgeoutput", + "AnswerTypedDict": ".judgeoutput", + "JudgeOutput": 
".judgeoutput", + "JudgeOutputTypedDict": ".judgeoutput", + "JudgeOutputType": ".judgeoutputtype", + "JudgeRegressionOutput": ".judgeregressionoutput", + "JudgeRegressionOutputTypedDict": ".judgeregressionoutput", "LegacyJobMetadata": ".legacyjobmetadata", "LegacyJobMetadataTypedDict": ".legacyjobmetadata", "LibrariesDeleteV1Request": ".libraries_delete_v1op", @@ -2122,8 +3099,22 @@ "LibrariesUpdateV1RequestTypedDict": ".libraries_update_v1op", "Library": ".library", "LibraryTypedDict": ".library", + "ListModelsV1ModelsGetRequest": ".list_models_v1_models_getop", + "ListModelsV1ModelsGetRequestTypedDict": ".list_models_v1_models_getop", "ListBatchJobsResponse": ".listbatchjobsresponse", "ListBatchJobsResponseTypedDict": ".listbatchjobsresponse", + "ListCampaignSelectedEventsResponse": ".listcampaignselectedeventsresponse", + "ListCampaignSelectedEventsResponseTypedDict": ".listcampaignselectedeventsresponse", + "ListCampaignsResponse": ".listcampaignsresponse", + "ListCampaignsResponseTypedDict": ".listcampaignsresponse", + "ListChatCompletionFieldsResponse": ".listchatcompletionfieldsresponse", + "ListChatCompletionFieldsResponseTypedDict": ".listchatcompletionfieldsresponse", + "ListDatasetImportTasksResponse": ".listdatasetimporttasksresponse", + "ListDatasetImportTasksResponseTypedDict": ".listdatasetimporttasksresponse", + "ListDatasetRecordsResponse": ".listdatasetrecordsresponse", + "ListDatasetRecordsResponseTypedDict": ".listdatasetrecordsresponse", + "ListDatasetsResponse": ".listdatasetsresponse", + "ListDatasetsResponseTypedDict": ".listdatasetsresponse", "ListDocumentsResponse": ".listdocumentsresponse", "ListDocumentsResponseTypedDict": ".listdocumentsresponse", "ListFilesResponse": ".listfilesresponse", @@ -2133,10 +3124,14 @@ "ListFineTuningJobsResponseDataTypedDict": ".listfinetuningjobsresponse", "ListFineTuningJobsResponseTypedDict": ".listfinetuningjobsresponse", "UnknownListFineTuningJobsResponseData": ".listfinetuningjobsresponse", + 
"ListJudgesResponse": ".listjudgesresponse", + "ListJudgesResponseTypedDict": ".listjudgesresponse", "ListLibrariesResponse": ".listlibrariesresponse", "ListLibrariesResponseTypedDict": ".listlibrariesresponse", - "ListSharingOut": ".listsharingout", - "ListSharingOutTypedDict": ".listsharingout", + "ListSharingResponse": ".listsharingresponse", + "ListSharingResponseTypedDict": ".listsharingresponse", + "MCPServerIcon": ".mcpservericon", + "MCPServerIconTypedDict": ".mcpservericon", "MessageEntries": ".messageentries", "MessageEntriesTypedDict": ".messageentries", "MessageInputContentChunks": ".messageinputcontentchunks", @@ -2156,6 +3151,8 @@ "MessageOutputEventContent": ".messageoutputevent", "MessageOutputEventContentTypedDict": ".messageoutputevent", "MessageOutputEventTypedDict": ".messageoutputevent", + "MessageResponse": ".messageresponse", + "MessageResponseTypedDict": ".messageresponse", "Metric": ".metric", "MetricTypedDict": ".metric", "MistralPromptMode": ".mistralpromptmode", @@ -2171,10 +3168,18 @@ "ModelListDataTypedDict": ".modellist", "ModelListTypedDict": ".modellist", "UnknownModelListData": ".modellist", + "ModerationLlmv1Action": ".moderationllmv1action", + "ModerationLlmv1CategoryThresholds": ".moderationllmv1categorythresholds", + "ModerationLlmv1CategoryThresholdsTypedDict": ".moderationllmv1categorythresholds", + "ModerationLlmv1Config": ".moderationllmv1config", + "ModerationLlmv1ConfigTypedDict": ".moderationllmv1config", "ModerationObject": ".moderationobject", "ModerationObjectTypedDict": ".moderationobject", "ModerationResponse": ".moderationresponse", "ModerationResponseTypedDict": ".moderationresponse", + "ObservabilityErrorCode": ".observabilityerrorcode", + "ObservabilityErrorDetail": ".observabilityerrordetail", + "ObservabilityErrorDetailTypedDict": ".observabilityerrordetail", "OCRImageObject": ".ocrimageobject", "OCRImageObjectTypedDict": ".ocrimageobject", "OCRPageDimensions": ".ocrpagedimensions", @@ -2195,12 +3200,38 @@ 
"OCRUsageInfoTypedDict": ".ocrusageinfo", "OutputContentChunks": ".outputcontentchunks", "OutputContentChunksTypedDict": ".outputcontentchunks", + "PaginatedConnectors": ".paginatedconnectors", + "PaginatedConnectorsTypedDict": ".paginatedconnectors", + "PaginatedResultCampaignPreview": ".paginatedresultcampaignpreview", + "PaginatedResultCampaignPreviewTypedDict": ".paginatedresultcampaignpreview", + "PaginatedResultChatCompletionEventPreview": ".paginatedresultchatcompletioneventpreview", + "PaginatedResultChatCompletionEventPreviewTypedDict": ".paginatedresultchatcompletioneventpreview", + "PaginatedResultDatasetImportTask": ".paginatedresultdatasetimporttask", + "PaginatedResultDatasetImportTaskTypedDict": ".paginatedresultdatasetimporttask", + "PaginatedResultDatasetPreview": ".paginatedresultdatasetpreview", + "PaginatedResultDatasetPreviewTypedDict": ".paginatedresultdatasetpreview", + "PaginatedResultDatasetRecord": ".paginatedresultdatasetrecord", + "PaginatedResultDatasetRecordTypedDict": ".paginatedresultdatasetrecord", + "PaginatedResultJudgePreview": ".paginatedresultjudgepreview", + "PaginatedResultJudgePreviewTypedDict": ".paginatedresultjudgepreview", "PaginationInfo": ".paginationinfo", "PaginationInfoTypedDict": ".paginationinfo", + "PaginationResponse": ".paginationresponse", + "PaginationResponseTypedDict": ".paginationresponse", + "PostDatasetRecordsFromCampaignV1ObservabilityDatasetsDatasetIDImportsFromCampaignPostRequest": ".post_dataset_records_from_campaign_v1_observability_datasets_dataset_id_imports_from_campaign_postop", + "PostDatasetRecordsFromCampaignV1ObservabilityDatasetsDatasetIDImportsFromCampaignPostRequestTypedDict": ".post_dataset_records_from_campaign_v1_observability_datasets_dataset_id_imports_from_campaign_postop", + "PostDatasetRecordsFromDatasetV1ObservabilityDatasetsDatasetIDImportsFromDatasetPostRequest": ".post_dataset_records_from_dataset_v1_observability_datasets_dataset_id_imports_from_dataset_postop", + 
"PostDatasetRecordsFromDatasetV1ObservabilityDatasetsDatasetIDImportsFromDatasetPostRequestTypedDict": ".post_dataset_records_from_dataset_v1_observability_datasets_dataset_id_imports_from_dataset_postop", + "PostDatasetRecordsFromExplorerV1ObservabilityDatasetsDatasetIDImportsFromExplorerPostRequest": ".post_dataset_records_from_explorer_v1_observability_datasets_dataset_id_imports_from_explorer_postop", + "PostDatasetRecordsFromExplorerV1ObservabilityDatasetsDatasetIDImportsFromExplorerPostRequestTypedDict": ".post_dataset_records_from_explorer_v1_observability_datasets_dataset_id_imports_from_explorer_postop", + "PostDatasetRecordsFromFileV1ObservabilityDatasetsDatasetIDImportsFromFilePostRequest": ".post_dataset_records_from_file_v1_observability_datasets_dataset_id_imports_from_file_postop", + "PostDatasetRecordsFromFileV1ObservabilityDatasetsDatasetIDImportsFromFilePostRequestTypedDict": ".post_dataset_records_from_file_v1_observability_datasets_dataset_id_imports_from_file_postop", + "PostDatasetRecordsFromPlaygroundV1ObservabilityDatasetsDatasetIDImportsFromPlaygroundPostRequest": ".post_dataset_records_from_playground_v1_observability_datasets_dataset_id_imports_from_playground_postop", + "PostDatasetRecordsFromPlaygroundV1ObservabilityDatasetsDatasetIDImportsFromPlaygroundPostRequestTypedDict": ".post_dataset_records_from_playground_v1_observability_datasets_dataset_id_imports_from_playground_postop", "Prediction": ".prediction", "PredictionTypedDict": ".prediction", - "ProcessingStatusOut": ".processingstatusout", - "ProcessingStatusOutTypedDict": ".processingstatusout", + "ProcessingStatus": ".processingstatus", + "ProcessingStatusTypedDict": ".processingstatus", "ProcessStatus": ".processstatus", "RealtimeTranscriptionError": ".realtimetranscriptionerror", "RealtimeTranscriptionErrorTypedDict": ".realtimetranscriptionerror", @@ -2227,6 +3258,9 @@ "ReferenceChunk": ".referencechunk", "ReferenceChunkTypedDict": ".referencechunk", "RequestSource": 
".requestsource", + "ResourceLink": ".resourcelink", + "ResourceLinkTypedDict": ".resourcelink", + "ResourceVisibility": ".resourcevisibility", "ResponseDoneEvent": ".responsedoneevent", "ResponseDoneEventTypedDict": ".responsedoneevent", "ResponseErrorEvent": ".responseerrorevent", @@ -2242,15 +3276,23 @@ "RetrieveModelV1ModelsModelIDGetRequestTypedDict": ".retrieve_model_v1_models_model_id_getop", "UnknownResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", "SampleType": ".sampletype", + "SearchChatCompletionEventIdsRequest": ".searchchatcompletioneventidsrequest", + "SearchChatCompletionEventIdsRequestTypedDict": ".searchchatcompletioneventidsrequest", + "SearchChatCompletionEventIdsResponse": ".searchchatcompletioneventidsresponse", + "SearchChatCompletionEventIdsResponseTypedDict": ".searchchatcompletioneventidsresponse", + "SearchChatCompletionEventsRequest": ".searchchatcompletioneventsrequest", + "SearchChatCompletionEventsRequestTypedDict": ".searchchatcompletioneventsrequest", + "SearchChatCompletionEventsResponse": ".searchchatcompletioneventsresponse", + "SearchChatCompletionEventsResponseTypedDict": ".searchchatcompletioneventsresponse", "Security": ".security", "SecurityTypedDict": ".security", "ShareEnum": ".shareenum", + "Sharing": ".sharing", + "SharingTypedDict": ".sharing", "SharingDelete": ".sharingdelete", "SharingDeleteTypedDict": ".sharingdelete", - "SharingIn": ".sharingin", - "SharingInTypedDict": ".sharingin", - "SharingOut": ".sharingout", - "SharingOutTypedDict": ".sharingout", + "SharingRequest": ".sharingrequest", + "SharingRequestTypedDict": ".sharingrequest", "Source": ".source", "SSETypes": ".ssetypes", "SystemMessage": ".systemmessage", @@ -2261,10 +3303,14 @@ "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", "TextChunk": ".textchunk", "TextChunkTypedDict": ".textchunk", + "TextContent": ".textcontent", + "TextContentTypedDict": ".textcontent", + "TextResourceContents": 
".textresourcecontents", + "TextResourceContentsTypedDict": ".textresourcecontents", "ThinkChunk": ".thinkchunk", - "ThinkChunkThinking": ".thinkchunk", - "ThinkChunkThinkingTypedDict": ".thinkchunk", "ThinkChunkTypedDict": ".thinkchunk", + "Thinking": ".thinkchunk", + "ThinkingTypedDict": ".thinkchunk", "TimestampGranularity": ".timestampgranularity", "Tool": ".tool", "ToolTypedDict": ".tool", @@ -2329,14 +3375,34 @@ "TranscriptionStreamTextDeltaTypedDict": ".transcriptionstreamtextdelta", "UnarchiveModelResponse": ".unarchivemodelresponse", "UnarchiveModelResponseTypedDict": ".unarchivemodelresponse", + "UpdateDatasetRecordPayloadV1ObservabilityDatasetRecordsDatasetRecordIDPayloadPutRequest": ".update_dataset_record_payload_v1_observability_dataset_records_dataset_record_id_payload_putop", + "UpdateDatasetRecordPayloadV1ObservabilityDatasetRecordsDatasetRecordIDPayloadPutRequestTypedDict": ".update_dataset_record_payload_v1_observability_dataset_records_dataset_record_id_payload_putop", + "UpdateDatasetRecordPropertiesV1ObservabilityDatasetRecordsDatasetRecordIDPropertiesPutRequest": ".update_dataset_record_properties_v1_observability_dataset_records_dataset_record_id_properties_putop", + "UpdateDatasetRecordPropertiesV1ObservabilityDatasetRecordsDatasetRecordIDPropertiesPutRequestTypedDict": ".update_dataset_record_properties_v1_observability_dataset_records_dataset_record_id_properties_putop", + "UpdateDatasetV1ObservabilityDatasetsDatasetIDPatchRequest": ".update_dataset_v1_observability_datasets_dataset_id_patchop", + "UpdateDatasetV1ObservabilityDatasetsDatasetIDPatchRequestTypedDict": ".update_dataset_v1_observability_datasets_dataset_id_patchop", + "UpdateJudgeV1ObservabilityJudgesJudgeIDPutRequest": ".update_judge_v1_observability_judges_judge_id_putop", + "UpdateJudgeV1ObservabilityJudgesJudgeIDPutRequestTypedDict": ".update_judge_v1_observability_judges_judge_id_putop", "UpdateAgentRequest": ".updateagentrequest", "UpdateAgentRequestTool": 
".updateagentrequest", "UpdateAgentRequestToolTypedDict": ".updateagentrequest", "UpdateAgentRequestTypedDict": ".updateagentrequest", + "UpdateConnectorRequest": ".updateconnectorrequest", + "UpdateConnectorRequestTypedDict": ".updateconnectorrequest", + "UpdateDatasetRecordPayloadRequest": ".updatedatasetrecordpayloadrequest", + "UpdateDatasetRecordPayloadRequestTypedDict": ".updatedatasetrecordpayloadrequest", + "UpdateDatasetRecordPropertiesRequest": ".updatedatasetrecordpropertiesrequest", + "UpdateDatasetRecordPropertiesRequestTypedDict": ".updatedatasetrecordpropertiesrequest", + "UpdateDatasetRequest": ".updatedatasetrequest", + "UpdateDatasetRequestTypedDict": ".updatedatasetrequest", "Attributes": ".updatedocumentrequest", "AttributesTypedDict": ".updatedocumentrequest", "UpdateDocumentRequest": ".updatedocumentrequest", "UpdateDocumentRequestTypedDict": ".updatedocumentrequest", + "UpdateJudgeRequest": ".updatejudgerequest", + "UpdateJudgeRequestOutput": ".updatejudgerequest", + "UpdateJudgeRequestOutputTypedDict": ".updatejudgerequest", + "UpdateJudgeRequestTypedDict": ".updatejudgerequest", "UpdateLibraryRequest": ".updatelibraryrequest", "UpdateLibraryRequestTypedDict": ".updatelibraryrequest", "UpdateModelRequest": ".updatemodelrequest", @@ -2347,6 +3413,8 @@ "UserMessageContent": ".usermessage", "UserMessageContentTypedDict": ".usermessage", "UserMessageTypedDict": ".usermessage", + "Context": ".validationerror", + "ContextTypedDict": ".validationerror", "Loc": ".validationerror", "LocTypedDict": ".validationerror", "ValidationError": ".validationerror", diff --git a/src/mistralai/client/models/agent.py b/src/mistralai/client/models/agent.py index 686a6eb8..1fa27f08 100644 --- a/src/mistralai/client/models/agent.py +++ b/src/mistralai/client/models/agent.py @@ -6,6 +6,7 @@ from .completionargs import CompletionArgs, CompletionArgsTypedDict from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict from .functiontool import 
FunctionTool, FunctionToolTypedDict +from .guardrailconfig import GuardrailConfig, GuardrailConfigTypedDict from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict from .websearchtool import WebSearchTool, WebSearchToolTypedDict @@ -98,6 +99,7 @@ class AgentTypedDict(TypedDict): r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" + guardrails: NotRequired[Nullable[List[GuardrailConfigTypedDict]]] description: NotRequired[Nullable[str]] handoffs: NotRequired[Nullable[List[str]]] metadata: NotRequired[Nullable[Dict[str, Any]]] @@ -133,6 +135,8 @@ class Agent(BaseModel): completion_args: Optional[CompletionArgs] = None r"""White-listed arguments from the completion API""" + guardrails: OptionalNullable[List[GuardrailConfig]] = UNSET + description: OptionalNullable[str] = UNSET handoffs: OptionalNullable[List[str]] = UNSET @@ -153,6 +157,7 @@ def serialize_model(self, handler): "instructions", "tools", "completion_args", + "guardrails", "description", "handoffs", "metadata", @@ -161,7 +166,14 @@ def serialize_model(self, handler): ] ) nullable_fields = set( - ["instructions", "description", "handoffs", "metadata", "version_message"] + [ + "instructions", + "guardrails", + "description", + "handoffs", + "metadata", + "version_message", + ] ) serialized = handler(self) m = {} diff --git a/src/mistralai/client/models/annotations.py b/src/mistralai/client/models/annotations.py new file mode 100644 index 00000000..ba085e2c --- /dev/null +++ b/src/mistralai/client/models/annotations.py @@ -0,0 +1,77 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3ae9e07de11d + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, List, Literal, Union +from typing_extensions import NotRequired, TypedDict + + +Audience = Union[ + Literal[ + "user", + "assistant", + ], + UnrecognizedStr, +] + + +class AnnotationsTypedDict(TypedDict): + audience: NotRequired[Nullable[List[Audience]]] + priority: NotRequired[Nullable[float]] + + +class Annotations(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + audience: OptionalNullable[List[Audience]] = UNSET + + priority: OptionalNullable[float] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["audience", "priority"]) + nullable_fields = set(["audience", "priority"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/src/mistralai/client/models/audiocontent.py b/src/mistralai/client/models/audiocontent.py new file mode 100644 index 00000000..b0a9505b --- /dev/null 
+++ b/src/mistralai/client/models/audiocontent.py @@ -0,0 +1,94 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 8e39736e73f0 + +from __future__ import annotations +from .annotations import Annotations, AnnotationsTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, Literal +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AudioContentTypedDict(TypedDict): + r"""Audio content for a message.""" + + data: str + mime_type: str + type: Literal["audio"] + annotations: NotRequired[Nullable[AnnotationsTypedDict]] + meta: NotRequired[Nullable[Dict[str, Any]]] + + +class AudioContent(BaseModel): + r"""Audio content for a message.""" + + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + data: str + + mime_type: Annotated[str, pydantic.Field(alias="mimeType")] + + type: Annotated[ + Annotated[Literal["audio"], AfterValidator(validate_const("audio"))], + pydantic.Field(alias="type"), + ] = "audio" + + annotations: OptionalNullable[Annotations] = UNSET + + meta: Annotated[OptionalNullable[Dict[str, Any]], pydantic.Field(alias="_meta")] = ( + UNSET + ) + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["annotations", "_meta"]) + nullable_fields = set(["annotations", "_meta"]) + serialized = handler(self) + m = {} + + for n, f in 
type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m + + +try: + AudioContent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/audiourl.py b/src/mistralai/client/models/audiourl.py new file mode 100644 index 00000000..98fef977 --- /dev/null +++ b/src/mistralai/client/models/audiourl.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f62d73a27faa + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class AudioURLTypedDict(TypedDict): + r"""Audio URL. + + Attributes: + url: The URL of the audio file. + """ + + url: str + + +class AudioURL(BaseModel): + r"""Audio URL. + + Attributes: + url: The URL of the audio file. + """ + + url: str diff --git a/src/mistralai/client/models/audiourlchunk.py b/src/mistralai/client/models/audiourlchunk.py new file mode 100644 index 00000000..89ebce79 --- /dev/null +++ b/src/mistralai/client/models/audiourlchunk.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f582b26db67b + +from __future__ import annotations +from .audiourl import AudioURL, AudioURLTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +AudioURLUnionTypedDict = TypeAliasType( + "AudioURLUnionTypedDict", Union[AudioURLTypedDict, str] +) + + +AudioURLUnion = TypeAliasType("AudioURLUnion", Union[AudioURL, str]) + + +class AudioURLChunkTypedDict(TypedDict): + r"""Audio URL chunk. + + Attributes: + type: The type of the chunk, which is always `ChunkTypes.audio_url`. + audio_url: The URL of the audio file. + """ + + audio_url: AudioURLUnionTypedDict + type: Literal["audio_url"] + + +class AudioURLChunk(BaseModel): + r"""Audio URL chunk. + + Attributes: + type: The type of the chunk, which is always `ChunkTypes.audio_url`. + audio_url: The URL of the audio file. + """ + + audio_url: AudioURLUnion + + type: Annotated[ + Annotated[Literal["audio_url"], AfterValidator(validate_const("audio_url"))], + pydantic.Field(alias="type"), + ] = "audio_url" + + +try: + AudioURLChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/authdata.py b/src/mistralai/client/models/authdata.py new file mode 100644 index 00000000..fb8b7972 --- /dev/null +++ b/src/mistralai/client/models/authdata.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b4d3fb07196e + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class AuthDataTypedDict(TypedDict): + client_id: str + client_secret: str + + +class AuthData(BaseModel): + client_id: str + + client_secret: str diff --git a/src/mistralai/client/models/basefielddefinition.py b/src/mistralai/client/models/basefielddefinition.py new file mode 100644 index 00000000..f8b8faf7 --- /dev/null +++ b/src/mistralai/client/models/basefielddefinition.py @@ -0,0 +1,100 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ffa42818fea3 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) +from pydantic import model_serializer +from typing import List, Literal, Union +from typing_extensions import NotRequired, TypedDict + + +TypeEnum = Union[ + Literal[ + "ENUM", + "TEXT", + "INT", + "FLOAT", + "BOOL", + "TIMESTAMP", + "ARRAY", + ], + UnrecognizedStr, +] + + +SupportedOperator = Union[ + Literal[ + "lt", + "lte", + "gt", + "gte", + "startswith", + "istartswith", + "endswith", + "iendswith", + "contains", + "icontains", + "matches", + "notcontains", + "inotcontains", + "eq", + "neq", + "isnull", + "includes", + "excludes", + "len_eq", + ], + UnrecognizedStr, +] + + +class BaseFieldDefinitionTypedDict(TypedDict): + name: str + label: str + type: TypeEnum + supported_operators: List[SupportedOperator] + group: NotRequired[Nullable[str]] + + +class BaseFieldDefinition(BaseModel): + name: str + + label: str + + type: TypeEnum + + supported_operators: List[SupportedOperator] + + group: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["group"]) + nullable_fields = set(["group"]) + serialized = handler(self) + m = {} + + for n, f in 
type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/basetaskstatus.py b/src/mistralai/client/models/basetaskstatus.py new file mode 100644 index 00000000..ff2f1c63 --- /dev/null +++ b/src/mistralai/client/models/basetaskstatus.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 7b381554d5c7 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +BaseTaskStatus = Union[ + Literal[ + "RUNNING", + "COMPLETED", + "FAILED", + "CANCELED", + "TERMINATED", + "CONTINUED_AS_NEW", + "TIMED_OUT", + "UNKNOWN", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/blobresourcecontents.py b/src/mistralai/client/models/blobresourcecontents.py new file mode 100644 index 00000000..c2a55f95 --- /dev/null +++ b/src/mistralai/client/models/blobresourcecontents.py @@ -0,0 +1,87 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: fa924bc295ad + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict +from typing_extensions import Annotated, NotRequired, TypedDict + + +class BlobResourceContentsTypedDict(TypedDict): + r"""Binary contents of a resource.""" + + uri: str + blob: str + mime_type: NotRequired[Nullable[str]] + meta: NotRequired[Nullable[Dict[str, Any]]] + + +class BlobResourceContents(BaseModel): + r"""Binary contents of a resource.""" + + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + uri: str + + blob: str + + mime_type: Annotated[OptionalNullable[str], pydantic.Field(alias="mimeType")] = ( + UNSET + ) + + meta: Annotated[OptionalNullable[Dict[str, Any]], pydantic.Field(alias="_meta")] = ( + UNSET + ) + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["mimeType", "_meta"]) + nullable_fields = set(["mimeType", "_meta"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m + + +try: + 
BlobResourceContents.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/campaign.py b/src/mistralai/client/models/campaign.py new file mode 100644 index 00000000..a52bfcb9 --- /dev/null +++ b/src/mistralai/client/models/campaign.py @@ -0,0 +1,62 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c91d862fb405 + +from __future__ import annotations +from .filterpayload import FilterPayload, FilterPayloadTypedDict +from .judge import Judge, JudgeTypedDict +from datetime import datetime +from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing_extensions import TypedDict + + +class CampaignTypedDict(TypedDict): + id: str + created_at: datetime + updated_at: datetime + deleted_at: Nullable[datetime] + name: str + owner_id: str + workspace_id: str + description: str + max_nb_events: int + search_params: FilterPayloadTypedDict + judge: JudgeTypedDict + + +class Campaign(BaseModel): + id: str + + created_at: datetime + + updated_at: datetime + + deleted_at: Nullable[datetime] + + name: str + + owner_id: str + + workspace_id: str + + description: str + + max_nb_events: int + + search_params: FilterPayload + + judge: Judge + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + m[k] = val + + return m diff --git a/src/mistralai/client/models/chatcompletionevent.py b/src/mistralai/client/models/chatcompletionevent.py new file mode 100644 index 00000000..86253f5d --- /dev/null +++ b/src/mistralai/client/models/chatcompletionevent.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d85484d0205e + +from __future__ import annotations +from .chattranscriptionevent import ( + ChatTranscriptionEvent, + ChatTranscriptionEventTypedDict, +) +from datetime import datetime +from mistralai.client.types import BaseModel, Nullable +from typing import Any, Dict, List, Union +from typing_extensions import TypeAliasType, TypedDict + + +ChatCompletionEventExtraFieldsTypedDict = TypeAliasType( + "ChatCompletionEventExtraFieldsTypedDict", + Union[bool, int, float, str, datetime, List[str]], +) + + +ChatCompletionEventExtraFields = TypeAliasType( + "ChatCompletionEventExtraFields", Union[bool, int, float, str, datetime, List[str]] +) + + +class ChatCompletionEventTypedDict(TypedDict): + event_id: str + correlation_id: str + created_at: datetime + extra_fields: Dict[str, Nullable[ChatCompletionEventExtraFieldsTypedDict]] + nb_input_tokens: int + nb_output_tokens: int + enabled_tools: List[Dict[str, Any]] + request_messages: List[Dict[str, Any]] + response_messages: List[Dict[str, Any]] + nb_messages: int + chat_transcription_events: List[ChatTranscriptionEventTypedDict] + + +class ChatCompletionEvent(BaseModel): + event_id: str + + correlation_id: str + + created_at: datetime + + extra_fields: Dict[str, Nullable[ChatCompletionEventExtraFields]] + + nb_input_tokens: int + + nb_output_tokens: int + + enabled_tools: List[Dict[str, Any]] + + request_messages: List[Dict[str, Any]] + + response_messages: List[Dict[str, Any]] + + nb_messages: int + + chat_transcription_events: List[ChatTranscriptionEvent] diff --git a/src/mistralai/client/models/chatcompletioneventpreview.py b/src/mistralai/client/models/chatcompletioneventpreview.py new file mode 100644 index 00000000..e7fef9d0 --- /dev/null +++ b/src/mistralai/client/models/chatcompletioneventpreview.py @@ -0,0 +1,43 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 1cd843828e99 + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import BaseModel, Nullable +from typing import Dict, List, Union +from typing_extensions import TypeAliasType, TypedDict + + +ChatCompletionEventPreviewExtraFieldsTypedDict = TypeAliasType( + "ChatCompletionEventPreviewExtraFieldsTypedDict", + Union[bool, int, float, str, datetime, List[str]], +) + + +ChatCompletionEventPreviewExtraFields = TypeAliasType( + "ChatCompletionEventPreviewExtraFields", + Union[bool, int, float, str, datetime, List[str]], +) + + +class ChatCompletionEventPreviewTypedDict(TypedDict): + event_id: str + correlation_id: str + created_at: datetime + extra_fields: Dict[str, Nullable[ChatCompletionEventPreviewExtraFieldsTypedDict]] + nb_input_tokens: int + nb_output_tokens: int + + +class ChatCompletionEventPreview(BaseModel): + event_id: str + + correlation_id: str + + created_at: datetime + + extra_fields: Dict[str, Nullable[ChatCompletionEventPreviewExtraFields]] + + nb_input_tokens: int + + nb_output_tokens: int diff --git a/src/mistralai/client/models/chattranscriptionevent.py b/src/mistralai/client/models/chattranscriptionevent.py new file mode 100644 index 00000000..b23adf74 --- /dev/null +++ b/src/mistralai/client/models/chattranscriptionevent.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 8ca679b2c39a + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Any, Dict +from typing_extensions import TypedDict + + +class ChatTranscriptionEventTypedDict(TypedDict): + audio_url: str + model: str + response_message: Dict[str, Any] + + +class ChatTranscriptionEvent(BaseModel): + audio_url: str + + model: str + + response_message: Dict[str, Any] diff --git a/src/mistralai/client/models/connector.py b/src/mistralai/client/models/connector.py new file mode 100644 index 00000000..967e454f --- /dev/null +++ b/src/mistralai/client/models/connector.py @@ -0,0 +1,70 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1a4facac922d + +from __future__ import annotations +from .connectortool import ConnectorTool, ConnectorToolTypedDict +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List +from typing_extensions import NotRequired, TypedDict + + +class ConnectorTypedDict(TypedDict): + id: str + name: str + description: str + created_at: datetime + modified_at: datetime + server: NotRequired[Nullable[str]] + auth_type: NotRequired[Nullable[str]] + tools: NotRequired[Nullable[List[ConnectorToolTypedDict]]] + + +class Connector(BaseModel): + id: str + + name: str + + description: str + + created_at: datetime + + modified_at: datetime + + server: OptionalNullable[str] = UNSET + + auth_type: OptionalNullable[str] = UNSET + + tools: OptionalNullable[List[ConnectorTool]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["server", "auth_type", "tools"]) + nullable_fields = set(["server", "auth_type", "tools"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + 
is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/connector_call_tool_v1op.py b/src/mistralai/client/models/connector_call_tool_v1op.py new file mode 100644 index 00000000..df5783d0 --- /dev/null +++ b/src/mistralai/client/models/connector_call_tool_v1op.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 7948899b3068 + +from __future__ import annotations +from .connectorcalltoolrequest import ( + ConnectorCallToolRequest, + ConnectorCallToolRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class ConnectorCallToolV1RequestTypedDict(TypedDict): + tool_name: str + connector_id_or_name: str + connector_call_tool_request: ConnectorCallToolRequestTypedDict + + +class ConnectorCallToolV1Request(BaseModel): + tool_name: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + connector_id_or_name: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + connector_call_tool_request: Annotated[ + ConnectorCallToolRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/connector_delete_v1op.py b/src/mistralai/client/models/connector_delete_v1op.py new file mode 100644 index 00000000..74134361 --- /dev/null +++ b/src/mistralai/client/models/connector_delete_v1op.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a377930b1435 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class ConnectorDeleteV1RequestTypedDict(TypedDict): + connector_id: str + + +class ConnectorDeleteV1Request(BaseModel): + connector_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/connector_get_v1op.py b/src/mistralai/client/models/connector_get_v1op.py new file mode 100644 index 00000000..5770ee09 --- /dev/null +++ b/src/mistralai/client/models/connector_get_v1op.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 73ca3a446dcc + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ConnectorGetV1RequestTypedDict(TypedDict): + connector_id_or_name: str + fetch_customer_data: NotRequired[bool] + r"""Fetch the customer data associated with the connector (e.g. customer secrets / config).""" + fetch_connection_secrets: NotRequired[bool] + r"""Fetch the general connection secrets associated with the connector.""" + + +class ConnectorGetV1Request(BaseModel): + connector_id_or_name: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + fetch_customer_data: Annotated[ + Optional[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = False + r"""Fetch the customer data associated with the connector (e.g. 
customer secrets / config).""" + + fetch_connection_secrets: Annotated[ + Optional[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = False + r"""Fetch the general connection secrets associated with the connector.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["fetch_customer_data", "fetch_connection_secrets"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/connector_list_v1op.py b/src/mistralai/client/models/connector_list_v1op.py new file mode 100644 index 00000000..a89da710 --- /dev/null +++ b/src/mistralai/client/models/connector_list_v1op.py @@ -0,0 +1,67 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5ec0889995f5 + +from __future__ import annotations +from .connectorsqueryfilters import ( + ConnectorsQueryFilters, + ConnectorsQueryFiltersTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ConnectorListV1RequestTypedDict(TypedDict): + query_filters: NotRequired[ConnectorsQueryFiltersTypedDict] + cursor: NotRequired[Nullable[str]] + page_size: NotRequired[int] + + +class ConnectorListV1Request(BaseModel): + query_filters: Annotated[ + Optional[ConnectorsQueryFilters], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = None + + cursor: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + page_size: Annotated[ + Optional[int], 
+ FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["query_filters", "cursor", "page_size"]) + nullable_fields = set(["cursor"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/connector_update_v1op.py b/src/mistralai/client/models/connector_update_v1op.py new file mode 100644 index 00000000..6b00d0b0 --- /dev/null +++ b/src/mistralai/client/models/connector_update_v1op.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6f884d18ac56 + +from __future__ import annotations +from .updateconnectorrequest import ( + UpdateConnectorRequest, + UpdateConnectorRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class ConnectorUpdateV1RequestTypedDict(TypedDict): + connector_id: str + update_connector_request: UpdateConnectorRequestTypedDict + + +class ConnectorUpdateV1Request(BaseModel): + connector_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + update_connector_request: Annotated[ + UpdateConnectorRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/connectorcalltoolrequest.py b/src/mistralai/client/models/connectorcalltoolrequest.py new file mode 100644 index 00000000..4e9a4d45 --- /dev/null +++ 
b/src/mistralai/client/models/connectorcalltoolrequest.py @@ -0,0 +1,36 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 7dc7ec295301 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class ConnectorCallToolRequestTypedDict(TypedDict): + r"""Request body for calling an MCP tool.""" + + arguments: NotRequired[Dict[str, Any]] + + +class ConnectorCallToolRequest(BaseModel): + r"""Request body for calling an MCP tool.""" + + arguments: Optional[Dict[str, Any]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["arguments"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/connectorsqueryfilters.py b/src/mistralai/client/models/connectorsqueryfilters.py new file mode 100644 index 00000000..1aea18b0 --- /dev/null +++ b/src/mistralai/client/models/connectorsqueryfilters.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3b9fc81aa726 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ConnectorsQueryFiltersTypedDict(TypedDict): + active: NotRequired[Nullable[bool]] + r"""Filter for active connectors for a given user, workspace and organization.""" + fetch_connection_secrets: NotRequired[bool] + r"""Fetch connection secrets.""" + + +class ConnectorsQueryFilters(BaseModel): + active: Annotated[OptionalNullable[bool], FieldMetadata(query=True)] = UNSET + r"""Filter for active connectors for a given user, workspace and organization.""" + + fetch_connection_secrets: Annotated[Optional[bool], FieldMetadata(query=True)] = ( + False + ) + r"""Fetch connection secrets.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["active", "fetch_connection_secrets"]) + nullable_fields = set(["active"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/connectortool.py b/src/mistralai/client/models/connectortool.py new file mode 100644 index 00000000..26a8b7b7 --- /dev/null +++ b/src/mistralai/client/models/connectortool.py @@ -0,0 +1,83 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 41ca596b44f8 + +from __future__ import annotations +from .connectortoollocale import ConnectorToolLocale, ConnectorToolLocaleTypedDict +from .executionconfig import ExecutionConfig, ExecutionConfigTypedDict +from .resourcevisibility import ResourceVisibility +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict +from typing_extensions import NotRequired, TypedDict + + +class ConnectorToolTypedDict(TypedDict): + id: str + name: str + description: str + execution_config: Nullable[ExecutionConfigTypedDict] + visibility: ResourceVisibility + created_at: datetime + modified_at: datetime + system_prompt: NotRequired[Nullable[str]] + locale: NotRequired[Nullable[ConnectorToolLocaleTypedDict]] + jsonschema: NotRequired[Nullable[Dict[str, Any]]] + active: NotRequired[Nullable[bool]] + + +class ConnectorTool(BaseModel): + id: str + + name: str + + description: str + + execution_config: Nullable[ExecutionConfig] + + visibility: ResourceVisibility + + created_at: datetime + + modified_at: datetime + + system_prompt: OptionalNullable[str] = UNSET + + locale: OptionalNullable[ConnectorToolLocale] = UNSET + + jsonschema: OptionalNullable[Dict[str, Any]] = UNSET + + active: OptionalNullable[bool] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["system_prompt", "locale", "jsonschema", "active"]) + nullable_fields = set( + ["system_prompt", "locale", "jsonschema", "execution_config", "active"] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not 
in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/connectortoolcallmetadata.py b/src/mistralai/client/models/connectortoolcallmetadata.py new file mode 100644 index 00000000..9f3be6c4 --- /dev/null +++ b/src/mistralai/client/models/connectortoolcallmetadata.py @@ -0,0 +1,80 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2d27189e58e6 + +from __future__ import annotations +from .connectortoolresultmetadata import ( + ConnectorToolResultMetadata, + ConnectorToolResultMetadataTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict +from typing_extensions import NotRequired, TypedDict + + +class ConnectorToolCallMetadataTypedDict(TypedDict): + r"""Metadata wrapper for MCP tool call responses. + + Nests MCP-specific fields under `mcp_meta` to avoid collisions with other + metadata keys (e.g. `tool_call_result`) in Harmattan's streaming deltas. + """ + + mcp_meta: NotRequired[Nullable[ConnectorToolResultMetadataTypedDict]] + + +class ConnectorToolCallMetadata(BaseModel): + r"""Metadata wrapper for MCP tool call responses. + + Nests MCP-specific fields under `mcp_meta` to avoid collisions with other + metadata keys (e.g. `tool_call_result`) in Harmattan's streaming deltas. 
+ """ + + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + mcp_meta: OptionalNullable[ConnectorToolResultMetadata] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["mcp_meta"]) + nullable_fields = set(["mcp_meta"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/src/mistralai/client/models/connectortoolcallresponse.py b/src/mistralai/client/models/connectortoolcallresponse.py new file mode 100644 index 00000000..47071a94 --- /dev/null +++ b/src/mistralai/client/models/connectortoolcallresponse.py @@ -0,0 +1,149 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c4f7a932bd2e + +from __future__ import annotations +from .audiocontent import AudioContent, AudioContentTypedDict +from .connectortoolcallmetadata import ( + ConnectorToolCallMetadata, + ConnectorToolCallMetadataTypedDict, +) +from .embeddedresource import EmbeddedResource, EmbeddedResourceTypedDict +from .imagecontent import ImageContent, ImageContentTypedDict +from .resourcelink import ResourceLink, ResourceLinkTypedDict +from .textcontent import TextContent, TextContentTypedDict +from functools import partial +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils.unions import parse_open_union +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import BeforeValidator +from typing import Any, Dict, List, Literal, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ConnectorToolCallResponseContentTypedDict = TypeAliasType( + "ConnectorToolCallResponseContentTypedDict", + Union[ + TextContentTypedDict, + EmbeddedResourceTypedDict, + ImageContentTypedDict, + AudioContentTypedDict, + ResourceLinkTypedDict, + ], +) + + +class UnknownConnectorToolCallResponseContent(BaseModel): + r"""A ConnectorToolCallResponseContent variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CONNECTOR_TOOL_CALL_RESPONSE_CONTENT_VARIANTS: dict[str, Any] = { + "text": TextContent, + "image": ImageContent, + "audio": AudioContent, + "resource_link": ResourceLink, + "resource": EmbeddedResource, +} + + +ConnectorToolCallResponseContent = Annotated[ + Union[ + TextContent, + ImageContent, + AudioContent, + ResourceLink, + EmbeddedResource, + UnknownConnectorToolCallResponseContent, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_CONNECTOR_TOOL_CALL_RESPONSE_CONTENT_VARIANTS, + unknown_cls=UnknownConnectorToolCallResponseContent, + union_name="ConnectorToolCallResponseContent", + ) + ), +] + + +class ConnectorToolCallResponseTypedDict(TypedDict): + r"""Response from calling an MCP tool. + + We override mcp_types.CallToolResult because: + - Models only support `content`, not `structuredContent` at top level + - Downstream consumers (le-chat, etc.) need structuredContent/isError/_meta via metadata + + SYNC: Keep in sync with Harmattan (orchestrator) for harmonized tool result processing. + """ + + content: List[ConnectorToolCallResponseContentTypedDict] + metadata: NotRequired[Nullable[ConnectorToolCallMetadataTypedDict]] + + +class ConnectorToolCallResponse(BaseModel): + r"""Response from calling an MCP tool. + + We override mcp_types.CallToolResult because: + - Models only support `content`, not `structuredContent` at top level + - Downstream consumers (le-chat, etc.) need structuredContent/isError/_meta via metadata + + SYNC: Keep in sync with Harmattan (orchestrator) for harmonized tool result processing. 
+ """ + + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + content: List[ConnectorToolCallResponseContent] + + metadata: OptionalNullable[ConnectorToolCallMetadata] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["metadata"]) + nullable_fields = set(["metadata"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/src/mistralai/client/models/connectortoollocale.py b/src/mistralai/client/models/connectortoollocale.py new file mode 100644 index 00000000..ec1359c8 --- /dev/null +++ b/src/mistralai/client/models/connectortoollocale.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 247ebe411537 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Dict +from typing_extensions import TypedDict + + +class ConnectorToolLocaleTypedDict(TypedDict): + name: Dict[str, str] + description: Dict[str, str] + usage_sentence: Dict[str, str] + + +class ConnectorToolLocale(BaseModel): + name: Dict[str, str] + + description: Dict[str, str] + + usage_sentence: Dict[str, str] diff --git a/src/mistralai/client/models/connectortoolresultmetadata.py b/src/mistralai/client/models/connectortoolresultmetadata.py new file mode 100644 index 00000000..520fb99b --- /dev/null +++ b/src/mistralai/client/models/connectortoolresultmetadata.py @@ -0,0 +1,84 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 86730e16aa67 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ConnectorToolResultMetadataTypedDict(TypedDict): + r"""MCP-specific result metadata (isError, structuredContent, _meta).""" + + is_error: NotRequired[bool] + structured_content: NotRequired[Nullable[Dict[str, Any]]] + meta: NotRequired[Nullable[Dict[str, Any]]] + + +class ConnectorToolResultMetadata(BaseModel): + r"""MCP-specific result metadata (isError, structuredContent, _meta).""" + + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + is_error: Annotated[Optional[bool], pydantic.Field(alias="isError")] = False + + structured_content: Annotated[ + OptionalNullable[Dict[str, Any]], pydantic.Field(alias="structuredContent") + ] = UNSET + + meta: Annotated[OptionalNullable[Dict[str, Any]], 
pydantic.Field(alias="_meta")] = ( + UNSET + ) + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["isError", "structuredContent", "_meta"]) + nullable_fields = set(["structuredContent", "_meta"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m + + +try: + ConnectorToolResultMetadata.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/contentchunk.py b/src/mistralai/client/models/contentchunk.py index e3de7591..6d521e1a 100644 --- a/src/mistralai/client/models/contentchunk.py +++ b/src/mistralai/client/models/contentchunk.py @@ -3,6 +3,7 @@ from __future__ import annotations from .audiochunk import AudioChunk, AudioChunkTypedDict +from .audiourlchunk import AudioURLChunk, AudioURLChunkTypedDict from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .filechunk import FileChunk, FileChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict @@ -26,6 +27,7 @@ ReferenceChunkTypedDict, FileChunkTypedDict, AudioChunkTypedDict, + AudioURLChunkTypedDict, DocumentURLChunkTypedDict, ThinkChunkTypedDict, ], @@ -50,6 +52,7 @@ class UnknownContentChunk(BaseModel): "file": FileChunk, "thinking": ThinkChunk, "input_audio": AudioChunk, + "audio_url": AudioURLChunk, } @@ 
-62,6 +65,7 @@ class UnknownContentChunk(BaseModel): FileChunk, ThinkChunk, AudioChunk, + AudioURLChunk, UnknownContentChunk, ], BeforeValidator( diff --git a/src/mistralai/client/models/conversationpayload.py b/src/mistralai/client/models/conversationpayload.py new file mode 100644 index 00000000..6bfc5894 --- /dev/null +++ b/src/mistralai/client/models/conversationpayload.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4581218c84c1 + +from __future__ import annotations +from mistralai.client.types import BaseModel +import pydantic +from pydantic import ConfigDict +from typing import Any, Dict, List +from typing_extensions import TypedDict + + +class ConversationPayloadTypedDict(TypedDict): + messages: List[Dict[str, Any]] + + +class ConversationPayload(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + messages: List[Dict[str, Any]] + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] diff --git a/src/mistralai/client/models/conversationrequest.py b/src/mistralai/client/models/conversationrequest.py index 83d599eb..cb5dc6a5 100644 --- a/src/mistralai/client/models/conversationrequest.py +++ b/src/mistralai/client/models/conversationrequest.py @@ -7,6 +7,7 @@ from .conversationinputs import ConversationInputs, ConversationInputsTypedDict from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict from .functiontool import FunctionTool, FunctionToolTypedDict +from .guardrailconfig import GuardrailConfig, GuardrailConfigTypedDict from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict from .websearchpremiumtool import 
WebSearchPremiumTool, WebSearchPremiumToolTypedDict from .websearchtool import WebSearchTool, WebSearchToolTypedDict @@ -73,6 +74,7 @@ class ConversationRequestTypedDict(TypedDict): tools: NotRequired[List[ConversationRequestToolTypedDict]] r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] + guardrails: NotRequired[Nullable[List[GuardrailConfigTypedDict]]] name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] metadata: NotRequired[Nullable[Dict[str, Any]]] @@ -97,6 +99,8 @@ class ConversationRequest(BaseModel): completion_args: OptionalNullable[CompletionArgs] = UNSET + guardrails: OptionalNullable[List[GuardrailConfig]] = UNSET + name: OptionalNullable[str] = UNSET description: OptionalNullable[str] = UNSET @@ -119,6 +123,7 @@ def serialize_model(self, handler): "instructions", "tools", "completion_args", + "guardrails", "name", "description", "metadata", @@ -133,6 +138,7 @@ def serialize_model(self, handler): "handoff_execution", "instructions", "completion_args", + "guardrails", "name", "description", "metadata", diff --git a/src/mistralai/client/models/conversationresponse.py b/src/mistralai/client/models/conversationresponse.py index f6c10969..2fc8504d 100644 --- a/src/mistralai/client/models/conversationresponse.py +++ b/src/mistralai/client/models/conversationresponse.py @@ -7,17 +7,23 @@ from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict -from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai.client.utils import validate_const import pydantic from pydantic import model_serializer from pydantic.functional_validators import 
AfterValidator from typing import List, Literal, Optional, Union -from typing_extensions import Annotated, TypeAliasType, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -OutputTypedDict = TypeAliasType( - "OutputTypedDict", +ConversationResponseOutputTypedDict = TypeAliasType( + "ConversationResponseOutputTypedDict", Union[ MessageOutputEntryTypedDict, AgentHandoffEntryTypedDict, @@ -27,19 +33,28 @@ ) -Output = TypeAliasType( - "Output", +ConversationResponseOutput = TypeAliasType( + "ConversationResponseOutput", Union[MessageOutputEntry, AgentHandoffEntry, ToolExecutionEntry, FunctionCallEntry], ) +class GuardrailTypedDict(TypedDict): + pass + + +class Guardrail(BaseModel): + pass + + class ConversationResponseTypedDict(TypedDict): r"""The response after appending new entries to the conversation.""" conversation_id: str - outputs: List[OutputTypedDict] + outputs: List[ConversationResponseOutputTypedDict] usage: ConversationUsageInfoTypedDict object: Literal["conversation.response"] + guardrails: NotRequired[Nullable[List[GuardrailTypedDict]]] class ConversationResponse(BaseModel): @@ -47,7 +62,7 @@ class ConversationResponse(BaseModel): conversation_id: str - outputs: List[Output] + outputs: List[ConversationResponseOutput] usage: ConversationUsageInfo @@ -59,18 +74,29 @@ class ConversationResponse(BaseModel): pydantic.Field(alias="object"), ] = "conversation.response" + guardrails: OptionalNullable[List[Guardrail]] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = set(["object"]) + optional_fields = set(["object", "guardrails"]) + nullable_fields = set(["guardrails"]) serialized = handler(self) m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) if val != UNSET_SENTINEL: - if val is not 
None or k not in optional_fields: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): m[k] = val return m diff --git a/src/mistralai/client/models/conversationrestartrequest.py b/src/mistralai/client/models/conversationrestartrequest.py index 7ae16aff..d5079689 100644 --- a/src/mistralai/client/models/conversationrestartrequest.py +++ b/src/mistralai/client/models/conversationrestartrequest.py @@ -4,6 +4,7 @@ from __future__ import annotations from .completionargs import CompletionArgs, CompletionArgsTypedDict from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from .guardrailconfig import GuardrailConfig, GuardrailConfigTypedDict from mistralai.client.types import ( BaseModel, Nullable, @@ -12,7 +13,7 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import Any, Dict, Literal, Optional, Union +from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import NotRequired, TypeAliasType, TypedDict @@ -45,6 +46,7 @@ class ConversationRestartRequestTypedDict(TypedDict): handoff_execution: NotRequired[ConversationRestartRequestHandoffExecution] completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" + guardrails: NotRequired[Nullable[List[GuardrailConfigTypedDict]]] metadata: NotRequired[Nullable[Dict[str, Any]]] r"""Custom metadata for the conversation.""" agent_version: NotRequired[ @@ -70,6 +72,8 @@ class ConversationRestartRequest(BaseModel): completion_args: Optional[CompletionArgs] = None r"""White-listed arguments from the completion API""" + guardrails: OptionalNullable[List[GuardrailConfig]] = UNSET + metadata: OptionalNullable[Dict[str, Any]] = UNSET r"""Custom metadata for the conversation.""" @@ -85,11 +89,12 @@ def serialize_model(self, handler): "store", "handoff_execution", "completion_args", + "guardrails", "metadata", "agent_version", ] ) - nullable_fields = set(["metadata", 
"agent_version"]) + nullable_fields = set(["guardrails", "metadata", "agent_version"]) serialized = handler(self) m = {} diff --git a/src/mistralai/client/models/conversationrestartstreamrequest.py b/src/mistralai/client/models/conversationrestartstreamrequest.py index 0e247261..5cea4146 100644 --- a/src/mistralai/client/models/conversationrestartstreamrequest.py +++ b/src/mistralai/client/models/conversationrestartstreamrequest.py @@ -4,6 +4,7 @@ from __future__ import annotations from .completionargs import CompletionArgs, CompletionArgsTypedDict from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from .guardrailconfig import GuardrailConfig, GuardrailConfigTypedDict from mistralai.client.types import ( BaseModel, Nullable, @@ -12,7 +13,7 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import Any, Dict, Literal, Optional, Union +from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import NotRequired, TypeAliasType, TypedDict @@ -45,6 +46,7 @@ class ConversationRestartStreamRequestTypedDict(TypedDict): handoff_execution: NotRequired[ConversationRestartStreamRequestHandoffExecution] completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" + guardrails: NotRequired[Nullable[List[GuardrailConfigTypedDict]]] metadata: NotRequired[Nullable[Dict[str, Any]]] r"""Custom metadata for the conversation.""" agent_version: NotRequired[ @@ -72,6 +74,8 @@ class ConversationRestartStreamRequest(BaseModel): completion_args: Optional[CompletionArgs] = None r"""White-listed arguments from the completion API""" + guardrails: OptionalNullable[List[GuardrailConfig]] = UNSET + metadata: OptionalNullable[Dict[str, Any]] = UNSET r"""Custom metadata for the conversation.""" @@ -89,11 +93,12 @@ def serialize_model(self, handler): "store", "handoff_execution", "completion_args", + "guardrails", "metadata", "agent_version", ] ) - nullable_fields = 
set(["metadata", "agent_version"]) + nullable_fields = set(["guardrails", "metadata", "agent_version"]) serialized = handler(self) m = {} diff --git a/src/mistralai/client/models/conversationsource.py b/src/mistralai/client/models/conversationsource.py new file mode 100644 index 00000000..a3b93b61 --- /dev/null +++ b/src/mistralai/client/models/conversationsource.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 24d6a0861d4b + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ConversationSource = Union[ + Literal[ + "EXPLORER", + "UPLOADED_FILE", + "DIRECT_INPUT", + "PLAYGROUND", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/conversationstreamrequest.py b/src/mistralai/client/models/conversationstreamrequest.py index a20dccae..2559a887 100644 --- a/src/mistralai/client/models/conversationstreamrequest.py +++ b/src/mistralai/client/models/conversationstreamrequest.py @@ -7,6 +7,7 @@ from .conversationinputs import ConversationInputs, ConversationInputsTypedDict from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict from .functiontool import FunctionTool, FunctionToolTypedDict +from .guardrailconfig import GuardrailConfig, GuardrailConfigTypedDict from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict from .websearchtool import WebSearchTool, WebSearchToolTypedDict @@ -73,6 +74,7 @@ class ConversationStreamRequestTypedDict(TypedDict): tools: NotRequired[List[ConversationStreamRequestToolTypedDict]] r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] + guardrails: NotRequired[Nullable[List[GuardrailConfigTypedDict]]] name: NotRequired[Nullable[str]] description: 
NotRequired[Nullable[str]] metadata: NotRequired[Nullable[Dict[str, Any]]] @@ -99,6 +101,8 @@ class ConversationStreamRequest(BaseModel): completion_args: OptionalNullable[CompletionArgs] = UNSET + guardrails: OptionalNullable[List[GuardrailConfig]] = UNSET + name: OptionalNullable[str] = UNSET description: OptionalNullable[str] = UNSET @@ -121,6 +125,7 @@ def serialize_model(self, handler): "instructions", "tools", "completion_args", + "guardrails", "name", "description", "metadata", @@ -135,6 +140,7 @@ def serialize_model(self, handler): "handoff_execution", "instructions", "completion_args", + "guardrails", "name", "description", "metadata", diff --git a/src/mistralai/client/models/conversationthinkchunk.py b/src/mistralai/client/models/conversationthinkchunk.py deleted file mode 100644 index e0e172e3..00000000 --- a/src/mistralai/client/models/conversationthinkchunk.py +++ /dev/null @@ -1,65 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 77e59cde5c0f - -from __future__ import annotations -from .textchunk import TextChunk, TextChunkTypedDict -from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict -from mistralai.client.types import BaseModel, UNSET_SENTINEL -from mistralai.client.utils import validate_const -import pydantic -from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -ConversationThinkChunkThinkingTypedDict = TypeAliasType( - "ConversationThinkChunkThinkingTypedDict", - Union[TextChunkTypedDict, ToolReferenceChunkTypedDict], -) - - -ConversationThinkChunkThinking = TypeAliasType( - "ConversationThinkChunkThinking", Union[TextChunk, ToolReferenceChunk] -) - - -class ConversationThinkChunkTypedDict(TypedDict): - thinking: List[ConversationThinkChunkThinkingTypedDict] - type: Literal["thinking"] - 
closed: NotRequired[bool] - - -class ConversationThinkChunk(BaseModel): - thinking: List[ConversationThinkChunkThinking] - - type: Annotated[ - Annotated[ - Optional[Literal["thinking"]], AfterValidator(validate_const("thinking")) - ], - pydantic.Field(alias="type"), - ] = "thinking" - - closed: Optional[bool] = True - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = set(["type", "closed"]) - serialized = handler(self) - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - - if val != UNSET_SENTINEL: - if val is not None or k not in optional_fields: - m[k] = val - - return m - - -try: - ConversationThinkChunk.model_rebuild() -except NameError: - pass diff --git a/src/mistralai/client/models/create_dataset_record_v1_observability_datasets_dataset_id_records_postop.py b/src/mistralai/client/models/create_dataset_record_v1_observability_datasets_dataset_id_records_postop.py new file mode 100644 index 00000000..03156fa3 --- /dev/null +++ b/src/mistralai/client/models/create_dataset_record_v1_observability_datasets_dataset_id_records_postop.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 1ddc53a46c74 + +from __future__ import annotations +from .createdatasetrecordrequest import ( + CreateDatasetRecordRequest, + CreateDatasetRecordRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class CreateDatasetRecordV1ObservabilityDatasetsDatasetIDRecordsPostRequestTypedDict( + TypedDict +): + dataset_id: str + create_dataset_record_request: CreateDatasetRecordRequestTypedDict + + +class CreateDatasetRecordV1ObservabilityDatasetsDatasetIDRecordsPostRequest(BaseModel): + dataset_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + create_dataset_record_request: Annotated[ + CreateDatasetRecordRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/createagentrequest.py b/src/mistralai/client/models/createagentrequest.py index 54b09880..8acbbd1b 100644 --- a/src/mistralai/client/models/createagentrequest.py +++ b/src/mistralai/client/models/createagentrequest.py @@ -6,6 +6,7 @@ from .completionargs import CompletionArgs, CompletionArgsTypedDict from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict from .functiontool import FunctionTool, FunctionToolTypedDict +from .guardrailconfig import GuardrailConfig, GuardrailConfigTypedDict from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict from .websearchtool import WebSearchTool, WebSearchToolTypedDict @@ -56,6 +57,7 @@ class CreateAgentRequestTypedDict(TypedDict): r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" + guardrails: 
NotRequired[Nullable[List[GuardrailConfigTypedDict]]] description: NotRequired[Nullable[str]] handoffs: NotRequired[Nullable[List[str]]] metadata: NotRequired[Nullable[Dict[str, Any]]] @@ -76,6 +78,8 @@ class CreateAgentRequest(BaseModel): completion_args: Optional[CompletionArgs] = None r"""White-listed arguments from the completion API""" + guardrails: OptionalNullable[List[GuardrailConfig]] = UNSET + description: OptionalNullable[str] = UNSET handoffs: OptionalNullable[List[str]] = UNSET @@ -91,6 +95,7 @@ def serialize_model(self, handler): "instructions", "tools", "completion_args", + "guardrails", "description", "handoffs", "metadata", @@ -98,7 +103,14 @@ def serialize_model(self, handler): ] ) nullable_fields = set( - ["instructions", "description", "handoffs", "metadata", "version_message"] + [ + "instructions", + "guardrails", + "description", + "handoffs", + "metadata", + "version_message", + ] ) serialized = handler(self) m = {} diff --git a/src/mistralai/client/models/createcampaignrequest.py b/src/mistralai/client/models/createcampaignrequest.py new file mode 100644 index 00000000..b3957df9 --- /dev/null +++ b/src/mistralai/client/models/createcampaignrequest.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 60012b559aee + +from __future__ import annotations +from .filterpayload import FilterPayload, FilterPayloadTypedDict +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class CreateCampaignRequestTypedDict(TypedDict): + search_params: FilterPayloadTypedDict + judge_id: str + name: str + description: str + max_nb_events: int + + +class CreateCampaignRequest(BaseModel): + search_params: FilterPayload + + judge_id: str + + name: str + + description: str + + max_nb_events: int diff --git a/src/mistralai/client/models/createconnectorrequest.py b/src/mistralai/client/models/createconnectorrequest.py new file mode 100644 index 00000000..8fd3d3ab --- /dev/null +++ b/src/mistralai/client/models/createconnectorrequest.py @@ -0,0 +1,86 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3da192d6491a + +from __future__ import annotations +from .authdata import AuthData, AuthDataTypedDict +from .resourcevisibility import ResourceVisibility +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class CreateConnectorRequestTypedDict(TypedDict): + name: str + r"""The name of the connector. 
Should be 64 char length maximum, alphanumeric, only underscores/dashes.""" + description: str + r"""The description of the connector.""" + server: str + r"""The url of the MCP server.""" + icon_url: NotRequired[Nullable[str]] + r"""The optional url of the icon you want to associate to the connector.""" + visibility: NotRequired[ResourceVisibility] + headers: NotRequired[Nullable[Dict[str, Any]]] + r"""Optional organization-level headers to be sent with the request to the mcp server.""" + auth_data: NotRequired[Nullable[AuthDataTypedDict]] + r"""Optional additional authentication data for the connector.""" + system_prompt: NotRequired[Nullable[str]] + r"""Optional system prompt for the connector.""" + + +class CreateConnectorRequest(BaseModel): + name: str + r"""The name of the connector. Should be 64 char length maximum, alphanumeric, only underscores/dashes.""" + + description: str + r"""The description of the connector.""" + + server: str + r"""The url of the MCP server.""" + + icon_url: OptionalNullable[str] = UNSET + r"""The optional url of the icon you want to associate to the connector.""" + + visibility: Optional[ResourceVisibility] = None + + headers: OptionalNullable[Dict[str, Any]] = UNSET + r"""Optional organization-level headers to be sent with the request to the mcp server.""" + + auth_data: OptionalNullable[AuthData] = UNSET + r"""Optional additional authentication data for the connector.""" + + system_prompt: OptionalNullable[str] = UNSET + r"""Optional system prompt for the connector.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + ["icon_url", "visibility", "headers", "auth_data", "system_prompt"] + ) + nullable_fields = set(["icon_url", "headers", "auth_data", "system_prompt"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and 
(self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/createdatasetrecordrequest.py b/src/mistralai/client/models/createdatasetrecordrequest.py new file mode 100644 index 00000000..6fd2bf96 --- /dev/null +++ b/src/mistralai/client/models/createdatasetrecordrequest.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 9455e38a8c31 + +from __future__ import annotations +from .conversationpayload import ConversationPayload, ConversationPayloadTypedDict +from mistralai.client.types import BaseModel +from typing import Any, Dict +from typing_extensions import TypedDict + + +class CreateDatasetRecordRequestTypedDict(TypedDict): + payload: ConversationPayloadTypedDict + properties: Dict[str, Any] + + +class CreateDatasetRecordRequest(BaseModel): + payload: ConversationPayload + + properties: Dict[str, Any] diff --git a/src/mistralai/client/models/createdatasetrequest.py b/src/mistralai/client/models/createdatasetrequest.py new file mode 100644 index 00000000..ecffc52a --- /dev/null +++ b/src/mistralai/client/models/createdatasetrequest.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 046a094d3ef9 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class CreateDatasetRequestTypedDict(TypedDict): + name: str + description: str + + +class CreateDatasetRequest(BaseModel): + name: str + + description: str diff --git a/src/mistralai/client/models/createfileresponse.py b/src/mistralai/client/models/createfileresponse.py index 76821280..1f03b275 100644 --- a/src/mistralai/client/models/createfileresponse.py +++ b/src/mistralai/client/models/createfileresponse.py @@ -3,6 +3,7 @@ from __future__ import annotations from .filepurpose import FilePurpose +from .filevisibility import FileVisibility from .sampletype import SampleType from .source import Source from mistralai.client.types import ( @@ -34,6 +35,8 @@ class CreateFileResponseTypedDict(TypedDict): num_lines: NotRequired[Nullable[int]] mimetype: NotRequired[Nullable[str]] signature: NotRequired[Nullable[str]] + expires_at: NotRequired[Nullable[int]] + visibility: NotRequired[Nullable[FileVisibility]] class CreateFileResponse(BaseModel): @@ -64,10 +67,18 @@ class CreateFileResponse(BaseModel): signature: OptionalNullable[str] = UNSET + expires_at: OptionalNullable[int] = UNSET + + visibility: OptionalNullable[FileVisibility] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = set(["num_lines", "mimetype", "signature"]) - nullable_fields = set(["num_lines", "mimetype", "signature"]) + optional_fields = set( + ["num_lines", "mimetype", "signature", "expires_at", "visibility"] + ) + nullable_fields = set( + ["num_lines", "mimetype", "signature", "expires_at", "visibility"] + ) serialized = handler(self) m = {} diff --git a/src/mistralai/client/models/createfinetuningjobrequest.py b/src/mistralai/client/models/createfinetuningjobrequest.py index e328d944..be97bd07 100644 --- a/src/mistralai/client/models/createfinetuningjobrequest.py +++ 
b/src/mistralai/client/models/createfinetuningjobrequest.py @@ -11,8 +11,11 @@ CompletionTrainingParameters, CompletionTrainingParametersTypedDict, ) +from .creategithubrepositoryrequest import ( + CreateGithubRepositoryRequest, + CreateGithubRepositoryRequestTypedDict, +) from .finetuneablemodeltype import FineTuneableModelType -from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict from .trainingfile import TrainingFile, TrainingFileTypedDict from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict from mistralai.client.types import ( @@ -44,10 +47,10 @@ ) -CreateFineTuningJobRequestRepositoryTypedDict = GithubRepositoryInTypedDict +CreateFineTuningJobRequestRepositoryTypedDict = CreateGithubRepositoryRequestTypedDict -CreateFineTuningJobRequestRepository = GithubRepositoryIn +CreateFineTuningJobRequestRepository = CreateGithubRepositoryRequest class CreateFineTuningJobRequestTypedDict(TypedDict): diff --git a/src/mistralai/client/models/githubrepositoryin.py b/src/mistralai/client/models/creategithubrepositoryrequest.py similarity index 90% rename from src/mistralai/client/models/githubrepositoryin.py rename to src/mistralai/client/models/creategithubrepositoryrequest.py index 38bcc208..61ffde27 100644 --- a/src/mistralai/client/models/githubrepositoryin.py +++ b/src/mistralai/client/models/creategithubrepositoryrequest.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: eef26fbd2876 +# @generated-id: 0814afcf63bb from __future__ import annotations from mistralai.client.types import ( @@ -17,7 +17,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class GithubRepositoryInTypedDict(TypedDict): +class CreateGithubRepositoryRequestTypedDict(TypedDict): name: str owner: str token: str @@ -26,7 +26,7 @@ class GithubRepositoryInTypedDict(TypedDict): weight: NotRequired[float] -class GithubRepositoryIn(BaseModel): +class CreateGithubRepositoryRequest(BaseModel): name: str owner: str @@ -69,6 +69,6 @@ def serialize_model(self, handler): try: - GithubRepositoryIn.model_rebuild() + CreateGithubRepositoryRequest.model_rebuild() except NameError: pass diff --git a/src/mistralai/client/models/createjudgerequest.py b/src/mistralai/client/models/createjudgerequest.py new file mode 100644 index 00000000..7c30aa5f --- /dev/null +++ b/src/mistralai/client/models/createjudgerequest.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e606837a626e + +from __future__ import annotations +from .judgeclassificationoutput import ( + JudgeClassificationOutput, + JudgeClassificationOutputTypedDict, +) +from .judgeregressionoutput import JudgeRegressionOutput, JudgeRegressionOutputTypedDict +from mistralai.client.types import BaseModel +from pydantic import Field +from typing import List, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +CreateJudgeRequestOutputTypedDict = TypeAliasType( + "CreateJudgeRequestOutputTypedDict", + Union[JudgeClassificationOutputTypedDict, JudgeRegressionOutputTypedDict], +) + + +CreateJudgeRequestOutput = Annotated[ + Union[JudgeClassificationOutput, JudgeRegressionOutput], Field(discriminator="type") +] + + +class CreateJudgeRequestTypedDict(TypedDict): + name: str + description: str + model_name: str + output: CreateJudgeRequestOutputTypedDict + instructions: str + tools: List[str] + + +class CreateJudgeRequest(BaseModel): + name: str + + description: str + + model_name: str + + output: CreateJudgeRequestOutput + + instructions: str + + tools: List[str] diff --git a/src/mistralai/client/models/dataset.py b/src/mistralai/client/models/dataset.py new file mode 100644 index 00000000..2acd911d --- /dev/null +++ b/src/mistralai/client/models/dataset.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: cbf14670ee00 + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing_extensions import TypedDict + + +class DatasetTypedDict(TypedDict): + id: str + created_at: datetime + updated_at: datetime + deleted_at: Nullable[datetime] + name: str + description: str + owner_id: str + workspace_id: str + + +class Dataset(BaseModel): + id: str + + created_at: datetime + + updated_at: datetime + + deleted_at: Nullable[datetime] + + name: str + + description: str + + owner_id: str + + workspace_id: str + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + m[k] = val + + return m diff --git a/src/mistralai/client/models/datasetimporttask.py b/src/mistralai/client/models/datasetimporttask.py new file mode 100644 index 00000000..4ef52e9e --- /dev/null +++ b/src/mistralai/client/models/datasetimporttask.py @@ -0,0 +1,75 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c20f7db9633c + +from __future__ import annotations +from .basetaskstatus import BaseTaskStatus +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class DatasetImportTaskTypedDict(TypedDict): + id: str + created_at: datetime + updated_at: datetime + deleted_at: Nullable[datetime] + creator_id: str + dataset_id: str + workspace_id: str + status: BaseTaskStatus + progress: NotRequired[Nullable[int]] + message: NotRequired[Nullable[str]] + + +class DatasetImportTask(BaseModel): + id: str + + created_at: datetime + + updated_at: datetime + + deleted_at: Nullable[datetime] + + creator_id: str + + dataset_id: str + + workspace_id: str + + status: BaseTaskStatus + + progress: OptionalNullable[int] = UNSET + + message: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["progress", "message"]) + nullable_fields = set(["deleted_at", "progress", "message"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/datasetpreview.py b/src/mistralai/client/models/datasetpreview.py new file mode 100644 index 00000000..55493568 --- /dev/null +++ b/src/mistralai/client/models/datasetpreview.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 128c29db3f37 + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing_extensions import TypedDict + + +class DatasetPreviewTypedDict(TypedDict): + id: str + created_at: datetime + updated_at: datetime + deleted_at: Nullable[datetime] + name: str + description: str + owner_id: str + workspace_id: str + + +class DatasetPreview(BaseModel): + id: str + + created_at: datetime + + updated_at: datetime + + deleted_at: Nullable[datetime] + + name: str + + description: str + + owner_id: str + + workspace_id: str + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + m[k] = val + + return m diff --git a/src/mistralai/client/models/datasetrecord.py b/src/mistralai/client/models/datasetrecord.py new file mode 100644 index 00000000..9c9f42e5 --- /dev/null +++ b/src/mistralai/client/models/datasetrecord.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 87ddebf2a0ce + +from __future__ import annotations +from .conversationpayload import ConversationPayload, ConversationPayloadTypedDict +from .conversationsource import ConversationSource +from datetime import datetime +from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Any, Dict +from typing_extensions import TypedDict + + +class DatasetRecordTypedDict(TypedDict): + id: str + created_at: datetime + updated_at: datetime + deleted_at: Nullable[datetime] + dataset_id: str + payload: ConversationPayloadTypedDict + properties: Dict[str, Any] + source: ConversationSource + + +class DatasetRecord(BaseModel): + id: str + + created_at: datetime + + updated_at: datetime + + deleted_at: Nullable[datetime] + + dataset_id: str + + payload: ConversationPayload + + properties: Dict[str, Any] + + source: ConversationSource + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + m[k] = val + + return m diff --git a/src/mistralai/client/models/delete_campaign_v1_observability_campaigns_campaign_id_deleteop.py b/src/mistralai/client/models/delete_campaign_v1_observability_campaigns_campaign_id_deleteop.py new file mode 100644 index 00000000..f33a3747 --- /dev/null +++ b/src/mistralai/client/models/delete_campaign_v1_observability_campaigns_campaign_id_deleteop.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3d1cd35fecc6 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class DeleteCampaignV1ObservabilityCampaignsCampaignIDDeleteRequestTypedDict(TypedDict): + campaign_id: str + + +class DeleteCampaignV1ObservabilityCampaignsCampaignIDDeleteRequest(BaseModel): + campaign_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/delete_dataset_record_v1_observability_dataset_records_dataset_record_id_deleteop.py b/src/mistralai/client/models/delete_dataset_record_v1_observability_dataset_records_dataset_record_id_deleteop.py new file mode 100644 index 00000000..ffeefca6 --- /dev/null +++ b/src/mistralai/client/models/delete_dataset_record_v1_observability_dataset_records_dataset_record_id_deleteop.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 66b2054bda8c + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class DeleteDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDDeleteRequestTypedDict( + TypedDict +): + dataset_record_id: str + + +class DeleteDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDDeleteRequest( + BaseModel +): + dataset_record_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/delete_dataset_v1_observability_datasets_dataset_id_deleteop.py b/src/mistralai/client/models/delete_dataset_v1_observability_datasets_dataset_id_deleteop.py new file mode 100644 index 00000000..af869770 --- /dev/null +++ b/src/mistralai/client/models/delete_dataset_v1_observability_datasets_dataset_id_deleteop.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 446419cd07d2 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class DeleteDatasetV1ObservabilityDatasetsDatasetIDDeleteRequestTypedDict(TypedDict): + dataset_id: str + + +class DeleteDatasetV1ObservabilityDatasetsDatasetIDDeleteRequest(BaseModel): + dataset_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/delete_judge_v1_observability_judges_judge_id_deleteop.py b/src/mistralai/client/models/delete_judge_v1_observability_judges_judge_id_deleteop.py new file mode 100644 index 00000000..c2f7b797 --- /dev/null +++ b/src/mistralai/client/models/delete_judge_v1_observability_judges_judge_id_deleteop.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 415724e139bd + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class DeleteJudgeV1ObservabilityJudgesJudgeIDDeleteRequestTypedDict(TypedDict): + judge_id: str + + +class DeleteJudgeV1ObservabilityJudgesJudgeIDDeleteRequest(BaseModel): + judge_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/deletedatasetrecordsrequest.py b/src/mistralai/client/models/deletedatasetrecordsrequest.py new file mode 100644 index 00000000..8c33a963 --- /dev/null +++ b/src/mistralai/client/models/deletedatasetrecordsrequest.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e7ef16596e54 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class DeleteDatasetRecordsRequestTypedDict(TypedDict): + dataset_record_ids: List[str] + + +class DeleteDatasetRecordsRequest(BaseModel): + dataset_record_ids: List[str] diff --git a/src/mistralai/client/models/deletemodelout.py b/src/mistralai/client/models/deletemodelresponse.py similarity index 90% rename from src/mistralai/client/models/deletemodelout.py rename to src/mistralai/client/models/deletemodelresponse.py index fa0c20a4..fac884bc 100644 --- a/src/mistralai/client/models/deletemodelout.py +++ b/src/mistralai/client/models/deletemodelresponse.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: ef6a1671c739 +# @generated-id: 8957175b7482 from __future__ import annotations from mistralai.client.types import BaseModel, UNSET_SENTINEL @@ -8,7 +8,7 @@ from typing_extensions import NotRequired, TypedDict -class DeleteModelOutTypedDict(TypedDict): +class DeleteModelResponseTypedDict(TypedDict): id: str r"""The ID of the deleted model.""" object: NotRequired[str] @@ -17,7 +17,7 @@ class DeleteModelOutTypedDict(TypedDict): r"""The deletion status""" -class DeleteModelOut(BaseModel): +class DeleteModelResponse(BaseModel): id: str r"""The ID of the deleted model.""" diff --git a/src/mistralai/client/models/embeddedresource.py b/src/mistralai/client/models/embeddedresource.py new file mode 100644 index 00000000..54cb17fd --- /dev/null +++ b/src/mistralai/client/models/embeddedresource.py @@ -0,0 +1,110 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 94a23f656f72 + +from __future__ import annotations +from .annotations import Annotations, AnnotationsTypedDict +from .blobresourcecontents import BlobResourceContents, BlobResourceContentsTypedDict +from .textresourcecontents import TextResourceContents, TextResourceContentsTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, Literal, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ResourceTypedDict = TypeAliasType( + "ResourceTypedDict", + Union[TextResourceContentsTypedDict, BlobResourceContentsTypedDict], +) + + +Resource = TypeAliasType("Resource", Union[TextResourceContents, BlobResourceContents]) + + +class EmbeddedResourceTypedDict(TypedDict): + r"""The contents of a resource, embedded into a prompt or tool call result. 
+ + It is up to the client how best to render embedded resources for the benefit + of the LLM and/or the user. + """ + + resource: ResourceTypedDict + type: Literal["resource"] + annotations: NotRequired[Nullable[AnnotationsTypedDict]] + meta: NotRequired[Nullable[Dict[str, Any]]] + + +class EmbeddedResource(BaseModel): + r"""The contents of a resource, embedded into a prompt or tool call result. + + It is up to the client how best to render embedded resources for the benefit + of the LLM and/or the user. + """ + + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + resource: Resource + + type: Annotated[ + Annotated[Literal["resource"], AfterValidator(validate_const("resource"))], + pydantic.Field(alias="type"), + ] = "resource" + + annotations: OptionalNullable[Annotations] = UNSET + + meta: Annotated[OptionalNullable[Dict[str, Any]], pydantic.Field(alias="_meta")] = ( + UNSET + ) + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["annotations", "_meta"]) + nullable_fields = set(["annotations", "_meta"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m + + +try: + EmbeddedResource.model_rebuild() +except NameError: 
+ pass diff --git a/src/mistralai/client/models/executionconfig.py b/src/mistralai/client/models/executionconfig.py new file mode 100644 index 00000000..56b58ae3 --- /dev/null +++ b/src/mistralai/client/models/executionconfig.py @@ -0,0 +1,40 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 14518c40a13b + +from __future__ import annotations +from mistralai.client.types import BaseModel +import pydantic +from pydantic import ConfigDict +from typing import Any, Dict +from typing_extensions import TypedDict + + +class ExecutionConfigTypedDict(TypedDict): + r"""Not typed since mcp config can changed / not stable + we allow all extra fields and this is a dict + TODO: once mcp is stable, we need to type this + """ + + type: str + + +class ExecutionConfig(BaseModel): + r"""Not typed since mcp config can changed / not stable + we allow all extra fields and this is a dict + TODO: once mcp is stable, we need to type this + """ + + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + type: str + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] diff --git a/src/mistralai/client/models/export_dataset_to_jsonl_v1_observability_datasets_dataset_id_exports_to_jsonl_getop.py b/src/mistralai/client/models/export_dataset_to_jsonl_v1_observability_datasets_dataset_id_exports_to_jsonl_getop.py new file mode 100644 index 00000000..07b4e58f --- /dev/null +++ b/src/mistralai/client/models/export_dataset_to_jsonl_v1_observability_datasets_dataset_id_exports_to_jsonl_getop.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 74f5f3183b64 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class ExportDatasetToJsonlV1ObservabilityDatasetsDatasetIDExportsToJsonlGetRequestTypedDict( + TypedDict +): + dataset_id: str + + +class ExportDatasetToJsonlV1ObservabilityDatasetsDatasetIDExportsToJsonlGetRequest( + BaseModel +): + dataset_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/exportdatasetresponse.py b/src/mistralai/client/models/exportdatasetresponse.py new file mode 100644 index 00000000..f1600cac --- /dev/null +++ b/src/mistralai/client/models/exportdatasetresponse.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 22cc29d258db + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class ExportDatasetResponseTypedDict(TypedDict): + file_url: str + + +class ExportDatasetResponse(BaseModel): + file_url: str diff --git a/src/mistralai/client/models/feedresultchatcompletioneventpreview.py b/src/mistralai/client/models/feedresultchatcompletioneventpreview.py new file mode 100644 index 00000000..e87e007e --- /dev/null +++ b/src/mistralai/client/models/feedresultchatcompletioneventpreview.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 19109368b436 + +from __future__ import annotations +from .chatcompletioneventpreview import ( + ChatCompletionEventPreview, + ChatCompletionEventPreviewTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class FeedResultChatCompletionEventPreviewTypedDict(TypedDict): + results: NotRequired[List[ChatCompletionEventPreviewTypedDict]] + next: NotRequired[Nullable[str]] + cursor: NotRequired[Nullable[str]] + + +class FeedResultChatCompletionEventPreview(BaseModel): + results: Optional[List[ChatCompletionEventPreview]] = None + + next: OptionalNullable[str] = UNSET + + cursor: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["results", "next", "cursor"]) + nullable_fields = set(["next", "cursor"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/fetchcampaignstatusresponse.py b/src/mistralai/client/models/fetchcampaignstatusresponse.py new file mode 100644 index 00000000..6004892e --- /dev/null +++ b/src/mistralai/client/models/fetchcampaignstatusresponse.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b74b57603a4c + +from __future__ import annotations +from .basetaskstatus import BaseTaskStatus +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class FetchCampaignStatusResponseTypedDict(TypedDict): + status: BaseTaskStatus + + +class FetchCampaignStatusResponse(BaseModel): + status: BaseTaskStatus diff --git a/src/mistralai/client/models/fetchchatcompletionfieldoptionsresponse.py b/src/mistralai/client/models/fetchchatcompletionfieldoptionsresponse.py new file mode 100644 index 00000000..d3cbd653 --- /dev/null +++ b/src/mistralai/client/models/fetchchatcompletionfieldoptionsresponse.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2191cab4638b + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +OptionTypedDict = TypeAliasType("OptionTypedDict", Union[str, bool]) + + +Option = TypeAliasType("Option", Union[str, bool]) + + +class FetchChatCompletionFieldOptionsResponseTypedDict(TypedDict): + options: NotRequired[Nullable[List[Nullable[OptionTypedDict]]]] + + +class FetchChatCompletionFieldOptionsResponse(BaseModel): + options: OptionalNullable[List[Nullable[Option]]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["options"]) + nullable_fields = set(["options"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or 
is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/fetchfieldoptioncountsrequest.py b/src/mistralai/client/models/fetchfieldoptioncountsrequest.py new file mode 100644 index 00000000..19698b9b --- /dev/null +++ b/src/mistralai/client/models/fetchfieldoptioncountsrequest.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2fd8caa0697d + +from __future__ import annotations +from .filterpayload import FilterPayload, FilterPayloadTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class FetchFieldOptionCountsRequestTypedDict(TypedDict): + filter_params: NotRequired[Nullable[FilterPayloadTypedDict]] + + +class FetchFieldOptionCountsRequest(BaseModel): + filter_params: OptionalNullable[FilterPayload] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["filter_params"]) + nullable_fields = set(["filter_params"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/fetchfieldoptioncountsresponse.py b/src/mistralai/client/models/fetchfieldoptioncountsresponse.py new file mode 100644 index 00000000..4c9f9d0f --- /dev/null +++ b/src/mistralai/client/models/fetchfieldoptioncountsresponse.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 16ce9431fc7b + +from __future__ import annotations +from .fieldoptioncountitem import FieldOptionCountItem, FieldOptionCountItemTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class FetchFieldOptionCountsResponseTypedDict(TypedDict): + counts: List[FieldOptionCountItemTypedDict] + + +class FetchFieldOptionCountsResponse(BaseModel): + counts: List[FieldOptionCountItem] diff --git a/src/mistralai/client/models/fieldgroup.py b/src/mistralai/client/models/fieldgroup.py new file mode 100644 index 00000000..07f9e7c7 --- /dev/null +++ b/src/mistralai/client/models/fieldgroup.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 201353e45f89 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class FieldGroupTypedDict(TypedDict): + name: str + label: str + + +class FieldGroup(BaseModel): + name: str + + label: str diff --git a/src/mistralai/client/models/fieldoptioncountitem.py b/src/mistralai/client/models/fieldoptioncountitem.py new file mode 100644 index 00000000..0b50c148 --- /dev/null +++ b/src/mistralai/client/models/fieldoptioncountitem.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 5b707a343930 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class FieldOptionCountItemTypedDict(TypedDict): + value: str + count: int + + +class FieldOptionCountItem(BaseModel): + value: str + + count: int diff --git a/src/mistralai/client/models/files_api_routes_upload_fileop.py b/src/mistralai/client/models/files_api_routes_upload_fileop.py index 54ff4e49..b6683639 100644 --- a/src/mistralai/client/models/files_api_routes_upload_fileop.py +++ b/src/mistralai/client/models/files_api_routes_upload_fileop.py @@ -4,13 +4,25 @@ from __future__ import annotations from .file import File, FileTypedDict from .filepurpose import FilePurpose -from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai.client.utils import FieldMetadata, MultipartFormMetadata from pydantic import model_serializer -from typing import Optional +from typing import Literal, Optional from typing_extensions import Annotated, NotRequired, TypedDict +FilesAPIRoutesUploadFileFileVisibility = Literal[ + "workspace", + "user", +] + + class MultiPartBodyParamsTypedDict(TypedDict): file: FileTypedDict r"""The File object (not file name) to be uploaded. 
@@ -23,6 +35,8 @@ class MultiPartBodyParamsTypedDict(TypedDict): file=@path/to/your/file.jsonl ``` """ + expiry: NotRequired[Nullable[int]] + visibility: NotRequired[FilesAPIRoutesUploadFileFileVisibility] purpose: NotRequired[FilePurpose] @@ -39,20 +53,35 @@ class MultiPartBodyParams(BaseModel): ``` """ + expiry: Annotated[OptionalNullable[int], FieldMetadata(multipart=True)] = UNSET + + visibility: Annotated[ + Optional[FilesAPIRoutesUploadFileFileVisibility], FieldMetadata(multipart=True) + ] = "workspace" + purpose: Annotated[Optional[FilePurpose], FieldMetadata(multipart=True)] = None @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = set(["purpose"]) + optional_fields = set(["expiry", "visibility", "purpose"]) + nullable_fields = set(["expiry"]) serialized = handler(self) m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) if val != UNSET_SENTINEL: - if val is not None or k not in optional_fields: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): m[k] = val return m diff --git a/src/mistralai/client/models/fileschema.py b/src/mistralai/client/models/fileschema.py index e99066a9..11caa083 100644 --- a/src/mistralai/client/models/fileschema.py +++ b/src/mistralai/client/models/fileschema.py @@ -3,6 +3,7 @@ from __future__ import annotations from .filepurpose import FilePurpose +from .filevisibility import FileVisibility from .sampletype import SampleType from .source import Source from mistralai.client.types import ( @@ -34,6 +35,8 @@ class FileSchemaTypedDict(TypedDict): num_lines: NotRequired[Nullable[int]] mimetype: NotRequired[Nullable[str]] signature: NotRequired[Nullable[str]] + expires_at: NotRequired[Nullable[int]] + visibility: NotRequired[Nullable[FileVisibility]] class 
FileSchema(BaseModel): @@ -64,10 +67,18 @@ class FileSchema(BaseModel): signature: OptionalNullable[str] = UNSET + expires_at: OptionalNullable[int] = UNSET + + visibility: OptionalNullable[FileVisibility] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = set(["num_lines", "mimetype", "signature"]) - nullable_fields = set(["num_lines", "mimetype", "signature"]) + optional_fields = set( + ["num_lines", "mimetype", "signature", "expires_at", "visibility"] + ) + nullable_fields = set( + ["num_lines", "mimetype", "signature", "expires_at", "visibility"] + ) serialized = handler(self) m = {} diff --git a/src/mistralai/client/models/filevisibility.py b/src/mistralai/client/models/filevisibility.py new file mode 100644 index 00000000..56cfe881 --- /dev/null +++ b/src/mistralai/client/models/filevisibility.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 11a670fa3b71 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +FileVisibility = Union[ + Literal[ + "workspace", + "user", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/filtercondition.py b/src/mistralai/client/models/filtercondition.py new file mode 100644 index 00000000..fe62d6dd --- /dev/null +++ b/src/mistralai/client/models/filtercondition.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ba62f90873c5 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UnrecognizedStr +from typing import Any, Literal, Union +from typing_extensions import TypedDict + + +Op = Union[ + Literal[ + "lt", + "lte", + "gt", + "gte", + "startswith", + "istartswith", + "endswith", + "iendswith", + "contains", + "icontains", + "matches", + "notcontains", + "inotcontains", + "eq", + "neq", + "isnull", + "includes", + "excludes", + "len_eq", + ], + UnrecognizedStr, +] + + +class FilterConditionTypedDict(TypedDict): + field: str + op: Op + value: Any + + +class FilterCondition(BaseModel): + field: str + + op: Op + + value: Any diff --git a/src/mistralai/client/models/filtergroup.py b/src/mistralai/client/models/filtergroup.py new file mode 100644 index 00000000..06616a01 --- /dev/null +++ b/src/mistralai/client/models/filtergroup.py @@ -0,0 +1,74 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: dbc0c34fbc2f + +from __future__ import annotations +from .filtercondition import FilterCondition, FilterConditionTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import List, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +class FilterGroupTypedDict(TypedDict): + and_: NotRequired[Nullable[List[AndTypedDict]]] + or_: NotRequired[Nullable[List[OrTypedDict]]] + + +class FilterGroup(BaseModel): + and_: Annotated[OptionalNullable[List[And]], pydantic.Field(alias="AND")] = UNSET + + or_: Annotated[OptionalNullable[List[Or]], pydantic.Field(alias="OR")] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["AND", "OR"]) + nullable_fields = set(["AND", "OR"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = 
f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +OrTypedDict = TypeAliasType( + "OrTypedDict", Union["FilterGroupTypedDict", FilterConditionTypedDict] +) + + +Or = TypeAliasType("Or", Union["FilterGroup", FilterCondition]) + + +AndTypedDict = TypeAliasType( + "AndTypedDict", Union["FilterGroupTypedDict", FilterConditionTypedDict] +) + + +And = TypeAliasType("And", Union["FilterGroup", FilterCondition]) + + +try: + FilterGroup.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/filterpayload.py b/src/mistralai/client/models/filterpayload.py new file mode 100644 index 00000000..188375f1 --- /dev/null +++ b/src/mistralai/client/models/filterpayload.py @@ -0,0 +1,40 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 56757b849f7a + +from __future__ import annotations +from .filtercondition import FilterCondition, FilterConditionTypedDict +from .filtergroup import FilterGroup, FilterGroupTypedDict +from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Union +from typing_extensions import TypeAliasType, TypedDict + + +FiltersTypedDict = TypeAliasType( + "FiltersTypedDict", Union[FilterGroupTypedDict, FilterConditionTypedDict] +) + + +Filters = TypeAliasType("Filters", Union[FilterGroup, FilterCondition]) + + +class FilterPayloadTypedDict(TypedDict): + filters: Nullable[FiltersTypedDict] + + +class FilterPayload(BaseModel): + filters: Nullable[Filters] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + m[k] = val + + return m diff --git a/src/mistralai/client/models/get_campaign_by_id_v1_observability_campaigns_campaign_id_getop.py b/src/mistralai/client/models/get_campaign_by_id_v1_observability_campaigns_campaign_id_getop.py new file mode 100644 index 00000000..a8446737 --- /dev/null +++ b/src/mistralai/client/models/get_campaign_by_id_v1_observability_campaigns_campaign_id_getop.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 288520184035 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class GetCampaignByIDV1ObservabilityCampaignsCampaignIDGetRequestTypedDict(TypedDict): + campaign_id: str + + +class GetCampaignByIDV1ObservabilityCampaignsCampaignIDGetRequest(BaseModel): + campaign_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/get_campaign_selected_events_v1_observability_campaigns_campaign_id_selected_events_getop.py b/src/mistralai/client/models/get_campaign_selected_events_v1_observability_campaigns_campaign_id_selected_events_getop.py new file mode 100644 index 00000000..e1b04950 --- /dev/null +++ b/src/mistralai/client/models/get_campaign_selected_events_v1_observability_campaigns_campaign_id_selected_events_getop.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 270800e2c264 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class GetCampaignSelectedEventsV1ObservabilityCampaignsCampaignIDSelectedEventsGetRequestTypedDict( + TypedDict +): + campaign_id: str + page_size: NotRequired[int] + page: NotRequired[int] + + +class GetCampaignSelectedEventsV1ObservabilityCampaignsCampaignIDSelectedEventsGetRequest( + BaseModel +): + campaign_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 50 + + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["page_size", "page"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/get_campaign_status_by_id_v1_observability_campaigns_campaign_id_status_getop.py b/src/mistralai/client/models/get_campaign_status_by_id_v1_observability_campaigns_campaign_id_status_getop.py new file mode 100644 index 00000000..57ac4490 --- /dev/null +++ b/src/mistralai/client/models/get_campaign_status_by_id_v1_observability_campaigns_campaign_id_status_getop.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 853a43ee6b98 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class GetCampaignStatusByIDV1ObservabilityCampaignsCampaignIDStatusGetRequestTypedDict( + TypedDict +): + campaign_id: str + + +class GetCampaignStatusByIDV1ObservabilityCampaignsCampaignIDStatusGetRequest( + BaseModel +): + campaign_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/get_campaigns_v1_observability_campaigns_getop.py b/src/mistralai/client/models/get_campaigns_v1_observability_campaigns_getop.py new file mode 100644 index 00000000..a6a329c7 --- /dev/null +++ b/src/mistralai/client/models/get_campaigns_v1_observability_campaigns_getop.py @@ -0,0 +1,63 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 598a7340fc98 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class GetCampaignsV1ObservabilityCampaignsGetRequestTypedDict(TypedDict): + page_size: NotRequired[int] + page: NotRequired[int] + q: NotRequired[Nullable[str]] + + +class GetCampaignsV1ObservabilityCampaignsGetRequest(BaseModel): + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 50 + + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 1 + + q: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + 
@model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["page_size", "page", "q"]) + nullable_fields = set(["q"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/get_chat_completion_event_v1_observability_chat_completion_events_event_id_getop.py b/src/mistralai/client/models/get_chat_completion_event_v1_observability_chat_completion_events_event_id_getop.py new file mode 100644 index 00000000..f39b0f56 --- /dev/null +++ b/src/mistralai/client/models/get_chat_completion_event_v1_observability_chat_completion_events_event_id_getop.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 98aff68bc7c7 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class GetChatCompletionEventV1ObservabilityChatCompletionEventsEventIDGetRequestTypedDict( + TypedDict +): + event_id: str + + +class GetChatCompletionEventV1ObservabilityChatCompletionEventsEventIDGetRequest( + BaseModel +): + event_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/get_chat_completion_events_v1_observability_chat_completion_events_search_postop.py b/src/mistralai/client/models/get_chat_completion_events_v1_observability_chat_completion_events_search_postop.py new file mode 100644 index 00000000..5a8b7450 --- /dev/null +++ b/src/mistralai/client/models/get_chat_completion_events_v1_observability_chat_completion_events_search_postop.py @@ -0,0 +1,71 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 36957d0f73aa + +from __future__ import annotations +from .searchchatcompletioneventsrequest import ( + SearchChatCompletionEventsRequest, + SearchChatCompletionEventsRequestTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata, RequestMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class GetChatCompletionEventsV1ObservabilityChatCompletionEventsSearchPostRequestTypedDict( + TypedDict +): + search_chat_completion_events_request: SearchChatCompletionEventsRequestTypedDict + page_size: NotRequired[int] + cursor: NotRequired[Nullable[str]] + + +class GetChatCompletionEventsV1ObservabilityChatCompletionEventsSearchPostRequest( + BaseModel +): + search_chat_completion_events_request: Annotated[ + SearchChatCompletionEventsRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 50 + + cursor: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["page_size", "cursor"]) + nullable_fields = set(["cursor"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git 
a/src/mistralai/client/models/get_chat_completion_field_options_counts_v1_observability_chat_completion_fields_field_name_options_counts_postop.py b/src/mistralai/client/models/get_chat_completion_field_options_counts_v1_observability_chat_completion_fields_field_name_options_counts_postop.py new file mode 100644 index 00000000..f579260d --- /dev/null +++ b/src/mistralai/client/models/get_chat_completion_field_options_counts_v1_observability_chat_completion_fields_field_name_options_counts_postop.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 0b1bd06b24af + +from __future__ import annotations +from .fetchfieldoptioncountsrequest import ( + FetchFieldOptionCountsRequest, + FetchFieldOptionCountsRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class GetChatCompletionFieldOptionsCountsV1ObservabilityChatCompletionFieldsFieldNameOptionsCountsPostRequestTypedDict( + TypedDict +): + field_name: str + fetch_field_option_counts_request: FetchFieldOptionCountsRequestTypedDict + + +class GetChatCompletionFieldOptionsCountsV1ObservabilityChatCompletionFieldsFieldNameOptionsCountsPostRequest( + BaseModel +): + field_name: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + fetch_field_option_counts_request: Annotated[ + FetchFieldOptionCountsRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/get_chat_completion_field_options_v1_observability_chat_completion_fields_field_name_options_getop.py b/src/mistralai/client/models/get_chat_completion_field_options_v1_observability_chat_completion_fields_field_name_options_getop.py new file mode 100644 index 00000000..c044bb92 --- /dev/null +++ 
b/src/mistralai/client/models/get_chat_completion_field_options_v1_observability_chat_completion_fields_field_name_options_getop.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4fb7f3c0e51b + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing import Literal +from typing_extensions import Annotated, TypedDict + + +Operator = Literal[ + "lt", + "lte", + "gt", + "gte", + "startswith", + "istartswith", + "endswith", + "iendswith", + "contains", + "icontains", + "matches", + "notcontains", + "inotcontains", + "eq", + "neq", + "isnull", + "includes", + "excludes", + "len_eq", +] +r"""The operator to use for filtering options""" + + +class GetChatCompletionFieldOptionsV1ObservabilityChatCompletionFieldsFieldNameOptionsGetRequestTypedDict( + TypedDict +): + field_name: str + operator: Operator + r"""The operator to use for filtering options""" + + +class GetChatCompletionFieldOptionsV1ObservabilityChatCompletionFieldsFieldNameOptionsGetRequest( + BaseModel +): + field_name: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + operator: Annotated[ + Operator, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) + ] + r"""The operator to use for filtering options""" diff --git a/src/mistralai/client/models/get_dataset_by_id_v1_observability_datasets_dataset_id_getop.py b/src/mistralai/client/models/get_dataset_by_id_v1_observability_datasets_dataset_id_getop.py new file mode 100644 index 00000000..0259485d --- /dev/null +++ b/src/mistralai/client/models/get_dataset_by_id_v1_observability_datasets_dataset_id_getop.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: cfd3282e7f33 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class GetDatasetByIDV1ObservabilityDatasetsDatasetIDGetRequestTypedDict(TypedDict): + dataset_id: str + + +class GetDatasetByIDV1ObservabilityDatasetsDatasetIDGetRequest(BaseModel): + dataset_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/get_dataset_import_task_v1_observability_datasets_dataset_id_tasks_task_id_getop.py b/src/mistralai/client/models/get_dataset_import_task_v1_observability_datasets_dataset_id_tasks_task_id_getop.py new file mode 100644 index 00000000..cf900d0e --- /dev/null +++ b/src/mistralai/client/models/get_dataset_import_task_v1_observability_datasets_dataset_id_tasks_task_id_getop.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b45f77cb328c + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class GetDatasetImportTaskV1ObservabilityDatasetsDatasetIDTasksTaskIDGetRequestTypedDict( + TypedDict +): + dataset_id: str + task_id: str + + +class GetDatasetImportTaskV1ObservabilityDatasetsDatasetIDTasksTaskIDGetRequest( + BaseModel +): + dataset_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + task_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/get_dataset_import_tasks_v1_observability_datasets_dataset_id_tasks_getop.py b/src/mistralai/client/models/get_dataset_import_tasks_v1_observability_datasets_dataset_id_tasks_getop.py new file mode 100644 index 00000000..43e0dc5b --- /dev/null +++ b/src/mistralai/client/models/get_dataset_import_tasks_v1_observability_datasets_dataset_id_tasks_getop.py @@ -0,0 +1,49 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 07ece48f664d + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class GetDatasetImportTasksV1ObservabilityDatasetsDatasetIDTasksGetRequestTypedDict( + TypedDict +): + dataset_id: str + page_size: NotRequired[int] + page: NotRequired[int] + + +class GetDatasetImportTasksV1ObservabilityDatasetsDatasetIDTasksGetRequest(BaseModel): + dataset_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 50 + + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["page_size", "page"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/get_dataset_record_v1_observability_dataset_records_dataset_record_id_getop.py b/src/mistralai/client/models/get_dataset_record_v1_observability_dataset_records_dataset_record_id_getop.py new file mode 100644 index 00000000..0cb239df --- /dev/null +++ b/src/mistralai/client/models/get_dataset_record_v1_observability_dataset_records_dataset_record_id_getop.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 6ea6a0dab32f + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class GetDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDGetRequestTypedDict( + TypedDict +): + dataset_record_id: str + + +class GetDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDGetRequest(BaseModel): + dataset_record_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/get_dataset_records_v1_observability_datasets_dataset_id_records_getop.py b/src/mistralai/client/models/get_dataset_records_v1_observability_datasets_dataset_id_records_getop.py new file mode 100644 index 00000000..7617a5d2 --- /dev/null +++ b/src/mistralai/client/models/get_dataset_records_v1_observability_datasets_dataset_id_records_getop.py @@ -0,0 +1,49 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 77967c965aea + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class GetDatasetRecordsV1ObservabilityDatasetsDatasetIDRecordsGetRequestTypedDict( + TypedDict +): + dataset_id: str + page_size: NotRequired[int] + page: NotRequired[int] + + +class GetDatasetRecordsV1ObservabilityDatasetsDatasetIDRecordsGetRequest(BaseModel): + dataset_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 50 + + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["page_size", "page"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/get_datasets_v1_observability_datasets_getop.py b/src/mistralai/client/models/get_datasets_v1_observability_datasets_getop.py new file mode 100644 index 00000000..ea52dd51 --- /dev/null +++ b/src/mistralai/client/models/get_datasets_v1_observability_datasets_getop.py @@ -0,0 +1,63 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3e4f4e2447ac + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class GetDatasetsV1ObservabilityDatasetsGetRequestTypedDict(TypedDict): + page_size: NotRequired[int] + page: NotRequired[int] + q: NotRequired[Nullable[str]] + + +class GetDatasetsV1ObservabilityDatasetsGetRequest(BaseModel): + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 50 + + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 1 + + q: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["page_size", "page", "q"]) + nullable_fields = set(["q"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/get_judge_by_id_v1_observability_judges_judge_id_getop.py b/src/mistralai/client/models/get_judge_by_id_v1_observability_judges_judge_id_getop.py new file mode 100644 index 00000000..375db2e9 --- /dev/null +++ b/src/mistralai/client/models/get_judge_by_id_v1_observability_judges_judge_id_getop.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 4201c3c5a891 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class GetJudgeByIDV1ObservabilityJudgesJudgeIDGetRequestTypedDict(TypedDict): + judge_id: str + + +class GetJudgeByIDV1ObservabilityJudgesJudgeIDGetRequest(BaseModel): + judge_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/get_judges_v1_observability_judges_getop.py b/src/mistralai/client/models/get_judges_v1_observability_judges_getop.py new file mode 100644 index 00000000..7228cfa1 --- /dev/null +++ b/src/mistralai/client/models/get_judges_v1_observability_judges_getop.py @@ -0,0 +1,80 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: fa04e3db7781 + +from __future__ import annotations +from .judgeoutputtype import JudgeOutputType +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class GetJudgesV1ObservabilityJudgesGetRequestTypedDict(TypedDict): + type_filter: NotRequired[Nullable[List[JudgeOutputType]]] + r"""Filter by judge output types""" + model_filter: NotRequired[Nullable[List[str]]] + r"""Filter by model names""" + page_size: NotRequired[int] + page: NotRequired[int] + q: NotRequired[Nullable[str]] + + +class GetJudgesV1ObservabilityJudgesGetRequest(BaseModel): + type_filter: Annotated[ + OptionalNullable[List[JudgeOutputType]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""Filter by judge output types""" + + model_filter: Annotated[ + 
OptionalNullable[List[str]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""Filter by model names""" + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 50 + + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 1 + + q: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type_filter", "model_filter", "page_size", "page", "q"]) + nullable_fields = set(["type_filter", "model_filter", "q"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/get_similar_chat_completion_events_v1_observability_chat_completion_events_event_id_similar_events_getop.py b/src/mistralai/client/models/get_similar_chat_completion_events_v1_observability_chat_completion_events_event_id_similar_events_getop.py new file mode 100644 index 00000000..7689415d --- /dev/null +++ b/src/mistralai/client/models/get_similar_chat_completion_events_v1_observability_chat_completion_events_event_id_similar_events_getop.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d651bdc06c1b + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class GetSimilarChatCompletionEventsV1ObservabilityChatCompletionEventsEventIDSimilarEventsGetRequestTypedDict( + TypedDict +): + event_id: str + + +class GetSimilarChatCompletionEventsV1ObservabilityChatCompletionEventsEventIDSimilarEventsGetRequest( + BaseModel +): + event_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/getfileresponse.py b/src/mistralai/client/models/getfileresponse.py index f625c153..82280899 100644 --- a/src/mistralai/client/models/getfileresponse.py +++ b/src/mistralai/client/models/getfileresponse.py @@ -3,6 +3,7 @@ from __future__ import annotations from .filepurpose import FilePurpose +from .filevisibility import FileVisibility from .sampletype import SampleType from .source import Source from mistralai.client.types import ( @@ -35,6 +36,8 @@ class GetFileResponseTypedDict(TypedDict): num_lines: NotRequired[Nullable[int]] mimetype: NotRequired[Nullable[str]] signature: NotRequired[Nullable[str]] + expires_at: NotRequired[Nullable[int]] + visibility: NotRequired[Nullable[FileVisibility]] class GetFileResponse(BaseModel): @@ -67,10 +70,18 @@ class GetFileResponse(BaseModel): signature: OptionalNullable[str] = UNSET + expires_at: OptionalNullable[int] = UNSET + + visibility: OptionalNullable[FileVisibility] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = set(["num_lines", "mimetype", "signature"]) - nullable_fields = set(["num_lines", "mimetype", "signature"]) + optional_fields = set( + ["num_lines", "mimetype", "signature", "expires_at", "visibility"] + ) + nullable_fields = set( + ["num_lines", "mimetype", "signature", "expires_at", "visibility"] + ) 
serialized = handler(self) m = {} diff --git a/src/mistralai/client/models/guardrailconfig.py b/src/mistralai/client/models/guardrailconfig.py new file mode 100644 index 00000000..9af986cf --- /dev/null +++ b/src/mistralai/client/models/guardrailconfig.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c72b74412547 + +from __future__ import annotations +from .moderationllmv1config import ModerationLlmv1Config, ModerationLlmv1ConfigTypedDict +from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class GuardrailConfigTypedDict(TypedDict): + moderation_llm_v1: Nullable[ModerationLlmv1ConfigTypedDict] + block_on_error: NotRequired[bool] + r"""If true, return HTTP 403 and block request in the event of a server-side error""" + + +class GuardrailConfig(BaseModel): + moderation_llm_v1: Nullable[ModerationLlmv1Config] + + block_on_error: Optional[bool] = False + r"""If true, return HTTP 403 and block request in the event of a server-side error""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["block_on_error"]) + nullable_fields = set(["moderation_llm_v1"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/imagecontent.py b/src/mistralai/client/models/imagecontent.py new file mode 100644 index 00000000..13c3f449 --- /dev/null +++ b/src/mistralai/client/models/imagecontent.py @@ -0,0 +1,94 @@ +"""Code generated 
by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3abe7faee278 + +from __future__ import annotations +from .annotations import Annotations, AnnotationsTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, Literal +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ImageContentTypedDict(TypedDict): + r"""Image content for a message.""" + + data: str + mime_type: str + type: Literal["image"] + annotations: NotRequired[Nullable[AnnotationsTypedDict]] + meta: NotRequired[Nullable[Dict[str, Any]]] + + +class ImageContent(BaseModel): + r"""Image content for a message.""" + + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + data: str + + mime_type: Annotated[str, pydantic.Field(alias="mimeType")] + + type: Annotated[ + Annotated[Literal["image"], AfterValidator(validate_const("image"))], + pydantic.Field(alias="type"), + ] = "image" + + annotations: OptionalNullable[Annotations] = UNSET + + meta: Annotated[OptionalNullable[Dict[str, Any]], pydantic.Field(alias="_meta")] = ( + UNSET + ) + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["annotations", "_meta"]) + nullable_fields = set(["annotations", "_meta"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + 
serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m + + +try: + ImageContent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/importdatasetfromcampaignrequest.py b/src/mistralai/client/models/importdatasetfromcampaignrequest.py new file mode 100644 index 00000000..5db45e8e --- /dev/null +++ b/src/mistralai/client/models/importdatasetfromcampaignrequest.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ee475b85bfc7 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class ImportDatasetFromCampaignRequestTypedDict(TypedDict): + campaign_id: str + + +class ImportDatasetFromCampaignRequest(BaseModel): + campaign_id: str diff --git a/src/mistralai/client/models/importdatasetfromdatasetrequest.py b/src/mistralai/client/models/importdatasetfromdatasetrequest.py new file mode 100644 index 00000000..ebf2a649 --- /dev/null +++ b/src/mistralai/client/models/importdatasetfromdatasetrequest.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 77aea4882ccb + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ImportDatasetFromDatasetRequestTypedDict(TypedDict): + dataset_record_ids: List[str] + + +class ImportDatasetFromDatasetRequest(BaseModel): + dataset_record_ids: List[str] diff --git a/src/mistralai/client/models/importdatasetfromexplorerrequest.py b/src/mistralai/client/models/importdatasetfromexplorerrequest.py new file mode 100644 index 00000000..85df7af6 --- /dev/null +++ b/src/mistralai/client/models/importdatasetfromexplorerrequest.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 852a3cbc1631 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ImportDatasetFromExplorerRequestTypedDict(TypedDict): + completion_event_ids: List[str] + + +class ImportDatasetFromExplorerRequest(BaseModel): + completion_event_ids: List[str] diff --git a/src/mistralai/client/models/importdatasetfromfilerequest.py b/src/mistralai/client/models/importdatasetfromfilerequest.py new file mode 100644 index 00000000..9a486776 --- /dev/null +++ b/src/mistralai/client/models/importdatasetfromfilerequest.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b2882fa57029 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class ImportDatasetFromFileRequestTypedDict(TypedDict): + file_id: str + + +class ImportDatasetFromFileRequest(BaseModel): + file_id: str diff --git a/src/mistralai/client/models/importdatasetfromplaygroundrequest.py b/src/mistralai/client/models/importdatasetfromplaygroundrequest.py new file mode 100644 index 00000000..f5bd720d --- /dev/null +++ b/src/mistralai/client/models/importdatasetfromplaygroundrequest.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 8d809b14b144 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ImportDatasetFromPlaygroundRequestTypedDict(TypedDict): + conversation_ids: List[str] + + +class ImportDatasetFromPlaygroundRequest(BaseModel): + conversation_ids: List[str] diff --git a/src/mistralai/client/models/judge.py b/src/mistralai/client/models/judge.py new file mode 100644 index 00000000..1fb1d386 --- /dev/null +++ b/src/mistralai/client/models/judge.py @@ -0,0 +1,136 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d6adc687c2d9 + +from __future__ import annotations +from .judgeclassificationoutput import ( + JudgeClassificationOutput, + JudgeClassificationOutputTypedDict, +) +from .judgeregressionoutput import JudgeRegressionOutput, JudgeRegressionOutputTypedDict +from datetime import datetime +from functools import partial +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import BeforeValidator +from typing import Any, List, Literal, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +JudgeOutputUnionTypedDict = TypeAliasType( + "JudgeOutputUnionTypedDict", + Union[JudgeClassificationOutputTypedDict, JudgeRegressionOutputTypedDict], +) + + +class UnknownJudgeOutputUnion(BaseModel): + r"""A JudgeOutputUnion variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_JUDGE_OUTPUT_UNION_VARIANTS: dict[str, Any] = { + "CLASSIFICATION": JudgeClassificationOutput, + "REGRESSION": JudgeRegressionOutput, +} + + +JudgeOutputUnion = Annotated[ + Union[JudgeClassificationOutput, JudgeRegressionOutput, UnknownJudgeOutputUnion], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_JUDGE_OUTPUT_UNION_VARIANTS, + unknown_cls=UnknownJudgeOutputUnion, + union_name="JudgeOutputUnion", + ) + ), +] + + +class JudgeTypedDict(TypedDict): + id: str + created_at: datetime + updated_at: datetime + deleted_at: Nullable[datetime] + owner_id: str + workspace_id: str + name: str + description: str + model_name: str + output: JudgeOutputUnionTypedDict + instructions: str + tools: List[str] + up_revision: NotRequired[Nullable[str]] + down_revision: NotRequired[Nullable[str]] + base_revision: NotRequired[Nullable[str]] + + +class Judge(BaseModel): + id: str + + created_at: datetime + + updated_at: datetime + + deleted_at: Nullable[datetime] + + owner_id: str + + workspace_id: str + + name: str + + description: str + + model_name: str + + output: JudgeOutputUnion + + instructions: str + + tools: List[str] + + up_revision: OptionalNullable[str] = UNSET + + down_revision: OptionalNullable[str] = UNSET + + base_revision: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["up_revision", "down_revision", "base_revision"]) + nullable_fields = set( + ["deleted_at", "up_revision", "down_revision", "base_revision"] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if 
val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/judge_chat_completion_event_v1_observability_chat_completion_events_event_id_live_judging_postop.py b/src/mistralai/client/models/judge_chat_completion_event_v1_observability_chat_completion_events_event_id_live_judging_postop.py new file mode 100644 index 00000000..a6b9c969 --- /dev/null +++ b/src/mistralai/client/models/judge_chat_completion_event_v1_observability_chat_completion_events_event_id_live_judging_postop.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 98c823e7cc1b + +from __future__ import annotations +from .judgechatcompletioneventrequest import ( + JudgeChatCompletionEventRequest, + JudgeChatCompletionEventRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class JudgeChatCompletionEventV1ObservabilityChatCompletionEventsEventIDLiveJudgingPostRequestTypedDict( + TypedDict +): + event_id: str + judge_chat_completion_event_request: JudgeChatCompletionEventRequestTypedDict + + +class JudgeChatCompletionEventV1ObservabilityChatCompletionEventsEventIDLiveJudgingPostRequest( + BaseModel +): + event_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + judge_chat_completion_event_request: Annotated[ + JudgeChatCompletionEventRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/judge_dataset_record_v1_observability_dataset_records_dataset_record_id_live_judging_postop.py b/src/mistralai/client/models/judge_dataset_record_v1_observability_dataset_records_dataset_record_id_live_judging_postop.py new file mode 100644 index 00000000..4d54fa42 --- 
/dev/null +++ b/src/mistralai/client/models/judge_dataset_record_v1_observability_dataset_records_dataset_record_id_live_judging_postop.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4749566fd16d + +from __future__ import annotations +from .judgedatasetrecordrequest import ( + JudgeDatasetRecordRequest, + JudgeDatasetRecordRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class JudgeDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDLiveJudgingPostRequestTypedDict( + TypedDict +): + dataset_record_id: str + judge_dataset_record_request: JudgeDatasetRecordRequestTypedDict + + +class JudgeDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDLiveJudgingPostRequest( + BaseModel +): + dataset_record_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + judge_dataset_record_request: Annotated[ + JudgeDatasetRecordRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/judgechatcompletioneventrequest.py b/src/mistralai/client/models/judgechatcompletioneventrequest.py new file mode 100644 index 00000000..59c68801 --- /dev/null +++ b/src/mistralai/client/models/judgechatcompletioneventrequest.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 4fad8a510f7d + +from __future__ import annotations +from .createjudgerequest import CreateJudgeRequest, CreateJudgeRequestTypedDict +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class JudgeChatCompletionEventRequestTypedDict(TypedDict): + judge_definition: CreateJudgeRequestTypedDict + + +class JudgeChatCompletionEventRequest(BaseModel): + judge_definition: CreateJudgeRequest diff --git a/src/mistralai/client/models/judgeclassificationoutput.py b/src/mistralai/client/models/judgeclassificationoutput.py new file mode 100644 index 00000000..aa799682 --- /dev/null +++ b/src/mistralai/client/models/judgeclassificationoutput.py @@ -0,0 +1,36 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 683ae72d0efa + +from __future__ import annotations +from .judgeclassificationoutputoption import ( + JudgeClassificationOutputOption, + JudgeClassificationOutputOptionTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal +from typing_extensions import Annotated, TypedDict + + +class JudgeClassificationOutputTypedDict(TypedDict): + options: List[JudgeClassificationOutputOptionTypedDict] + type: Literal["CLASSIFICATION"] + + +class JudgeClassificationOutput(BaseModel): + options: List[JudgeClassificationOutputOption] + + type: Annotated[ + Annotated[ + Literal["CLASSIFICATION"], AfterValidator(validate_const("CLASSIFICATION")) + ], + pydantic.Field(alias="type"), + ] = "CLASSIFICATION" + + +try: + JudgeClassificationOutput.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/judgeclassificationoutputoption.py b/src/mistralai/client/models/judgeclassificationoutputoption.py new file mode 100644 index 00000000..64fad49f --- /dev/null +++ 
b/src/mistralai/client/models/judgeclassificationoutputoption.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c238f17d786b + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class JudgeClassificationOutputOptionTypedDict(TypedDict): + value: str + description: str + + +class JudgeClassificationOutputOption(BaseModel): + value: str + + description: str diff --git a/src/mistralai/client/models/judgedatasetrecordrequest.py b/src/mistralai/client/models/judgedatasetrecordrequest.py new file mode 100644 index 00000000..11499067 --- /dev/null +++ b/src/mistralai/client/models/judgedatasetrecordrequest.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 9ad8915328dd + +from __future__ import annotations +from .createjudgerequest import CreateJudgeRequest, CreateJudgeRequestTypedDict +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class JudgeDatasetRecordRequestTypedDict(TypedDict): + judge_definition: CreateJudgeRequestTypedDict + + +class JudgeDatasetRecordRequest(BaseModel): + judge_definition: CreateJudgeRequest diff --git a/src/mistralai/client/models/judgeoutput.py b/src/mistralai/client/models/judgeoutput.py new file mode 100644 index 00000000..2224a797 --- /dev/null +++ b/src/mistralai/client/models/judgeoutput.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 8c8099403e62 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Union +from typing_extensions import TypeAliasType, TypedDict + + +AnswerTypedDict = TypeAliasType("AnswerTypedDict", Union[str, float]) + + +Answer = TypeAliasType("Answer", Union[str, float]) + + +class JudgeOutputTypedDict(TypedDict): + analysis: str + answer: AnswerTypedDict + + +class JudgeOutput(BaseModel): + analysis: str + + answer: Answer diff --git a/src/mistralai/client/models/judgeoutputtype.py b/src/mistralai/client/models/judgeoutputtype.py new file mode 100644 index 00000000..4fa99498 --- /dev/null +++ b/src/mistralai/client/models/judgeoutputtype.py @@ -0,0 +1,11 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3f07e1eb25f9 + +from __future__ import annotations +from typing import Literal + + +JudgeOutputType = Literal[ + "REGRESSION", + "CLASSIFICATION", +] diff --git a/src/mistralai/client/models/judgeregressionoutput.py b/src/mistralai/client/models/judgeregressionoutput.py new file mode 100644 index 00000000..63394d8b --- /dev/null +++ b/src/mistralai/client/models/judgeregressionoutput.py @@ -0,0 +1,56 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c61d451066dc + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JudgeRegressionOutputTypedDict(TypedDict): + min_description: str + max_description: str + type: Literal["REGRESSION"] + min: NotRequired[float] + max: NotRequired[float] + + +class JudgeRegressionOutput(BaseModel): + min_description: str + + max_description: str + + type: Annotated[ + Annotated[Literal["REGRESSION"], AfterValidator(validate_const("REGRESSION"))], + pydantic.Field(alias="type"), + ] = "REGRESSION" + + min: Optional[float] = 0 + + max: Optional[float] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["min", "max"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + JudgeRegressionOutput.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/libraries_share_create_v1op.py b/src/mistralai/client/models/libraries_share_create_v1op.py index 00ea7482..9751045c 100644 --- a/src/mistralai/client/models/libraries_share_create_v1op.py +++ b/src/mistralai/client/models/libraries_share_create_v1op.py @@ -2,7 +2,7 @@ # @generated-id: feaacfd46dd3 from __future__ import annotations -from .sharingin import SharingIn, SharingInTypedDict +from .sharingrequest import SharingRequest, SharingRequestTypedDict from mistralai.client.types import BaseModel from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata from typing_extensions import Annotated, 
TypedDict @@ -10,7 +10,7 @@ class LibrariesShareCreateV1RequestTypedDict(TypedDict): library_id: str - sharing_in: SharingInTypedDict + sharing_request: SharingRequestTypedDict class LibrariesShareCreateV1Request(BaseModel): @@ -18,6 +18,7 @@ class LibrariesShareCreateV1Request(BaseModel): str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] - sharing_in: Annotated[ - SharingIn, FieldMetadata(request=RequestMetadata(media_type="application/json")) + sharing_request: Annotated[ + SharingRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), ] diff --git a/src/mistralai/client/models/list_models_v1_models_getop.py b/src/mistralai/client/models/list_models_v1_models_getop.py new file mode 100644 index 00000000..70b0ab82 --- /dev/null +++ b/src/mistralai/client/models/list_models_v1_models_getop.py @@ -0,0 +1,56 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1843a7aa68e5 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ListModelsV1ModelsGetRequestTypedDict(TypedDict): + provider: NotRequired[Nullable[str]] + model: NotRequired[Nullable[str]] + + +class ListModelsV1ModelsGetRequest(BaseModel): + provider: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + model: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["provider", "model"]) + nullable_fields = set(["provider", "model"]) + serialized = handler(self) + m = {} + + for n, f in 
type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/listcampaignselectedeventsresponse.py b/src/mistralai/client/models/listcampaignselectedeventsresponse.py new file mode 100644 index 00000000..a6133ecc --- /dev/null +++ b/src/mistralai/client/models/listcampaignselectedeventsresponse.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 8e28cb9aff1a + +from __future__ import annotations +from .paginatedresultchatcompletioneventpreview import ( + PaginatedResultChatCompletionEventPreview, + PaginatedResultChatCompletionEventPreviewTypedDict, +) +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class ListCampaignSelectedEventsResponseTypedDict(TypedDict): + completion_events: PaginatedResultChatCompletionEventPreviewTypedDict + + +class ListCampaignSelectedEventsResponse(BaseModel): + completion_events: PaginatedResultChatCompletionEventPreview diff --git a/src/mistralai/client/models/listcampaignsresponse.py b/src/mistralai/client/models/listcampaignsresponse.py new file mode 100644 index 00000000..741b1b21 --- /dev/null +++ b/src/mistralai/client/models/listcampaignsresponse.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 307695cde5c2 + +from __future__ import annotations +from .paginatedresultcampaignpreview import ( + PaginatedResultCampaignPreview, + PaginatedResultCampaignPreviewTypedDict, +) +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class ListCampaignsResponseTypedDict(TypedDict): + campaigns: PaginatedResultCampaignPreviewTypedDict + + +class ListCampaignsResponse(BaseModel): + campaigns: PaginatedResultCampaignPreview diff --git a/src/mistralai/client/models/listchatcompletionfieldsresponse.py b/src/mistralai/client/models/listchatcompletionfieldsresponse.py new file mode 100644 index 00000000..d260463a --- /dev/null +++ b/src/mistralai/client/models/listchatcompletionfieldsresponse.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2f8837a6a5c3 + +from __future__ import annotations +from .basefielddefinition import BaseFieldDefinition, BaseFieldDefinitionTypedDict +from .fieldgroup import FieldGroup, FieldGroupTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ListChatCompletionFieldsResponseTypedDict(TypedDict): + field_definitions: List[BaseFieldDefinitionTypedDict] + field_groups: List[FieldGroupTypedDict] + + +class ListChatCompletionFieldsResponse(BaseModel): + field_definitions: List[BaseFieldDefinition] + + field_groups: List[FieldGroup] diff --git a/src/mistralai/client/models/listdatasetimporttasksresponse.py b/src/mistralai/client/models/listdatasetimporttasksresponse.py new file mode 100644 index 00000000..15bea396 --- /dev/null +++ b/src/mistralai/client/models/listdatasetimporttasksresponse.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f635c1a3d02b + +from __future__ import annotations +from .paginatedresultdatasetimporttask import ( + PaginatedResultDatasetImportTask, + PaginatedResultDatasetImportTaskTypedDict, +) +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class ListDatasetImportTasksResponseTypedDict(TypedDict): + tasks: PaginatedResultDatasetImportTaskTypedDict + + +class ListDatasetImportTasksResponse(BaseModel): + tasks: PaginatedResultDatasetImportTask diff --git a/src/mistralai/client/models/listdatasetrecordsresponse.py b/src/mistralai/client/models/listdatasetrecordsresponse.py new file mode 100644 index 00000000..2341577a --- /dev/null +++ b/src/mistralai/client/models/listdatasetrecordsresponse.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 13b97e8095c4 + +from __future__ import annotations +from .paginatedresultdatasetrecord import ( + PaginatedResultDatasetRecord, + PaginatedResultDatasetRecordTypedDict, +) +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class ListDatasetRecordsResponseTypedDict(TypedDict): + records: PaginatedResultDatasetRecordTypedDict + + +class ListDatasetRecordsResponse(BaseModel): + records: PaginatedResultDatasetRecord diff --git a/src/mistralai/client/models/listdatasetsresponse.py b/src/mistralai/client/models/listdatasetsresponse.py new file mode 100644 index 00000000..a35e9a73 --- /dev/null +++ b/src/mistralai/client/models/listdatasetsresponse.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e18de4849423 + +from __future__ import annotations +from .paginatedresultdatasetpreview import ( + PaginatedResultDatasetPreview, + PaginatedResultDatasetPreviewTypedDict, +) +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class ListDatasetsResponseTypedDict(TypedDict): + datasets: PaginatedResultDatasetPreviewTypedDict + + +class ListDatasetsResponse(BaseModel): + datasets: PaginatedResultDatasetPreview diff --git a/src/mistralai/client/models/listjudgesresponse.py b/src/mistralai/client/models/listjudgesresponse.py new file mode 100644 index 00000000..0284cb99 --- /dev/null +++ b/src/mistralai/client/models/listjudgesresponse.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: db389a8abc34 + +from __future__ import annotations +from .paginatedresultjudgepreview import ( + PaginatedResultJudgePreview, + PaginatedResultJudgePreviewTypedDict, +) +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class ListJudgesResponseTypedDict(TypedDict): + judges: PaginatedResultJudgePreviewTypedDict + + +class ListJudgesResponse(BaseModel): + judges: PaginatedResultJudgePreview diff --git a/src/mistralai/client/models/listsharingout.py b/src/mistralai/client/models/listsharingout.py deleted file mode 100644 index 443ad0d6..00000000 --- a/src/mistralai/client/models/listsharingout.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: ee708a7ccdad - -from __future__ import annotations -from .sharingout import SharingOut, SharingOutTypedDict -from mistralai.client.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class ListSharingOutTypedDict(TypedDict): - data: List[SharingOutTypedDict] - - -class ListSharingOut(BaseModel): - data: List[SharingOut] diff --git a/src/mistralai/client/models/listsharingresponse.py b/src/mistralai/client/models/listsharingresponse.py new file mode 100644 index 00000000..f3e6dc87 --- /dev/null +++ b/src/mistralai/client/models/listsharingresponse.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 487c6addf089 + +from __future__ import annotations +from .sharing import Sharing, SharingTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ListSharingResponseTypedDict(TypedDict): + data: List[SharingTypedDict] + + +class ListSharingResponse(BaseModel): + data: List[Sharing] diff --git a/src/mistralai/client/models/mcpservericon.py b/src/mistralai/client/models/mcpservericon.py new file mode 100644 index 00000000..6068c1c4 --- /dev/null +++ b/src/mistralai/client/models/mcpservericon.py @@ -0,0 +1,82 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a5b508a322d7 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, List +from typing_extensions import Annotated, NotRequired, TypedDict + + +class MCPServerIconTypedDict(TypedDict): + r"""An icon for display in user interfaces.""" + + src: str + mime_type: NotRequired[Nullable[str]] + sizes: NotRequired[Nullable[List[str]]] + + +class MCPServerIcon(BaseModel): + r"""An icon for display in user interfaces.""" + + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + src: str + + mime_type: Annotated[OptionalNullable[str], pydantic.Field(alias="mimeType")] = ( + UNSET + ) + + sizes: OptionalNullable[List[str]] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["mimeType", "sizes"]) + nullable_fields = set(["mimeType", "sizes"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m + + +try: + MCPServerIcon.model_rebuild() +except NameError: + pass diff --git 
a/src/mistralai/client/models/messageinputcontentchunks.py b/src/mistralai/client/models/messageinputcontentchunks.py index 1e04ce24..63cf14e7 100644 --- a/src/mistralai/client/models/messageinputcontentchunks.py +++ b/src/mistralai/client/models/messageinputcontentchunks.py @@ -2,13 +2,10 @@ # @generated-id: 01025c12866a from __future__ import annotations -from .conversationthinkchunk import ( - ConversationThinkChunk, - ConversationThinkChunkTypedDict, -) from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict from typing import Union from typing_extensions import TypeAliasType @@ -20,7 +17,7 @@ TextChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict, - ConversationThinkChunkTypedDict, + ThinkChunkTypedDict, ToolFileChunkTypedDict, ], ) @@ -28,11 +25,5 @@ MessageInputContentChunks = TypeAliasType( "MessageInputContentChunks", - Union[ - TextChunk, - ImageURLChunk, - DocumentURLChunk, - ConversationThinkChunk, - ToolFileChunk, - ], + Union[TextChunk, ImageURLChunk, DocumentURLChunk, ThinkChunk, ToolFileChunk], ) diff --git a/src/mistralai/client/models/messageoutputcontentchunks.py b/src/mistralai/client/models/messageoutputcontentchunks.py index bf455d17..def7a4d2 100644 --- a/src/mistralai/client/models/messageoutputcontentchunks.py +++ b/src/mistralai/client/models/messageoutputcontentchunks.py @@ -2,13 +2,10 @@ # @generated-id: 2ed248515035 from __future__ import annotations -from .conversationthinkchunk import ( - ConversationThinkChunk, - ConversationThinkChunkTypedDict, -) from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import 
ThinkChunk, ThinkChunkTypedDict from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict from typing import Union @@ -21,7 +18,7 @@ TextChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict, - ConversationThinkChunkTypedDict, + ThinkChunkTypedDict, ToolFileChunkTypedDict, ToolReferenceChunkTypedDict, ], @@ -34,7 +31,7 @@ TextChunk, ImageURLChunk, DocumentURLChunk, - ConversationThinkChunk, + ThinkChunk, ToolFileChunk, ToolReferenceChunk, ], diff --git a/src/mistralai/client/models/messageresponse.py b/src/mistralai/client/models/messageresponse.py new file mode 100644 index 00000000..c8fbdff7 --- /dev/null +++ b/src/mistralai/client/models/messageresponse.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6b388bc155dd + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class MessageResponseTypedDict(TypedDict): + message: str + + +class MessageResponse(BaseModel): + message: str diff --git a/src/mistralai/client/models/modelconversation.py b/src/mistralai/client/models/modelconversation.py index bb33d2e0..74e113bf 100644 --- a/src/mistralai/client/models/modelconversation.py +++ b/src/mistralai/client/models/modelconversation.py @@ -6,6 +6,7 @@ from .completionargs import CompletionArgs, CompletionArgsTypedDict from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict from .functiontool import FunctionTool, FunctionToolTypedDict +from .guardrailconfig import GuardrailConfig, GuardrailConfigTypedDict from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict from .websearchtool import WebSearchTool, WebSearchToolTypedDict @@ -93,6 +94,7 @@ class ModelConversationTypedDict(TypedDict): r"""List of 
tools which are available to the model during the conversation.""" completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" + guardrails: NotRequired[Nullable[List[GuardrailConfigTypedDict]]] name: NotRequired[Nullable[str]] r"""Name given to the conversation.""" description: NotRequired[Nullable[str]] @@ -120,6 +122,8 @@ class ModelConversation(BaseModel): completion_args: Optional[CompletionArgs] = None r"""White-listed arguments from the completion API""" + guardrails: OptionalNullable[List[GuardrailConfig]] = UNSET + name: OptionalNullable[str] = UNSET r"""Name given to the conversation.""" @@ -144,13 +148,16 @@ def serialize_model(self, handler): "instructions", "tools", "completion_args", + "guardrails", "name", "description", "metadata", "object", ] ) - nullable_fields = set(["instructions", "name", "description", "metadata"]) + nullable_fields = set( + ["instructions", "guardrails", "name", "description", "metadata"] + ) serialized = handler(self) m = {} diff --git a/src/mistralai/client/models/moderationllmv1action.py b/src/mistralai/client/models/moderationllmv1action.py new file mode 100644 index 00000000..ae3a6ee9 --- /dev/null +++ b/src/mistralai/client/models/moderationllmv1action.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c95110c21e79 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ModerationLlmv1Action = Union[ + Literal[ + "none", + "block", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/moderationllmv1categorythresholds.py b/src/mistralai/client/models/moderationllmv1categorythresholds.py new file mode 100644 index 00000000..0451ba3a --- /dev/null +++ b/src/mistralai/client/models/moderationllmv1categorythresholds.py @@ -0,0 +1,94 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 0c51d6766440 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class ModerationLlmv1CategoryThresholdsTypedDict(TypedDict): + sexual: NotRequired[Nullable[float]] + hate_and_discrimination: NotRequired[Nullable[float]] + violence_and_threats: NotRequired[Nullable[float]] + dangerous_and_criminal_content: NotRequired[Nullable[float]] + selfharm: NotRequired[Nullable[float]] + health: NotRequired[Nullable[float]] + financial: NotRequired[Nullable[float]] + law: NotRequired[Nullable[float]] + pii: NotRequired[Nullable[float]] + + +class ModerationLlmv1CategoryThresholds(BaseModel): + sexual: OptionalNullable[float] = UNSET + + hate_and_discrimination: OptionalNullable[float] = UNSET + + violence_and_threats: OptionalNullable[float] = UNSET + + dangerous_and_criminal_content: OptionalNullable[float] = UNSET + + selfharm: OptionalNullable[float] = UNSET + + health: OptionalNullable[float] = UNSET + + financial: OptionalNullable[float] = UNSET + + law: OptionalNullable[float] = UNSET + + pii: OptionalNullable[float] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "sexual", + "hate_and_discrimination", + "violence_and_threats", + "dangerous_and_criminal_content", + "selfharm", + "health", + "financial", + "law", + "pii", + ] + ) + nullable_fields = set( + [ + "sexual", + "hate_and_discrimination", + "violence_and_threats", + "dangerous_and_criminal_content", + "selfharm", + "health", + "financial", + "law", + "pii", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: 
disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/moderationllmv1config.py b/src/mistralai/client/models/moderationllmv1config.py new file mode 100644 index 00000000..b34dbbd3 --- /dev/null +++ b/src/mistralai/client/models/moderationllmv1config.py @@ -0,0 +1,78 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 483378b56394 + +from __future__ import annotations +from .moderationllmv1action import ModerationLlmv1Action +from .moderationllmv1categorythresholds import ( + ModerationLlmv1CategoryThresholds, + ModerationLlmv1CategoryThresholdsTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ModerationLlmv1ConfigTypedDict(TypedDict): + model_name: NotRequired[str] + r"""Override model name. Should be omitted in general.""" + custom_category_thresholds: NotRequired[ + Nullable[ModerationLlmv1CategoryThresholdsTypedDict] + ] + r"""Override default thresholds for specific categories.""" + ignore_other_categories: NotRequired[bool] + r"""If true, only evaluate categories in custom_category_thresholds; others are ignored.""" + action: NotRequired[ModerationLlmv1Action] + + +class ModerationLlmv1Config(BaseModel): + model_name: Optional[str] = "mistral-moderation-2411" + r"""Override model name. 
Should be omitted in general.""" + + custom_category_thresholds: OptionalNullable[ModerationLlmv1CategoryThresholds] = ( + UNSET + ) + r"""Override default thresholds for specific categories.""" + + ignore_other_categories: Optional[bool] = False + r"""If true, only evaluate categories in custom_category_thresholds; others are ignored.""" + + action: Optional[ModerationLlmv1Action] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "model_name", + "custom_category_thresholds", + "ignore_other_categories", + "action", + ] + ) + nullable_fields = set(["custom_category_thresholds"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/observabilityerrorcode.py b/src/mistralai/client/models/observabilityerrorcode.py new file mode 100644 index 00000000..99360d41 --- /dev/null +++ b/src/mistralai/client/models/observabilityerrorcode.py @@ -0,0 +1,56 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ae572b470a30 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ObservabilityErrorCode = Union[ + Literal[ + "UNKNOWN_ERROR", + "VALIDATION_ERROR", + "AUTH_FORBIDDEN", + "AUTH_FORBIDDEN_NOT_WORKSPACE_ADMIN", + "AUTH_FORBIDDEN_WORKSPACE_NOT_FOUND", + "AUTH_FORBIDDEN_ROLE_NOT_FOUND", + "AUTH_FORBIDDEN_ORG_NOT_WHITELISTED", + "AUTH_UNAUTHORIZED", + "FEATURE_NOT_SUPPORTED", + "FIELDS_BAD_REQUEST", + "FIELDS_NOT_FOUND", + "SEARCH_NOT_FOUND", + "SEARCH_BAD_REQUEST", + "SEARCH_SERVICE_UNAVAILABLE", + "DATABASE_ERROR", + "DATABASE_TIMEOUT", + "DATABASE_UNAVAILABLE", + "DATABASE_QUERY_ERROR", + "SEARCH_FILTER_TO_SQL_CONVERSION_ERROR", + "JUDGE_CONVERSATION_FORMAT_ERROR", + "JUDGE_MISTRAL_API_ERROR", + "JUDGE_MISTRAL_API_TIMEOUT", + "JUDGE_NAME_ALREADY_EXISTS", + "JUDGE_NOT_FOUND", + "JUDGE_ALREADY_HAS_NEW_VERSION", + "JUDGE_USED_IN_CAMPAIGN_CANNOT_BE_UPDATED", + "JUDGE_DID_NOT_CHANGE", + "CAMPAIGN_NOT_FOUND", + "CAMPAIGN_NO_MATCHING_EVENTS", + "DATASET_NOT_FOUND", + "DATASET_TASK_NOT_FOUND", + "DATASET_RECORD_NOT_FOUND", + "DATASET_RECORD_FORMAT_ERROR", + "AGENT_NOT_FOUND", + "AGENT_MISTRAL_API_ERROR", + "EVALUATION_NOT_FOUND", + "EVALUATION_CURRENTLY_RUNNING", + "EVALUATION_RECORD_NOT_FOUND", + "EVALUATION_RUN_NOT_FOUND", + "EVALUATION_RUN_TRANSITION_IS_INVALID", + "EVALUATION_RUN_TRANSITION_IS_RUNNING_ALREADY", + "EVALUATION_RUN_TRANSITION_ERROR", + "TEMPLATE_SYNTAX_ERROR", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/observabilityerrordetail.py b/src/mistralai/client/models/observabilityerrordetail.py new file mode 100644 index 00000000..a6236b66 --- /dev/null +++ b/src/mistralai/client/models/observabilityerrordetail.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: cb6e8a484a38 + +from __future__ import annotations +from .observabilityerrorcode import ObservabilityErrorCode +from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing_extensions import TypedDict + + +class ObservabilityErrorDetailTypedDict(TypedDict): + message: str + error_code: Nullable[ObservabilityErrorCode] + + +class ObservabilityErrorDetail(BaseModel): + message: str + + error_code: Nullable[ObservabilityErrorCode] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + m[k] = val + + return m diff --git a/src/mistralai/client/models/outputcontentchunks.py b/src/mistralai/client/models/outputcontentchunks.py index fab7907b..1a115fe8 100644 --- a/src/mistralai/client/models/outputcontentchunks.py +++ b/src/mistralai/client/models/outputcontentchunks.py @@ -2,13 +2,10 @@ # @generated-id: 9ad9741f4975 from __future__ import annotations -from .conversationthinkchunk import ( - ConversationThinkChunk, - ConversationThinkChunkTypedDict, -) from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict from typing import Union @@ -21,7 +18,7 @@ TextChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict, - ConversationThinkChunkTypedDict, + ThinkChunkTypedDict, ToolFileChunkTypedDict, ToolReferenceChunkTypedDict, ], @@ -34,7 +31,7 @@ TextChunk, ImageURLChunk, DocumentURLChunk, - ConversationThinkChunk, + ThinkChunk, ToolFileChunk, ToolReferenceChunk, ], 
diff --git a/src/mistralai/client/models/paginatedconnectors.py b/src/mistralai/client/models/paginatedconnectors.py new file mode 100644 index 00000000..291da111 --- /dev/null +++ b/src/mistralai/client/models/paginatedconnectors.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 17e125b1022c + +from __future__ import annotations +from .connector import Connector, ConnectorTypedDict +from .paginationresponse import PaginationResponse, PaginationResponseTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class PaginatedConnectorsTypedDict(TypedDict): + items: List[ConnectorTypedDict] + pagination: PaginationResponseTypedDict + + +class PaginatedConnectors(BaseModel): + items: List[Connector] + + pagination: PaginationResponse diff --git a/src/mistralai/client/models/paginatedresultcampaignpreview.py b/src/mistralai/client/models/paginatedresultcampaignpreview.py new file mode 100644 index 00000000..d82ee741 --- /dev/null +++ b/src/mistralai/client/models/paginatedresultcampaignpreview.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 6653cba0f982 + +from __future__ import annotations +from .campaign import Campaign, CampaignTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class PaginatedResultCampaignPreviewTypedDict(TypedDict): + count: int + results: NotRequired[List[CampaignTypedDict]] + next: NotRequired[Nullable[str]] + previous: NotRequired[Nullable[str]] + + +class PaginatedResultCampaignPreview(BaseModel): + count: int + + results: Optional[List[Campaign]] = None + + next: OptionalNullable[str] = UNSET + + previous: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["results", "next", "previous"]) + nullable_fields = set(["next", "previous"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/paginatedresultchatcompletioneventpreview.py b/src/mistralai/client/models/paginatedresultchatcompletioneventpreview.py new file mode 100644 index 00000000..97bbfea7 --- /dev/null +++ b/src/mistralai/client/models/paginatedresultchatcompletioneventpreview.py @@ -0,0 +1,60 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 8c640682ccf9 + +from __future__ import annotations +from .chatcompletioneventpreview import ( + ChatCompletionEventPreview, + ChatCompletionEventPreviewTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class PaginatedResultChatCompletionEventPreviewTypedDict(TypedDict): + count: int + results: NotRequired[List[ChatCompletionEventPreviewTypedDict]] + next: NotRequired[Nullable[str]] + previous: NotRequired[Nullable[str]] + + +class PaginatedResultChatCompletionEventPreview(BaseModel): + count: int + + results: Optional[List[ChatCompletionEventPreview]] = None + + next: OptionalNullable[str] = UNSET + + previous: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["results", "next", "previous"]) + nullable_fields = set(["next", "previous"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/paginatedresultdatasetimporttask.py b/src/mistralai/client/models/paginatedresultdatasetimporttask.py new file mode 100644 index 00000000..9d6dcd05 --- /dev/null +++ b/src/mistralai/client/models/paginatedresultdatasetimporttask.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 8fd0a8bad4e7 + +from __future__ import annotations +from .datasetimporttask import DatasetImportTask, DatasetImportTaskTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class PaginatedResultDatasetImportTaskTypedDict(TypedDict): + count: int + results: NotRequired[List[DatasetImportTaskTypedDict]] + next: NotRequired[Nullable[str]] + previous: NotRequired[Nullable[str]] + + +class PaginatedResultDatasetImportTask(BaseModel): + count: int + + results: Optional[List[DatasetImportTask]] = None + + next: OptionalNullable[str] = UNSET + + previous: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["results", "next", "previous"]) + nullable_fields = set(["next", "previous"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/paginatedresultdatasetpreview.py b/src/mistralai/client/models/paginatedresultdatasetpreview.py new file mode 100644 index 00000000..51566cc5 --- /dev/null +++ b/src/mistralai/client/models/paginatedresultdatasetpreview.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: bf20489474ce + +from __future__ import annotations +from .datasetpreview import DatasetPreview, DatasetPreviewTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class PaginatedResultDatasetPreviewTypedDict(TypedDict): + count: int + results: NotRequired[List[DatasetPreviewTypedDict]] + next: NotRequired[Nullable[str]] + previous: NotRequired[Nullable[str]] + + +class PaginatedResultDatasetPreview(BaseModel): + count: int + + results: Optional[List[DatasetPreview]] = None + + next: OptionalNullable[str] = UNSET + + previous: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["results", "next", "previous"]) + nullable_fields = set(["next", "previous"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/paginatedresultdatasetrecord.py b/src/mistralai/client/models/paginatedresultdatasetrecord.py new file mode 100644 index 00000000..1d6e15c1 --- /dev/null +++ b/src/mistralai/client/models/paginatedresultdatasetrecord.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2556a91b48c4 + +from __future__ import annotations +from .datasetrecord import DatasetRecord, DatasetRecordTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class PaginatedResultDatasetRecordTypedDict(TypedDict): + count: int + results: NotRequired[List[DatasetRecordTypedDict]] + next: NotRequired[Nullable[str]] + previous: NotRequired[Nullable[str]] + + +class PaginatedResultDatasetRecord(BaseModel): + count: int + + results: Optional[List[DatasetRecord]] = None + + next: OptionalNullable[str] = UNSET + + previous: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["results", "next", "previous"]) + nullable_fields = set(["next", "previous"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/paginatedresultjudgepreview.py b/src/mistralai/client/models/paginatedresultjudgepreview.py new file mode 100644 index 00000000..57dbc1e5 --- /dev/null +++ b/src/mistralai/client/models/paginatedresultjudgepreview.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 4d5aab2705a0 + +from __future__ import annotations +from .judge import Judge, JudgeTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class PaginatedResultJudgePreviewTypedDict(TypedDict): + count: int + results: NotRequired[List[JudgeTypedDict]] + next: NotRequired[Nullable[str]] + previous: NotRequired[Nullable[str]] + + +class PaginatedResultJudgePreview(BaseModel): + count: int + + results: Optional[List[Judge]] = None + + next: OptionalNullable[str] = UNSET + + previous: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["results", "next", "previous"]) + nullable_fields = set(["next", "previous"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/paginationresponse.py b/src/mistralai/client/models/paginationresponse.py new file mode 100644 index 00000000..f05ee380 --- /dev/null +++ b/src/mistralai/client/models/paginationresponse.py @@ -0,0 +1,49 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d64678967bf0 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class PaginationResponseTypedDict(TypedDict): + page_size: int + next_cursor: NotRequired[Nullable[str]] + + +class PaginationResponse(BaseModel): + page_size: int + + next_cursor: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["next_cursor"]) + nullable_fields = set(["next_cursor"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/post_dataset_records_from_campaign_v1_observability_datasets_dataset_id_imports_from_campaign_postop.py b/src/mistralai/client/models/post_dataset_records_from_campaign_v1_observability_datasets_dataset_id_imports_from_campaign_postop.py new file mode 100644 index 00000000..3d572517 --- /dev/null +++ b/src/mistralai/client/models/post_dataset_records_from_campaign_v1_observability_datasets_dataset_id_imports_from_campaign_postop.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3e8e390b7fa1 + +from __future__ import annotations +from .importdatasetfromcampaignrequest import ( + ImportDatasetFromCampaignRequest, + ImportDatasetFromCampaignRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class PostDatasetRecordsFromCampaignV1ObservabilityDatasetsDatasetIDImportsFromCampaignPostRequestTypedDict( + TypedDict +): + dataset_id: str + import_dataset_from_campaign_request: ImportDatasetFromCampaignRequestTypedDict + + +class PostDatasetRecordsFromCampaignV1ObservabilityDatasetsDatasetIDImportsFromCampaignPostRequest( + BaseModel +): + dataset_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + import_dataset_from_campaign_request: Annotated[ + ImportDatasetFromCampaignRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/post_dataset_records_from_dataset_v1_observability_datasets_dataset_id_imports_from_dataset_postop.py b/src/mistralai/client/models/post_dataset_records_from_dataset_v1_observability_datasets_dataset_id_imports_from_dataset_postop.py new file mode 100644 index 00000000..aac48bd6 --- /dev/null +++ b/src/mistralai/client/models/post_dataset_records_from_dataset_v1_observability_datasets_dataset_id_imports_from_dataset_postop.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d396e018c804 + +from __future__ import annotations +from .importdatasetfromdatasetrequest import ( + ImportDatasetFromDatasetRequest, + ImportDatasetFromDatasetRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class PostDatasetRecordsFromDatasetV1ObservabilityDatasetsDatasetIDImportsFromDatasetPostRequestTypedDict( + TypedDict +): + dataset_id: str + import_dataset_from_dataset_request: ImportDatasetFromDatasetRequestTypedDict + + +class PostDatasetRecordsFromDatasetV1ObservabilityDatasetsDatasetIDImportsFromDatasetPostRequest( + BaseModel +): + dataset_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + import_dataset_from_dataset_request: Annotated[ + ImportDatasetFromDatasetRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/post_dataset_records_from_explorer_v1_observability_datasets_dataset_id_imports_from_explorer_postop.py b/src/mistralai/client/models/post_dataset_records_from_explorer_v1_observability_datasets_dataset_id_imports_from_explorer_postop.py new file mode 100644 index 00000000..6524c4d9 --- /dev/null +++ b/src/mistralai/client/models/post_dataset_records_from_explorer_v1_observability_datasets_dataset_id_imports_from_explorer_postop.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 046c79ed47c7 + +from __future__ import annotations +from .importdatasetfromexplorerrequest import ( + ImportDatasetFromExplorerRequest, + ImportDatasetFromExplorerRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class PostDatasetRecordsFromExplorerV1ObservabilityDatasetsDatasetIDImportsFromExplorerPostRequestTypedDict( + TypedDict +): + dataset_id: str + import_dataset_from_explorer_request: ImportDatasetFromExplorerRequestTypedDict + + +class PostDatasetRecordsFromExplorerV1ObservabilityDatasetsDatasetIDImportsFromExplorerPostRequest( + BaseModel +): + dataset_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + import_dataset_from_explorer_request: Annotated[ + ImportDatasetFromExplorerRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/post_dataset_records_from_file_v1_observability_datasets_dataset_id_imports_from_file_postop.py b/src/mistralai/client/models/post_dataset_records_from_file_v1_observability_datasets_dataset_id_imports_from_file_postop.py new file mode 100644 index 00000000..17f90d48 --- /dev/null +++ b/src/mistralai/client/models/post_dataset_records_from_file_v1_observability_datasets_dataset_id_imports_from_file_postop.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 6e93e5363630 + +from __future__ import annotations +from .importdatasetfromfilerequest import ( + ImportDatasetFromFileRequest, + ImportDatasetFromFileRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class PostDatasetRecordsFromFileV1ObservabilityDatasetsDatasetIDImportsFromFilePostRequestTypedDict( + TypedDict +): + dataset_id: str + import_dataset_from_file_request: ImportDatasetFromFileRequestTypedDict + + +class PostDatasetRecordsFromFileV1ObservabilityDatasetsDatasetIDImportsFromFilePostRequest( + BaseModel +): + dataset_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + import_dataset_from_file_request: Annotated[ + ImportDatasetFromFileRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/post_dataset_records_from_playground_v1_observability_datasets_dataset_id_imports_from_playground_postop.py b/src/mistralai/client/models/post_dataset_records_from_playground_v1_observability_datasets_dataset_id_imports_from_playground_postop.py new file mode 100644 index 00000000..7423375a --- /dev/null +++ b/src/mistralai/client/models/post_dataset_records_from_playground_v1_observability_datasets_dataset_id_imports_from_playground_postop.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 09334d96c26d + +from __future__ import annotations +from .importdatasetfromplaygroundrequest import ( + ImportDatasetFromPlaygroundRequest, + ImportDatasetFromPlaygroundRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class PostDatasetRecordsFromPlaygroundV1ObservabilityDatasetsDatasetIDImportsFromPlaygroundPostRequestTypedDict( + TypedDict +): + dataset_id: str + import_dataset_from_playground_request: ImportDatasetFromPlaygroundRequestTypedDict + + +class PostDatasetRecordsFromPlaygroundV1ObservabilityDatasetsDatasetIDImportsFromPlaygroundPostRequest( + BaseModel +): + dataset_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + import_dataset_from_playground_request: Annotated[ + ImportDatasetFromPlaygroundRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/processingstatusout.py b/src/mistralai/client/models/processingstatus.py similarity index 77% rename from src/mistralai/client/models/processingstatusout.py rename to src/mistralai/client/models/processingstatus.py index ed2a4f22..7e93308f 100644 --- a/src/mistralai/client/models/processingstatusout.py +++ b/src/mistralai/client/models/processingstatus.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 3df842c4140f +# @generated-id: 28146eaecfcf from __future__ import annotations from .processstatus import ProcessStatus @@ -7,13 +7,13 @@ from typing_extensions import TypedDict -class ProcessingStatusOutTypedDict(TypedDict): +class ProcessingStatusTypedDict(TypedDict): document_id: str process_status: ProcessStatus processing_status: str -class ProcessingStatusOut(BaseModel): +class ProcessingStatus(BaseModel): document_id: str process_status: ProcessStatus diff --git a/src/mistralai/client/models/resourcelink.py b/src/mistralai/client/models/resourcelink.py new file mode 100644 index 00000000..8ed5613c --- /dev/null +++ b/src/mistralai/client/models/resourcelink.py @@ -0,0 +1,140 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4251cc3c7797 + +from __future__ import annotations +from .annotations import Annotations, AnnotationsTypedDict +from .mcpservericon import MCPServerIcon, MCPServerIconTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, List, Literal +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ResourceLinkTypedDict(TypedDict): + r"""A resource that the server is capable of reading, included in a prompt or tool call result. + + Note: resource links returned by tools are not guaranteed to appear in the results of `resources/list` requests. 
+ """ + + name: str + uri: str + title: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + mime_type: NotRequired[Nullable[str]] + size: NotRequired[Nullable[int]] + icons: NotRequired[Nullable[List[MCPServerIconTypedDict]]] + annotations: NotRequired[Nullable[AnnotationsTypedDict]] + meta: NotRequired[Nullable[Dict[str, Any]]] + type: Literal["resource_link"] + + +class ResourceLink(BaseModel): + r"""A resource that the server is capable of reading, included in a prompt or tool call result. + + Note: resource links returned by tools are not guaranteed to appear in the results of `resources/list` requests. + """ + + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + name: str + + uri: str + + title: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + mime_type: Annotated[OptionalNullable[str], pydantic.Field(alias="mimeType")] = ( + UNSET + ) + + size: OptionalNullable[int] = UNSET + + icons: OptionalNullable[List[MCPServerIcon]] = UNSET + + annotations: OptionalNullable[Annotations] = UNSET + + meta: Annotated[OptionalNullable[Dict[str, Any]], pydantic.Field(alias="_meta")] = ( + UNSET + ) + + type: Annotated[ + Annotated[ + Literal["resource_link"], AfterValidator(validate_const("resource_link")) + ], + pydantic.Field(alias="type"), + ] = "resource_link" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "title", + "description", + "mimeType", + "size", + "icons", + "annotations", + "_meta", + ] + ) + nullable_fields = set( + [ + "title", + "description", + "mimeType", + "size", + "icons", + 
"annotations", + "_meta", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m + + +try: + ResourceLink.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/resourcevisibility.py b/src/mistralai/client/models/resourcevisibility.py new file mode 100644 index 00000000..56f91f15 --- /dev/null +++ b/src/mistralai/client/models/resourcevisibility.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b5819dd5f981 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ResourceVisibility = Union[ + Literal[ + "shared_global", + "shared_org", + "shared_workspace", + "private", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/searchchatcompletioneventidsrequest.py b/src/mistralai/client/models/searchchatcompletioneventidsrequest.py new file mode 100644 index 00000000..f8476081 --- /dev/null +++ b/src/mistralai/client/models/searchchatcompletioneventidsrequest.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: cabc8ef82d67 + +from __future__ import annotations +from .filterpayload import FilterPayload, FilterPayloadTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List +from typing_extensions import NotRequired, TypedDict + + +class SearchChatCompletionEventIdsRequestTypedDict(TypedDict): + search_params: FilterPayloadTypedDict + extra_fields: NotRequired[Nullable[List[str]]] + + +class SearchChatCompletionEventIdsRequest(BaseModel): + search_params: FilterPayload + + extra_fields: OptionalNullable[List[str]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["extra_fields"]) + nullable_fields = set(["extra_fields"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/searchchatcompletioneventidsresponse.py b/src/mistralai/client/models/searchchatcompletioneventidsresponse.py new file mode 100644 index 00000000..f4751159 --- /dev/null +++ b/src/mistralai/client/models/searchchatcompletioneventidsresponse.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a5f0bad3ba10 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class SearchChatCompletionEventIdsResponseTypedDict(TypedDict): + completion_event_ids: List[str] + + +class SearchChatCompletionEventIdsResponse(BaseModel): + completion_event_ids: List[str] diff --git a/src/mistralai/client/models/searchchatcompletioneventsrequest.py b/src/mistralai/client/models/searchchatcompletioneventsrequest.py new file mode 100644 index 00000000..95524c5b --- /dev/null +++ b/src/mistralai/client/models/searchchatcompletioneventsrequest.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a437333780bc + +from __future__ import annotations +from .filterpayload import FilterPayload, FilterPayloadTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List +from typing_extensions import NotRequired, TypedDict + + +class SearchChatCompletionEventsRequestTypedDict(TypedDict): + search_params: FilterPayloadTypedDict + extra_fields: NotRequired[Nullable[List[str]]] + + +class SearchChatCompletionEventsRequest(BaseModel): + search_params: FilterPayload + + extra_fields: OptionalNullable[List[str]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["extra_fields"]) + nullable_fields = set(["extra_fields"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + 
+ return m diff --git a/src/mistralai/client/models/searchchatcompletioneventsresponse.py b/src/mistralai/client/models/searchchatcompletioneventsresponse.py new file mode 100644 index 00000000..8b9b10b5 --- /dev/null +++ b/src/mistralai/client/models/searchchatcompletioneventsresponse.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f96acbcd45f7 + +from __future__ import annotations +from .feedresultchatcompletioneventpreview import ( + FeedResultChatCompletionEventPreview, + FeedResultChatCompletionEventPreviewTypedDict, +) +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class SearchChatCompletionEventsResponseTypedDict(TypedDict): + completion_events: FeedResultChatCompletionEventPreviewTypedDict + + +class SearchChatCompletionEventsResponse(BaseModel): + completion_events: FeedResultChatCompletionEventPreview diff --git a/src/mistralai/client/models/sharingout.py b/src/mistralai/client/models/sharing.py similarity index 93% rename from src/mistralai/client/models/sharingout.py rename to src/mistralai/client/models/sharing.py index ab3679a4..d6142e83 100644 --- a/src/mistralai/client/models/sharingout.py +++ b/src/mistralai/client/models/sharing.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 0b8804effb5c +# @generated-id: 324f5ac27249 from __future__ import annotations from mistralai.client.types import ( @@ -13,7 +13,7 @@ from typing_extensions import NotRequired, TypedDict -class SharingOutTypedDict(TypedDict): +class SharingTypedDict(TypedDict): library_id: str org_id: str role: str @@ -22,7 +22,7 @@ class SharingOutTypedDict(TypedDict): user_id: NotRequired[Nullable[str]] -class SharingOut(BaseModel): +class Sharing(BaseModel): library_id: str org_id: str diff --git a/src/mistralai/client/models/sharingin.py b/src/mistralai/client/models/sharingrequest.py similarity index 94% rename from src/mistralai/client/models/sharingin.py rename to src/mistralai/client/models/sharingrequest.py index 7c1a52b0..76424b8e 100644 --- a/src/mistralai/client/models/sharingin.py +++ b/src/mistralai/client/models/sharingrequest.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: e953dda09c02 +# @generated-id: 2439b732dfae from __future__ import annotations from .entitytype import EntityType @@ -15,7 +15,7 @@ from typing_extensions import NotRequired, TypedDict -class SharingInTypedDict(TypedDict): +class SharingRequestTypedDict(TypedDict): level: ShareEnum share_with_uuid: str r"""The id of the entity (user, workspace or organization) to share with""" @@ -24,7 +24,7 @@ class SharingInTypedDict(TypedDict): org_id: NotRequired[Nullable[str]] -class SharingIn(BaseModel): +class SharingRequest(BaseModel): level: ShareEnum share_with_uuid: str diff --git a/src/mistralai/client/models/textcontent.py b/src/mistralai/client/models/textcontent.py new file mode 100644 index 00000000..7468b046 --- /dev/null +++ b/src/mistralai/client/models/textcontent.py @@ -0,0 +1,91 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 60805b9f7050 + +from __future__ import annotations +from .annotations import Annotations, AnnotationsTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, Literal +from typing_extensions import Annotated, NotRequired, TypedDict + + +class TextContentTypedDict(TypedDict): + r"""Text content for a message.""" + + text: str + type: Literal["text"] + annotations: NotRequired[Nullable[AnnotationsTypedDict]] + meta: NotRequired[Nullable[Dict[str, Any]]] + + +class TextContent(BaseModel): + r"""Text content for a message.""" + + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + text: str + + type: Annotated[ + Annotated[Literal["text"], AfterValidator(validate_const("text"))], + pydantic.Field(alias="type"), + ] = "text" + + annotations: OptionalNullable[Annotations] = UNSET + + meta: Annotated[OptionalNullable[Dict[str, Any]], pydantic.Field(alias="_meta")] = ( + UNSET + ) + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["annotations", "_meta"]) + nullable_fields = set(["annotations", "_meta"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and 
(self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m + + +try: + TextContent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/textresourcecontents.py b/src/mistralai/client/models/textresourcecontents.py new file mode 100644 index 00000000..c497bb4d --- /dev/null +++ b/src/mistralai/client/models/textresourcecontents.py @@ -0,0 +1,87 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 58fe427f427f + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict +from typing_extensions import Annotated, NotRequired, TypedDict + + +class TextResourceContentsTypedDict(TypedDict): + r"""Text contents of a resource.""" + + uri: str + text: str + mime_type: NotRequired[Nullable[str]] + meta: NotRequired[Nullable[Dict[str, Any]]] + + +class TextResourceContents(BaseModel): + r"""Text contents of a resource.""" + + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + uri: str + + text: str + + mime_type: Annotated[OptionalNullable[str], pydantic.Field(alias="mimeType")] = ( + UNSET + ) + + meta: Annotated[OptionalNullable[Dict[str, Any]], pydantic.Field(alias="_meta")] = ( + UNSET + ) + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def 
serialize_model(self, handler): + optional_fields = set(["mimeType", "_meta"]) + nullable_fields = set(["mimeType", "_meta"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + for k, v in serialized.items(): + m[k] = v + + return m + + +try: + TextResourceContents.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/thinkchunk.py b/src/mistralai/client/models/thinkchunk.py index 5995e601..03573f8e 100644 --- a/src/mistralai/client/models/thinkchunk.py +++ b/src/mistralai/client/models/thinkchunk.py @@ -4,6 +4,7 @@ from __future__ import annotations from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict +from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic @@ -13,28 +14,31 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -ThinkChunkThinkingTypedDict = TypeAliasType( - "ThinkChunkThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict] +ThinkingTypedDict = TypeAliasType( + "ThinkingTypedDict", + Union[TextChunkTypedDict, ReferenceChunkTypedDict, ToolReferenceChunkTypedDict], ) -ThinkChunkThinking = TypeAliasType( - "ThinkChunkThinking", Union[ReferenceChunk, TextChunk] +Thinking = TypeAliasType( + "Thinking", Union[TextChunk, ReferenceChunk, ToolReferenceChunk] ) class ThinkChunkTypedDict(TypedDict): - thinking: List[ThinkChunkThinkingTypedDict] + thinking: List[ThinkingTypedDict] type: 
Literal["thinking"] closed: NotRequired[bool] r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" class ThinkChunk(BaseModel): - thinking: List[ThinkChunkThinking] + thinking: List[Thinking] type: Annotated[ - Annotated[Literal["thinking"], AfterValidator(validate_const("thinking"))], + Annotated[ + Optional[Literal["thinking"]], AfterValidator(validate_const("thinking")) + ], pydantic.Field(alias="type"), ] = "thinking" @@ -43,7 +47,7 @@ class ThinkChunk(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = set(["closed"]) + optional_fields = set(["type", "closed"]) serialized = handler(self) m = {} diff --git a/src/mistralai/client/models/update_dataset_record_payload_v1_observability_dataset_records_dataset_record_id_payload_putop.py b/src/mistralai/client/models/update_dataset_record_payload_v1_observability_dataset_records_dataset_record_id_payload_putop.py new file mode 100644 index 00000000..d5a2bd9d --- /dev/null +++ b/src/mistralai/client/models/update_dataset_record_payload_v1_observability_dataset_records_dataset_record_id_payload_putop.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: fa5d55a9d6cf + +from __future__ import annotations +from .updatedatasetrecordpayloadrequest import ( + UpdateDatasetRecordPayloadRequest, + UpdateDatasetRecordPayloadRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class UpdateDatasetRecordPayloadV1ObservabilityDatasetRecordsDatasetRecordIDPayloadPutRequestTypedDict( + TypedDict +): + dataset_record_id: str + update_dataset_record_payload_request: UpdateDatasetRecordPayloadRequestTypedDict + + +class UpdateDatasetRecordPayloadV1ObservabilityDatasetRecordsDatasetRecordIDPayloadPutRequest( + BaseModel +): + dataset_record_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + update_dataset_record_payload_request: Annotated[ + UpdateDatasetRecordPayloadRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/update_dataset_record_properties_v1_observability_dataset_records_dataset_record_id_properties_putop.py b/src/mistralai/client/models/update_dataset_record_properties_v1_observability_dataset_records_dataset_record_id_properties_putop.py new file mode 100644 index 00000000..b828aa80 --- /dev/null +++ b/src/mistralai/client/models/update_dataset_record_properties_v1_observability_dataset_records_dataset_record_id_properties_putop.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b0af26e00bfa + +from __future__ import annotations +from .updatedatasetrecordpropertiesrequest import ( + UpdateDatasetRecordPropertiesRequest, + UpdateDatasetRecordPropertiesRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class UpdateDatasetRecordPropertiesV1ObservabilityDatasetRecordsDatasetRecordIDPropertiesPutRequestTypedDict( + TypedDict +): + dataset_record_id: str + update_dataset_record_properties_request: ( + UpdateDatasetRecordPropertiesRequestTypedDict + ) + + +class UpdateDatasetRecordPropertiesV1ObservabilityDatasetRecordsDatasetRecordIDPropertiesPutRequest( + BaseModel +): + dataset_record_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + update_dataset_record_properties_request: Annotated[ + UpdateDatasetRecordPropertiesRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/update_dataset_v1_observability_datasets_dataset_id_patchop.py b/src/mistralai/client/models/update_dataset_v1_observability_datasets_dataset_id_patchop.py new file mode 100644 index 00000000..5a440564 --- /dev/null +++ b/src/mistralai/client/models/update_dataset_v1_observability_datasets_dataset_id_patchop.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 7bb459765cba + +from __future__ import annotations +from .updatedatasetrequest import UpdateDatasetRequest, UpdateDatasetRequestTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class UpdateDatasetV1ObservabilityDatasetsDatasetIDPatchRequestTypedDict(TypedDict): + dataset_id: str + update_dataset_request: UpdateDatasetRequestTypedDict + + +class UpdateDatasetV1ObservabilityDatasetsDatasetIDPatchRequest(BaseModel): + dataset_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + update_dataset_request: Annotated[ + UpdateDatasetRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/update_judge_v1_observability_judges_judge_id_putop.py b/src/mistralai/client/models/update_judge_v1_observability_judges_judge_id_putop.py new file mode 100644 index 00000000..41c3a365 --- /dev/null +++ b/src/mistralai/client/models/update_judge_v1_observability_judges_judge_id_putop.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a1bace5342e6 + +from __future__ import annotations +from .updatejudgerequest import UpdateJudgeRequest, UpdateJudgeRequestTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class UpdateJudgeV1ObservabilityJudgesJudgeIDPutRequestTypedDict(TypedDict): + judge_id: str + update_judge_request: UpdateJudgeRequestTypedDict + + +class UpdateJudgeV1ObservabilityJudgesJudgeIDPutRequest(BaseModel): + judge_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + update_judge_request: Annotated[ + UpdateJudgeRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/updateagentrequest.py b/src/mistralai/client/models/updateagentrequest.py index b751ff74..dbe3499e 100644 --- a/src/mistralai/client/models/updateagentrequest.py +++ b/src/mistralai/client/models/updateagentrequest.py @@ -6,6 +6,7 @@ from .completionargs import CompletionArgs, CompletionArgsTypedDict from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict from .functiontool import FunctionTool, FunctionToolTypedDict +from .guardrailconfig import GuardrailConfig, GuardrailConfigTypedDict from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict from .websearchtool import WebSearchTool, WebSearchToolTypedDict @@ -54,6 +55,7 @@ class UpdateAgentRequestTypedDict(TypedDict): r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" + guardrails: NotRequired[Nullable[List[GuardrailConfigTypedDict]]] model: NotRequired[Nullable[str]] name: NotRequired[Nullable[str]] 
description: NotRequired[Nullable[str]] @@ -73,6 +75,8 @@ class UpdateAgentRequest(BaseModel): completion_args: Optional[CompletionArgs] = None r"""White-listed arguments from the completion API""" + guardrails: OptionalNullable[List[GuardrailConfig]] = UNSET + model: OptionalNullable[str] = UNSET name: OptionalNullable[str] = UNSET @@ -94,6 +98,7 @@ def serialize_model(self, handler): "instructions", "tools", "completion_args", + "guardrails", "model", "name", "description", @@ -106,6 +111,7 @@ def serialize_model(self, handler): nullable_fields = set( [ "instructions", + "guardrails", "model", "name", "description", diff --git a/src/mistralai/client/models/updateconnectorrequest.py b/src/mistralai/client/models/updateconnectorrequest.py new file mode 100644 index 00000000..2fe42ba8 --- /dev/null +++ b/src/mistralai/client/models/updateconnectorrequest.py @@ -0,0 +1,114 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a761cd154109 + +from __future__ import annotations +from .authdata import AuthData, AuthDataTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict +from typing_extensions import NotRequired, TypedDict + + +class UpdateConnectorRequestTypedDict(TypedDict): + name: NotRequired[Nullable[str]] + r"""The name of the connector.""" + description: NotRequired[Nullable[str]] + r"""The description of the connector.""" + icon_url: NotRequired[Nullable[str]] + r"""The optional url of the icon you want to associate to the connector.""" + system_prompt: NotRequired[Nullable[str]] + r"""Optional system prompt for the connector.""" + connection_config: NotRequired[Nullable[Dict[str, Any]]] + r"""Optional new connection config.""" + connection_secrets: NotRequired[Nullable[Dict[str, Any]]] + r"""Optional new connection secrets""" + server: NotRequired[Nullable[str]] + r"""New server url 
for your mcp connector.""" + headers: NotRequired[Nullable[Dict[str, Any]]] + r"""New headers for your mcp connector.""" + auth_data: NotRequired[Nullable[AuthDataTypedDict]] + r"""New authentication data for your mcp connector.""" + + +class UpdateConnectorRequest(BaseModel): + name: OptionalNullable[str] = UNSET + r"""The name of the connector.""" + + description: OptionalNullable[str] = UNSET + r"""The description of the connector.""" + + icon_url: OptionalNullable[str] = UNSET + r"""The optional url of the icon you want to associate to the connector.""" + + system_prompt: OptionalNullable[str] = UNSET + r"""Optional system prompt for the connector.""" + + connection_config: OptionalNullable[Dict[str, Any]] = UNSET + r"""Optional new connection config.""" + + connection_secrets: OptionalNullable[Dict[str, Any]] = UNSET + r"""Optional new connection secrets""" + + server: OptionalNullable[str] = UNSET + r"""New server url for your mcp connector.""" + + headers: OptionalNullable[Dict[str, Any]] = UNSET + r"""New headers for your mcp connector.""" + + auth_data: OptionalNullable[AuthData] = UNSET + r"""New authentication data for your mcp connector.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "name", + "description", + "icon_url", + "system_prompt", + "connection_config", + "connection_secrets", + "server", + "headers", + "auth_data", + ] + ) + nullable_fields = set( + [ + "name", + "description", + "icon_url", + "system_prompt", + "connection_config", + "connection_secrets", + "server", + "headers", + "auth_data", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or 
is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/updatedatasetrecordpayloadrequest.py b/src/mistralai/client/models/updatedatasetrecordpayloadrequest.py new file mode 100644 index 00000000..155ea78d --- /dev/null +++ b/src/mistralai/client/models/updatedatasetrecordpayloadrequest.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: bd45f357a538 + +from __future__ import annotations +from .conversationpayload import ConversationPayload, ConversationPayloadTypedDict +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class UpdateDatasetRecordPayloadRequestTypedDict(TypedDict): + payload: ConversationPayloadTypedDict + + +class UpdateDatasetRecordPayloadRequest(BaseModel): + payload: ConversationPayload diff --git a/src/mistralai/client/models/updatedatasetrecordpropertiesrequest.py b/src/mistralai/client/models/updatedatasetrecordpropertiesrequest.py new file mode 100644 index 00000000..ccba4a5c --- /dev/null +++ b/src/mistralai/client/models/updatedatasetrecordpropertiesrequest.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c457ead40a69 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Any, Dict +from typing_extensions import TypedDict + + +class UpdateDatasetRecordPropertiesRequestTypedDict(TypedDict): + properties: Dict[str, Any] + + +class UpdateDatasetRecordPropertiesRequest(BaseModel): + properties: Dict[str, Any] diff --git a/src/mistralai/client/models/updatedatasetrequest.py b/src/mistralai/client/models/updatedatasetrequest.py new file mode 100644 index 00000000..02974089 --- /dev/null +++ b/src/mistralai/client/models/updatedatasetrequest.py @@ -0,0 +1,49 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: bbb067caa23f + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class UpdateDatasetRequestTypedDict(TypedDict): + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + + +class UpdateDatasetRequest(BaseModel): + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["name", "description"]) + nullable_fields = set(["name", "description"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/updatejudgerequest.py b/src/mistralai/client/models/updatejudgerequest.py new file mode 100644 index 00000000..04c86ab6 --- /dev/null +++ b/src/mistralai/client/models/updatejudgerequest.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f6ad6fb901a0 + +from __future__ import annotations +from .judgeclassificationoutput import ( + JudgeClassificationOutput, + JudgeClassificationOutputTypedDict, +) +from .judgeregressionoutput import JudgeRegressionOutput, JudgeRegressionOutputTypedDict +from mistralai.client.types import BaseModel +from pydantic import Field +from typing import List, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +UpdateJudgeRequestOutputTypedDict = TypeAliasType( + "UpdateJudgeRequestOutputTypedDict", + Union[JudgeClassificationOutputTypedDict, JudgeRegressionOutputTypedDict], +) + + +UpdateJudgeRequestOutput = Annotated[ + Union[JudgeClassificationOutput, JudgeRegressionOutput], Field(discriminator="type") +] + + +class UpdateJudgeRequestTypedDict(TypedDict): + name: str + description: str + model_name: str + output: UpdateJudgeRequestOutputTypedDict + instructions: str + tools: List[str] + + +class UpdateJudgeRequest(BaseModel): + name: str + + description: str + + model_name: str + + output: UpdateJudgeRequestOutput + + instructions: str + + tools: List[str] diff --git a/src/mistralai/client/models/validationerror.py b/src/mistralai/client/models/validationerror.py index 385714c8..d856e24c 100644 --- a/src/mistralai/client/models/validationerror.py +++ b/src/mistralai/client/models/validationerror.py @@ -2,9 +2,10 @@ # @generated-id: 15df3c7368ab from __future__ import annotations -from mistralai.client.types import BaseModel -from typing import List, Union -from typing_extensions import TypeAliasType, TypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Any, List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict LocTypedDict = TypeAliasType("LocTypedDict", Union[str, int]) @@ -13,10 +14,20 @@ Loc = TypeAliasType("Loc", Union[str, int]) +class ContextTypedDict(TypedDict): + pass + + +class 
Context(BaseModel): + pass + + class ValidationErrorTypedDict(TypedDict): loc: List[LocTypedDict] msg: str type: str + input: NotRequired[Any] + ctx: NotRequired[ContextTypedDict] class ValidationError(BaseModel): @@ -25,3 +36,23 @@ class ValidationError(BaseModel): msg: str type: str + + input: Optional[Any] = None + + ctx: Optional[Context] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["input", "ctx"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models_.py b/src/mistralai/client/models_.py index a287c413..7bb6dc1d 100644 --- a/src/mistralai/client/models_.py +++ b/src/mistralai/client/models_.py @@ -16,6 +16,8 @@ class Models(BaseSDK): def list( self, *, + provider: OptionalNullable[str] = UNSET, + model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -25,6 +27,8 @@ def list( List all models available to the user. 
+ :param provider: + :param model: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -39,12 +43,18 @@ def list( base_url = server_url else: base_url = self._get_url(base_url, url_variables) + + request = models.ListModelsV1ModelsGetRequest( + provider=provider, + model=model, + ) + req = self._build_request( method="GET", path="/v1/models", base_url=base_url, url_variables=url_variables, - request=None, + request=request, request_body_required=False, request_has_path_params=False, request_has_query_params=True, @@ -75,12 +85,18 @@ def list( ), ), request=req, - error_status_codes=["4XX", "5XX"], + error_status_codes=["422", "4XX", "5XX"], retry_config=retry_config, ) + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return unmarshal_json_response(models.ModelList, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise errors.SDKError("API error occurred", http_res, http_res_text) @@ -93,6 +109,8 @@ def list( async def list_async( self, *, + provider: OptionalNullable[str] = UNSET, + model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -102,6 +120,8 @@ async def list_async( List all models available to the user. 
+ :param provider: + :param model: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -116,12 +136,18 @@ async def list_async( base_url = server_url else: base_url = self._get_url(base_url, url_variables) + + request = models.ListModelsV1ModelsGetRequest( + provider=provider, + model=model, + ) + req = self._build_request_async( method="GET", path="/v1/models", base_url=base_url, url_variables=url_variables, - request=None, + request=request, request_body_required=False, request_has_path_params=False, request_has_query_params=True, @@ -152,12 +178,18 @@ async def list_async( ), ), request=req, - error_status_codes=["4XX", "5XX"], + error_status_codes=["422", "4XX", "5XX"], retry_config=retry_config, ) + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return unmarshal_json_response(models.ModelList, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + errors.HTTPValidationErrorData, http_res + ) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise errors.SDKError("API error occurred", http_res, http_res_text) @@ -359,7 +391,7 @@ def delete( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DeleteModelOut: + ) -> models.DeleteModelResponse: r"""Delete Model Delete a fine-tuned model. 
@@ -426,7 +458,7 @@ def delete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DeleteModelOut, http_res) + return unmarshal_json_response(models.DeleteModelResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( errors.HTTPValidationErrorData, http_res @@ -449,7 +481,7 @@ async def delete_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DeleteModelOut: + ) -> models.DeleteModelResponse: r"""Delete Model Delete a fine-tuned model. @@ -516,7 +548,7 @@ async def delete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DeleteModelOut, http_res) + return unmarshal_json_response(models.DeleteModelResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( errors.HTTPValidationErrorData, http_res diff --git a/src/mistralai/client/observability.py b/src/mistralai/client/observability.py new file mode 100644 index 00000000..4057909e --- /dev/null +++ b/src/mistralai/client/observability.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 453a1d06d130 + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.client.campaigns import Campaigns +from mistralai.client.chat_completion_events import ChatCompletionEvents +from mistralai.client.datasets import Datasets +from mistralai.client.judges import Judges +from typing import Optional + + +class Observability(BaseSDK): + chat_completion_events: ChatCompletionEvents + judges: Judges + campaigns: Campaigns + datasets: Datasets + + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.chat_completion_events = ChatCompletionEvents( + self.sdk_configuration, parent_ref=self.parent_ref + ) + self.judges = Judges(self.sdk_configuration, parent_ref=self.parent_ref) + self.campaigns = Campaigns(self.sdk_configuration, parent_ref=self.parent_ref) + self.datasets = Datasets(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/client/records.py b/src/mistralai/client/records.py new file mode 100644 index 00000000..ceb8de4f --- /dev/null +++ b/src/mistralai/client/records.py @@ -0,0 +1,1178 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 10f90c990bd8 + +from .basesdk import BaseSDK +from mistralai.client import errors, models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union + + +class Records(BaseSDK): + def fetch( + self, + *, + dataset_record_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DatasetRecord: + r"""Get the content of a given conversation from a dataset + + :param dataset_record_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDGetRequest( + dataset_record_id=dataset_record_id, + ) + + req = self._build_request( + method="GET", + path="/v1/observability/dataset-records/{dataset_record_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_dataset_record_v1_observability_dataset_records__dataset_record_id__get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DatasetRecord, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if 
utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def fetch_async( + self, + *, + dataset_record_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DatasetRecord: + r"""Get the content of a given conversation from a dataset + + :param dataset_record_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.GetDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDGetRequest( + dataset_record_id=dataset_record_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/observability/dataset-records/{dataset_record_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="get_dataset_record_v1_observability_dataset_records__dataset_record_id__get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DatasetRecord, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if 
utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + dataset_record_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a record from a dataset + + :param dataset_record_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.DeleteDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDDeleteRequest( + dataset_record_id=dataset_record_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/observability/dataset-records/{dataset_record_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="delete_dataset_record_v1_observability_dataset_records__dataset_record_id__delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = 
utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + dataset_record_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a record from a dataset + + :param dataset_record_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.DeleteDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDDeleteRequest( + dataset_record_id=dataset_record_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/observability/dataset-records/{dataset_record_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None 
+ if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="delete_dataset_record_v1_observability_dataset_records__dataset_record_id__delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def bulk_delete( + self, + *, + dataset_record_ids: List[str], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete multiple records from datasets + + :param dataset_record_ids: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.DeleteDatasetRecordsRequest( + dataset_record_ids=dataset_record_ids, + ) + + req = self._build_request( + method="POST", + path="/v1/observability/dataset-records/bulk-delete", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.DeleteDatasetRecordsRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="delete_dataset_records_v1_observability_dataset_records_bulk_delete_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if 
utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def bulk_delete_async( + self, + *, + dataset_record_ids: List[str], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete multiple records from datasets + + :param dataset_record_ids: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.DeleteDatasetRecordsRequest( + dataset_record_ids=dataset_record_ids, + ) + + req = self._build_request_async( + method="POST", + path="/v1/observability/dataset-records/bulk-delete", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.DeleteDatasetRecordsRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="delete_dataset_records_v1_observability_dataset_records_bulk_delete_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, 
http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def judge( + self, + *, + dataset_record_id: str, + judge_definition: Union[ + models.CreateJudgeRequest, models.CreateJudgeRequestTypedDict + ], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JudgeOutput: + r"""Run Judge on a dataset record based on the given options + + :param dataset_record_id: + :param judge_definition: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JudgeDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDLiveJudgingPostRequest( + dataset_record_id=dataset_record_id, + judge_dataset_record_request=models.JudgeDatasetRecordRequest( + judge_definition=utils.get_pydantic_model( + judge_definition, models.CreateJudgeRequest + ), + ), + ) + + req = self._build_request( + method="POST", + path="/v1/observability/dataset-records/{dataset_record_id}/live-judging", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.judge_dataset_record_request, + False, + False, + "json", + models.JudgeDatasetRecordRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="judge_dataset_record_v1_observability_dataset_records__dataset_record_id__live_judging_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if 
utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.JudgeOutput, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def judge_async( + self, + *, + dataset_record_id: str, + judge_definition: Union[ + models.CreateJudgeRequest, models.CreateJudgeRequestTypedDict + ], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JudgeOutput: + r"""Run Judge on a dataset record based on the given options + + :param dataset_record_id: + :param judge_definition: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JudgeDatasetRecordV1ObservabilityDatasetRecordsDatasetRecordIDLiveJudgingPostRequest( + dataset_record_id=dataset_record_id, + judge_dataset_record_request=models.JudgeDatasetRecordRequest( + judge_definition=utils.get_pydantic_model( + judge_definition, models.CreateJudgeRequest + ), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/observability/dataset-records/{dataset_record_id}/live-judging", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.judge_dataset_record_request, + False, + False, + "json", + models.JudgeDatasetRecordRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="judge_dataset_record_v1_observability_dataset_records__dataset_record_id__live_judging_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if 
utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.JudgeOutput, http_res) + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def update_payload( + self, + *, + dataset_record_id: str, + payload: Union[models.ConversationPayload, models.ConversationPayloadTypedDict], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Update a dataset record conversation payload + + :param dataset_record_id: + :param payload: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.UpdateDatasetRecordPayloadV1ObservabilityDatasetRecordsDatasetRecordIDPayloadPutRequest( + dataset_record_id=dataset_record_id, + update_dataset_record_payload_request=models.UpdateDatasetRecordPayloadRequest( + payload=utils.get_pydantic_model(payload, models.ConversationPayload), + ), + ) + + req = self._build_request( + method="PUT", + path="/v1/observability/dataset-records/{dataset_record_id}/payload", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_dataset_record_payload_request, + False, + False, + "json", + models.UpdateDatasetRecordPayloadRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="update_dataset_record_payload_v1_observability_dataset_records__dataset_record_id__payload_put", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if 
utils.match_response(http_res, "204", "*"): + return + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def update_payload_async( + self, + *, + dataset_record_id: str, + payload: Union[models.ConversationPayload, models.ConversationPayloadTypedDict], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Update a dataset record conversation payload + + :param dataset_record_id: + :param payload: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.UpdateDatasetRecordPayloadV1ObservabilityDatasetRecordsDatasetRecordIDPayloadPutRequest( + dataset_record_id=dataset_record_id, + update_dataset_record_payload_request=models.UpdateDatasetRecordPayloadRequest( + payload=utils.get_pydantic_model(payload, models.ConversationPayload), + ), + ) + + req = self._build_request_async( + method="PUT", + path="/v1/observability/dataset-records/{dataset_record_id}/payload", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_dataset_record_payload_request, + False, + False, + "json", + models.UpdateDatasetRecordPayloadRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="update_dataset_record_payload_v1_observability_dataset_records__dataset_record_id__payload_put", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any 
= None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + def update_properties( + self, + *, + dataset_record_id: str, + properties: Dict[str, Any], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Update conversation properties + + :param dataset_record_id: + :param properties: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.UpdateDatasetRecordPropertiesV1ObservabilityDatasetRecordsDatasetRecordIDPropertiesPutRequest( + dataset_record_id=dataset_record_id, + update_dataset_record_properties_request=models.UpdateDatasetRecordPropertiesRequest( + properties=properties, + ), + ) + + req = self._build_request( + method="PUT", + path="/v1/observability/dataset-records/{dataset_record_id}/properties", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_dataset_record_properties_request, + False, + False, + "json", + models.UpdateDatasetRecordPropertiesRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="update_dataset_record_properties_v1_observability_dataset_records__dataset_record_id__properties_put", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if 
utils.match_response(http_res, "204", "*"): + return + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) + + async def update_properties_async( + self, + *, + dataset_record_id: str, + properties: Dict[str, Any], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Update conversation properties + + :param dataset_record_id: + :param properties: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.UpdateDatasetRecordPropertiesV1ObservabilityDatasetRecordsDatasetRecordIDPropertiesPutRequest( + dataset_record_id=dataset_record_id, + update_dataset_record_properties_request=models.UpdateDatasetRecordPropertiesRequest( + properties=properties, + ), + ) + + req = self._build_request_async( + method="PUT", + path="/v1/observability/dataset-records/{dataset_record_id}/properties", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_dataset_record_properties_request, + False, + False, + "json", + models.UpdateDatasetRecordPropertiesRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="update_dataset_record_properties_v1_observability_dataset_records__dataset_record_id__properties_put", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["400", "404", "408", "409", "422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if 
utils.match_response(http_res, "204", "*"): + return + if utils.match_response( + http_res, ["400", "404", "408", "409", "422"], "application/json" + ): + response_data = unmarshal_json_response( + errors.ObservabilityErrorData, http_res + ) + raise errors.ObservabilityError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise errors.SDKError("API error occurred", http_res, http_res_text) + + raise errors.SDKError("Unexpected response received", http_res) From 4ff7e25c52a2b03565aabd69d6b2eafc43102245 Mon Sep 17 00:00:00 2001 From: GitHub Action Date: Mon, 9 Mar 2026 19:25:00 +0000 Subject: [PATCH 2/2] chore: align pyproject.toml and uv.lock to version 2.0.0 --- pyproject.toml | 2 +- uv.lock | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 94572c05..bb175abf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "2.0.0rc1" +version = "2.0.0" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" }] requires-python = ">=3.10" diff --git a/uv.lock b/uv.lock index d10bd5b1..461e1d5c 100644 --- a/uv.lock +++ b/uv.lock @@ -551,7 +551,7 @@ wheels = [ [[package]] name = "mistralai" -version = "2.0.0rc1" +version = "2.0.0" source = { editable = "." } dependencies = [ { name = "eval-type-backport" },