diff --git a/portkey_ai/__init__.py b/portkey_ai/__init__.py index f945a67..c876d78 100644 --- a/portkey_ai/__init__.py +++ b/portkey_ai/__init__.py @@ -34,14 +34,34 @@ AsyncMainFiles, Models, AsyncModels, - ThreadFiles, - AsyncThreadFiles, - AssistantFiles, - AsyncAssistantFiles, Runs, AsyncRuns, Steps, AsyncSteps, + Moderations, + AsyncModerations, + Audio, + Transcriptions, + Translations, + Speech, + AsyncAudio, + AsyncTranscriptions, + AsyncTranslations, + AsyncSpeech, + Batches, + AsyncBatches, + FineTuning, + Jobs, + Checkpoints, + AsyncFineTuning, + AsyncJobs, + AsyncCheckpoints, + VectorStores, + VectorFiles, + VectorFileBatches, + AsyncVectorStores, + AsyncVectorFiles, + AsyncVectorFileBatches, ) from portkey_ai.version import VERSION @@ -95,12 +115,32 @@ "AsyncMainFiles", "Models", "AsyncModels", - "ThreadFiles", - "AsyncThreadFiles", - "AssistantFiles", - "AsyncAssistantFiles", "Runs", "AsyncRuns", "Steps", "AsyncSteps", + "Moderations", + "AsyncModerations", + "Audio", + "Transcriptions", + "Translations", + "Speech", + "AsyncAudio", + "AsyncTranscriptions", + "AsyncTranslations", + "AsyncSpeech", + "Batches", + "AsyncBatches", + "FineTuning", + "Jobs", + "Checkpoints", + "AsyncFineTuning", + "AsyncJobs", + "AsyncCheckpoints", + "VectorStores", + "VectorFiles", + "VectorFileBatches", + "AsyncVectorStores", + "AsyncVectorFiles", + "AsyncVectorFileBatches", ] diff --git a/portkey_ai/api_resources/__init__.py b/portkey_ai/api_resources/__init__.py index f489f0b..bdc8c0c 100644 --- a/portkey_ai/api_resources/__init__.py +++ b/portkey_ai/api_resources/__init__.py @@ -23,14 +23,34 @@ AsyncMainFiles, Models, AsyncModels, - ThreadFiles, - AsyncThreadFiles, - AssistantFiles, - AsyncAssistantFiles, Runs, AsyncRuns, Steps, AsyncSteps, + Moderations, + AsyncModerations, + Audio, + Transcriptions, + Translations, + Speech, + AsyncAudio, + AsyncTranscriptions, + AsyncTranslations, + AsyncSpeech, + Batches, + AsyncBatches, + FineTuning, + Jobs, + Checkpoints, + AsyncFineTuning, + AsyncJobs, + AsyncCheckpoints, + VectorStores, + VectorFiles, + VectorFileBatches, + AsyncVectorStores, + AsyncVectorFiles, + AsyncVectorFileBatches, ) from .utils import ( Modes, @@ -89,12 +109,32 @@ "AsyncMainFiles", "Models", "AsyncModels", - "ThreadFiles", - "AsyncThreadFiles", - "AssistantFiles", - "AsyncAssistantFiles", "Runs", "AsyncRuns", "Steps", "AsyncSteps", + "Moderations", + "AsyncModerations", + "Audio", + "Transcriptions", + "Translations", + "Speech", + "AsyncAudio", + "AsyncTranscriptions", + "AsyncTranslations", + "AsyncSpeech", + "Batches", + "AsyncBatches", + "FineTuning", + "Jobs", + "Checkpoints", + "AsyncFineTuning", + "AsyncJobs", + "AsyncCheckpoints", + "VectorStores", + "VectorFiles", + "VectorFileBatches", + "AsyncVectorStores", + "AsyncVectorFiles", + "AsyncVectorFileBatches", ] diff --git a/portkey_ai/api_resources/apis/__init__.py b/portkey_ai/api_resources/apis/__init__.py index e064e3c..04ff6c8 100644 --- a/portkey_ai/api_resources/apis/__init__.py +++ b/portkey_ai/api_resources/apis/__init__.py @@ -6,21 +6,48 @@ from .post import Post, AsyncPost from .embeddings import Embeddings, AsyncEmbeddings from .images import Images, AsyncImages -from .assistants import Assistants, AssistantFiles, AsyncAssistants, AsyncAssistantFiles +from .assistants import Assistants, AsyncAssistants from .threads import ( Threads, Messages, - ThreadFiles, Runs, Steps, AsyncThreads, AsyncMessages, - AsyncThreadFiles, AsyncRuns, AsyncSteps, ) from .main_files import MainFiles, AsyncMainFiles from .models 
import Models, AsyncModels +from .moderations import Moderations, AsyncModerations +from .audio import ( + Audio, + Transcriptions, + Translations, + Speech, + AsyncAudio, + AsyncTranscriptions, + AsyncTranslations, + AsyncSpeech, +) +from .batches import Batches, AsyncBatches +from .fine_tuning import ( + FineTuning, + Jobs, + Checkpoints, + AsyncFineTuning, + AsyncJobs, + AsyncCheckpoints, +) +from .vector_stores import ( + VectorStores, + VectorFiles, + VectorFileBatches, + AsyncVectorStores, + AsyncVectorFiles, + AsyncVectorFileBatches, +) + __all__ = [ "Completion", @@ -46,10 +73,6 @@ "AsyncMainFiles", "Models", "AsyncModels", - "AssistantFiles", - "ThreadFiles", - "AsyncAssistantFiles", - "AsyncThreadFiles", "Threads", "AsyncThreads", "Messages", @@ -58,4 +81,28 @@ "AsyncRuns", "Steps", "AsyncSteps", + "Moderations", + "AsyncModerations", + "Audio", + "Transcriptions", + "Translations", + "Speech", + "AsyncAudio", + "AsyncTranscriptions", + "AsyncTranslations", + "AsyncSpeech", + "Batches", + "AsyncBatches", + "FineTuning", + "Jobs", + "Checkpoints", + "AsyncFineTuning", + "AsyncJobs", + "AsyncCheckpoints", + "VectorStores", + "VectorFiles", + "VectorFileBatches", + "AsyncVectorStores", + "AsyncVectorFiles", + "AsyncVectorFileBatches", ] diff --git a/portkey_ai/api_resources/apis/assistants.py b/portkey_ai/api_resources/apis/assistants.py index 40b2dc4..f417b49 100644 --- a/portkey_ai/api_resources/apis/assistants.py +++ b/portkey_ai/api_resources/apis/assistants.py @@ -5,9 +5,6 @@ Assistant, AssistantList, AssistantDeleted, - AssistantFile, - AssistantFileList, - AssistantFileDeleted, ) @@ -15,7 +12,6 @@ class Assistants(APIResource): def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - self.files = AssistantFiles(client) def create(self, **kwargs) -> Assistant: response = self.openai_client.with_raw_response.beta.assistants.create(**kwargs) @@ -59,53 +55,10 @@ def delete(self, assistant_id, **kwargs) -> AssistantDeleted: return data -class AssistantFiles(APIResource): - def __init__(self, client: Portkey) -> None: - super().__init__(client) - self.openai_client = client.openai_client - - def create(self, assistant_id, file_id, **kwargs) -> AssistantFile: - response = self.openai_client.with_raw_response.beta.assistants.files.create( - assistant_id=assistant_id, file_id=file_id, **kwargs - ) - data = AssistantFile(**json.loads(response.text)) - data._headers = response.headers - - return data - - def list(self, assistant_id, **kwargs) -> AssistantFileList: - response = self.openai_client.with_raw_response.beta.assistants.files.list( - assistant_id=assistant_id, **kwargs - ) - data = AssistantFileList(**json.loads(response.text)) - data._headers = response.headers - - return data - - def retrieve(self, assistant_id, file_id, **kwargs) -> AssistantFile: - response = self.openai_client.with_raw_response.beta.assistants.files.retrieve( - assistant_id=assistant_id, file_id=file_id, **kwargs - ) - data = AssistantFile(**json.loads(response.text)) - data._headers = response.headers - - return data - - def delete(self, assistant_id, file_id, **kwargs) -> AssistantFileDeleted: - response = self.openai_client.with_raw_response.beta.assistants.files.delete( - assistant_id=assistant_id, file_id=file_id, **kwargs - ) - data = AssistantFileDeleted(**json.loads(response.text)) - data._headers = response.headers - - return data - - class AsyncAssistants(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: 
super().__init__(client) self.openai_client = client.openai_client - self.files = AsyncAssistantFiles(client) async def create(self, **kwargs) -> Assistant: response = await self.openai_client.with_raw_response.beta.assistants.create( @@ -151,53 +104,3 @@ async def delete(self, assistant_id, **kwargs) -> AssistantDeleted: data._headers = response.headers return data - - -class AsyncAssistantFiles(AsyncAPIResource): - def __init__(self, client: AsyncPortkey) -> None: - super().__init__(client) - self.openai_client = client.openai_client - - async def create(self, assistant_id, file_id, **kwargs) -> AssistantFile: - response = ( - await self.openai_client.with_raw_response.beta.assistants.files.create( - assistant_id=assistant_id, file_id=file_id, **kwargs - ) - ) - data = AssistantFile(**json.loads(response.text)) - data._headers = response.headers - - return data - - async def list(self, assistant_id, **kwargs) -> AssistantFileList: - response = ( - await self.openai_client.with_raw_response.beta.assistants.files.list( - assistant_id=assistant_id, **kwargs - ) - ) - data = AssistantFileList(**json.loads(response.text)) - data._headers = response.headers - - return data - - async def retrieve(self, assistant_id, file_id, **kwargs) -> AssistantFile: - response = ( - await self.openai_client.with_raw_response.beta.assistants.files.retrieve( - assistant_id=assistant_id, file_id=file_id, **kwargs - ) - ) - data = AssistantFile(**json.loads(response.text)) - data._headers = response.headers - - return data - - async def delete(self, assistant_id, file_id, **kwargs) -> AssistantFileDeleted: - response = ( - await self.openai_client.with_raw_response.beta.assistants.files.delete( - assistant_id=assistant_id, file_id=file_id, **kwargs - ) - ) - data = AssistantFileDeleted(**json.loads(response.text)) - data._headers = response.headers - - return data diff --git a/portkey_ai/api_resources/apis/audio.py b/portkey_ai/api_resources/apis/audio.py new file mode 100644 index 0000000..e0367b7 --- /dev/null +++ b/portkey_ai/api_resources/apis/audio.py @@ -0,0 +1,212 @@ +import json +from typing import Any, List, Union +from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource +from openai._types import NotGiven, NOT_GIVEN, FileTypes +from portkey_ai.api_resources.client import AsyncPortkey, Portkey +import typing + +from portkey_ai.api_resources.types.audio_types import Transcription, Translation + + +class Audio(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + self.transcriptions = Transcriptions(client) + self.translations = Translations(client) + self.speech = Speech(client) + + +class Transcriptions(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + @typing.no_type_check + def create( + self, + *, + file: FileTypes, + model: str, + language: Union[str, NotGiven] = NOT_GIVEN, + prompt: Union[str, NotGiven] = NOT_GIVEN, + response_format: Union[str, NotGiven] = NOT_GIVEN, + temperature: Union[float, NotGiven] = NOT_GIVEN, + timestamp_granularities: Union[List[str], NotGiven] = NOT_GIVEN, + **kwargs + ) -> Transcription: + response = self.openai_client.with_raw_response.audio.transcriptions.create( + file=file, + model=model, + language=language, + prompt=prompt, + response_format=response_format, + temperature=temperature, + timestamp_granularities=timestamp_granularities, + **kwargs + ) + data = 
Transcription(**json.loads(response.text)) + data._headers = response.headers + + return data + + +class Translations(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + def create( + self, + *, + file: FileTypes, + model: str, + prompt: Union[str, NotGiven] = NOT_GIVEN, + response_format: Union[str, NotGiven] = NOT_GIVEN, + temperature: Union[float, NotGiven] = NOT_GIVEN, + **kwargs + ) -> Translation: + response = self.openai_client.with_raw_response.audio.translations.create( + file=file, + model=model, + prompt=prompt, + response_format=response_format, + temperature=temperature, + **kwargs + ) + data = Translation(**json.loads(response.text)) + data._headers = response.headers + + return data + + +class Speech(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + @typing.no_type_check + def create( + self, + *, + input: str, + model: str, + voice: str, + response_format: Union[str, NotGiven] = NOT_GIVEN, + speed: Union[float, NotGiven] = NOT_GIVEN, + **kwargs + ) -> Any: + response = self.openai_client.audio.speech.create( + input=input, + model=model, + voice=voice, + response_format=response_format, + speed=speed, + **kwargs + ) + + return response + + +class AsyncAudio(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + self.transcriptions = AsyncTranscriptions(client) + self.translations = AsyncTranslations(client) + self.speech = AsyncSpeech(client) + + +class AsyncTranscriptions(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + @typing.no_type_check + async def create( + self, + *, + file: FileTypes, + model: str, + language: Union[str, NotGiven] = NOT_GIVEN, + prompt: Union[str, NotGiven] = NOT_GIVEN, + response_format: Union[str, NotGiven] = NOT_GIVEN, + temperature: Union[float, NotGiven] = NOT_GIVEN, + timestamp_granularities: Union[List[str], NotGiven] = NOT_GIVEN, + **kwargs + ) -> Transcription: + response = ( + await self.openai_client.with_raw_response.audio.transcriptions.create( + file=file, + model=model, + language=language, + prompt=prompt, + response_format=response_format, + temperature=temperature, + timestamp_granularities=timestamp_granularities, + **kwargs + ) + ) + data = Transcription(**json.loads(response.text)) + data._headers = response.headers + + return data + + +class AsyncTranslations(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + async def create( + self, + *, + file: FileTypes, + model: str, + prompt: Union[str, NotGiven] = NOT_GIVEN, + response_format: Union[str, NotGiven] = NOT_GIVEN, + temperature: Union[float, NotGiven] = NOT_GIVEN, + **kwargs + ) -> Translation: + response = await self.openai_client.with_raw_response.audio.translations.create( + file=file, + model=model, + prompt=prompt, + response_format=response_format, + temperature=temperature, + **kwargs + ) + data = Translation(**json.loads(response.text)) + data._headers = response.headers + + return data + + +class AsyncSpeech(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + @typing.no_type_check + async def create( + self, + *, + input: str, + 
model: str, + voice: str, + response_format: Union[str, NotGiven] = NOT_GIVEN, + speed: Union[float, NotGiven] = NOT_GIVEN, + **kwargs + ) -> Any: + response = await self.openai_client.audio.speech.create( + input=input, + model=model, + voice=voice, + response_format=response_format, + speed=speed, + **kwargs + ) + + data = response + + return data diff --git a/portkey_ai/api_resources/apis/batches.py b/portkey_ai/api_resources/apis/batches.py new file mode 100644 index 0000000..8fd789a --- /dev/null +++ b/portkey_ai/api_resources/apis/batches.py @@ -0,0 +1,130 @@ +import json +from typing import Dict, Optional, Union +import typing +from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource +from portkey_ai.api_resources.client import AsyncPortkey, Portkey +from openai._types import NotGiven, NOT_GIVEN + +from portkey_ai.api_resources.types.batches_type import Batch, BatchList + + +class Batches(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + @typing.no_type_check + def create( + self, + *, + completion_window: str, + endpoint: str, + input_file_id: str, + metadata: Union[Optional[Dict[str, str]], NotGiven] = NOT_GIVEN, + **kwargs + ) -> Batch: + response = self.openai_client.with_raw_response.batches.create( + completion_window=completion_window, + endpoint=endpoint, + input_file_id=input_file_id, + metadata=metadata, + **kwargs + ) + data = Batch(**json.loads(response.text)) + data._headers = response.headers + + return data + + def retrieve(self, batch_id, **kwargs) -> Batch: + response = self.openai_client.with_raw_response.batches.retrieve( + batch_id=batch_id, **kwargs + ) + data = Batch(**json.loads(response.text)) + data._headers = response.headers + + return data + + def list( + self, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + **kwargs + ) -> BatchList: + response = self.openai_client.with_raw_response.batches.list( + after=after, limit=limit, **kwargs + ) + data = BatchList(**json.loads(response.text)) + data._headers = response.headers + + return data + + def cancel(self, batch_id: str, **kwargs) -> Batch: + response = self.openai_client.with_raw_response.batches.cancel( + batch_id=batch_id, **kwargs + ) + data = Batch(**json.loads(response.text)) + data._headers = response.headers + + return data + + +class AsyncBatches(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + @typing.no_type_check + async def create( + self, + *, + completion_window: str, + endpoint: str, + input_file_id: str, + metadata: Union[Optional[Dict[str, str]], NotGiven] = NOT_GIVEN, + **kwargs + ) -> Batch: + response = await self.openai_client.with_raw_response.batches.create( + completion_window=completion_window, + endpoint=endpoint, + input_file_id=input_file_id, + metadata=metadata, + **kwargs + ) + data = Batch(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def retrieve(self, batch_id, **kwargs) -> Batch: + response = await self.openai_client.with_raw_response.batches.retrieve( + batch_id=batch_id, **kwargs + ) + data = Batch(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def list( + self, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + **kwargs + ) -> BatchList: + response = await 
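# Usage sketch for the new Audio routes above (illustrative only): this assumes the
# Portkey client exposes the resource as `portkey.audio`, which is wired up outside
# this hunk; the API key, virtual key and file path are placeholders.
from portkey_ai import Portkey

portkey = Portkey(api_key="PORTKEY_API_KEY", virtual_key="OPENAI_VIRTUAL_KEY")

with open("meeting.mp3", "rb") as audio_file:
    transcription = portkey.audio.transcriptions.create(file=audio_file, model="whisper-1")
print(transcription.text)  # Transcription is rebuilt from the raw JSON response above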
self.openai_client.with_raw_response.batches.list( + after=after, limit=limit, **kwargs + ) + data = BatchList(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def cancel(self, batch_id: str, **kwargs) -> Batch: + response = await self.openai_client.with_raw_response.batches.cancel( + batch_id=batch_id, **kwargs + ) + data = Batch(**json.loads(response.text)) + data._headers = response.headers + + return data diff --git a/portkey_ai/api_resources/apis/chat_complete.py b/portkey_ai/api_resources/apis/chat_complete.py index 2cae938..5364a4d 100644 --- a/portkey_ai/api_resources/apis/chat_complete.py +++ b/portkey_ai/api_resources/apis/chat_complete.py @@ -43,7 +43,7 @@ def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - def stream_create( + def stream_create( # type: ignore[return] self, model, messages, stream, temperature, max_tokens, top_p, **kwargs ) -> Union[ChatCompletions, Iterator[ChatCompletionChunk]]: with self.openai_client.with_streaming_response.chat.completions.create( diff --git a/portkey_ai/api_resources/apis/complete.py b/portkey_ai/api_resources/apis/complete.py index 84686da..9f4150f 100644 --- a/portkey_ai/api_resources/apis/complete.py +++ b/portkey_ai/api_resources/apis/complete.py @@ -17,7 +17,7 @@ def __init__(self, client: Portkey) -> None: self.openai_client = client.openai_client self.client = client - def stream_create( + def stream_create( # type: ignore[return] self, model, prompt, stream, temperature, max_tokens, top_p, **kwargs ) -> Union[TextCompletion, Iterator[TextCompletionChunk]]: with self.openai_client.with_streaming_response.completions.create( diff --git a/portkey_ai/api_resources/apis/fine_tuning.py b/portkey_ai/api_resources/apis/fine_tuning.py new file mode 100644 index 0000000..d1be95c --- /dev/null +++ b/portkey_ai/api_resources/apis/fine_tuning.py @@ -0,0 +1,256 @@ +import json +from typing import Iterable, Optional, Union +from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource +from portkey_ai.api_resources.client import AsyncPortkey, Portkey +from openai._types import NotGiven, NOT_GIVEN +from openai.types.fine_tuning import job_create_params + +from portkey_ai.api_resources.types.fine_tuning_type import ( + FineTuningJob, + FineTuningJobCheckpointList, + FineTuningJobEventList, + FineTuningJobList, +) + + +class FineTuning(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + self.jobs = Jobs(client) + + +class Jobs(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + self.checkpoints = Checkpoints(client) + + def create( + self, + *, + model: str, + training_file: str, + hyperparameters: Union[job_create_params.Hyperparameters, NotGiven] = NOT_GIVEN, + integrations: Union[ + Optional[Iterable[job_create_params.Integration]], NotGiven + ] = NOT_GIVEN, + seed: Union[Optional[int], NotGiven] = NOT_GIVEN, + suffix: Union[Optional[str], NotGiven] = NOT_GIVEN, + validation_file: Union[Optional[str], NotGiven] = NOT_GIVEN, + **kwargs, + ) -> FineTuningJob: + response = self.openai_client.with_raw_response.fine_tuning.jobs.create( + model=model, + training_file=training_file, + hyperparameters=hyperparameters, + integrations=integrations, + seed=seed, + suffix=suffix, + validation_file=validation_file, + **kwargs, + ) + data = 
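# Usage sketch for the new Batches resource above (illustrative only): assumes the
# client exposes it as `portkey.batches` (wiring not shown in this hunk); the input
# file id is a placeholder for an uploaded .jsonl batch file.
from portkey_ai import Portkey

portkey = Portkey(api_key="PORTKEY_API_KEY", virtual_key="OPENAI_VIRTUAL_KEY")

batch = portkey.batches.create(
    input_file_id="file-abc123",
    endpoint="/v1/chat/completions",
    completion_window="24h",
)
print(portkey.batches.retrieve(batch_id=batch.id).status)  # e.g. "validating"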
FineTuningJob(**json.loads(response.text)) + data._headers = response.headers + + return data + + def retrieve(self, fine_tuning_job_id: str, **kwargs) -> FineTuningJob: + response = self.openai_client.with_raw_response.fine_tuning.jobs.retrieve( + fine_tuning_job_id=fine_tuning_job_id, **kwargs + ) + data = FineTuningJob(**json.loads(response.text)) + data._headers = response.headers + + return data + + def list( + self, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> FineTuningJobList: + response = self.openai_client.with_raw_response.fine_tuning.jobs.list( + after=after, limit=limit, **kwargs + ) + data = FineTuningJobList(**json.loads(response.text)) + data._headers = response.headers + + return data + + def cancel(self, fine_tuning_job_id: str, **kwargs) -> FineTuningJob: + response = self.openai_client.with_raw_response.fine_tuning.jobs.cancel( + fine_tuning_job_id=fine_tuning_job_id, **kwargs + ) + data = FineTuningJob(**json.loads(response.text)) + data._headers = response.headers + + return data + + def list_events( + self, + fine_tuning_job_id: str, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> FineTuningJobEventList: + response = self.openai_client.with_raw_response.fine_tuning.jobs.list_events( + fine_tuning_job_id=fine_tuning_job_id, after=after, limit=limit, **kwargs + ) + data = FineTuningJobEventList(**json.loads(response.text)) + data._headers = response.headers + + return data + + +class Checkpoints(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + def list( + self, + fine_tuning_job_id: str, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> FineTuningJobCheckpointList: + response = ( + self.openai_client.with_raw_response.fine_tuning.jobs.checkpoints.list( + fine_tuning_job_id=fine_tuning_job_id, + after=after, + limit=limit, + **kwargs, + ) + ) + + data = FineTuningJobCheckpointList(**json.loads(response.text)) + data._headers = response.headers + + return data + + +class AsyncFineTuning(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + self.jobs = AsyncJobs(client) + + +class AsyncJobs(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + self.checkpoints = AsyncCheckpoints(client) + + async def create( + self, + *, + model: str, + training_file: str, + hyperparameters: Union[job_create_params.Hyperparameters, NotGiven] = NOT_GIVEN, + integrations: Union[ + Optional[Iterable[job_create_params.Integration]], NotGiven + ] = NOT_GIVEN, + seed: Union[Optional[int], NotGiven] = NOT_GIVEN, + suffix: Union[Optional[str], NotGiven] = NOT_GIVEN, + validation_file: Union[Optional[str], NotGiven] = NOT_GIVEN, + **kwargs, + ) -> FineTuningJob: + response = await self.openai_client.with_raw_response.fine_tuning.jobs.create( + model=model, + training_file=training_file, + hyperparameters=hyperparameters, + integrations=integrations, + seed=seed, + suffix=suffix, + validation_file=validation_file, + **kwargs, + ) + data = FineTuningJob(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def retrieve(self, fine_tuning_job_id: str, **kwargs) -> FineTuningJob: + response = await 
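# Usage sketch for the fine-tuning Jobs resource above (illustrative only): assumes
# the client exposes it as `portkey.fine_tuning.jobs` (wiring not shown in this hunk);
# the training file id is a placeholder.
from portkey_ai import Portkey

portkey = Portkey(api_key="PORTKEY_API_KEY", virtual_key="OPENAI_VIRTUAL_KEY")

job = portkey.fine_tuning.jobs.create(
    model="gpt-3.5-turbo",
    training_file="file-abc123",
)
print(portkey.fine_tuning.jobs.retrieve(fine_tuning_job_id=job.id).status)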
self.openai_client.with_raw_response.fine_tuning.jobs.retrieve( + fine_tuning_job_id=fine_tuning_job_id, **kwargs + ) + data = FineTuningJob(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def list( + self, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> FineTuningJobList: + response = await self.openai_client.with_raw_response.fine_tuning.jobs.list( + after=after, limit=limit, **kwargs + ) + data = FineTuningJobList(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def cancel(self, fine_tuning_job_id: str, **kwargs) -> FineTuningJob: + response = await self.openai_client.with_raw_response.fine_tuning.jobs.cancel( + fine_tuning_job_id, **kwargs + ) + data = FineTuningJob(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def list_events( + self, + fine_tuning_job_id: str, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> FineTuningJobEventList: + response = ( + await self.openai_client.with_raw_response.fine_tuning.jobs.list_events( + fine_tuning_job_id=fine_tuning_job_id, + after=after, + limit=limit, + **kwargs, + ) + ) + data = FineTuningJobEventList(**json.loads(response.text)) + data._headers = response.headers + + return data + + +class AsyncCheckpoints(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + async def list( + self, + fine_tuning_job_id: str, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> FineTuningJobCheckpointList: + response = await self.openai_client.with_raw_response.fine_tuning.jobs.checkpoints.list( # noqa: E501 + fine_tuning_job_id=fine_tuning_job_id, + after=after, + limit=limit, + **kwargs, + ) + + data = FineTuningJobCheckpointList(**json.loads(response.text)) + data._headers = response.headers + + return data diff --git a/portkey_ai/api_resources/apis/main_files.py b/portkey_ai/api_resources/apis/main_files.py index 566d873..d3eba11 100644 --- a/portkey_ai/api_resources/apis/main_files.py +++ b/portkey_ai/api_resources/apis/main_files.py @@ -56,6 +56,22 @@ def retrieve_content(self, file_id, **kwargs) -> Any: response = self.openai_client.files.content(file_id=file_id, **kwargs) return response + def wait_for_processing( + self, + id: str, + *, + poll_interval: float = 5.0, + max_wait_seconds: float = 30 * 60, + **kwargs + ) -> Any: + response = self.openai_client.files.wait_for_processing( + id=id, + poll_interval=poll_interval, + max_wait_seconds=max_wait_seconds, + **kwargs + ) + return response + class AsyncMainFiles(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: @@ -103,3 +119,19 @@ async def content(self, file_id, **kwargs) -> Any: async def retrieve_content(self, file_id, **kwargs) -> Any: response = await self.openai_client.files.content(file_id=file_id, **kwargs) return response + + async def wait_for_processing( + self, + id: str, + *, + poll_interval: float = 5.0, + max_wait_seconds: float = 30 * 60, + **kwargs + ) -> Any: + response = await self.openai_client.files.wait_for_processing( + id=id, + poll_interval=poll_interval, + max_wait_seconds=max_wait_seconds, + **kwargs + ) + return response diff --git a/portkey_ai/api_resources/apis/moderations.py b/portkey_ai/api_resources/apis/moderations.py new file mode 100644 index 
0000000..8b6f55d --- /dev/null +++ b/portkey_ai/api_resources/apis/moderations.py @@ -0,0 +1,48 @@ +import json +from typing import List, Union +from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource +from portkey_ai.api_resources.client import AsyncPortkey, Portkey +from openai._types import NotGiven, NOT_GIVEN +from portkey_ai.api_resources.types.moderations_type import ModerationCreateResponse + + +class Moderations(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + def create( + self, + *, + input: Union[str, List[str]], + model: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> ModerationCreateResponse: + response = self.openai_client.with_raw_response.moderations.create( + input=input, model=model, **kwargs + ) + data = ModerationCreateResponse(**json.loads(response.text)) + data._headers = response.headers + + return data + + +class AsyncModerations(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + async def create( + self, + *, + input: Union[str, List[str]], + model: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> ModerationCreateResponse: + response = await self.openai_client.with_raw_response.moderations.create( + input=input, model=model, **kwargs + ) + data = ModerationCreateResponse(**json.loads(response.text)) + data._headers = response.headers + + return data diff --git a/portkey_ai/api_resources/apis/threads.py b/portkey_ai/api_resources/apis/threads.py index 3ba0aec..a1e1c9c 100644 --- a/portkey_ai/api_resources/apis/threads.py +++ b/portkey_ai/api_resources/apis/threads.py @@ -1,11 +1,13 @@ import json +from typing import Iterable, Optional, Union +import typing from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.types.thread_message_type import ( - MessageFile, MessageList, ThreadMessage, + ThreadMessageDeleted, ) from portkey_ai.api_resources.types.thread_run_type import ( Run, @@ -14,6 +16,27 @@ RunStepList, ) from portkey_ai.api_resources.types.thread_type import Thread, ThreadDeleted +from openai._types import NotGiven, NOT_GIVEN +from openai.types.beta import thread_create_and_run_params +from openai.types.beta.assistant_response_format_option_param import ( + AssistantResponseFormatOptionParam, +) +from openai.types.beta.assistant_tool_choice_option_param import ( + AssistantToolChoiceOptionParam, +) +from openai.lib.streaming import ( + AssistantEventHandler, + AssistantEventHandlerT, + AssistantStreamManager, + AsyncAssistantEventHandler, + AsyncAssistantEventHandlerT, + AsyncAssistantStreamManager, +) +from openai.types.beta.threads import ( + run_create_params, + run_submit_tool_outputs_params, +) +from openai.types.beta.assistant_tool_param import AssistantToolParam class Threads(APIResource): @@ -70,12 +93,118 @@ def create_and_run(self, assistant_id, **kwargs) -> Run: data._headers = response.headers return data + def create_and_run_poll( + self, + *, + assistant_id: str, + instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + model: Union[str, None, NotGiven] = NOT_GIVEN, + response_format: Union[ + 
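# Usage sketch for the new Moderations resource above (illustrative only): assumes the
# client exposes it as `portkey.moderations` (wiring not shown in this hunk).
from portkey_ai import Portkey

portkey = Portkey(api_key="PORTKEY_API_KEY", virtual_key="OPENAI_VIRTUAL_KEY")

result = portkey.moderations.create(
    input="Some user-generated text to screen.",
    model="text-moderation-latest",
)
print(result)  # ModerationCreateResponse parsed from the raw JSON response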
Optional[AssistantResponseFormatOptionParam], NotGiven + ] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + thread: Union[thread_create_and_run_params.Thread, NotGiven] = NOT_GIVEN, + tool_choice: Union[ + Optional[AssistantToolChoiceOptionParam], NotGiven + ] = NOT_GIVEN, + tool_resources: Union[ + Optional[thread_create_and_run_params.ToolResources], NotGiven + ] = NOT_GIVEN, + tools: Union[ + Optional[Iterable[thread_create_and_run_params.Tool]], NotGiven + ] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[ + Optional[thread_create_and_run_params.TruncationStrategy], NotGiven + ] = NOT_GIVEN, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> Run: + response = self.openai_client.beta.threads.create_and_run_poll( + assistant_id=assistant_id, + instructions=instructions, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + metadata=metadata, + model=model, + response_format=response_format, + temperature=temperature, + thread=thread, + tool_choice=tool_choice, + tool_resources=tool_resources, + tools=tools, + top_p=top_p, + truncation_strategy=truncation_strategy, + poll_interval_ms=poll_interval_ms, + **kwargs, + ) + data = response + + return data # type: ignore[return-value] + + @typing.no_type_check + def create_and_run_stream( + self, + *, + assistant_id: str, + instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + model: Union[str, None, NotGiven] = NOT_GIVEN, + response_format: Union[ + Optional[AssistantResponseFormatOptionParam], NotGiven + ] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + thread: Union[thread_create_and_run_params.Thread, NotGiven] = NOT_GIVEN, + tool_choice: Union[ + Optional[AssistantToolChoiceOptionParam], NotGiven + ] = NOT_GIVEN, + tool_resources: Union[ + Optional[thread_create_and_run_params.ToolResources], NotGiven + ] = NOT_GIVEN, + tools: Union[ + Optional[Iterable[thread_create_and_run_params.Tool]], NotGiven + ] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[ + Optional[thread_create_and_run_params.TruncationStrategy], NotGiven + ] = NOT_GIVEN, + event_handler: Union[AssistantEventHandlerT, None] = None, + **kwargs, + ) -> Union[ + AssistantStreamManager[AssistantEventHandler], + AssistantStreamManager[AssistantEventHandlerT], + ]: + response = self.openai_client.beta.threads.create_and_run_stream( + assistant_id=assistant_id, + instructions=instructions, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + metadata=metadata, + model=model, + response_format=response_format, + temperature=temperature, + thread=thread, + tool_choice=tool_choice, + tool_resources=tool_resources, + tools=tools, + top_p=top_p, + truncation_strategy=truncation_strategy, + event_handler=event_handler, + **kwargs, + ) + data = response + return data + class Messages(APIResource): def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - self.files = ThreadFiles(client) def create(self, thread_id, **kwargs) -> ThreadMessage: response = self.openai_client.with_raw_response.beta.threads.messages.create( @@ -110,30 +239,13 @@ def update(self, thread_id, message_id, **kwargs) -> 
ThreadMessage: data._headers = response.headers return data - -class ThreadFiles(APIResource): - def __init__(self, client: Portkey) -> None: - super().__init__(client) - self.openai_client = client.openai_client - - def list(self, thread_id, message_id, **kwargs) -> MessageList: - response = ( - self.openai_client.with_raw_response.beta.threads.messages.files.list( - thread_id=thread_id, message_id=message_id, **kwargs - ) - ) - data = MessageList(**json.loads(response.text)) - data._headers = response.headers - - return data - - def retrieve(self, thread_id, message_id, file_id, **kwargs) -> MessageFile: - response = ( - self.openai_client.with_raw_response.beta.threads.messages.files.retrieve( - thread_id=thread_id, message_id=message_id, file_id=file_id, **kwargs - ) + def delete( + self, message_id: str, *, thread_id: str, **kwargs + ) -> ThreadMessageDeleted: + response = self.openai_client.with_raw_response.beta.threads.messages.delete( + message_id=message_id, thread_id=thread_id, **kwargs ) - data = MessageFile(**json.loads(response.text)) + data = ThreadMessageDeleted(**json.loads(response.text)) data._headers = response.headers return data @@ -201,6 +313,221 @@ def cancel(self, thread_id, run_id, **kwargs) -> Run: return data + def create_and_poll( + self, + *, + assistant_id: str, + additional_instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + additional_messages: Union[ + Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven + ] = NOT_GIVEN, + instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + model: Union[str, None, NotGiven] = NOT_GIVEN, + response_format: Union[ + Optional[AssistantResponseFormatOptionParam], NotGiven + ] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + tool_choice: Union[ + Optional[AssistantToolChoiceOptionParam], NotGiven + ] = NOT_GIVEN, + tools: Union[Optional[Iterable[AssistantToolParam]], NotGiven] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[ + Optional[run_create_params.TruncationStrategy], NotGiven + ] = NOT_GIVEN, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + thread_id: str, + **kwargs, + ) -> Run: + response = self.openai_client.beta.threads.runs.create_and_poll( + assistant_id=assistant_id, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + instructions=instructions, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + metadata=metadata, + model=model, + response_format=response_format, + temperature=temperature, + tool_choice=tool_choice, + tools=tools, + top_p=top_p, + truncation_strategy=truncation_strategy, + poll_interval_ms=poll_interval_ms, + thread_id=thread_id, + **kwargs, + ) + data = response + + return data # type: ignore[return-value] + + @typing.no_type_check + def create_and_stream( + self, + *, + assistant_id: str, + additional_instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + additional_messages: Union[ + Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven + ] = NOT_GIVEN, + instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = 
NOT_GIVEN, + model: Union[str, None, NotGiven] = NOT_GIVEN, + response_format: Union[ + Optional[AssistantResponseFormatOptionParam], NotGiven + ] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + tool_choice: Union[ + Optional[AssistantToolChoiceOptionParam], NotGiven + ] = NOT_GIVEN, + tools: Union[Optional[Iterable[AssistantToolParam]], NotGiven] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[ + Optional[run_create_params.TruncationStrategy], NotGiven + ] = NOT_GIVEN, + thread_id: str, + event_handler: Union[AssistantEventHandlerT, None] = None, + **kwargs, + ) -> Union[ + AssistantStreamManager[AssistantEventHandler], + AssistantStreamManager[AssistantEventHandlerT], + ]: + response = self.openai_client.beta.threads.runs.create_and_stream( + assistant_id=assistant_id, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + instructions=instructions, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + metadata=metadata, + model=model, + response_format=response_format, + temperature=temperature, + tool_choice=tool_choice, + tools=tools, + top_p=top_p, + truncation_strategy=truncation_strategy, + thread_id=thread_id, + event_handler=event_handler, + **kwargs, + ) + data = response + return data + + def poll( + self, + *, + run_id: str, + thread_id: str, + **kwargs, + ) -> Run: + response = self.openai_client.beta.threads.runs.poll( + run_id=run_id, thread_id=thread_id, **kwargs + ) + data = response + + return data # type: ignore[return-value] + + @typing.no_type_check + def stream( + self, + *, + assistant_id: str, + additional_instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + additional_messages: Union[ + Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven + ] = NOT_GIVEN, + instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + model: Union[str, None, NotGiven] = NOT_GIVEN, + response_format: Union[ + Optional[AssistantResponseFormatOptionParam], NotGiven + ] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + tool_choice: Union[ + Optional[AssistantToolChoiceOptionParam], NotGiven + ] = NOT_GIVEN, + tools: Union[Optional[Iterable[AssistantToolParam]], NotGiven] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[ + Optional[run_create_params.TruncationStrategy], NotGiven + ] = NOT_GIVEN, + thread_id: str, + event_handler: Union[AssistantEventHandlerT, None] = None, + **kwargs, + ) -> Union[ + AssistantStreamManager[AssistantEventHandler], + AssistantStreamManager[AssistantEventHandlerT], + ]: + response = self.openai_client.beta.threads.runs.stream( + assistant_id=assistant_id, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + instructions=instructions, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + metadata=metadata, + model=model, + response_format=response_format, + temperature=temperature, + tool_choice=tool_choice, + tools=tools, + top_p=top_p, + truncation_strategy=truncation_strategy, + thread_id=thread_id, + event_handler=event_handler, + **kwargs, + ) + data = response + return data + + def submit_tool_outputs_and_poll( + self, + *, + tool_outputs: 
Union[Iterable[run_submit_tool_outputs_params.ToolOutput]], + run_id: str, + thread_id: str, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + ) -> Run: + response = self.openai_client.beta.threads.runs.submit_tool_outputs_and_poll( + tool_outputs=tool_outputs, + run_id=run_id, + thread_id=thread_id, + poll_interval_ms=poll_interval_ms, + ) + data = response + + return data # type: ignore[return-value] + + def submit_tool_outputs_stream( + self, + *, + tool_outputs: Union[Iterable[run_submit_tool_outputs_params.ToolOutput]], + run_id: str, + thread_id: str, + event_handler: Union[AssistantEventHandlerT, None] = None, + ) -> Union[ + AssistantStreamManager[AssistantEventHandler], + AssistantStreamManager[AssistantEventHandlerT], + ]: + response = self.openai_client.beta.threads.runs.submit_tool_outputs_stream( # type: ignore[type-var] + tool_outputs=tool_outputs, + run_id=run_id, + thread_id=thread_id, + event_handler=event_handler, + ) + data = response + + return data # type: ignore[return-value] + class Steps(APIResource): def __init__(self, client: Portkey) -> None: @@ -284,12 +611,120 @@ async def create_and_run(self, assistant_id, **kwargs) -> Run: data._headers = response.headers return data + async def create_and_run_poll( + self, + *, + assistant_id: str, + instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + model: Union[str, None, NotGiven] = NOT_GIVEN, + response_format: Union[ + Optional[AssistantResponseFormatOptionParam], NotGiven + ] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + thread: Union[thread_create_and_run_params.Thread, NotGiven] = NOT_GIVEN, + tool_choice: Union[ + Optional[AssistantToolChoiceOptionParam], NotGiven + ] = NOT_GIVEN, + tool_resources: Union[ + Optional[thread_create_and_run_params.ToolResources], NotGiven + ] = NOT_GIVEN, + tools: Union[ + Optional[Iterable[thread_create_and_run_params.Tool]], NotGiven + ] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[ + Optional[thread_create_and_run_params.TruncationStrategy], NotGiven + ] = NOT_GIVEN, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> Run: + response = await self.openai_client.beta.threads.create_and_run_poll( + assistant_id=assistant_id, + instructions=instructions, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + metadata=metadata, + model=model, + response_format=response_format, + temperature=temperature, + thread=thread, + tool_choice=tool_choice, + tool_resources=tool_resources, + tools=tools, + top_p=top_p, + truncation_strategy=truncation_strategy, + poll_interval_ms=poll_interval_ms, + **kwargs, + ) + data = response + + return data # type: ignore[return-value] + + @typing.no_type_check + async def create_and_run_stream( + self, + *, + assistant_id: str, + instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + model: Union[str, None, NotGiven] = NOT_GIVEN, + response_format: Union[ + Optional[AssistantResponseFormatOptionParam], NotGiven + ] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + thread: 
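# Usage sketch for the run helpers above (create_and_poll, stream, submit_tool_outputs_*),
# which pass straight through to the OpenAI SDK helpers (illustrative only): assumes
# threads are exposed as `portkey.beta.threads` (wiring not shown in this hunk); the
# assistant and thread ids are placeholders.
from portkey_ai import Portkey

portkey = Portkey(api_key="PORTKEY_API_KEY", virtual_key="OPENAI_VIRTUAL_KEY")

run = portkey.beta.threads.runs.create_and_poll(
    assistant_id="asst_abc123",
    thread_id="thread_abc123",
)
print(run.status)  # e.g. "completed" once polling returns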
Union[thread_create_and_run_params.Thread, NotGiven] = NOT_GIVEN, + tool_choice: Union[ + Optional[AssistantToolChoiceOptionParam], NotGiven + ] = NOT_GIVEN, + tool_resources: Union[ + Optional[thread_create_and_run_params.ToolResources], NotGiven + ] = NOT_GIVEN, + tools: Union[ + Optional[Iterable[thread_create_and_run_params.Tool]], NotGiven + ] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[ + Optional[thread_create_and_run_params.TruncationStrategy], NotGiven + ] = NOT_GIVEN, + event_handler: Union[AsyncAssistantEventHandlerT, None] = None, + **kwargs, + ) -> ( + Union[ + AsyncAssistantStreamManager[AsyncAssistantEventHandler], + AsyncAssistantStreamManager[AsyncAssistantEventHandlerT], + ] + ): + response = await self.openai_client.beta.threads.create_and_run_stream( + assistant_id=assistant_id, + instructions=instructions, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + metadata=metadata, + model=model, + response_format=response_format, + temperature=temperature, + thread=thread, + tool_choice=tool_choice, + tool_resources=tool_resources, + tools=tools, + top_p=top_p, + truncation_strategy=truncation_strategy, + event_handler=event_handler, + **kwargs, + ) + data = response + return data + class AsyncMessages(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - self.files = AsyncThreadFiles(client) async def create(self, thread_id, **kwargs) -> ThreadMessage: response = ( @@ -332,39 +767,15 @@ async def update(self, thread_id, message_id, **kwargs) -> ThreadMessage: data._headers = response.headers return data - -class AsyncThreadFiles(AsyncAPIResource): - def __init__(self, client: AsyncPortkey) -> None: - super().__init__(client) - self.openai_client = client.openai_client - - async def list(self, thread_id, message_id, **kwargs) -> MessageList: + async def delete( + self, message_id: str, *, thread_id: str, **kwargs + ) -> ThreadMessageDeleted: response = ( - await self.openai_client.with_raw_response.beta.threads.messages.files.list( - thread_id=thread_id, message_id=message_id, **kwargs + await self.openai_client.with_raw_response.beta.threads.messages.delete( + message_id=message_id, thread_id=thread_id, **kwargs ) ) - data = MessageList(**json.loads(response.text)) - data._headers = response.headers - - return data - - async def retrieve(self, thread_id, message_id, file_id, **kwargs) -> MessageFile: - # fmt: off - response = await self.openai_client\ - .with_raw_response\ - .beta\ - .threads\ - .messages\ - .files\ - .retrieve( - thread_id=thread_id, - message_id=message_id, - file_id=file_id, - **kwargs - ) - # fmt: off - data = MessageFile(**json.loads( response.text)) + data = ThreadMessageDeleted(**json.loads(response.text)) data._headers = response.headers return data @@ -444,6 +855,227 @@ async def cancel(self, thread_id, run_id, **kwargs) -> Run: return data + async def create_and_poll( + self, + *, + assistant_id: str, + additional_instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + additional_messages: Union[ + Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven + ] = NOT_GIVEN, + instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + model: Union[str, None, NotGiven] = 
NOT_GIVEN, + response_format: Union[ + Optional[AssistantResponseFormatOptionParam], NotGiven + ] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + tool_choice: Union[ + Optional[AssistantToolChoiceOptionParam], NotGiven + ] = NOT_GIVEN, + tools: Union[Optional[Iterable[AssistantToolParam]], NotGiven] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[ + Optional[run_create_params.TruncationStrategy], NotGiven + ] = NOT_GIVEN, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + thread_id: str, + **kwargs, + ) -> Run: + response = await self.openai_client.beta.threads.runs.create_and_poll( + assistant_id=assistant_id, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + instructions=instructions, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + metadata=metadata, + model=model, + response_format=response_format, + temperature=temperature, + tool_choice=tool_choice, + tools=tools, + top_p=top_p, + truncation_strategy=truncation_strategy, + poll_interval_ms=poll_interval_ms, + thread_id=thread_id, + **kwargs, + ) + data = response + + return data # type: ignore[return-value] + + @typing.no_type_check + async def create_and_stream( + self, + *, + assistant_id: str, + additional_instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + additional_messages: Union[ + Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven + ] = NOT_GIVEN, + instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + model: Union[str, None, NotGiven] = NOT_GIVEN, + response_format: Union[ + Optional[AssistantResponseFormatOptionParam], NotGiven + ] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + tool_choice: Union[ + Optional[AssistantToolChoiceOptionParam], NotGiven + ] = NOT_GIVEN, + tools: Union[Optional[Iterable[AssistantToolParam]], NotGiven] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[ + Optional[run_create_params.TruncationStrategy], NotGiven + ] = NOT_GIVEN, + thread_id: str, + event_handler: Union[AsyncAssistantEventHandlerT, None] = None, + **kwargs, + ) -> ( + Union[ + AsyncAssistantStreamManager[AsyncAssistantEventHandler], + AsyncAssistantStreamManager[AsyncAssistantEventHandlerT], + ] + ): + response = await self.openai_client.beta.threads.runs.create_and_stream( + assistant_id=assistant_id, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + instructions=instructions, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + metadata=metadata, + model=model, + response_format=response_format, + temperature=temperature, + tool_choice=tool_choice, + tools=tools, + top_p=top_p, + truncation_strategy=truncation_strategy, + thread_id=thread_id, + event_handler=event_handler, + **kwargs, + ) + data = response + return data + + async def poll( + self, + *, + run_id: str, + thread_id: str, + **kwargs, + ) -> Run: + response = await self.openai_client.beta.threads.runs.poll( + run_id=run_id, thread_id=thread_id, **kwargs + ) + data = response + + return data # type: ignore[return-value] + + @typing.no_type_check + async def stream( + self, + *, + assistant_id: str, + additional_instructions: 
Union[Optional[str], NotGiven] = NOT_GIVEN, + additional_messages: Union[ + Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven + ] = NOT_GIVEN, + instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + model: Union[str, None, NotGiven] = NOT_GIVEN, + response_format: Union[ + Optional[AssistantResponseFormatOptionParam], NotGiven + ] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + tool_choice: Union[ + Optional[AssistantToolChoiceOptionParam], NotGiven + ] = NOT_GIVEN, + tools: Union[Optional[Iterable[AssistantToolParam]], NotGiven] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + truncation_strategy: Union[ + Optional[run_create_params.TruncationStrategy], NotGiven + ] = NOT_GIVEN, + thread_id: str, + event_handler: Union[AsyncAssistantEventHandlerT, None] = None, + **kwargs, + ) -> ( + Union[ + AsyncAssistantStreamManager[AsyncAssistantEventHandler], + AsyncAssistantStreamManager[AsyncAssistantEventHandlerT], + ] + ): + response = await self.openai_client.beta.threads.runs.stream( + assistant_id=assistant_id, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + instructions=instructions, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + metadata=metadata, + model=model, + response_format=response_format, + temperature=temperature, + tool_choice=tool_choice, + tools=tools, + top_p=top_p, + truncation_strategy=truncation_strategy, + thread_id=thread_id, + event_handler=event_handler, + **kwargs, + ) + data = response + return data + + async def submit_tool_outputs_and_poll( + self, + *, + tool_outputs: Union[Iterable[run_submit_tool_outputs_params.ToolOutput]], + run_id: str, + thread_id: str, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + ) -> Run: + response = ( + await self.openai_client.beta.threads.runs.submit_tool_outputs_and_poll( + tool_outputs=tool_outputs, + run_id=run_id, + thread_id=thread_id, + poll_interval_ms=poll_interval_ms, + ) + ) + data = response + + return data # type: ignore[return-value] + + def submit_tool_outputs_stream( + self, + *, + tool_outputs: Union[Iterable[run_submit_tool_outputs_params.ToolOutput]], + run_id: str, + thread_id: str, + event_handler: Union[AsyncAssistantEventHandlerT, None] = None, + ) -> Union[ + AsyncAssistantStreamManager[AsyncAssistantEventHandler], + AsyncAssistantStreamManager[AsyncAssistantEventHandlerT], + ]: + response = self.openai_client.beta.threads.runs.submit_tool_outputs_stream( # type: ignore[type-var] + tool_outputs=tool_outputs, + run_id=run_id, + thread_id=thread_id, + event_handler=event_handler, + ) + data = response + + return data # type: ignore[return-value] + class AsyncSteps(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: diff --git a/portkey_ai/api_resources/apis/vector_stores.py b/portkey_ai/api_resources/apis/vector_stores.py new file mode 100644 index 0000000..4d9029a --- /dev/null +++ b/portkey_ai/api_resources/apis/vector_stores.py @@ -0,0 +1,809 @@ +import json +from typing import Iterable, List, Optional, Union +import typing +from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource +from portkey_ai.api_resources.client import AsyncPortkey, Portkey +from openai._types import NotGiven, NOT_GIVEN, FileTypes +from 
openai.types.beta import ( + vector_store_create_params, + vector_store_update_params, +) + +from portkey_ai.api_resources.types.vector_stores_type import ( + VectorStore, + VectorStoreDeleted, + VectorStoreFile, + VectorStoreFileBatch, + VectorStoreFileDeleted, + VectorStoreFileList, + VectorStoreList, +) + + +class VectorStores(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + self.files = VectorFiles(client) + self.file_batches = VectorFileBatches(client) + + def create( + self, + *, + expires_after: Union[ + vector_store_create_params.ExpiresAfter, NotGiven + ] = NOT_GIVEN, + file_ids: Union[List[str], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + name: Union[str, NotGiven] = NOT_GIVEN, + ) -> VectorStore: + response = self.openai_client.with_raw_response.beta.vector_stores.create( + expires_after=expires_after, + file_ids=file_ids, + metadata=metadata, + name=name, + ) + data = VectorStore(**json.loads(response.text)) + data._headers = response.headers + + return data + + def retrieve( + self, + vector_store_id: str, + **kwargs, + ) -> VectorStore: + response = self.openai_client.with_raw_response.beta.vector_stores.retrieve( + vector_store_id=vector_store_id, + **kwargs, + ) + data = VectorStore(**json.loads(response.text)) + data._headers = response.headers + + return data + + def update( + self, + vector_store_id: str, + *, + expires_after: Union[ + vector_store_update_params.ExpiresAfter, NotGiven + ] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + name: Union[str, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStore: + response = self.openai_client.with_raw_response.beta.vector_stores.update( + vector_store_id=vector_store_id, + expires_after=expires_after, + metadata=metadata, + name=name, + **kwargs, + ) + data = VectorStore(**json.loads(response.text)) + data._headers = response.headers + + return data + + @typing.no_type_check + def list( + self, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + order: Union[str, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreList: + response = self.openai_client.with_raw_response.beta.vector_stores.list( + after=after, + limit=limit, + order=order, + **kwargs, + ) + data = VectorStoreList(**json.loads(response.text)) + data._headers = response.headers + + return data + + def delete( + self, + vector_store_id: str, + **kwargs, + ) -> VectorStoreDeleted: + response = self.openai_client.with_raw_response.beta.vector_stores.delete( + vector_store_id=vector_store_id, + **kwargs, + ) + data = VectorStoreDeleted(**json.loads(response.text)) + data._headers = response.headers + + return data + + +class VectorFiles(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + def create( + self, + vector_store_id: str, + *, + file_id: str, + **kwargs, + ) -> VectorStoreFile: + response = self.openai_client.with_raw_response.beta.vector_stores.files.create( + vector_store_id=vector_store_id, + file_id=file_id, + **kwargs, + ) + data = VectorStoreFile(**json.loads(response.text)) + data._headers = response.headers + + return data + + def retrieve( + self, + file_id: str, + *, + vector_store_id: str, + **kwargs, + ) -> VectorStoreFile: + response = ( + self.openai_client.with_raw_response.beta.vector_stores.files.retrieve( + file_id=file_id, + vector_store_id=vector_store_id, + 
**kwargs, + ) + ) + data = VectorStoreFile(**json.loads(response.text)) + data._headers = response.headers + + return data + + @typing.no_type_check + def list( + self, + vector_store_id: str, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + before: Union[str, NotGiven] = NOT_GIVEN, + filter: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + order: Union[str, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFileList: + response = self.openai_client.with_raw_response.beta.vector_stores.files.list( + vector_store_id=vector_store_id, + after=after, + before=before, + filter=filter, + limit=limit, + order=order, + **kwargs, + ) + data = VectorStoreFileList(**json.loads(response.text)) + data._headers = response.headers + + return data + + def delete( + self, + file_id: str, + *, + vector_store_id: str, + **kwargs, + ) -> VectorStoreFileDeleted: + response = self.openai_client.with_raw_response.beta.vector_stores.files.delete( + file_id=file_id, + vector_store_id=vector_store_id, + **kwargs, + ) + data = VectorStoreFileDeleted(**json.loads(response.text)) + data._headers = response.headers + + return data + + def create_and_poll( + self, + file_id: str, + *, + vector_store_id: str, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFile: + response = self.openai_client.beta.vector_stores.files.create_and_poll( + file_id=file_id, + vector_store_id=vector_store_id, + poll_interval_ms=poll_interval_ms, + **kwargs, + ) + data = response + + return data # type: ignore[return-value] + + def poll( + self, + file_id: str, + *, + vector_store_id: str, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFile: + response = self.openai_client.beta.vector_stores.files.poll( + file_id=file_id, + vector_store_id=vector_store_id, + poll_interval_ms=poll_interval_ms, + **kwargs, + ) + data = response + + return data # type: ignore[return-value] + + def upload( + self, + *, + vector_store_id: str, + file: FileTypes, + **kwargs, + ) -> VectorStoreFile: + response = self.openai_client.beta.vector_stores.files.upload( + vector_store_id=vector_store_id, + file=file, + **kwargs, + ) + data = response + return data # type: ignore[return-value] + + def upload_and_poll( + self, + *, + vector_store_id: str, + file: FileTypes, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFile: + response = self.openai_client.beta.vector_stores.files.upload_and_poll( + vector_store_id=vector_store_id, + file=file, + poll_interval_ms=poll_interval_ms, + **kwargs, + ) + data = response + return data # type: ignore[return-value] + + +class VectorFileBatches(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + def create( + self, + vector_store_id: str, + *, + file_ids: List[str], + **kwargs, + ) -> VectorStoreFileBatch: + response = ( + self.openai_client.with_raw_response.beta.vector_stores.file_batches.create( + vector_store_id=vector_store_id, + file_ids=file_ids, + **kwargs, + ) + ) + data = VectorStoreFileBatch(**json.loads(response.text)) + data._headers = response.headers + + return data + + def retrieve( + self, batch_id: str, *, vector_store_id: str, **kwargs + ) -> VectorStoreFileBatch: + response = self.openai_client.with_raw_response.beta.vector_stores.file_batches.retrieve( # noqa: E501 + batch_id=batch_id, + vector_store_id=vector_store_id, + **kwargs, + ) + data = 
VectorStoreFileBatch(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
+
+    def cancel(
+        self, batch_id: str, *, vector_store_id: str, **kwargs
+    ) -> VectorStoreFileBatch:
+        response = (
+            self.openai_client.with_raw_response.beta.vector_stores.file_batches.cancel(
+                batch_id=batch_id,
+                vector_store_id=vector_store_id,
+                **kwargs,
+            )
+        )
+        data = VectorStoreFileBatch(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
+
+    def create_and_poll(
+        self,
+        vector_store_id: str,
+        *,
+        file_ids: List[str],
+        poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN,
+        **kwargs,
+    ) -> VectorStoreFileBatch:
+        response = self.openai_client.beta.vector_stores.file_batches.create_and_poll(
+            vector_store_id=vector_store_id,
+            file_ids=file_ids,
+            poll_interval_ms=poll_interval_ms,
+            **kwargs,
+        )
+        data = response
+        return data  # type: ignore[return-value]
+
+    @typing.no_type_check
+    def list_files(
+        self,
+        batch_id: str,
+        *,
+        vector_store_id: str,
+        after: Union[str, NotGiven] = NOT_GIVEN,
+        before: Union[str, NotGiven] = NOT_GIVEN,
+        filter: Union[str, NotGiven] = NOT_GIVEN,
+        limit: Union[int, NotGiven] = NOT_GIVEN,
+        order: Union[str, NotGiven] = NOT_GIVEN,
+        **kwargs,
+    ) -> VectorStoreFileList:
+        response = self.openai_client.with_raw_response.beta.vector_stores.file_batches.list_files(  # noqa: E501
+            batch_id=batch_id,
+            vector_store_id=vector_store_id,
+            after=after,
+            before=before,
+            filter=filter,
+            limit=limit,
+            order=order,
+            **kwargs,
+        )
+        data = VectorStoreFileList(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
+
+    def poll(
+        self,
+        batch_id: str,
+        *,
+        vector_store_id: str,
+        poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN,
+        **kwargs,
+    ) -> VectorStoreFileBatch:
+        response = self.openai_client.beta.vector_stores.file_batches.poll(
+            batch_id=batch_id,
+            vector_store_id=vector_store_id,
+            poll_interval_ms=poll_interval_ms,
+            **kwargs,
+        )
+        data = response
+
+        return data  # type: ignore[return-value]
+
+    def upload_and_poll(
+        self,
+        vector_store_id: str,
+        *,
+        files: Iterable[FileTypes],
+        max_concurrency: int = 5,
+        file_ids: List[str] = [],
+        poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN,
+        **kwargs,
+    ) -> VectorStoreFileBatch:
+        response = self.openai_client.beta.vector_stores.file_batches.upload_and_poll(
+            vector_store_id=vector_store_id,
+            files=files,
+            max_concurrency=max_concurrency,
+            file_ids=file_ids,
+            poll_interval_ms=poll_interval_ms,
+            **kwargs,
+        )
+        data = response
+
+        return data  # type: ignore[return-value]
+
+
+class AsyncVectorStores(AsyncAPIResource):
+    def __init__(self, client: AsyncPortkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+        self.files = AsyncVectorFiles(client)
+        self.file_batches = AsyncVectorFileBatches(client)
+
+    async def create(
+        self,
+        *,
+        expires_after: Union[
+            vector_store_create_params.ExpiresAfter, NotGiven
+        ] = NOT_GIVEN,
+        file_ids: Union[List[str], NotGiven] = NOT_GIVEN,
+        metadata: Union[Optional[object], NotGiven] = NOT_GIVEN,
+        name: Union[str, NotGiven] = NOT_GIVEN,
+    ) -> VectorStore:
+        response = await self.openai_client.with_raw_response.beta.vector_stores.create(
+            expires_after=expires_after,
+            file_ids=file_ids,
+            metadata=metadata,
+            name=name,
+        )
+        data = VectorStore(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
+
+    async def retrieve(
+        self,
+        vector_store_id: str,
+        **kwargs,
+    ) -> VectorStore:
+        response = (
+            await
self.openai_client.with_raw_response.beta.vector_stores.retrieve( + vector_store_id=vector_store_id, + **kwargs, + ) + ) + data = VectorStore(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def update( + self, + vector_store_id: str, + *, + expires_after: Union[ + vector_store_update_params.ExpiresAfter, NotGiven + ] = NOT_GIVEN, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + name: Union[str, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStore: + response = await self.openai_client.with_raw_response.beta.vector_stores.update( + vector_store_id=vector_store_id, + expires_after=expires_after, + metadata=metadata, + name=name, + **kwargs, + ) + data = VectorStore(**json.loads(response.text)) + data._headers = response.headers + + return data + + @typing.no_type_check + async def list( + self, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + order: Union[str, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreList: + response = await self.openai_client.with_raw_response.beta.vector_stores.list( + after=after, + limit=limit, + order=order, + **kwargs, + ) + data = VectorStoreList(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def delete( + self, + vector_store_id: str, + **kwargs, + ) -> VectorStoreDeleted: + response = await self.openai_client.with_raw_response.beta.vector_stores.delete( + vector_store_id=vector_store_id, + **kwargs, + ) + data = VectorStoreDeleted(**json.loads(response.text)) + data._headers = response.headers + + return data + + +class AsyncVectorFiles(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + async def create( + self, + vector_store_id: str, + *, + file_id: str, + **kwargs, + ) -> VectorStoreFile: + response = ( + await self.openai_client.with_raw_response.beta.vector_stores.files.create( + vector_store_id=vector_store_id, + file_id=file_id, + **kwargs, + ) + ) + data = VectorStoreFile(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def retrieve( + self, + file_id: str, + *, + vector_store_id: str, + **kwargs, + ) -> VectorStoreFile: + response = await self.openai_client.with_raw_response.beta.vector_stores.files.retrieve( # noqa: E501 + file_id=file_id, + vector_store_id=vector_store_id, + **kwargs, + ) + data = VectorStoreFile(**json.loads(response.text)) + data._headers = response.headers + + return data + + @typing.no_type_check + async def list( + self, + vector_store_id: str, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + before: Union[str, NotGiven] = NOT_GIVEN, + filter: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + order: Union[str, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFileList: + response = ( + await self.openai_client.with_raw_response.beta.vector_stores.files.list( + vector_store_id=vector_store_id, + after=after, + before=before, + filter=filter, + limit=limit, + order=order, + **kwargs, + ) + ) + data = VectorStoreFileList(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def delete( + self, + file_id: str, + *, + vector_store_id: str, + **kwargs, + ) -> VectorStoreFileDeleted: + response = ( + await self.openai_client.with_raw_response.beta.vector_stores.files.delete( + file_id=file_id, + vector_store_id=vector_store_id, + **kwargs, + ) + ) + data = 
VectorStoreFileDeleted(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def create_and_poll( + self, + file_id: str, + *, + vector_store_id: str, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFile: + response = await self.openai_client.beta.vector_stores.files.create_and_poll( + file_id=file_id, + vector_store_id=vector_store_id, + poll_interval_ms=poll_interval_ms, + **kwargs, + ) + data = response + + return data # type: ignore[return-value] + + async def poll( + self, + file_id: str, + *, + vector_store_id: str, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFile: + response = await self.openai_client.beta.vector_stores.files.poll( + file_id=file_id, + vector_store_id=vector_store_id, + poll_interval_ms=poll_interval_ms, + **kwargs, + ) + data = response + + return data # type: ignore[return-value] + + async def upload( + self, + *, + vector_store_id: str, + file: FileTypes, + **kwargs, + ) -> VectorStoreFile: + response = await self.openai_client.beta.vector_stores.files.upload( + vector_store_id=vector_store_id, + file=file, + **kwargs, + ) + data = response + return data # type: ignore[return-value] + + async def upload_and_poll( + self, + *, + vector_store_id: str, + file: FileTypes, + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFile: + response = await self.openai_client.beta.vector_stores.files.upload_and_poll( + vector_store_id=vector_store_id, + file=file, + poll_interval_ms=poll_interval_ms, + **kwargs, + ) + data = response + return data # type: ignore[return-value] + + +class AsyncVectorFileBatches(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + async def create( + self, + vector_store_id: str, + *, + file_ids: List[str], + **kwargs, + ) -> VectorStoreFileBatch: + response = await self.openai_client.with_raw_response.beta.vector_stores.file_batches.create( # noqa: E501 + vector_store_id=vector_store_id, + file_ids=file_ids, + **kwargs, + ) + data = VectorStoreFileBatch(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def retrieve( + self, batch_id: str, *, vector_store_id: str, **kwargs + ) -> VectorStoreFileBatch: + response = await self.openai_client.with_raw_response.beta.vector_stores.file_batches.retrieve( # noqa: E501 + batch_id=batch_id, + vector_store_id=vector_store_id, + **kwargs, + ) + data = VectorStoreFileBatch(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def cancel( + self, batch_id: str, *, vector_store_id: str, **kwargs + ) -> VectorStoreFileBatch: + response = await self.openai_client.with_raw_response.beta.vector_stores.file_batches.cancel( # noqa: E501 + batch_id=batch_id, + vector_store_id=vector_store_id, + **kwargs, + ) + data = VectorStoreFileBatch(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def create_and_poll( + self, + vector_store_id: str, + *, + file_ids: List[str], + poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> VectorStoreFileBatch: + response = ( + await self.openai_client.beta.vector_stores.file_batches.create_and_poll( + vector_store_id=vector_store_id, + file_ids=file_ids, + poll_interval_ms=poll_interval_ms, + **kwargs, + ) + ) + data = response + + return data # type: ignore[return-value] + + @typing.no_type_check + async def list_files( + 
self,
+        batch_id: str,
+        *,
+        vector_store_id: str,
+        after: Union[str, NotGiven] = NOT_GIVEN,
+        before: Union[str, NotGiven] = NOT_GIVEN,
+        filter: Union[str, NotGiven] = NOT_GIVEN,
+        limit: Union[int, NotGiven] = NOT_GIVEN,
+        order: Union[str, NotGiven] = NOT_GIVEN,
+        **kwargs,
+    ) -> VectorStoreFileList:
+        response = await self.openai_client.with_raw_response.beta.vector_stores.file_batches.list_files(  # noqa: E501
+            batch_id=batch_id,
+            vector_store_id=vector_store_id,
+            after=after,
+            before=before,
+            filter=filter,
+            limit=limit,
+            order=order,
+            **kwargs,
+        )
+        data = VectorStoreFileList(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
+
+    async def poll(
+        self,
+        batch_id: str,
+        *,
+        vector_store_id: str,
+        poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN,
+        **kwargs,
+    ) -> VectorStoreFileBatch:
+        response = await self.openai_client.beta.vector_stores.file_batches.poll(
+            batch_id=batch_id,
+            vector_store_id=vector_store_id,
+            poll_interval_ms=poll_interval_ms,
+            **kwargs,
+        )
+        data = response
+
+        return data  # type: ignore[return-value]
+
+    async def upload_and_poll(
+        self,
+        vector_store_id: str,
+        *,
+        files: Iterable[FileTypes],
+        max_concurrency: int = 5,
+        file_ids: List[str] = [],
+        poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN,
+        **kwargs,
+    ) -> VectorStoreFileBatch:
+        response = (
+            await self.openai_client.beta.vector_stores.file_batches.upload_and_poll(
+                vector_store_id=vector_store_id,
+                files=files,
+                max_concurrency=max_concurrency,
+                file_ids=file_ids,
+                poll_interval_ms=poll_interval_ms,
+                **kwargs,
+            )
+        )
+        data = response
+
+        return data  # type: ignore[return-value]
diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py
index d927906..6dc8527 100644
--- a/portkey_ai/api_resources/client.py
+++ b/portkey_ai/api_resources/client.py
@@ -19,14 +19,20 @@ class Portkey(APIClient):
     images: apis.Images
     files: apis.MainFiles
     models: apis.Models
+    moderations: apis.Moderations
+    audio: apis.Audio
+    batches: apis.Batches
+    fine_tuning: apis.FineTuning
 
     class beta:
         assistants: apis.Assistants
         threads: apis.Threads
+        vector_stores: apis.VectorStores
 
         def __init__(self, client: Portkey) -> None:
             self.assistants = apis.Assistants(client)
             self.threads = apis.Threads(client)
+            self.vector_stores = apis.VectorStores(client)
 
     def __init__(
         self,
@@ -66,6 +72,10 @@ def __init__(
         self.images = apis.Images(self)
         self.files = apis.MainFiles(self)
         self.models = apis.Models(self)
+        self.moderations = apis.Moderations(self)
+        self.audio = apis.Audio(self)
+        self.batches = apis.Batches(self)
+        self.fine_tuning = apis.FineTuning(self)
         self.beta = self.beta(self)  # type: ignore
 
     def copy(
@@ -107,14 +117,20 @@ class AsyncPortkey(AsyncAPIClient):
     images: apis.AsyncImages
     files: apis.AsyncMainFiles
     models: apis.AsyncModels
+    moderations: apis.AsyncModerations
+    audio: apis.AsyncAudio
+    batches: apis.AsyncBatches
+    fine_tuning: apis.AsyncFineTuning
 
     class beta:
         assistants: apis.AsyncAssistants
         threads: apis.AsyncThreads
+        vector_stores: apis.AsyncVectorStores
 
         def __init__(self, client: AsyncPortkey) -> None:
            self.assistants = apis.AsyncAssistants(client)
            self.threads = apis.AsyncThreads(client)
+           self.vector_stores = apis.AsyncVectorStores(client)
 
     def __init__(
         self,
@@ -154,6 +170,10 @@ def __init__(
         self.images = apis.AsyncImages(self)
         self.files = apis.AsyncMainFiles(self)
         self.models = apis.AsyncModels(self)
+        self.moderations = apis.AsyncModerations(self)
+        self.audio = apis.AsyncAudio(self)
+        self.batches =
apis.AsyncBatches(self) + self.fine_tuning = apis.AsyncFineTuning(self) self.beta = self.beta(self) # type: ignore def copy( diff --git a/portkey_ai/api_resources/types/assistant_type.py b/portkey_ai/api_resources/types/assistant_type.py index 8772044..f596719 100644 --- a/portkey_ai/api_resources/types/assistant_type.py +++ b/portkey_ai/api_resources/types/assistant_type.py @@ -9,9 +9,6 @@ "Assistant", "AssistantList", "AssistantDeleted", - "AssistantFile", - "AssistantFileList", - "AssistantFileDeleted", "ToolCodeInterpreter", "ToolRetrieval", "ToolFunction", @@ -38,7 +35,7 @@ class Assistant(BaseModel): id: Optional[str] created_at: Optional[int] description: Optional[str] = None - file_ids: Optional[List[str]] + file_ids: Optional[List[str]] = None instructions: Optional[str] = None metadata: Optional[object] = None model: Optional[str] @@ -94,46 +91,46 @@ def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) -class AssistantFile(BaseModel, extra="allow"): - id: Optional[str] - assistant_id: Optional[str] - created_at: Optional[int] - object: Optional[str] - _headers: Optional[httpx.Headers] = PrivateAttr() +# class AssistantFile(BaseModel, extra="allow"): +# id: Optional[str] +# assistant_id: Optional[str] +# created_at: Optional[int] +# object: Optional[str] +# _headers: Optional[httpx.Headers] = PrivateAttr() - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) +# def __str__(self): +# del self._headers +# return json.dumps(self.dict(), indent=4) - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) +# def get_headers(self) -> Optional[Dict[str, str]]: +# return parse_headers(self._headers) -class AssistantFileList(BaseModel, extra="allow"): - object: Optional[str] - data: Optional[List[AssistantFile]] - first_id: Optional[str] - last_id: Optional[str] - has_more: Optional[bool] - _headers: Optional[httpx.Headers] = PrivateAttr() +# class AssistantFileList(BaseModel, extra="allow"): +# object: Optional[str] +# data: Optional[List[AssistantFile]] +# first_id: Optional[str] +# last_id: Optional[str] +# has_more: Optional[bool] +# _headers: Optional[httpx.Headers] = PrivateAttr() - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) +# def __str__(self): +# del self._headers +# return json.dumps(self.dict(), indent=4) - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) +# def get_headers(self) -> Optional[Dict[str, str]]: +# return parse_headers(self._headers) -class AssistantFileDeleted(BaseModel, extra="allow"): - id: Optional[str] - deleted: Optional[bool] - object: Optional[str] - _headers: Optional[httpx.Headers] = PrivateAttr() +# class AssistantFileDeleted(BaseModel, extra="allow"): +# id: Optional[str] +# deleted: Optional[bool] +# object: Optional[str] +# _headers: Optional[httpx.Headers] = PrivateAttr() - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) +# def __str__(self): +# del self._headers +# return json.dumps(self.dict(), indent=4) - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) +# def get_headers(self) -> Optional[Dict[str, str]]: +# return parse_headers(self._headers) diff --git a/portkey_ai/api_resources/types/audio_types.py b/portkey_ai/api_resources/types/audio_types.py new file mode 100644 index 0000000..4989198 --- /dev/null +++ b/portkey_ai/api_resources/types/audio_types.py @@ -0,0 +1,44 @@ +import json +from typing 
import Dict, Optional +import httpx +from .utils import parse_headers +from typing import Any +from pydantic import BaseModel, PrivateAttr + +__all__ = ["Transcription", "Translation"] + + +class Transcription(BaseModel): + text: str + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class Translation(BaseModel): + text: str + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) diff --git a/portkey_ai/api_resources/types/batches_type.py b/portkey_ai/api_resources/types/batches_type.py new file mode 100644 index 0000000..4837d8f --- /dev/null +++ b/portkey_ai/api_resources/types/batches_type.py @@ -0,0 +1,74 @@ +import json +import builtins +from typing import Dict, Optional +import httpx +from .utils import parse_headers +from typing import List, Any +from pydantic import BaseModel, PrivateAttr +from openai.types.batch_error import BatchError +from openai.types.batch_request_counts import BatchRequestCounts + +__all__ = ["Batch", "BatchList", "Errors"] + + +class Errors(BaseModel): + data: Optional[List[BatchError]] = None + + object: Optional[str] = None + """The object type, which is always `list`.""" + + +class Batch(BaseModel): + id: str + completion_window: str + created_at: int + endpoint: str + input_file_id: str + object: str + status: str + cancelled_at: Optional[int] = None + cancelling_at: Optional[int] = None + completed_at: Optional[int] = None + error_file_id: Optional[str] = None + errors: Optional[Errors] = None + expired_at: Optional[int] = None + expires_at: Optional[int] = None + failed_at: Optional[int] = None + finalizing_at: Optional[int] = None + in_progress_at: Optional[int] = None + metadata: Optional[builtins.object] = None + output_file_id: Optional[str] = None + request_counts: Optional[BatchRequestCounts] = None + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class BatchList(BaseModel): + object: Optional[str] = None + data: Optional[List[Batch]] = None + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) diff --git a/portkey_ai/api_resources/types/fine_tuning_type.py b/portkey_ai/api_resources/types/fine_tuning_type.py new file mode 100644 index 0000000..59316b1 --- /dev/null 
+++ b/portkey_ai/api_resources/types/fine_tuning_type.py @@ -0,0 +1,176 @@ +import json +from typing import Dict, Optional, Union +import httpx +from .utils import parse_headers +from typing import List, Any +from pydantic import BaseModel, PrivateAttr +from openai.types.fine_tuning import FineTuningJobWandbIntegrationObject + +__all__ = [ + "Error", + "Hyperparameters", + "FineTuningJob", + "FineTuningJobList", + "FineTuningJobEvent", + "FineTuningJobEventList", + "Metrics", + "FineTuningJobCheckpoint", + "FineTuningJobCheckpointList", +] + + +class Error(BaseModel): + code: str + message: str + param: Optional[str] = None + + +class Hyperparameters(BaseModel): + n_epochs: Union[str, int] + + +class FineTuningJob(BaseModel): + id: str + created_at: int + error: Optional[Error] = None + fine_tuned_model: Optional[str] = None + finished_at: Optional[int] = None + hyperparameters: Hyperparameters + model: str + object: str + organization_id: str + result_files: List[str] + seed: int + status: str + trained_tokens: Optional[int] = None + training_file: str + validation_file: Optional[str] = None + estimated_finish: Optional[int] = None + integrations: Optional[List[FineTuningJobWandbIntegrationObject]] = None + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class FineTuningJobList(BaseModel): + object: Optional[str] = None + data: Optional[List[FineTuningJob]] = None + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class FineTuningJobEvent(BaseModel): + id: str + created_at: int + level: str + message: str + object: str + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class FineTuningJobEventList(BaseModel): + object: Optional[str] = None + data: Optional[List[FineTuningJobEvent]] = None + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class Metrics(BaseModel): + full_valid_loss: Optional[float] = None + full_valid_mean_token_accuracy: Optional[float] = None + step: Optional[float] = None + train_loss: Optional[float] = None + train_mean_token_accuracy: Optional[float] = None + valid_loss: Optional[float] = None + valid_mean_token_accuracy: Optional[float] = None + + +class 
FineTuningJobCheckpoint(BaseModel): + id: str + created_at: int + fine_tuned_model_checkpoint: str + fine_tuning_job_id: str + metrics: Metrics + object: str + step_number: int + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class FineTuningJobCheckpointList(BaseModel): + object: Optional[str] = None + data: Optional[List[FineTuningJobCheckpoint]] = None + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) diff --git a/portkey_ai/api_resources/types/moderations_type.py b/portkey_ai/api_resources/types/moderations_type.py new file mode 100644 index 0000000..c85547a --- /dev/null +++ b/portkey_ai/api_resources/types/moderations_type.py @@ -0,0 +1,30 @@ +import json +from typing import Dict, Optional +import httpx +from .utils import parse_headers +from typing import List, Any +from pydantic import BaseModel, PrivateAttr +from openai.types.moderation import Moderation + + +__all__ = ["ModerationCreateResponse"] + + +class ModerationCreateResponse(BaseModel): + id: str + model: str + results: List[Moderation] + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) diff --git a/portkey_ai/api_resources/types/thread_message_type.py b/portkey_ai/api_resources/types/thread_message_type.py index d754a46..0ceea6f 100644 --- a/portkey_ai/api_resources/types/thread_message_type.py +++ b/portkey_ai/api_resources/types/thread_message_type.py @@ -107,11 +107,25 @@ def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) -class MessageFile(BaseModel, extra="allow"): - id: Optional[str] - object: Optional[str] - created_at: Optional[int] - message_id: Optional[str] +# class MessageFile(BaseModel, extra="allow"): +# id: Optional[str] +# object: Optional[str] +# created_at: Optional[int] +# message_id: Optional[str] +# _headers: Optional[httpx.Headers] = PrivateAttr() + +# def __str__(self): +# del self._headers +# return json.dumps(self.dict(), indent=4) + +# def get_headers(self) -> Optional[Dict[str, str]]: +# return parse_headers(self._headers) + + +class ThreadMessageDeleted(BaseModel): + id: str + deleted: bool + object: str _headers: Optional[httpx.Headers] = PrivateAttr() def __str__(self): diff --git a/portkey_ai/api_resources/types/vector_stores_type.py b/portkey_ai/api_resources/types/vector_stores_type.py new file mode 100644 index 0000000..188a8ea --- /dev/null +++ b/portkey_ai/api_resources/types/vector_stores_type.py @@ -0,0 +1,147 @@ +import json +from typing import Dict, List, Optional +import httpx +from .utils import 
parse_headers +from pydantic import BaseModel, PrivateAttr + +__all__ = [ + "LastError", + "ExpiresAfter", + "VectorStore", + "VectorStoreList", + "VectorStoreDeleted", + "VectorStoreFile", + "VectorStoreFileList", + "VectorStoreFileDeleted", + "FileCounts", + "VectorStoreFileBatch", +] + + +class FileCounts(BaseModel): + cancelled: int + completed: int + failed: int + in_progress: int + total: int + + +class ExpiresAfter(BaseModel): + anchor: str + days: int + + +class VectorStore(BaseModel): + id: str + created_at: int + file_counts: FileCounts + last_active_at: Optional[int] = None + metadata: Optional[object] = None + name: str + object: str + status: str + usage_bytes: int + expires_after: Optional[ExpiresAfter] = None + expires_at: Optional[int] = None + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class VectorStoreList(BaseModel): + data: List[VectorStore] + object: str + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class VectorStoreDeleted(BaseModel): + id: str + deleted: bool + object: str + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class LastError(BaseModel): + code: str + message: str + + +class VectorStoreFile(BaseModel): + id: str + created_at: int + last_error: Optional[LastError] = None + object: str + status: str + usage_bytes: int + vector_store_id: str + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class VectorStoreFileList(BaseModel): + data: List[VectorStoreFile] + object: str + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class VectorStoreFileDeleted(BaseModel): + id: str + deleted: bool + object: str + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class VectorStoreFileBatch(BaseModel): + id: str + created_at: int + file_counts: FileCounts + object: str + status: str + vector_store_id: str + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) diff --git a/tests/configs/audio/single_provider/single_provider.json b/tests/configs/audio/single_provider/single_provider.json new file mode 100644 index 0000000..9471258 --- /dev/null +++ b/tests/configs/audio/single_provider/single_provider.json @@ -0,0 +1,3 @@ +{ + "virtual_key": "openai-virtual-key" +} \ No newline at end of file diff --git a/tests/configs/audio/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json 
b/tests/configs/audio/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json new file mode 100644 index 0000000..52281ce --- /dev/null +++ b/tests/configs/audio/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json @@ -0,0 +1,13 @@ +{ + "virtual_key": "openai-virtual-key", + "cache": { + "mode": "semantic", + "max_age": 60 + }, + "retry": { + "attempts": 5, + "on_status_codes": [ + 429 + ] + } +} \ No newline at end of file diff --git a/tests/configs/audio/single_with_basic_config/single_with_basic_config.json b/tests/configs/audio/single_with_basic_config/single_with_basic_config.json new file mode 100644 index 0000000..9471258 --- /dev/null +++ b/tests/configs/audio/single_with_basic_config/single_with_basic_config.json @@ -0,0 +1,3 @@ +{ + "virtual_key": "openai-virtual-key" +} \ No newline at end of file diff --git a/tests/configs/audio/speech.mp3 b/tests/configs/audio/speech.mp3 new file mode 100644 index 0000000..ea039ef Binary files /dev/null and b/tests/configs/audio/speech.mp3 differ diff --git a/tests/configs/moderations/single_provider/single_provider.json b/tests/configs/moderations/single_provider/single_provider.json new file mode 100644 index 0000000..9471258 --- /dev/null +++ b/tests/configs/moderations/single_provider/single_provider.json @@ -0,0 +1,3 @@ +{ + "virtual_key": "openai-virtual-key" +} \ No newline at end of file diff --git a/tests/configs/moderations/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json b/tests/configs/moderations/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json new file mode 100644 index 0000000..52281ce --- /dev/null +++ b/tests/configs/moderations/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json @@ -0,0 +1,13 @@ +{ + "virtual_key": "openai-virtual-key", + "cache": { + "mode": "semantic", + "max_age": 60 + }, + "retry": { + "attempts": 5, + "on_status_codes": [ + 429 + ] + } +} \ No newline at end of file diff --git a/tests/configs/moderations/single_with_basic_config/single_with_basic_config.json b/tests/configs/moderations/single_with_basic_config/single_with_basic_config.json new file mode 100644 index 0000000..9471258 --- /dev/null +++ b/tests/configs/moderations/single_with_basic_config/single_with_basic_config.json @@ -0,0 +1,3 @@ +{ + "virtual_key": "openai-virtual-key" +} \ No newline at end of file diff --git a/tests/models.json b/tests/models.json index b8af633..2924696 100644 --- a/tests/models.json +++ b/tests/models.json @@ -18,6 +18,10 @@ "image":[ "dall-e-3", "dall-e-2" + ], + "audio":[ + "tts-1", + "whisper-1" ] }, "anyscale": { @@ -36,7 +40,8 @@ "mistralai/Mistral-7B-Instruct-v0.1", "mistralai/Mixtral-8x7B-Instruct-v0.1" ], - "image":[] + "image":[], + "audio":[] }, "anthropic": { "env_variable": "ANTHROPIC_API_KEY", @@ -53,7 +58,8 @@ "claude-2.0", "claude-instant-1.2" ], - "image":[] + "image":[], + "audio":[] }, "cohere": { "env_variable": "COHERE_API_KEY", @@ -69,6 +75,7 @@ "command", "command-nightly" ], - "image":[] + "image":[], + "audio":[] } } \ No newline at end of file diff --git a/tests/test_assistants.py b/tests/test_assistants.py index 5622be4..15c21c2 100644 --- a/tests/test_assistants.py +++ b/tests/test_assistants.py @@ -112,7 +112,6 @@ def test_method_all_params(self, client: Any, virtual_key: str) -> None: assistant = portkey.beta.assistants.create( model=model, description="string", - file_ids=["file-m9QiEaDT9Le28LydiUTsUwDv"], instructions="You are a personal math tutor." 
+ "Write and run code to answer math questions.", metadata=metadata, diff --git a/tests/test_async_audio_speech.py b/tests/test_async_audio_speech.py new file mode 100644 index 0000000..03a0a67 --- /dev/null +++ b/tests/test_async_audio_speech.py @@ -0,0 +1,154 @@ +from __future__ import annotations +import inspect + +import os +from os import walk +from typing import Any, Dict, List +import pytest +from uuid import uuid4 +from portkey_ai import AsyncPortkey +from time import sleep +from dotenv import load_dotenv +from .utils import read_json_file + + +load_dotenv(override=True) + +base_url = os.environ.get("PORTKEY_BASE_URL") +api_key = os.environ.get("PORTKEY_API_KEY") +virtual_api_key = os.environ.get("OPENAI_VIRTUAL_KEY") + +CONFIGS_PATH = "./tests/configs/audio" + + +def get_configs(folder_path) -> List[Dict[str, Any]]: + config_files = [] + for dirpath, _, file_names in walk(folder_path): + for f in file_names: + config_files.append(read_json_file(os.path.join(dirpath, f))) + + return config_files + + +class TestAudioSpeech: + client = AsyncPortkey + parametrize = pytest.mark.parametrize("client", [client], ids=["strict"]) + models = read_json_file("./tests/models.json") + + def get_metadata(self): + return { + "case": "testing", + "function": inspect.currentframe().f_back.f_code.co_name, + "random_id": str(uuid4()), + } + + # -------------------------- + # Test-1 + t1_params = [] + t = [] + for k, v in models.items(): + if k == "openai": + for i in v["audio"]: + t.append((client, k, os.environ.get(v["env_variable"]), i)) + + t1_params.extend(t) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, provider, auth, model", t1_params) + async def test_method_single_with_vk_and_provider( + self, client: Any, provider: str, auth: str, model + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + provider=f"{provider}", + Authorization=f"{auth}", + trace_id=str(uuid4()), + metadata=self.get_metadata(), + ) + + audio = await portkey.audio.speech.create( + model="tts-1", + voice="alloy", + input="The quick brown fox jumped over the lazy dog.", + ) + + assert isinstance(audio.content, bytes) is True + + # -------------------------- + # Test -2 + t2_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"): + t2_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t2_params) + async def test_method_single_with_basic_config( + self, client: Any, config: Dict + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=str(uuid4()), + metadata=self.get_metadata(), + config=config, + ) + + audio = await portkey.audio.speech.create( + model="tts-1", + voice="alloy", + input="The quick brown fox jumped over the lazy dog.", + ) + + assert isinstance(audio.content, bytes) is True + + # -------------------------- + # Test-3 + t3_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_provider_with_vk_retry_cache"): + t3_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t3_params) + async def test_method_single_provider_with_vk_retry_cache( + self, client: Any, config: Dict + ) -> None: + # 1. Make a new cache the cache + # 2. Make a cache hit and see if the response contains the data. 
+ random_id = str(uuid4()) + metadata = self.get_metadata() + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + audio = await portkey.audio.speech.create( + model="tts-1", + voice="alloy", + input="The quick brown fox jumped over the lazy dog.", + ) + + assert isinstance(audio.content, bytes) is True + # Sleeping for the cache to reflect across the workers. The cache has an + # eventual consistency and not immediate consistency. + sleep(20) + portkey_2 = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + cached_audio = await portkey_2.audio.speech.create( + model="tts-1", + voice="alloy", + input="The quick brown fox jumped over the lazy dog.", + ) + + assert isinstance(cached_audio.content, bytes) is True diff --git a/tests/test_async_audio_transcript.py b/tests/test_async_audio_transcript.py new file mode 100644 index 0000000..3a99b34 --- /dev/null +++ b/tests/test_async_audio_transcript.py @@ -0,0 +1,161 @@ +from __future__ import annotations +import inspect + +import os +from os import walk +from typing import Any, Dict, List +import pytest +from uuid import uuid4 +from portkey_ai import AsyncPortkey +from time import sleep +from dotenv import load_dotenv +from .utils import read_json_file + + +load_dotenv(override=True) + +base_url = os.environ.get("PORTKEY_BASE_URL") +api_key = os.environ.get("PORTKEY_API_KEY") +virtual_api_key = os.environ.get("OPENAI_VIRTUAL_KEY") + +CONFIGS_PATH = "./tests/configs/audio" + + +def get_configs(folder_path) -> List[Dict[str, Any]]: + config_files = [] + for dirpath, _, file_names in walk(folder_path): + for f in file_names: + config_files.append(read_json_file(os.path.join(dirpath, f))) + + return config_files + + +class TestAudioTranscript: + client = AsyncPortkey + parametrize = pytest.mark.parametrize("client", [client], ids=["strict"]) + models = read_json_file("./tests/models.json") + + def get_metadata(self): + return { + "case": "testing", + "function": inspect.currentframe().f_back.f_code.co_name, + "random_id": str(uuid4()), + } + + # -------------------------- + # Test-4 + t4_params = [] + t4 = [] + + for k, v in models.items(): + if k == "openai": + for i in v["audio"]: + t4.append((client, k, os.environ.get(v["env_variable"]), i)) + + t4_params.extend(t4) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, provider, auth, model", t4_params) + async def test_method_single_with_vk_and_provider( + self, client: Any, provider: str, auth: str, model + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + provider=f"{provider}", + Authorization=f"{auth}", + trace_id=str(uuid4()), + metadata=self.get_metadata(), + ) + + audio_file = open( + "speech.mp3", + "rb", + ) + + transcript = await portkey.audio.transcriptions.create( + model="whisper-1", file=audio_file + ) + + assert isinstance(transcript.text, str) is True + + # -------------------------- + # Test -5 + t5_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"): + t5_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t5_params) + async def test_method_single_with_basic_config( + self, client: Any, config: Dict + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=str(uuid4()), + metadata=self.get_metadata(), + config=config, + ) + audio_file = open( + 
"speech.mp3", + "rb", + ) + + transcript = await portkey.audio.transcriptions.create( + model="whisper-1", file=audio_file + ) + + assert isinstance(transcript.text, str) is True + + # -------------------------- + # Test-6 + t6_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_provider_with_vk_retry_cache"): + t6_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t6_params) + async def test_method_single_provider_with_vk_retry_cache( + self, client: Any, config: Dict + ) -> None: + # 1. Make a new cache the cache + # 2. Make a cache hit and see if the response contains the data. + random_id = str(uuid4()) + metadata = self.get_metadata() + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + audio_file = open( + "speech.mp3", + "rb", + ) + + transcript = await portkey.audio.transcriptions.create( + model="whisper-1", file=audio_file + ) + + assert isinstance(transcript.text, str) is True + # Sleeping for the cache to reflect across the workers. The cache has an + # eventual consistency and not immediate consistency. + sleep(20) + portkey_2 = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + cached_transcript = await portkey_2.audio.transcriptions.create( + model="whisper-1", file=audio_file + ) + + assert isinstance(cached_transcript.text, str) is True diff --git a/tests/test_async_audio_translation.py b/tests/test_async_audio_translation.py new file mode 100644 index 0000000..6c6805d --- /dev/null +++ b/tests/test_async_audio_translation.py @@ -0,0 +1,160 @@ +from __future__ import annotations +import inspect + +import os +from os import walk +from typing import Any, Dict, List +import pytest +from uuid import uuid4 +from portkey_ai import AsyncPortkey +from time import sleep +from dotenv import load_dotenv +from .utils import read_json_file + + +load_dotenv(override=True) + +base_url = os.environ.get("PORTKEY_BASE_URL") +api_key = os.environ.get("PORTKEY_API_KEY") +virtual_api_key = os.environ.get("OPENAI_VIRTUAL_KEY") + +CONFIGS_PATH = "./tests/configs/audio" + + +def get_configs(folder_path) -> List[Dict[str, Any]]: + config_files = [] + for dirpath, _, file_names in walk(folder_path): + for f in file_names: + config_files.append(read_json_file(os.path.join(dirpath, f))) + + return config_files + + +class TestAudioTranslations: + client = AsyncPortkey + parametrize = pytest.mark.parametrize("client", [client], ids=["strict"]) + models = read_json_file("./tests/models.json") + + def get_metadata(self): + return { + "case": "testing", + "function": inspect.currentframe().f_back.f_code.co_name, + "random_id": str(uuid4()), + } + + # -------------------------- + # Test-4 + t4_params = [] + t4 = [] + for k, v in models.items(): + if k == "openai": + for i in v["audio"]: + t4.append((client, k, os.environ.get(v["env_variable"]), i)) + + t4_params.extend(t4) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, provider, auth, model", t4_params) + async def test_method_single_with_vk_and_provider( + self, client: Any, provider: str, auth: str, model + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + provider=f"{provider}", + Authorization=f"{auth}", + trace_id=str(uuid4()), + metadata=self.get_metadata(), + ) + + audio_file = open( + "speech.mp3", + "rb", + ) + + translations = await 
portkey.audio.translations.create( + model="whisper-1", file=audio_file + ) + + assert isinstance(translations.text, str) is True + + # -------------------------- + # Test -5 + t5_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"): + t5_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t5_params) + async def test_method_single_with_basic_config( + self, client: Any, config: Dict + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=str(uuid4()), + metadata=self.get_metadata(), + config=config, + ) + audio_file = open( + "speech.mp3", + "rb", + ) + + translations = await portkey.audio.translations.create( + model="whisper-1", file=audio_file + ) + + assert isinstance(translations.text, str) is True + + # -------------------------- + # Test-6 + t6_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_provider_with_vk_retry_cache"): + t6_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t6_params) + async def test_method_single_provider_with_vk_retry_cache( + self, client: Any, config: Dict + ) -> None: + # 1. Make a new cache the cache + # 2. Make a cache hit and see if the response contains the data. + random_id = str(uuid4()) + metadata = self.get_metadata() + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + audio_file = open( + "speech.mp3", + "rb", + ) + + translations = await portkey.audio.translations.create( + model="whisper-1", file=audio_file + ) + + assert isinstance(translations.text, str) is True + # Sleeping for the cache to reflect across the workers. The cache has an + # eventual consistency and not immediate consistency. 
+ sleep(20) + portkey_2 = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + cached_translations = await portkey_2.audio.translations.create( + model="whisper-1", file=audio_file + ) + + assert isinstance(cached_translations.text, str) is True diff --git a/tests/test_async_moderations.py b/tests/test_async_moderations.py new file mode 100644 index 0000000..02a1f2d --- /dev/null +++ b/tests/test_async_moderations.py @@ -0,0 +1,145 @@ +from __future__ import annotations +import inspect + +import os +from os import walk +from typing import Any, Dict, List +import pytest +from uuid import uuid4 +from portkey_ai import AsyncPortkey +from time import sleep +from dotenv import load_dotenv +from .utils import read_json_file + + +load_dotenv(override=True) + +base_url = os.environ.get("PORTKEY_BASE_URL") +api_key = os.environ.get("PORTKEY_API_KEY") +virtual_api_key = os.environ.get("OPENAI_VIRTUAL_KEY") + +CONFIGS_PATH = "./tests/configs/moderations" + + +def get_configs(folder_path) -> List[Dict[str, Any]]: + config_files = [] + for dirpath, _, file_names in walk(folder_path): + for f in file_names: + config_files.append(read_json_file(os.path.join(dirpath, f))) + + return config_files + + +class TestModerations: + client = AsyncPortkey + parametrize = pytest.mark.parametrize("client", [client], ids=["strict"]) + models = read_json_file("./tests/models.json") + + def get_metadata(self): + return { + "case": "testing", + "function": inspect.currentframe().f_back.f_code.co_name, + "random_id": str(uuid4()), + } + + # -------------------------- + # Test-1 + t1_params = [] + t = [] + for k, v in models.items(): + for i in v["chat"]: + t.append((client, k, os.environ.get(v["env_variable"]), i)) + + t1_params.extend(t) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, provider, auth, model", t1_params) + async def test_method_single_with_vk_and_provider( + self, client: Any, provider: str, auth: str, model + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + provider=f"{provider}", + Authorization=f"{auth}", + trace_id=str(uuid4()), + metadata=self.get_metadata(), + ) + + moderations = await portkey.moderations.create( + input="I want to kill them.", model="text-moderation-stable" + ) + + assert isinstance(moderations.id, str) is True + + # -------------------------- + # Test -2 + t2_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"): + t2_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t2_params) + async def test_method_single_with_basic_config( + self, client: Any, config: Dict + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=str(uuid4()), + metadata=self.get_metadata(), + config=config, + ) + + moderations = await portkey.moderations.create( + input="I want to kill them.", model="text-moderation-stable" + ) + + assert isinstance(moderations.id, str) is True + + # -------------------------- + # Test-3 + t3_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_provider_with_vk_retry_cache"): + t3_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t3_params) + async def test_method_single_provider_with_vk_retry_cache( + self, client: Any, config: Dict + ) -> None: + # 1. Make a new cache the cache + # 2. Make a cache hit and see if the response contains the data. 
+ random_id = str(uuid4()) + metadata = self.get_metadata() + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + moderations = await portkey.moderations.create( + input="I want to kill them.", model="text-moderation-stable" + ) + + assert isinstance(moderations.id, str) is True + # Sleeping for the cache to reflect across the workers. The cache has an + # eventual consistency and not immediate consistency. + sleep(20) + portkey_2 = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + cached_moderations = await portkey_2.moderations.create( + input="I want to kill them.", model="text-moderation-stable" + ) + + assert isinstance(cached_moderations.id, str) is True diff --git a/tests/test_audio_speech.py b/tests/test_audio_speech.py new file mode 100644 index 0000000..63aeece --- /dev/null +++ b/tests/test_audio_speech.py @@ -0,0 +1,149 @@ +from __future__ import annotations +import inspect + +import os +from os import walk +from typing import Any, Dict, List +import pytest +from uuid import uuid4 +from portkey_ai import Portkey +from time import sleep +from dotenv import load_dotenv +from .utils import read_json_file + + +load_dotenv(override=True) + +base_url = os.environ.get("PORTKEY_BASE_URL") +api_key = os.environ.get("PORTKEY_API_KEY") +virtual_api_key = os.environ.get("OPENAI_VIRTUAL_KEY") + +CONFIGS_PATH = "./tests/configs/audio" + + +def get_configs(folder_path) -> List[Dict[str, Any]]: + config_files = [] + for dirpath, _, file_names in walk(folder_path): + for f in file_names: + config_files.append(read_json_file(os.path.join(dirpath, f))) + + return config_files + + +class TestAudioSpeech: + client = Portkey + parametrize = pytest.mark.parametrize("client", [client], ids=["strict"]) + models = read_json_file("./tests/models.json") + + def get_metadata(self): + return { + "case": "testing", + "function": inspect.currentframe().f_back.f_code.co_name, + "random_id": str(uuid4()), + } + + # -------------------------- + # Test-1 + t1_params = [] + t = [] + for k, v in models.items(): + if k == "openai": + for i in v["audio"]: + t.append((client, k, os.environ.get(v["env_variable"]), i)) + + t1_params.extend(t) + + @pytest.mark.parametrize("client, provider, auth, model", t1_params) + def test_method_single_with_vk_and_provider( + self, client: Any, provider: str, auth: str, model + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + provider=f"{provider}", + Authorization=f"{auth}", + trace_id=str(uuid4()), + metadata=self.get_metadata(), + ) + + audio = portkey.audio.speech.create( + model="tts-1", + voice="alloy", + input="The quick brown fox jumped over the lazy dog.", + ) + + assert isinstance(audio.content, bytes) is True + + # -------------------------- + # Test -2 + t2_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"): + t2_params.append((client, i)) + + @pytest.mark.parametrize("client, config", t2_params) + def test_method_single_with_basic_config(self, client: Any, config: Dict) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=str(uuid4()), + metadata=self.get_metadata(), + config=config, + ) + + audio = portkey.audio.speech.create( + model="tts-1", + voice="alloy", + input="The quick brown fox jumped over the lazy dog.", + ) + + assert isinstance(audio.content, bytes) is True + + # 
+    # --------------------------
+    # Test-3
+    t3_params = []
+    for i in get_configs(f"{CONFIGS_PATH}/single_provider_with_vk_retry_cache"):
+        t3_params.append((client, i))
+
+    @pytest.mark.parametrize("client, config", t3_params)
+    def test_method_single_provider_with_vk_retry_cache(
+        self, client: Any, config: Dict
+    ) -> None:
+        # 1. Make a new cache entry.
+        # 2. Make a cache hit and see if the response contains the data.
+        random_id = str(uuid4())
+        metadata = self.get_metadata()
+        portkey = client(
+            base_url=base_url,
+            api_key=api_key,
+            trace_id=random_id,
+            virtual_key=virtual_api_key,
+            metadata=metadata,
+            config=config,
+        )
+
+        audio = portkey.audio.speech.create(
+            model="tts-1",
+            voice="alloy",
+            input="The quick brown fox jumped over the lazy dog.",
+        )
+
+        assert isinstance(audio.content, bytes) is True
+        # Sleeping for the cache to reflect across the workers. The cache has an
+        # eventual consistency and not immediate consistency.
+        sleep(20)
+        portkey_2 = client(
+            base_url=base_url,
+            api_key=api_key,
+            trace_id=random_id,
+            virtual_key=virtual_api_key,
+            metadata=metadata,
+            config=config,
+        )
+
+        cached_audio = portkey_2.audio.speech.create(
+            model="tts-1",
+            voice="alloy",
+            input="The quick brown fox jumped over the lazy dog.",
+        )
+
+        assert isinstance(cached_audio.content, bytes) is True
diff --git a/tests/test_audio_transcript.py b/tests/test_audio_transcript.py
new file mode 100644
index 0000000..532e464
--- /dev/null
+++ b/tests/test_audio_transcript.py
@@ -0,0 +1,155 @@
+from __future__ import annotations
+import inspect
+
+import os
+from os import walk
+from typing import Any, Dict, List
+import pytest
+from uuid import uuid4
+from portkey_ai import Portkey
+from time import sleep
+from dotenv import load_dotenv
+from .utils import read_json_file
+
+
+load_dotenv(override=True)
+
+base_url = os.environ.get("PORTKEY_BASE_URL")
+api_key = os.environ.get("PORTKEY_API_KEY")
+virtual_api_key = os.environ.get("OPENAI_VIRTUAL_KEY")
+
+CONFIGS_PATH = "./tests/configs/audio"
+
+
+def get_configs(folder_path) -> List[Dict[str, Any]]:
+    config_files = []
+    for dirpath, _, file_names in walk(folder_path):
+        for f in file_names:
+            config_files.append(read_json_file(os.path.join(dirpath, f)))
+
+    return config_files
+
+
+class TestAudioTranscript:
+    client = Portkey
+    parametrize = pytest.mark.parametrize("client", [client], ids=["strict"])
+    models = read_json_file("./tests/models.json")
+
+    def get_metadata(self):
+        return {
+            "case": "testing",
+            "function": inspect.currentframe().f_back.f_code.co_name,
+            "random_id": str(uuid4()),
+        }
+
+    # --------------------------
+    # Test-4
+    t4_params = []
+    t4 = []
+    for k, v in models.items():
+        if k == "openai":
+            for i in v["audio"]:
+                t4.append((client, k, os.environ.get(v["env_variable"]), i))
+
+    t4_params.extend(t4)
+
+    @pytest.mark.parametrize("client, provider, auth, model", t4_params)
+    def test_method_single_with_vk_and_provider(
+        self, client: Any, provider: str, auth: str, model
+    ) -> None:
+        portkey = client(
+            base_url=base_url,
+            api_key=api_key,
+            provider=f"{provider}",
+            Authorization=f"{auth}",
+            trace_id=str(uuid4()),
+            metadata=self.get_metadata(),
+        )
+
+        audio_file = open(
+            "speech.mp3",
+            "rb",
+        )
+
+        transcript = portkey.audio.transcriptions.create(
+            model="whisper-1", file=audio_file
+        )
+
+        assert isinstance(transcript.text, str) is True
+
+    # --------------------------
+    # Test-5
+    t5_params = []
+    for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"):
+        t5_params.append((client, i))
+
+    @pytest.mark.parametrize("client, config", t5_params)
+    def test_method_single_with_basic_config(self, client: Any, config: Dict) -> None:
+        portkey = client(
+            base_url=base_url,
+            api_key=api_key,
+            trace_id=str(uuid4()),
+            metadata=self.get_metadata(),
+            config=config,
+        )
+        audio_file = open(
+            "speech.mp3",
+            "rb",
+        )
+
+        transcript = portkey.audio.transcriptions.create(
+            model="whisper-1", file=audio_file
+        )
+
+        assert isinstance(transcript.text, str) is True
+
+    # --------------------------
+    # Test-6
+    t6_params = []
+    for i in get_configs(f"{CONFIGS_PATH}/single_provider_with_vk_retry_cache"):
+        t6_params.append((client, i))
+
+    @pytest.mark.parametrize("client, config", t6_params)
+    def test_method_single_provider_with_vk_retry_cache(
+        self, client: Any, config: Dict
+    ) -> None:
+        # 1. Make a new cache entry.
+        # 2. Make a cache hit and see if the response contains the data.
+        random_id = str(uuid4())
+        metadata = self.get_metadata()
+        portkey = client(
+            base_url=base_url,
+            api_key=api_key,
+            trace_id=random_id,
+            virtual_key=virtual_api_key,
+            metadata=metadata,
+            config=config,
+        )
+
+        audio_file = open(
+            "speech.mp3",
+            "rb",
+        )
+
+        transcript = portkey.audio.transcriptions.create(
+            model="whisper-1", file=audio_file
+        )
+
+        assert isinstance(transcript.text, str) is True
+        # Sleeping for the cache to reflect across the workers. The cache has an
+        # eventual consistency and not immediate consistency.
+        sleep(20)
+        portkey_2 = client(
+            base_url=base_url,
+            api_key=api_key,
+            trace_id=random_id,
+            virtual_key=virtual_api_key,
+            metadata=metadata,
+            config=config,
+        )
+
+        cached_transcript = portkey_2.audio.transcriptions.create(
+            model="whisper-1", file=audio_file
+        )
+
+        assert isinstance(cached_transcript.text, str) is True
diff --git a/tests/test_audio_translation.py b/tests/test_audio_translation.py
new file mode 100644
index 0000000..4667f6e
--- /dev/null
+++ b/tests/test_audio_translation.py
@@ -0,0 +1,155 @@
+from __future__ import annotations
+import inspect
+
+import os
+from os import walk
+from typing import Any, Dict, List
+import pytest
+from uuid import uuid4
+from portkey_ai import Portkey
+from time import sleep
+from dotenv import load_dotenv
+from .utils import read_json_file
+
+
+load_dotenv(override=True)
+
+base_url = os.environ.get("PORTKEY_BASE_URL")
+api_key = os.environ.get("PORTKEY_API_KEY")
+virtual_api_key = os.environ.get("OPENAI_VIRTUAL_KEY")
+
+CONFIGS_PATH = "./tests/configs/audio"
+
+
+def get_configs(folder_path) -> List[Dict[str, Any]]:
+    config_files = []
+    for dirpath, _, file_names in walk(folder_path):
+        for f in file_names:
+            config_files.append(read_json_file(os.path.join(dirpath, f)))
+
+    return config_files
+
+
+class TestAudioTranslations:
+    client = Portkey
+    parametrize = pytest.mark.parametrize("client", [client], ids=["strict"])
+    models = read_json_file("./tests/models.json")
+
+    def get_metadata(self):
+        return {
+            "case": "testing",
+            "function": inspect.currentframe().f_back.f_code.co_name,
+            "random_id": str(uuid4()),
+        }
+
+    # --------------------------
+    # Test-1
+    t4_params = []
+    t4 = []
+    for k, v in models.items():
+        if k == "openai":
+            for i in v["audio"]:
+                t4.append((client, k, os.environ.get(v["env_variable"]), i))
+
+    t4_params.extend(t4)
+
+    @pytest.mark.parametrize("client, provider, auth, model", t4_params)
+    def test_method_single_with_vk_and_provider(
+        self, client: Any, provider: str, auth: str, model
+    ) -> None:
+        portkey = client(
+            base_url=base_url,
+            api_key=api_key,
+            provider=f"{provider}",
+            Authorization=f"{auth}",
+            trace_id=str(uuid4()),
+            metadata=self.get_metadata(),
+        )
+
+        audio_file = open(
+            "speech.mp3",
+            "rb",
+        )
+
+        translations = portkey.audio.translations.create(
+            model="whisper-1", file=audio_file
+        )
+
+        assert isinstance(translations.text, str) is True
+
+    # --------------------------
+    # Test-2
+    t5_params = []
+    for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"):
+        t5_params.append((client, i))
+
+    @pytest.mark.parametrize("client, config", t5_params)
+    def test_method_single_with_basic_config(self, client: Any, config: Dict) -> None:
+        portkey = client(
+            base_url=base_url,
+            api_key=api_key,
+            trace_id=str(uuid4()),
+            metadata=self.get_metadata(),
+            config=config,
+        )
+        audio_file = open(
+            "speech.mp3",
+            "rb",
+        )
+
+        translations = portkey.audio.translations.create(
+            model="whisper-1", file=audio_file
+        )
+
+        assert isinstance(translations.text, str) is True
+
+    # --------------------------
+    # Test-3
+    t6_params = []
+    for i in get_configs(f"{CONFIGS_PATH}/single_provider_with_vk_retry_cache"):
+        t6_params.append((client, i))
+
+    @pytest.mark.parametrize("client, config", t6_params)
+    def test_method_single_provider_with_vk_retry_cache(
+        self, client: Any, config: Dict
+    ) -> None:
+        # 1. Make a new cache entry.
+        # 2. Make a cache hit and see if the response contains the data.
+        random_id = str(uuid4())
+        metadata = self.get_metadata()
+        portkey = client(
+            base_url=base_url,
+            api_key=api_key,
+            trace_id=random_id,
+            virtual_key=virtual_api_key,
+            metadata=metadata,
+            config=config,
+        )
+
+        audio_file = open(
+            "speech.mp3",
+            "rb",
+        )
+
+        translations = portkey.audio.translations.create(
+            model="whisper-1", file=audio_file
+        )
+
+        assert isinstance(translations.text, str) is True
+        # Sleeping for the cache to reflect across the workers. The cache has an
+        # eventual consistency and not immediate consistency.
+        sleep(20)
+        portkey_2 = client(
+            base_url=base_url,
+            api_key=api_key,
+            trace_id=random_id,
+            virtual_key=virtual_api_key,
+            metadata=metadata,
+            config=config,
+        )
+
+        cached_translations = portkey_2.audio.translations.create(
+            model="whisper-1", file=audio_file
+        )
+
+        assert isinstance(cached_translations.text, str) is True
diff --git a/tests/test_moderations.py b/tests/test_moderations.py
new file mode 100644
index 0000000..56ef453
--- /dev/null
+++ b/tests/test_moderations.py
@@ -0,0 +1,140 @@
+from __future__ import annotations
+import inspect
+
+import os
+from os import walk
+from typing import Any, Dict, List
+import pytest
+from uuid import uuid4
+from portkey_ai import Portkey
+from time import sleep
+from dotenv import load_dotenv
+from .utils import read_json_file
+
+
+load_dotenv(override=True)
+
+base_url = os.environ.get("PORTKEY_BASE_URL")
+api_key = os.environ.get("PORTKEY_API_KEY")
+virtual_api_key = os.environ.get("OPENAI_VIRTUAL_KEY")
+
+CONFIGS_PATH = "./tests/configs/moderations"
+
+
+def get_configs(folder_path) -> List[Dict[str, Any]]:
+    config_files = []
+    for dirpath, _, file_names in walk(folder_path):
+        for f in file_names:
+            config_files.append(read_json_file(os.path.join(dirpath, f)))
+
+    return config_files
+
+
+class TestModerations:
+    client = Portkey
+    parametrize = pytest.mark.parametrize("client", [client], ids=["strict"])
+    models = read_json_file("./tests/models.json")
+
+    def get_metadata(self):
+        return {
+            "case": "testing",
+            "function": inspect.currentframe().f_back.f_code.co_name,
+            "random_id": str(uuid4()),
+        }
+
+    # --------------------------
+    # Test-1
+    t1_params = []
+    t = []
+    for k, v in models.items():
+        for i in v["chat"]:
+            t.append((client, k, os.environ.get(v["env_variable"]), i))
+
+    t1_params.extend(t)
+
+    @pytest.mark.parametrize("client, provider, auth, model", t1_params)
+    def test_method_single_with_vk_and_provider(
+        self, client: Any, provider: str, auth: str, model
+    ) -> None:
+        portkey = client(
+            base_url=base_url,
+            api_key=api_key,
+            provider=f"{provider}",
+            Authorization=f"{auth}",
+            trace_id=str(uuid4()),
+            metadata=self.get_metadata(),
+        )
+
+        moderations = portkey.moderations.create(
+            input="I want to kill them.", model="text-moderation-stable"
+        )
+
+        assert isinstance(moderations.id, str) is True
+
+    # --------------------------
+    # Test-2
+    t2_params = []
+    for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"):
+        t2_params.append((client, i))
+
+    @pytest.mark.parametrize("client, config", t2_params)
+    def test_method_single_with_basic_config(self, client: Any, config: Dict) -> None:
+        portkey = client(
+            base_url=base_url,
+            api_key=api_key,
+            trace_id=str(uuid4()),
+            metadata=self.get_metadata(),
+            config=config,
+        )
+
+        moderations = portkey.moderations.create(
+            input="I want to kill them.", model="text-moderation-stable"
+        )
+
+        assert isinstance(moderations.id, str) is True
+
+    # --------------------------
+    # Test-3
+    t3_params = []
+    for i in get_configs(f"{CONFIGS_PATH}/single_provider_with_vk_retry_cache"):
+        t3_params.append((client, i))
+
+    @pytest.mark.parametrize("client, config", t3_params)
+    def test_method_single_provider_with_vk_retry_cache(
+        self, client: Any, config: Dict
+    ) -> None:
+        # 1. Make a new cache entry.
+        # 2. Make a cache hit and see if the response contains the data.
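+        # The repeat call below should be served from the cache.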
+        random_id = str(uuid4())
+        metadata = self.get_metadata()
+        portkey = client(
+            base_url=base_url,
+            api_key=api_key,
+            trace_id=random_id,
+            virtual_key=virtual_api_key,
+            metadata=metadata,
+            config=config,
+        )
+
+        moderations = portkey.moderations.create(
+            input="I want to kill them.", model="text-moderation-stable"
+        )
+
+        assert isinstance(moderations.id, str) is True
+        # Sleeping for the cache to reflect across the workers. The cache has an
+        # eventual consistency and not immediate consistency.
+        sleep(20)
+        portkey_2 = client(
+            base_url=base_url,
+            api_key=api_key,
+            trace_id=random_id,
+            virtual_key=virtual_api_key,
+            metadata=metadata,
+            config=config,
+        )
+
+        cached_moderations = portkey_2.moderations.create(
+            input="I want to kill them.", model="text-moderation-stable"
+        )
+
+        assert isinstance(cached_moderations.id, str) is True